diff --git a/.ci/docker-compose-file/.env b/.ci/docker-compose-file/.env index 73ec47d00..1b837aea3 100644 --- a/.ci/docker-compose-file/.env +++ b/.ci/docker-compose-file/.env @@ -10,7 +10,7 @@ CASSANDRA_TAG=3.11 MINIO_TAG=RELEASE.2023-03-20T20-16-18Z OPENTS_TAG=9aa7f88 KINESIS_TAG=2.1 -HSTREAMDB_TAG=v0.16.1 +HSTREAMDB_TAG=v0.19.3 HSTREAMDB_ZK_TAG=3.8.1 MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server diff --git a/.ci/docker-compose-file/docker-compose-rabbitmq.yaml b/.ci/docker-compose-file/docker-compose-rabbitmq.yaml index d362eb4e0..03ff12f6f 100644 --- a/.ci/docker-compose-file/docker-compose-rabbitmq.yaml +++ b/.ci/docker-compose-file/docker-compose-rabbitmq.yaml @@ -9,10 +9,12 @@ services: expose: - "15672" - "5672" + - "5671" # We don't want to take ports from the host - # ports: + #ports: # - "15672:15672" # - "5672:5672" + # - "5671:5671" volumes: - ./certs/ca.crt:/opt/certs/ca.crt - ./certs/server.crt:/opt/certs/server.crt diff --git a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml index d648d9d78..568d9129c 100644 --- a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml +++ b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml @@ -39,6 +39,10 @@ services: - 19042:9042 # Cassandra TLS - 19142:9142 + # Cassandra No Auth + - 19043:9043 + # Cassandra TLS No Auth + - 19143:9143 # S3 - 19000:19000 # S3 TLS diff --git a/.ci/docker-compose-file/toxiproxy.json b/.ci/docker-compose-file/toxiproxy.json index c58474039..103bae924 100644 --- a/.ci/docker-compose-file/toxiproxy.json +++ b/.ci/docker-compose-file/toxiproxy.json @@ -96,6 +96,18 @@ "upstream": "cassandra:9142", "enabled": true }, + { + "name": "cassa_no_auth_tcp", + "listen": "0.0.0.0:9043", + "upstream": "cassandra_noauth:9042", + "enabled": true + }, + { + "name": "cassa_no_auth_tls", + "listen": "0.0.0.0:9143", + "upstream": "cassandra_noauth:9142", + "enabled": true + }, { "name": "sqlserver", "listen": "0.0.0.0:1433", diff --git 
a/.github/actions/package-macos/action.yaml b/.github/actions/package-macos/action.yaml index bae335cf0..1553576b2 100644 --- a/.github/actions/package-macos/action.yaml +++ b/.github/actions/package-macos/action.yaml @@ -51,7 +51,7 @@ runs: echo "SELF_HOSTED=false" >> $GITHUB_OUTPUT ;; esac - - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 id: cache if: steps.prepare.outputs.SELF_HOSTED != 'true' with: diff --git a/.github/actions/prepare-jmeter/action.yaml b/.github/actions/prepare-jmeter/action.yaml index 0d12b1e36..e0c279120 100644 --- a/.github/actions/prepare-jmeter/action.yaml +++ b/.github/actions/prepare-jmeter/action.yaml @@ -8,7 +8,7 @@ inputs: runs: using: composite steps: - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: emqx-docker path: /tmp @@ -31,7 +31,7 @@ runs: architecture: x64 # (x64 or x86) - defaults to x64 # https://github.com/actions/setup-java/blob/main/docs/switching-to-v2.md distribution: 'zulu' - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: apache-jmeter.tgz - name: install jmeter diff --git a/.github/workflows/_pr_entrypoint.yaml b/.github/workflows/_pr_entrypoint.yaml index 86e676ebe..b37a31eac 100644 --- a/.github/workflows/_pr_entrypoint.yaml +++ b/.github/workflows/_pr_entrypoint.yaml @@ -144,11 +144,11 @@ jobs: echo "PROFILE=${PROFILE}" | tee -a .env echo "PKG_VSN=$(./pkg-vsn.sh ${PROFILE})" | tee -a .env zip -ryq -x@.github/workflows/.zipignore $PROFILE.zip . 
- - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: ${{ matrix.profile }} path: ${{ matrix.profile }}.zip - retention-days: 1 + retention-days: 7 run_emqx_app_tests: needs: diff --git a/.github/workflows/_push-entrypoint.yaml b/.github/workflows/_push-entrypoint.yaml index 8caece6eb..6c6745eef 100644 --- a/.github/workflows/_push-entrypoint.yaml +++ b/.github/workflows/_push-entrypoint.yaml @@ -28,7 +28,6 @@ jobs: profile: ${{ steps.parse-git-ref.outputs.profile }} release: ${{ steps.parse-git-ref.outputs.release }} latest: ${{ steps.parse-git-ref.outputs.latest }} - version: ${{ steps.parse-git-ref.outputs.version }} ct-matrix: ${{ steps.matrix.outputs.ct-matrix }} ct-host: ${{ steps.matrix.outputs.ct-host }} ct-docker: ${{ steps.matrix.outputs.ct-docker }} @@ -46,18 +45,16 @@ jobs: shell: bash run: | git config --global --add safe.directory "$GITHUB_WORKSPACE" - - name: Detect emqx profile and version + - name: Detect emqx profile id: parse-git-ref run: | JSON="$(./scripts/parse-git-ref.sh $GITHUB_REF)" PROFILE=$(echo "$JSON" | jq -cr '.profile') RELEASE=$(echo "$JSON" | jq -cr '.release') LATEST=$(echo "$JSON" | jq -cr '.latest') - VERSION="$(./pkg-vsn.sh "$PROFILE")" echo "profile=$PROFILE" | tee -a $GITHUB_OUTPUT echo "release=$RELEASE" | tee -a $GITHUB_OUTPUT echo "latest=$LATEST" | tee -a $GITHUB_OUTPUT - echo "version=$VERSION" | tee -a $GITHUB_OUTPUT - name: Build matrix id: matrix run: | @@ -91,7 +88,7 @@ jobs: uses: ./.github/workflows/build_packages.yaml with: profile: ${{ needs.prepare.outputs.profile }} - publish: ${{ needs.prepare.outputs.release }} + publish: true otp_vsn: ${{ needs.prepare.outputs.otp_vsn }} elixir_vsn: ${{ needs.prepare.outputs.elixir_vsn }} builder_vsn: ${{ needs.prepare.outputs.builder_vsn }} @@ -104,8 +101,7 @@ jobs: uses: ./.github/workflows/build_and_push_docker_images.yaml with: profile: ${{ 
needs.prepare.outputs.profile }} - version: ${{ needs.prepare.outputs.version }} - publish: ${{ needs.prepare.outputs.release }} + publish: true latest: ${{ needs.prepare.outputs.latest }} # TODO: revert this back to needs.prepare.outputs.otp_vsn when OTP 26 bug is fixed otp_vsn: 25.3.2-2 @@ -153,7 +149,7 @@ jobs: echo "PROFILE=${PROFILE}" | tee -a .env echo "PKG_VSN=$(./pkg-vsn.sh ${PROFILE})" | tee -a .env zip -ryq -x@.github/workflows/.zipignore $PROFILE.zip . - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: ${{ matrix.profile }} path: ${{ matrix.profile }}.zip diff --git a/.github/workflows/build_and_push_docker_images.yaml b/.github/workflows/build_and_push_docker_images.yaml index 1ab553840..0c123b0c1 100644 --- a/.github/workflows/build_and_push_docker_images.yaml +++ b/.github/workflows/build_and_push_docker_images.yaml @@ -10,15 +10,12 @@ on: profile: required: true type: string - version: - required: true - type: string latest: required: true type: string publish: required: true - type: string + type: boolean otp_vsn: required: true type: string @@ -45,8 +42,6 @@ on: required: false type: string default: 'emqx' - version: - required: true latest: required: false type: boolean @@ -72,8 +67,11 @@ permissions: contents: read jobs: - docker: - runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }} + build: + runs-on: ${{ github.repository_owner == 'emqx' && fromJSON(format('["self-hosted","ephemeral","linux","{0}"]', matrix.arch)) || 'ubuntu-22.04' }} + container: "ghcr.io/emqx/emqx-builder/${{ inputs.builder_vsn }}:${{ inputs.elixir_vsn }}-${{ inputs.otp_vsn }}-debian11" + outputs: + PKG_VSN: ${{ steps.build.outputs.PKG_VSN }} strategy: fail-fast: false @@ -81,54 +79,130 @@ jobs: profile: - ${{ inputs.profile }} - ${{ inputs.profile }}-elixir - registry: - - 
'docker.io' - - 'public.ecr.aws' - exclude: - - profile: emqx-enterprise - registry: 'public.ecr.aws' - - profile: emqx-enterprise-elixir - registry: 'public.ecr.aws' + arch: + - x64 + - arm64 steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - ref: ${{ github.event.inputs.ref }} - fetch-depth: 0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.inputs.ref }} + - run: git config --global --add safe.directory "$PWD" + - name: build release tarball + id: build + run: | + make ${{ matrix.profile }}-tgz + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + with: + name: "${{ matrix.profile }}-${{ matrix.arch }}.tar.gz" + path: "_packages/emqx*/emqx-*.tar.gz" + retention-days: 7 + overwrite: true + if-no-files-found: error - - uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0 - - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 + docker: + runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }} + needs: + - build + defaults: + run: + shell: bash - - name: Login to hub.docker.com - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 - if: matrix.registry == 'docker.io' - with: - username: ${{ secrets.DOCKER_HUB_USER }} - password: ${{ secrets.DOCKER_HUB_TOKEN }} + strategy: + fail-fast: false + matrix: + profile: + - ${{ inputs.profile }} + - ${{ inputs.profile }}-elixir - - name: Login to AWS ECR - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 - if: matrix.registry == 'public.ecr.aws' - with: - registry: public.ecr.aws - username: ${{ secrets.AWS_ACCESS_KEY_ID }} - password: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - ecr: true + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.inputs.ref }} 
+ - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 + with: + pattern: "${{ matrix.profile }}-*.tar.gz" + path: _packages + merge-multiple: true - - name: Build docker image - env: - PROFILE: ${{ matrix.profile }} - DOCKER_REGISTRY: ${{ matrix.registry }} - DOCKER_ORG: ${{ github.repository_owner }} - DOCKER_LATEST: ${{ inputs.latest }} - DOCKER_PUSH: ${{ inputs.publish == 'true' || inputs.publish || github.repository_owner != 'emqx' }} - DOCKER_BUILD_NOCACHE: true - DOCKER_PLATFORMS: linux/amd64,linux/arm64 - EMQX_RUNNER: 'debian:11-slim' - EMQX_DOCKERFILE: 'deploy/docker/Dockerfile' - PKG_VSN: ${{ inputs.version }} - EMQX_BUILDER_VERSION: ${{ inputs.builder_vsn }} - EMQX_BUILDER_OTP: ${{ inputs.otp_vsn }} - EMQX_BUILDER_ELIXIR: ${{ inputs.elixir_vsn }} - run: | - ./build ${PROFILE} docker + - name: Move artifacts to root directory + env: + PROFILE: ${{ inputs.profile }} + run: | + ls -lR _packages/$PROFILE + mv _packages/$PROFILE/*.tar.gz ./ + - name: Enable containerd image store on Docker Engine + run: | + echo "$(jq '. 
+= {"features": {"containerd-snapshotter": true}}' /etc/docker/daemon.json)" > daemon.json + sudo mv daemon.json /etc/docker/daemon.json + sudo systemctl restart docker + + - uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0 + - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 + + - name: Login to hub.docker.com + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 + if: inputs.publish || github.repository_owner != 'emqx' + with: + username: ${{ secrets.DOCKER_HUB_USER }} + password: ${{ secrets.DOCKER_HUB_TOKEN }} + + - name: Login to AWS ECR + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 + if: inputs.publish || github.repository_owner != 'emqx' + with: + registry: public.ecr.aws + username: ${{ secrets.AWS_ACCESS_KEY_ID }} + password: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + ecr: true + + - name: Build docker image + env: + PROFILE: ${{ matrix.profile }} + DOCKER_REGISTRY: 'docker.io,public.ecr.aws' + DOCKER_ORG: ${{ github.repository_owner }} + DOCKER_LATEST: ${{ inputs.latest }} + DOCKER_PUSH: false + DOCKER_BUILD_NOCACHE: true + DOCKER_PLATFORMS: linux/amd64,linux/arm64 + DOCKER_LOAD: true + EMQX_RUNNER: 'public.ecr.aws/debian/debian:11-slim@sha256:22cfb3c06a7dd5e18d86123a73405664475b9d9fa209cbedcf4c50a25649cc74' + EMQX_DOCKERFILE: 'deploy/docker/Dockerfile' + PKG_VSN: ${{ needs.build.outputs.PKG_VSN }} + EMQX_BUILDER_VERSION: ${{ inputs.builder_vsn }} + EMQX_BUILDER_OTP: ${{ inputs.otp_vsn }} + EMQX_BUILDER_ELIXIR: ${{ inputs.elixir_vsn }} + EMQX_SOURCE_TYPE: tgz + run: | + ./build ${PROFILE} docker + cat .emqx_docker_image_tags + echo "_EMQX_DOCKER_IMAGE_TAG=$(head -n 1 .emqx_docker_image_tags)" >> $GITHUB_ENV + + - name: smoke test + timeout-minutes: 1 + run: | + for tag in $(cat .emqx_docker_image_tags); do + CID=$(docker run -d -P $tag) + HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 
0).HostPort}}' $CID) + ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT + docker rm -f $CID + done + - name: dashboard tests + working-directory: ./scripts/ui-tests + timeout-minutes: 5 + run: | + set -eu + docker compose up --abort-on-container-exit --exit-code-from selenium + docker compose rm -fsv + - name: test node_dump + run: | + CID=$(docker run -d -P $_EMQX_DOCKER_IMAGE_TAG) + docker exec -t -u root -w /root $CID bash -c 'apt-get -y update && apt-get -y install net-tools' + docker exec -t -u root $CID node_dump + docker rm -f $CID + - name: push images + if: inputs.publish || github.repository_owner != 'emqx' + run: | + for tag in $(cat .emqx_docker_image_tags); do + docker push $tag + done diff --git a/.github/workflows/build_docker_for_test.yaml b/.github/workflows/build_docker_for_test.yaml index ccff642f9..3ac122575 100644 --- a/.github/workflows/build_docker_for_test.yaml +++ b/.github/workflows/build_docker_for_test.yaml @@ -47,17 +47,17 @@ jobs: id: build run: | make ${EMQX_NAME}-docker - echo "EMQX_IMAGE_TAG=$(cat .docker_image_tag)" >> $GITHUB_ENV + echo "_EMQX_DOCKER_IMAGE_TAG=$(head -n 1 .emqx_docker_image_tags)" >> $GITHUB_ENV - name: smoke test run: | - CID=$(docker run -d --rm -P $EMQX_IMAGE_TAG) + CID=$(docker run -d --rm -P $_EMQX_DOCKER_IMAGE_TAG) HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' $CID) ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT docker stop $CID - name: export docker image run: | - docker save $EMQX_IMAGE_TAG | gzip > $EMQX_NAME-docker-$PKG_VSN.tar.gz - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + docker save $_EMQX_DOCKER_IMAGE_TAG | gzip > $EMQX_NAME-docker-$PKG_VSN.tar.gz + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: "${{ env.EMQX_NAME }}-docker" path: "${{ env.EMQX_NAME }}-docker-${{ env.PKG_VSN }}.tar.gz" diff --git a/.github/workflows/build_packages.yaml 
b/.github/workflows/build_packages.yaml index 31f39d551..0298acedf 100644 --- a/.github/workflows/build_packages.yaml +++ b/.github/workflows/build_packages.yaml @@ -12,7 +12,7 @@ on: type: string publish: required: true - type: string + type: boolean otp_vsn: required: true type: string @@ -74,12 +74,12 @@ jobs: matrix: profile: - ${{ inputs.profile }} - otp: - - ${{ inputs.otp_vsn }} os: - macos-12 - macos-12-arm64 - macos-13 + otp: + - ${{ inputs.otp_vsn }} runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -95,30 +95,20 @@ jobs: apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }} apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }} apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }} - - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: success() with: - name: ${{ matrix.profile }} + name: ${{ matrix.profile }}-${{ matrix.os }}-${{ matrix.otp }} path: _packages/${{ matrix.profile }}/ retention-days: 7 linux: - runs-on: [self-hosted, ephemeral, linux, "${{ matrix.arch }}"] - # always run in builder container because the host might have the wrong OTP version etc. - # otherwise buildx.sh does not run docker if arch and os matches the target arch and os. 
- container: - image: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}" - + runs-on: [self-hosted, ephemeral, linux, "${{ matrix.arch == 'arm64' && 'arm64' || 'x64' }}"] strategy: fail-fast: false matrix: profile: - ${{ inputs.profile }} - otp: - - ${{ inputs.otp_vsn }} - arch: - - x64 - - arm64 os: - ubuntu22.04 - ubuntu20.04 @@ -131,70 +121,53 @@ jobs: - el7 - amzn2 - amzn2023 + arch: + - amd64 + - arm64 + with_elixir: + - 'no' + otp: + - ${{ inputs.otp_vsn }} builder: - ${{ inputs.builder_vsn }} elixir: - ${{ inputs.elixir_vsn }} - with_elixir: - - 'no' include: - profile: ${{ inputs.profile }} - otp: ${{ inputs.otp_vsn }} - arch: x64 os: ubuntu22.04 + arch: amd64 + with_elixir: 'yes' + otp: ${{ inputs.otp_vsn }} builder: ${{ inputs.builder_vsn }} elixir: ${{ inputs.elixir_vsn }} - with_elixir: 'yes' defaults: run: shell: bash steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ github.event.inputs.ref }} fetch-depth: 0 - - - name: fix workdir - run: | - set -eu - git config --global --add safe.directory "$GITHUB_WORKSPACE" - # Align path for CMake caches - if [ ! 
"$PWD" = "/emqx" ]; then - ln -s $PWD /emqx - cd /emqx - fi - echo "pwd is $PWD" - - name: build emqx packages env: PROFILE: ${{ matrix.profile }} + ARCH: ${{ matrix.arch }} + OS: ${{ matrix.os }} IS_ELIXIR: ${{ matrix.with_elixir }} - ACLOCAL_PATH: "/usr/share/aclocal:/usr/local/share/aclocal" + BUILDER: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}" + BUILDER_SYSTEM: force_docker run: | - set -eu - if [ "${IS_ELIXIR:-}" == 'yes' ]; then - make "${PROFILE}-elixir-tgz" - else - make "${PROFILE}-tgz" - make "${PROFILE}-pkg" - fi - - name: test emqx packages - env: - PROFILE: ${{ matrix.profile }} - IS_ELIXIR: ${{ matrix.with_elixir }} - run: | - set -eu - if [ "${IS_ELIXIR:-}" == 'yes' ]; then - ./scripts/pkg-tests.sh "${PROFILE}-elixir-tgz" - else - ./scripts/pkg-tests.sh "${PROFILE}-tgz" - ./scripts/pkg-tests.sh "${PROFILE}-pkg" - fi - - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + ./scripts/buildx.sh \ + --profile $PROFILE \ + --arch $ARCH \ + --builder $BUILDER \ + --elixir $IS_ELIXIR \ + --pkgtype pkg + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - name: ${{ matrix.profile }} + name: ${{ matrix.profile }}-${{ matrix.os }}-${{ matrix.arch }}${{ matrix.with_elixir == 'yes' && '-elixir' || '' }}-${{ matrix.builder }}-${{ matrix.otp }}-${{ matrix.elixir }} path: _packages/${{ matrix.profile }}/ retention-days: 7 @@ -203,17 +176,18 @@ jobs: needs: - mac - linux - if: inputs.publish == 'true' || inputs.publish + if: inputs.publish strategy: fail-fast: false matrix: profile: - ${{ inputs.profile }} steps: - - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: - name: ${{ matrix.profile }} + pattern: "${{ matrix.profile }}-*" path: packages/${{ matrix.profile }} + merge-multiple: true - name: install dos2unix 
run: sudo apt-get update -y && sudo apt install -y dos2unix - name: get packages @@ -226,7 +200,7 @@ jobs: echo "$(cat $var.sha256) $var" | sha256sum -c || exit 1 done cd - - - uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1 + - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/build_packages_cron.yaml b/.github/workflows/build_packages_cron.yaml index 56d5c37f2..af969e8f7 100644 --- a/.github/workflows/build_packages_cron.yaml +++ b/.github/workflows/build_packages_cron.yaml @@ -66,14 +66,14 @@ jobs: set -eu ./scripts/pkg-tests.sh "${PROFILE}-tgz" ./scripts/pkg-tests.sh "${PROFILE}-pkg" - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: success() with: name: ${{ matrix.profile[0] }}-${{ matrix.os }} path: _packages/${{ matrix.profile[0] }}/ retention-days: 7 - name: Send notification to Slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 if: failure() env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} @@ -111,14 +111,14 @@ jobs: apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }} apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }} apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }} - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: success() with: name: ${{ matrix.profile }}-${{ matrix.os }} path: _packages/${{ matrix.profile }}/ retention-days: 7 - name: Send notification to Slack - uses: 
slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 if: failure() env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/build_slim_packages.yaml b/.github/workflows/build_slim_packages.yaml index 4b9ca76b9..b3b556ba7 100644 --- a/.github/workflows/build_slim_packages.yaml +++ b/.github/workflows/build_slim_packages.yaml @@ -88,13 +88,13 @@ jobs: run: | make ${EMQX_NAME}-elixir-pkg ./scripts/pkg-tests.sh ${EMQX_NAME}-elixir-pkg - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: "${{ matrix.profile[0] }}-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}-${{ matrix.profile[3] }}-${{ matrix.profile[4] }}" path: _packages/${{ matrix.profile[0] }}/* retention-days: 7 compression-level: 0 - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: "${{ matrix.profile[0] }}-schema-dump-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}-${{ matrix.profile[3] }}-${{ matrix.profile[4] }}" path: | @@ -128,7 +128,7 @@ jobs: apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }} apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }} apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }} - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: ${{ matrix.os }} path: _packages/**/* diff --git a/.github/workflows/check_deps_integrity.yaml b/.github/workflows/check_deps_integrity.yaml index 30d788500..bbbcabf61 100644 --- a/.github/workflows/check_deps_integrity.yaml +++ b/.github/workflows/check_deps_integrity.yaml 
@@ -36,7 +36,7 @@ jobs: MIX_ENV: emqx-enterprise PROFILE: emqx-enterprise - name: Upload produced lock files - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: failure() with: name: produced_lock_files diff --git a/.github/workflows/performance_test.yaml b/.github/workflows/performance_test.yaml index ede8abf07..537705697 100644 --- a/.github/workflows/performance_test.yaml +++ b/.github/workflows/performance_test.yaml @@ -52,7 +52,7 @@ jobs: id: package_file run: | echo "PACKAGE_FILE=$(find _packages/emqx -name 'emqx-*.deb' | head -n 1 | xargs basename)" >> $GITHUB_OUTPUT - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: emqx-ubuntu20.04 path: _packages/emqx/${{ steps.package_file.outputs.PACKAGE_FILE }} @@ -66,7 +66,7 @@ jobs: steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PERF_TEST }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }} @@ -77,7 +77,7 @@ jobs: repository: emqx/tf-emqx-performance-test path: tf-emqx-performance-test ref: v0.2.3 - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: emqx-ubuntu20.04 path: tf-emqx-performance-test/ @@ -105,7 +105,7 @@ jobs: terraform destroy -auto-approve aws s3 sync --exclude '*' --include '*.tar.gz' s3://$TF_VAR_s3_bucket_name/$TF_VAR_bench_id . 
- name: Send notification to Slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 with: payload-file-path: "./tf-emqx-performance-test/slack-payload.json" - name: terraform destroy @@ -113,13 +113,13 @@ jobs: working-directory: ./tf-emqx-performance-test run: | terraform destroy -auto-approve - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: success() with: name: metrics path: | "./tf-emqx-performance-test/*.tar.gz" - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: failure() with: name: terraform @@ -137,7 +137,7 @@ jobs: steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PERF_TEST }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }} @@ -148,7 +148,7 @@ jobs: repository: emqx/tf-emqx-performance-test path: tf-emqx-performance-test ref: v0.2.3 - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: emqx-ubuntu20.04 path: tf-emqx-performance-test/ @@ -176,7 +176,7 @@ jobs: terraform destroy -auto-approve aws s3 sync --exclude '*' --include '*.tar.gz' s3://$TF_VAR_s3_bucket_name/$TF_VAR_bench_id . 
- name: Send notification to Slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 with: payload-file-path: "./tf-emqx-performance-test/slack-payload.json" - name: terraform destroy @@ -184,13 +184,13 @@ jobs: working-directory: ./tf-emqx-performance-test run: | terraform destroy -auto-approve - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: success() with: name: metrics path: | "./tf-emqx-performance-test/*.tar.gz" - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: failure() with: name: terraform @@ -209,7 +209,7 @@ jobs: steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PERF_TEST }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }} @@ -220,7 +220,7 @@ jobs: repository: emqx/tf-emqx-performance-test path: tf-emqx-performance-test ref: v0.2.3 - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: emqx-ubuntu20.04 path: tf-emqx-performance-test/ @@ -249,7 +249,7 @@ jobs: terraform destroy -auto-approve aws s3 sync --exclude '*' --include '*.tar.gz' s3://$TF_VAR_s3_bucket_name/$TF_VAR_bench_id . 
- name: Send notification to Slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 with: payload-file-path: "./tf-emqx-performance-test/slack-payload.json" - name: terraform destroy @@ -257,13 +257,13 @@ jobs: working-directory: ./tf-emqx-performance-test run: | terraform destroy -auto-approve - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: success() with: name: metrics path: | "./tf-emqx-performance-test/*.tar.gz" - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: failure() with: name: terraform @@ -283,7 +283,7 @@ jobs: steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PERF_TEST }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }} @@ -294,7 +294,7 @@ jobs: repository: emqx/tf-emqx-performance-test path: tf-emqx-performance-test ref: v0.2.3 - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: emqx-ubuntu20.04 path: tf-emqx-performance-test/ @@ -322,7 +322,7 @@ jobs: terraform destroy -auto-approve aws s3 sync --exclude '*' --include '*.tar.gz' s3://$TF_VAR_s3_bucket_name/$TF_VAR_bench_id . 
- name: Send notification to Slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 with: payload-file-path: "./tf-emqx-performance-test/slack-payload.json" - name: terraform destroy @@ -330,13 +330,13 @@ jobs: working-directory: ./tf-emqx-performance-test run: | terraform destroy -auto-approve - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: success() with: name: metrics path: | "./tf-emqx-performance-test/*.tar.gz" - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: failure() with: name: terraform diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 2f441af88..1bed80376 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -31,7 +31,7 @@ jobs: strategy: fail-fast: false steps: - - uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1 + - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/run_conf_tests.yaml b/.github/workflows/run_conf_tests.yaml index cac63910b..e82b29d3d 100644 --- a/.github/workflows/run_conf_tests.yaml +++ b/.github/workflows/run_conf_tests.yaml @@ -25,7 +25,7 @@ jobs: - emqx - emqx-enterprise steps: - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: ${{ matrix.profile }} - name: extract artifact @@ -40,7 +40,7 @@ jobs: if: failure() run: | cat _build/${{ 
matrix.profile }}/rel/emqx/logs/erlang.log.* - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: failure() with: name: conftest-logs-${{ matrix.profile }} diff --git a/.github/workflows/run_docker_tests.yaml b/.github/workflows/run_docker_tests.yaml index 9315ac815..3e7b7f9f3 100644 --- a/.github/workflows/run_docker_tests.yaml +++ b/.github/workflows/run_docker_tests.yaml @@ -37,14 +37,14 @@ jobs: steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: ${{ env.EMQX_NAME }}-docker path: /tmp - name: load docker image run: | - EMQX_IMAGE_TAG=$(docker load < /tmp/${EMQX_NAME}-docker-${PKG_VSN}.tar.gz 2>/dev/null | sed 's/Loaded image: //g') - echo "EMQX_IMAGE_TAG=$EMQX_IMAGE_TAG" >> $GITHUB_ENV + _EMQX_DOCKER_IMAGE_TAG=$(docker load < /tmp/${EMQX_NAME}-docker-${PKG_VSN}.tar.gz 2>/dev/null | sed 's/Loaded image: //g') + echo "_EMQX_DOCKER_IMAGE_TAG=$_EMQX_DOCKER_IMAGE_TAG" >> $GITHUB_ENV - name: dashboard tests working-directory: ./scripts/ui-tests run: | @@ -52,7 +52,7 @@ jobs: docker compose up --abort-on-container-exit --exit-code-from selenium - name: test two nodes cluster with proto_dist=inet_tls in docker run: | - ./scripts/test/start-two-nodes-in-docker.sh -P $EMQX_IMAGE_TAG $EMQX_IMAGE_OLD_VERSION_TAG + ./scripts/test/start-two-nodes-in-docker.sh -P $_EMQX_DOCKER_IMAGE_TAG $EMQX_IMAGE_OLD_VERSION_TAG HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' haproxy) ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT ./scripts/test/start-two-nodes-in-docker.sh -c @@ -84,7 +84,7 @@ jobs: - rlog steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - uses: 
actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: ${{ env.EMQX_NAME }}-docker path: /tmp @@ -113,4 +113,4 @@ jobs: - name: test node_dump run: | docker exec -t -u root node1.emqx.io bash -c 'apt-get -y update && apt-get -y install net-tools' - docker exec node1.emqx.io node_dump + docker exec -t -u root node1.emqx.io node_dump diff --git a/.github/workflows/run_emqx_app_tests.yaml b/.github/workflows/run_emqx_app_tests.yaml index f7c645aeb..a1eaca14f 100644 --- a/.github/workflows/run_emqx_app_tests.yaml +++ b/.github/workflows/run_emqx_app_tests.yaml @@ -58,7 +58,7 @@ jobs: ./rebar3 eunit -v --name 'eunit@127.0.0.1' ./rebar3 as standalone_test ct --name 'test@127.0.0.1' -v --readable=true ./rebar3 proper -d test/props - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: failure() with: name: logs-emqx-app-tests diff --git a/.github/workflows/run_helm_tests.yaml b/.github/workflows/run_helm_tests.yaml index da4ff0a68..1f6bdb521 100644 --- a/.github/workflows/run_helm_tests.yaml +++ b/.github/workflows/run_helm_tests.yaml @@ -45,7 +45,7 @@ jobs: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: path: source - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: "${{ env.EMQX_NAME }}-docker" path: /tmp diff --git a/.github/workflows/run_jmeter_tests.yaml b/.github/workflows/run_jmeter_tests.yaml index 86cbf220f..5919cb72d 100644 --- a/.github/workflows/run_jmeter_tests.yaml +++ b/.github/workflows/run_jmeter_tests.yaml @@ -16,7 +16,7 @@ jobs: steps: - name: Cache Jmeter id: cache-jmeter - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: 
actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: /tmp/apache-jmeter.tgz key: apache-jmeter-5.4.3.tgz @@ -35,7 +35,7 @@ jobs: else wget --no-verbose --no-check-certificate -O /tmp/apache-jmeter.tgz $ARCHIVE_URL fi - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: apache-jmeter.tgz path: /tmp/apache-jmeter.tgz @@ -86,7 +86,7 @@ jobs: echo "check logs failed" exit 1 fi - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: always() with: name: jmeter_logs-advanced_feat-${{ matrix.scripts_type }} @@ -153,7 +153,7 @@ jobs: if: failure() run: | docker compose -f .ci/docker-compose-file/docker-compose-emqx-cluster.yaml logs --no-color > ./jmeter_logs/emqx.log - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: always() with: name: jmeter_logs-pgsql_authn_authz-${{ matrix.scripts_type }}_${{ matrix.pgsql_tag }} @@ -213,7 +213,7 @@ jobs: echo "check logs failed" exit 1 fi - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: always() with: name: jmeter_logs-mysql_authn_authz-${{ matrix.scripts_type }}_${{ matrix.mysql_tag }} @@ -265,7 +265,7 @@ jobs: echo "check logs failed" exit 1 fi - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: always() with: name: jmeter_logs-JWT_authn-${{ matrix.scripts_type }} @@ -309,7 +309,7 @@ jobs: echo "check logs failed" exit 1 fi - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - 
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: always() with: name: jmeter_logs-built_in_database_authn_authz-${{ matrix.scripts_type }} diff --git a/.github/workflows/run_relup_tests.yaml b/.github/workflows/run_relup_tests.yaml index db8cef69d..f626f2c67 100644 --- a/.github/workflows/run_relup_tests.yaml +++ b/.github/workflows/run_relup_tests.yaml @@ -25,7 +25,7 @@ jobs: run: shell: bash steps: - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: emqx-enterprise - name: extract artifact @@ -45,7 +45,7 @@ jobs: run: | export PROFILE='emqx-enterprise' make emqx-enterprise-tgz - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 name: Upload built emqx and test scenario with: name: relup_tests_emqx_built @@ -72,7 +72,7 @@ jobs: run: shell: bash steps: - - uses: erlef/setup-beam@a34c98fd51e370b4d4981854aba1eb817ce4e483 # v1.17.0 + - uses: erlef/setup-beam@8b9cac4c04dbcd7bf8fd673e16f988225d89b09b # v1.17.2 with: otp-version: 26.2.1 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -88,7 +88,7 @@ jobs: ./configure make echo "$(pwd)/bin" >> $GITHUB_PATH - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 name: Download built emqx and test scenario with: name: relup_tests_emqx_built @@ -111,7 +111,7 @@ jobs: docker logs node2.emqx.io | tee lux_logs/emqx2.log exit 1 fi - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 name: Save debug data if: failure() with: diff --git a/.github/workflows/run_test_cases.yaml 
b/.github/workflows/run_test_cases.yaml index 8841c845b..8480d698c 100644 --- a/.github/workflows/run_test_cases.yaml +++ b/.github/workflows/run_test_cases.yaml @@ -41,7 +41,7 @@ jobs: container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04" steps: - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: ${{ matrix.profile }} - name: extract artifact @@ -64,7 +64,7 @@ jobs: CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} run: make proper - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: coverdata-${{ matrix.profile }}-${{ matrix.otp }} path: _build/test/cover @@ -83,7 +83,7 @@ jobs: shell: bash steps: - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: ${{ matrix.profile }} - name: extract artifact @@ -108,7 +108,7 @@ jobs: ENABLE_COVER_COMPILE: 1 CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }} run: ./scripts/ct/run.sh --ci --app ${{ matrix.app }} - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: coverdata-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }} path: _build/test/cover @@ -116,7 +116,7 @@ jobs: - name: compress logs if: failure() run: tar -czf logs.tar.gz _build/test/logs - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: failure() with: name: logs-${{ matrix.profile }}-${{ 
matrix.prefix }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }} @@ -138,7 +138,7 @@ jobs: shell: bash steps: - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: ${{ matrix.profile }} - name: extract artifact @@ -155,7 +155,7 @@ jobs: CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }} run: | make "${{ matrix.app }}-ct" - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: coverdata-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }} path: _build/test/cover @@ -164,7 +164,7 @@ jobs: - name: compress logs if: failure() run: tar -czf logs.tar.gz _build/test/logs - - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 if: failure() with: name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }} @@ -196,7 +196,7 @@ jobs: profile: - emqx-enterprise steps: - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: ${{ matrix.profile }} - name: extract artifact @@ -204,7 +204,7 @@ jobs: unzip -o -q ${{ matrix.profile }}.zip git config --global --add safe.directory "$GITHUB_WORKSPACE" - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 name: download coverdata with: pattern: coverdata-${{ matrix.profile }}-* diff --git a/.github/workflows/scorecard.yaml b/.github/workflows/scorecard.yaml index 7c0bdd00a..ee69835b5 100644 --- a/.github/workflows/scorecard.yaml 
+++ b/.github/workflows/scorecard.yaml @@ -39,7 +39,7 @@ jobs: publish_results: true - name: "Upload artifact" - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: SARIF file path: results.sarif @@ -47,6 +47,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@8e0b1c74b1d5a0077b04d064c76ee714d3da7637 # v2.22.1 + uses: github/codeql-action/upload-sarif@7e187e1c529d80bac7b87a16e7a792427f65cf02 # v2.22.1 with: sarif_file: results.sarif diff --git a/.github/workflows/spellcheck.yaml b/.github/workflows/spellcheck.yaml index 0517cad41..118ceb2dc 100644 --- a/.github/workflows/spellcheck.yaml +++ b/.github/workflows/spellcheck.yaml @@ -19,7 +19,7 @@ jobs: - emqx-enterprise runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }} steps: - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: pattern: "${{ matrix.profile }}-schema-dump-*-x64" merge-multiple: true diff --git a/.github/workflows/static_checks.yaml b/.github/workflows/static_checks.yaml index a092210c8..6168a393b 100644 --- a/.github/workflows/static_checks.yaml +++ b/.github/workflows/static_checks.yaml @@ -30,14 +30,14 @@ jobs: include: ${{ fromJson(inputs.ct-matrix) }} container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04" steps: - - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 + - uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: ${{ matrix.profile }} - name: extract artifact run: | unzip -o -q ${{ matrix.profile }}.zip git config --global --add safe.directory 
"$GITHUB_WORKSPACE" - - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: "emqx_dialyzer_${{ matrix.otp }}_plt" key: rebar3-dialyzer-plt-${{ matrix.profile }}-${{ matrix.otp }}-${{ hashFiles('rebar.*', 'apps/*/rebar.*') }} diff --git a/.github/workflows/upload-helm-charts.yaml b/.github/workflows/upload-helm-charts.yaml index 1125be3a4..378eaca15 100644 --- a/.github/workflows/upload-helm-charts.yaml +++ b/.github/workflows/upload-helm-charts.yaml @@ -18,7 +18,7 @@ jobs: strategy: fail-fast: false steps: - - uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1 + - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} diff --git a/.gitignore b/.gitignore index 0a76c3807..7068c1c7d 100644 --- a/.gitignore +++ b/.gitignore @@ -73,3 +73,4 @@ apps/emqx_conf/etc/emqx.conf.all.rendered* rebar-git-cache.tar # build docker image locally .docker_image_tag +.git/ diff --git a/Makefile b/Makefile index a778d63b8..fbf68519e 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ endif # Dashboard version # from https://github.com/emqx/emqx-dashboard5 export EMQX_DASHBOARD_VERSION ?= v1.7.0 -export EMQX_EE_DASHBOARD_VERSION ?= e1.5.0 +export EMQX_EE_DASHBOARD_VERSION ?= e1.5.1-s3-beta.1 PROFILE ?= emqx REL_PROFILES := emqx emqx-enterprise @@ -316,10 +316,9 @@ $(foreach tt,$(ALL_ELIXIR_TGZS),$(eval $(call gen-elixir-tgz-target,$(tt)))) .PHONY: fmt fmt: $(REBAR) @$(SCRIPTS)/erlfmt -w 'apps/*/{src,include,priv,test,integration_test}/**/*.{erl,hrl,app.src,eterm}' - @$(SCRIPTS)/erlfmt -w '**/*.escript' --exclude-files '_build/**' - @$(SCRIPTS)/erlfmt -w '**/rebar.config' --exclude-files '_build/**' - @$(SCRIPTS)/erlfmt -w 'rebar.config.erl' - @$(SCRIPTS)/erlfmt -w 'bin/nodetool' + 
@$(SCRIPTS)/erlfmt -w 'apps/*/rebar.config' 'apps/emqx/rebar.config.script' '.ci/fvt_tests/http_server/rebar.config' + @$(SCRIPTS)/erlfmt -w 'rebar.config' 'rebar.config.erl' + @$(SCRIPTS)/erlfmt -w 'scripts/*.escript' 'bin/*.escript' 'bin/nodetool' @mix format .PHONY: clean-test-cluster-config diff --git a/README-CN.md b/README-CN.md index 84a72912d..c2c5e80f4 100644 --- a/README-CN.md +++ b/README-CN.md @@ -4,6 +4,7 @@ [![Build Status](https://github.com/emqx/emqx/actions/workflows/_push-entrypoint.yaml/badge.svg)](https://github.com/emqx/emqx/actions/workflows/_push-entrypoint.yaml) [![Coverage Status](https://img.shields.io/coveralls/github/emqx/emqx/master?label=Coverage)](https://coveralls.io/github/emqx/emqx?branch=master) [![Docker Pulls](https://img.shields.io/docker/pulls/emqx/emqx?label=Docker%20Pulls)](https://hub.docker.com/r/emqx/emqx) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/emqx/emqx/badge)](https://securityscorecards.dev/viewer/?uri=github.com/emqx/emqx) [![Slack](https://img.shields.io/badge/Slack-EMQ-39AE85?logo=slack)](https://slack-invite.emqx.io/) [![Discord](https://img.shields.io/discord/931086341838622751?label=Discord&logo=discord)](https://discord.gg/xYGf3fQnES) [![Twitter](https://img.shields.io/badge/Twitter-EMQ-1DA1F2?logo=twitter)](https://twitter.com/EMQTech) diff --git a/README-RU.md b/README-RU.md index 9f8347e2b..45bf08102 100644 --- a/README-RU.md +++ b/README-RU.md @@ -4,6 +4,7 @@ [![Build Status](https://github.com/emqx/emqx/actions/workflows/_push-entrypoint.yaml/badge.svg)](https://github.com/emqx/emqx/actions/workflows/_push-entrypoint.yaml) [![Coverage Status](https://img.shields.io/coveralls/github/emqx/emqx/master?label=Coverage)](https://coveralls.io/github/emqx/emqx?branch=master) [![Docker Pulls](https://img.shields.io/docker/pulls/emqx/emqx?label=Docker%20Pulls)](https://hub.docker.com/r/emqx/emqx) +[![OpenSSF 
Scorecard](https://api.securityscorecards.dev/projects/github.com/emqx/emqx/badge)](https://securityscorecards.dev/viewer/?uri=github.com/emqx/emqx) [![Slack](https://img.shields.io/badge/Slack-EMQ-39AE85?logo=slack)](https://slack-invite.emqx.io/) [![Discord](https://img.shields.io/discord/931086341838622751?label=Discord&logo=discord)](https://discord.gg/xYGf3fQnES) [![Twitter](https://img.shields.io/badge/Follow-EMQ-1DA1F2?logo=twitter)](https://twitter.com/EMQTech) diff --git a/README.md b/README.md index 622cbfc99..ad710b5e6 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,7 @@ [![Build Status](https://github.com/emqx/emqx/actions/workflows/_push-entrypoint.yaml/badge.svg)](https://github.com/emqx/emqx/actions/workflows/_push-entrypoint.yaml) [![Coverage Status](https://img.shields.io/coveralls/github/emqx/emqx/master?label=Coverage)](https://coveralls.io/github/emqx/emqx?branch=master) [![Docker Pulls](https://img.shields.io/docker/pulls/emqx/emqx?label=Docker%20Pulls)](https://hub.docker.com/r/emqx/emqx) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/emqx/emqx/badge)](https://securityscorecards.dev/viewer/?uri=github.com/emqx/emqx) [![Slack](https://img.shields.io/badge/Slack-EMQ-39AE85?logo=slack)](https://slack-invite.emqx.io/) [![Discord](https://img.shields.io/discord/931086341838622751?label=Discord&logo=discord)](https://discord.gg/xYGf3fQnES) [![Twitter](https://img.shields.io/badge/Follow-EMQ-1DA1F2?logo=twitter)](https://twitter.com/EMQTech) diff --git a/Windows.md b/Windows.md deleted file mode 100644 index a3e5deb11..000000000 --- a/Windows.md +++ /dev/null @@ -1,131 +0,0 @@ -# Build and run EMQX on Windows - -NOTE: The instructions and examples are based on Windows 10. - -## Build Environment - -### Visual studio for C/C++ compile and link - -EMQX includes Erlang NIF (Native Implemented Function) components, implemented -in C/C++. 
To compile and link C/C++ libraries, the easiest way is perhaps to -install Visual Studio. - -Visual Studio 2019 is used in our tests. -If you are like me (@zmstone), do not know where to start, -please follow this OTP guide: -https://github.com/erlang/otp/blob/master/HOWTO/INSTALL-WIN32.md - -NOTE: To avoid surprises, you may need to add below two paths to `Path` environment variable -and order them before other paths. - -``` -C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.28.29910\bin\Hostx64\x64 -C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build -``` - -Depending on your visual studio version and OS, the paths may differ. -The first path is for rebar3 port compiler to find `cl.exe` and `link.exe` -The second path is for CMD to setup environment variables. - -### Erlang/OTP - -Install Erlang/OTP 24 from https://www.erlang.org/downloads -You may need to edit the `Path` environment variable to allow running -Erlang commands such as `erl` from powershell. - -To validate Erlang installation in CMD or powershell: - -* Start (or restart) CMD or powershell - -* Execute `erl` command to enter Erlang shell - -* Evaluate Erlang expression `halt().` to exit Erlang shell. - -e.g. - -``` -PS C:\Users\zmsto> erl -Eshell V12.2.1 (abort with ^G) -1> halt(). -``` - -### bash - -All EMQX build/run scripts are either in `bash` or `escript`. -`escript` is installed as a part of Erlang. To install a `bash` -environment in Windows, there are quite a few options. - -Cygwin is what we tested with. - -* Add `cygwin\bin` dir to `Path` environment variable - To do so, search for Edit environment variable in control panel and - add `C:\tools\cygwin\bin` (depending on the location where it was installed) - to `Path` list. - -* Validate installation. 
- Start (restart) CMD or powershell console and execute `which bash`, it should - print out `/usr/bin/bash` - -NOTE: Make sure cygwin's bin dir is added before `C:\Windows\system32` in `Path`, -otherwise the build scripts may end up using binaries from wsl instead of cygwin. - -### Other tools - -Some of the unix world tools are required to build EMQX. Including: - -* git -* curl -* make -* cmake -* jq -* zip / unzip - -We recommend using [scoop](https://scoop.sh/), or [Chocolatey](https://chocolatey.org/install) to install the tools. - -When using scoop: - -``` -scoop install git curl make cmake jq zip unzip -``` - -## Build EMQX source code - -* Clone the repo: `git clone https://github.com/emqx/emqx.git` - -* Start CMD console - -* Execute `vcvarsall.bat x86_amd64` to load environment variables - -* Change to emqx directory and execute `make` - -### Possible errors - -* `'cl.exe' is not recognized as an internal or external command` - This error is likely due to Visual Studio executables are not set in `Path` environment variable. - To fix it, either add path like `C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.28.29910\bin\Hostx64\x64` - to `Paht`. Or make sure `vcvarsall.bat x86_amd64` is executed prior to the `make` command - -* `fatal error C1083: Cannot open include file: 'assert.h': No such file or directory` - If Visual Studio is installed correctly, this is likely `LIB` and `LIB_PATH` environment - variables are not set. Make sure `vcvarsall.bat x86_amd64` is executed prior to the `make` command - -* `link: extra operand 'some.obj'` - This is likely due to the usage of GNU `lnik.exe` but not the one from Visual Studio. - Execute `link.exe --version` to inspect which one is in use. The one installed from - Visual Studio should print out `Microsoft (R) Incremental Linker`. - To fix it, Visual Studio's bin paths should be ordered prior to Cygwin's (or similar installation's) - bin paths in `Path` environment variable. 
- -## Run EMQX - -To start EMQX broker. - -Execute `_build\emqx\rel\emqx>.\bin\emqx console` or `_build\emqx\rel\emqx>.\bin\emqx start` to start EMQX. - -Then execute `_build\emqx\rel\emqx>.\bin\emqx_ctl status` to check status. -If everything works fine, it should print out - -``` -Node 'emqx@127.0.0.1' 4.3-beta.1 is started -Application emqx 4.3.0 is running -``` diff --git a/apps/emqx/include/emqx.hrl b/apps/emqx/include/emqx.hrl index 654d96d8c..13b3373f1 100644 --- a/apps/emqx/include/emqx.hrl +++ b/apps/emqx/include/emqx.hrl @@ -88,10 +88,7 @@ %%-------------------------------------------------------------------- -record(banned, { - who :: - {clientid, binary()} - | {peerhost, inet:ip_address()} - | {username, binary()}, + who :: emqx_types:banned_who(), by :: binary(), reason :: binary(), at :: integer(), diff --git a/apps/emqx/include/emqx_cm.hrl b/apps/emqx/include/emqx_cm.hrl index ae70f131f..a84a06688 100644 --- a/apps/emqx/include/emqx_cm.hrl +++ b/apps/emqx/include/emqx_cm.hrl @@ -23,11 +23,20 @@ -define(CHAN_INFO_TAB, emqx_channel_info). -define(CHAN_LIVE_TAB, emqx_channel_live). -%% Mria/Mnesia Tables for channel management. +%% Mria table for session registration. -define(CHAN_REG_TAB, emqx_channel_registry). -define(T_KICK, 5_000). -define(T_GET_INFO, 5_000). -define(T_TAKEOVER, 15_000). +-define(CM_POOL, emqx_cm_pool). + +%% Registered sessions. +-record(channel, { + chid :: emqx_types:clientid() | '_', + %% pid field is extended in 5.6.0 to support recording unregistration timestamp. + pid :: pid() | non_neg_integer() | '$1' +}). + -endif. diff --git a/apps/emqx/include/http_api.hrl b/apps/emqx/include/http_api.hrl index 0f6372584..f0c5611e9 100644 --- a/apps/emqx/include/http_api.hrl +++ b/apps/emqx/include/http_api.hrl @@ -17,6 +17,7 @@ %% HTTP API Auth -define(BAD_USERNAME_OR_PWD, 'BAD_USERNAME_OR_PWD'). -define(BAD_API_KEY_OR_SECRET, 'BAD_API_KEY_OR_SECRET'). +-define(API_KEY_NOT_ALLOW, 'API_KEY_NOT_ALLOW'). 
-define(API_KEY_NOT_ALLOW_MSG, <<"This API Key don't have permission to access this resource">>). %% Bad Request diff --git a/apps/emqx/include/logger.hrl b/apps/emqx/include/logger.hrl index a40f9dc9c..f39c88441 100644 --- a/apps/emqx/include/logger.hrl +++ b/apps/emqx/include/logger.hrl @@ -40,6 +40,21 @@ end ). +%% NOTE: do not forget to use atom for msg and add every used msg to +%% the default value of `log.thorttling.msgs` list. +-define(SLOG_THROTTLE(Level, Data), + ?SLOG_THROTTLE(Level, Data, #{}) +). + +-define(SLOG_THROTTLE(Level, Data, Meta), + case emqx_log_throttler:allow(Level, maps:get(msg, Data)) of + true -> + ?SLOG(Level, Data, Meta); + false -> + ok + end +). + -define(AUDIT_HANDLER, emqx_audit). -define(TRACE_FILTER, emqx_trace_filter). -define(OWN_KEYS, [level, filters, filter_default, handlers]). diff --git a/apps/emqx/include/types.hrl b/apps/emqx/include/types.hrl index ec56a9300..75750138f 100644 --- a/apps/emqx/include/types.hrl +++ b/apps/emqx/include/types.hrl @@ -14,7 +14,7 @@ %% limitations under the License. %%-------------------------------------------------------------------- --type maybe(T) :: undefined | T. +-type option(T) :: undefined | T. -type startlink_ret() :: {ok, pid()} | ignore | {error, term()}. diff --git a/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl b/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl index b5beb9ae7..07a41f167 100644 --- a/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl +++ b/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- -module(emqx_persistent_session_ds_SUITE). 
@@ -18,6 +18,9 @@ %% CT boilerplate %%------------------------------------------------------------------------------ +suite() -> + [{timetrap, {seconds, 60}}]. + all() -> emqx_common_test_helpers:all(?MODULE). @@ -51,12 +54,12 @@ init_per_testcase(TestCase, Config) when init_per_testcase(t_session_gc = TestCase, Config) -> Opts = #{ n => 3, - roles => [core, core, replicant], + roles => [core, core, core], extra_emqx_conf => "\n session_persistence {" "\n last_alive_update_interval = 500ms " - "\n session_gc_interval = 2s " - "\n session_gc_batch_size = 1 " + "\n session_gc_interval = 1s " + "\n session_gc_batch_size = 2 " "\n }" }, Cluster = cluster(Opts), @@ -88,7 +91,7 @@ end_per_testcase(_TestCase, _Config) -> ok. %%------------------------------------------------------------------------------ -%% Helper fns +%% Helper functions %%------------------------------------------------------------------------------ cluster(#{n := N} = Opts) -> @@ -144,9 +147,10 @@ start_client(Opts0 = #{}) -> proto_ver => v5, properties => #{'Session-Expiry-Interval' => 300} }, - Opts = maps:to_list(emqx_utils_maps:deep_merge(Defaults, Opts0)), - ct:pal("starting client with opts:\n ~p", [Opts]), - {ok, Client} = emqtt:start_link(Opts), + Opts = emqx_utils_maps:deep_merge(Defaults, Opts0), + ?tp(notice, "starting client", Opts), + {ok, Client} = emqtt:start_link(maps:to_list(Opts)), + unlink(Client), on_exit(fun() -> catch emqtt:stop(Client) end), Client. @@ -161,58 +165,27 @@ is_persistent_connect_opts(#{properties := #{'Session-Expiry-Interval' := EI}}) EI > 0. list_all_sessions(Node) -> - erpc:call(Node, emqx_persistent_session_ds, list_all_sessions, []). + erpc:call(Node, emqx_persistent_session_ds_state, list_sessions, []). list_all_subscriptions(Node) -> - erpc:call(Node, emqx_persistent_session_ds, list_all_subscriptions, []). 
+ Sessions = list_all_sessions(Node), + lists:flatmap( + fun(ClientId) -> + #{s := #{subscriptions := Subs}} = erpc:call( + Node, emqx_persistent_session_ds, print_session, [ClientId] + ), + maps:to_list(Subs) + end, + Sessions + ). list_all_pubranges(Node) -> erpc:call(Node, emqx_persistent_session_ds, list_all_pubranges, []). -prop_only_cores_run_gc(CoreNodes) -> - {"only core nodes run gc", fun(Trace) -> ?MODULE:prop_only_cores_run_gc(Trace, CoreNodes) end}. -prop_only_cores_run_gc(Trace, CoreNodes) -> - GCNodes = lists:usort([ - N - || #{ - ?snk_kind := K, - ?snk_meta := #{node := N} - } <- Trace, - lists:member(K, [ds_session_gc, ds_session_gc_lock_taken]), - N =/= node() - ]), - ?assertEqual(lists:usort(CoreNodes), GCNodes). - %%------------------------------------------------------------------------------ %% Testcases %%------------------------------------------------------------------------------ -t_non_persistent_session_subscription(_Config) -> - ClientId = atom_to_binary(?FUNCTION_NAME), - SubTopicFilter = <<"t/#">>, - ?check_trace( - begin - ?tp(notice, "starting", #{}), - Client = start_client(#{ - clientid => ClientId, - properties => #{'Session-Expiry-Interval' => 0} - }), - {ok, _} = emqtt:connect(Client), - ?tp(notice, "subscribing", #{}), - {ok, _, [?RC_GRANTED_QOS_2]} = emqtt:subscribe(Client, SubTopicFilter, qos2), - - ok = emqtt:stop(Client), - - ok - end, - fun(Trace) -> - ct:pal("trace:\n ~p", [Trace]), - ?assertEqual([], ?of_kind(ds_session_subscription_added, Trace)), - ok - end - ), - ok. 
- t_session_subscription_idempotency(Config) -> [Node1Spec | _] = ?config(node_specs, Config), [Node1] = ?config(nodes, Config), @@ -220,6 +193,7 @@ t_session_subscription_idempotency(Config) -> SubTopicFilter = <<"t/+">>, ClientId = <<"myclientid">>, ?check_trace( + #{timetrap => 30_000}, begin ?force_ordering( #{?snk_kind := persistent_session_ds_subscription_added}, @@ -281,11 +255,11 @@ t_session_unsubscription_idempotency(Config) -> SubTopicFilter = <<"t/+">>, ClientId = <<"myclientid">>, ?check_trace( + #{timetrap => 30_000}, begin ?force_ordering( #{ - ?snk_kind := persistent_session_ds_subscription_delete, - ?snk_span := {complete, _} + ?snk_kind := persistent_session_ds_subscription_delete }, _NEvents0 = 1, #{?snk_kind := will_restart_node}, @@ -385,6 +359,7 @@ do_t_session_discard(Params) -> ReconnectOpts = ReconnectOpts0#{clientid => ClientId}, SubTopicFilter = <<"t/+">>, ?check_trace( + #{timetrap => 30_000}, begin ?tp(notice, "starting", #{}), Client0 = start_client(#{ @@ -402,27 +377,26 @@ do_t_session_discard(Params) -> ?retry( _Sleep0 = 100, _Attempts0 = 50, - true = map_size(emqx_persistent_session_ds:list_all_streams()) > 0 + #{} = emqx_persistent_session_ds_state:print_session(ClientId) ), ok = emqtt:stop(Client0), ?tp(notice, "disconnected", #{}), ?tp(notice, "reconnecting", #{}), - %% we still have streams - ?assert(map_size(emqx_persistent_session_ds:list_all_streams()) > 0), + %% we still have the session: + ?assertMatch(#{}, emqx_persistent_session_ds_state:print_session(ClientId)), Client1 = start_client(ReconnectOpts), {ok, _} = emqtt:connect(Client1), ?assertEqual([], emqtt:subscriptions(Client1)), case is_persistent_connect_opts(ReconnectOpts) of true -> - ?assertMatch(#{ClientId := _}, emqx_persistent_session_ds:list_all_sessions()); + ?assertMatch(#{}, emqx_persistent_session_ds_state:print_session(ClientId)); false -> - ?assertEqual(#{}, emqx_persistent_session_ds:list_all_sessions()) + ?assertEqual( + undefined, 
emqx_persistent_session_ds_state:print_session(ClientId) + ) end, - ?assertEqual(#{}, emqx_persistent_session_ds:list_all_subscriptions()), ?assertEqual([], emqx_persistent_session_ds_router:topics()), - ?assertEqual(#{}, emqx_persistent_session_ds:list_all_streams()), - ?assertEqual(#{}, emqx_persistent_session_ds:list_all_pubranges()), ok = emqtt:stop(Client1), ?tp(notice, "disconnected", #{}), @@ -436,6 +410,8 @@ do_t_session_discard(Params) -> ok. t_session_expiration1(Config) -> + %% This testcase verifies that the properties passed in the + %% CONNECT packet are respected by the GC process: ClientId = atom_to_binary(?FUNCTION_NAME), Opts = #{ clientid => ClientId, @@ -448,6 +424,9 @@ t_session_expiration1(Config) -> do_t_session_expiration(Config, Opts). t_session_expiration2(Config) -> + %% This testcase updates the expiry interval for the session in + %% the _DISCONNECT_ packet. This setting should be respected by GC + %% process: ClientId = atom_to_binary(?FUNCTION_NAME), Opts = #{ clientid => ClientId, @@ -462,6 +441,8 @@ t_session_expiration2(Config) -> do_t_session_expiration(Config, Opts). 
do_t_session_expiration(_Config, Opts) -> + %% Sequence is a list of pairs of properties passed through the + %% CONNECT and for the DISCONNECT for each session: #{ clientid := ClientId, sequence := [ @@ -472,13 +453,14 @@ do_t_session_expiration(_Config, Opts) -> } = Opts, CommonParams = #{proto_ver => v5, clientid => ClientId}, ?check_trace( + #{timetrap => 30_000}, begin Topic = <<"some/topic">>, Params0 = maps:merge(CommonParams, FirstConn), Client0 = start_client(Params0), {ok, _} = emqtt:connect(Client0), {ok, _, [?RC_GRANTED_QOS_2]} = emqtt:subscribe(Client0, Topic, ?QOS_2), - Subs0 = emqx_persistent_session_ds:list_all_subscriptions(), + #{s := #{subscriptions := Subs0}} = emqx_persistent_session_ds:print_session(ClientId), ?assertEqual(1, map_size(Subs0), #{subs => Subs0}), Info0 = maps:from_list(emqtt:info(Client0)), ?assertEqual(0, maps:get(session_present, Info0), #{info => Info0}), @@ -493,7 +475,7 @@ do_t_session_expiration(_Config, Opts) -> ?assertEqual([], Subs1), emqtt:disconnect(Client1, ?RC_NORMAL_DISCONNECTION, SecondDisconn), - ct:sleep(1_500), + ct:sleep(2_500), Params2 = maps:merge(CommonParams, ThirdConn), Client2 = start_client(Params2), @@ -505,9 +487,9 @@ do_t_session_expiration(_Config, Opts) -> emqtt:publish(Client2, Topic, <<"payload">>), ?assertNotReceive({publish, #{topic := Topic}}), %% ensure subscriptions are absent from table. - ?assertEqual(#{}, emqx_persistent_session_ds:list_all_subscriptions()), + #{s := #{subscriptions := Subs3}} = emqx_persistent_session_ds:print_session(ClientId), + ?assertEqual([], maps:to_list(Subs3)), emqtt:disconnect(Client2, ?RC_NORMAL_DISCONNECTION, ThirdDisconn), - ok end, [] @@ -515,14 +497,13 @@ do_t_session_expiration(_Config, Opts) -> ok. 
t_session_gc(Config) -> - GCInterval = ?config(gc_interval, Config), [Node1, Node2, _Node3] = Nodes = ?config(nodes, Config), - CoreNodes = [Node1, Node2], [ Port1, Port2, Port3 ] = lists:map(fun(N) -> get_mqtt_port(N, tcp) end, Nodes), + ct:pal("Ports: ~p", [[Port1, Port2, Port3]]), CommonParams = #{ clean_start => false, proto_ver => v5 @@ -539,15 +520,16 @@ t_session_gc(Config) -> end, ?check_trace( + #{timetrap => 30_000}, begin - ClientId0 = <<"session_gc0">>, - Client0 = StartClient(ClientId0, Port1, 30), - ClientId1 = <<"session_gc1">>, - Client1 = StartClient(ClientId1, Port2, 1), + Client1 = StartClient(ClientId1, Port1, 30), ClientId2 = <<"session_gc2">>, - Client2 = StartClient(ClientId2, Port3, 1), + Client2 = StartClient(ClientId2, Port2, 1), + + ClientId3 = <<"session_gc3">>, + Client3 = StartClient(ClientId3, Port3, 1), lists:foreach( fun(Client) -> @@ -557,55 +539,48 @@ t_session_gc(Config) -> {ok, _} = emqtt:publish(Client, Topic, Payload, ?QOS_1), ok end, - [Client0, Client1, Client2] + [Client1, Client2, Client3] ), %% Clients are still alive; no session is garbage collected. - Res0 = ?block_until( - #{ - ?snk_kind := ds_session_gc, - ?snk_span := {complete, _}, - ?snk_meta := #{node := N} - } when - N =/= node(), - 3 * GCInterval + 1_000 - ), - ?assertMatch({ok, _}, Res0), - {ok, #{?snk_meta := #{time := T0}}} = Res0, - Sessions0 = list_all_sessions(Node1), - Subs0 = list_all_subscriptions(Node1), - ?assertEqual(3, map_size(Sessions0), #{sessions => Sessions0}), - ?assertEqual(3, map_size(Subs0), #{subs => Subs0}), - - %% Now we disconnect 2 of them; only those should be GC'ed. 
?assertMatch( - {ok, {ok, _}}, - ?wait_async_action( - emqtt:stop(Client1), - #{?snk_kind := terminate}, - 1_000 + {ok, _}, + ?block_until( + #{ + ?snk_kind := ds_session_gc, + ?snk_span := {complete, _}, + ?snk_meta := #{node := N} + } when N =/= node() ) ), - ct:pal("disconnected client1"), + ?assertMatch([_, _, _], list_all_sessions(Node1), sessions), + ?assertMatch([_, _, _], list_all_subscriptions(Node1), subscriptions), + + %% Now we disconnect 2 of them; only those should be GC'ed. + ?assertMatch( {ok, {ok, _}}, ?wait_async_action( emqtt:stop(Client2), - #{?snk_kind := terminate}, - 1_000 + #{?snk_kind := terminate} ) ), - ct:pal("disconnected client2"), + ?tp(notice, "disconnected client1", #{}), + ?assertMatch( + {ok, {ok, _}}, + ?wait_async_action( + emqtt:stop(Client3), + #{?snk_kind := terminate} + ) + ), + ?tp(notice, "disconnected client2", #{}), ?assertMatch( {ok, _}, ?block_until( #{ ?snk_kind := ds_session_gc_cleaned, - ?snk_meta := #{node := N, time := T}, - session_ids := [ClientId1] - } when - N =/= node() andalso T > T0, - 4 * GCInterval + 1_000 + session_id := ClientId2 + } ) ), ?assertMatch( @@ -613,22 +588,14 @@ t_session_gc(Config) -> ?block_until( #{ ?snk_kind := ds_session_gc_cleaned, - ?snk_meta := #{node := N, time := T}, - session_ids := [ClientId2] - } when - N =/= node() andalso T > T0, - 4 * GCInterval + 1_000 + session_id := ClientId3 + } ) ), - Sessions1 = list_all_sessions(Node1), - Subs1 = list_all_subscriptions(Node1), - ?assertEqual(1, map_size(Sessions1), #{sessions => Sessions1}), - ?assertEqual(1, map_size(Subs1), #{subs => Subs1}), - + ?retry(50, 3, [ClientId1] = list_all_sessions(Node1)), + ?assertMatch([_], list_all_subscriptions(Node1), subscriptions), ok end, - [ - prop_only_cores_run_gc(CoreNodes) - ] + [] ), ok. 
diff --git a/apps/emqx/priv/bpapi.versions b/apps/emqx/priv/bpapi.versions index 859d7fbe0..9497f04cd 100644 --- a/apps/emqx/priv/bpapi.versions +++ b/apps/emqx/priv/bpapi.versions @@ -22,6 +22,8 @@ {emqx_delayed,3}. {emqx_ds,1}. {emqx_ds,2}. +{emqx_ds,3}. +{emqx_ds,4}. {emqx_eviction_agent,1}. {emqx_eviction_agent,2}. {emqx_exhook,1}. diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index bfd981854..9953dd3fc 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -28,9 +28,9 @@ {gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}}, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.11.1"}}}, - {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.18.3"}}}, + {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.18.4"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.1"}}}, - {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.40.4"}}}, + {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.41.0"}}}, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}}, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}, {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}}, @@ -71,7 +71,7 @@ {statistics, true} ]}. -{project_plugins, [erlfmt]}. +{project_plugins, [{erlfmt, "1.3.0"}]}. 
{erlfmt, [ {files, [ diff --git a/apps/emqx/src/emqx_access_control.erl b/apps/emqx/src/emqx_access_control.erl index b786e2c18..d97dbd167 100644 --- a/apps/emqx/src/emqx_access_control.erl +++ b/apps/emqx/src/emqx_access_control.erl @@ -183,8 +183,13 @@ log_result(#{username := Username}, Topic, Action, From, Result) -> } end, case Result of - allow -> ?SLOG(info, (LogMeta())#{msg => "authorization_permission_allowed"}); - deny -> ?SLOG(warning, (LogMeta())#{msg => "authorization_permission_denied"}) + allow -> + ?SLOG(info, (LogMeta())#{msg => "authorization_permission_allowed"}); + deny -> + ?SLOG_THROTTLE( + warning, + (LogMeta())#{msg => authorization_permission_denied} + ) end. %% @private Format authorization rules source. diff --git a/apps/emqx/src/emqx_alarm.erl b/apps/emqx/src/emqx_alarm.erl index 8c0c35334..330e2e917 100644 --- a/apps/emqx/src/emqx_alarm.erl +++ b/apps/emqx/src/emqx_alarm.erl @@ -21,12 +21,9 @@ -include("emqx.hrl"). -include("logger.hrl"). -%% Mnesia bootstrap --export([mnesia/1]). - --boot_mnesia({mnesia, [boot]}). - +-export([create_tables/0]). -export([start_link/0]). + %% API -export([ activate/1, @@ -86,7 +83,7 @@ %% Mnesia bootstrap %%-------------------------------------------------------------------- -mnesia(boot) -> +create_tables() -> ok = mria:create_table( ?ACTIVATED_ALARM, [ @@ -106,7 +103,8 @@ mnesia(boot) -> {record_name, deactivated_alarm}, {attributes, record_info(fields, deactivated_alarm)} ] - ). + ), + [?ACTIVATED_ALARM, ?DEACTIVATED_ALARM]. %%-------------------------------------------------------------------- %% API diff --git a/apps/emqx/src/emqx_banned.erl b/apps/emqx/src/emqx_banned.erl index 1568bf103..db6d63cc7 100644 --- a/apps/emqx/src/emqx_banned.erl +++ b/apps/emqx/src/emqx_banned.erl @@ -25,9 +25,7 @@ -include_lib("snabbkaffe/include/snabbkaffe.hrl"). %% Mnesia bootstrap --export([mnesia/1]). - --boot_mnesia({mnesia, [boot]}). +-export([create_tables/0]). -export([start_link/0, stop/0]). 
@@ -39,7 +37,9 @@ info/1, format/1, parse/1, - clear/0 + clear/0, + who/2, + tables/0 ]). %% gen_server callbacks @@ -61,7 +61,8 @@ -elvis([{elvis_style, state_record_and_type, disable}]). --define(BANNED_TAB, ?MODULE). +-define(BANNED_INDIVIDUAL_TAB, ?MODULE). +-define(BANNED_RULE_TAB, emqx_banned_rules). %% The default expiration time should be infinite %% but for compatibility, a large number (1 years) is used here to represent the 'infinite' @@ -76,20 +77,26 @@ %% Mnesia bootstrap %%-------------------------------------------------------------------- -mnesia(boot) -> - ok = mria:create_table(?BANNED_TAB, [ +create_tables() -> + Options = [ {type, set}, {rlog_shard, ?COMMON_SHARD}, {storage, disc_copies}, {record_name, banned}, {attributes, record_info(fields, banned)}, {storage_properties, [{ets, [{read_concurrency, true}]}]} - ]). + ], + ok = mria:create_table(?BANNED_INDIVIDUAL_TAB, Options), + ok = mria:create_table(?BANNED_RULE_TAB, Options), + [?BANNED_INDIVIDUAL_TAB, ?BANNED_RULE_TAB]. %%-------------------------------------------------------------------- %% Data backup %%-------------------------------------------------------------------- -backup_tables() -> [?BANNED_TAB]. +backup_tables() -> tables(). + +-spec tables() -> [atom()]. +tables() -> [?BANNED_RULE_TAB, ?BANNED_INDIVIDUAL_TAB]. %% @doc Start the banned server. -spec start_link() -> startlink_ret(). @@ -104,16 +111,10 @@ stop() -> gen_server:stop(?MODULE). check(ClientInfo) -> do_check({clientid, maps:get(clientid, ClientInfo, undefined)}) orelse do_check({username, maps:get(username, ClientInfo, undefined)}) orelse - do_check({peerhost, maps:get(peerhost, ClientInfo, undefined)}). - -do_check({_, undefined}) -> - false; -do_check(Who) when is_tuple(Who) -> - case mnesia:dirty_read(?BANNED_TAB, Who) of - [] -> false; - [#banned{until = Until}] -> Until > erlang:system_time(second) - end. + do_check({peerhost, maps:get(peerhost, ClientInfo, undefined)}) orelse + do_check_rules(ClientInfo). 
+-spec format(emqx_types:banned()) -> map(). format(#banned{ who = Who0, by = By, @@ -121,7 +122,7 @@ format(#banned{ at = At, until = Until }) -> - {As, Who} = maybe_format_host(Who0), + {As, Who} = format_who(Who0), #{ as => As, who => Who, @@ -131,6 +132,7 @@ format(#banned{ until => to_rfc3339(Until) }. +-spec parse(map()) -> emqx_types:banned() | {error, term()}. parse(Params) -> case parse_who(Params) of {error, Reason} -> @@ -155,24 +157,6 @@ parse(Params) -> {error, ErrorReason} end end. -parse_who(#{as := As, who := Who}) -> - parse_who(#{<<"as">> => As, <<"who">> => Who}); -parse_who(#{<<"as">> := peerhost, <<"who">> := Peerhost0}) -> - case inet:parse_address(binary_to_list(Peerhost0)) of - {ok, Peerhost} -> {peerhost, Peerhost}; - {error, einval} -> {error, "bad peerhost"} - end; -parse_who(#{<<"as">> := As, <<"who">> := Who}) -> - {As, Who}. - -maybe_format_host({peerhost, Host}) -> - AddrBinary = list_to_binary(inet:ntoa(Host)), - {peerhost, AddrBinary}; -maybe_format_host({As, Who}) -> - {As, Who}. - -to_rfc3339(Timestamp) -> - emqx_utils_calendar:epoch_to_rfc3339(Timestamp, second). -spec create(emqx_types:banned() | map()) -> {ok, emqx_types:banned()} | {error, {already_exist, emqx_types:banned()}}. @@ -194,7 +178,7 @@ create(#{ create(Banned = #banned{who = Who}) -> case look_up(Who) of [] -> - insert_banned(Banned), + insert_banned(table(Who), Banned), {ok, Banned}; [OldBanned = #banned{until = Until}] -> %% Don't support shorten or extend the until time by overwrite. @@ -204,33 +188,52 @@ create(Banned = #banned{who = Who}) -> {error, {already_exist, OldBanned}}; %% overwrite expired one is ok. false -> - insert_banned(Banned), + insert_banned(table(Who), Banned), {ok, Banned} end end. +-spec look_up(emqx_types:banned_who() | map()) -> [emqx_types:banned()]. look_up(Who) when is_map(Who) -> look_up(parse_who(Who)); look_up(Who) -> - mnesia:dirty_read(?BANNED_TAB, Who). + mnesia:dirty_read(table(Who), Who). 
--spec delete( - {clientid, emqx_types:clientid()} - | {username, emqx_types:username()} - | {peerhost, emqx_types:peerhost()} -) -> ok. +-spec delete(map() | emqx_types:banned_who()) -> ok. delete(Who) when is_map(Who) -> delete(parse_who(Who)); delete(Who) -> - mria:dirty_delete(?BANNED_TAB, Who). + mria:dirty_delete(table(Who), Who). -info(InfoKey) -> - mnesia:table_info(?BANNED_TAB, InfoKey). +-spec info(size) -> non_neg_integer(). +info(size) -> + mnesia:table_info(?BANNED_INDIVIDUAL_TAB, size) + mnesia:table_info(?BANNED_RULE_TAB, size). +-spec clear() -> ok. clear() -> - _ = mria:clear_table(?BANNED_TAB), + _ = mria:clear_table(?BANNED_INDIVIDUAL_TAB), + _ = mria:clear_table(?BANNED_RULE_TAB), ok. +%% Creating banned with `#banned{}` records is exposed as a public API +%% so we need helpers to create the `who` field of `#banned{}` records +-spec who(atom(), binary() | inet:ip_address() | esockd_cidr:cidr()) -> emqx_types:banned_who(). +who(clientid, ClientId) when is_binary(ClientId) -> {clientid, ClientId}; +who(username, Username) when is_binary(Username) -> {username, Username}; +who(peerhost, Peerhost) when is_tuple(Peerhost) -> {peerhost, Peerhost}; +who(peerhost, Peerhost) when is_binary(Peerhost) -> + {ok, Addr} = inet:parse_address(binary_to_list(Peerhost)), + {peerhost, Addr}; +who(clientid_re, RE) when is_binary(RE) -> + {ok, RECompiled} = re:compile(RE), + {clientid_re, {RECompiled, RE}}; +who(username_re, RE) when is_binary(RE) -> + {ok, RECompiled} = re:compile(RE), + {username_re, {RECompiled, RE}}; +who(peerhost_net, CIDR) when is_tuple(CIDR) -> {peerhost_net, CIDR}; +who(peerhost_net, CIDR) when is_binary(CIDR) -> + {peerhost_net, esockd_cidr:parse(binary_to_list(CIDR), true)}. 
+ %%-------------------------------------------------------------------- %% gen_server callbacks %%-------------------------------------------------------------------- @@ -265,6 +268,81 @@ code_change(_OldVsn, State, _Extra) -> %% Internal functions %%-------------------------------------------------------------------- +do_check({_, undefined}) -> + false; +do_check(Who) when is_tuple(Who) -> + case mnesia:dirty_read(table(Who), Who) of + [] -> false; + [#banned{until = Until}] -> Until > erlang:system_time(second) + end. + +do_check_rules(ClientInfo) -> + Rules = all_rules(), + Now = erlang:system_time(second), + lists:any( + fun(Rule) -> is_rule_actual(Rule, Now) andalso do_check_rule(Rule, ClientInfo) end, Rules + ). + +is_rule_actual(#banned{until = Until}, Now) -> + Until > Now. + +do_check_rule(#banned{who = {clientid_re, {RE, _}}}, #{clientid := ClientId}) -> + is_binary(ClientId) andalso re:run(ClientId, RE) =/= nomatch; +do_check_rule(#banned{who = {clientid_re, _}}, #{}) -> + false; +do_check_rule(#banned{who = {username_re, {RE, _}}}, #{username := Username}) -> + is_binary(Username) andalso re:run(Username, RE) =/= nomatch; +do_check_rule(#banned{who = {username_re, _}}, #{}) -> + false; +do_check_rule(#banned{who = {peerhost_net, CIDR}}, #{peerhost := Peerhost}) -> + esockd_cidr:match(Peerhost, CIDR); +do_check_rule(#banned{who = {peerhost_net, _}}, #{}) -> + false. 
+ +parse_who(#{as := As, who := Who}) -> + parse_who(#{<<"as">> => As, <<"who">> => Who}); +parse_who(#{<<"as">> := peerhost, <<"who">> := Peerhost0}) -> + case inet:parse_address(binary_to_list(Peerhost0)) of + {ok, Peerhost} -> {peerhost, Peerhost}; + {error, einval} -> {error, "bad peerhost"} + end; +parse_who(#{<<"as">> := peerhost_net, <<"who">> := CIDRString}) -> + try esockd_cidr:parse(binary_to_list(CIDRString), true) of + CIDR -> {peerhost_net, CIDR} + catch + error:Error -> {error, Error} + end; +parse_who(#{<<"as">> := AsRE, <<"who">> := Who}) when + AsRE =:= clientid_re orelse AsRE =:= username_re +-> + case re:compile(Who) of + {ok, RE} -> {AsRE, {RE, Who}}; + {error, _} = Error -> Error + end; +parse_who(#{<<"as">> := As, <<"who">> := Who}) when As =:= clientid orelse As =:= username -> + {As, Who}. + +format_who({peerhost, Host}) -> + AddrBinary = list_to_binary(inet:ntoa(Host)), + {peerhost, AddrBinary}; +format_who({peerhost_net, CIDR}) -> + CIDRBinary = list_to_binary(esockd_cidr:to_string(CIDR)), + {peerhost_net, CIDRBinary}; +format_who({AsRE, {_RE, REOriginal}}) when AsRE =:= clientid_re orelse AsRE =:= username_re -> + {AsRE, REOriginal}; +format_who({As, Who}) when As =:= clientid orelse As =:= username -> + {As, Who}. + +to_rfc3339(Timestamp) -> + emqx_utils_calendar:epoch_to_rfc3339(Timestamp, second). + +table({username, _Username}) -> ?BANNED_INDIVIDUAL_TAB; +table({clientid, _ClientId}) -> ?BANNED_INDIVIDUAL_TAB; +table({peerhost, _Peerhost}) -> ?BANNED_INDIVIDUAL_TAB; +table({username_re, _UsernameRE}) -> ?BANNED_RULE_TAB; +table({clientid_re, _ClientIdRE}) -> ?BANNED_RULE_TAB; +table({peerhost_net, _PeerhostNet}) -> ?BANNED_RULE_TAB. + -ifdef(TEST). ensure_expiry_timer(State) -> State#{expiry_timer := emqx_utils:start_timer(10, expire)}. @@ -274,19 +352,27 @@ ensure_expiry_timer(State) -> -endif. 
expire_banned_items(Now) -> + lists:foreach( + fun(Tab) -> + expire_banned_items(Now, Tab) + end, + [?BANNED_INDIVIDUAL_TAB, ?BANNED_RULE_TAB] + ). + +expire_banned_items(Now, Tab) -> mnesia:foldl( fun (B = #banned{until = Until}, _Acc) when Until < Now -> - mnesia:delete_object(?BANNED_TAB, B, sticky_write); + mnesia:delete_object(Tab, B, sticky_write); (_, _Acc) -> ok end, ok, - ?BANNED_TAB + Tab ). -insert_banned(Banned) -> - mria:dirty_write(?BANNED_TAB, Banned), +insert_banned(Tab, Banned) -> + mria:dirty_write(Tab, Banned), on_banned(Banned). on_banned(#banned{who = {clientid, ClientId}}) -> @@ -302,3 +388,6 @@ on_banned(#banned{who = {clientid, ClientId}}) -> ok; on_banned(_) -> ok. + +all_rules() -> + ets:tab2list(?BANNED_RULE_TAB). diff --git a/apps/emqx/src/emqx_broker.erl b/apps/emqx/src/emqx_broker.erl index 23679700e..40969ed02 100644 --- a/apps/emqx/src/emqx_broker.erl +++ b/apps/emqx/src/emqx_broker.erl @@ -85,13 +85,13 @@ %% Guards -define(IS_SUBID(Id), (is_binary(Id) orelse is_atom(Id))). --define(cast_or_eval(Pid, Msg, Expr), - case Pid =:= self() of - true -> +-define(cast_or_eval(PICK, Msg, Expr), + case PICK of + __X_Pid when __X_Pid =:= self() -> _ = Expr, ok; - false -> - cast(Pid, Msg) + __X_Pid -> + cast(__X_Pid, Msg) end ). @@ -243,7 +243,7 @@ publish(Msg) when is_record(Msg, message) -> []; Msg1 = #message{topic = Topic} -> PersistRes = persist_publish(Msg1), - PersistRes ++ route(aggre(emqx_router:match_routes(Topic)), delivery(Msg1)) + route(aggre(emqx_router:match_routes(Topic)), delivery(Msg1), PersistRes) end. persist_publish(Msg) -> @@ -283,18 +283,20 @@ delivery(Msg) -> #delivery{sender = self(), message = Msg}. %% Route %%-------------------------------------------------------------------- --spec route([emqx_types:route_entry()], emqx_types:delivery()) -> +-spec route([emqx_types:route_entry()], emqx_types:delivery(), nil() | [persisted]) -> emqx_types:publish_result(). 
-route([], #delivery{message = Msg}) -> +route([], #delivery{message = Msg}, _PersistRes = []) -> ok = emqx_hooks:run('message.dropped', [Msg, #{node => node()}, no_subscribers]), ok = inc_dropped_cnt(Msg), []; -route(Routes, Delivery) -> +route([], _Delivery, PersistRes = [_ | _]) -> + PersistRes; +route(Routes, Delivery, PersistRes) -> lists:foldl( fun(Route, Acc) -> [do_route(Route, Delivery) | Acc] end, - [], + PersistRes, Routes ). @@ -438,7 +440,7 @@ subscribed(SubId, Topic) when ?IS_SUBID(SubId) -> SubPid = emqx_broker_helper:lookup_subpid(SubId), ets:member(?SUBOPTION, {Topic, SubPid}). --spec get_subopts(pid(), emqx_types:topic() | emqx_types:share()) -> maybe(emqx_types:subopts()). +-spec get_subopts(pid(), emqx_types:topic() | emqx_types:share()) -> option(emqx_types:subopts()). get_subopts(SubPid, Topic) when is_pid(SubPid), ?IS_TOPIC(Topic) -> lookup_value(?SUBOPTION, {Topic, SubPid}); get_subopts(SubId, Topic) when ?IS_SUBID(SubId) -> diff --git a/apps/emqx/src/emqx_broker_helper.erl b/apps/emqx/src/emqx_broker_helper.erl index ea615c2f7..ef238b61a 100644 --- a/apps/emqx/src/emqx_broker_helper.erl +++ b/apps/emqx/src/emqx_broker_helper.erl @@ -71,11 +71,11 @@ register_sub(SubPid, SubId) when is_pid(SubPid) -> error(subid_conflict) end. --spec lookup_subid(pid()) -> maybe(emqx_types:subid()). +-spec lookup_subid(pid()) -> option(emqx_types:subid()). lookup_subid(SubPid) when is_pid(SubPid) -> emqx_utils_ets:lookup_value(?SUBMON, SubPid). --spec lookup_subpid(emqx_types:subid()) -> maybe(pid()). +-spec lookup_subpid(emqx_types:subid()) -> option(pid()). lookup_subpid(SubId) -> emqx_utils_ets:lookup_value(?SUBID, SubId). diff --git a/apps/emqx/src/emqx_broker_sup.erl b/apps/emqx/src/emqx_broker_sup.erl index aee8dff5d..e64ab6745 100644 --- a/apps/emqx/src/emqx_broker_sup.erl +++ b/apps/emqx/src/emqx_broker_sup.erl @@ -23,6 +23,10 @@ -export([init/1]). 
start_link() -> + ok = mria:wait_for_tables( + emqx_shared_sub:create_tables() ++ + emqx_exclusive_subscription:create_tables() + ), supervisor:start_link({local, ?MODULE}, ?MODULE, []). %%-------------------------------------------------------------------- diff --git a/apps/emqx/src/emqx_channel.erl b/apps/emqx/src/emqx_channel.erl index cf519fd5d..192335a25 100644 --- a/apps/emqx/src/emqx_channel.erl +++ b/apps/emqx/src/emqx_channel.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2019-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2019-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -84,21 +84,21 @@ %% MQTT ClientInfo clientinfo :: emqx_types:clientinfo(), %% MQTT Session - session :: maybe(emqx_session:t()), + session :: option(emqx_session:t()), %% Keepalive - keepalive :: maybe(emqx_keepalive:keepalive()), + keepalive :: option(emqx_keepalive:keepalive()), %% MQTT Will Msg - will_msg :: maybe(emqx_types:message()), + will_msg :: option(emqx_types:message()), %% MQTT Topic Aliases topic_aliases :: emqx_types:topic_aliases(), %% MQTT Topic Alias Maximum - alias_maximum :: maybe(map()), + alias_maximum :: option(map()), %% Authentication Data Cache - auth_cache :: maybe(map()), + auth_cache :: option(map()), %% Quota checkers quota :: emqx_limiter_container:container(), %% Timers - timers :: #{atom() => disabled | maybe(reference())}, + timers :: #{atom() => disabled | option(reference())}, %% Conn State conn_state :: conn_state(), %% Takeover @@ -191,7 +191,11 @@ info(topic_aliases, #channel{topic_aliases = Aliases}) -> info(alias_maximum, #channel{alias_maximum = Limits}) -> Limits; info(timers, #channel{timers = Timers}) -> - Timers. 
+ Timers; +info(session_state, #channel{session = Session}) -> + Session; +info(impl, #channel{session = Session}) -> + emqx_session:info(impl, Session). set_conn_state(ConnState, Channel) -> Channel#channel{conn_state = ConnState}. @@ -536,13 +540,17 @@ handle_in(?AUTH_PACKET(), Channel) -> handle_out(disconnect, ?RC_IMPLEMENTATION_SPECIFIC_ERROR, Channel); handle_in({frame_error, Reason}, Channel = #channel{conn_state = idle}) -> shutdown(shutdown_count(frame_error, Reason), Channel); -handle_in({frame_error, frame_too_large}, Channel = #channel{conn_state = connecting}) -> +handle_in( + {frame_error, #{cause := frame_too_large} = R}, Channel = #channel{conn_state = connecting} +) -> shutdown( - shutdown_count(frame_error, frame_too_large), ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE), Channel + shutdown_count(frame_error, R), ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE), Channel ); handle_in({frame_error, Reason}, Channel = #channel{conn_state = connecting}) -> shutdown(shutdown_count(frame_error, Reason), ?CONNACK_PACKET(?RC_MALFORMED_PACKET), Channel); -handle_in({frame_error, frame_too_large}, Channel = #channel{conn_state = ConnState}) when +handle_in( + {frame_error, #{cause := frame_too_large}}, Channel = #channel{conn_state = ConnState} +) when ConnState =:= connected orelse ConnState =:= reauthenticating -> handle_out(disconnect, {?RC_PACKET_TOO_LARGE, frame_too_large}, Channel); @@ -608,10 +616,10 @@ process_publish(Packet = ?PUBLISH_PACKET(QoS, Topic, PacketId), Channel) -> Msg = packet_to_message(NPacket, NChannel), do_publish(PacketId, Msg, NChannel); {error, Rc = ?RC_NOT_AUTHORIZED, NChannel} -> - ?SLOG( + ?SLOG_THROTTLE( warning, #{ - msg => "cannot_publish_to_topic", + msg => cannot_publish_to_topic_due_to_not_authorized, reason => emqx_reason_codes:name(Rc) }, #{topic => Topic} @@ -627,10 +635,10 @@ process_publish(Packet = ?PUBLISH_PACKET(QoS, Topic, PacketId), Channel) -> handle_out(disconnect, Rc, NChannel) end; {error, Rc = ?RC_QUOTA_EXCEEDED, NChannel} -> 
- ?SLOG( + ?SLOG_THROTTLE( warning, #{ - msg => "cannot_publish_to_topic", + msg => cannot_publish_to_topic_due_to_quota_exceeded, reason => emqx_reason_codes:name(Rc) }, #{topic => Topic} @@ -928,7 +936,8 @@ handle_deliver( Delivers1 = maybe_nack(Delivers), Messages = emqx_session:enrich_delivers(ClientInfo, Delivers1, Session), NSession = emqx_session_mem:enqueue(ClientInfo, Messages, Session), - {ok, Channel#channel{session = NSession}}; + %% we need to update stats here, as the stats_timer is canceled after disconnected + {ok, {event, updated}, Channel#channel{session = NSession}}; handle_deliver(Delivers, Channel) -> Delivers1 = emqx_external_trace:start_trace_send(Delivers, trace_info(Channel)), do_handle_deliver(Delivers1, Channel). @@ -1546,6 +1555,8 @@ set_username( set_username(_ConnPkt, ClientInfo) -> {ok, ClientInfo}. +%% The `is_bridge` bit flag in CONNECT packet (parsed as `bridge_mode`) +%% is invented by mosquitto, named 'try_private': https://mosquitto.org/man/mosquitto-conf-5.html set_bridge_mode(#mqtt_packet_connect{is_bridge = true}, ClientInfo) -> {ok, ClientInfo#{is_bridge => true}}; set_bridge_mode(_ConnPkt, _ClientInfo) -> @@ -2002,14 +2013,15 @@ merge_default_subopts(SubOpts) -> %%-------------------------------------------------------------------- %% Enrich ConnAck Caps -enrich_connack_caps( - AckProps, - ?IS_MQTT_V5 = #channel{ +enrich_connack_caps(AckProps, ?IS_MQTT_V5 = Channel) -> + #channel{ clientinfo = #{ zone := Zone + }, + conninfo = #{ + receive_maximum := ReceiveMaximum } - } -) -> + } = Channel, #{ max_packet_size := MaxPktSize, max_qos_allowed := MaxQoS, @@ -2024,7 +2036,8 @@ enrich_connack_caps( 'Topic-Alias-Maximum' => MaxAlias, 'Wildcard-Subscription-Available' => flag(Wildcard), 'Subscription-Identifier-Available' => 1, - 'Shared-Subscription-Available' => flag(Shared) + 'Shared-Subscription-Available' => flag(Shared), + 'Receive-Maximum' => ReceiveMaximum }, %% MQTT 5.0 - 3.2.2.3.4: %% It is a Protocol Error to include 
Maximum QoS more than once, @@ -2318,6 +2331,8 @@ shutdown(Reason, Reply, Packet, Channel) -> %% process exits with {shutdown, #{shutdown_count := Kind}} will trigger %% the connection supervisor (esockd) to keep a shutdown-counter grouped by Kind +shutdown_count(_Kind, #{cause := Cause} = Reason) when is_atom(Cause) -> + Reason#{shutdown_count => Cause}; shutdown_count(Kind, Reason) when is_map(Reason) -> Reason#{shutdown_count => Kind}; shutdown_count(Kind, Reason) -> diff --git a/apps/emqx/src/emqx_cm.erl b/apps/emqx/src/emqx_cm.erl index 660ac3cfe..0cf015141 100644 --- a/apps/emqx/src/emqx_cm.erl +++ b/apps/emqx/src/emqx_cm.erl @@ -1,5 +1,5 @@ %%------------------------------------------------------------------- -%% Copyright (c) 2017-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2017-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -124,7 +124,8 @@ {?CHAN_TAB, 'channels.count', 'channels.max'}, {?CHAN_TAB, 'sessions.count', 'sessions.max'}, {?CHAN_CONN_TAB, 'connections.count', 'connections.max'}, - {?CHAN_LIVE_TAB, 'live_connections.count', 'live_connections.max'} + {?CHAN_LIVE_TAB, 'live_connections.count', 'live_connections.max'}, + {?CHAN_REG_TAB, 'cluster_sessions.count', 'cluster_sessions.max'} ]). %% Batch drain @@ -200,12 +201,12 @@ do_unregister_channel({_ClientId, ChanPid} = Chan) -> true. %% @doc Get info of a channel. --spec get_chan_info(emqx_types:clientid()) -> maybe(emqx_types:infos()). +-spec get_chan_info(emqx_types:clientid()) -> option(emqx_types:infos()). get_chan_info(ClientId) -> with_channel(ClientId, fun(ChanPid) -> get_chan_info(ClientId, ChanPid) end). -spec do_get_chan_info(emqx_types:clientid(), chan_pid()) -> - maybe(emqx_types:infos()). + option(emqx_types:infos()). 
do_get_chan_info(ClientId, ChanPid) -> Chan = {ClientId, ChanPid}, try @@ -215,7 +216,7 @@ do_get_chan_info(ClientId, ChanPid) -> end. -spec get_chan_info(emqx_types:clientid(), chan_pid()) -> - maybe(emqx_types:infos()). + option(emqx_types:infos()). get_chan_info(ClientId, ChanPid) -> wrap_rpc(emqx_cm_proto_v2:get_chan_info(ClientId, ChanPid)). @@ -230,12 +231,12 @@ set_chan_info(ClientId, Info) when ?IS_CLIENTID(ClientId) -> end. %% @doc Get channel's stats. --spec get_chan_stats(emqx_types:clientid()) -> maybe(emqx_types:stats()). +-spec get_chan_stats(emqx_types:clientid()) -> option(emqx_types:stats()). get_chan_stats(ClientId) -> with_channel(ClientId, fun(ChanPid) -> get_chan_stats(ClientId, ChanPid) end). -spec do_get_chan_stats(emqx_types:clientid(), chan_pid()) -> - maybe(emqx_types:stats()). + option(emqx_types:stats()). do_get_chan_stats(ClientId, ChanPid) -> Chan = {ClientId, ChanPid}, try @@ -245,7 +246,7 @@ do_get_chan_stats(ClientId, ChanPid) -> end. -spec get_chan_stats(emqx_types:clientid(), chan_pid()) -> - maybe(emqx_types:stats()). + option(emqx_types:stats()). get_chan_stats(ClientId, ChanPid) -> wrap_rpc(emqx_cm_proto_v2:get_chan_stats(ClientId, ChanPid)). @@ -325,7 +326,7 @@ takeover_session_end({ConnMod, ChanPid}) -> end. -spec pick_channel(emqx_types:clientid()) -> - maybe(pid()). + option(pid()). 
pick_channel(ClientId) -> case lookup_channels(ClientId) of [] -> @@ -670,7 +671,11 @@ handle_info({'DOWN', _MRef, process, Pid, _Reason}, State = #{chan_pmon := PMon} ChanPids = [Pid | emqx_utils:drain_down(BatchSize)], {Items, PMon1} = emqx_pmon:erase_all(ChanPids, PMon), lists:foreach(fun mark_channel_disconnected/1, ChanPids), - ok = emqx_pool:async_submit(fun lists:foreach/2, [fun ?MODULE:clean_down/1, Items]), + ok = emqx_pool:async_submit_to_pool( + ?CM_POOL, + fun lists:foreach/2, + [fun ?MODULE:clean_down/1, Items] + ), {noreply, State#{chan_pmon := PMon1}}; handle_info(Info, State) -> ?SLOG(error, #{msg => "unexpected_info", info => Info}), diff --git a/apps/emqx/src/emqx_cm_locker.erl b/apps/emqx/src/emqx_cm_locker.erl index f56f9239a..c767901ed 100644 --- a/apps/emqx/src/emqx_cm_locker.erl +++ b/apps/emqx/src/emqx_cm_locker.erl @@ -32,7 +32,7 @@ start_link() -> ekka_locker:start_link(?MODULE). -spec trans( - maybe(emqx_types:clientid()), + option(emqx_types:clientid()), fun(([node()]) -> any()) ) -> any(). trans(undefined, Fun) -> diff --git a/apps/emqx/src/emqx_cm_registry.erl b/apps/emqx/src/emqx_cm_registry.erl index 058bb53ec..683afcb86 100644 --- a/apps/emqx/src/emqx_cm_registry.erl +++ b/apps/emqx/src/emqx_cm_registry.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2019-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2019-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -19,18 +19,15 @@ -behaviour(gen_server). --include("emqx.hrl"). --include("emqx_cm.hrl"). --include("logger.hrl"). --include("types.hrl"). - -export([start_link/0]). --export([is_enabled/0]). +-export([is_enabled/0, is_hist_enabled/0]). 
-export([ register_channel/1, - unregister_channel/1 + register_channel2/1, + unregister_channel/1, + unregister_channel2/1 ]). -export([lookup_channels/1]). @@ -50,10 +47,13 @@ do_cleanup_channels/1 ]). --define(REGISTRY, ?MODULE). --define(LOCK, {?MODULE, cleanup_down}). +-include("emqx.hrl"). +-include("emqx_cm.hrl"). +-include("logger.hrl"). +-include("types.hrl"). --record(channel, {chid, pid}). +-define(REGISTRY, ?MODULE). +-define(NODE_DOWN_CLEANUP_LOCK, {?MODULE, cleanup_down}). %% @doc Start the global channel registry. -spec start_link() -> startlink_ret(). @@ -69,6 +69,11 @@ start_link() -> is_enabled() -> emqx:get_config([broker, enable_session_registry]). +%% @doc Is the global session registration history enabled? +-spec is_hist_enabled() -> boolean(). +is_hist_enabled() -> + retain_duration() > 0. + %% @doc Register a global channel. -spec register_channel( emqx_types:clientid() @@ -77,11 +82,21 @@ is_enabled() -> register_channel(ClientId) when is_binary(ClientId) -> register_channel({ClientId, self()}); register_channel({ClientId, ChanPid}) when is_binary(ClientId), is_pid(ChanPid) -> + IsHistEnabled = is_hist_enabled(), case is_enabled() of - true -> mria:dirty_write(?CHAN_REG_TAB, record(ClientId, ChanPid)); - false -> ok + true when IsHistEnabled -> + mria:async_dirty(?CM_SHARD, fun ?MODULE:register_channel2/1, [record(ClientId, ChanPid)]); + true -> + mria:dirty_write(?CHAN_REG_TAB, record(ClientId, ChanPid)); + false -> + ok end. +%% @private +register_channel2(#channel{chid = ClientId} = Record) -> + _ = delete_hist_d(ClientId), + mria:dirty_write(?CHAN_REG_TAB, Record). + %% @doc Unregister a global channel. 
-spec unregister_channel( emqx_types:clientid() @@ -90,19 +105,54 @@ register_channel({ClientId, ChanPid}) when is_binary(ClientId), is_pid(ChanPid) unregister_channel(ClientId) when is_binary(ClientId) -> unregister_channel({ClientId, self()}); unregister_channel({ClientId, ChanPid}) when is_binary(ClientId), is_pid(ChanPid) -> + IsHistEnabled = is_hist_enabled(), case is_enabled() of - true -> mria:dirty_delete_object(?CHAN_REG_TAB, record(ClientId, ChanPid)); - false -> ok + true when IsHistEnabled -> + mria:async_dirty(?CM_SHARD, fun ?MODULE:unregister_channel2/1, [ + record(ClientId, ChanPid) + ]); + true -> + mria:dirty_delete_object(?CHAN_REG_TAB, record(ClientId, ChanPid)); + false -> + ok end. +%% @private +unregister_channel2(#channel{chid = ClientId} = Record) -> + mria:dirty_delete_object(?CHAN_REG_TAB, Record), + ok = insert_hist_d(ClientId). + %% @doc Lookup the global channels. -spec lookup_channels(emqx_types:clientid()) -> list(pid()). lookup_channels(ClientId) -> - [ChanPid || #channel{pid = ChanPid} <- mnesia:dirty_read(?CHAN_REG_TAB, ClientId)]. + lists:filtermap( + fun + (#channel{pid = ChanPid}) when is_pid(ChanPid) -> + case is_pid_down(ChanPid) of + true -> + false; + _ -> + {true, ChanPid} + end; + (_) -> + false + end, + mnesia:dirty_read(?CHAN_REG_TAB, ClientId) + ). + +%% Return 'true' or 'false' if it's a local pid. +%% Otherwise return 'unknown'. +is_pid_down(Pid) when node(Pid) =:= node() -> + not erlang:is_process_alive(Pid); +is_pid_down(_) -> + unknown. record(ClientId, ChanPid) -> #channel{chid = ClientId, pid = ChanPid}. +hist(ClientId) -> + #channel{chid = ClientId, pid = now_ts()}. 
+ %%-------------------------------------------------------------------- %% gen_server callbacks %%-------------------------------------------------------------------- @@ -158,15 +208,95 @@ code_change(_OldVsn, State, _Extra) -> cleanup_channels(Node) -> global:trans( - {?LOCK, self()}, + {?NODE_DOWN_CLEANUP_LOCK, self()}, fun() -> mria:transaction(?CM_SHARD, fun ?MODULE:do_cleanup_channels/1, [Node]) end ). do_cleanup_channels(Node) -> - Pat = [{#channel{pid = '$1', _ = '_'}, [{'==', {node, '$1'}, Node}], ['$_']}], - lists:foreach(fun delete_channel/1, mnesia:select(?CHAN_REG_TAB, Pat, write)). + Pat = [ + { + #channel{pid = '$1', _ = '_'}, + _Match = [{'andalso', {is_pid, '$1'}, {'==', {node, '$1'}, Node}}], + _Return = ['$_'] + } + ], + IsHistEnabled = is_hist_enabled(), + lists:foreach( + fun(Chan) -> delete_channel(IsHistEnabled, Chan) end, + mnesia:select(?CHAN_REG_TAB, Pat, write) + ). -delete_channel(Chan) -> - mnesia:delete_object(?CHAN_REG_TAB, Chan, write). +delete_channel(IsHistEnabled, Chan) -> + mnesia:delete_object(?CHAN_REG_TAB, Chan, write), + case IsHistEnabled of + true -> + insert_hist_t(Chan#channel.chid); + false -> + ok + end. + +%%-------------------------------------------------------------------- +%% History entry operations + +%% Insert unregistration history in a transaction when unregistering the last channel for a clientid. +insert_hist_t(ClientId) -> + case delete_hist_t(ClientId) of + true -> + ok; + false -> + mnesia:write(?CHAN_REG_TAB, hist(ClientId), write) + end. + +%% Dirty insert unregistration history. +%% Since dirty opts are used, async pool workers may race deletes and inserts, +%% so there could be more than one history records for a clientid, +%% but it should be eventually consistent after the client re-registers or the periodic cleanup. +insert_hist_d(ClientId) -> + %% delete old hist records first + case delete_hist_d(ClientId) of + true -> + ok; + false -> + mria:dirty_write(?CHAN_REG_TAB, hist(ClientId)) + end. 
+ +%% Current timestamp in seconds. +now_ts() -> + erlang:system_time(seconds). + +%% Delete all history records for a clientid, return true if there is a Pid found. +delete_hist_t(ClientId) -> + fold_hist( + fun(Hist) -> mnesia:delete_object(?CHAN_REG_TAB, Hist, write) end, + mnesia:read(?CHAN_REG_TAB, ClientId, write) + ). + +%% Delete all history records for a clientid, return true if there is a Pid found. +delete_hist_d(ClientId) -> + fold_hist( + fun(Hist) -> mria:dirty_delete_object(?CHAN_REG_TAB, Hist) end, + mnesia:dirty_read(?CHAN_REG_TAB, ClientId) + ). + +%% Fold over the history records, return true if there is a Pid found. +fold_hist(F, List) -> + lists:foldl( + fun(#channel{pid = Ts} = Record, HasPid) -> + case is_integer(Ts) of + true -> + ok = F(Record), + HasPid; + false -> + true + end + end, + false, + List + ). + +%% Return the session registration history retain duration. +-spec retain_duration() -> non_neg_integer(). +retain_duration() -> + emqx:get_config([broker, session_history_retain]). diff --git a/apps/emqx/src/emqx_cm_registry_keeper.erl b/apps/emqx/src/emqx_cm_registry_keeper.erl new file mode 100644 index 000000000..e96fcdd7d --- /dev/null +++ b/apps/emqx/src/emqx_cm_registry_keeper.erl @@ -0,0 +1,194 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. 
+%%-------------------------------------------------------------------- + +%% @doc This module implements the global session registry history cleaner. +-module(emqx_cm_registry_keeper). +-behaviour(gen_server). + +-export([ + start_link/0, + count/1 +]). + +%% gen_server callbacks +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). + +-include_lib("stdlib/include/ms_transform.hrl"). +-include("emqx_cm.hrl"). + +-define(CACHE_COUNT_THRESHOLD, 1000). +-define(MIN_COUNT_INTERVAL_SECONDS, 5). +-define(CLEANUP_CHUNK_SIZE, 10000). + +-define(IS_HIST_ENABLED(RETAIN), (RETAIN > 0)). + +start_link() -> + gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + +init(_) -> + case mria_config:whoami() =:= replicant of + true -> + ignore; + false -> + ok = send_delay_start(), + {ok, #{next_clientid => undefined}} + end. + +%% @doc Count the number of sessions. +%% Include sessions which are expired since the given timestamp if `since' is greater than 0. +-spec count(non_neg_integer()) -> non_neg_integer(). +count(Since) -> + Retain = retain_duration(), + Now = now_ts(), + %% Get table size if hist is not enabled or + %% Since is before the earliest possible retention time. + IsCountAll = (not ?IS_HIST_ENABLED(Retain) orelse (Now - Retain >= Since)), + case IsCountAll of + true -> + mnesia:table_info(?CHAN_REG_TAB, size); + false -> + %% make a gen call to avoid many callers doing the same concurrently + gen_server:call(?MODULE, {count, Since}, infinity) + end. 
+ +handle_call({count, Since}, _From, State) -> + {LastCountTime, LastCount} = + case State of + #{last_count_time := T, last_count := C} -> + {T, C}; + _ -> + {0, 0} + end, + Now = now_ts(), + Total = mnesia:table_info(?CHAN_REG_TAB, size), + %% Always count if the table is small enough + %% or when the last count is too old + IsTableSmall = (Total < ?CACHE_COUNT_THRESHOLD), + IsLastCountOld = (Now - LastCountTime > ?MIN_COUNT_INTERVAL_SECONDS), + case IsTableSmall orelse IsLastCountOld of + true -> + Count = do_count(Since), + CountFinishedAt = now_ts(), + {reply, Count, State#{last_count_time => CountFinishedAt, last_count => Count}}; + false -> + {reply, LastCount, State} + end; +handle_call(_Request, _From, State) -> + {reply, ok, State}. + +handle_cast(_Msg, State) -> + {noreply, State}. + +handle_info(start, #{next_clientid := NextClientId} = State) -> + case is_hist_enabled() of + true -> + NewNext = + case cleanup_one_chunk(NextClientId) of + '$end_of_table' -> + ok = send_delay_start(), + undefined; + Id -> + _ = erlang:garbage_collect(), + Id + end, + {noreply, State#{next_clientid := NewNext}}; + false -> + %% if not enabled, delay and check again + %% because it might be enabled from online config change while waiting + ok = send_delay_start(), + {noreply, State} + end; +handle_info(_Info, State) -> + {noreply, State}. + +terminate(_Reason, _State) -> + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +cleanup_one_chunk(NextClientId) -> + Retain = retain_duration(), + Now = now_ts(), + IsExpired = fun(#channel{pid = Ts}) -> + is_integer(Ts) andalso (Ts < Now - Retain) + end, + cleanup_loop(NextClientId, ?CLEANUP_CHUNK_SIZE, IsExpired). 
+ +cleanup_loop(ClientId, 0, _IsExpired) -> + ClientId; +cleanup_loop('$end_of_table', _Count, _IsExpired) -> + '$end_of_table'; +cleanup_loop(undefined, Count, IsExpired) -> + cleanup_loop(mnesia:dirty_first(?CHAN_REG_TAB), Count, IsExpired); +cleanup_loop(ClientId, Count, IsExpired) -> + Records = mnesia:dirty_read(?CHAN_REG_TAB, ClientId), + Next = mnesia:dirty_next(?CHAN_REG_TAB, ClientId), + lists:foreach( + fun(R) -> + case IsExpired(R) of + true -> + mria:dirty_delete_object(?CHAN_REG_TAB, R); + false -> + ok + end + end, + Records + ), + cleanup_loop(Next, Count - 1, IsExpired). + +is_hist_enabled() -> + retain_duration() > 0. + +%% Return the session registration history retain duration in seconds. +-spec retain_duration() -> non_neg_integer(). +retain_duration() -> + emqx:get_config([broker, session_history_retain]). + +cleanup_delay() -> + Default = timer:minutes(2), + case retain_duration() of + 0 -> + %% prepare for online config change + Default; + RetainSeconds -> + Min = max(timer:seconds(1), timer:seconds(RetainSeconds) div 4), + min(Min, Default) + end. + +send_delay_start() -> + Delay = cleanup_delay(), + ok = send_delay_start(Delay). + +send_delay_start(Delay) -> + _ = erlang:send_after(Delay, self(), start), + ok. + +now_ts() -> + erlang:system_time(seconds). + +do_count(Since) -> + Ms = ets:fun2ms(fun(#channel{pid = V}) -> + is_pid(V) orelse (is_integer(V) andalso (V >= Since)) + end), + ets:select_count(?CHAN_REG_TAB, Ms). diff --git a/apps/emqx/src/emqx_cm_sup.erl b/apps/emqx/src/emqx_cm_sup.erl index e7420b4da..58685804b 100644 --- a/apps/emqx/src/emqx_cm_sup.erl +++ b/apps/emqx/src/emqx_cm_sup.erl @@ -25,11 +25,14 @@ %% for test -export([restart_flapping/0]). +-include("emqx_cm.hrl"). 
+ %%-------------------------------------------------------------------- %% API %%-------------------------------------------------------------------- start_link() -> + ok = mria:wait_for_tables(emqx_banned:create_tables()), supervisor:start_link({local, ?MODULE}, ?MODULE, []). %%-------------------------------------------------------------------- @@ -45,7 +48,9 @@ init([]) -> Banned = child_spec(emqx_banned, 1000, worker), Flapping = child_spec(emqx_flapping, 1000, worker), Locker = child_spec(emqx_cm_locker, 5000, worker), + CmPool = emqx_pool_sup:spec(emqx_cm_pool_sup, [?CM_POOL, random, {emqx_pool, start_link, []}]), Registry = child_spec(emqx_cm_registry, 5000, worker), + RegistryKeeper = child_spec(emqx_cm_registry_keeper, 5000, worker), Manager = child_spec(emqx_cm, 5000, worker), DSSessionGCSup = child_spec(emqx_persistent_session_ds_sup, infinity, supervisor), Children = @@ -53,7 +58,9 @@ init([]) -> Banned, Flapping, Locker, + CmPool, Registry, + RegistryKeeper, Manager, DSSessionGCSup ], diff --git a/apps/emqx/src/emqx_config_handler.erl b/apps/emqx/src/emqx_config_handler.erl index 4cc5b2908..c20a74a5b 100644 --- a/apps/emqx/src/emqx_config_handler.erl +++ b/apps/emqx/src/emqx_config_handler.erl @@ -675,9 +675,19 @@ merge_to_override_config(RawConf, Opts) -> maps:merge(UpgradedOldConf, RawConf). upgrade_conf(Conf) -> + ConfigLoader = emqx_app:get_config_loader(), + %% ensure module loaded + _ = ConfigLoader:module_info(), + case erlang:function_exported(ConfigLoader, schema_module, 0) of + true -> + try_upgrade_conf(apply(ConfigLoader, schema_module, []), Conf); + false -> + %% this happens during emqx app standalone test + Conf + end. 
+ +try_upgrade_conf(SchemaModule, Conf) -> try - ConfLoader = emqx_app:get_config_loader(), - SchemaModule = apply(ConfLoader, schema_module, []), apply(SchemaModule, upgrade_raw_conf, [Conf]) catch ErrorType:Reason:Stack -> diff --git a/apps/emqx/src/emqx_connection.erl b/apps/emqx/src/emqx_connection.erl index d306464c1..96e4f54c7 100644 --- a/apps/emqx/src/emqx_connection.erl +++ b/apps/emqx/src/emqx_connection.erl @@ -99,13 +99,13 @@ %% Channel State channel :: emqx_channel:channel(), %% GC State - gc_state :: maybe(emqx_gc:gc_state()), + gc_state :: option(emqx_gc:gc_state()), %% Stats Timer - stats_timer :: disabled | maybe(reference()), + stats_timer :: disabled | option(reference()), %% Idle Timeout idle_timeout :: integer() | infinity, %% Idle Timer - idle_timer :: maybe(reference()), + idle_timer :: option(reference()), %% Zone name zone :: atom(), %% Listener Type and Name @@ -121,7 +121,7 @@ limiter_timer :: undefined | reference(), %% QUIC conn owner pid if in use. - quic_conn_pid :: maybe(pid()) + quic_conn_pid :: option(pid()) }). -record(retry, { diff --git a/apps/emqx/src/emqx_exclusive_subscription.erl b/apps/emqx/src/emqx_exclusive_subscription.erl index 3bc08eeca..1698eec26 100644 --- a/apps/emqx/src/emqx_exclusive_subscription.erl +++ b/apps/emqx/src/emqx_exclusive_subscription.erl @@ -22,14 +22,11 @@ -logger_header("[exclusive]"). %% Mnesia bootstrap --export([mnesia/1]). +-export([create_tables/0]). %% For upgrade -export([on_add_module/0, on_delete_module/0]). --boot_mnesia({mnesia, [boot]}). --copy_mnesia({mnesia, [copy]}). 
- -export([ check_subscribe/2, unsubscribe/2, @@ -53,7 +50,7 @@ %% Mnesia bootstrap %%-------------------------------------------------------------------- -mnesia(boot) -> +create_tables() -> StoreProps = [ {ets, [ {read_concurrency, true}, @@ -68,14 +65,14 @@ mnesia(boot) -> {attributes, record_info(fields, exclusive_subscription)}, {storage_properties, StoreProps} ]), - ok = mria_rlog:wait_for_shards([?EXCLUSIVE_SHARD], infinity). + [?TAB]. %%-------------------------------------------------------------------- %% Upgrade %%-------------------------------------------------------------------- on_add_module() -> - mnesia(boot). + mria:wait_for_tables(create_tables()). on_delete_module() -> clear(). diff --git a/apps/emqx/src/emqx_flapping.erl b/apps/emqx/src/emqx_flapping.erl index 7e8b8f9fc..1615c8aba 100644 --- a/apps/emqx/src/emqx_flapping.erl +++ b/apps/emqx/src/emqx_flapping.erl @@ -150,7 +150,7 @@ handle_cast( ), Now = erlang:system_time(second), Banned = #banned{ - who = {clientid, ClientId}, + who = emqx_banned:who(clientid, ClientId), by = <<"flapping detector">>, reason = <<"flapping is detected">>, at = Now, diff --git a/apps/emqx/src/emqx_frame.erl b/apps/emqx/src/emqx_frame.erl index 0799a24ee..b912abcd1 100644 --- a/apps/emqx/src/emqx_frame.erl +++ b/apps/emqx/src/emqx_frame.erl @@ -168,7 +168,7 @@ parse_remaining_len(Rest, Header, Options) -> parse_remaining_len(_Bin, _Header, _Multiplier, Length, #{max_size := MaxSize}) when Length > MaxSize -> - ?PARSE_ERR(frame_too_large); + ?PARSE_ERR(#{cause => frame_too_large, limit => MaxSize, received => Length}); parse_remaining_len(<<>>, Header, Multiplier, Length, Options) -> {more, {{len, #{hdr => Header, len => {Multiplier, Length}}}, Options}}; %% Match DISCONNECT without payload @@ -189,12 +189,12 @@ parse_remaining_len( parse_remaining_len( <<0:8, _Rest/binary>>, _Header = #mqtt_packet_header{type = ?PINGRESP}, 1, 0, _Options ) -> - ?PARSE_ERR(#{hint => unexpected_packet, header_type => 'PINGRESP'}); + 
?PARSE_ERR(#{cause => unexpected_packet, header_type => 'PINGRESP'}); %% All other types of messages should not have a zero remaining length. parse_remaining_len( <<0:8, _Rest/binary>>, Header, 1, 0, _Options ) -> - ?PARSE_ERR(#{hint => zero_remaining_len, header_type => Header#mqtt_packet_header.type}); + ?PARSE_ERR(#{cause => zero_remaining_len, header_type => Header#mqtt_packet_header.type}); %% Match PUBACK, PUBREC, PUBREL, PUBCOMP, UNSUBACK... parse_remaining_len(<<0:1, 2:7, Rest/binary>>, Header, 1, 0, Options) -> parse_frame(Rest, Header, 2, Options); @@ -213,7 +213,7 @@ parse_remaining_len( ) -> FrameLen = Value + Len * Multiplier, case FrameLen > MaxSize of - true -> ?PARSE_ERR(frame_too_large); + true -> ?PARSE_ERR(#{cause => frame_too_large, limit => MaxSize, received => FrameLen}); false -> parse_frame(Rest, Header, FrameLen, Options) end. @@ -267,7 +267,7 @@ packet(Header, Variable, Payload) -> #mqtt_packet{header = Header, variable = Variable, payload = Payload}. parse_connect(FrameBin, StrictMode) -> - {ProtoName, Rest} = parse_utf8_string_with_hint(FrameBin, StrictMode, invalid_proto_name), + {ProtoName, Rest} = parse_utf8_string_with_cause(FrameBin, StrictMode, invalid_proto_name), case ProtoName of <<"MQTT">> -> ok; @@ -277,7 +277,7 @@ parse_connect(FrameBin, StrictMode) -> %% from spec: the server MAY send disconnect with reason code 0x84 %% we chose to close socket because the client is likely not talking MQTT anyway ?PARSE_ERR(#{ - hint => invalid_proto_name, + cause => invalid_proto_name, expected => <<"'MQTT' or 'MQIsdp'">>, received => ProtoName }) @@ -296,11 +296,12 @@ parse_connect2( 1 -> ?PARSE_ERR(reserved_connect_flag) end, {Properties, Rest3} = parse_properties(Rest2, ProtoVer, StrictMode), - {ClientId, Rest4} = parse_utf8_string_with_hint(Rest3, StrictMode, invalid_clientid), + {ClientId, Rest4} = parse_utf8_string_with_cause(Rest3, StrictMode, invalid_clientid), ConnPacket = #mqtt_packet_connect{ proto_name = ProtoName, proto_ver = 
ProtoVer, %% For bridge mode, non-standard implementation + %% Invented by mosquitto, named 'try_private': https://mosquitto.org/man/mosquitto-conf-5.html is_bridge = (BridgeTag =:= 8), clean_start = bool(CleanStart), will_flag = bool(WillFlag), @@ -314,14 +315,14 @@ parse_connect2( {Username, Rest6} = parse_optional( Rest5, fun(Bin) -> - parse_utf8_string_with_hint(Bin, StrictMode, invalid_username) + parse_utf8_string_with_cause(Bin, StrictMode, invalid_username) end, bool(UsernameFlag) ), {Password, Rest7} = parse_optional( Rest6, fun(Bin) -> - parse_utf8_string_with_hint(Bin, StrictMode, invalid_password) + parse_utf8_string_with_cause(Bin, StrictMode, invalid_password) end, bool(PasswordFlag) ), @@ -329,10 +330,14 @@ parse_connect2( <<>> -> ConnPacket1#mqtt_packet_connect{username = Username, password = Password}; _ -> - ?PARSE_ERR(malformed_connect_data) + ?PARSE_ERR(#{ + cause => malformed_connect, + unexpected_trailing_bytes => size(Rest7) + }) end; -parse_connect2(_ProtoName, _, _) -> - ?PARSE_ERR(malformed_connect_header). +parse_connect2(_ProtoName, Bin, _StrictMode) -> + %% sent less than 32 bytes + ?PARSE_ERR(#{cause => malformed_connect, header_bytes => Bin}). parse_packet( #mqtt_packet_header{type = ?CONNECT}, @@ -361,7 +366,7 @@ parse_packet( Bin, #{strict_mode := StrictMode, version := Ver} ) -> - {TopicName, Rest} = parse_utf8_string_with_hint(Bin, StrictMode, invalid_topic), + {TopicName, Rest} = parse_utf8_string_with_cause(Bin, StrictMode, invalid_topic), {PacketId, Rest1} = case QoS of ?QOS_0 -> {undefined, Rest}; @@ -473,7 +478,7 @@ parse_packet( {Properties, <<>>} = parse_properties(Rest, ?MQTT_PROTO_V5, StrictMode), #mqtt_packet_auth{reason_code = ReasonCode, properties = Properties}; parse_packet(Header, _FrameBin, _Options) -> - ?PARSE_ERR(#{hint => malformed_packet, header_type => Header#mqtt_packet_header.type}). + ?PARSE_ERR(#{cause => malformed_packet, header_type => Header#mqtt_packet_header.type}). 
parse_will_message( Packet = #mqtt_packet_connect{ @@ -484,8 +489,8 @@ parse_will_message( StrictMode ) -> {Props, Rest} = parse_properties(Bin, Ver, StrictMode), - {Topic, Rest1} = parse_utf8_string_with_hint(Rest, StrictMode, invalid_topic), - {Payload, Rest2} = parse_binary_data(Rest1), + {Topic, Rest1} = parse_utf8_string_with_cause(Rest, StrictMode, invalid_topic), + {Payload, Rest2} = parse_will_payload(Rest1), { Packet#mqtt_packet_connect{ will_props = Props, @@ -517,7 +522,7 @@ parse_properties(Bin, ?MQTT_PROTO_V5, StrictMode) -> {parse_property(PropsBin, #{}, StrictMode), Rest1}; _ -> ?PARSE_ERR(#{ - hint => user_property_not_enough_bytes, + cause => user_property_not_enough_bytes, parsed_key_length => Len, remaining_bytes_length => byte_size(Rest) }) @@ -530,10 +535,10 @@ parse_property(<<16#01, Val, Bin/binary>>, Props, StrictMode) -> parse_property(<<16#02, Val:32/big, Bin/binary>>, Props, StrictMode) -> parse_property(Bin, Props#{'Message-Expiry-Interval' => Val}, StrictMode); parse_property(<<16#03, Bin/binary>>, Props, StrictMode) -> - {Val, Rest} = parse_utf8_string_with_hint(Bin, StrictMode, invalid_content_type), + {Val, Rest} = parse_utf8_string_with_cause(Bin, StrictMode, invalid_content_type), parse_property(Rest, Props#{'Content-Type' => Val}, StrictMode); parse_property(<<16#08, Bin/binary>>, Props, StrictMode) -> - {Val, Rest} = parse_utf8_string_with_hint(Bin, StrictMode, invalid_response_topic), + {Val, Rest} = parse_utf8_string_with_cause(Bin, StrictMode, invalid_response_topic), parse_property(Rest, Props#{'Response-Topic' => Val}, StrictMode); parse_property(<<16#09, Len:16/big, Val:Len/binary, Bin/binary>>, Props, StrictMode) -> parse_property(Bin, Props#{'Correlation-Data' => Val}, StrictMode); @@ -543,12 +548,12 @@ parse_property(<<16#0B, Bin/binary>>, Props, StrictMode) -> parse_property(<<16#11, Val:32/big, Bin/binary>>, Props, StrictMode) -> parse_property(Bin, Props#{'Session-Expiry-Interval' => Val}, StrictMode); 
parse_property(<<16#12, Bin/binary>>, Props, StrictMode) -> - {Val, Rest} = parse_utf8_string_with_hint(Bin, StrictMode, invalid_assigned_client_id), + {Val, Rest} = parse_utf8_string_with_cause(Bin, StrictMode, invalid_assigned_client_id), parse_property(Rest, Props#{'Assigned-Client-Identifier' => Val}, StrictMode); parse_property(<<16#13, Val:16, Bin/binary>>, Props, StrictMode) -> parse_property(Bin, Props#{'Server-Keep-Alive' => Val}, StrictMode); parse_property(<<16#15, Bin/binary>>, Props, StrictMode) -> - {Val, Rest} = parse_utf8_string_with_hint(Bin, StrictMode, invalid_authn_method), + {Val, Rest} = parse_utf8_string_with_cause(Bin, StrictMode, invalid_authn_method), parse_property(Rest, Props#{'Authentication-Method' => Val}, StrictMode); parse_property(<<16#16, Len:16/big, Val:Len/binary, Bin/binary>>, Props, StrictMode) -> parse_property(Bin, Props#{'Authentication-Data' => Val}, StrictMode); @@ -559,13 +564,13 @@ parse_property(<<16#18, Val:32, Bin/binary>>, Props, StrictMode) -> parse_property(<<16#19, Val, Bin/binary>>, Props, StrictMode) -> parse_property(Bin, Props#{'Request-Response-Information' => Val}, StrictMode); parse_property(<<16#1A, Bin/binary>>, Props, StrictMode) -> - {Val, Rest} = parse_utf8_string_with_hint(Bin, StrictMode, invalid_response_info), + {Val, Rest} = parse_utf8_string_with_cause(Bin, StrictMode, invalid_response_info), parse_property(Rest, Props#{'Response-Information' => Val}, StrictMode); parse_property(<<16#1C, Bin/binary>>, Props, StrictMode) -> - {Val, Rest} = parse_utf8_string_with_hint(Bin, StrictMode, invalid_server_reference), + {Val, Rest} = parse_utf8_string_with_cause(Bin, StrictMode, invalid_server_reference), parse_property(Rest, Props#{'Server-Reference' => Val}, StrictMode); parse_property(<<16#1F, Bin/binary>>, Props, StrictMode) -> - {Val, Rest} = parse_utf8_string_with_hint(Bin, StrictMode, invalid_reason_string), + {Val, Rest} = parse_utf8_string_with_cause(Bin, StrictMode, invalid_reason_string), 
parse_property(Rest, Props#{'Reason-String' => Val}, StrictMode); parse_property(<<16#21, Val:16/big, Bin/binary>>, Props, StrictMode) -> parse_property(Bin, Props#{'Receive-Maximum' => Val}, StrictMode); @@ -634,7 +639,7 @@ parse_utf8_pair(<>, _StrictMode) when LenK > byte_size(Rest) -> ?PARSE_ERR(#{ - hint => user_property_not_enough_bytes, + cause => user_property_not_enough_bytes, parsed_key_length => LenK, remaining_bytes_length => byte_size(Rest) }); @@ -643,7 +648,7 @@ parse_utf8_pair(<>, _St LenV > byte_size(Rest) -> ?PARSE_ERR(#{ - hint => malformed_user_property_value, + cause => malformed_user_property_value, parsed_key_length => LenK, parsed_value_length => LenV, remaining_bytes_length => byte_size(Rest) @@ -652,16 +657,16 @@ parse_utf8_pair(Bin, _StrictMode) when 4 > byte_size(Bin) -> ?PARSE_ERR(#{ - hint => user_property_not_enough_bytes, + cause => user_property_not_enough_bytes, total_bytes => byte_size(Bin) }). -parse_utf8_string_with_hint(Bin, StrictMode, Hint) -> +parse_utf8_string_with_cause(Bin, StrictMode, Cause) -> try parse_utf8_string(Bin, StrictMode) catch throw:{?FRAME_PARSE_ERROR, Reason} when is_map(Reason) -> - ?PARSE_ERR(Reason#{hint => Hint}) + ?PARSE_ERR(Reason#{cause => Cause}) end. parse_optional(Bin, F, true) -> @@ -677,7 +682,7 @@ parse_utf8_string(<>, _) when Len > byte_size(Rest) -> ?PARSE_ERR(#{ - hint => malformed_utf8_string, + cause => malformed_utf8_string, parsed_length => Len, remaining_bytes_length => byte_size(Rest) }); @@ -686,20 +691,24 @@ parse_utf8_string(Bin, _) when -> ?PARSE_ERR(#{reason => malformed_utf8_string_length}). 
-parse_binary_data(<>) -> +parse_will_payload(<>) -> {Data, Rest}; -parse_binary_data(<>) when +parse_will_payload(<>) when Len > byte_size(Rest) -> ?PARSE_ERR(#{ - hint => malformed_binary_data, + cause => malformed_will_payload, parsed_length => Len, - remaining_bytes_length => byte_size(Rest) + remaining_bytes => byte_size(Rest) }); -parse_binary_data(Bin) when +parse_will_payload(Bin) when 2 > byte_size(Bin) -> - ?PARSE_ERR(malformed_binary_data_length). + ?PARSE_ERR(#{ + cause => malformed_will_payload, + length_bytes => size(Bin), + expected_bytes => 2 + }). %%-------------------------------------------------------------------- %% Serialize MQTT Packet @@ -772,6 +781,7 @@ serialize_variable( proto_name = ProtoName, proto_ver = ProtoVer, %% For bridge mode, non-standard implementation + %% Invented by mosquitto, named 'try_private': https://mosquitto.org/man/mosquitto-conf-5.html is_bridge = IsBridge, clean_start = CleanStart, will_flag = WillFlag, diff --git a/apps/emqx/src/emqx_gc.erl b/apps/emqx/src/emqx_gc.erl index 61087ba29..525a9bea8 100644 --- a/apps/emqx/src/emqx_gc.erl +++ b/apps/emqx/src/emqx_gc.erl @@ -86,11 +86,11 @@ do_run([{K, N} | T], St) -> end. %% @doc Info of GC state. --spec info(maybe(gc_state())) -> maybe(map()). +-spec info(option(gc_state())) -> option(map()). info(?GCS(St)) -> St. %% @doc Reset counters to zero. --spec reset(maybe(gc_state())) -> gc_state(). +-spec reset(option(gc_state())) -> gc_state(). reset(?GCS(St)) -> ?GCS(do_reset(St)). diff --git a/apps/emqx/src/emqx_hooks.erl b/apps/emqx/src/emqx_hooks.erl index efe2c0de8..57e772ddc 100644 --- a/apps/emqx/src/emqx_hooks.erl +++ b/apps/emqx/src/emqx_hooks.erl @@ -76,7 +76,7 @@ -record(callback, { action :: action(), - filter :: maybe(filter()), + filter :: option(filter()), priority :: integer() }). 
diff --git a/apps/emqx/src/emqx_kernel_sup.erl b/apps/emqx/src/emqx_kernel_sup.erl index 85724b9b4..5f1bd6ad1 100644 --- a/apps/emqx/src/emqx_kernel_sup.erl +++ b/apps/emqx/src/emqx_kernel_sup.erl @@ -40,7 +40,8 @@ init([]) -> child_spec(emqx_authn_authz_metrics_sup, supervisor), child_spec(emqx_ocsp_cache, worker), child_spec(emqx_crl_cache, worker), - child_spec(emqx_tls_lib_sup, supervisor) + child_spec(emqx_tls_lib_sup, supervisor), + child_spec(emqx_log_throttler, worker) ] }}. diff --git a/apps/emqx/src/emqx_log_throttler.erl b/apps/emqx/src/emqx_log_throttler.erl new file mode 100644 index 000000000..ef29d5a79 --- /dev/null +++ b/apps/emqx/src/emqx_log_throttler.erl @@ -0,0 +1,151 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_log_throttler). + +-behaviour(gen_server). + +-include("logger.hrl"). +-include("types.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-export([start_link/0]). + +%% throttler API +-export([allow/2]). + +%% gen_server callbacks +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). + +-define(SEQ_ID(Msg), {?MODULE, Msg}). +-define(NEW_SEQ, atomics:new(1, [{signed, false}])). 
+-define(GET_SEQ(Msg), persistent_term:get(?SEQ_ID(Msg), undefined)).
+-define(RESET_SEQ(SeqRef), atomics:put(SeqRef, 1, 0)).
+-define(INC_SEQ(SeqRef), atomics:add(SeqRef, 1, 1)).
+-define(GET_DROPPED(SeqRef), atomics:get(SeqRef, 1) - 1).
+-define(IS_ALLOWED(SeqRef), atomics:add_get(SeqRef, 1, 1) =:= 1).
+
+-define(NEW_THROTTLE(Msg, SeqRef), persistent_term:put(?SEQ_ID(Msg), SeqRef)).
+
+-define(MSGS_LIST, emqx:get_config([log, throttling, msgs], [])).
+-define(TIME_WINDOW_MS, timer:seconds(emqx:get_config([log, throttling, time_window], 60))).
+
+-spec allow(logger:level(), atom()) -> boolean().
+allow(debug, _Msg) ->
+    true;
+allow(_Level, Msg) when is_atom(Msg) ->
+    Seq = persistent_term:get(?SEQ_ID(Msg), undefined),
+    case Seq of
+        undefined ->
+            %% This is either a race condition (emqx_log_throttler is not started yet)
+            %% or a developer mistake (msg used in ?SLOG_THROTTLE/2,3 macro is
+            %% not added to the default value of `log.throttling.msgs`).
+            ?SLOG(info, #{
+                msg => "missing_log_throttle_sequence",
+                throttled_msg => Msg
+            }),
+            true;
+        SeqRef ->
+            ?IS_ALLOWED(SeqRef)
+    end.
+
+-spec start_link() -> startlink_ret().
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+%%--------------------------------------------------------------------
+%% gen_server callbacks
+%%--------------------------------------------------------------------
+
+init([]) ->
+    ok = lists:foreach(fun(Msg) -> ?NEW_THROTTLE(Msg, ?NEW_SEQ) end, ?MSGS_LIST),
+    CurrentPeriodMs = ?TIME_WINDOW_MS,
+    TimerRef = schedule_refresh(CurrentPeriodMs),
+    {ok, #{timer_ref => TimerRef, current_period_ms => CurrentPeriodMs}}.
+
+handle_call(Req, _From, State) ->
+    ?SLOG(error, #{msg => "unexpected_call", call => Req}),
+    {reply, ignored, State}.
+
+handle_cast(Msg, State) ->
+    ?SLOG(error, #{msg => "unexpected_cast", cast => Msg}),
+    {noreply, State}.
+ +handle_info(refresh, #{current_period_ms := PeriodMs} = State) -> + Msgs = ?MSGS_LIST, + DroppedStats = lists:foldl( + fun(Msg, Acc) -> + case ?GET_SEQ(Msg) of + %% Should not happen, unless the static ids list is updated at run-time. + undefined -> + ?NEW_THROTTLE(Msg, ?NEW_SEQ), + ?tp(log_throttler_new_msg, #{throttled_msg => Msg}), + Acc; + SeqRef -> + Dropped = ?GET_DROPPED(SeqRef), + ok = ?RESET_SEQ(SeqRef), + ?tp(log_throttler_dropped, #{dropped_count => Dropped, throttled_msg => Msg}), + maybe_add_dropped(Msg, Dropped, Acc) + end + end, + #{}, + Msgs + ), + maybe_log_dropped(DroppedStats, PeriodMs), + NewPeriodMs = ?TIME_WINDOW_MS, + State1 = State#{ + timer_ref => schedule_refresh(NewPeriodMs), + current_period_ms => NewPeriodMs + }, + {noreply, State1}; +handle_info(Info, State) -> + ?SLOG(error, #{msg => "unxpected_info", info => Info}), + {noreply, State}. + +terminate(_Reason, _State) -> + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +%%-------------------------------------------------------------------- +%% internal functions +%%-------------------------------------------------------------------- + +maybe_add_dropped(Msg, Dropped, DroppedAcc) when Dropped > 0 -> + DroppedAcc#{Msg => Dropped}; +maybe_add_dropped(_Msg, _Dropped, DroppedAcc) -> + DroppedAcc. + +maybe_log_dropped(DroppedStats, PeriodMs) when map_size(DroppedStats) > 0 -> + ?SLOG(warning, #{ + msg => "log_events_throttled_during_last_period", + dropped => DroppedStats, + period => emqx_utils_calendar:human_readable_duration_string(PeriodMs) + }); +maybe_log_dropped(_DroppedStats, _PeriodMs) -> + ok. + +schedule_refresh(PeriodMs) -> + ?tp(log_throttler_sched_refresh, #{new_period_ms => PeriodMs}), + erlang:send_after(PeriodMs, ?MODULE, refresh). diff --git a/apps/emqx/src/emqx_maybe.erl b/apps/emqx/src/emqx_maybe.erl index af2fd04a7..522cd5e98 100644 --- a/apps/emqx/src/emqx_maybe.erl +++ b/apps/emqx/src/emqx_maybe.erl @@ -23,30 +23,30 @@ -export([define/2]). 
-export([apply/2]). --type t(T) :: maybe(T). +-type t(T) :: option(T). -export_type([t/1]). --spec to_list(maybe(A)) -> [A]. +-spec to_list(option(A)) -> [A]. to_list(undefined) -> []; to_list(Term) -> [Term]. --spec from_list([A]) -> maybe(A). +-spec from_list([A]) -> option(A). from_list([]) -> undefined; from_list([Term]) -> Term. --spec define(maybe(A), B) -> A | B. +-spec define(option(A), B) -> A | B. define(undefined, Term) -> Term; define(Term, _) -> Term. %% @doc Apply a function to a maybe argument. --spec apply(fun((A) -> B), maybe(A)) -> - maybe(B). +-spec apply(fun((A) -> B), option(A)) -> + option(B). apply(_Fun, undefined) -> undefined; apply(Fun, Term) when is_function(Fun) -> diff --git a/apps/emqx/src/emqx_message.erl b/apps/emqx/src/emqx_message.erl index 6b684c199..b183aa029 100644 --- a/apps/emqx/src/emqx_message.erl +++ b/apps/emqx/src/emqx_message.erl @@ -65,7 +65,7 @@ ]). -export([ - is_expired/1, + is_expired/2, update_expiry/1, timestamp_now/0 ]). @@ -186,7 +186,7 @@ estimate_size(#message{topic = Topic, payload = Payload}) -> TopicLengthSize = 2, FixedHeaderSize + VarLenSize + TopicLengthSize + TopicSize + PacketIdSize + PayloadSize. --spec id(emqx_types:message()) -> maybe(binary()). +-spec id(emqx_types:message()) -> option(binary()). id(#message{id = Id}) -> Id. -spec qos(emqx_types:message()) -> emqx_types:qos(). @@ -229,7 +229,7 @@ get_flag(Flag, Msg) -> get_flag(Flag, #message{flags = Flags}, Default) -> maps:get(Flag, Flags, Default). --spec get_flags(emqx_types:message()) -> maybe(map()). +-spec get_flags(emqx_types:message()) -> option(map()). get_flags(#message{flags = Flags}) -> Flags. -spec set_flag(emqx_types:flag(), emqx_types:message()) -> emqx_types:message(). @@ -252,7 +252,7 @@ unset_flag(Flag, Msg = #message{flags = Flags}) -> set_headers(New, Msg = #message{headers = Old}) when is_map(New) -> Msg#message{headers = maps:merge(Old, New)}. --spec get_headers(emqx_types:message()) -> maybe(map()). 
+-spec get_headers(emqx_types:message()) -> option(map()). get_headers(Msg) -> Msg#message.headers. -spec get_header(term(), emqx_types:message()) -> term(). @@ -273,14 +273,20 @@ remove_header(Hdr, Msg = #message{headers = Headers}) -> false -> Msg end. --spec is_expired(emqx_types:message()) -> boolean(). -is_expired(#message{ - headers = #{properties := #{'Message-Expiry-Interval' := Interval}}, - timestamp = CreatedAt -}) -> +-spec is_expired(emqx_types:message(), atom()) -> boolean(). +is_expired( + #message{ + headers = #{properties := #{'Message-Expiry-Interval' := Interval}}, + timestamp = CreatedAt + }, + _ +) -> elapsed(CreatedAt) > timer:seconds(Interval); -is_expired(_Msg) -> - false. +is_expired(#message{timestamp = CreatedAt}, Zone) -> + case emqx_config:get_zone_conf(Zone, [mqtt, message_expiry_interval], infinity) of + infinity -> false; + Interval -> elapsed(CreatedAt) > Interval + end. -spec update_expiry(emqx_types:message()) -> emqx_types:message(). update_expiry( diff --git a/apps/emqx/src/emqx_mountpoint.erl b/apps/emqx/src/emqx_mountpoint.erl index c19736690..8da27aad2 100644 --- a/apps/emqx/src/emqx_mountpoint.erl +++ b/apps/emqx/src/emqx_mountpoint.erl @@ -32,7 +32,7 @@ -type mountpoint() :: binary(). --spec mount(maybe(mountpoint()), Any) -> Any when +-spec mount(option(mountpoint()), Any) -> Any when Any :: emqx_types:topic() | emqx_types:share() @@ -47,7 +47,7 @@ mount(MountPoint, Msg = #message{topic = Topic}) when is_binary(Topic) -> mount(MountPoint, TopicFilters) when is_list(TopicFilters) -> [{prefix_maybe_share(MountPoint, Topic), SubOpts} || {Topic, SubOpts} <- TopicFilters]. --spec prefix_maybe_share(maybe(mountpoint()), Any) -> Any when +-spec prefix_maybe_share(option(mountpoint()), Any) -> Any when Any :: emqx_types:topic() | emqx_types:share(). @@ -60,7 +60,7 @@ prefix_maybe_share(MountPoint, #share{group = Group, topic = Topic}) when -> #share{group = Group, topic = prefix_maybe_share(MountPoint, Topic)}. 
--spec unmount(maybe(mountpoint()), Any) -> Any when +-spec unmount(option(mountpoint()), Any) -> Any when Any :: emqx_types:topic() | emqx_types:share() @@ -84,7 +84,7 @@ unmount_maybe_share(MountPoint, TopicFilter = #share{topic = Topic}) when -> TopicFilter#share{topic = unmount_maybe_share(MountPoint, Topic)}. --spec replvar(maybe(mountpoint()), map()) -> maybe(mountpoint()). +-spec replvar(option(mountpoint()), map()) -> option(mountpoint()). replvar(undefined, _Vars) -> undefined; replvar(MountPoint, Vars) -> diff --git a/apps/emqx/src/emqx_mqueue.erl b/apps/emqx/src/emqx_mqueue.erl index 0ef7d56e5..7b30e5006 100644 --- a/apps/emqx/src/emqx_mqueue.erl +++ b/apps/emqx/src/emqx_mqueue.erl @@ -189,7 +189,7 @@ stats(#mqueue{max_len = MaxLen, dropped = Dropped} = MQ) -> [{len, len(MQ)}, {max_len, MaxLen}, {dropped, Dropped}]. %% @doc Enqueue a message. --spec in(message(), mqueue()) -> {maybe(message()), mqueue()}. +-spec in(message(), mqueue()) -> {option(message()), mqueue()}. in(Msg = #message{qos = ?QOS_0}, MQ = #mqueue{store_qos0 = false}) -> {_Dropped = Msg, MQ}; in( diff --git a/apps/emqx/src/emqx_packet.erl b/apps/emqx/src/emqx_packet.erl index 542dc8b3b..2f45fb37e 100644 --- a/apps/emqx/src/emqx_packet.erl +++ b/apps/emqx/src/emqx_packet.erl @@ -493,8 +493,8 @@ format(#mqtt_packet{header = Header, variable = Variable, payload = Payload}, Pa "" -> [HeaderIO, ")"]; VarIO -> [HeaderIO, ", ", VarIO, ")"] end; -%% receive a frame error packet, such as {frame_error,frame_too_large} or -%% {frame_error,#{expected => <<"'MQTT' or 'MQIsdp'">>,hint => invalid_proto_name,received => <<"bad_name">>}} +%% receive a frame error packet, such as {frame_error,#{cause := frame_too_large}} or +%% {frame_error,#{expected => <<"'MQTT' or 'MQIsdp'">>,cause => invalid_proto_name,received => <<"bad_name">>}} format(FrameError, _PayloadEncode) -> lists:flatten(io_lib:format("~tp", [FrameError])). 
diff --git a/apps/emqx/src/emqx_pd.erl b/apps/emqx/src/emqx_pd.erl index 73e75a771..602c23065 100644 --- a/apps/emqx/src/emqx_pd.erl +++ b/apps/emqx/src/emqx_pd.erl @@ -48,7 +48,7 @@ get_counter(Key) -> Cnt -> Cnt end. --spec inc_counter(key(), number()) -> maybe(number()). +-spec inc_counter(key(), number()) -> option(number()). inc_counter(Key, Inc) -> put(Key, get_counter(Key) + Inc). diff --git a/apps/emqx/src/emqx_persistent_message.erl b/apps/emqx/src/emqx_persistent_message.erl index 295ddd3dc..b178a742c 100644 --- a/apps/emqx/src/emqx_persistent_message.erl +++ b/apps/emqx/src/emqx_persistent_message.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2021-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2021-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -61,7 +61,11 @@ force_ds() -> emqx_config:get([session_persistence, force_persistence]). storage_backend(#{ - builtin := #{enable := true, n_shards := NShards, replication_factor := ReplicationFactor} + builtin := #{ + enable := true, + n_shards := NShards, + replication_factor := ReplicationFactor + } }) -> #{ backend => builtin, @@ -93,7 +97,7 @@ needs_persistence(Msg) -> -spec store_message(emqx_types:message()) -> emqx_ds:store_batch_result(). store_message(Msg) -> - emqx_ds:store_batch(?PERSISTENT_MESSAGE_DB, [Msg]). + emqx_ds:store_batch(?PERSISTENT_MESSAGE_DB, [Msg], #{sync => false}). has_subscribers(#message{topic = Topic}) -> emqx_persistent_session_ds_router:has_any_route(Topic). 
diff --git a/apps/emqx/src/emqx_persistent_message_ds_gc_worker.erl b/apps/emqx/src/emqx_persistent_message_ds_gc_worker.erl new file mode 100644 index 000000000..b960eae9e --- /dev/null +++ b/apps/emqx/src/emqx_persistent_message_ds_gc_worker.erl @@ -0,0 +1,157 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_persistent_message_ds_gc_worker). + +-behaviour(gen_server). + +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("stdlib/include/qlc.hrl"). +-include_lib("stdlib/include/ms_transform.hrl"). + +-include("emqx_persistent_session_ds.hrl"). + +%% API +-export([ + start_link/0, + gc/0 +]). + +%% `gen_server' API +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2 +]). + +%% call/cast/info records +-record(gc, {}). + +%%-------------------------------------------------------------------------------- +%% API +%%-------------------------------------------------------------------------------- + +start_link() -> + gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + +%% For testing or manual ops +gc() -> + gen_server:call(?MODULE, #gc{}, infinity). 
+ +%%-------------------------------------------------------------------------------- +%% `gen_server' API +%%-------------------------------------------------------------------------------- + +init(_Opts) -> + ensure_gc_timer(), + State = #{}, + {ok, State}. + +handle_call(#gc{}, _From, State) -> + maybe_gc(), + {reply, ok, State}; +handle_call(_Call, _From, State) -> + {reply, error, State}. + +handle_cast(_Cast, State) -> + {noreply, State}. + +handle_info(#gc{}, State) -> + try_gc(), + ensure_gc_timer(), + {noreply, State}; +handle_info(_Info, State) -> + {noreply, State}. + +%%-------------------------------------------------------------------------------- +%% Internal fns +%%-------------------------------------------------------------------------------- + +ensure_gc_timer() -> + Timeout = emqx_config:get([session_persistence, message_retention_period]), + _ = erlang:send_after(Timeout, self(), #gc{}), + ok. + +try_gc() -> + %% Only cores should run GC. + CoreNodes = mria_membership:running_core_nodelist(), + Res = global:trans( + {?MODULE, self()}, + fun maybe_gc/0, + CoreNodes, + %% Note: we set retries to 1 here because, in rare occasions, GC might start at the + %% same time in more than one node, and each one will abort the other. By allowing + %% one retry, at least one node will (hopefully) get to enter the transaction and + %% the other will abort. If GC runs too fast, both nodes might run in sequence. + %% But, in that case, GC is clearly not too costly, and that shouldn't be a problem, + %% resource-wise. + _Retries = 1 + ), + case Res of + aborted -> + ?tp(ds_message_gc_lock_taken, #{}), + ok; + ok -> + ok + end. + +now_ms() -> + erlang:system_time(millisecond). 
+ +maybe_gc() -> + AllGens = emqx_ds:list_generations_with_lifetimes(?PERSISTENT_MESSAGE_DB), + NowMS = now_ms(), + RetentionPeriod = emqx_config:get([session_persistence, message_retention_period]), + TimeThreshold = NowMS - RetentionPeriod, + maybe_create_new_generation(AllGens, TimeThreshold), + ?tp_span( + ps_message_gc, + #{}, + begin + ExpiredGens = + maps:filter( + fun(_GenId, #{until := Until}) -> + is_number(Until) andalso Until =< TimeThreshold + end, + AllGens + ), + ExpiredGenIds = maps:keys(ExpiredGens), + lists:foreach( + fun(GenId) -> + ok = emqx_ds:drop_generation(?PERSISTENT_MESSAGE_DB, GenId), + ?tp(message_gc_generation_dropped, #{gen_id => GenId}) + end, + ExpiredGenIds + ) + end + ). + +maybe_create_new_generation(AllGens, TimeThreshold) -> + NeedNewGen = + lists:all( + fun({_GenId, #{created_at := CreatedAt}}) -> + CreatedAt =< TimeThreshold + end, + maps:to_list(AllGens) + ), + case NeedNewGen of + false -> + ?tp(ps_message_gc_too_early, #{}), + ok; + true -> + ok = emqx_ds:add_generation(?PERSISTENT_MESSAGE_DB), + ?tp(ps_message_gc_added_gen, #{}) + end. diff --git a/apps/emqx/src/emqx_persistent_message_ds_replayer.erl b/apps/emqx/src/emqx_persistent_message_ds_replayer.erl deleted file mode 100644 index 1053978dc..000000000 --- a/apps/emqx/src/emqx_persistent_message_ds_replayer.erl +++ /dev/null @@ -1,795 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-%% See the License for the specific language governing permissions and -%% limitations under the License. -%%-------------------------------------------------------------------- - -%% @doc This module implements the routines for replaying streams of -%% messages. --module(emqx_persistent_message_ds_replayer). - -%% API: --export([new/0, open/1, next_packet_id/1, n_inflight/1]). - --export([poll/4, replay/2, commit_offset/4]). - --export([seqno_to_packet_id/1, packet_id_to_seqno/2]). - --export([committed_until/2]). - -%% internal exports: --export([]). - --export_type([inflight/0, seqno/0]). - --include_lib("emqx/include/logger.hrl"). --include_lib("emqx/include/emqx_mqtt.hrl"). --include_lib("emqx_utils/include/emqx_message.hrl"). --include("emqx_persistent_session_ds.hrl"). - --ifdef(TEST). --include_lib("proper/include/proper.hrl"). --include_lib("eunit/include/eunit.hrl"). --endif. - --define(EPOCH_SIZE, 16#10000). - --define(ACK, 0). --define(COMP, 1). - --define(TRACK_FLAG(WHICH), (1 bsl WHICH)). --define(TRACK_FLAGS_ALL, ?TRACK_FLAG(?ACK) bor ?TRACK_FLAG(?COMP)). --define(TRACK_FLAGS_NONE, 0). - -%%================================================================================ -%% Type declarations -%%================================================================================ - -%% Note: sequence numbers are monotonic; they don't wrap around: --type seqno() :: non_neg_integer(). - --type track() :: ack | comp. --type commit_type() :: rec. - --record(inflight, { - next_seqno = 1 :: seqno(), - commits = #{ack => 1, comp => 1, rec => 1} :: #{track() | commit_type() => seqno()}, - %% Ranges are sorted in ascending order of their sequence numbers. - offset_ranges = [] :: [ds_pubrange()] -}). - --opaque inflight() :: #inflight{}. - --type message() :: emqx_types:message(). --type replies() :: [emqx_session:reply()]. - --type preproc_fun() :: fun((message()) -> message() | [message()]). 
- -%%================================================================================ -%% API funcions -%%================================================================================ - --spec new() -> inflight(). -new() -> - #inflight{}. - --spec open(emqx_persistent_session_ds:id()) -> inflight(). -open(SessionId) -> - {Ranges, RecUntil} = ro_transaction( - fun() -> {get_ranges(SessionId), get_committed_offset(SessionId, rec)} end - ), - {Commits, NextSeqno} = compute_inflight_range(Ranges), - #inflight{ - commits = Commits#{rec => RecUntil}, - next_seqno = NextSeqno, - offset_ranges = Ranges - }. - --spec next_packet_id(inflight()) -> {emqx_types:packet_id(), inflight()}. -next_packet_id(Inflight0 = #inflight{next_seqno = LastSeqno}) -> - Inflight = Inflight0#inflight{next_seqno = next_seqno(LastSeqno)}, - {seqno_to_packet_id(LastSeqno), Inflight}. - --spec n_inflight(inflight()) -> non_neg_integer(). -n_inflight(#inflight{offset_ranges = Ranges}) -> - %% TODO - %% This is not very efficient. Instead, we can take the maximum of - %% `range_size(AckedUntil, NextSeqno)` and `range_size(CompUntil, NextSeqno)`. - %% This won't be exact number but a pessimistic estimate, but this way we - %% will penalize clients that PUBACK QoS 1 messages but don't PUBCOMP QoS 2 - %% messages for some reason. For that to work, we need to additionally track - %% actual `AckedUntil` / `CompUntil` during `commit_offset/4`. - lists:foldl( - fun - (#ds_pubrange{type = ?T_CHECKPOINT}, N) -> - N; - (#ds_pubrange{type = ?T_INFLIGHT} = Range, N) -> - N + range_size(Range) - end, - 0, - Ranges - ). - --spec replay(preproc_fun(), inflight()) -> {emqx_session:replies(), inflight()}. -replay(PreprocFunFun, Inflight0 = #inflight{offset_ranges = Ranges0, commits = Commits}) -> - {Ranges, Replies} = lists:mapfoldr( - fun(Range, Acc) -> - replay_range(PreprocFunFun, Commits, Range, Acc) - end, - [], - Ranges0 - ), - Inflight = Inflight0#inflight{offset_ranges = Ranges}, - {Replies, Inflight}. 
- --spec commit_offset(emqx_persistent_session_ds:id(), Offset, emqx_types:packet_id(), inflight()) -> - {_IsValidOffset :: boolean(), inflight()} -when - Offset :: track() | commit_type(). -commit_offset( - SessionId, - Track, - PacketId, - Inflight0 = #inflight{commits = Commits} -) when Track == ack orelse Track == comp -> - case validate_commit(Track, PacketId, Inflight0) of - CommitUntil when is_integer(CommitUntil) -> - %% TODO - %% We do not preserve `CommitUntil` in the database. Instead, we discard - %% fully acked ranges from the database. In effect, this means that the - %% most recent `CommitUntil` the client has sent may be lost in case of a - %% crash or client loss. - Inflight1 = Inflight0#inflight{commits = Commits#{Track := CommitUntil}}, - Inflight = discard_committed(SessionId, Inflight1), - {true, Inflight}; - false -> - {false, Inflight0} - end; -commit_offset( - SessionId, - CommitType = rec, - PacketId, - Inflight0 = #inflight{commits = Commits} -) -> - case validate_commit(CommitType, PacketId, Inflight0) of - CommitUntil when is_integer(CommitUntil) -> - update_committed_offset(SessionId, CommitType, CommitUntil), - Inflight = Inflight0#inflight{commits = Commits#{CommitType := CommitUntil}}, - {true, Inflight}; - false -> - {false, Inflight0} - end. - --spec poll(preproc_fun(), emqx_persistent_session_ds:id(), inflight(), pos_integer()) -> - {emqx_session:replies(), inflight()}. -poll(PreprocFun, SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < ?EPOCH_SIZE -> - MinBatchSize = emqx_config:get([session_persistence, min_batch_size]), - FetchThreshold = min(MinBatchSize, ceil(WindowSize / 2)), - FreeSpace = WindowSize - n_inflight(Inflight0), - case FreeSpace >= FetchThreshold of - false -> - %% TODO: this branch is meant to avoid fetching data from - %% the DB in chunks that are too small. However, this - %% logic is not exactly good for the latency. Can the - %% client get stuck even? 
- {[], Inflight0}; - true -> - %% TODO: Wrap this in `mria:async_dirty/2`? - Checkpoints = find_checkpoints(Inflight0#inflight.offset_ranges), - StreamGroups = group_streams(get_streams(SessionId)), - {Publihes, Inflight} = - fetch(PreprocFun, SessionId, Inflight0, Checkpoints, StreamGroups, FreeSpace, []), - %% Discard now irrelevant QoS0-only ranges, if any. - {Publihes, discard_committed(SessionId, Inflight)} - end. - -%% Which seqno this track is committed until. -%% "Until" means this is first seqno that is _not yet committed_ for this track. --spec committed_until(track() | commit_type(), inflight()) -> seqno(). -committed_until(Track, #inflight{commits = Commits}) -> - maps:get(Track, Commits). - --spec seqno_to_packet_id(seqno()) -> emqx_types:packet_id() | 0. -seqno_to_packet_id(Seqno) -> - Seqno rem ?EPOCH_SIZE. - -%% Reconstruct session counter by adding most significant bits from -%% the current counter to the packet id. --spec packet_id_to_seqno(emqx_types:packet_id(), inflight()) -> seqno(). -packet_id_to_seqno(PacketId, #inflight{next_seqno = NextSeqno}) -> - packet_id_to_seqno_(NextSeqno, PacketId). - -%%================================================================================ -%% Internal exports -%%================================================================================ - -%%================================================================================ -%% Internal functions -%%================================================================================ - -compute_inflight_range([]) -> - {#{ack => 1, comp => 1}, 1}; -compute_inflight_range(Ranges) -> - _RangeLast = #ds_pubrange{until = LastSeqno} = lists:last(Ranges), - AckedUntil = find_committed_until(ack, Ranges), - CompUntil = find_committed_until(comp, Ranges), - Commits = #{ - ack => emqx_maybe:define(AckedUntil, LastSeqno), - comp => emqx_maybe:define(CompUntil, LastSeqno) - }, - {Commits, LastSeqno}. 
- -find_committed_until(Track, Ranges) -> - RangesUncommitted = lists:dropwhile( - fun(Range) -> - case Range of - #ds_pubrange{type = ?T_CHECKPOINT} -> - true; - #ds_pubrange{type = ?T_INFLIGHT, tracks = Tracks} -> - not has_track(Track, Tracks) - end - end, - Ranges - ), - case RangesUncommitted of - [#ds_pubrange{id = {_, CommittedUntil, _StreamRef}} | _] -> - CommittedUntil; - [] -> - undefined - end. - --spec get_ranges(emqx_persistent_session_ds:id()) -> [ds_pubrange()]. -get_ranges(SessionId) -> - Pat = erlang:make_tuple( - record_info(size, ds_pubrange), - '_', - [{1, ds_pubrange}, {#ds_pubrange.id, {SessionId, '_', '_'}}] - ), - mnesia:match_object(?SESSION_PUBRANGE_TAB, Pat, read). - -fetch(PreprocFun, SessionId, Inflight0, CPs, Groups, N, Acc) when N > 0, Groups =/= [] -> - #inflight{next_seqno = FirstSeqno, offset_ranges = Ranges} = Inflight0, - {Stream, Groups2} = get_the_first_stream(Groups), - case get_next_n_messages_from_stream(Stream, CPs, N) of - [] -> - fetch(PreprocFun, SessionId, Inflight0, CPs, Groups2, N, Acc); - {ItBegin, ItEnd, Messages} -> - %% We need to preserve the iterator pointing to the beginning of the - %% range, so that we can replay it if needed. - {Publishes, UntilSeqno} = publish_fetch(PreprocFun, FirstSeqno, Messages), - Size = range_size(FirstSeqno, UntilSeqno), - Range0 = #ds_pubrange{ - id = {SessionId, FirstSeqno, Stream#ds_stream.ref}, - type = ?T_INFLIGHT, - tracks = compute_pub_tracks(Publishes), - until = UntilSeqno, - iterator = ItBegin - }, - ok = preserve_range(Range0), - %% ...Yet we need to keep the iterator pointing past the end of the - %% range, so that we can pick up where we left off: it will become - %% `ItBegin` of the next range for this stream. 
- Range = keep_next_iterator(ItEnd, Range0), - Inflight = Inflight0#inflight{ - next_seqno = UntilSeqno, - offset_ranges = Ranges ++ [Range] - }, - fetch(PreprocFun, SessionId, Inflight, CPs, Groups2, N - Size, [Publishes | Acc]) - end; -fetch(_ReplyFun, _SessionId, Inflight, _CPs, _Groups, _N, Acc) -> - Publishes = lists:append(lists:reverse(Acc)), - {Publishes, Inflight}. - -discard_committed( - SessionId, - Inflight0 = #inflight{commits = Commits, offset_ranges = Ranges0} -) -> - %% TODO: This could be kept and incrementally updated in the inflight state. - Checkpoints = find_checkpoints(Ranges0), - %% TODO: Wrap this in `mria:async_dirty/2`? - Ranges = discard_committed_ranges(SessionId, Commits, Checkpoints, Ranges0), - Inflight0#inflight{offset_ranges = Ranges}. - -find_checkpoints(Ranges) -> - lists:foldl( - fun(#ds_pubrange{id = {_SessionId, _, StreamRef}} = Range, Acc) -> - %% For each stream, remember the last range over this stream. - Acc#{StreamRef => Range} - end, - #{}, - Ranges - ). - -discard_committed_ranges( - SessionId, - Commits, - Checkpoints, - Ranges = [Range = #ds_pubrange{id = {_SessionId, _, StreamRef}} | Rest] -) -> - case discard_committed_range(Commits, Range) of - discard -> - %% This range has been fully committed. - %% Either discard it completely, or preserve the iterator for the next range - %% over this stream (i.e. a checkpoint). - RangeKept = - case maps:get(StreamRef, Checkpoints) of - Range -> - [checkpoint_range(Range)]; - _Previous -> - discard_range(Range), - [] - end, - %% Since we're (intentionally) not using transactions here, it's important to - %% issue database writes in the same order in which ranges are stored: from - %% the oldest to the newest. This is also why we need to compute which ranges - %% should become checkpoints before we start writing anything. - RangeKept ++ discard_committed_ranges(SessionId, Commits, Checkpoints, Rest); - keep -> - %% This range has not been fully committed. 
- [Range | discard_committed_ranges(SessionId, Commits, Checkpoints, Rest)]; - keep_all -> - %% The rest of ranges (if any) still have uncommitted messages. - Ranges; - TracksLeft -> - %% Only some track has been committed. - %% Preserve the uncommitted tracks in the database. - RangeKept = Range#ds_pubrange{tracks = TracksLeft}, - preserve_range(restore_first_iterator(RangeKept)), - [RangeKept | discard_committed_ranges(SessionId, Commits, Checkpoints, Rest)] - end; -discard_committed_ranges(_SessionId, _Commits, _Checkpoints, []) -> - []. - -discard_committed_range(_Commits, #ds_pubrange{type = ?T_CHECKPOINT}) -> - discard; -discard_committed_range( - #{ack := AckedUntil, comp := CompUntil}, - #ds_pubrange{until = Until} -) when Until > AckedUntil andalso Until > CompUntil -> - keep_all; -discard_committed_range(Commits, #ds_pubrange{until = Until, tracks = Tracks}) -> - case discard_tracks(Commits, Until, Tracks) of - 0 -> - discard; - Tracks -> - keep; - TracksLeft -> - TracksLeft - end. - -discard_tracks(#{ack := AckedUntil, comp := CompUntil}, Until, Tracks) -> - TAck = - case Until > AckedUntil of - true -> ?TRACK_FLAG(?ACK) band Tracks; - false -> 0 - end, - TComp = - case Until > CompUntil of - true -> ?TRACK_FLAG(?COMP) band Tracks; - false -> 0 - end, - TAck bor TComp. - -replay_range( - PreprocFun, - Commits, - Range0 = #ds_pubrange{ - type = ?T_INFLIGHT, id = {_, First, _StreamRef}, until = Until, iterator = It - }, - Acc -) -> - Size = range_size(First, Until), - {ok, ItNext, MessagesUnacked} = emqx_ds:next(?PERSISTENT_MESSAGE_DB, It, Size), - %% Asserting that range is consistent with the message storage state. - {Replies, Until} = publish_replay(PreprocFun, Commits, First, MessagesUnacked), - %% Again, we need to keep the iterator pointing past the end of the - %% range, so that we can pick up where we left off. 
- Range = keep_next_iterator(ItNext, Range0), - {Range, Replies ++ Acc}; -replay_range(_PreprocFun, _Commits, Range0 = #ds_pubrange{type = ?T_CHECKPOINT}, Acc) -> - {Range0, Acc}. - -validate_commit( - Track, - PacketId, - Inflight = #inflight{commits = Commits, next_seqno = NextSeqno} -) -> - Seqno = packet_id_to_seqno_(NextSeqno, PacketId), - CommittedUntil = maps:get(Track, Commits), - CommitNext = get_commit_next(Track, Inflight), - case Seqno >= CommittedUntil andalso Seqno < CommitNext of - true -> - next_seqno(Seqno); - false -> - ?SLOG(warning, #{ - msg => "out-of-order_commit", - track => Track, - packet_id => PacketId, - commit_seqno => Seqno, - committed_until => CommittedUntil, - commit_next => CommitNext - }), - false - end. - -get_commit_next(ack, #inflight{next_seqno = NextSeqno}) -> - NextSeqno; -get_commit_next(rec, #inflight{next_seqno = NextSeqno}) -> - NextSeqno; -get_commit_next(comp, #inflight{commits = Commits}) -> - maps:get(rec, Commits). - -publish_fetch(PreprocFun, FirstSeqno, Messages) -> - flatmapfoldl( - fun({_DSKey, MessageIn}, Acc) -> - Message = PreprocFun(MessageIn), - publish_fetch(Message, Acc) - end, - FirstSeqno, - Messages - ). - -publish_fetch(#message{qos = ?QOS_0} = Message, Seqno) -> - {{undefined, Message}, Seqno}; -publish_fetch(#message{} = Message, Seqno) -> - PacketId = seqno_to_packet_id(Seqno), - {{PacketId, Message}, next_seqno(Seqno)}; -publish_fetch(Messages, Seqno) -> - flatmapfoldl(fun publish_fetch/2, Seqno, Messages). - -publish_replay(PreprocFun, Commits, FirstSeqno, Messages) -> - #{ack := AckedUntil, comp := CompUntil, rec := RecUntil} = Commits, - flatmapfoldl( - fun({_DSKey, MessageIn}, Acc) -> - Message = PreprocFun(MessageIn), - publish_replay(Message, AckedUntil, CompUntil, RecUntil, Acc) - end, - FirstSeqno, - Messages - ). - -publish_replay(#message{qos = ?QOS_0}, _, _, _, Seqno) -> - %% QoS 0 (at most once) messages should not be replayed. 
- {[], Seqno}; -publish_replay(#message{qos = Qos} = Message, AckedUntil, CompUntil, RecUntil, Seqno) -> - case Qos of - ?QOS_1 when Seqno < AckedUntil -> - %% This message has already been acked, so we can skip it. - %% We still need to advance seqno, because previously we assigned this message - %% a unique Packet Id. - {[], next_seqno(Seqno)}; - ?QOS_2 when Seqno < CompUntil -> - %% This message's flow has already been fully completed, so we can skip it. - %% We still need to advance seqno, because previously we assigned this message - %% a unique Packet Id. - {[], next_seqno(Seqno)}; - ?QOS_2 when Seqno < RecUntil -> - %% This message's flow has been partially completed, we need to resend a PUBREL. - PacketId = seqno_to_packet_id(Seqno), - Pub = {pubrel, PacketId}, - {Pub, next_seqno(Seqno)}; - _ -> - %% This message flow hasn't been acked and/or received, we need to resend it. - PacketId = seqno_to_packet_id(Seqno), - Pub = {PacketId, emqx_message:set_flag(dup, true, Message)}, - {Pub, next_seqno(Seqno)} - end; -publish_replay([], _, _, _, Seqno) -> - {[], Seqno}; -publish_replay(Messages, AckedUntil, CompUntil, RecUntil, Seqno) -> - flatmapfoldl( - fun(Message, Acc) -> - publish_replay(Message, AckedUntil, CompUntil, RecUntil, Acc) - end, - Seqno, - Messages - ). - --spec compute_pub_tracks(replies()) -> non_neg_integer(). -compute_pub_tracks(Pubs) -> - compute_pub_tracks(Pubs, ?TRACK_FLAGS_NONE). - -compute_pub_tracks(_Pubs, Tracks = ?TRACK_FLAGS_ALL) -> - Tracks; -compute_pub_tracks([Pub | Rest], Tracks) -> - Track = - case Pub of - {_PacketId, #message{qos = ?QOS_1}} -> ?TRACK_FLAG(?ACK); - {_PacketId, #message{qos = ?QOS_2}} -> ?TRACK_FLAG(?COMP); - {pubrel, _PacketId} -> ?TRACK_FLAG(?COMP); - _ -> ?TRACK_FLAGS_NONE - end, - compute_pub_tracks(Rest, Track bor Tracks); -compute_pub_tracks([], Tracks) -> - Tracks. 
- -keep_next_iterator(ItNext, Range = #ds_pubrange{iterator = ItFirst, misc = Misc}) -> - Range#ds_pubrange{ - iterator = ItNext, - %% We need to keep the first iterator around, in case we need to preserve - %% this range again, updating still uncommitted tracks it's part of. - misc = Misc#{iterator_first => ItFirst} - }. - -restore_first_iterator(Range = #ds_pubrange{misc = Misc = #{iterator_first := ItFirst}}) -> - Range#ds_pubrange{ - iterator = ItFirst, - misc = maps:remove(iterator_first, Misc) - }. - --spec preserve_range(ds_pubrange()) -> ok. -preserve_range(Range = #ds_pubrange{type = ?T_INFLIGHT}) -> - mria:dirty_write(?SESSION_PUBRANGE_TAB, Range). - -has_track(ack, Tracks) -> - (?TRACK_FLAG(?ACK) band Tracks) > 0; -has_track(comp, Tracks) -> - (?TRACK_FLAG(?COMP) band Tracks) > 0. - --spec discard_range(ds_pubrange()) -> ok. -discard_range(#ds_pubrange{id = RangeId}) -> - mria:dirty_delete(?SESSION_PUBRANGE_TAB, RangeId). - --spec checkpoint_range(ds_pubrange()) -> ds_pubrange(). -checkpoint_range(Range0 = #ds_pubrange{type = ?T_INFLIGHT}) -> - Range = Range0#ds_pubrange{type = ?T_CHECKPOINT, misc = #{}}, - ok = mria:dirty_write(?SESSION_PUBRANGE_TAB, Range), - Range; -checkpoint_range(Range = #ds_pubrange{type = ?T_CHECKPOINT}) -> - %% This range should have been checkpointed already. - Range. - -get_last_iterator(Stream = #ds_stream{ref = StreamRef}, Checkpoints) -> - case maps:get(StreamRef, Checkpoints, none) of - none -> - Stream#ds_stream.beginning; - #ds_pubrange{iterator = ItNext} -> - ItNext - end. - --spec get_streams(emqx_persistent_session_ds:id()) -> [ds_stream()]. -get_streams(SessionId) -> - mnesia:dirty_read(?SESSION_STREAM_TAB, SessionId). - --spec get_committed_offset(emqx_persistent_session_ds:id(), _Name) -> seqno(). -get_committed_offset(SessionId, Name) -> - case mnesia:read(?SESSION_COMMITTED_OFFSET_TAB, {SessionId, Name}) of - [] -> - 1; - [#ds_committed_offset{until = Seqno}] -> - Seqno - end. 
- --spec update_committed_offset(emqx_persistent_session_ds:id(), _Name, seqno()) -> ok. -update_committed_offset(SessionId, Name, Until) -> - mria:dirty_write(?SESSION_COMMITTED_OFFSET_TAB, #ds_committed_offset{ - id = {SessionId, Name}, until = Until - }). - -next_seqno(Seqno) -> - NextSeqno = Seqno + 1, - case seqno_to_packet_id(NextSeqno) of - 0 -> - %% We skip sequence numbers that lead to PacketId = 0 to - %% simplify math. Note: it leads to occasional gaps in the - %% sequence numbers. - NextSeqno + 1; - _ -> - NextSeqno - end. - -packet_id_to_seqno_(NextSeqno, PacketId) -> - Epoch = NextSeqno bsr 16, - case (Epoch bsl 16) + PacketId of - N when N =< NextSeqno -> - N; - N -> - N - ?EPOCH_SIZE - end. - -range_size(#ds_pubrange{id = {_, First, _StreamRef}, until = Until}) -> - range_size(First, Until). - -range_size(FirstSeqno, UntilSeqno) -> - %% This function assumes that gaps in the sequence ID occur _only_ when the - %% packet ID wraps. - Size = UntilSeqno - FirstSeqno, - Size + (FirstSeqno bsr 16) - (UntilSeqno bsr 16). - -%%================================================================================ -%% stream scheduler - -%% group streams by the first position in the rank --spec group_streams(list(ds_stream())) -> list(list(ds_stream())). -group_streams(Streams) -> - Groups = maps:groups_from_list( - fun(#ds_stream{rank = {RankX, _}}) -> RankX end, - Streams - ), - shuffle(maps:values(Groups)). - --spec shuffle([A]) -> [A]. -shuffle(L0) -> - L1 = lists:map( - fun(A) -> - %% maybe topic/stream prioritization could be introduced here? - {rand:uniform(), A} - end, - L0 - ), - L2 = lists:sort(L1), - {_, L} = lists:unzip(L2), - L. - -get_the_first_stream([Group | Groups]) -> - case get_next_stream_from_group(Group) of - {Stream, {sorted, []}} -> - {Stream, Groups}; - {Stream, Group2} -> - {Stream, [Group2 | Groups]}; - undefined -> - get_the_first_stream(Groups) - end; -get_the_first_stream([]) -> - %% how this possible ? 
- throw(#{reason => no_valid_stream}). - -%% the scheduler is simple, try to get messages from the same shard, but it's okay to take turns -get_next_stream_from_group({sorted, [H | T]}) -> - {H, {sorted, T}}; -get_next_stream_from_group({sorted, []}) -> - undefined; -get_next_stream_from_group(Streams) -> - [Stream | T] = lists:sort( - fun(#ds_stream{rank = {_, RankA}}, #ds_stream{rank = {_, RankB}}) -> - RankA < RankB - end, - Streams - ), - {Stream, {sorted, T}}. - -get_next_n_messages_from_stream(Stream, CPs, N) -> - ItBegin = get_last_iterator(Stream, CPs), - case emqx_ds:next(?PERSISTENT_MESSAGE_DB, ItBegin, N) of - {ok, _ItEnd, []} -> - []; - {ok, ItEnd, Messages} -> - {ItBegin, ItEnd, Messages}; - {ok, end_of_stream} -> - %% TODO: how to skip this closed stream or it should be taken over by lower level layer - [] - end. - -%%================================================================================ - --spec flatmapfoldl(fun((X, Acc) -> {Y | [Y], Acc}), Acc, [X]) -> {[Y], Acc}. -flatmapfoldl(_Fun, Acc, []) -> - {[], Acc}; -flatmapfoldl(Fun, Acc, [X | Xs]) -> - {Ys, NAcc} = Fun(X, Acc), - {Zs, FAcc} = flatmapfoldl(Fun, NAcc, Xs), - case is_list(Ys) of - true -> - {Ys ++ Zs, FAcc}; - _ -> - {[Ys | Zs], FAcc} - end. - -ro_transaction(Fun) -> - {atomic, Res} = mria:ro_transaction(?DS_MRIA_SHARD, Fun), - Res. - --ifdef(TEST). 
- -%% This test only tests boundary conditions (to make sure property-based test didn't skip them): -packet_id_to_seqno_test() -> - %% Packet ID = 1; first epoch: - ?assertEqual(1, packet_id_to_seqno_(1, 1)), - ?assertEqual(1, packet_id_to_seqno_(10, 1)), - ?assertEqual(1, packet_id_to_seqno_(1 bsl 16 - 1, 1)), - ?assertEqual(1, packet_id_to_seqno_(1 bsl 16, 1)), - %% Packet ID = 1; second and 3rd epochs: - ?assertEqual(1 bsl 16 + 1, packet_id_to_seqno_(1 bsl 16 + 1, 1)), - ?assertEqual(1 bsl 16 + 1, packet_id_to_seqno_(2 bsl 16, 1)), - ?assertEqual(2 bsl 16 + 1, packet_id_to_seqno_(2 bsl 16 + 1, 1)), - %% Packet ID = 16#ffff: - PID = 1 bsl 16 - 1, - ?assertEqual(PID, packet_id_to_seqno_(PID, PID)), - ?assertEqual(PID, packet_id_to_seqno_(1 bsl 16, PID)), - ?assertEqual(1 bsl 16 + PID, packet_id_to_seqno_(2 bsl 16, PID)), - ok. - -packet_id_to_seqno_test_() -> - Opts = [{numtests, 1000}, {to_file, user}], - {timeout, 30, fun() -> ?assert(proper:quickcheck(packet_id_to_seqno_prop(), Opts)) end}. - -packet_id_to_seqno_prop() -> - ?FORALL( - NextSeqNo, - next_seqno_gen(), - ?FORALL( - SeqNo, - seqno_gen(NextSeqNo), - begin - PacketId = seqno_to_packet_id(SeqNo), - ?assertEqual(SeqNo, packet_id_to_seqno_(NextSeqNo, PacketId)), - true - end - ) - ). - -next_seqno_gen() -> - ?LET( - {Epoch, Offset}, - {non_neg_integer(), non_neg_integer()}, - Epoch bsl 16 + Offset - ). - -seqno_gen(NextSeqNo) -> - WindowSize = 1 bsl 16 - 1, - Min = max(0, NextSeqNo - WindowSize), - Max = max(0, NextSeqNo - 1), - range(Min, Max). - -range_size_test_() -> - [ - ?_assertEqual(0, range_size(42, 42)), - ?_assertEqual(1, range_size(42, 43)), - ?_assertEqual(1, range_size(16#ffff, 16#10001)), - ?_assertEqual(16#ffff - 456 + 123, range_size(16#1f0000 + 456, 16#200000 + 123)) - ]. 
- -compute_inflight_range_test_() -> - [ - ?_assertEqual( - {#{ack => 1, comp => 1}, 1}, - compute_inflight_range([]) - ), - ?_assertEqual( - {#{ack => 12, comp => 13}, 42}, - compute_inflight_range([ - #ds_pubrange{id = {<<>>, 1, 0}, until = 2, type = ?T_CHECKPOINT}, - #ds_pubrange{id = {<<>>, 4, 0}, until = 8, type = ?T_CHECKPOINT}, - #ds_pubrange{id = {<<>>, 11, 0}, until = 12, type = ?T_CHECKPOINT}, - #ds_pubrange{ - id = {<<>>, 12, 0}, - until = 13, - type = ?T_INFLIGHT, - tracks = ?TRACK_FLAG(?ACK) - }, - #ds_pubrange{ - id = {<<>>, 13, 0}, - until = 20, - type = ?T_INFLIGHT, - tracks = ?TRACK_FLAG(?COMP) - }, - #ds_pubrange{ - id = {<<>>, 20, 0}, - until = 42, - type = ?T_INFLIGHT, - tracks = ?TRACK_FLAG(?ACK) bor ?TRACK_FLAG(?COMP) - } - ]) - ), - ?_assertEqual( - {#{ack => 13, comp => 13}, 13}, - compute_inflight_range([ - #ds_pubrange{id = {<<>>, 1, 0}, until = 2, type = ?T_CHECKPOINT}, - #ds_pubrange{id = {<<>>, 4, 0}, until = 8, type = ?T_CHECKPOINT}, - #ds_pubrange{id = {<<>>, 11, 0}, until = 12, type = ?T_CHECKPOINT}, - #ds_pubrange{id = {<<>>, 12, 0}, until = 13, type = ?T_CHECKPOINT} - ]) - ) - ]. - --endif. diff --git a/apps/emqx/src/emqx_persistent_session_ds.erl b/apps/emqx/src/emqx_persistent_session_ds.erl index a8c62fe7a..7494aca95 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.erl +++ b/apps/emqx/src/emqx_persistent_session_ds.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2021-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2021-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -19,13 +19,19 @@ -behaviour(emqx_session). -include("emqx.hrl"). --include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/trace.hrl"). 
-include_lib("stdlib/include/ms_transform.hrl"). -include("emqx_mqtt.hrl"). -include("emqx_persistent_session_ds.hrl"). +-ifdef(TEST). +-include_lib("proper/include/proper.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-endif. + %% Session API -export([ create/3, @@ -60,8 +66,13 @@ terminate/2 ]). +%% Managment APIs: +-export([ + list_client_subscriptions/1 +]). + %% session table operations --export([create_tables/0]). +-export([create_tables/0, sync/1]). %% internal export used by session GC process -export([destroy_session/1]). @@ -73,34 +84,40 @@ do_ensure_all_iterators_closed/1 ]). --export([print_session/1]). +-export([print_session/1, seqno_diff/4]). -ifdef(TEST). -export([ session_open/2, - list_all_sessions/0, - list_all_subscriptions/0, - list_all_streams/0, - list_all_pubranges/0 + list_all_sessions/0 ]). -endif. -export_type([ id/0, + seqno/0, + timestamp/0, + topic_filter/0, subscription_id/0, - session/0 + subscription/0, + session/0, + stream_state/0 ]). +-type seqno() :: non_neg_integer(). + %% Currently, this is the clientid. We avoid `emqx_types:clientid()' because that can be %% an atom, in theory (?). -type id() :: binary(). -type topic_filter() :: emqx_types:topic(). --type topic_filter_words() :: emqx_ds:topic_filter(). --type subscription_id() :: {id(), topic_filter()}. + +-type subscription_id() :: integer(). + -type subscription() :: #{ + id := subscription_id(), start_time := emqx_ds:time(), props := map(), - extra := map() + deleted := boolean() }. -define(TIMER_PULL, timer_pull). @@ -108,29 +125,26 @@ -define(TIMER_BUMP_LAST_ALIVE_AT, timer_bump_last_alive_at). -type timer() :: ?TIMER_PULL | ?TIMER_GET_STREAMS | ?TIMER_BUMP_LAST_ALIVE_AT. --type subscriptions() :: emqx_topic_gbt:t(nil(), subscription()). - -type session() :: #{ %% Client ID id := id(), - %% When the session was created - created_at := timestamp(), - %% When the client was last considered alive - last_alive_at := timestamp(), - %% Client’s Subscriptions. 
- subscriptions := subscriptions(), - %% Inflight messages - inflight := emqx_persistent_message_ds_replayer:inflight(), - %% Receive maximum - receive_maximum := pos_integer(), - %% Connection Info - conninfo := emqx_types:conninfo(), - %% Timers - timer() => reference(), - %% - props := map() + %% Configuration: + props := map(), + %% Persistent state: + s := emqx_persistent_session_ds_state:t(), + %% Buffer: + inflight := emqx_persistent_session_ds_inflight:t(), + %% Timers: + timer() => reference() }. +-record(req_sync, { + from :: pid(), + ref :: reference() +}). + +-type stream_state() :: #srs{}. + -type timestamp() :: emqx_utils_calendar:epoch_millisecond(). -type millisecond() :: non_neg_integer(). -type clientinfo() :: emqx_types:clientinfo(). @@ -142,22 +156,16 @@ subscriptions_max, inflight_cnt, inflight_max, - next_pkt_id + mqueue_len, + mqueue_dropped ]). --define(IS_EXPIRED(NOW_MS, LAST_ALIVE_AT, EI), - (is_number(LAST_ALIVE_AT) andalso - is_number(EI) andalso - (NOW_MS >= LAST_ALIVE_AT + EI)) -). - %% -spec create(clientinfo(), conninfo(), emqx_session:conf()) -> session(). create(#{clientid := ClientID}, ConnInfo, Conf) -> - Session = session_ensure_new(ClientID, ConnInfo), - apply_conf(ConnInfo, Conf, ensure_timers(Session)). + ensure_timers(session_ensure_new(ClientID, ConnInfo, Conf)). -spec open(clientinfo(), conninfo(), emqx_session:conf()) -> {_IsPresent :: true, session(), []} | false. @@ -171,18 +179,12 @@ open(#{clientid := ClientID} = _ClientInfo, ConnInfo, Conf) -> ok = emqx_cm:discard_session(ClientID), case session_open(ClientID, ConnInfo) of Session0 = #{} -> - Session = apply_conf(ConnInfo, Conf, Session0), + Session = Session0#{props => Conf}, {true, ensure_timers(Session), []}; false -> false end. -apply_conf(ConnInfo, Conf, Session) -> - Session#{ - receive_maximum => receive_maximum(ConnInfo), - props => Conf - }. - -spec destroy(session() | clientinfo()) -> ok. 
destroy(#{id := ClientID}) -> destroy_session(ClientID); @@ -190,7 +192,7 @@ destroy(#{clientid := ClientID}) -> destroy_session(ClientID). destroy_session(ClientID) -> - session_drop(ClientID). + session_drop(ClientID, destroy). %%-------------------------------------------------------------------- %% Info, Stats @@ -202,14 +204,14 @@ info(id, #{id := ClientID}) -> ClientID; info(clientid, #{id := ClientID}) -> ClientID; -info(created_at, #{created_at := CreatedAt}) -> - CreatedAt; +info(created_at, #{s := S}) -> + emqx_persistent_session_ds_state:get_created_at(S); info(is_persistent, #{}) -> true; -info(subscriptions, #{subscriptions := Subs}) -> - subs_to_map(Subs); -info(subscriptions_cnt, #{subscriptions := Subs}) -> - subs_size(Subs); +info(subscriptions, #{s := S}) -> + emqx_persistent_session_ds_subs:to_map(S); +info(subscriptions_cnt, #{s := S}) -> + emqx_topic_gbt:size(emqx_persistent_session_ds_state:get_subscriptions(S)); info(subscriptions_max, #{props := Conf}) -> maps:get(max_subscriptions, Conf); info(upgrade_qos, #{props := Conf}) -> @@ -217,54 +219,57 @@ info(upgrade_qos, #{props := Conf}) -> info(inflight, #{inflight := Inflight}) -> Inflight; info(inflight_cnt, #{inflight := Inflight}) -> - emqx_persistent_message_ds_replayer:n_inflight(Inflight); -info(inflight_max, #{receive_maximum := ReceiveMaximum}) -> - ReceiveMaximum; + emqx_persistent_session_ds_inflight:n_inflight(Inflight); +info(inflight_max, #{inflight := Inflight}) -> + emqx_persistent_session_ds_inflight:receive_maximum(Inflight); info(retry_interval, #{props := Conf}) -> maps:get(retry_interval, Conf); % info(mqueue, #sessmem{mqueue = MQueue}) -> % MQueue; -% info(mqueue_len, #sessmem{mqueue = MQueue}) -> -% emqx_mqueue:len(MQueue); +info(mqueue_len, #{inflight := Inflight}) -> + emqx_persistent_session_ds_inflight:n_buffered(all, Inflight); % info(mqueue_max, #sessmem{mqueue = MQueue}) -> % emqx_mqueue:max_len(MQueue); -% info(mqueue_dropped, #sessmem{mqueue = MQueue}) -> -% 
emqx_mqueue:dropped(MQueue); -info(next_pkt_id, #{inflight := Inflight}) -> - {PacketId, _} = emqx_persistent_message_ds_replayer:next_packet_id(Inflight), - PacketId; +info(mqueue_dropped, _Session) -> + 0; +%% info(next_pkt_id, #{s := S}) -> +%% {PacketId, _} = emqx_persistent_message_ds_replayer:next_packet_id(S), +%% PacketId; % info(awaiting_rel, #sessmem{awaiting_rel = AwaitingRel}) -> % AwaitingRel; -% info(awaiting_rel_cnt, #sessmem{awaiting_rel = AwaitingRel}) -> -% maps:size(AwaitingRel); +%% info(awaiting_rel_cnt, #{s := S}) -> +%% seqno_diff(?QOS_2, ?rec, ?committed(?QOS_2), S); info(awaiting_rel_max, #{props := Conf}) -> maps:get(max_awaiting_rel, Conf); -info(await_rel_timeout, #{props := Conf}) -> - maps:get(await_rel_timeout, Conf). +info(await_rel_timeout, #{props := _Conf}) -> + %% TODO: currently this setting is ignored: + %% maps:get(await_rel_timeout, Conf). + 0. -spec stats(session()) -> emqx_types:stats(). stats(Session) -> info(?STATS_KEYS, Session). -%% Debug/troubleshooting +%% Used by management API -spec print_session(emqx_types:clientid()) -> map() | undefined. print_session(ClientId) -> - catch ro_transaction( - fun() -> - case mnesia:read(?SESSION_TAB, ClientId) of - [Session] -> - #{ - session => Session, - streams => mnesia:read(?SESSION_STREAM_TAB, ClientId), - pubranges => session_read_pubranges(ClientId), - offsets => session_read_offsets(ClientId), - subscriptions => session_read_subscriptions(ClientId) - }; - [] -> - undefined - end - end - ). + case try_get_live_session(ClientId) of + {Pid, SessionState} -> + maps:update_with( + s, fun emqx_persistent_session_ds_state:format/1, SessionState#{ + '_alive' => {true, Pid} + } + ); + not_found -> + case emqx_persistent_session_ds_state:print_session(ClientId) of + undefined -> + undefined; + S -> + #{s => S, '_alive' => false} + end; + not_persistent -> + undefined + end. 
%%-------------------------------------------------------------------- %% Client -> Broker: SUBSCRIBE / UNSUBSCRIBE @@ -275,39 +280,89 @@ print_session(ClientId) -> subscribe( TopicFilter, SubOpts, - Session = #{id := ID, subscriptions := Subs} + Session = #{id := ID, s := S0} ) -> - case subs_lookup(TopicFilter, Subs) of - Subscription = #{} -> - NSubscription = update_subscription(TopicFilter, Subscription, SubOpts, ID), - NSubs = subs_insert(TopicFilter, NSubscription, Subs), - {ok, Session#{subscriptions := NSubs}}; + case emqx_persistent_session_ds_subs:lookup(TopicFilter, S0) of undefined -> - % TODO: max_subscriptions - Subscription = add_subscription(TopicFilter, SubOpts, ID), - NSubs = subs_insert(TopicFilter, Subscription, Subs), - {ok, Session#{subscriptions := NSubs}} - end. + %% TODO: max subscriptions + + %% N.B.: we chose to update the router before adding the + %% subscription to the session/iterator table. The + %% reasoning for this is as follows: + %% + %% Messages matching this topic filter should start to be + %% persisted as soon as possible to avoid missing + %% messages. If this is the first such persistent session + %% subscription, it's important to do so early on. + %% + %% This could, in turn, lead to some inconsistency: if + %% such a route gets created but the session/iterator data + %% fails to be updated accordingly, we have a dangling + %% route. To remove such dangling routes, we may have a + %% periodic GC process that removes routes that do not + %% have a matching persistent subscription. Also, route + %% operations use dirty mnesia operations, which + %% inherently have room for inconsistencies. + %% + %% In practice, we use the iterator reference table as a + %% source of truth, since it is guarded by a transaction + %% context: we consider a subscription operation to be + %% successful if it ended up changing this table. Both + %% router and iterator information can be reconstructed + %% from this table, if needed. 
+ ok = emqx_persistent_session_ds_router:do_add_route(TopicFilter, ID), + {SubId, S1} = emqx_persistent_session_ds_state:new_id(S0), + Subscription = #{ + start_time => now_ms(), + props => SubOpts, + id => SubId, + deleted => false + }, + IsNew = true; + Subscription0 = #{} -> + Subscription = Subscription0#{props => SubOpts}, + IsNew = false, + S1 = S0 + end, + S = emqx_persistent_session_ds_subs:on_subscribe(TopicFilter, Subscription, S1), + ?tp(persistent_session_ds_subscription_added, #{ + topic_filter => TopicFilter, sub => Subscription, is_new => IsNew + }), + {ok, Session#{s => S}}. -spec unsubscribe(topic_filter(), session()) -> {ok, session(), emqx_types:subopts()} | {error, emqx_types:reason_code()}. unsubscribe( TopicFilter, - Session = #{id := ID, subscriptions := Subs} + Session = #{id := ID, s := S0} ) -> - case subs_lookup(TopicFilter, Subs) of - _Subscription = #{props := SubOpts} -> - ok = del_subscription(TopicFilter, ID), - NSubs = subs_delete(TopicFilter, Subs), - {ok, Session#{subscriptions := NSubs}, SubOpts}; + case emqx_persistent_session_ds_subs:lookup(TopicFilter, S0) of undefined -> - {error, ?RC_NO_SUBSCRIPTION_EXISTED} + {error, ?RC_NO_SUBSCRIPTION_EXISTED}; + Subscription = #{props := SubOpts} -> + S = do_unsubscribe(ID, TopicFilter, Subscription, S0), + {ok, Session#{s => S}, SubOpts} end. +-spec do_unsubscribe(id(), topic_filter(), subscription(), emqx_persistent_session_ds_state:t()) -> + emqx_persistent_session_ds_state:t(). 
+do_unsubscribe(SessionId, TopicFilter, Subscription = #{id := SubId}, S0) -> + S1 = emqx_persistent_session_ds_subs:on_unsubscribe(TopicFilter, Subscription, S0), + ?tp(persistent_session_ds_subscription_delete, #{ + session_id => SessionId, topic_filter => TopicFilter + }), + S = emqx_persistent_session_ds_stream_scheduler:on_unsubscribe(SubId, S1), + ?tp_span( + persistent_session_ds_subscription_route_delete, + #{session_id => SessionId, topic_filter => TopicFilter}, + ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, SessionId) + ), + S. + -spec get_subscription(topic_filter(), session()) -> emqx_types:subopts() | undefined. -get_subscription(TopicFilter, #{subscriptions := Subs}) -> - case subs_lookup(TopicFilter, Subs) of +get_subscription(TopicFilter, #{s := S}) -> + case emqx_persistent_session_ds_subs:lookup(TopicFilter, S) of _Subscription = #{props := SubOpts} -> SubOpts; undefined -> @@ -333,15 +388,12 @@ publish(_PacketId, Msg, Session) -> -spec puback(clientinfo(), emqx_types:packet_id(), session()) -> {ok, emqx_types:message(), replies(), session()} | {error, emqx_types:reason_code()}. -puback(_ClientInfo, PacketId, Session = #{id := Id, inflight := Inflight0}) -> - case emqx_persistent_message_ds_replayer:commit_offset(Id, ack, PacketId, Inflight0) of - {true, Inflight} -> - %% TODO: we pass a bogus message into the hook: - Msg = emqx_message:make(Id, <<>>, <<>>), - {ok, Msg, [], pull_now(Session#{inflight => Inflight})}; - {false, _} -> - %% Invalid Packet Id - {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} +puback(_ClientInfo, PacketId, Session0) -> + case update_seqno(puback, PacketId, Session0) of + {ok, Msg, Session} -> + {ok, Msg, [], pull_now(Session)}; + Error -> + Error end. 
%%-------------------------------------------------------------------- @@ -351,15 +403,12 @@ puback(_ClientInfo, PacketId, Session = #{id := Id, inflight := Inflight0}) -> -spec pubrec(emqx_types:packet_id(), session()) -> {ok, emqx_types:message(), session()} | {error, emqx_types:reason_code()}. -pubrec(PacketId, Session = #{id := Id, inflight := Inflight0}) -> - case emqx_persistent_message_ds_replayer:commit_offset(Id, rec, PacketId, Inflight0) of - {true, Inflight} -> - %% TODO: we pass a bogus message into the hook: - Msg = emqx_message:make(Id, <<>>, <<>>), - {ok, Msg, pull_now(Session#{inflight => Inflight})}; - {false, _} -> - %% Invalid Packet Id - {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} +pubrec(PacketId, Session0) -> + case update_seqno(pubrec, PacketId, Session0) of + {ok, Msg, Session} -> + {ok, Msg, Session}; + Error = {error, _} -> + Error end. %%-------------------------------------------------------------------- @@ -379,238 +428,187 @@ pubrel(_PacketId, Session = #{}) -> -spec pubcomp(clientinfo(), emqx_types:packet_id(), session()) -> {ok, emqx_types:message(), replies(), session()} | {error, emqx_types:reason_code()}. -pubcomp(_ClientInfo, PacketId, Session = #{id := Id, inflight := Inflight0}) -> - case emqx_persistent_message_ds_replayer:commit_offset(Id, comp, PacketId, Inflight0) of - {true, Inflight} -> - %% TODO - Msg = emqx_message:make(Id, <<>>, <<>>), - {ok, Msg, [], Session#{inflight => Inflight}}; - {false, _} -> - %% Invalid Packet Id - {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} +pubcomp(_ClientInfo, PacketId, Session0) -> + case update_seqno(pubcomp, PacketId, Session0) of + {ok, Msg, Session} -> + {ok, Msg, [], pull_now(Session)}; + Error = {error, _} -> + Error end. %%-------------------------------------------------------------------- -spec deliver(clientinfo(), [emqx_types:deliver()], session()) -> {ok, replies(), session()}. -deliver(_ClientInfo, _Delivers, Session) -> - %% TODO: system messages end up here. - {ok, [], Session}. 
+deliver(ClientInfo, Delivers, Session0) -> + %% Durable sessions still have to handle some transient messages. + %% For example, retainer sends messages to the session directly. + Session = lists:foldl( + fun(Msg, Acc) -> enqueue_transient(ClientInfo, Msg, Acc) end, Session0, Delivers + ), + {ok, [], pull_now(Session)}. -spec handle_timeout(clientinfo(), _Timeout, session()) -> {ok, replies(), session()} | {ok, replies(), timeout(), session()}. handle_timeout( ClientInfo, ?TIMER_PULL, - Session0 = #{ - id := Id, - inflight := Inflight0, - subscriptions := Subs, - props := Conf, - receive_maximum := ReceiveMaximum - } + Session0 ) -> - MaxBatchSize = emqx_config:get([session_persistence, max_batch_size]), - BatchSize = min(ReceiveMaximum, MaxBatchSize), - UpgradeQoS = maps:get(upgrade_qos, Conf), - PreprocFun = make_preproc_fun(ClientInfo, Subs, UpgradeQoS), - {Publishes, Inflight} = emqx_persistent_message_ds_replayer:poll( - PreprocFun, - Id, - Inflight0, - BatchSize - ), - IdlePollInterval = emqx_config:get([session_persistence, idle_poll_interval]), + {Publishes, Session1} = drain_buffer(fetch_new_messages(Session0, ClientInfo)), Timeout = case Publishes of [] -> - IdlePollInterval; + emqx_config:get([session_persistence, idle_poll_interval]); [_ | _] -> 0 end, - Session = emqx_session:ensure_timer(?TIMER_PULL, Timeout, Session0#{inflight := Inflight}), + Session = emqx_session:ensure_timer(?TIMER_PULL, Timeout, Session1), {ok, Publishes, Session}; -handle_timeout(_ClientInfo, ?TIMER_GET_STREAMS, Session) -> - renew_streams(Session), +handle_timeout(_ClientInfo, ?TIMER_GET_STREAMS, Session0 = #{s := S0}) -> + S1 = emqx_persistent_session_ds_subs:gc(S0), + S = emqx_persistent_session_ds_stream_scheduler:renew_streams(S1), Interval = emqx_config:get([session_persistence, renew_streams_interval]), - {ok, [], emqx_session:ensure_timer(?TIMER_GET_STREAMS, Interval, Session)}; -handle_timeout(_ClientInfo, ?TIMER_BUMP_LAST_ALIVE_AT, Session0) -> + Session = 
emqx_session:ensure_timer( + ?TIMER_GET_STREAMS, + Interval, + Session0#{s => S} + ), + {ok, [], Session}; +handle_timeout(_ClientInfo, ?TIMER_BUMP_LAST_ALIVE_AT, Session0 = #{s := S0}) -> + S = emqx_persistent_session_ds_state:commit(bump_last_alive(S0)), + Session = emqx_session:ensure_timer( + ?TIMER_BUMP_LAST_ALIVE_AT, + bump_interval(), + Session0#{s => S} + ), + {ok, [], Session}; +handle_timeout(_ClientInfo, #req_sync{from = From, ref = Ref}, Session = #{s := S0}) -> + S = emqx_persistent_session_ds_state:commit(S0), + From ! Ref, + {ok, [], Session#{s => S}}; +handle_timeout(_ClientInfo, Timeout, Session) -> + ?SLOG(warning, #{msg => "unknown_ds_timeout", timeout => Timeout}), + {ok, [], Session}. + +bump_last_alive(S0) -> %% Note: we take a pessimistic approach here and assume that the client will be alive %% until the next bump timeout. With this, we avoid garbage collecting this session %% too early in case the session/connection/node crashes earlier without having time %% to commit the time. - BumpInterval = emqx_config:get([session_persistence, last_alive_update_interval]), - EstimatedLastAliveAt = now_ms() + BumpInterval, - Session = session_set_last_alive_at_trans(Session0, EstimatedLastAliveAt), - {ok, [], emqx_session:ensure_timer(?TIMER_BUMP_LAST_ALIVE_AT, BumpInterval, Session)}; -handle_timeout(_ClientInfo, expire_awaiting_rel, Session) -> - %% TODO: stub - {ok, [], Session}. + EstimatedLastAliveAt = now_ms() + bump_interval(), + emqx_persistent_session_ds_state:set_last_alive_at(EstimatedLastAliveAt, S0). -spec replay(clientinfo(), [], session()) -> {ok, replies(), session()}. -replay( - ClientInfo, - [], - Session = #{inflight := Inflight0, subscriptions := Subs, props := Conf} -) -> - UpgradeQoS = maps:get(upgrade_qos, Conf), - PreprocFun = make_preproc_fun(ClientInfo, Subs, UpgradeQoS), - {Replies, Inflight} = emqx_persistent_message_ds_replayer:replay(PreprocFun, Inflight0), - {ok, Replies, Session#{inflight := Inflight}}. 
+replay(ClientInfo, [], Session0 = #{s := S0}) -> + Streams = emqx_persistent_session_ds_stream_scheduler:find_replay_streams(S0), + Session = lists:foldl( + fun({_StreamKey, Stream}, SessionAcc) -> + replay_batch(Stream, SessionAcc, ClientInfo) + end, + Session0, + Streams + ), + %% Note: we filled the buffer with the historical messages, and + %% from now on we'll rely on the normal inflight/flow control + %% mechanisms to replay them: + {ok, [], pull_now(Session)}. + +-spec replay_batch(stream_state(), session(), clientinfo()) -> session(). +replay_batch(Srs0, Session, ClientInfo) -> + #srs{batch_size = BatchSize} = Srs0, + %% TODO: retry on errors: + {Srs, Inflight} = enqueue_batch(true, BatchSize, Srs0, Session, ClientInfo), + %% Assert: + Srs =:= Srs0 orelse + ?tp(warning, emqx_persistent_session_ds_replay_inconsistency, #{ + expected => Srs0, + got => Srs + }), + Session#{inflight => Inflight}. %%-------------------------------------------------------------------- -spec disconnect(session(), emqx_types:conninfo()) -> {shutdown, session()}. -disconnect(Session0, ConnInfo) -> - Session = session_set_last_alive_at_trans(Session0, ConnInfo, now_ms()), - {shutdown, Session}. +disconnect(Session = #{s := S0}, ConnInfo) -> + S1 = emqx_persistent_session_ds_state:set_last_alive_at(now_ms(), S0), + S2 = + case ConnInfo of + #{expiry_interval := EI} when is_number(EI) -> + emqx_persistent_session_ds_state:set_expiry_interval(EI, S1); + _ -> + S1 + end, + S = emqx_persistent_session_ds_state:commit(S2), + {shutdown, Session#{s => S}}. -spec terminate(Reason :: term(), session()) -> ok. -terminate(_Reason, _Session = #{}) -> +terminate(_Reason, _Session = #{id := Id, s := S}) -> + _ = emqx_persistent_session_ds_state:commit(S), + ?tp(debug, persistent_session_ds_terminate, #{id => Id}), ok. 
%%-------------------------------------------------------------------- +%% Management APIs (dashboard) +%%-------------------------------------------------------------------- -make_preproc_fun(ClientInfo, Subs, UpgradeQoS) -> - fun(Message = #message{topic = Topic}) -> - emqx_utils:flattermap( - fun(Match) -> - #{props := SubOpts} = subs_get_match(Match, Subs), - emqx_session:enrich_message(ClientInfo, Message, SubOpts, UpgradeQoS) - end, - subs_matches(Topic, Subs) - ) +-spec list_client_subscriptions(emqx_types:clientid()) -> + {node() | undefined, [{emqx_types:topic() | emqx_types:share(), emqx_types:subopts()}]} + | {error, not_found}. +list_client_subscriptions(ClientId) -> + case emqx_persistent_message:is_persistence_enabled() of + true -> + %% TODO: this is not the most optimal implementation, since it + %% should be possible to avoid reading extra data (streams, etc.) + case print_session(ClientId) of + Sess = #{s := #{subscriptions := Subs}} -> + Node = + case Sess of + #{'_alive' := {true, Pid}} -> + node(Pid); + _ -> + undefined + end, + SubList = + maps:fold( + fun(Topic, #{props := SubProps}, Acc) -> + Elem = {Topic, SubProps}, + [Elem | Acc] + end, + [], + Subs + ), + {Node, SubList}; + undefined -> + {error, not_found} + end; + false -> + {error, not_found} end. -%%-------------------------------------------------------------------- - --spec add_subscription(topic_filter(), emqx_types:subopts(), id()) -> - subscription(). -add_subscription(TopicFilter, SubOpts, DSSessionID) -> - %% N.B.: we chose to update the router before adding the subscription to the - %% session/iterator table. The reasoning for this is as follows: - %% - %% Messages matching this topic filter should start to be persisted as soon as - %% possible to avoid missing messages. If this is the first such persistent - %% session subscription, it's important to do so early on. 
- %% - %% This could, in turn, lead to some inconsistency: if such a route gets - %% created but the session/iterator data fails to be updated accordingly, we - %% have a dangling route. To remove such dangling routes, we may have a - %% periodic GC process that removes routes that do not have a matching - %% persistent subscription. Also, route operations use dirty mnesia - %% operations, which inherently have room for inconsistencies. - %% - %% In practice, we use the iterator reference table as a source of truth, - %% since it is guarded by a transaction context: we consider a subscription - %% operation to be successful if it ended up changing this table. Both router - %% and iterator information can be reconstructed from this table, if needed. - ok = emqx_persistent_session_ds_router:do_add_route(TopicFilter, DSSessionID), - {ok, DSSubExt, IsNew} = session_add_subscription( - DSSessionID, TopicFilter, SubOpts - ), - ?tp(persistent_session_ds_subscription_added, #{sub => DSSubExt, is_new => IsNew}), - %% we'll list streams and open iterators when implementing message replay. - DSSubExt. - --spec update_subscription(topic_filter(), subscription(), emqx_types:subopts(), id()) -> - subscription(). -update_subscription(TopicFilter, DSSubExt, SubOpts, DSSessionID) -> - {ok, NDSSubExt, false} = session_add_subscription( - DSSessionID, TopicFilter, SubOpts - ), - ok = ?tp(persistent_session_ds_iterator_updated, #{sub => DSSubExt}), - NDSSubExt. - --spec del_subscription(topic_filter(), id()) -> - ok. -del_subscription(TopicFilter, DSSessionId) -> - %% TODO: transaction? - ?tp_span( - persistent_session_ds_subscription_delete, - #{session_id => DSSessionId}, - ok = session_del_subscription(DSSessionId, TopicFilter) - ), - ?tp_span( - persistent_session_ds_subscription_route_delete, - #{session_id => DSSessionId}, - ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, DSSessionId) - ). 
- %%-------------------------------------------------------------------- %% Session tables operations %%-------------------------------------------------------------------- create_tables() -> - ok = mria:create_table( - ?SESSION_TAB, - [ - {rlog_shard, ?DS_MRIA_SHARD}, - {type, set}, - {storage, storage()}, - {record_name, session}, - {attributes, record_info(fields, session)} - ] - ), - ok = mria:create_table( - ?SESSION_SUBSCRIPTIONS_TAB, - [ - {rlog_shard, ?DS_MRIA_SHARD}, - {type, ordered_set}, - {storage, storage()}, - {record_name, ds_sub}, - {attributes, record_info(fields, ds_sub)} - ] - ), - ok = mria:create_table( - ?SESSION_STREAM_TAB, - [ - {rlog_shard, ?DS_MRIA_SHARD}, - {type, bag}, - {storage, storage()}, - {record_name, ds_stream}, - {attributes, record_info(fields, ds_stream)} - ] - ), - ok = mria:create_table( - ?SESSION_PUBRANGE_TAB, - [ - {rlog_shard, ?DS_MRIA_SHARD}, - {type, ordered_set}, - {storage, storage()}, - {record_name, ds_pubrange}, - {attributes, record_info(fields, ds_pubrange)} - ] - ), - ok = mria:create_table( - ?SESSION_COMMITTED_OFFSET_TAB, - [ - {rlog_shard, ?DS_MRIA_SHARD}, - {type, set}, - {storage, storage()}, - {record_name, ds_committed_offset}, - {attributes, record_info(fields, ds_committed_offset)} - ] - ), - ok = mria:wait_for_tables([ - ?SESSION_TAB, - ?SESSION_SUBSCRIPTIONS_TAB, - ?SESSION_STREAM_TAB, - ?SESSION_PUBRANGE_TAB, - ?SESSION_COMMITTED_OFFSET_TAB - ]), - ok. + emqx_persistent_session_ds_state:create_tables(). --dialyzer({nowarn_function, storage/0}). -storage() -> - %% FIXME: This is a temporary workaround to avoid crashes when starting on Windows - case mria:rocksdb_backend_available() of - true -> - rocksdb_copies; - _ -> - disc_copies +%% @doc Force syncing of the transient state to persistent storage +sync(ClientId) -> + case emqx_cm:lookup_channels(ClientId) of + [Pid] -> + Ref = monitor(process, Pid), + Pid ! 
{emqx_session, #req_sync{from = self(), ref = Ref}}, + receive + {'DOWN', Ref, process, _Pid, Reason} -> + {error, Reason}; + Ref -> + demonitor(Ref, [flush]), + ok + end; + [] -> + {error, noproc} end. %% @doc Called when a client connects. This function looks up a @@ -622,204 +620,84 @@ storage() -> session() | false. session_open(SessionId, NewConnInfo) -> NowMS = now_ms(), - transaction(fun() -> - case mnesia:read(?SESSION_TAB, SessionId, write) of - [Record0 = #session{last_alive_at = LastAliveAt, conninfo = ConnInfo}] -> - EI = expiry_interval(ConnInfo), - case ?IS_EXPIRED(NowMS, LastAliveAt, EI) of - true -> - session_drop(SessionId), - false; - false -> - %% new connection being established - Record1 = Record0#session{conninfo = NewConnInfo}, - Record = session_set_last_alive_at(Record1, NowMS), - Session = export_session(Record), - DSSubs = session_read_subscriptions(SessionId), - Subscriptions = export_subscriptions(DSSubs), - Inflight = emqx_persistent_message_ds_replayer:open(SessionId), - Session#{ - conninfo => NewConnInfo, - inflight => Inflight, - subscriptions => Subscriptions - } - end; - _ -> - false - end - end). + case emqx_persistent_session_ds_state:open(SessionId) of + {ok, S0} -> + EI = emqx_persistent_session_ds_state:get_expiry_interval(S0), + LastAliveAt = emqx_persistent_session_ds_state:get_last_alive_at(S0), + case NowMS >= LastAliveAt + EI of + true -> + session_drop(SessionId, expired), + false; + false -> + ?tp(open_session, #{ei => EI, now => NowMS, laa => LastAliveAt}), + %% New connection being established + S1 = emqx_persistent_session_ds_state:set_expiry_interval(EI, S0), + S2 = emqx_persistent_session_ds_state:set_last_alive_at(NowMS, S1), + S = emqx_persistent_session_ds_state:commit(S2), + Inflight = emqx_persistent_session_ds_inflight:new( + receive_maximum(NewConnInfo) + ), + #{ + id => SessionId, + s => S, + inflight => Inflight, + props => #{} + } + end; + undefined -> + false + end. 
--spec session_ensure_new(id(), emqx_types:conninfo()) -> +-spec session_ensure_new(id(), emqx_types:conninfo(), emqx_session:conf()) -> session(). -session_ensure_new(SessionId, ConnInfo) -> - transaction(fun() -> - ok = session_drop_records(SessionId), - Session = export_session(session_create(SessionId, ConnInfo)), - Session#{ - subscriptions => subs_new(), - inflight => emqx_persistent_message_ds_replayer:new() - } - end). - -session_create(SessionId, ConnInfo) -> - Session = #session{ - id = SessionId, - created_at = now_ms(), - last_alive_at = now_ms(), - conninfo = ConnInfo - }, - ok = mnesia:write(?SESSION_TAB, Session, write), - Session. - -session_set_last_alive_at_trans(Session, LastAliveAt) -> - #{conninfo := ConnInfo} = Session, - session_set_last_alive_at_trans(Session, ConnInfo, LastAliveAt). - -session_set_last_alive_at_trans(Session, NewConnInfo, LastAliveAt) -> - #{id := SessionId} = Session, - transaction(fun() -> - case mnesia:read(?SESSION_TAB, SessionId, write) of - [#session{} = SessionRecord0] -> - SessionRecord = SessionRecord0#session{conninfo = NewConnInfo}, - _ = session_set_last_alive_at(SessionRecord, LastAliveAt), - ok; - _ -> - %% log and crash? - ok - end - end), - Session#{conninfo := NewConnInfo, last_alive_at := LastAliveAt}. - -session_set_last_alive_at(SessionRecord0, LastAliveAt) -> - SessionRecord = SessionRecord0#session{last_alive_at = LastAliveAt}, - ok = mnesia:write(?SESSION_TAB, SessionRecord, write), - SessionRecord. 
+session_ensure_new(Id, ConnInfo, Conf) -> + ?tp(debug, persistent_session_ds_ensure_new, #{id => Id}), + Now = now_ms(), + S0 = emqx_persistent_session_ds_state:create_new(Id), + S1 = emqx_persistent_session_ds_state:set_expiry_interval(expiry_interval(ConnInfo), S0), + S2 = bump_last_alive(S1), + S3 = emqx_persistent_session_ds_state:set_created_at(Now, S2), + S4 = lists:foldl( + fun(Track, Acc) -> + emqx_persistent_session_ds_state:put_seqno(Track, 0, Acc) + end, + S3, + [ + ?next(?QOS_1), + ?dup(?QOS_1), + ?committed(?QOS_1), + ?next(?QOS_2), + ?dup(?QOS_2), + ?rec, + ?committed(?QOS_2) + ] + ), + S = emqx_persistent_session_ds_state:commit(S4), + #{ + id => Id, + props => Conf, + s => S, + inflight => emqx_persistent_session_ds_inflight:new(receive_maximum(ConnInfo)) + }. %% @doc Called when a client reconnects with `clean session=true' or %% during session GC --spec session_drop(id()) -> ok. -session_drop(DSSessionId) -> - transaction(fun() -> - ok = session_drop_records(DSSessionId), - ok = mnesia:delete(?SESSION_TAB, DSSessionId, write) - end). - --spec session_drop_records(id()) -> ok. -session_drop_records(DSSessionId) -> - ok = session_drop_subscriptions(DSSessionId), - ok = session_drop_pubranges(DSSessionId), - ok = session_drop_offsets(DSSessionId), - ok = session_drop_streams(DSSessionId). - --spec session_drop_subscriptions(id()) -> ok. -session_drop_subscriptions(DSSessionId) -> - Subscriptions = session_read_subscriptions(DSSessionId, write), - lists:foreach( - fun(#ds_sub{id = DSSubId} = DSSub) -> - TopicFilter = subscription_id_to_topic_filter(DSSubId), - ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, DSSessionId), - ok = session_del_subscription(DSSub) - end, - Subscriptions - ). - -%% @doc Called when a client subscribes to a topic. Idempotent. --spec session_add_subscription(id(), topic_filter(), _Props :: map()) -> - {ok, subscription(), _IsNew :: boolean()}. 
-session_add_subscription(DSSessionId, TopicFilter, Props) -> - DSSubId = {DSSessionId, TopicFilter}, - transaction(fun() -> - case mnesia:read(?SESSION_SUBSCRIPTIONS_TAB, DSSubId, write) of - [] -> - DSSub = session_insert_subscription(DSSessionId, TopicFilter, Props), - DSSubExt = export_subscription(DSSub), - ?tp( - ds_session_subscription_added, - #{sub => DSSubExt, session_id => DSSessionId} - ), - {ok, DSSubExt, _IsNew = true}; - [#ds_sub{} = DSSub] -> - NDSSub = session_update_subscription(DSSub, Props), - NDSSubExt = export_subscription(NDSSub), - ?tp( - ds_session_subscription_present, - #{sub => NDSSubExt, session_id => DSSessionId} - ), - {ok, NDSSubExt, _IsNew = false} - end - end). - --spec session_insert_subscription(id(), topic_filter(), map()) -> ds_sub(). -session_insert_subscription(DSSessionId, TopicFilter, Props) -> - {DSSubId, StartMS} = new_subscription_id(DSSessionId, TopicFilter), - DSSub = #ds_sub{ - id = DSSubId, - start_time = StartMS, - props = Props, - extra = #{} - }, - ok = mnesia:write(?SESSION_SUBSCRIPTIONS_TAB, DSSub, write), - DSSub. - --spec session_update_subscription(ds_sub(), map()) -> ds_sub(). -session_update_subscription(DSSub, Props) -> - NDSSub = DSSub#ds_sub{props = Props}, - ok = mnesia:write(?SESSION_SUBSCRIPTIONS_TAB, NDSSub, write), - NDSSub. - -session_del_subscription(DSSessionId, TopicFilter) -> - DSSubId = {DSSessionId, TopicFilter}, - transaction(fun() -> - mnesia:delete(?SESSION_SUBSCRIPTIONS_TAB, DSSubId, write) - end). - -session_del_subscription(#ds_sub{id = DSSubId}) -> - mnesia:delete(?SESSION_SUBSCRIPTIONS_TAB, DSSubId, write). - -session_read_subscriptions(DSSessionID) -> - session_read_subscriptions(DSSessionID, read). - -session_read_subscriptions(DSSessionId, LockKind) -> - MS = ets:fun2ms( - fun(Sub = #ds_sub{id = {Sess, _}}) when Sess =:= DSSessionId -> - Sub - end - ), - mnesia:select(?SESSION_SUBSCRIPTIONS_TAB, MS, LockKind). 
- -session_read_pubranges(DSSessionID) -> - session_read_pubranges(DSSessionID, read). - -session_read_pubranges(DSSessionId, LockKind) -> - MS = ets:fun2ms( - fun(#ds_pubrange{id = ID}) when element(1, ID) =:= DSSessionId -> - ID - end - ), - mnesia:select(?SESSION_PUBRANGE_TAB, MS, LockKind). - -session_read_offsets(DSSessionID) -> - session_read_offsets(DSSessionID, read). - -session_read_offsets(DSSessionId, LockKind) -> - MS = ets:fun2ms( - fun(#ds_committed_offset{id = {Sess, Type}}) when Sess =:= DSSessionId -> - {DSSessionId, Type} - end - ), - mnesia:select(?SESSION_COMMITTED_OFFSET_TAB, MS, LockKind). - --spec new_subscription_id(id(), topic_filter()) -> {subscription_id(), integer()}. -new_subscription_id(DSSessionId, TopicFilter) -> - %% Note: here we use _milliseconds_ to match with the timestamp - %% field of `#message' record. - NowMS = now_ms(), - DSSubId = {DSSessionId, TopicFilter}, - {DSSubId, NowMS}. - --spec subscription_id_to_topic_filter(subscription_id()) -> topic_filter(). -subscription_id_to_topic_filter({_DSSessionId, TopicFilter}) -> - TopicFilter. +-spec session_drop(id(), _Reason) -> ok. +session_drop(ID, Reason) -> + case emqx_persistent_session_ds_state:open(ID) of + {ok, S0} -> + ?tp(debug, drop_persistent_session, #{client_id => ID, reason => Reason}), + _S = emqx_persistent_session_ds_subs:fold( + fun(TopicFilter, Subscription, S) -> + do_unsubscribe(ID, TopicFilter, Subscription, S) + end, + S0, + S0 + ), + emqx_persistent_session_ds_state:delete(ID); + undefined -> + ok + end. now_ms() -> erlang:system_time(millisecond). @@ -845,166 +723,229 @@ do_ensure_all_iterators_closed(_DSSessionID) -> ok. %%-------------------------------------------------------------------- -%% Reading batches +%% Normal replay: %%-------------------------------------------------------------------- --spec renew_streams(session()) -> ok. 
-renew_streams(#{id := SessionId, subscriptions := Subscriptions}) -> - transaction(fun() -> - ExistingStreams = mnesia:read(?SESSION_STREAM_TAB, SessionId, write), - subs_fold( - fun(TopicFilter, #{start_time := StartTime}, Streams) -> - TopicFilterWords = emqx_topic:words(TopicFilter), - renew_topic_streams(SessionId, TopicFilterWords, StartTime, Streams) - end, - ExistingStreams, - Subscriptions - ) - end), - ok. +fetch_new_messages(Session = #{s := S}, ClientInfo) -> + Streams = emqx_persistent_session_ds_stream_scheduler:find_new_streams(S), + fetch_new_messages(Streams, Session, ClientInfo). --spec renew_topic_streams(id(), topic_filter_words(), emqx_ds:time(), _Acc :: [ds_stream()]) -> ok. -renew_topic_streams(DSSessionId, TopicFilter, StartTime, ExistingStreams) -> - TopicStreams = emqx_ds:get_streams(?PERSISTENT_MESSAGE_DB, TopicFilter, StartTime), - lists:foldl( - fun({Rank, Stream}, Streams) -> - case lists:keymember(Stream, #ds_stream.stream, Streams) of - true -> - Streams; - false -> - StreamRef = length(Streams) + 1, - DSStream = session_store_stream( - DSSessionId, - StreamRef, - Stream, - Rank, - TopicFilter, - StartTime - ), - [DSStream | Streams] - end - end, - ExistingStreams, - TopicStreams - ). - -session_store_stream(DSSessionId, StreamRef, Stream, Rank, TopicFilter, StartTime) -> - {ok, ItBegin} = emqx_ds:make_iterator( - ?PERSISTENT_MESSAGE_DB, - Stream, - TopicFilter, - StartTime - ), - DSStream = #ds_stream{ - session = DSSessionId, - ref = StreamRef, - stream = Stream, - rank = Rank, - beginning = ItBegin - }, - mnesia:write(?SESSION_STREAM_TAB, DSStream, write), - DSStream. - -%% must be called inside a transaction --spec session_drop_streams(id()) -> ok. -session_drop_streams(DSSessionId) -> - mnesia:delete(?SESSION_STREAM_TAB, DSSessionId, write). - -%% must be called inside a transaction --spec session_drop_pubranges(id()) -> ok. 
-session_drop_pubranges(DSSessionId) -> - RangeIds = session_read_pubranges(DSSessionId, write), - lists:foreach( - fun(RangeId) -> - mnesia:delete(?SESSION_PUBRANGE_TAB, RangeId, write) - end, - RangeIds - ). - -%% must be called inside a transaction --spec session_drop_offsets(id()) -> ok. -session_drop_offsets(DSSessionId) -> - OffsetIds = session_read_offsets(DSSessionId, write), - lists:foreach( - fun(OffsetId) -> - mnesia:delete(?SESSION_COMMITTED_OFFSET_TAB, OffsetId, write) - end, - OffsetIds - ). - -%%-------------------------------------------------------------------------------- - -subs_new() -> - emqx_topic_gbt:new(). - -subs_lookup(TopicFilter, Subs) -> - emqx_topic_gbt:lookup(TopicFilter, [], Subs, undefined). - -subs_insert(TopicFilter, Subscription, Subs) -> - emqx_topic_gbt:insert(TopicFilter, [], Subscription, Subs). - -subs_delete(TopicFilter, Subs) -> - emqx_topic_gbt:delete(TopicFilter, [], Subs). - -subs_matches(Topic, Subs) -> - emqx_topic_gbt:matches(Topic, Subs, []). - -subs_get_match(M, Subs) -> - emqx_topic_gbt:get_record(M, Subs). - -subs_size(Subs) -> - emqx_topic_gbt:size(Subs). - -subs_to_map(Subs) -> - subs_fold( - fun(TopicFilter, #{props := Props}, Acc) -> Acc#{TopicFilter => Props} end, - #{}, - Subs - ). - -subs_fold(Fun, AccIn, Subs) -> - emqx_topic_gbt:fold( - fun(Key, Sub, Acc) -> Fun(emqx_topic_gbt:get_topic(Key), Sub, Acc) end, - AccIn, - Subs - ). 
- -%%-------------------------------------------------------------------------------- - -transaction(Fun) -> - case mnesia:is_transaction() of +fetch_new_messages([], Session, _ClientInfo) -> + Session; +fetch_new_messages([I | Streams], Session0 = #{inflight := Inflight}, ClientInfo) -> + BatchSize = emqx_config:get([session_persistence, max_batch_size]), + case emqx_persistent_session_ds_inflight:n_buffered(all, Inflight) >= BatchSize of true -> - Fun(); + %% Buffer is full: + Session0; false -> - {atomic, Res} = mria:transaction(?DS_MRIA_SHARD, Fun), - Res + Session = new_batch(I, BatchSize, Session0, ClientInfo), + fetch_new_messages(Streams, Session, ClientInfo) end. -ro_transaction(Fun) -> - {atomic, Res} = mria:ro_transaction(?DS_MRIA_SHARD, Fun), - Res. +new_batch({StreamKey, Srs0}, BatchSize, Session = #{s := S0}, ClientInfo) -> + SN1 = emqx_persistent_session_ds_state:get_seqno(?next(?QOS_1), S0), + SN2 = emqx_persistent_session_ds_state:get_seqno(?next(?QOS_2), S0), + Srs1 = Srs0#srs{ + first_seqno_qos1 = SN1, + first_seqno_qos2 = SN2, + batch_size = 0, + last_seqno_qos1 = SN1, + last_seqno_qos2 = SN2 + }, + {Srs, Inflight} = enqueue_batch(false, BatchSize, Srs1, Session, ClientInfo), + S1 = emqx_persistent_session_ds_state:put_seqno(?next(?QOS_1), Srs#srs.last_seqno_qos1, S0), + S2 = emqx_persistent_session_ds_state:put_seqno(?next(?QOS_2), Srs#srs.last_seqno_qos2, S1), + S = emqx_persistent_session_ds_state:put_stream(StreamKey, Srs, S2), + Session#{s => S, inflight => Inflight}. 
-%%-------------------------------------------------------------------------------- - -export_subscriptions(DSSubs) -> - lists:foldl( - fun(DSSub = #ds_sub{id = {_DSSessionId, TopicFilter}}, Acc) -> - subs_insert(TopicFilter, export_subscription(DSSub), Acc) +enqueue_batch(IsReplay, BatchSize, Srs0, Session = #{inflight := Inflight0}, ClientInfo) -> + #srs{ + it_begin = ItBegin0, + it_end = ItEnd0, + first_seqno_qos1 = FirstSeqnoQos1, + first_seqno_qos2 = FirstSeqnoQos2 + } = Srs0, + ItBegin = + case IsReplay of + true -> ItBegin0; + false -> ItEnd0 end, - subs_new(), - DSSubs + case emqx_ds:next(?PERSISTENT_MESSAGE_DB, ItBegin, BatchSize) of + {ok, ItEnd, Messages} -> + {Inflight, LastSeqnoQos1, LastSeqnoQos2} = process_batch( + IsReplay, Session, ClientInfo, FirstSeqnoQos1, FirstSeqnoQos2, Messages, Inflight0 + ), + Srs = Srs0#srs{ + it_begin = ItBegin, + it_end = ItEnd, + %% TODO: it should be possible to avoid calling + %% length here by diffing size of inflight before + %% and after inserting messages: + batch_size = length(Messages), + last_seqno_qos1 = LastSeqnoQos1, + last_seqno_qos2 = LastSeqnoQos2 + }, + {Srs, Inflight}; + {ok, end_of_stream} -> + %% No new messages; just update the end iterator: + {Srs0#srs{it_begin = ItBegin, it_end = end_of_stream, batch_size = 0}, Inflight0}; + {error, _} when not IsReplay -> + ?SLOG(info, #{msg => "failed_to_fetch_batch", iterator => ItBegin}), + {Srs0, Inflight0} + end. + +%% key_of_iter(#{3 := #{3 := #{5 := K}}}) -> +%% K. 
+ +process_batch(_IsReplay, _Session, _ClientInfo, LastSeqNoQos1, LastSeqNoQos2, [], Inflight) -> + {Inflight, LastSeqNoQos1, LastSeqNoQos2}; +process_batch( + IsReplay, Session, ClientInfo, FirstSeqNoQos1, FirstSeqNoQos2, [KV | Messages], Inflight0 +) -> + #{s := S, props := #{upgrade_qos := UpgradeQoS}} = Session, + {_DsMsgKey, Msg0 = #message{topic = Topic}} = KV, + Comm1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S), + Comm2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S), + Dup1 = emqx_persistent_session_ds_state:get_seqno(?dup(?QOS_1), S), + Dup2 = emqx_persistent_session_ds_state:get_seqno(?dup(?QOS_2), S), + Rec = emqx_persistent_session_ds_state:get_seqno(?rec, S), + Subs = emqx_persistent_session_ds_state:get_subscriptions(S), + Msgs = [ + Msg + || SubMatch <- emqx_topic_gbt:matches(Topic, Subs, []), + Msg <- begin + #{props := SubOpts} = emqx_topic_gbt:get_record(SubMatch, Subs), + emqx_session:enrich_message(ClientInfo, Msg0, SubOpts, UpgradeQoS) + end + ], + {Inflight, LastSeqNoQos1, LastSeqNoQos2} = lists:foldl( + fun(Msg = #message{qos = Qos}, {Acc, SeqNoQos10, SeqNoQos20}) -> + case Qos of + ?QOS_0 -> + SeqNoQos1 = SeqNoQos10, + SeqNoQos2 = SeqNoQos20; + ?QOS_1 -> + SeqNoQos1 = inc_seqno(?QOS_1, SeqNoQos10), + SeqNoQos2 = SeqNoQos20; + ?QOS_2 -> + SeqNoQos1 = SeqNoQos10, + SeqNoQos2 = inc_seqno(?QOS_2, SeqNoQos20) + end, + { + case Qos of + ?QOS_0 when IsReplay -> + %% We ignore QoS 0 messages during replay: + Acc; + ?QOS_0 -> + emqx_persistent_session_ds_inflight:push({undefined, Msg}, Acc); + ?QOS_1 when SeqNoQos1 =< Comm1 -> + %% QoS1 message has been acked by the client, ignore: + Acc; + ?QOS_1 when SeqNoQos1 =< Dup1 -> + %% QoS1 message has been sent but not + %% acked. 
Retransmit: + Msg1 = emqx_message:set_flag(dup, true, Msg), + emqx_persistent_session_ds_inflight:push({SeqNoQos1, Msg1}, Acc); + ?QOS_1 -> + emqx_persistent_session_ds_inflight:push({SeqNoQos1, Msg}, Acc); + ?QOS_2 when SeqNoQos2 =< Comm2 -> + %% QoS2 message has been PUBCOMP'ed by the client, ignore: + Acc; + ?QOS_2 when SeqNoQos2 =< Rec -> + %% QoS2 message has been PUBREC'ed by the client, resend PUBREL: + emqx_persistent_session_ds_inflight:push({pubrel, SeqNoQos2}, Acc); + ?QOS_2 when SeqNoQos2 =< Dup2 -> + %% QoS2 message has been sent, but we haven't received PUBREC. + %% + %% TODO: According to the MQTT standard 4.3.3: + %% DUP flag is never set for QoS2 messages? We + %% do so for mem sessions, though. + Msg1 = emqx_message:set_flag(dup, true, Msg), + emqx_persistent_session_ds_inflight:push({SeqNoQos2, Msg1}, Acc); + ?QOS_2 -> + emqx_persistent_session_ds_inflight:push({SeqNoQos2, Msg}, Acc) + end, + SeqNoQos1, + SeqNoQos2 + } + end, + {Inflight0, FirstSeqNoQos1, FirstSeqNoQos2}, + Msgs + ), + process_batch( + IsReplay, Session, ClientInfo, LastSeqNoQos1, LastSeqNoQos2, Messages, Inflight ). -export_session(#session{} = Record) -> - export_record(Record, #session.id, [id, created_at, last_alive_at, conninfo, props], #{}). +%%-------------------------------------------------------------------- +%% Transient messages +%%-------------------------------------------------------------------- -export_subscription(#ds_sub{} = Record) -> - export_record(Record, #ds_sub.start_time, [start_time, props, extra], #{}). +enqueue_transient(ClientInfo, Msg0, Session = #{s := S, props := #{upgrade_qos := UpgradeQoS}}) -> + %% TODO: Such messages won't be retransmitted, should the session + %% reconnect before transient messages are acked. + %% + %% Proper solution could look like this: session publishes + %% transient messages to a separate DS DB that serves as a queue, + %% then subscribes to a special system topic that contains the + %% queued messages. 
Since streams in this DB are exclusive to the + %% session, messages from the queue can be dropped as soon as they + %% are acked. + Subs = emqx_persistent_session_ds_state:get_subscriptions(S), + Msgs = [ + Msg + || SubMatch <- emqx_topic_gbt:matches(Msg0#message.topic, Subs, []), + Msg <- begin + #{props := SubOpts} = emqx_topic_gbt:get_record(SubMatch, Subs), + emqx_session:enrich_message(ClientInfo, Msg0, SubOpts, UpgradeQoS) + end + ], + lists:foldl(fun do_enqueue_transient/2, Session, Msgs). -export_record(Record, I, [Field | Rest], Acc) -> - export_record(Record, I + 1, Rest, Acc#{Field => element(I, Record)}); -export_record(_, _, [], Acc) -> - Acc. +do_enqueue_transient(Msg = #message{qos = Qos}, Session = #{inflight := Inflight0, s := S0}) -> + case Qos of + ?QOS_0 -> + S = S0, + Inflight = emqx_persistent_session_ds_inflight:push({undefined, Msg}, Inflight0); + QoS when QoS =:= ?QOS_1; QoS =:= ?QOS_2 -> + SeqNo = inc_seqno( + QoS, emqx_persistent_session_ds_state:get_seqno(?next(QoS), S0) + ), + S = emqx_persistent_session_ds_state:put_seqno(?next(QoS), SeqNo, S0), + Inflight = emqx_persistent_session_ds_inflight:push({SeqNo, Msg}, Inflight0) + end, + Session#{ + inflight => Inflight, + s => S + }. + +%%-------------------------------------------------------------------- +%% Buffer drain +%%-------------------------------------------------------------------- + +drain_buffer(Session = #{inflight := Inflight0, s := S0}) -> + {Publishes, Inflight, S} = do_drain_buffer(Inflight0, S0, []), + {Publishes, Session#{inflight => Inflight, s := S}}. 
+ +do_drain_buffer(Inflight0, S0, Acc) -> + case emqx_persistent_session_ds_inflight:pop(Inflight0) of + undefined -> + {lists:reverse(Acc), Inflight0, S0}; + {{pubrel, SeqNo}, Inflight} -> + Publish = {pubrel, seqno_to_packet_id(?QOS_2, SeqNo)}, + do_drain_buffer(Inflight, S0, [Publish | Acc]); + {{SeqNo, Msg}, Inflight} -> + case Msg#message.qos of + ?QOS_0 -> + do_drain_buffer(Inflight, S0, [{undefined, Msg} | Acc]); + Qos -> + S = emqx_persistent_session_ds_state:put_seqno(?dup(Qos), SeqNo, S0), + Publish = {seqno_to_packet_id(Qos, SeqNo), Msg}, + do_drain_buffer(Inflight, S, [Publish | Acc]) + end + end. + +%%-------------------------------------------------------------------------------- %% TODO: find a more reliable way to perform actions that have side %% effects. Add `CBM:init' callback to the session behavior? @@ -1029,75 +970,250 @@ receive_maximum(ConnInfo) -> expiry_interval(ConnInfo) -> maps:get(expiry_interval, ConnInfo, 0). --ifdef(TEST). -list_all_sessions() -> - DSSessionIds = mnesia:dirty_all_keys(?SESSION_TAB), - ConnInfo = #{}, - Sessions = lists:filtermap( - fun(SessionID) -> - Sess = session_open(SessionID, ConnInfo), - case Sess of - false -> - false; - _ -> - {true, {SessionID, Sess}} - end - end, - DSSessionIds - ), - maps:from_list(Sessions). +bump_interval() -> + emqx_config:get([session_persistence, last_alive_update_interval]). -list_all_subscriptions() -> - DSSubIds = mnesia:dirty_all_keys(?SESSION_SUBSCRIPTIONS_TAB), - Subscriptions = lists:map( - fun(DSSubId) -> - [DSSub] = mnesia:dirty_read(?SESSION_SUBSCRIPTIONS_TAB, DSSubId), - {DSSubId, export_subscription(DSSub)} - end, - DSSubIds - ), - maps:from_list(Subscriptions). +-spec try_get_live_session(emqx_types:clientid()) -> + {pid(), session()} | not_found | not_persistent. 
+try_get_live_session(ClientId) -> + case emqx_cm:lookup_channels(local, ClientId) of + [Pid] -> + try + #{channel := ChanState} = emqx_connection:get_state(Pid), + case emqx_channel:info(impl, ChanState) of + ?MODULE -> + {Pid, emqx_channel:info(session_state, ChanState)}; + _ -> + not_persistent + end + catch + _:_ -> + not_found + end; + _ -> + not_found + end. -list_all_streams() -> - DSStreamIds = mnesia:dirty_all_keys(?SESSION_STREAM_TAB), - DSStreams = lists:map( - fun(DSStreamId) -> - Records = mnesia:dirty_read(?SESSION_STREAM_TAB, DSStreamId), - ExtDSStreams = - lists:map( - fun(Record) -> - export_record( - Record, - #ds_stream.session, - [session, topic_filter, stream, rank], - #{} - ) - end, - Records - ), - {DSStreamId, ExtDSStreams} - end, - DSStreamIds - ), - maps:from_list(DSStreams). +%%-------------------------------------------------------------------- +%% SeqNo tracking +%% -------------------------------------------------------------------- -list_all_pubranges() -> - DSPubranges = mnesia:dirty_match_object(?SESSION_PUBRANGE_TAB, #ds_pubrange{_ = '_'}), - lists:foldl( - fun(Record = #ds_pubrange{id = {SessionId, First, StreamRef}}, Acc) -> - Range = #{ - session => SessionId, - stream => StreamRef, - first => First, - until => Record#ds_pubrange.until, - type => Record#ds_pubrange.type, - iterator => Record#ds_pubrange.iterator - }, - maps:put(SessionId, maps:get(SessionId, Acc, []) ++ [Range], Acc) - end, - #{}, - DSPubranges +-spec update_seqno(puback | pubrec | pubcomp, emqx_types:packet_id(), session()) -> + {ok, emqx_types:message(), session()} | {error, _}. 
+update_seqno(Track, PacketId, Session = #{id := SessionId, s := S, inflight := Inflight0}) -> + SeqNo = packet_id_to_seqno(PacketId, S), + case Track of + puback -> + SeqNoKey = ?committed(?QOS_1), + Result = emqx_persistent_session_ds_inflight:puback(SeqNo, Inflight0); + pubrec -> + SeqNoKey = ?rec, + Result = emqx_persistent_session_ds_inflight:pubrec(SeqNo, Inflight0); + pubcomp -> + SeqNoKey = ?committed(?QOS_2), + Result = emqx_persistent_session_ds_inflight:pubcomp(SeqNo, Inflight0) + end, + case Result of + {ok, Inflight} -> + %% TODO: we pass a bogus message into the hook: + Msg = emqx_message:make(SessionId, <<>>, <<>>), + {ok, Msg, Session#{ + s => emqx_persistent_session_ds_state:put_seqno(SeqNoKey, SeqNo, S), + inflight => Inflight + }}; + {error, Expected} -> + ?SLOG(warning, #{ + msg => "out-of-order_commit", + track => Track, + packet_id => PacketId, + seqno => SeqNo, + expected => Expected + }), + {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} + end. + +%%-------------------------------------------------------------------- +%% Functions for dealing with the sequence number and packet ID +%% generation +%% -------------------------------------------------------------------- + +-define(EPOCH_BITS, 15). +-define(PACKET_ID_MASK, 2#111_1111_1111_1111). + +%% Epoch size = `16#10000 div 2' since we generate different sets of +%% packet IDs for QoS1 and QoS2: +-define(EPOCH_SIZE, 16#8000). + +%% Reconstruct session counter by adding most significant bits from +%% the current counter to the packet id: +-spec packet_id_to_seqno(emqx_types:packet_id(), emqx_persistent_session_ds_state:t()) -> + seqno(). +packet_id_to_seqno(PacketId, S) -> + NextSeqNo = emqx_persistent_session_ds_state:get_seqno(?next(packet_id_to_qos(PacketId)), S), + Epoch = NextSeqNo bsr ?EPOCH_BITS, + SeqNo = (Epoch bsl ?EPOCH_BITS) + (PacketId band ?PACKET_ID_MASK), + case SeqNo =< NextSeqNo of + true -> + SeqNo; + false -> + SeqNo - ?EPOCH_SIZE + end. 
+
+-spec inc_seqno(?QOS_1 | ?QOS_2, seqno()) -> emqx_types:packet_id().
+inc_seqno(Qos, SeqNo) ->
+    NextSeqno = SeqNo + 1,
+    case seqno_to_packet_id(Qos, NextSeqno) of
+        0 ->
+            %% We skip sequence numbers that lead to PacketId = 0 to
+            %% simplify math. Note: it leads to occasional gaps in the
+            %% sequence numbers.
+            NextSeqno + 1;
+        _ ->
+            NextSeqno
+    end.
+
+%% Note: we use the most significant bit to store the QoS.
+seqno_to_packet_id(?QOS_1, SeqNo) ->
+    SeqNo band ?PACKET_ID_MASK;
+seqno_to_packet_id(?QOS_2, SeqNo) ->
+    SeqNo band ?PACKET_ID_MASK bor ?EPOCH_SIZE.
+
+packet_id_to_qos(PacketId) ->
+    PacketId bsr ?EPOCH_BITS + 1.
+
+seqno_diff(Qos, A, B, S) ->
+    seqno_diff(
+        Qos,
+        emqx_persistent_session_ds_state:get_seqno(A, S),
+        emqx_persistent_session_ds_state:get_seqno(B, S)
     ).
 
-%% ifdef(TEST)
+%% Dialyzer complains about the second clause, since it's currently
+%% unused, shut it up:
+-dialyzer({nowarn_function, seqno_diff/3}).
+seqno_diff(?QOS_1, A, B) ->
+    %% For QoS1 messages we skip a seqno every time the epoch changes,
+    %% we need to subtract that from the diff:
+    EpochA = A bsr ?EPOCH_BITS,
+    EpochB = B bsr ?EPOCH_BITS,
+    A - B - (EpochA - EpochB);
+seqno_diff(?QOS_2, A, B) ->
+    A - B.
+
+%%--------------------------------------------------------------------
+%% Tests
+%%--------------------------------------------------------------------
+
+-ifdef(TEST).
+
+%% Warning: the below functions may return out-of-date results because
+%% the sessions commit data to mria asynchronously.
+
+list_all_sessions() ->
+    maps:from_list(
+        [
+            {Id, print_session(Id)}
+         || Id <- emqx_persistent_session_ds_state:list_sessions()
+        ]
+    ).
+
+%%%% Proper generators:
+
+%% Generate a sequence number that is smaller than the given `NextSeqNo'
+%% number by at most `?EPOCH_SIZE':
+seqno_gen(NextSeqNo) ->
+    WindowSize = ?EPOCH_SIZE - 1,
+    Min = max(0, NextSeqNo - WindowSize),
+    Max = max(0, NextSeqNo - 1),
+    range(Min, Max).
+ +%% Generate a sequence number: +next_seqno_gen() -> + ?LET( + {Epoch, Offset}, + {non_neg_integer(), range(0, ?EPOCH_SIZE)}, + Epoch bsl ?EPOCH_BITS + Offset + ). + +%%%% Property-based tests: + +%% erlfmt-ignore +packet_id_to_seqno_prop() -> + ?FORALL( + {Qos, NextSeqNo}, {oneof([?QOS_1, ?QOS_2]), next_seqno_gen()}, + ?FORALL( + ExpectedSeqNo, seqno_gen(NextSeqNo), + begin + PacketId = seqno_to_packet_id(Qos, ExpectedSeqNo), + SeqNo = packet_id_to_seqno(PacketId, NextSeqNo), + ?WHENFAIL( + begin + io:format(user, " *** PacketID = ~p~n", [PacketId]), + io:format(user, " *** SeqNo = ~p -> ~p~n", [ExpectedSeqNo, SeqNo]), + io:format(user, " *** NextSeqNo = ~p~n", [NextSeqNo]) + end, + PacketId < 16#10000 andalso SeqNo =:= ExpectedSeqNo + ) + end)). + +inc_seqno_prop() -> + ?FORALL( + {Qos, SeqNo}, + {oneof([?QOS_1, ?QOS_2]), next_seqno_gen()}, + begin + NewSeqNo = inc_seqno(Qos, SeqNo), + PacketId = seqno_to_packet_id(Qos, NewSeqNo), + ?WHENFAIL( + begin + io:format(user, " *** QoS = ~p~n", [Qos]), + io:format(user, " *** SeqNo = ~p -> ~p~n", [SeqNo, NewSeqNo]), + io:format(user, " *** PacketId = ~p~n", [PacketId]) + end, + PacketId > 0 andalso PacketId < 16#10000 + ) + end + ). + +seqno_diff_prop() -> + ?FORALL( + {Qos, SeqNo, N}, + {oneof([?QOS_1, ?QOS_2]), next_seqno_gen(), range(0, 100)}, + ?IMPLIES( + seqno_to_packet_id(Qos, SeqNo) > 0, + begin + NewSeqNo = apply_n_times(N, fun(A) -> inc_seqno(Qos, A) end, SeqNo), + Diff = seqno_diff(Qos, NewSeqNo, SeqNo), + ?WHENFAIL( + begin + io:format(user, " *** QoS = ~p~n", [Qos]), + io:format(user, " *** SeqNo = ~p -> ~p~n", [SeqNo, NewSeqNo]), + io:format(user, " *** N : ~p == ~p~n", [N, Diff]) + end, + N =:= Diff + ) + end + ) + ). 
+ +seqno_proper_test_() -> + Props = [packet_id_to_seqno_prop(), inc_seqno_prop(), seqno_diff_prop()], + Opts = [{numtests, 1000}, {to_file, user}], + {timeout, 30, + {setup, + fun() -> + meck:new(emqx_persistent_session_ds_state, [no_history]), + ok = meck:expect(emqx_persistent_session_ds_state, get_seqno, fun(_Track, Seqno) -> + Seqno + end) + end, + fun(_) -> + meck:unload(emqx_persistent_session_ds_state) + end, + [?_assert(proper:quickcheck(Prop, Opts)) || Prop <- Props]}}. + +apply_n_times(0, _Fun, A) -> + A; +apply_n_times(N, Fun, A) when N > 0 -> + apply_n_times(N - 1, Fun, Fun(A)). + -endif. diff --git a/apps/emqx/src/emqx_persistent_session_ds.hrl b/apps/emqx/src/emqx_persistent_session_ds.hrl index 31c9b2faf..8a24be31e 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.hrl +++ b/apps/emqx/src/emqx_persistent_session_ds.hrl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -25,75 +25,54 @@ -define(SESSION_COMMITTED_OFFSET_TAB, emqx_ds_committed_offset_tab). -define(DS_MRIA_SHARD, emqx_ds_session_shard). --define(T_INFLIGHT, 1). --define(T_CHECKPOINT, 2). +%%%%% Session sequence numbers: --record(ds_sub, { - id :: emqx_persistent_session_ds:subscription_id(), - start_time :: emqx_ds:time(), - props = #{} :: map(), - extra = #{} :: map() -}). --type ds_sub() :: #ds_sub{}. +%% +%% -----|----------|-----|-----|------> seqno +%% | | | | +%% committed dup rec next +%% (Qos2) --record(ds_stream, { - session :: emqx_persistent_session_ds:id(), - ref :: _StreamRef, - stream :: emqx_ds:stream(), - rank :: emqx_ds:stream_rank(), - beginning :: emqx_ds:iterator() -}). --type ds_stream() :: #ds_stream{}. 
+%% Seqno becomes committed after receiving PUBACK for QoS1 or PUBCOMP +%% for QoS2. +-define(committed(QOS), QOS). +%% Seqno becomes dup after broker sends QoS1 or QoS2 message to the +%% client. Upon session reconnect, messages with seqno in the +%% committed..dup range are retransmitted with DUP flag. +%% +-define(dup(QOS), (10 + QOS)). +%% Rec flag is specific for the QoS2. It contains seqno of the last +%% PUBREC received from the client. When the session reconnects, +%% PUBREL packages for the dup..rec range are retransmitted. +-define(rec, 22). +%% Last seqno assigned to a message (it may not be sent yet). +-define(next(QOS), (30 + QOS)). --record(ds_pubrange, { - id :: { - %% What session this range belongs to. - _Session :: emqx_persistent_session_ds:id(), - %% Where this range starts. - _First :: emqx_persistent_message_ds_replayer:seqno(), - %% Which stream this range is over. - _StreamRef - }, - %% Where this range ends: the first seqno that is not included in the range. - until :: emqx_persistent_message_ds_replayer:seqno(), - %% Type of a range: - %% * Inflight range is a range of yet unacked messages from this stream. - %% * Checkpoint range was already acked, its purpose is to keep track of the - %% very last iterator for this stream. - type :: ?T_INFLIGHT | ?T_CHECKPOINT, - %% What commit tracks this range is part of. - tracks = 0 :: non_neg_integer(), - %% Meaning of this depends on the type of the range: - %% * For inflight range, this is the iterator pointing to the first message in - %% the range. - %% * For checkpoint range, this is the iterator pointing right past the last - %% message in the range. - iterator :: emqx_ds:iterator(), - %% Reserved for future use. - misc = #{} :: map() -}). --type ds_pubrange() :: #ds_pubrange{}. - --record(ds_committed_offset, { - id :: { - %% What session this marker belongs to. - _Session :: emqx_persistent_session_ds:id(), - %% Marker name. 
- _CommitType - }, - %% Where this marker is pointing to: the first seqno that is not marked. - until :: emqx_persistent_message_ds_replayer:seqno() +%%%%% Stream Replay State: +-record(srs, { + rank_x :: emqx_ds:rank_x(), + rank_y :: emqx_ds:rank_y(), + %% Iterators at the beginning and the end of the last batch: + it_begin :: emqx_ds:iterator() | undefined, + it_end :: emqx_ds:iterator() | end_of_stream, + %% Size of the last batch: + batch_size = 0 :: non_neg_integer(), + %% Session sequence numbers at the time when the batch was fetched: + first_seqno_qos1 = 0 :: emqx_persistent_session_ds:seqno(), + first_seqno_qos2 = 0 :: emqx_persistent_session_ds:seqno(), + %% Sequence numbers that have to be committed for the batch: + last_seqno_qos1 = 0 :: emqx_persistent_session_ds:seqno(), + last_seqno_qos2 = 0 :: emqx_persistent_session_ds:seqno(), + %% This stream belongs to an unsubscribed topic-filter, and is + %% marked for deletion: + unsubscribed = false :: boolean() }). --record(session, { - %% same as clientid - id :: emqx_persistent_session_ds:id(), - %% creation time - created_at :: _Millisecond :: non_neg_integer(), - last_alive_at :: _Millisecond :: non_neg_integer(), - conninfo :: emqx_types:conninfo(), - %% for future usage - props = #{} :: map() -}). +%% Session metadata keys: +-define(created_at, created_at). +-define(last_alive_at, last_alive_at). +-define(expiry_interval, expiry_interval). +%% Unique integer used to create unique identities +-define(last_id, last_id). -endif. diff --git a/apps/emqx/src/emqx_persistent_session_ds_gc_worker.erl b/apps/emqx/src/emqx_persistent_session_ds_gc_worker.erl index af387d2ca..a4d1fe638 100644 --- a/apps/emqx/src/emqx_persistent_session_ds_gc_worker.erl +++ b/apps/emqx/src/emqx_persistent_session_ds_gc_worker.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -69,7 +69,7 @@ handle_info(_Info, State) -> {noreply, State}. %%-------------------------------------------------------------------------------- -%% Internal fns +%% Internal functions %%-------------------------------------------------------------------------------- ensure_gc_timer() -> @@ -104,58 +104,33 @@ now_ms() -> erlang:system_time(millisecond). start_gc() -> - do_gc(more). - -zombie_session_ms() -> - NowMS = now_ms(), GCInterval = emqx_config:get([session_persistence, session_gc_interval]), BumpInterval = emqx_config:get([session_persistence, last_alive_update_interval]), TimeThreshold = max(GCInterval, BumpInterval) * 3, - ets:fun2ms( - fun( - #session{ - id = DSSessionId, - last_alive_at = LastAliveAt, - conninfo = #{expiry_interval := EI} - } - ) when - LastAliveAt + EI + TimeThreshold =< NowMS - -> - DSSessionId - end - ). + MinLastAlive = now_ms() - TimeThreshold, + gc_loop(MinLastAlive, emqx_persistent_session_ds_state:make_session_iterator()). -do_gc(more) -> +gc_loop(MinLastAlive, It0) -> GCBatchSize = emqx_config:get([session_persistence, session_gc_batch_size]), - MS = zombie_session_ms(), - {atomic, Next} = mria:transaction(?DS_MRIA_SHARD, fun() -> - Res = mnesia:select(?SESSION_TAB, MS, GCBatchSize, write), - case Res of - '$end_of_table' -> - done; - {[], Cont} -> - %% since `GCBatchsize' is just a "recommendation" for `select', we try only - %% _once_ the continuation and then stop if it yields nothing, to avoid a - %% dead loop. - case mnesia:select(Cont) of - '$end_of_table' -> - done; - {[], _Cont} -> - done; - {DSSessionIds0, _Cont} -> - do_gc_(DSSessionIds0), - more - end; - {DSSessionIds0, _Cont} -> - do_gc_(DSSessionIds0), - more - end - end), - do_gc(Next); -do_gc(done) -> - ok. 
+ case emqx_persistent_session_ds_state:session_iterator_next(It0, GCBatchSize) of + {[], _It} -> + ok; + {Sessions, It} -> + [do_gc(SessionId, MinLastAlive, Metadata) || {SessionId, Metadata} <- Sessions], + gc_loop(MinLastAlive, It) + end. -do_gc_(DSSessionIds) -> - lists:foreach(fun emqx_persistent_session_ds:destroy_session/1, DSSessionIds), - ?tp(ds_session_gc_cleaned, #{session_ids => DSSessionIds}), - ok. +do_gc(SessionId, MinLastAlive, Metadata) -> + #{?last_alive_at := LastAliveAt, ?expiry_interval := EI} = Metadata, + case LastAliveAt + EI < MinLastAlive of + true -> + emqx_persistent_session_ds:destroy_session(SessionId), + ?tp(debug, ds_session_gc_cleaned, #{ + session_id => SessionId, + last_alive_at => LastAliveAt, + expiry_interval => EI, + min_last_alive => MinLastAlive + }); + false -> + ok + end. diff --git a/apps/emqx/src/emqx_persistent_session_ds_inflight.erl b/apps/emqx/src/emqx_persistent_session_ds_inflight.erl new file mode 100644 index 000000000..21194c8c2 --- /dev/null +++ b/apps/emqx/src/emqx_persistent_session_ds_inflight.erl @@ -0,0 +1,347 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_persistent_session_ds_inflight). 
+
+%% API:
+-export([
+    new/1,
+    push/2,
+    pop/1,
+    n_buffered/2,
+    n_inflight/1,
+    puback/2,
+    pubrec/2,
+    pubcomp/2,
+    receive_maximum/1
+]).
+
+%% internal exports:
+-export([]).
+
+-export_type([t/0]).
+
+-include("emqx.hrl").
+-include("emqx_mqtt.hrl").
+
+-ifdef(TEST).
+-include_lib("proper/include/proper.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-endif.
+
+%%================================================================================
+%% Type declarations
+%%================================================================================
+
+-type payload() ::
+    {emqx_persistent_session_ds:seqno() | undefined, emqx_types:message()}
+    | {pubrel, emqx_persistent_session_ds:seqno()}.
+
+-record(inflight, {
+    receive_maximum :: pos_integer(),
+    %% Main queue:
+    queue :: queue:queue(payload()),
+    %% Queues that are used to track sequence numbers of ack tracks:
+    puback_queue :: iqueue(),
+    pubrec_queue :: iqueue(),
+    pubcomp_queue :: iqueue(),
+    %% Counters:
+    n_inflight = 0 :: non_neg_integer(),
+    n_qos0 = 0 :: non_neg_integer(),
+    n_qos1 = 0 :: non_neg_integer(),
+    n_qos2 = 0 :: non_neg_integer()
+}).
+
+-type t() :: #inflight{}.
+
+%%================================================================================
+%% API functions
+%%================================================================================
+
+-spec new(non_neg_integer()) -> t().
+new(ReceiveMaximum) when ReceiveMaximum > 0 ->
+    #inflight{
+        receive_maximum = ReceiveMaximum,
+        queue = queue:new(),
+        puback_queue = iqueue_new(),
+        pubrec_queue = iqueue_new(),
+        pubcomp_queue = iqueue_new()
+    }.
+
+-spec receive_maximum(t()) -> pos_integer().
+receive_maximum(#inflight{receive_maximum = ReceiveMaximum}) ->
+    ReceiveMaximum.
+
+-spec push(payload(), t()) -> t().
+push(Payload = {pubrel, _SeqNo}, Rec = #inflight{queue = Q}) -> + Rec#inflight{queue = queue:in(Payload, Q)}; +push(Payload = {_, Msg}, Rec) -> + #inflight{queue = Q0, n_qos0 = NQos0, n_qos1 = NQos1, n_qos2 = NQos2} = Rec, + Q = queue:in(Payload, Q0), + case Msg#message.qos of + ?QOS_0 -> + Rec#inflight{queue = Q, n_qos0 = NQos0 + 1}; + ?QOS_1 -> + Rec#inflight{queue = Q, n_qos1 = NQos1 + 1}; + ?QOS_2 -> + Rec#inflight{queue = Q, n_qos2 = NQos2 + 1} + end. + +-spec pop(t()) -> {payload(), t()} | undefined. +pop(Rec0) -> + #inflight{ + receive_maximum = ReceiveMaximum, + n_inflight = NInflight, + queue = Q0, + puback_queue = QAck, + pubrec_queue = QRec, + pubcomp_queue = QComp, + n_qos0 = NQos0, + n_qos1 = NQos1, + n_qos2 = NQos2 + } = Rec0, + case NInflight < ReceiveMaximum andalso queue:out(Q0) of + {{value, Payload}, Q} -> + Rec = + case Payload of + {pubrel, _} -> + Rec0#inflight{queue = Q}; + {SeqNo, #message{qos = Qos}} -> + case Qos of + ?QOS_0 -> + Rec0#inflight{queue = Q, n_qos0 = NQos0 - 1}; + ?QOS_1 -> + Rec0#inflight{ + queue = Q, + n_qos1 = NQos1 - 1, + n_inflight = NInflight + 1, + puback_queue = ipush(SeqNo, QAck) + }; + ?QOS_2 -> + Rec0#inflight{ + queue = Q, + n_qos2 = NQos2 - 1, + n_inflight = NInflight + 1, + pubrec_queue = ipush(SeqNo, QRec), + pubcomp_queue = ipush(SeqNo, QComp) + } + end + end, + {Payload, Rec}; + _ -> + undefined + end. + +-spec n_buffered(?QOS_0..?QOS_2 | all, t()) -> non_neg_integer(). +n_buffered(?QOS_0, #inflight{n_qos0 = NQos0}) -> + NQos0; +n_buffered(?QOS_1, #inflight{n_qos1 = NQos1}) -> + NQos1; +n_buffered(?QOS_2, #inflight{n_qos2 = NQos2}) -> + NQos2; +n_buffered(all, #inflight{n_qos0 = NQos0, n_qos1 = NQos1, n_qos2 = NQos2}) -> + NQos0 + NQos1 + NQos2. + +-spec n_inflight(t()) -> non_neg_integer(). +n_inflight(#inflight{n_inflight = NInflight}) -> + NInflight. + +-spec puback(emqx_persistent_session_ds:seqno(), t()) -> {ok, t()} | {error, Expected} when + Expected :: emqx_persistent_session_ds:seqno() | undefined. 
+puback(SeqNo, Rec = #inflight{puback_queue = Q0, n_inflight = N}) -> + case ipop(Q0) of + {{value, SeqNo}, Q} -> + {ok, Rec#inflight{ + puback_queue = Q, + n_inflight = max(0, N - 1) + }}; + {{value, Expected}, _} -> + {error, Expected}; + _ -> + {error, undefined} + end. + +-spec pubcomp(emqx_persistent_session_ds:seqno(), t()) -> {ok, t()} | {error, Expected} when + Expected :: emqx_persistent_session_ds:seqno() | undefined. +pubcomp(SeqNo, Rec = #inflight{pubcomp_queue = Q0, n_inflight = N}) -> + case ipop(Q0) of + {{value, SeqNo}, Q} -> + {ok, Rec#inflight{ + pubcomp_queue = Q, + n_inflight = max(0, N - 1) + }}; + {{value, Expected}, _} -> + {error, Expected}; + _ -> + {error, undefined} + end. + +%% PUBREC doesn't affect inflight window: +%% https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Flow_Control +-spec pubrec(emqx_persistent_session_ds:seqno(), t()) -> {ok, t()} | {error, Expected} when + Expected :: emqx_persistent_session_ds:seqno() | undefined. +pubrec(SeqNo, Rec = #inflight{pubrec_queue = Q0}) -> + case ipop(Q0) of + {{value, SeqNo}, Q} -> + {ok, Rec#inflight{ + pubrec_queue = Q + }}; + {{value, Expected}, _} -> + {error, Expected}; + _ -> + {error, undefined} + end. + +%%================================================================================ +%% Internal functions +%%================================================================================ + +%%%% Interval queue: + +%% "Interval queue": a data structure that represents a queue of +%% monotonically increasing non-negative integers in a compact manner. +%% It is functionally equivalent to a `queue:queue(integer())'. +-record(iqueue, { + %% Head interval: + head = 0 :: integer(), + head_end = 0 :: integer(), + %% Intermediate ranges: + queue :: queue:queue({integer(), integer()}), + %% End interval: + tail = 0 :: integer(), + tail_end = 0 :: integer() +}). + +-type iqueue() :: #iqueue{}. + +iqueue_new() -> + #iqueue{ + queue = queue:new() + }. 
+
+%% @doc Push a value into the interval queue:
+-spec ipush(integer(), iqueue()) -> iqueue().
+ipush(Val, Q = #iqueue{tail_end = Val, head_end = Val}) ->
+    %% Optimization: head and tail intervals overlap, and the newly
+    %% inserted value extends both. Attach it to both intervals, to
+    %% avoid `queue:out' in `ipop':
+    Q#iqueue{
+        tail_end = Val + 1,
+        head_end = Val + 1
+    };
+ipush(Val, Q = #iqueue{tail_end = Val}) ->
+    %% Extend tail interval:
+    Q#iqueue{
+        tail_end = Val + 1
+    };
+ipush(Val, Q = #iqueue{tail = Tl, tail_end = End, queue = IQ0}) when is_number(Val), Val > End ->
+    IQ = queue:in({Tl, End}, IQ0),
+    %% Begin a new interval:
+    Q#iqueue{
+        queue = IQ,
+        tail = Val,
+        tail_end = Val + 1
+    }.
+
+-spec ipop(iqueue()) -> {{value, integer()}, iqueue()} | {empty, iqueue()}.
+ipop(Q = #iqueue{head = Hd, head_end = HdEnd}) when Hd < HdEnd ->
+    %% Head interval is not empty. Consume a value from it:
+    {{value, Hd}, Q#iqueue{head = Hd + 1}};
+ipop(Q = #iqueue{head_end = End, tail_end = End}) ->
+    %% Head interval is fully consumed, and it overlaps with the
+    %% tail interval. It means the queue is empty:
+    {empty, Q};
+ipop(Q = #iqueue{head = Hd0, tail = Tl, tail_end = TlEnd, queue = IQ0}) ->
+    %% Head interval is fully consumed, and it doesn't overlap with
+    %% the tail interval. Replace the head interval with the next
+    %% interval from the queue or with the tail interval:
+    case queue:out(IQ0) of
+        {{value, {Hd, HdEnd}}, IQ} ->
+            ipop(Q#iqueue{head = max(Hd0, Hd), head_end = HdEnd, queue = IQ});
+        {empty, _} ->
+            ipop(Q#iqueue{head = max(Hd0, Tl), head_end = TlEnd})
+    end.
+
+-ifdef(TEST).
+
+%% Test that behavior of iqueue is identical to that of a regular queue of integers:
+iqueue_compat_test_() ->
+    Props = [iqueue_compat()],
+    Opts = [{numtests, 1000}, {to_file, user}, {max_size, 100}],
+    {timeout, 30, [?_assert(proper:quickcheck(Prop, Opts)) || Prop <- Props]}.
+ +%% Generate a sequence of pops and pushes with monotonically +%% increasing arguments, and verify replaying produces equivalent +%% results for the optimized and the reference implementation: +iqueue_compat() -> + ?FORALL( + Cmds, + iqueue_commands(), + begin + lists:foldl( + fun + ({push, N}, {IQ, Q, Acc}) -> + {ipush(N, IQ), queue:in(N, Q), [N | Acc]}; + (pop, {IQ0, Q0, Acc}) -> + {Ret, IQ} = ipop(IQ0), + {Expected, Q} = queue:out(Q0), + ?assertEqual( + Expected, + Ret, + #{ + sequence => lists:reverse(Acc), + q => queue:to_list(Q0), + iq0 => iqueue_print(IQ0), + iq => iqueue_print(IQ) + } + ), + {IQ, Q, [pop | Acc]} + end, + {iqueue_new(), queue:new(), []}, + Cmds + ), + true + end + ). + +iqueue_cmd() -> + oneof([ + pop, + {push, range(1, 3)} + ]). + +iqueue_commands() -> + ?LET( + Cmds, + list(iqueue_cmd()), + process_test_cmds(Cmds, 0) + ). + +process_test_cmds([], _) -> + []; +process_test_cmds([pop | Tl], Cnt) -> + [pop | process_test_cmds(Tl, Cnt)]; +process_test_cmds([{push, N} | Tl], Cnt0) -> + Cnt = Cnt0 + N, + [{push, Cnt} | process_test_cmds(Tl, Cnt)]. + +iqueue_print(I = #iqueue{head = Hd, head_end = HdEnd, queue = Q, tail = Tl, tail_end = TlEnd}) -> + #{ + hd => {Hd, HdEnd}, + tl => {Tl, TlEnd}, + q => queue:to_list(Q) + }. + +-endif. diff --git a/apps/emqx/src/emqx_persistent_session_ds_state.erl b/apps/emqx/src/emqx_persistent_session_ds_state.erl new file mode 100644 index 000000000..4912ebe95 --- /dev/null +++ b/apps/emqx/src/emqx_persistent_session_ds_state.erl @@ -0,0 +1,586 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% @doc CRUD interface for the persistent session +%% +%% This module encapsulates the data related to the state of the +%% inflight messages for the persistent session based on DS. +%% +%% It is responsible for saving, caching, and restoring session state. +%% It is completely devoid of business logic. Not even the default +%% values should be set in this module. +-module(emqx_persistent_session_ds_state). + +-export([create_tables/0]). + +-export([open/1, create_new/1, delete/1, commit/1, format/1, print_session/1, list_sessions/0]). +-export([get_created_at/1, set_created_at/2]). +-export([get_last_alive_at/1, set_last_alive_at/2]). +-export([get_expiry_interval/1, set_expiry_interval/2]). +-export([new_id/1]). +-export([get_stream/2, put_stream/3, del_stream/2, fold_streams/3]). +-export([get_seqno/2, put_seqno/3]). +-export([get_rank/2, put_rank/3, del_rank/2, fold_ranks/3]). +-export([get_subscriptions/1, put_subscription/4, del_subscription/3]). + +-export([make_session_iterator/0, session_iterator_next/2]). + +-export_type([ + t/0, metadata/0, subscriptions/0, seqno_type/0, stream_key/0, rank_key/0, session_iterator/0 +]). + +-include("emqx_mqtt.hrl"). +-include("emqx_persistent_session_ds.hrl"). +-include_lib("snabbkaffe/include/trace.hrl"). +-include_lib("stdlib/include/qlc.hrl"). 
+ +%%================================================================================ +%% Type declarations +%%================================================================================ + +-type subscriptions() :: emqx_topic_gbt:t(_SubId, emqx_persistent_session_ds:subscription()). + +-opaque session_iterator() :: emqx_persistent_session_ds:id() | '$end_of_table'. + +%% Generic key-value wrapper that is used for exporting arbitrary +%% terms to mnesia: +-record(kv, {k, v}). + +%% Persistent map. +%% +%% Pmap accumulates the updates in a term stored in the heap of a +%% process, so they can be committed all at once in a single +%% transaction. +%% +%% It should be possible to make frequent changes to the pmap without +%% stressing Mria. +%% +%% It's implemented as two maps: `cache', and `dirty'. `cache' stores +%% the data, and `dirty' contains information about dirty and deleted +%% keys. When `commit/1' is called, dirty keys are dumped to the +%% tables, and deleted keys are removed from the tables. +-record(pmap, {table, cache, dirty}). + +-type pmap(K, V) :: + #pmap{ + table :: atom(), + cache :: #{K => V}, + dirty :: #{K => dirty | del} + }. + +-type metadata() :: + #{ + ?created_at => emqx_persistent_session_ds:timestamp(), + ?last_alive_at => emqx_persistent_session_ds:timestamp(), + ?expiry_interval => non_neg_integer(), + ?last_id => integer() + }. + +-type seqno_type() :: + ?next(?QOS_1) + | ?dup(?QOS_1) + | ?committed(?QOS_1) + | ?next(?QOS_2) + | ?dup(?QOS_2) + | ?rec + | ?committed(?QOS_2). + +-opaque t() :: #{ + id := emqx_persistent_session_ds:id(), + dirty := boolean(), + metadata := metadata(), + subscriptions := subscriptions(), + seqnos := pmap(seqno_type(), emqx_persistent_session_ds:seqno()), + streams := pmap(emqx_ds:stream(), emqx_persistent_session_ds:stream_state()), + ranks := pmap(term(), integer()) +}. + +-define(session_tab, emqx_ds_session_tab). +-define(subscription_tab, emqx_ds_session_subscriptions). 
+-define(stream_tab, emqx_ds_session_streams).
+-define(seqno_tab, emqx_ds_session_seqnos).
+-define(rank_tab, emqx_ds_session_ranks).
+-define(pmap_tables, [?stream_tab, ?seqno_tab, ?rank_tab, ?subscription_tab]).
+
+%% Enable this flag if you suspect some code breaks the sequence:
+-ifndef(CHECK_SEQNO).
+-define(set_dirty, dirty => true).
+-define(unset_dirty, dirty => false).
+-else.
+-define(set_dirty, dirty => true, '_' => do_seqno()).
+-define(unset_dirty, dirty => false, '_' => do_seqno()).
+-endif.
+
+%%================================================================================
+%% API functions
+%%================================================================================
+
+-spec create_tables() -> ok.
+create_tables() ->
+    ok = mria:create_table(
+        ?session_tab,
+        [
+            {rlog_shard, ?DS_MRIA_SHARD},
+            {type, ordered_set},
+            {storage, rocksdb_copies},
+            {record_name, kv},
+            {attributes, record_info(fields, kv)}
+        ]
+    ),
+    [create_kv_pmap_table(Table) || Table <- ?pmap_tables],
+    mria:wait_for_tables([?session_tab | ?pmap_tables]).
+
+-spec open(emqx_persistent_session_ds:id()) -> {ok, t()} | undefined.
+open(SessionId) ->
+    ro_transaction(fun() ->
+        case kv_restore(?session_tab, SessionId) of
+            [Metadata] ->
+                Rec = #{
+                    id => SessionId,
+                    metadata => Metadata,
+                    subscriptions => read_subscriptions(SessionId),
+                    streams => pmap_open(?stream_tab, SessionId),
+                    seqnos => pmap_open(?seqno_tab, SessionId),
+                    ranks => pmap_open(?rank_tab, SessionId),
+                    ?unset_dirty
+                },
+                {ok, Rec};
+            [] ->
+                undefined
+        end
+    end).
+
+-spec print_session(emqx_persistent_session_ds:id()) -> map() | undefined.
+print_session(SessionId) ->
+    case open(SessionId) of
+        undefined ->
+            undefined;
+        {ok, Session} ->
+            format(Session)
+    end.
+
+-spec format(t()) -> map().
+format(#{ + metadata := Metadata, + subscriptions := SubsGBT, + streams := Streams, + seqnos := Seqnos, + ranks := Ranks +}) -> + Subs = emqx_topic_gbt:fold( + fun(Key, Sub, Acc) -> + maps:put(emqx_topic_gbt:get_topic(Key), Sub, Acc) + end, + #{}, + SubsGBT + ), + #{ + metadata => Metadata, + subscriptions => Subs, + streams => pmap_format(Streams), + seqnos => pmap_format(Seqnos), + ranks => pmap_format(Ranks) + }. + +-spec list_sessions() -> [emqx_persistent_session_ds:id()]. +list_sessions() -> + mnesia:dirty_all_keys(?session_tab). + +-spec delete(emqx_persistent_session_ds:id()) -> ok. +delete(Id) -> + transaction( + fun() -> + [kv_pmap_delete(Table, Id) || Table <- ?pmap_tables], + mnesia:delete(?session_tab, Id, write) + end + ). + +-spec commit(t()) -> t(). +commit(Rec = #{dirty := false}) -> + Rec; +commit( + Rec = #{ + id := SessionId, + metadata := Metadata, + streams := Streams, + seqnos := SeqNos, + ranks := Ranks + } +) -> + check_sequence(Rec), + transaction(fun() -> + kv_persist(?session_tab, SessionId, Metadata), + Rec#{ + streams => pmap_commit(SessionId, Streams), + seqnos => pmap_commit(SessionId, SeqNos), + ranks => pmap_commit(SessionId, Ranks), + ?unset_dirty + } + end). + +-spec create_new(emqx_persistent_session_ds:id()) -> t(). +create_new(SessionId) -> + transaction(fun() -> + delete(SessionId), + #{ + id => SessionId, + metadata => #{}, + subscriptions => emqx_topic_gbt:new(), + streams => pmap_open(?stream_tab, SessionId), + seqnos => pmap_open(?seqno_tab, SessionId), + ranks => pmap_open(?rank_tab, SessionId), + ?set_dirty + } + end). + +%% + +-spec get_created_at(t()) -> emqx_persistent_session_ds:timestamp() | undefined. +get_created_at(Rec) -> + get_meta(?created_at, Rec). + +-spec set_created_at(emqx_persistent_session_ds:timestamp(), t()) -> t(). +set_created_at(Val, Rec) -> + set_meta(?created_at, Val, Rec). + +-spec get_last_alive_at(t()) -> emqx_persistent_session_ds:timestamp() | undefined. 
+get_last_alive_at(Rec) -> + get_meta(?last_alive_at, Rec). + +-spec set_last_alive_at(emqx_persistent_session_ds:timestamp(), t()) -> t(). +set_last_alive_at(Val, Rec) -> + set_meta(?last_alive_at, Val, Rec). + +-spec get_expiry_interval(t()) -> non_neg_integer() | undefined. +get_expiry_interval(Rec) -> + get_meta(?expiry_interval, Rec). + +-spec set_expiry_interval(non_neg_integer(), t()) -> t(). +set_expiry_interval(Val, Rec) -> + set_meta(?expiry_interval, Val, Rec). + +-spec new_id(t()) -> {emqx_persistent_session_ds:subscription_id(), t()}. +new_id(Rec) -> + LastId = + case get_meta(?last_id, Rec) of + undefined -> 0; + N when is_integer(N) -> N + end, + {LastId, set_meta(?last_id, LastId + 1, Rec)}. + +%% + +-spec get_subscriptions(t()) -> subscriptions(). +get_subscriptions(#{subscriptions := Subs}) -> + Subs. + +-spec put_subscription( + emqx_persistent_session_ds:topic_filter(), + _SubId, + emqx_persistent_session_ds:subscription(), + t() +) -> t(). +put_subscription(TopicFilter, SubId, Subscription, Rec = #{id := Id, subscriptions := Subs0}) -> + %% Note: currently changes to the subscriptions are persisted immediately. + Key = {TopicFilter, SubId}, + transaction(fun() -> kv_pmap_persist(?subscription_tab, Id, Key, Subscription) end), + Subs = emqx_topic_gbt:insert(TopicFilter, SubId, Subscription, Subs0), + Rec#{subscriptions => Subs}. + +-spec del_subscription(emqx_persistent_session_ds:topic_filter(), _SubId, t()) -> t(). +del_subscription(TopicFilter, SubId, Rec = #{id := Id, subscriptions := Subs0}) -> + %% Note: currently the subscriptions are persisted immediately. + Key = {TopicFilter, SubId}, + transaction(fun() -> kv_pmap_delete(?subscription_tab, Id, Key) end), + Subs = emqx_topic_gbt:delete(TopicFilter, SubId, Subs0), + Rec#{subscriptions => Subs}. + +%% + +-type stream_key() :: {emqx_persistent_session_ds:subscription_id(), _StreamId}. + +-spec get_stream(stream_key(), t()) -> + emqx_persistent_session_ds:stream_state() | undefined. 
%% @doc Look up the replay state of a stream by its key.
get_stream(Key, Rec) ->
    gen_get(streams, Key, Rec).

%% @doc Insert or update the replay state of a stream.
-spec put_stream(stream_key(), emqx_persistent_session_ds:stream_state(), t()) -> t().
put_stream(Key, Val, Rec) ->
    gen_put(streams, Key, Val, Rec).

%% @doc Delete a stream by its key.
-spec del_stream(stream_key(), t()) -> t().
del_stream(Key, Rec) ->
    gen_del(streams, Key, Rec).

%% @doc Fold over all cached streams.
-spec fold_streams(fun(), Acc, t()) -> Acc.
fold_streams(Fun, Acc, Rec) ->
    gen_fold(streams, Fun, Acc, Rec).

%%

-spec get_seqno(seqno_type(), t()) -> emqx_persistent_session_ds:seqno() | undefined.
get_seqno(Key, Rec) ->
    gen_get(seqnos, Key, Rec).

-spec put_seqno(seqno_type(), emqx_persistent_session_ds:seqno(), t()) -> t().
put_seqno(Key, Val, Rec) ->
    gen_put(seqnos, Key, Val, Rec).

%%

-type rank_key() :: {emqx_persistent_session_ds:subscription_id(), emqx_ds:rank_x()}.

-spec get_rank(rank_key(), t()) -> integer() | undefined.
get_rank(Key, Rec) ->
    gen_get(ranks, Key, Rec).

-spec put_rank(rank_key(), integer(), t()) -> t().
put_rank(Key, Val, Rec) ->
    gen_put(ranks, Key, Val, Rec).

-spec del_rank(rank_key(), t()) -> t().
del_rank(Key, Rec) ->
    gen_del(ranks, Key, Rec).

-spec fold_ranks(fun(), Acc, t()) -> Acc.
fold_ranks(Fun, Acc, Rec) ->
    gen_fold(ranks, Fun, Acc, Rec).

%% @doc Create an iterator over the sessions stored in the DB. The
%% iterator is simply a position in the session table:
%% `mnesia:dirty_first/1' already returns either the first key or
%% `'$end_of_table'', which are exactly the two iterator states, so
%% no further wrapping is needed.
-spec make_session_iterator() -> session_iterator().
make_session_iterator() ->
    mnesia:dirty_first(?session_tab).

%% @doc Advance the session iterator, returning up to `N' sessions
%% as `{Id, Metadata}' pairs, together with the new iterator state.
-spec session_iterator_next(session_iterator(), pos_integer()) ->
    {[{emqx_persistent_session_ds:id(), metadata()}], session_iterator()}.
session_iterator_next(Cursor, 0) ->
    {[], Cursor};
session_iterator_next('$end_of_table', _N) ->
    {[], '$end_of_table'};
session_iterator_next(Cursor0, N) ->
    %% The dirty read may return [] if the session was deleted
    %% concurrently; in that case this cursor position is skipped.
    ThisVal = [
        {Cursor0, Metadata}
     || #kv{v = Metadata} <- mnesia:dirty_read(?session_tab, Cursor0)
    ],
    {NextVals, Cursor} = session_iterator_next(mnesia:dirty_next(?session_tab, Cursor0), N - 1),
    {ThisVal ++ NextVals, Cursor}.

%%================================================================================
%% Internal functions
%%================================================================================

%% All mnesia reads and writes are passed through this function.
%% Backward compatibility issues can be handled here.
encoder(encode, _Table, Term) ->
    Term;
encoder(decode, _Table, Term) ->
    Term.

%%

%% Read a metadata field, or `undefined' when unset:
get_meta(K, #{metadata := Meta}) ->
    maps:get(K, Meta, undefined).

%% Write a metadata field and mark the record dirty:
set_meta(K, V, Rec = #{metadata := Meta}) ->
    check_sequence(Rec#{metadata => Meta#{K => V}, ?set_dirty}).

%%

%% Generic PMAP accessors, dispatching on the field name of the
%% session record:

gen_get(Field, Key, Rec) ->
    check_sequence(Rec),
    pmap_get(Key, maps:get(Field, Rec)).

gen_fold(Field, Fun, Acc, Rec) ->
    check_sequence(Rec),
    pmap_fold(Fun, Acc, maps:get(Field, Rec)).

gen_put(Field, Key, Val, Rec) ->
    check_sequence(Rec),
    maps:update_with(
        Field,
        fun(PMap) -> pmap_put(Key, Val, PMap) end,
        Rec#{?set_dirty}
    ).

gen_del(Field, Key, Rec) ->
    check_sequence(Rec),
    maps:update_with(
        Field,
        fun(PMap) -> pmap_del(Key, PMap) end,
        Rec#{?set_dirty}
    ).

%%

%% Rebuild the subscription GBT from the persisted records:
read_subscriptions(SessionId) ->
    lists:foldl(
        fun({{TopicFilter, SubId}, Subscription}, Acc) ->
            emqx_topic_gbt:insert(TopicFilter, SubId, Subscription, Acc)
        end,
        emqx_topic_gbt:new(),
        kv_pmap_restore(?subscription_tab, SessionId)
    ).

%%

%% @doc Open a PMAP and fill the clean area with the data from DB.
%% This function should be run in a transaction.
-spec pmap_open(atom(), emqx_persistent_session_ds:id()) -> pmap(_K, _V).
pmap_open(Table, SessionId) ->
    #pmap{
        table = Table,
        cache = maps:from_list(kv_pmap_restore(Table, SessionId)),
        dirty = #{}
    }.

-spec pmap_get(K, pmap(K, V)) -> V | undefined.
pmap_get(K, #pmap{cache = Cache}) ->
    maps:get(K, Cache, undefined).

-spec pmap_put(K, V, pmap(K, V)) -> pmap(K, V).
%% Store a key in the PMAP cache and mark it dirty:
pmap_put(K, V, Pmap = #pmap{dirty = Dirty, cache = Cache}) ->
    Pmap#pmap{
        cache = Cache#{K => V},
        dirty = Dirty#{K => dirty}
    }.

%% Remove a key from the PMAP cache and schedule its deletion:
-spec pmap_del(K, pmap(K, V)) -> pmap(K, V).
pmap_del(Key, Pmap = #pmap{dirty = Dirty, cache = Cache}) ->
    Pmap#pmap{
        cache = maps:remove(Key, Cache),
        dirty = Dirty#{Key => del}
    }.

-spec pmap_fold(fun((K, V, A) -> A), A, pmap(K, V)) -> A.
pmap_fold(Fun, Acc, #pmap{cache = Cache}) ->
    maps:fold(Fun, Acc, Cache).

%% @doc Flush all pending changes of the PMAP to mnesia and return
%% the PMAP with an empty dirty set. Should run in a transaction.
-spec pmap_commit(emqx_persistent_session_ds:id(), pmap(K, V)) -> pmap(K, V).
pmap_commit(SessionId, Pmap = #pmap{table = Tab, dirty = Dirty, cache = Cache}) ->
    maps:foreach(
        fun
            (K, del) ->
                kv_pmap_delete(Tab, SessionId, K);
            (K, dirty) ->
                kv_pmap_persist(Tab, SessionId, K, maps:get(K, Cache))
        end,
        Dirty
    ),
    Pmap#pmap{dirty = #{}}.

%% Expose the cached contents of the PMAP as a plain map:
-spec pmap_format(pmap(_K, _V)) -> map().
pmap_format(#pmap{cache = Cache}) ->
    Cache.

%% Functions dealing with set tables:

kv_persist(Tab, SessionId, Val0) ->
    Val = encoder(encode, Tab, Val0),
    mnesia:write(Tab, #kv{k = SessionId, v = Val}, write).

kv_restore(Tab, SessionId) ->
    [encoder(decode, Tab, V) || #kv{v = V} <- mnesia:read(Tab, SessionId)].

%% Functions dealing with bags:

%% @doc Create a mnesia table for the PMAP:
-spec create_kv_pmap_table(atom()) -> ok.
create_kv_pmap_table(Table) ->
    mria:create_table(Table, [
        {type, ordered_set},
        {rlog_shard, ?DS_MRIA_SHARD},
        {storage, rocksdb_copies},
        {record_name, kv},
        {attributes, record_info(fields, kv)}
    ]).

kv_pmap_persist(Tab, SessionId, Key, Val0) ->
    %% Write data to mnesia:
    Val = encoder(encode, Tab, Val0),
    mnesia:write(Tab, #kv{k = {SessionId, Key}, v = Val}, write).

kv_pmap_restore(Table, SessionId) ->
    %% Select every `{SessionId, Key}' entry; the fixed session-id
    %% prefix in the match spec keys the select on the primary key:
    MS = [{#kv{k = {SessionId, '$1'}, v = '$2'}, [], [{{'$1', '$2'}}]}],
    [{K, encoder(decode, Table, V)} || {K, V} <- mnesia:select(Table, MS, read)].
+ +kv_pmap_delete(Table, SessionId) -> + MS = [{#kv{k = {SessionId, '$1'}, _ = '_'}, [], ['$1']}], + Keys = mnesia:select(Table, MS, read), + [mnesia:delete(Table, {SessionId, K}, write) || K <- Keys], + ok. + +kv_pmap_delete(Table, SessionId, Key) -> + %% Note: this match spec uses a fixed primary key, so it doesn't + %% require a table scan, and the transaction doesn't grab the + %% whole table lock: + mnesia:delete(Table, {SessionId, Key}, write). + +%% + +transaction(Fun) -> + mria:async_dirty(?DS_MRIA_SHARD, Fun). + +ro_transaction(Fun) -> + mria:async_dirty(?DS_MRIA_SHARD, Fun). + +%% transaction(Fun) -> +%% case mnesia:is_transaction() of +%% true -> +%% Fun(); +%% false -> +%% {atomic, Res} = mria:transaction(?DS_MRIA_SHARD, Fun), +%% Res +%% end. + +%% ro_transaction(Fun) -> +%% {atomic, Res} = mria:ro_transaction(?DS_MRIA_SHARD, Fun), +%% Res. + +-compile({inline, check_sequence/1}). + +-ifdef(CHECK_SEQNO). +do_seqno() -> + case erlang:get(?MODULE) of + undefined -> + put(?MODULE, 0), + 0; + N -> + put(?MODULE, N + 1), + N + 1 + end. + +check_sequence(A = #{'_' := N}) -> + N = erlang:get(?MODULE), + A. +-else. +check_sequence(A) -> + A. +-endif. diff --git a/apps/emqx/src/emqx_persistent_session_ds_stream_scheduler.erl b/apps/emqx/src/emqx_persistent_session_ds_stream_scheduler.erl new file mode 100644 index 000000000..286d32ef4 --- /dev/null +++ b/apps/emqx/src/emqx_persistent_session_ds_stream_scheduler.erl @@ -0,0 +1,380 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_persistent_session_ds_stream_scheduler). + +%% API: +-export([find_new_streams/1, find_replay_streams/1, is_fully_acked/2]). +-export([renew_streams/1, on_unsubscribe/2]). + +%% behavior callbacks: +-export([]). + +%% internal exports: +-export([]). + +-export_type([]). + +-include_lib("emqx/include/logger.hrl"). +-include("emqx_mqtt.hrl"). +-include("emqx_persistent_session_ds.hrl"). + +%%================================================================================ +%% Type declarations +%%================================================================================ + +%%================================================================================ +%% API functions +%%================================================================================ + +%% @doc Find the streams that have uncommitted (in-flight) messages. +%% Return them in the order they were previously replayed. +-spec find_replay_streams(emqx_persistent_session_ds_state:t()) -> + [{emqx_persistent_session_ds_state:stream_key(), emqx_persistent_session_ds:stream_state()}]. +find_replay_streams(S) -> + Comm1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S), + Comm2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S), + %% 1. 
Find the streams that aren't fully acked + Streams = emqx_persistent_session_ds_state:fold_streams( + fun(Key, Stream, Acc) -> + case is_fully_acked(Comm1, Comm2, Stream) of + false -> + [{Key, Stream} | Acc]; + true -> + Acc + end + end, + [], + S + ), + lists:sort(fun compare_streams/2, Streams). + +%% @doc Find streams from which the new messages can be fetched. +%% +%% Currently it amounts to the streams that don't have any inflight +%% messages, since for performance reasons we keep only one record of +%% in-flight messages per stream, and we don't want to overwrite these +%% records prematurely. +%% +%% This function is non-detereministic: it randomizes the order of +%% streams to ensure fair replay of different topics. +-spec find_new_streams(emqx_persistent_session_ds_state:t()) -> + [{emqx_persistent_session_ds_state:stream_key(), emqx_persistent_session_ds:stream_state()}]. +find_new_streams(S) -> + %% FIXME: this function is currently very sensitive to the + %% consistency of the packet IDs on both broker and client side. + %% + %% If the client fails to properly ack packets due to a bug, or a + %% network issue, or if the state of streams and seqno tables ever + %% become de-synced, then this function will return an empty list, + %% and the replay cannot progress. + %% + %% In other words, this function is not robust, and we should find + %% some way to get the replays un-stuck at the cost of potentially + %% losing messages during replay (or just kill the stuck channel + %% after timeout?) + Comm1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S), + Comm2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S), + shuffle( + emqx_persistent_session_ds_state:fold_streams( + fun + (_Key, #srs{it_end = end_of_stream}, Acc) -> + Acc; + (Key, Stream, Acc) -> + case is_fully_acked(Comm1, Comm2, Stream) andalso not Stream#srs.unsubscribed of + true -> + [{Key, Stream} | Acc]; + false -> + Acc + end + end, + [], + S + ) + ). 
+ +%% @doc This function makes the session aware of the new streams. +%% +%% It has the following properties: +%% +%% 1. For each RankX, it keeps only the streams with the same RankY. +%% +%% 2. For each RankX, it never advances RankY until _all_ streams with +%% the same RankX are replayed. +%% +%% 3. Once all streams with the given rank are replayed, it advances +%% the RankY to the smallest known RankY that is greater than replayed +%% RankY. +%% +%% 4. If the RankX has never been replayed, it selects the streams +%% with the smallest RankY. +%% +%% This way, messages from the same topic/shard are never reordered. +-spec renew_streams(emqx_persistent_session_ds_state:t()) -> emqx_persistent_session_ds_state:t(). +renew_streams(S0) -> + S1 = remove_unsubscribed_streams(S0), + S2 = remove_fully_replayed_streams(S1), + emqx_persistent_session_ds_subs:fold( + fun + (Key, #{start_time := StartTime, id := SubId, deleted := false}, Acc) -> + TopicFilter = emqx_topic:words(Key), + Streams = select_streams( + SubId, + emqx_ds:get_streams(?PERSISTENT_MESSAGE_DB, TopicFilter, StartTime), + Acc + ), + lists:foldl( + fun(I, Acc1) -> + ensure_iterator(TopicFilter, StartTime, SubId, I, Acc1) + end, + Acc, + Streams + ); + (_Key, _DeletedSubscription, Acc) -> + Acc + end, + S2, + S2 + ). + +-spec on_unsubscribe( + emqx_persistent_session_ds:subscription_id(), emqx_persistent_session_ds_state:t() +) -> + emqx_persistent_session_ds_state:t(). +on_unsubscribe(SubId, S0) -> + %% NOTE: this function only marks the streams for deletion, + %% instead of outright deleting them. 
+ %% + %% It's done for two reasons: + %% + %% - MQTT standard states that the broker MUST process acks for + %% all sent messages, and it MAY keep on sending buffered + %% messages: + %% https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901186 + %% + %% - Deleting the streams may lead to gaps in the sequence number + %% series, and lead to problems with acknowledgement tracking, we + %% avoid that by delaying the deletion. + %% + %% When the stream is marked for deletion, the session won't fetch + %% _new_ batches from it. Actual deletion is done by + %% `renew_streams', when it detects that all in-flight messages + %% from the stream have been acked by the client. + emqx_persistent_session_ds_state:fold_streams( + fun(Key, Srs, Acc) -> + case Key of + {SubId, _Stream} -> + %% This stream belongs to a deleted subscription. + %% Mark for deletion: + emqx_persistent_session_ds_state:put_stream( + Key, Srs#srs{unsubscribed = true}, Acc + ); + _ -> + Acc + end + end, + S0, + S0 + ). + +-spec is_fully_acked( + emqx_persistent_session_ds:stream_state(), emqx_persistent_session_ds_state:t() +) -> boolean(). +is_fully_acked(Srs, S) -> + CommQos1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S), + CommQos2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S), + is_fully_acked(CommQos1, CommQos2, Srs). 
+ +%%================================================================================ +%% Internal functions +%%================================================================================ + +ensure_iterator(TopicFilter, StartTime, SubId, {{RankX, RankY}, Stream}, S) -> + Key = {SubId, Stream}, + case emqx_persistent_session_ds_state:get_stream(Key, S) of + undefined -> + ?SLOG(debug, #{ + msg => new_stream, key => Key, stream => Stream + }), + {ok, Iterator} = emqx_ds:make_iterator( + ?PERSISTENT_MESSAGE_DB, Stream, TopicFilter, StartTime + ), + NewStreamState = #srs{ + rank_x = RankX, + rank_y = RankY, + it_begin = Iterator, + it_end = Iterator + }, + emqx_persistent_session_ds_state:put_stream(Key, NewStreamState, S); + #srs{} -> + S + end. + +select_streams(SubId, Streams0, S) -> + TopicStreamGroups = maps:groups_from_list(fun({{X, _}, _}) -> X end, Streams0), + maps:fold( + fun(RankX, Streams, Acc) -> + select_streams(SubId, RankX, Streams, S) ++ Acc + end, + [], + TopicStreamGroups + ). + +select_streams(SubId, RankX, Streams0, S) -> + %% 1. Find the streams with the rank Y greater than the recorded one: + Streams1 = + case emqx_persistent_session_ds_state:get_rank({SubId, RankX}, S) of + undefined -> + Streams0; + ReplayedY -> + [I || I = {{_, Y}, _} <- Streams0, Y > ReplayedY] + end, + %% 2. Sort streams by rank Y: + Streams = lists:sort( + fun({{_, Y1}, _}, {{_, Y2}, _}) -> + Y1 =< Y2 + end, + Streams1 + ), + %% 3. Select streams with the least rank Y: + case Streams of + [] -> + []; + [{{_, MinRankY}, _} | _] -> + lists:takewhile(fun({{_, Y}, _}) -> Y =:= MinRankY end, Streams) + end. + +%% @doc Remove fully acked streams for the deleted subscriptions. +-spec remove_unsubscribed_streams(emqx_persistent_session_ds_state:t()) -> + emqx_persistent_session_ds_state:t(). 
+remove_unsubscribed_streams(S0) -> + CommQos1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S0), + CommQos2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S0), + emqx_persistent_session_ds_state:fold_streams( + fun(Key, ReplayState, S1) -> + case + ReplayState#srs.unsubscribed andalso is_fully_acked(CommQos1, CommQos2, ReplayState) + of + true -> + emqx_persistent_session_ds_state:del_stream(Key, S1); + false -> + S1 + end + end, + S0, + S0 + ). + +%% @doc Advance RankY for each RankX that doesn't have any unreplayed +%% streams. +%% +%% Drop streams with the fully replayed rank. This function relies on +%% the fact that all streams with the same RankX have also the same +%% RankY. +-spec remove_fully_replayed_streams(emqx_persistent_session_ds_state:t()) -> + emqx_persistent_session_ds_state:t(). +remove_fully_replayed_streams(S0) -> + CommQos1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S0), + CommQos2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S0), + %% 1. For each subscription, find the X ranks that were fully replayed: + Groups = emqx_persistent_session_ds_state:fold_streams( + fun({SubId, _Stream}, StreamState = #srs{rank_x = RankX, rank_y = RankY}, Acc) -> + Key = {SubId, RankX}, + case {is_fully_replayed(CommQos1, CommQos2, StreamState), Acc} of + {_, #{Key := false}} -> + Acc; + {true, #{Key := {true, RankY}}} -> + Acc; + {true, #{Key := {true, _RankYOther}}} -> + %% assert, should never happen + error(multiple_rank_y_for_rank_x); + {true, #{}} -> + Acc#{Key => {true, RankY}}; + {false, #{}} -> + Acc#{Key => false} + end + end, + #{}, + S0 + ), + %% 2. Advance rank y for each fully replayed set of streams: + S1 = maps:fold( + fun + (Key, {true, RankY}, Acc) -> + emqx_persistent_session_ds_state:put_rank(Key, RankY, Acc); + (_, _, Acc) -> + Acc + end, + S0, + Groups + ), + %% 3. 
Remove the fully replayed streams: + emqx_persistent_session_ds_state:fold_streams( + fun(Key = {SubId, _Stream}, #srs{rank_x = RankX, rank_y = RankY}, Acc) -> + case emqx_persistent_session_ds_state:get_rank({SubId, RankX}, Acc) of + undefined -> + Acc; + MinRankY when RankY =< MinRankY -> + ?SLOG(debug, #{ + msg => del_fully_preplayed_stream, + key => Key, + rank => {RankX, RankY}, + min => MinRankY + }), + emqx_persistent_session_ds_state:del_stream(Key, Acc); + _ -> + Acc + end + end, + S1, + S1 + ). + +%% @doc Compare the streams by the order in which they were replayed. +compare_streams( + {_KeyA, #srs{first_seqno_qos1 = A1, first_seqno_qos2 = A2}}, + {_KeyB, #srs{first_seqno_qos1 = B1, first_seqno_qos2 = B2}} +) -> + case A1 =:= B1 of + true -> + A2 =< B2; + false -> + A1 < B1 + end. + +is_fully_replayed(Comm1, Comm2, S = #srs{it_end = It}) -> + It =:= end_of_stream andalso is_fully_acked(Comm1, Comm2, S). + +is_fully_acked(_, _, #srs{ + first_seqno_qos1 = Q1, last_seqno_qos1 = Q1, first_seqno_qos2 = Q2, last_seqno_qos2 = Q2 +}) -> + %% Streams where the last chunk doesn't contain any QoS1 and 2 + %% messages are considered fully acked: + true; +is_fully_acked(Comm1, Comm2, #srs{last_seqno_qos1 = S1, last_seqno_qos2 = S2}) -> + (Comm1 >= S1) andalso (Comm2 >= S2). + +-spec shuffle([A]) -> [A]. +shuffle(L0) -> + L1 = lists:map( + fun(A) -> + %% maybe topic/stream prioritization could be introduced here? + {rand:uniform(), A} + end, + L0 + ), + L2 = lists:sort(L1), + {_, L} = lists:unzip(L2), + L. diff --git a/apps/emqx/src/emqx_persistent_session_ds_subs.erl b/apps/emqx/src/emqx_persistent_session_ds_subs.erl new file mode 100644 index 000000000..92f17b108 --- /dev/null +++ b/apps/emqx/src/emqx_persistent_session_ds_subs.erl @@ -0,0 +1,154 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% @doc This module encapsulates the data related to the client's +%% subscriptions. It tries to reppresent the subscriptions as if they +%% were a simple key-value map. +%% +%% In reality, however, the session has to retain old the +%% subscriptions for longer to ensure the consistency of message +%% replay. +-module(emqx_persistent_session_ds_subs). + +%% API: +-export([on_subscribe/3, on_unsubscribe/3, gc/1, lookup/2, to_map/1, fold/3, fold_all/3]). + +-export_type([]). + +%%================================================================================ +%% Type declarations +%%================================================================================ + +%%================================================================================ +%% API functions +%%================================================================================ + +%% @doc Process a new subscription +-spec on_subscribe( + emqx_persistent_session_ds:topic_filter(), + emqx_persistent_session_ds:subscription(), + emqx_persistent_session_ds_state:t() +) -> + emqx_persistent_session_ds_state:t(). +on_subscribe(TopicFilter, Subscription, S) -> + emqx_persistent_session_ds_state:put_subscription(TopicFilter, [], Subscription, S). 

%% @doc Process UNSUBSCRIBE
-spec on_unsubscribe(
    emqx_persistent_session_ds:topic_filter(),
    emqx_persistent_session_ds:subscription(),
    emqx_persistent_session_ds_state:t()
) ->
    emqx_persistent_session_ds_state:t().
on_unsubscribe(TopicFilter, Subscription0, S0) ->
    %% Note: we cannot delete the subscription immediately, since its
    %% metadata can be used during replay (see `process_batch'). We
    %% instead mark it as deleted, and let the `gc' function dispose
    %% of it later:
    Subscription = Subscription0#{deleted => true},
    emqx_persistent_session_ds_state:put_subscription(TopicFilter, [], Subscription, S0).

%% @doc Remove subscriptions that have been marked for deletion, and
%% that don't have any unacked messages:
-spec gc(emqx_persistent_session_ds_state:t()) -> emqx_persistent_session_ds_state:t().
gc(S0) ->
    fold_all(
        fun(TopicFilter, #{id := SubId, deleted := Deleted}, Acc) ->
            case Deleted andalso has_no_unacked_streams(SubId, S0) of
                true ->
                    emqx_persistent_session_ds_state:del_subscription(TopicFilter, [], Acc);
                false ->
                    Acc
            end
        end,
        S0,
        S0
    ).

%% @doc Look up an active subscription by topic filter. Subscriptions
%% that are marked for deletion are treated as absent:
-spec lookup(emqx_persistent_session_ds:topic_filter(), emqx_persistent_session_ds_state:t()) ->
    emqx_persistent_session_ds:subscription() | undefined.
lookup(TopicFilter, S) ->
    Subs = emqx_persistent_session_ds_state:get_subscriptions(S),
    case emqx_topic_gbt:lookup(TopicFilter, [], Subs, undefined) of
        #{deleted := true} ->
            undefined;
        Sub ->
            Sub
    end.

%% @doc Convert active subscriptions to a map, for information
%% purpose:
-spec to_map(emqx_persistent_session_ds_state:t()) -> map().
to_map(S) ->
    fold(
        fun(TopicFilter, #{props := Props}, Acc) -> Acc#{TopicFilter => Props} end,
        #{},
        S
    ).

%% @doc Fold over active subscriptions:
-spec fold(
    fun((emqx_types:topic(), emqx_persistent_session_ds:subscription(), Acc) -> Acc),
    Acc,
    emqx_persistent_session_ds_state:t()
) ->
    Acc.
+fold(Fun, AccIn, S) -> + fold_all( + fun(TopicFilter, Sub = #{deleted := Deleted}, Acc) -> + case Deleted of + true -> Acc; + false -> Fun(TopicFilter, Sub, Acc) + end + end, + AccIn, + S + ). + +%% @doc Fold over all subscriptions, including inactive ones: +-spec fold_all( + fun((emqx_types:topic(), emqx_persistent_session_ds:subscription(), Acc) -> Acc), + Acc, + emqx_persistent_session_ds_state:t() +) -> + Acc. +fold_all(Fun, AccIn, S) -> + Subs = emqx_persistent_session_ds_state:get_subscriptions(S), + emqx_topic_gbt:fold( + fun(Key, Sub, Acc) -> Fun(emqx_topic_gbt:get_topic(Key), Sub, Acc) end, + AccIn, + Subs + ). + +%%================================================================================ +%% Internal functions +%%================================================================================ + +-spec has_no_unacked_streams( + emqx_persistent_session_ds:subscription_id(), emqx_persistent_session_ds_state:t() +) -> boolean(). +has_no_unacked_streams(SubId, S) -> + emqx_persistent_session_ds_state:fold_streams( + fun + ({SID, _Stream}, Srs, Acc) when SID =:= SubId -> + emqx_persistent_session_ds_stream_scheduler:is_fully_acked(Srs, S) andalso Acc; + (_StreamKey, _Srs, Acc) -> + Acc + end, + true, + S + ). diff --git a/apps/emqx/src/emqx_persistent_session_ds_sup.erl b/apps/emqx/src/emqx_persistent_session_ds_sup.erl index 5bd620e8b..7b3fb7abb 100644 --- a/apps/emqx/src/emqx_persistent_session_ds_sup.erl +++ b/apps/emqx/src/emqx_persistent_session_ds_sup.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
@@ -48,13 +48,14 @@ init(Opts) -> do_init(_Opts) -> SupFlags = #{ - strategy => rest_for_one, + strategy => one_for_one, intensity => 10, period => 2, auto_shutdown => never }, CoreChildren = [ - worker(gc_worker, emqx_persistent_session_ds_gc_worker, []) + worker(session_gc_worker, emqx_persistent_session_ds_gc_worker, []), + worker(message_gc_worker, emqx_persistent_message_ds_gc_worker, []) ], Children = case mria_rlog:role() of diff --git a/apps/emqx/src/emqx_pool.erl b/apps/emqx/src/emqx_pool.erl index 1cb5f429c..39c585133 100644 --- a/apps/emqx/src/emqx_pool.erl +++ b/apps/emqx/src/emqx_pool.erl @@ -28,11 +28,15 @@ submit/1, submit/2, async_submit/1, - async_submit/2 + async_submit/2, + submit_to_pool/2, + submit_to_pool/3, + async_submit_to_pool/2, + async_submit_to_pool/3 ]). -ifdef(TEST). --export([worker/0, flush_async_tasks/0]). +-export([worker/0, flush_async_tasks/0, flush_async_tasks/1]). -endif. %% gen_server callbacks @@ -57,7 +61,7 @@ -spec start_link(atom(), pos_integer()) -> startlink_ret(). start_link(Pool, Id) -> gen_server:start_link( - {local, emqx_utils:proc_name(?MODULE, Id)}, + {local, emqx_utils:proc_name(Pool, Id)}, ?MODULE, [Pool, Id], [{hibernate_after, 1000}] @@ -66,32 +70,48 @@ start_link(Pool, Id) -> %% @doc Submit work to the pool. -spec submit(task()) -> any(). submit(Task) -> - call({submit, Task}). + submit_to_pool(?POOL, Task). -spec submit(fun(), list(any())) -> any(). submit(Fun, Args) -> - call({submit, {Fun, Args}}). - -%% @private -call(Req) -> - gen_server:call(worker(), Req, infinity). + submit_to_pool(?POOL, Fun, Args). %% @doc Submit work to the pool asynchronously. -spec async_submit(task()) -> ok. async_submit(Task) -> - cast({async_submit, Task}). + async_submit_to_pool(?POOL, Task). -spec async_submit(fun(), list(any())) -> ok. async_submit(Fun, Args) -> - cast({async_submit, {Fun, Args}}). + async_submit_to_pool(?POOL, Fun, Args). + +-spec submit_to_pool(any(), task()) -> any(). 
+submit_to_pool(Pool, Task) -> + call(Pool, {submit, Task}). + +-spec submit_to_pool(any(), fun(), list(any())) -> any(). +submit_to_pool(Pool, Fun, Args) -> + call(Pool, {submit, {Fun, Args}}). + +-spec async_submit_to_pool(any(), task()) -> ok. +async_submit_to_pool(Pool, Task) -> + cast(Pool, {async_submit, Task}). + +-spec async_submit_to_pool(any(), fun(), list(any())) -> ok. +async_submit_to_pool(Pool, Fun, Args) -> + cast(Pool, {async_submit, {Fun, Args}}). %% @private -cast(Msg) -> - gen_server:cast(worker(), Msg). +call(Pool, Req) -> + gen_server:call(worker(Pool), Req, infinity). %% @private -worker() -> - gproc_pool:pick_worker(?POOL). +cast(Pool, Msg) -> + gen_server:cast(worker(Pool), Msg). + +%% @private +worker(Pool) -> + gproc_pool:pick_worker(Pool). %%-------------------------------------------------------------------- %% gen_server callbacks @@ -146,15 +166,25 @@ run(Fun) when is_function(Fun) -> Fun(). -ifdef(TEST). + +worker() -> + worker(?POOL). + +flush_async_tasks() -> + flush_async_tasks(?POOL). + %% This help function creates a large enough number of async tasks %% to force flush the pool workers. %% The number of tasks should be large enough to ensure all workers have %% the chance to work on at least one of the tasks. -flush_async_tasks() -> +flush_async_tasks(Pool) -> Ref = make_ref(), Self = self(), L = lists:seq(1, 997), - lists:foreach(fun(I) -> emqx_pool:async_submit(fun() -> Self ! {done, Ref, I} end, []) end, L), + lists:foreach( + fun(I) -> emqx_pool:async_submit_to_pool(Pool, fun() -> Self ! {done, Ref, I} end, []) end, + L + ), lists:foreach( fun(I) -> receive diff --git a/apps/emqx/src/emqx_router.erl b/apps/emqx/src/emqx_router.erl index e7ab37ace..3576ad679 100644 --- a/apps/emqx/src/emqx_router.erl +++ b/apps/emqx/src/emqx_router.erl @@ -24,9 +24,7 @@ -include_lib("emqx/include/emqx_router.hrl"). %% Mnesia bootstrap --export([mnesia/1]). - --boot_mnesia({mnesia, [boot]}). +-export([create_tables/0]). -export([start_link/2]). 
@@ -123,7 +121,7 @@ %% Mnesia bootstrap %%-------------------------------------------------------------------- -mnesia(boot) -> +create_tables() -> mria_config:set_dirty_shard(?ROUTE_SHARD, true), ok = mria:create_table(?ROUTE_TAB, [ {type, bag}, @@ -151,7 +149,8 @@ mnesia(boot) -> {decentralized_counters, true} ]} ]} - ]). + ]), + [?ROUTE_TAB, ?ROUTE_TAB_FILTERS]. %%-------------------------------------------------------------------- %% Start a router diff --git a/apps/emqx/src/emqx_router_helper.erl b/apps/emqx/src/emqx_router_helper.erl index c43192d4e..48e5bfba4 100644 --- a/apps/emqx/src/emqx_router_helper.erl +++ b/apps/emqx/src/emqx_router_helper.erl @@ -25,9 +25,7 @@ -include_lib("snabbkaffe/include/snabbkaffe.hrl"). %% Mnesia bootstrap --export([mnesia/1]). - --boot_mnesia({mnesia, [boot]}). +-export([create_tables/0]). %% API -export([ @@ -63,7 +61,7 @@ %% Mnesia bootstrap %%-------------------------------------------------------------------- -mnesia(boot) -> +create_tables() -> ok = mria:create_table(?ROUTING_NODE, [ {type, set}, {rlog_shard, ?ROUTE_SHARD}, @@ -71,7 +69,8 @@ mnesia(boot) -> {record_name, routing_node}, {attributes, record_info(fields, routing_node)}, {storage_properties, [{ets, [{read_concurrency, true}]}]} - ]). + ]), + [?ROUTING_NODE]. %%-------------------------------------------------------------------- %% API diff --git a/apps/emqx/src/emqx_router_sup.erl b/apps/emqx/src/emqx_router_sup.erl index 588b0de8e..d2bd4afc8 100644 --- a/apps/emqx/src/emqx_router_sup.erl +++ b/apps/emqx/src/emqx_router_sup.erl @@ -24,6 +24,11 @@ start_link() -> %% Init and log routing table type + ok = mria:wait_for_tables( + emqx_trie:create_trie() ++ + emqx_router:create_tables() ++ + emqx_router_helper:create_tables() + ), ok = emqx_router:init_schema(), supervisor:start_link({local, ?MODULE}, ?MODULE, []). 
diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index ae22db14f..d5989687d 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2017-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2017-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -94,6 +94,7 @@ non_empty_string/1, validations/0, naive_env_interpolation/1, + ensure_unicode_path/2, validate_server_ssl_opts/1, validate_tcp_keepalive/1, parse_tcp_keepalive/1 @@ -181,7 +182,7 @@ -define(DEFAULT_MULTIPLIER, 1.5). -define(DEFAULT_BACKOFF, 0.75). -namespace() -> broker. +namespace() -> emqx. tags() -> [<<"EMQX">>]. @@ -229,7 +230,7 @@ roots(high) -> ); roots(medium) -> [ - {"broker", + {broker, sc( ref("broker"), #{ @@ -1103,6 +1104,14 @@ fields("ws_opts") -> sc( ref("deflate_opts"), #{} + )}, + {"validate_utf8", + sc( + boolean(), + #{ + default => true, + desc => ?DESC(fields_ws_opts_validate_utf8) + } )} ]; fields("tcp_opts") -> @@ -1338,24 +1347,43 @@ fields("deflate_opts") -> ]; fields("broker") -> [ - {"enable_session_registry", + {enable_session_registry, sc( boolean(), #{ default => true, + importance => ?IMPORTANCE_HIGH, desc => ?DESC(broker_enable_session_registry) } )}, - {"session_locking_strategy", + {session_history_retain, + sc( + duration_s(), + #{ + default => <<"0s">>, + importance => ?IMPORTANCE_LOW, + desc => ?DESC("broker_session_history_retain") + } + )}, + {session_locking_strategy, sc( hoconsc:enum([local, leader, quorum, all]), #{ default => quorum, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(broker_session_locking_strategy) } )}, - shared_subscription_strategy(), - {"shared_dispatch_ack_enabled", + %% moved to under mqtt root + {shared_subscription_strategy, + sc( + 
string(), + #{ + deprecated => {since, "5.1.0"}, + importance => ?IMPORTANCE_HIDDEN + } + )}, + {shared_dispatch_ack_enabled, sc( boolean(), #{ @@ -1365,7 +1393,7 @@ fields("broker") -> desc => ?DESC(broker_shared_dispatch_ack_enabled) } )}, - {"route_batch_clean", + {route_batch_clean, sc( boolean(), #{ @@ -1374,18 +1402,18 @@ fields("broker") -> importance => ?IMPORTANCE_HIDDEN } )}, - {"perf", + {perf, sc( ref("broker_perf"), #{importance => ?IMPORTANCE_HIDDEN} )}, - {"routing", + {routing, sc( ref("broker_routing"), #{importance => ?IMPORTANCE_HIDDEN} )}, %% FIXME: Need new design for shared subscription group - {"shared_subscription_group", + {shared_subscription_group, sc( map(name, ref("shared_subscription_group")), #{ @@ -1801,7 +1829,7 @@ fields("session_persistence") -> sc( pos_integer(), #{ - default => 1000, + default => 100, desc => ?DESC(session_ds_max_batch_size) } )}, @@ -1854,6 +1882,14 @@ fields("session_persistence") -> desc => ?DESC(session_ds_session_gc_batch_size) } )}, + {"message_retention_period", + sc( + timeout_duration(), + #{ + default => <<"1d">>, + desc => ?DESC(session_ds_message_retention_period) + } + )}, {"force_persistence", sc( boolean(), @@ -1882,6 +1918,16 @@ fields("session_storage_backend_builtin") -> default => true } )}, + {"data_dir", + sc( + string(), + #{ + desc => ?DESC(session_builtin_data_dir), + mapping => "emqx_durable_storage.db_data_dir", + required => false, + importance => ?IMPORTANCE_LOW + } + )}, {"n_shards", sc( pos_integer(), @@ -1897,6 +1943,24 @@ fields("session_storage_backend_builtin") -> default => 3, importance => ?IMPORTANCE_HIDDEN } + )}, + {"egress_batch_size", + sc( + pos_integer(), + #{ + default => 1000, + mapping => "emqx_durable_storage.egress_batch_size", + importance => ?IMPORTANCE_HIDDEN + } + )}, + {"egress_flush_interval", + sc( + timeout_duration_ms(), + #{ + default => 100, + mapping => "emqx_durable_storage.egress_flush_interval", + importance => ?IMPORTANCE_HIDDEN + } )} ]. 
@@ -3595,7 +3659,22 @@ mqtt_general() -> desc => ?DESC(mqtt_shared_subscription) } )}, - shared_subscription_strategy(), + {"shared_subscription_strategy", + sc( + hoconsc:enum([ + random, + round_robin, + round_robin_per_group, + sticky, + local, + hash_topic, + hash_clientid + ]), + #{ + default => round_robin, + desc => ?DESC(mqtt_shared_subscription_strategy) + } + )}, {"exclusive_subscription", sc( boolean(), @@ -3700,6 +3779,15 @@ mqtt_session() -> importance => ?IMPORTANCE_LOW } )}, + {"message_expiry_interval", + sc( + hoconsc:union([duration(), infinity]), + #{ + default => infinity, + desc => ?DESC(mqtt_message_expiry_interval), + importance => ?IMPORTANCE_LOW + } + )}, {"max_awaiting_rel", sc( hoconsc:union([non_neg_integer(), infinity]), @@ -3792,24 +3880,6 @@ mqtt_session() -> )} ]. -shared_subscription_strategy() -> - {"shared_subscription_strategy", - sc( - hoconsc:enum([ - random, - round_robin, - round_robin_per_group, - sticky, - local, - hash_topic, - hash_clientid - ]), - #{ - default => round_robin, - desc => ?DESC(broker_shared_subscription_strategy) - } - )}. - default_mem_check_interval() -> case emqx_os_mon:is_os_check_supported() of true -> <<"60s">>; @@ -3836,3 +3906,20 @@ tags_schema() -> importance => ?IMPORTANCE_LOW } ). + +ensure_unicode_path(undefined, _) -> + undefined; +ensure_unicode_path(Path, #{make_serializable := true}) -> + %% format back to serializable string + unicode:characters_to_binary(Path, utf8); +ensure_unicode_path(Path, Opts) when is_binary(Path) -> + case unicode:characters_to_list(Path, utf8) of + {R, _, _} when R =:= error orelse R =:= incomplete -> + throw({"bad_file_path_string", Path}); + PathStr -> + ensure_unicode_path(PathStr, Opts) + end; +ensure_unicode_path(Path, _) when is_list(Path) -> + Path; +ensure_unicode_path(Path, _) -> + throw({"not_string", Path}). 
diff --git a/apps/emqx/src/emqx_session.erl b/apps/emqx/src/emqx_session.erl index c08109fe8..a84ed4d83 100644 --- a/apps/emqx/src/emqx_session.erl +++ b/apps/emqx/src/emqx_session.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2017-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2017-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -135,7 +135,7 @@ -type custom_timer_name() :: atom(). -type message() :: emqx_types:message(). --type publish() :: {maybe(emqx_types:packet_id()), emqx_types:message()}. +-type publish() :: {option(emqx_types:packet_id()), emqx_types:message()}. -type pubrel() :: {pubrel, emqx_types:packet_id()}. -type reply() :: publish() | pubrel(). -type replies() :: [reply()] | reply(). @@ -409,12 +409,8 @@ enrich_delivers(ClientInfo, Delivers, Session) -> enrich_delivers(_ClientInfo, [], _UpgradeQoS, _Session) -> []; enrich_delivers(ClientInfo, [D | Rest], UpgradeQoS, Session) -> - case enrich_deliver(ClientInfo, D, UpgradeQoS, Session) of - [] -> - enrich_delivers(ClientInfo, Rest, UpgradeQoS, Session); - Msg -> - [Msg | enrich_delivers(ClientInfo, Rest, UpgradeQoS, Session)] - end. + enrich_deliver(ClientInfo, D, UpgradeQoS, Session) ++ + enrich_delivers(ClientInfo, Rest, UpgradeQoS, Session). 
enrich_deliver(ClientInfo, {deliver, Topic, Msg}, UpgradeQoS, Session) -> SubOpts = @@ -435,13 +431,15 @@ enrich_message( _ = emqx_session_events:handle_event(ClientInfo, {dropped, Msg, no_local}), []; enrich_message(_ClientInfo, MsgIn, SubOpts = #{}, UpgradeQoS) -> - maps:fold( - fun(SubOpt, V, Msg) -> enrich_subopts(SubOpt, V, Msg, UpgradeQoS) end, - MsgIn, - SubOpts - ); + [ + maps:fold( + fun(SubOpt, V, Msg) -> enrich_subopts(SubOpt, V, Msg, UpgradeQoS) end, + MsgIn, + SubOpts + ) + ]; enrich_message(_ClientInfo, Msg, undefined, _UpgradeQoS) -> - Msg. + [Msg]. enrich_subopts(nl, 1, Msg, _) -> emqx_message:set_flag(nl, Msg); diff --git a/apps/emqx/src/emqx_session_events.erl b/apps/emqx/src/emqx_session_events.erl index f46144020..ac8dee262 100644 --- a/apps/emqx/src/emqx_session_events.erl +++ b/apps/emqx/src/emqx_session_events.erl @@ -62,10 +62,10 @@ handle_event(ClientInfo, {dropped, Msg, #{reason := queue_full, logctx := Ctx}}) ok = emqx_metrics:inc('delivery.dropped.queue_full'), ok = inc_pd('send_msg.dropped', 1), ok = inc_pd('send_msg.dropped.queue_full', 1), - ?SLOG( + ?SLOG_THROTTLE( warning, Ctx#{ - msg => "dropped_msg_due_to_mqueue_is_full", + msg => dropped_msg_due_to_mqueue_is_full, payload => Msg#message.payload }, #{topic => Msg#message.topic} diff --git a/apps/emqx/src/emqx_session_mem.erl b/apps/emqx/src/emqx_session_mem.erl index c8affdaea..e8c7a7d18 100644 --- a/apps/emqx/src/emqx_session_mem.erl +++ b/apps/emqx/src/emqx_session_mem.erl @@ -468,12 +468,12 @@ dequeue(ClientInfo, Session = #session{inflight = Inflight, mqueue = Q}) -> dequeue(_ClientInfo, 0, Msgs, Q) -> {lists:reverse(Msgs), Q}; -dequeue(ClientInfo, Cnt, Msgs, Q) -> +dequeue(ClientInfo = #{zone := Zone}, Cnt, Msgs, Q) -> case emqx_mqueue:out(Q) of {empty, _Q} -> dequeue(ClientInfo, 0, Msgs, Q); {{value, Msg}, Q1} -> - case emqx_message:is_expired(Msg) of + case emqx_message:is_expired(Msg, Zone) of true -> _ = emqx_session_events:handle_event(ClientInfo, {expired, Msg}), 
dequeue(ClientInfo, Cnt, Msgs, Q1); @@ -619,14 +619,14 @@ retry_delivery( end. do_retry_delivery( - ClientInfo, + ClientInfo = #{zone := Zone}, PacketId, #inflight_data{phase = wait_ack, message = Msg} = Data, Now, Acc, Inflight ) -> - case emqx_message:is_expired(Msg) of + case emqx_message:is_expired(Msg, Zone) of true -> _ = emqx_session_events:handle_event(ClientInfo, {expired, Msg}), {Acc, emqx_inflight:delete(PacketId, Inflight)}; diff --git a/apps/emqx/src/emqx_shared_sub.erl b/apps/emqx/src/emqx_shared_sub.erl index 0a6538282..f35621758 100644 --- a/apps/emqx/src/emqx_shared_sub.erl +++ b/apps/emqx/src/emqx_shared_sub.erl @@ -25,9 +25,7 @@ -include("types.hrl"). %% Mnesia bootstrap --export([mnesia/1]). - --boot_mnesia({mnesia, [boot]}). +-export([create_tables/0]). %% APIs -export([start_link/0]). @@ -107,14 +105,15 @@ %% Mnesia bootstrap %%-------------------------------------------------------------------- -mnesia(boot) -> +create_tables() -> ok = mria:create_table(?TAB, [ {type, bag}, {rlog_shard, ?SHARED_SUB_SHARD}, {storage, ram_copies}, {record_name, emqx_shared_subscription}, {attributes, record_info(fields, emqx_shared_subscription)} - ]). + ]), + [?TAB]. %%-------------------------------------------------------------------- %% API diff --git a/apps/emqx/src/emqx_stats.erl b/apps/emqx/src/emqx_stats.erl index 9685823ff..b919d472d 100644 --- a/apps/emqx/src/emqx_stats.erl +++ b/apps/emqx/src/emqx_stats.erl @@ -62,7 +62,7 @@ -record(update, {name, countdown, interval, func}). -record(state, { - timer :: maybe(reference()), + timer :: option(reference()), updates :: [#update{}], tick_ms :: timeout() }). @@ -99,7 +99,11 @@ [ 'sessions.count', %% Maximum Number of Concurrent Sessions - 'sessions.max' + 'sessions.max', + %% Count of Sessions in the cluster + 'cluster_sessions.count', + %% Maximum Number of Sessions in the cluster + 'cluster_sessions.max' ] ). 
@@ -164,6 +168,8 @@ names() -> emqx_connections_max, emqx_live_connections_count, emqx_live_connections_max, + emqx_cluster_sessions_count, + emqx_cluster_sessions_max, emqx_sessions_count, emqx_sessions_max, emqx_channels_count, diff --git a/apps/emqx/src/emqx_sys.erl b/apps/emqx/src/emqx_sys.erl index 509429796..e7c19cabd 100644 --- a/apps/emqx/src/emqx_sys.erl +++ b/apps/emqx/src/emqx_sys.erl @@ -65,8 +65,8 @@ -import(emqx_utils, [start_timer/2]). -record(state, { - heartbeat :: maybe(reference()), - ticker :: maybe(reference()), + heartbeat :: option(reference()), + ticker :: option(reference()), sysdescr :: binary() }). diff --git a/apps/emqx/src/emqx_sys_sup.erl b/apps/emqx/src/emqx_sys_sup.erl index 25718ba76..fc1f8f320 100644 --- a/apps/emqx/src/emqx_sys_sup.erl +++ b/apps/emqx/src/emqx_sys_sup.erl @@ -22,6 +22,7 @@ -export([init/1]). start_link() -> + _ = mria:wait_for_tables(emqx_alarm:create_tables()), supervisor:start_link({local, ?MODULE}, ?MODULE, []). init([]) -> diff --git a/apps/emqx/src/emqx_topic_gbt.erl b/apps/emqx/src/emqx_topic_gbt.erl index 6e9e7d2fc..b399903f4 100644 --- a/apps/emqx/src/emqx_topic_gbt.erl +++ b/apps/emqx/src/emqx_topic_gbt.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -39,11 +39,11 @@ -type match(ID) :: key(ID). -opaque t(ID, Value) :: gb_trees:tree(key(ID), Value). --opaque t() :: t(_ID, _Value). +-type t() :: t(_ID, _Value). %% @doc Create a new gb_tree and store it in the persitent_term with the %% given name. --spec new() -> t(). +-spec new() -> t(_ID, _Value). new() -> gb_trees:empty(). 
@@ -54,19 +54,19 @@ size(Gbt) -> %% @doc Insert a new entry into the index that associates given topic filter to given %% record ID, and attaches arbitrary record to the entry. This allows users to choose %% between regular and "materialized" indexes, for example. --spec insert(emqx_types:topic() | words(), _ID, _Record, t()) -> t(). +-spec insert(emqx_types:topic() | words(), ID, Record, t(ID, Record)) -> t(ID, Record). insert(Filter, ID, Record, Gbt) -> Key = key(Filter, ID), gb_trees:enter(Key, Record, Gbt). %% @doc Delete an entry from the index that associates given topic filter to given %% record ID. Deleting non-existing entry is not an error. --spec delete(emqx_types:topic() | words(), _ID, t()) -> t(). +-spec delete(emqx_types:topic() | words(), ID, t(ID, Record)) -> t(ID, Record). delete(Filter, ID, Gbt) -> Key = key(Filter, ID), gb_trees:delete_any(Key, Gbt). --spec lookup(emqx_types:topic() | words(), _ID, t(), Default) -> _Record | Default. +-spec lookup(emqx_types:topic() | words(), ID, t(ID, Record), Default) -> Record | Default. lookup(Filter, ID, Gbt, Default) -> Key = key(Filter, ID), case gb_trees:lookup(Key, Gbt) of @@ -76,7 +76,7 @@ lookup(Filter, ID, Gbt, Default) -> Default end. --spec fold(fun((key(_ID), _Record, Acc) -> Acc), Acc, t()) -> Acc. +-spec fold(fun((key(ID), Record, Acc) -> Acc), Acc, t(ID, Record)) -> Acc. fold(Fun, Acc, Gbt) -> Iter = gb_trees:iterator(Gbt), fold_iter(Fun, Acc, Iter). @@ -91,13 +91,13 @@ fold_iter(Fun, Acc, Iter) -> %% @doc Match given topic against the index and return the first match, or `false` if %% no match is found. --spec match(emqx_types:topic(), t()) -> match(_ID) | false. +-spec match(emqx_types:topic(), t(ID, _Record)) -> match(ID) | false. match(Topic, Gbt) -> emqx_trie_search:match(Topic, make_nextf(Gbt)). %% @doc Match given topic against the index and return _all_ matches. %% If `unique` option is given, return only unique matches by record ID. 
--spec matches(emqx_types:topic(), t(), emqx_trie_search:opts()) -> [match(_ID)]. +-spec matches(emqx_types:topic(), t(ID, _Record), emqx_trie_search:opts()) -> [match(ID)]. matches(Topic, Gbt, Opts) -> emqx_trie_search:matches(Topic, make_nextf(Gbt), Opts). @@ -112,7 +112,7 @@ get_topic(Key) -> emqx_trie_search:get_topic(Key). %% @doc Fetch the record associated with the match. --spec get_record(match(_ID), t()) -> _Record. +-spec get_record(match(ID), t(ID, Record)) -> Record. get_record(Key, Gbt) -> gb_trees:get(Key, Gbt). diff --git a/apps/emqx/src/emqx_trie.erl b/apps/emqx/src/emqx_trie.erl index 76be97d3e..fbac28856 100644 --- a/apps/emqx/src/emqx_trie.erl +++ b/apps/emqx/src/emqx_trie.erl @@ -20,13 +20,11 @@ %% Mnesia bootstrap -export([ - mnesia/1, + create_trie/0, wait_for_tables/0, create_session_trie/1 ]). --boot_mnesia({mnesia, [boot]}). - %% Trie APIs -export([ insert/1, @@ -65,8 +63,8 @@ %%-------------------------------------------------------------------- %% @doc Create or replicate topics table. --spec mnesia(boot | copy) -> ok. -mnesia(boot) -> +-spec create_trie() -> [mria:table()]. +create_trie() -> %% Optimize storage StoreProps = [ {ets, [ @@ -80,7 +78,8 @@ mnesia(boot) -> {attributes, record_info(fields, ?TRIE)}, {type, ordered_set}, {storage_properties, StoreProps} - ]). + ]), + [?TRIE]. create_session_trie(Type) -> Storage = diff --git a/apps/emqx/src/emqx_types.erl b/apps/emqx/src/emqx_types.erl index 436fffe4e..c99ddbe13 100644 --- a/apps/emqx/src/emqx_types.erl +++ b/apps/emqx/src/emqx_types.erl @@ -100,6 +100,7 @@ -export_type([ banned/0, + banned_who/0, command/0 ]). @@ -173,7 +174,7 @@ atom() => term() }. 
-type clientinfo() :: #{ - zone := maybe(zone()), + zone := option(zone()), protocol := protocol(), peerhost := peerhost(), sockport := non_neg_integer(), @@ -181,9 +182,9 @@ username := username(), is_bridge := boolean(), is_superuser := boolean(), - mountpoint := maybe(binary()), - ws_cookie => maybe(list()), - password => maybe(binary()), + mountpoint := option(binary()), + ws_cookie => option(list()), + password => option(binary()), auth_result => auth_result(), anonymous => boolean(), cn => binary(), @@ -191,8 +192,8 @@ atom() => term() }. -type clientid() :: binary() | atom(). --type username() :: maybe(binary()). --type password() :: maybe(binary()). +-type username() :: option(binary()). +-type password() :: option(binary()). -type peerhost() :: inet:ip_address(). -type peername() :: {inet:ip_address(), inet:port_number()} @@ -222,8 +223,8 @@ -type packet_id() :: 1..16#FFFF. -type alias_id() :: 0..16#FFFF. -type topic_aliases() :: #{ - inbound => maybe(map()), - outbound => maybe(map()) + inbound => option(map()), + outbound => option(map()) }. -type properties() :: #{atom() => term()}. -type topic_filters() :: list({topic(), subopts()}). @@ -246,6 +247,14 @@ }. -type banned() :: #banned{}. +-type banned_who() :: + {clientid, binary()} + | {peerhost, inet:ip_address()} + | {username, binary()} + | {clientid_re, {_RE :: tuple(), binary()}} + | {username_re, {_RE :: tuple(), binary()}} + | {peerhost_net, esockd_cidr:cidr()}. + -type deliver() :: {deliver, topic(), message()}. -type delivery() :: #delivery{}. -type deliver_result() :: ok | {ok, non_neg_integer()} | {error, term()}. 
diff --git a/apps/emqx/src/emqx_ws_connection.erl b/apps/emqx/src/emqx_ws_connection.erl index 4a25494ad..59e120e47 100644 --- a/apps/emqx/src/emqx_ws_connection.erl +++ b/apps/emqx/src/emqx_ws_connection.erl @@ -76,15 +76,15 @@ %% Channel channel :: emqx_channel:channel(), %% GC State - gc_state :: maybe(emqx_gc:gc_state()), + gc_state :: option(emqx_gc:gc_state()), %% Postponed Packets|Cmds|Events postponed :: list(emqx_types:packet() | ws_cmd() | tuple()), %% Stats Timer - stats_timer :: disabled | maybe(reference()), + stats_timer :: disabled | option(reference()), %% Idle Timeout idle_timeout :: timeout(), %% Idle Timer - idle_timer :: maybe(reference()), + idle_timer :: option(reference()), %% Zone name zone :: atom(), %% Listener Type and Name @@ -205,7 +205,8 @@ init(Req, #{listener := {Type, Listener}} = Opts) -> compress => get_ws_opts(Type, Listener, compress), deflate_opts => get_ws_opts(Type, Listener, deflate_opts), max_frame_size => get_ws_opts(Type, Listener, max_frame_size), - idle_timeout => get_ws_opts(Type, Listener, idle_timeout) + idle_timeout => get_ws_opts(Type, Listener, idle_timeout), + validate_utf8 => get_ws_opts(Type, Listener, validate_utf8) }, case check_origin_header(Req, Opts) of {error, Reason} -> diff --git a/apps/emqx/test/emqx_banned_SUITE.erl b/apps/emqx/test/emqx_banned_SUITE.erl index 8c86e17f6..b4bd3d444 100644 --- a/apps/emqx/test/emqx_banned_SUITE.erl +++ b/apps/emqx/test/emqx_banned_SUITE.erl @@ -34,7 +34,7 @@ end_per_suite(Config) -> t_add_delete(_) -> Banned = #banned{ - who = {clientid, <<"TestClient">>}, + who = emqx_banned:who(clientid, <<"TestClient">>), by = <<"banned suite">>, reason = <<"test">>, at = erlang:system_time(second), @@ -47,54 +47,91 @@ t_add_delete(_) -> emqx_banned:create(Banned#banned{until = erlang:system_time(second) + 100}), ?assertEqual(1, emqx_banned:info(size)), - ok = emqx_banned:delete({clientid, <<"TestClient">>}), + ok = emqx_banned:delete(emqx_banned:who(clientid, <<"TestClient">>)), 
?assertEqual(0, emqx_banned:info(size)). t_check(_) -> - {ok, _} = emqx_banned:create(#banned{who = {clientid, <<"BannedClient">>}}), - {ok, _} = emqx_banned:create(#banned{who = {username, <<"BannedUser">>}}), - {ok, _} = emqx_banned:create(#banned{who = {peerhost, {192, 168, 0, 1}}}), - ?assertEqual(3, emqx_banned:info(size)), - ClientInfo1 = #{ + {ok, _} = emqx_banned:create(#banned{who = emqx_banned:who(clientid, <<"BannedClient">>)}), + {ok, _} = emqx_banned:create(#banned{who = emqx_banned:who(username, <<"BannedUser">>)}), + {ok, _} = emqx_banned:create(#banned{who = emqx_banned:who(peerhost, {192, 168, 0, 1})}), + {ok, _} = emqx_banned:create(#banned{who = emqx_banned:who(peerhost, <<"192.168.0.2">>)}), + {ok, _} = emqx_banned:create(#banned{who = emqx_banned:who(clientid_re, <<"BannedClientRE.*">>)}), + {ok, _} = emqx_banned:create(#banned{who = emqx_banned:who(username_re, <<"BannedUserRE.*">>)}), + {ok, _} = emqx_banned:create(#banned{who = emqx_banned:who(peerhost_net, <<"192.168.3.0/24">>)}), + + ?assertEqual(7, emqx_banned:info(size)), + ClientInfoBannedClientId = #{ clientid => <<"BannedClient">>, username => <<"user">>, peerhost => {127, 0, 0, 1} }, - ClientInfo2 = #{ + ClientInfoBannedUsername = #{ clientid => <<"client">>, username => <<"BannedUser">>, peerhost => {127, 0, 0, 1} }, - ClientInfo3 = #{ + ClientInfoBannedAddr1 = #{ clientid => <<"client">>, username => <<"user">>, peerhost => {192, 168, 0, 1} }, - ClientInfo4 = #{ + ClientInfoBannedAddr2 = #{ + clientid => <<"client">>, + username => <<"user">>, + peerhost => {192, 168, 0, 2} + }, + ClientInfoBannedClientIdRE = #{ + clientid => <<"BannedClientRE1">>, + username => <<"user">>, + peerhost => {127, 0, 0, 1} + }, + ClientInfoBannedUsernameRE = #{ + clientid => <<"client">>, + username => <<"BannedUserRE1">>, + peerhost => {127, 0, 0, 1} + }, + ClientInfoBannedAddrNet = #{ + clientid => <<"client">>, + username => <<"user">>, + peerhost => {192, 168, 3, 1} + }, + ClientInfoValidFull = #{ 
clientid => <<"client">>, username => <<"user">>, peerhost => {127, 0, 0, 1} }, - ClientInfo5 = #{}, - ClientInfo6 = #{clientid => <<"client1">>}, - ?assert(emqx_banned:check(ClientInfo1)), - ?assert(emqx_banned:check(ClientInfo2)), - ?assert(emqx_banned:check(ClientInfo3)), - ?assertNot(emqx_banned:check(ClientInfo4)), - ?assertNot(emqx_banned:check(ClientInfo5)), - ?assertNot(emqx_banned:check(ClientInfo6)), - ok = emqx_banned:delete({clientid, <<"BannedClient">>}), - ok = emqx_banned:delete({username, <<"BannedUser">>}), - ok = emqx_banned:delete({peerhost, {192, 168, 0, 1}}), - ?assertNot(emqx_banned:check(ClientInfo1)), - ?assertNot(emqx_banned:check(ClientInfo2)), - ?assertNot(emqx_banned:check(ClientInfo3)), - ?assertNot(emqx_banned:check(ClientInfo4)), + ClientInfoValidEmpty = #{}, + ClientInfoValidOnlyClientId = #{clientid => <<"client1">>}, + ?assert(emqx_banned:check(ClientInfoBannedClientId)), + ?assert(emqx_banned:check(ClientInfoBannedUsername)), + ?assert(emqx_banned:check(ClientInfoBannedAddr1)), + ?assert(emqx_banned:check(ClientInfoBannedAddr2)), + ?assert(emqx_banned:check(ClientInfoBannedClientIdRE)), + ?assert(emqx_banned:check(ClientInfoBannedUsernameRE)), + ?assert(emqx_banned:check(ClientInfoBannedAddrNet)), + ?assertNot(emqx_banned:check(ClientInfoValidFull)), + ?assertNot(emqx_banned:check(ClientInfoValidEmpty)), + ?assertNot(emqx_banned:check(ClientInfoValidOnlyClientId)), + ok = emqx_banned:delete(emqx_banned:who(clientid, <<"BannedClient">>)), + ok = emqx_banned:delete(emqx_banned:who(username, <<"BannedUser">>)), + ok = emqx_banned:delete(emqx_banned:who(peerhost, {192, 168, 0, 1})), + ok = emqx_banned:delete(emqx_banned:who(peerhost, <<"192.168.0.2">>)), + ok = emqx_banned:delete(emqx_banned:who(clientid_re, <<"BannedClientRE.*">>)), + ok = emqx_banned:delete(emqx_banned:who(username_re, <<"BannedUserRE.*">>)), + ok = emqx_banned:delete(emqx_banned:who(peerhost_net, <<"192.168.3.0/24">>)), + 
?assertNot(emqx_banned:check(ClientInfoBannedClientId)), + ?assertNot(emqx_banned:check(ClientInfoBannedUsername)), + ?assertNot(emqx_banned:check(ClientInfoBannedAddr1)), + ?assertNot(emqx_banned:check(ClientInfoBannedAddr2)), + ?assertNot(emqx_banned:check(ClientInfoBannedClientIdRE)), + ?assertNot(emqx_banned:check(ClientInfoBannedUsernameRE)), + ?assertNot(emqx_banned:check(ClientInfoBannedAddrNet)), + ?assertNot(emqx_banned:check(ClientInfoValidFull)), ?assertEqual(0, emqx_banned:info(size)). t_unused(_) -> - Who1 = {clientid, <<"BannedClient1">>}, - Who2 = {clientid, <<"BannedClient2">>}, + Who1 = emqx_banned:who(clientid, <<"BannedClient1">>), + Who2 = emqx_banned:who(clientid, <<"BannedClient2">>), ?assertMatch( {ok, _}, @@ -123,7 +160,7 @@ t_kick(_) -> snabbkaffe:start_trace(), Now = erlang:system_time(second), - Who = {clientid, ClientId}, + Who = emqx_banned:who(clientid, ClientId), emqx_banned:create(#{ who => Who, @@ -194,7 +231,7 @@ t_session_taken(_) -> Publish(), Now = erlang:system_time(second), - Who = {clientid, ClientId2}, + Who = emqx_banned:who(clientid, ClientId2), emqx_banned:create(#{ who => Who, by => <<"test">>, diff --git a/apps/emqx/test/emqx_channel_SUITE.erl b/apps/emqx/test/emqx_channel_SUITE.erl index ca038ac85..5b21b4aca 100644 --- a/apps/emqx/test/emqx_channel_SUITE.erl +++ b/apps/emqx/test/emqx_channel_SUITE.erl @@ -427,19 +427,32 @@ t_handle_in_auth(_) -> t_handle_in_frame_error(_) -> IdleChannel = channel(#{conn_state => idle}), - {shutdown, #{shutdown_count := frame_error, reason := frame_too_large}, _Chan} = - emqx_channel:handle_in({frame_error, frame_too_large}, IdleChannel), + {shutdown, #{shutdown_count := frame_too_large, cause := frame_too_large}, _Chan} = + emqx_channel:handle_in({frame_error, #{cause => frame_too_large}}, IdleChannel), ConnectingChan = channel(#{conn_state => connecting}), ConnackPacket = ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE), - {shutdown, #{shutdown_count := frame_error, reason := frame_too_large}, 
ConnackPacket, _} = - emqx_channel:handle_in({frame_error, frame_too_large}, ConnectingChan), + {shutdown, + #{ + shutdown_count := frame_too_large, + cause := frame_too_large, + limit := 100, + received := 101 + }, + ConnackPacket, + _} = + emqx_channel:handle_in( + {frame_error, #{cause => frame_too_large, received => 101, limit => 100}}, + ConnectingChan + ), DisconnectPacket = ?DISCONNECT_PACKET(?RC_PACKET_TOO_LARGE), ConnectedChan = channel(#{conn_state => connected}), - {ok, [{outgoing, DisconnectPacket}, {close, frame_too_large}], _} = - emqx_channel:handle_in({frame_error, frame_too_large}, ConnectedChan), + ?assertMatch( + {ok, [{outgoing, DisconnectPacket}, {close, frame_too_large}], _}, + emqx_channel:handle_in({frame_error, #{cause => frame_too_large}}, ConnectedChan) + ), DisconnectedChan = channel(#{conn_state => disconnected}), {ok, DisconnectedChan} = - emqx_channel:handle_in({frame_error, frame_too_large}, DisconnectedChan). + emqx_channel:handle_in({frame_error, #{cause => frame_too_large}}, DisconnectedChan). t_handle_in_expected_packet(_) -> Packet = ?DISCONNECT_PACKET(?RC_PROTOCOL_ERROR), diff --git a/apps/emqx/test/emqx_client_SUITE.erl b/apps/emqx/test/emqx_client_SUITE.erl index bb4ef0826..3e5babd2e 100644 --- a/apps/emqx/test/emqx_client_SUITE.erl +++ b/apps/emqx/test/emqx_client_SUITE.erl @@ -72,7 +72,7 @@ groups() -> t_dollar_topics, t_sub_non_utf8_topic ]}, - {mqttv5, [non_parallel_tests], [t_basic_with_props_v5]}, + {mqttv5, [non_parallel_tests], [t_basic_with_props_v5, t_v5_receive_maximim_in_connack]}, {others, [non_parallel_tests], [ t_username_as_clientid, t_certcn_as_clientid_default_config_tls, @@ -103,14 +103,14 @@ end_per_testcase(_Case, _Config) -> %%-------------------------------------------------------------------- t_basic_v3(_) -> - t_basic([{proto_ver, v3}]). + run_basic([{proto_ver, v3}]). 
%%-------------------------------------------------------------------- %% Test cases for MQTT v4 %%-------------------------------------------------------------------- t_basic_v4(_Config) -> - t_basic([{proto_ver, v4}]). + run_basic([{proto_ver, v4}]). t_cm(_) -> emqx_config:put_zone_conf(default, [mqtt, idle_timeout], 1000), @@ -335,19 +335,30 @@ t_sub_non_utf8_topic(_) -> %% Test cases for MQTT v5 %%-------------------------------------------------------------------- -t_basic_with_props_v5(_) -> - t_basic([ +v5_conn_props(ReceiveMaximum) -> + [ {proto_ver, v5}, - {properties, #{'Receive-Maximum' => 4}} - ]). + {properties, #{'Receive-Maximum' => ReceiveMaximum}} + ]. + +t_basic_with_props_v5(_) -> + run_basic(v5_conn_props(4)). + +t_v5_receive_maximim_in_connack(_) -> + ReceiveMaximum = 7, + {ok, C} = emqtt:start_link(v5_conn_props(ReceiveMaximum)), + {ok, Props} = emqtt:connect(C), + ?assertMatch(#{'Receive-Maximum' := ReceiveMaximum}, Props), + ok = emqtt:disconnect(C), + ok. %%-------------------------------------------------------------------- %% General test cases. %%-------------------------------------------------------------------- -t_basic(_Opts) -> +run_basic(Opts) -> Topic = nth(1, ?TOPICS), - {ok, C} = emqtt:start_link([{proto_ver, v4}]), + {ok, C} = emqtt:start_link(Opts), {ok, _} = emqtt:connect(C), {ok, _, [1]} = emqtt:subscribe(C, Topic, qos1), {ok, _, [2]} = emqtt:subscribe(C, Topic, qos2), diff --git a/apps/emqx/test/emqx_cm_SUITE.erl b/apps/emqx/test/emqx_cm_SUITE.erl index 4ecea9a4b..aba4bc744 100644 --- a/apps/emqx/test/emqx_cm_SUITE.erl +++ b/apps/emqx/test/emqx_cm_SUITE.erl @@ -221,7 +221,7 @@ t_open_session_race_condition(_) -> end, %% sync ignored = gen_server:call(?CM, ignore, infinity), - ok = emqx_pool:flush_async_tasks(), + ok = emqx_pool:flush_async_tasks(?CM_POOL), ?assertEqual([], emqx_cm:lookup_channels(ClientId)). 
t_kick_session_discard_normal(_) -> @@ -343,29 +343,9 @@ test_stepdown_session(Action, Reason) -> end, % sync ignored = gen_server:call(?CM, ignore, infinity), - ok = flush_emqx_pool(), + ok = emqx_pool:flush_async_tasks(?CM_POOL), ?assertEqual([], emqx_cm:lookup_channels(ClientId)). -%% Channel deregistration is delegated to emqx_pool as a sync tasks. -%% The emqx_pool is pool of workers, and there is no way to know -%% which worker was picked for the last deregistration task. -%% This help function creates a large enough number of async tasks -%% to sync with the pool workers. -%% The number of tasks should be large enough to ensure all workers have -%% the chance to work on at least one of the tasks. -flush_emqx_pool() -> - Self = self(), - L = lists:seq(1, 1000), - lists:foreach(fun(I) -> emqx_pool:async_submit(fun() -> Self ! {done, I} end, []) end, L), - lists:foreach( - fun(I) -> - receive - {done, I} -> ok - end - end, - L - ). - t_discard_session_race(_) -> ClientId = rand_client_id(), ?check_trace( diff --git a/apps/emqx/test/emqx_cm_registry_keeper_SUITE.erl b/apps/emqx/test/emqx_cm_registry_keeper_SUITE.erl new file mode 100644 index 000000000..f3899fb3a --- /dev/null +++ b/apps/emqx/test/emqx_cm_registry_keeper_SUITE.erl @@ -0,0 +1,100 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. 
+%%-------------------------------------------------------------------- + +-module(emqx_cm_registry_keeper_SUITE). + +-compile(export_all). +-compile(nowarn_export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include("emqx_cm.hrl"). + +%%-------------------------------------------------------------------- +%% CT callbacks +%%-------------------------------------------------------------------- + +all() -> emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + AppConfig = "broker.session_history_retain = 2s", + Apps = emqx_cth_suite:start( + [{emqx, #{config => AppConfig}}], + #{work_dir => emqx_cth_suite:work_dir(Config)} + ), + [{apps, Apps} | Config]. + +end_per_suite(Config) -> + emqx_cth_suite:stop(proplists:get_value(apps, Config)). + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(_TestCase, Config) -> + Config. + +t_cleanup_after_retain(_) -> + Pid = spawn(fun() -> + receive + stop -> ok + end + end), + ClientId = <<"clientid">>, + ClientId2 = <<"clientid2">>, + emqx_cm_registry:register_channel({ClientId, Pid}), + emqx_cm_registry:register_channel({ClientId2, Pid}), + ?assertEqual([Pid], emqx_cm_registry:lookup_channels(ClientId)), + ?assertEqual([Pid], emqx_cm_registry:lookup_channels(ClientId2)), + ?assertEqual(2, emqx_cm_registry_keeper:count(0)), + T0 = erlang:system_time(seconds), + exit(Pid, kill), + %% lookup_channels checks if the channel is still alive + ?assertEqual([], emqx_cm_registry:lookup_channels(ClientId)), + ?assertEqual([], emqx_cm_registry:lookup_channels(ClientId2)), + %% simulate a DOWN message which causes emqx_cm to call clean_down + %% to clean the channels for real + ok = emqx_cm:clean_down({Pid, ClientId}), + ok = emqx_cm:clean_down({Pid, ClientId2}), + ?assertEqual(2, emqx_cm_registry_keeper:count(T0)), + ?assertEqual(2, emqx_cm_registry_keeper:count(0)), + ?retry(_Interval = 1000, _Attempts = 4, begin + ?assertEqual(0, 
emqx_cm_registry_keeper:count(T0)), + ?assertEqual(0, emqx_cm_registry_keeper:count(0)) + end), + ok. + +%% count is cached when the number of entries is greater than 1000 +t_count_cache(_) -> + Pid = self(), + ClientsCount = 999, + ClientIds = lists:map(fun erlang:integer_to_binary/1, lists:seq(1, ClientsCount)), + Channels = lists:map(fun(ClientId) -> {ClientId, Pid} end, ClientIds), + lists:foreach( + fun emqx_cm_registry:register_channel/1, + Channels + ), + T0 = erlang:system_time(seconds), + ?assertEqual(ClientsCount, emqx_cm_registry_keeper:count(0)), + ?assertEqual(ClientsCount, emqx_cm_registry_keeper:count(T0)), + %% insert another one to trigger the cache threshold + emqx_cm_registry:register_channel({<<"-1">>, Pid}), + ?assertEqual(ClientsCount + 1, emqx_cm_registry_keeper:count(0)), + ?assertEqual(ClientsCount, emqx_cm_registry_keeper:count(T0)), + mnesia:clear_table(?CHAN_REG_TAB), + ok. + +channel(Id, Pid) -> + #channel{chid = Id, pid = Pid}. diff --git a/apps/emqx/test/emqx_common_test_helpers.erl b/apps/emqx/test/emqx_common_test_helpers.erl index 81314ce23..8a0d31fa9 100644 --- a/apps/emqx/test/emqx_common_test_helpers.erl +++ b/apps/emqx/test/emqx_common_test_helpers.erl @@ -584,7 +584,14 @@ is_tcp_server_available(Host, Port) -> Timeout :: integer() ) -> boolean. 
is_tcp_server_available(Host, Port, Timeout) -> - case gen_tcp:connect(Host, Port, [], Timeout) of + case + gen_tcp:connect( + emqx_utils_conv:str(Host), + emqx_utils_conv:int(Port), + [], + Timeout + ) + of {ok, Socket} -> gen_tcp:close(Socket), true; diff --git a/apps/emqx/test/emqx_common_test_http.erl b/apps/emqx/test/emqx_common_test_http.erl index 30ebe409f..5dbc96b26 100644 --- a/apps/emqx/test/emqx_common_test_http.erl +++ b/apps/emqx/test/emqx_common_test_http.erl @@ -93,15 +93,22 @@ default_auth_header() -> create_default_app() -> Now = erlang:system_time(second), ExpiredAt = Now + timer:minutes(10), - emqx_mgmt_auth:create( - ?DEFAULT_APP_ID, - ?DEFAULT_APP_KEY, - ?DEFAULT_APP_SECRET, - true, - ExpiredAt, - <<"default app key for test">>, - ?ROLE_API_SUPERUSER - ). + case + emqx_mgmt_auth:create( + ?DEFAULT_APP_ID, + ?DEFAULT_APP_KEY, + ?DEFAULT_APP_SECRET, + true, + ExpiredAt, + <<"default app key for test">>, + ?ROLE_API_SUPERUSER + ) + of + {ok, App} -> + {ok, App}; + {error, name_already_existed} -> + {ok, _} = emqx_mgmt_auth:read(?DEFAULT_APP_ID) + end. delete_default_app() -> emqx_mgmt_auth:delete(?DEFAULT_APP_ID). 
diff --git a/apps/emqx/test/emqx_config_SUITE.erl b/apps/emqx/test/emqx_config_SUITE.erl index 6a49507a6..72611c3a6 100644 --- a/apps/emqx/test/emqx_config_SUITE.erl +++ b/apps/emqx/test/emqx_config_SUITE.erl @@ -76,8 +76,7 @@ t_fill_default_values(C) when is_list(C) -> <<"trie_compaction">> := true }, <<"route_batch_clean">> := false, - <<"session_locking_strategy">> := <<"quorum">>, - <<"shared_subscription_strategy">> := <<"round_robin">> + <<"session_history_retain">> := <<"0s">> } }, WithDefaults @@ -446,6 +445,7 @@ zone_global_defaults() -> response_information => [], retain_available => true, retry_interval => 30000, + message_expiry_interval => infinity, server_keepalive => disabled, session_expiry_interval => 7200000, shared_subscription => true, diff --git a/apps/emqx/test/emqx_cth_cluster.erl b/apps/emqx/test/emqx_cth_cluster.erl index d53bb1e90..54e399795 100644 --- a/apps/emqx/test/emqx_cth_cluster.erl +++ b/apps/emqx/test/emqx_cth_cluster.erl @@ -38,7 +38,7 @@ %% in `end_per_suite/1` or `end_per_group/2`) with the result from step 2. -module(emqx_cth_cluster). --export([start/1, start/2, restart/2]). +-export([start/1, start/2, restart/1, restart/2]). -export([stop/1, stop_node/1]). -export([start_bare_nodes/1, start_bare_nodes/2]). @@ -162,6 +162,9 @@ wait_clustered([Node | Nodes] = All, Check, Deadline) -> wait_clustered(All, Check, Deadline) end. +restart(NodeSpec) -> + restart(maps:get(name, NodeSpec), NodeSpec). + restart(Node, Spec) -> ct:pal("Stopping peer node ~p", [Node]), ok = emqx_cth_peer:stop(Node), @@ -381,6 +384,7 @@ node_init(Node) -> _ = share_load_module(Node, cthr), % Enable snabbkaffe trace forwarding ok = snabbkaffe:forward_trace(Node), + when_cover_enabled(fun() -> {ok, _} = cover:start([Node]) end), ok. %% Returns 'true' if this node should appear in running nodes list. 
@@ -445,6 +449,7 @@ stop(Nodes) -> stop_node(Name) -> Node = node_name(Name), + when_cover_enabled(fun() -> cover:flush([Node]) end), ok = emqx_cth_peer:stop(Node). %% Ports @@ -506,3 +511,20 @@ host() -> format(Format, Args) -> unicode:characters_to_binary(io_lib:format(Format, Args)). + +is_cover_enabled() -> + case os:getenv("ENABLE_COVER_COMPILE") of + "1" -> true; + "true" -> true; + _ -> false + end. + +when_cover_enabled(Fun) -> + %% We need to check if cover is enabled to avoid crashes when attempting to start it + %% on the peer. + case is_cover_enabled() of + true -> + Fun(); + false -> + ok + end. diff --git a/apps/emqx/test/emqx_cth_suite.erl b/apps/emqx/test/emqx_cth_suite.erl index fbb9da595..eae12145f 100644 --- a/apps/emqx/test/emqx_cth_suite.erl +++ b/apps/emqx/test/emqx_cth_suite.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -48,7 +48,7 @@ %% %% Most of the time, you just need to: %% 1. Describe the appspecs for the applications you want to test. -%% 2. Call `emqx_cth_sutie:start/2` to start the applications before the testrun +%% 2. Call `emqx_cth_suite:start/2` to start the applications before the testrun %% (e.g. in `init_per_suite/1` / `init_per_group/2`), providing the appspecs %% and unique work dir for the testrun (e.g. `work_dir/1`). Save the result %% in a context. @@ -177,10 +177,9 @@ load_appspec({App, _Opts}) -> load_app_deps(App). 
load_app_deps(App) -> - AlreadyLoaded = [A || {A, _, _} <- application:loaded_applications()], case application:get_key(App, applications) of {ok, Deps} -> - Apps = Deps -- AlreadyLoaded, + Apps = [D || D <- Deps, application:get_key(D, id) == undefined], ok = lists:foreach(fun emqx_common_test_helpers:load/1, Apps), ok = lists:foreach(fun load_app_deps/1, Apps); undefined -> @@ -471,9 +470,12 @@ clean_suite_state() -> app_schema(App) -> Mod = list_to_atom(atom_to_list(App) ++ "_schema"), - try is_list(Mod:roots()) of - true -> {ok, Mod}; - false -> {error, schema_no_roots} + try + Exports = Mod:module_info(exports), + case lists:member({roots, 0}, Exports) of + true -> {ok, Mod}; + false -> {error, schema_no_roots} + end catch error:undef -> {error, schema_not_found} diff --git a/apps/emqx/test/emqx_frame_SUITE.erl b/apps/emqx/test/emqx_frame_SUITE.erl index 23e8972e9..bdafa4eed 100644 --- a/apps/emqx/test/emqx_frame_SUITE.erl +++ b/apps/emqx/test/emqx_frame_SUITE.erl @@ -57,11 +57,12 @@ groups() -> t_serialize_parse_v5_connect, t_serialize_parse_connect_without_clientid, t_serialize_parse_connect_with_will, + t_serialize_parse_connect_with_malformed_will, t_serialize_parse_bridge_connect, t_parse_invalid_remaining_len, t_parse_malformed_properties, t_malformed_connect_header, - t_malformed_connect_payload, + t_malformed_connect_data, t_reserved_connect_flag, t_invalid_clientid ]}, @@ -138,8 +139,8 @@ t_parse_cont(_) -> t_parse_frame_too_large(_) -> Packet = ?PUBLISH_PACKET(?QOS_1, <<"t">>, 1, payload(1000)), - ?ASSERT_FRAME_THROW(frame_too_large, parse_serialize(Packet, #{max_size => 256})), - ?ASSERT_FRAME_THROW(frame_too_large, parse_serialize(Packet, #{max_size => 512})), + ?ASSERT_FRAME_THROW(#{cause := frame_too_large}, parse_serialize(Packet, #{max_size => 256})), + ?ASSERT_FRAME_THROW(#{cause := frame_too_large}, parse_serialize(Packet, #{max_size => 512})), ?assertEqual(Packet, parse_serialize(Packet, #{max_size => 2048, version => ?MQTT_PROTO_V4})). 
t_parse_frame_malformed_variable_byte_integer(_) -> @@ -277,6 +278,37 @@ t_serialize_parse_connect_with_will(_) -> ?assertEqual(Bin, serialize_to_binary(Packet)), ?assertMatch({ok, Packet, <<>>, _}, emqx_frame:parse(Bin)). +t_serialize_parse_connect_with_malformed_will(_) -> + Packet2 = #mqtt_packet{ + header = #mqtt_packet_header{type = ?CONNECT}, + variable = #mqtt_packet_connect{ + proto_ver = ?MQTT_PROTO_V3, + proto_name = <<"MQIsdp">>, + clientid = <<"mosqpub/10452-iMac.loca">>, + clean_start = true, + keepalive = 60, + will_retain = false, + will_qos = ?QOS_1, + will_flag = true, + will_topic = <<"/will">>, + will_payload = <<>> + } + }, + <<16, 46, Body:44/binary, 0, 0>> = serialize_to_binary(Packet2), + %% too short + BadBin1 = <<16, 45, Body/binary, 0>>, + ?ASSERT_FRAME_THROW( + #{cause := malformed_will_payload, length_bytes := 1, expected_bytes := 2}, + emqx_frame:parse(BadBin1) + ), + %% too long + BadBin2 = <<16, 47, Body/binary, 0, 2, 0>>, + ?ASSERT_FRAME_THROW( + #{cause := malformed_will_payload, parsed_length := 2, remaining_bytes := 1}, + emqx_frame:parse(BadBin2) + ), + ok. + t_serialize_parse_bridge_connect(_) -> Bin = <<16, 86, 0, 6, 77, 81, 73, 115, 100, 112, 131, 44, 0, 60, 0, 19, 67, 95, 48, 48, 58, 48, @@ -585,7 +617,7 @@ t_serialize_parse_pingresp(_) -> Packet = serialize_to_binary(PingResp), ?assertException( throw, - {frame_parse_error, #{hint := unexpected_packet, header_type := 'PINGRESP'}}, + {frame_parse_error, #{cause := unexpected_packet, header_type := 'PINGRESP'}}, emqx_frame:parse(Packet) ). @@ -632,7 +664,9 @@ t_serialize_parse_auth_v5(_) -> t_parse_invalid_remaining_len(_) -> ?assertException( - throw, {frame_parse_error, #{hint := zero_remaining_len}}, emqx_frame:parse(<>) + throw, + {frame_parse_error, #{cause := zero_remaining_len}}, + emqx_frame:parse(<>) ). t_parse_malformed_properties(_) -> @@ -643,16 +677,14 @@ t_parse_malformed_properties(_) -> ). 
t_malformed_connect_header(_) -> - ?assertException( - throw, - {frame_parse_error, malformed_connect_header}, + ?ASSERT_FRAME_THROW( + #{cause := malformed_connect, header_bytes := _}, emqx_frame:parse(<<16, 11, 0, 6, 77, 81, 73, 115, 100, 112, 3, 130, 1, 6>>) ). -t_malformed_connect_payload(_) -> - ?assertException( - throw, - {frame_parse_error, malformed_connect_data}, +t_malformed_connect_data(_) -> + ?ASSERT_FRAME_THROW( + #{cause := malformed_connect, unexpected_trailing_bytes := _}, emqx_frame:parse(<<16, 15, 0, 6, 77, 81, 73, 115, 100, 112, 3, 0, 0, 0, 0, 0, 0>>) ). @@ -666,7 +698,7 @@ t_reserved_connect_flag(_) -> t_invalid_clientid(_) -> ?assertException( throw, - {frame_parse_error, #{hint := invalid_clientid}}, + {frame_parse_error, #{cause := invalid_clientid}}, emqx_frame:parse(<<16, 15, 0, 6, 77, 81, 73, 115, 100, 112, 3, 0, 0, 0, 1, 0, 0>>) ). diff --git a/apps/emqx/test/emqx_listeners_SUITE.erl b/apps/emqx/test/emqx_listeners_SUITE.erl index 2d2a13e31..0dcd27612 100644 --- a/apps/emqx/test/emqx_listeners_SUITE.erl +++ b/apps/emqx/test/emqx_listeners_SUITE.erl @@ -206,7 +206,8 @@ t_ssl_update_opts(Config) -> {verify, verify_peer}, {customize_hostname_check, [{match_fun, fun(_, _) -> true end}]} ], - with_listener(ssl, updated, Conf, fun() -> + Name = ?FUNCTION_NAME, + with_listener(ssl, Name, Conf, fun() -> %% Client connects successfully. C1 = emqtt_connect_ssl(Host, Port, [ {cacertfile, filename:join(PrivDir, "ca.pem")} | ClientSSLOpts @@ -214,7 +215,7 @@ t_ssl_update_opts(Config) -> %% Change the listener SSL configuration: another set of cert/key files. {ok, _} = emqx:update_config( - [listeners, ssl, updated], + [listeners, ssl, Name], {update, #{ <<"ssl_options">> => #{ <<"cacertfile">> => filename:join(PrivDir, "ca-next.pem"), @@ -238,7 +239,7 @@ t_ssl_update_opts(Config) -> %% Change the listener SSL configuration: require peer certificate. 
{ok, _} = emqx:update_config( - [listeners, ssl, updated], + [listeners, ssl, Name], {update, #{ <<"ssl_options">> => #{ <<"verify">> => verify_peer, @@ -292,7 +293,8 @@ t_wss_update_opts(Config) -> {verify, verify_peer}, {customize_hostname_check, [{match_fun, fun(_, _) -> true end}]} ], - with_listener(wss, updated, Conf, fun() -> + Name = ?FUNCTION_NAME, + with_listener(wss, Name, Conf, fun() -> %% Start a client. C1 = emqtt_connect_wss(Host, Port, [ {cacertfile, filename:join(PrivDir, "ca.pem")} @@ -303,7 +305,7 @@ t_wss_update_opts(Config) -> %% 1. Another set of (password protected) cert/key files. %% 2. Require peer certificate. {ok, _} = emqx:update_config( - [listeners, wss, updated], + [listeners, wss, Name], {update, #{ <<"ssl_options">> => #{ <<"cacertfile">> => filename:join(PrivDir, "ca-next.pem"), @@ -327,7 +329,7 @@ t_wss_update_opts(Config) -> %% Change the listener SSL configuration: require peer certificate. {ok, _} = emqx:update_config( - [listeners, wss, updated], + [listeners, wss, Name], {update, #{ <<"ssl_options">> => #{ <<"verify">> => verify_peer, @@ -384,7 +386,8 @@ t_quic_update_opts(Config) -> {verify, verify_peer}, {customize_hostname_check, [{match_fun, fun(_, _) -> true end}]} ], - with_listener(ListenerType, updated, Conf, fun() -> + Name = ?FUNCTION_NAME, + with_listener(ListenerType, Name, Conf, fun() -> %% Client connects successfully. C1 = ConnectFun(Host, Port, [ {cacertfile, filename:join(PrivDir, "ca.pem")} | ClientSSLOpts @@ -392,7 +395,7 @@ t_quic_update_opts(Config) -> %% Change the listener SSL configuration: another set of cert/key files. {ok, _} = emqx:update_config( - [listeners, ListenerType, updated], + [listeners, ListenerType, Name], {update, #{ <<"ssl_options">> => #{ <<"cacertfile">> => filename:join(PrivDir, "ca-next.pem"), @@ -405,9 +408,9 @@ t_quic_update_opts(Config) -> %% Unable to connect with old SSL options, server's cert is signed by another CA. 
?assertError( {transport_down, #{error := _, status := Status}} when - (Status =:= bad_certificate orelse + ((Status =:= bad_certificate orelse Status =:= cert_untrusted_root orelse - Status =:= handshake_failure), + Status =:= handshake_failure)), ConnectFun(Host, Port, [ {cacertfile, filename:join(PrivDir, "ca.pem")} | ClientSSLOpts ]) @@ -419,7 +422,7 @@ t_quic_update_opts(Config) -> %% Change the listener SSL configuration: require peer certificate. {ok, _} = emqx:update_config( - [listeners, ListenerType, updated], + [listeners, ListenerType, Name], {update, #{ <<"ssl_options">> => #{ <<"verify">> => verify_peer, @@ -447,7 +450,7 @@ t_quic_update_opts(Config) -> %% Change the listener port NewPort = emqx_common_test_helpers:select_free_port(ListenerType), {ok, _} = emqx:update_config( - [listeners, ListenerType, updated], + [listeners, ListenerType, Name], {update, #{ <<"bind">> => format_bind({Host, NewPort}) }} @@ -506,7 +509,8 @@ t_quic_update_opts_fail(Config) -> {verify, verify_peer}, {customize_hostname_check, [{match_fun, fun(_, _) -> true end}]} ], - with_listener(ListenerType, updated, Conf, fun() -> + Name = ?FUNCTION_NAME, + with_listener(ListenerType, Name, Conf, fun() -> %% GIVEN: an working Listener that client could connect to. C1 = ConnectFun(Host, Port, [ {cacertfile, filename:join(PrivDir, "ca.pem")} | ClientSSLOpts @@ -514,7 +518,7 @@ t_quic_update_opts_fail(Config) -> %% WHEN: reload the listener with invalid SSL options (certfile and keyfile missmatch). 
UpdateResult1 = emqx:update_config( - [listeners, ListenerType, updated], + [listeners, ListenerType, Name], {update, #{ <<"ssl_options">> => #{ <<"cacertfile">> => filename:join(PrivDir, "ca-next.pem"), @@ -537,7 +541,7 @@ t_quic_update_opts_fail(Config) -> %% WHEN: Change the listener SSL configuration again UpdateResult2 = emqx:update_config( - [listeners, ListenerType, updated], + [listeners, ListenerType, Name], {update, #{ <<"ssl_options">> => #{ <<"cacertfile">> => filename:join(PrivDir, "ca-next.pem"), @@ -553,9 +557,9 @@ t_quic_update_opts_fail(Config) -> %% Unable to connect with old SSL options, server's cert is signed by another CA. ?assertError( {transport_down, #{error := _, status := Status}} when - (Status =:= bad_certificate orelse + ((Status =:= bad_certificate orelse Status =:= cert_untrusted_root orelse - Status =:= handshake_failure), + Status =:= handshake_failure)), ConnectFun(Host, Port, [ {cacertfile, filename:join(PrivDir, "ca.pem")} | ClientSSLOpts ]) @@ -581,7 +585,8 @@ with_listener(Type, Name, Config, Then) -> try Then() after - emqx:update_config([listeners, Type, Name], ?TOMBSTONE_CONFIG_CHANGE_REQ) + ok = emqx_listeners:stop(), + emqx:remove_config([listeners, Type, Name]) end. emqtt_connect_ssl(Host, Port, SSLOpts) -> diff --git a/apps/emqx/test/emqx_log_throttler_SUITE.erl b/apps/emqx/test/emqx_log_throttler_SUITE.erl new file mode 100644 index 000000000..441ef2d95 --- /dev/null +++ b/apps/emqx/test/emqx_log_throttler_SUITE.erl @@ -0,0 +1,170 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_log_throttler_SUITE). + +-compile(export_all). +-compile(nowarn_export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% Have to use real msgs, as the schema is guarded by enum. +-define(THROTTLE_MSG, authorization_permission_denied). +-define(THROTTLE_MSG1, cannot_publish_to_topic_due_to_not_authorized). +-define(TIME_WINDOW, <<"1s">>). + +all() -> emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + %% This test suite can't be run in standalone tests (without emqx_conf) + case module_exists(emqx_conf) of + true -> + Apps = emqx_cth_suite:start( + [ + {emqx_conf, #{ + config => + #{ + log => #{ + throttling => #{ + time_window => ?TIME_WINDOW, msgs => [?THROTTLE_MSG] + } + } + } + }}, + emqx + ], + #{work_dir => emqx_cth_suite:work_dir(Config)} + ), + [{suite_apps, Apps} | Config]; + false -> + {skip, standalone_not_supported} + end. + +end_per_suite(Config) -> + emqx_cth_suite:stop(?config(suite_apps, Config)), + emqx_config:delete_override_conf_files(). + +init_per_testcase(t_throttle_add_new_msg, Config) -> + ok = snabbkaffe:start_trace(), + [?THROTTLE_MSG] = Conf = emqx:get_config([log, throttling, msgs]), + {ok, _} = emqx_conf:update([log, throttling, msgs], [?THROTTLE_MSG1 | Conf], #{}), + Config; +init_per_testcase(_TC, Config) -> + ok = snabbkaffe:start_trace(), + Config. 
+ +end_per_testcase(t_throttle_add_new_msg, _Config) -> + ok = snabbkaffe:stop(), + {ok, _} = emqx_conf:update([log, throttling, msgs], [?THROTTLE_MSG], #{}), + ok; +end_per_testcase(t_update_time_window, _Config) -> + ok = snabbkaffe:stop(), + {ok, _} = emqx_conf:update([log, throttling, time_window], ?TIME_WINDOW, #{}), + ok; +end_per_testcase(_TC, _Config) -> + ok = snabbkaffe:stop(). + +%%-------------------------------------------------------------------- +%% Test cases +%%-------------------------------------------------------------------- + +t_throttle(_Config) -> + ?check_trace( + begin + %% Warm-up and block to increase the probability that next events + %% will be in the same throttling time window. + lists:foreach( + fun(_) -> emqx_log_throttler:allow(warning, ?THROTTLE_MSG) end, + lists:seq(1, 100) + ), + {ok, _} = ?block_until( + #{?snk_kind := log_throttler_dropped, throttled_msg := ?THROTTLE_MSG}, 5000 + ), + + ?assert(emqx_log_throttler:allow(warning, ?THROTTLE_MSG)), + ?assertNot(emqx_log_throttler:allow(warning, ?THROTTLE_MSG)), + %% Debug is always allowed + ?assert(emqx_log_throttler:allow(debug, ?THROTTLE_MSG)), + {ok, _} = ?block_until( + #{ + ?snk_kind := log_throttler_dropped, + throttled_msg := ?THROTTLE_MSG, + dropped_count := 1 + }, + 3000 + ) + end, + [] + ). + +t_throttle_add_new_msg(_Config) -> + ?check_trace( + begin + ?block_until( + #{?snk_kind := log_throttler_new_msg, throttled_msg := ?THROTTLE_MSG1}, 5000 + ), + ?assert(emqx_log_throttler:allow(warning, ?THROTTLE_MSG1)), + ?assertNot(emqx_log_throttler:allow(warning, ?THROTTLE_MSG1)), + {ok, _} = ?block_until( + #{ + ?snk_kind := log_throttler_dropped, + throttled_msg := ?THROTTLE_MSG1, + dropped_count := 1 + }, + 3000 + ) + end, + [] + ). 
+ +t_throttle_no_msg(_Config) -> + %% Must simply pass with no crashes + ?assert(emqx_log_throttler:allow(warning, no_test_throttle_msg)), + ?assert(emqx_log_throttler:allow(warning, no_test_throttle_msg)), + timer:sleep(10), + ?assert(erlang:is_process_alive(erlang:whereis(emqx_log_throttler))). + +t_update_time_window(_Config) -> + ?check_trace( + begin + ?wait_async_action( + emqx_conf:update([log, throttling, time_window], <<"2s">>, #{}), + #{?snk_kind := log_throttler_sched_refresh, new_period_ms := 2000}, + 5000 + ), + timer:sleep(10), + ?assert(erlang:is_process_alive(erlang:whereis(emqx_log_throttler))) + end, + [] + ). + +%%-------------------------------------------------------------------- +%% internal functions +%%-------------------------------------------------------------------- + +module_exists(Mod) -> + case erlang:module_loaded(Mod) of + true -> + true; + false -> + case code:ensure_loaded(Mod) of + ok -> true; + {module, Mod} -> true; + _ -> false + end + end. diff --git a/apps/emqx/test/emqx_message_SUITE.erl b/apps/emqx/test/emqx_message_SUITE.erl index 2e4164652..7bf6c9a7e 100644 --- a/apps/emqx/test/emqx_message_SUITE.erl +++ b/apps/emqx/test/emqx_message_SUITE.erl @@ -141,18 +141,50 @@ t_undefined_headers(_) -> Msg2 = emqx_message:set_header(c, 3, Msg), ?assertEqual(3, emqx_message:get_header(c, Msg2)). -t_is_expired(_) -> +t_is_expired_1(_) -> + test_msg_expired_property(?MODULE). + +t_is_expired_2(_) -> + %% if the 'Message-Expiry-Interval' property is set, the message_expiry_interval should be ignored + try + emqx_config:put( + maps:from_list([{list_to_atom(Root), #{}} || Root <- emqx_zone_schema:roots()]) + ), + emqx_config:put_zone_conf(?MODULE, [mqtt, message_expiry_interval], timer:seconds(10)), + test_msg_expired_property(?MODULE) + after + emqx_config:erase_all() + end. 
+ +t_is_expired_3(_) -> + try + emqx_config:put( + maps:from_list([{list_to_atom(Root), #{}} || Root <- emqx_zone_schema:roots()]) + ), + emqx_config:put_zone_conf(?MODULE, [mqtt, message_expiry_interval], 100), + Msg = emqx_message:make(<<"clientid">>, <<"topic">>, <<"payload">>), + ?assertNot(emqx_message:is_expired(Msg, ?MODULE)), + timer:sleep(120), + ?assert(emqx_message:is_expired(Msg, ?MODULE)) + after + emqx_config:erase_all() + end. + +test_msg_expired_property(Zone) -> Msg = emqx_message:make(<<"clientid">>, <<"topic">>, <<"payload">>), - ?assertNot(emqx_message:is_expired(Msg)), + ?assertNot(emqx_message:is_expired(Msg, Zone)), Msg1 = emqx_message:set_headers(#{properties => #{'Message-Expiry-Interval' => 1}}, Msg), timer:sleep(500), - ?assertNot(emqx_message:is_expired(Msg1)), + ?assertNot(emqx_message:is_expired(Msg1, Zone)), timer:sleep(600), - ?assert(emqx_message:is_expired(Msg1)), + ?assert(emqx_message:is_expired(Msg1, Zone)). + +t_update_expired(_) -> + Msg = emqx_message:make(<<"clientid">>, <<"topic">>, <<"payload">>), timer:sleep(1000), - Msg = emqx_message:update_expiry(Msg), - Msg2 = emqx_message:update_expiry(Msg1), - Props = emqx_message:get_header(properties, Msg2), + ?assertEqual(Msg, emqx_message:update_expiry(Msg)), + Msg1 = emqx_message:set_headers(#{properties => #{'Message-Expiry-Interval' => 1}}, Msg), + Props = emqx_message:get_header(properties, emqx_message:update_expiry(Msg1)), ?assertEqual(1, maps:get('Message-Expiry-Interval', Props)). % t_to_list(_) -> diff --git a/apps/emqx/test/emqx_persistent_messages_SUITE.erl b/apps/emqx/test/emqx_persistent_messages_SUITE.erl index f25f38098..0ca1daa1c 100644 --- a/apps/emqx/test/emqx_persistent_messages_SUITE.erl +++ b/apps/emqx/test/emqx_persistent_messages_SUITE.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. 
All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -19,6 +19,7 @@ -include_lib("stdlib/include/assert.hrl"). -include_lib("common_test/include/ct.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("emqx/include/emqx.hrl"). -include_lib("emqx/include/emqx_mqtt.hrl"). -compile(export_all). @@ -45,10 +46,20 @@ init_per_testcase(t_session_subscription_iterators = TestCase, Config) -> Cluster = cluster(), Nodes = emqx_cth_cluster:start(Cluster, #{work_dir => emqx_cth_suite:work_dir(TestCase, Config)}), [{nodes, Nodes} | Config]; +init_per_testcase(t_message_gc = TestCase, Config) -> + Opts = #{ + extra_emqx_conf => + "\n session_persistence.message_retention_period = 1s" + "\n session_persistence.storage.builtin.n_shards = 3" + }, + common_init_per_testcase(TestCase, [{n_shards, 3} | Config], Opts); init_per_testcase(TestCase, Config) -> + common_init_per_testcase(TestCase, Config, _Opts = #{}). + +common_init_per_testcase(TestCase, Config, Opts) -> ok = emqx_ds:drop_db(?PERSISTENT_MESSAGE_DB), Apps = emqx_cth_suite:start( - app_specs(), + app_specs(Opts), #{work_dir => emqx_cth_suite:work_dir(TestCase, Config)} ), [{apps, Apps} | Config]. @@ -205,31 +216,7 @@ t_session_subscription_iterators(Config) -> messages => [Message1, Message2, Message3, Message4] } end, - fun(Trace) -> - ct:pal("trace:\n ~p", [Trace]), - case ?of_kind(ds_session_subscription_added, Trace) of - [] -> - %% Since `emqx_durable_storage' is a dependency of `emqx', it gets - %% compiled in "prod" mode when running emqx standalone tests. - ok; - [_ | _] -> - ?assertMatch( - [ - #{?snk_kind := ds_session_subscription_added}, - #{?snk_kind := ds_session_subscription_present} - ], - ?of_kind( - [ - ds_session_subscription_added, - ds_session_subscription_present - ], - Trace - ) - ), - ok - end, - ok - end + [] ), ok. 
@@ -307,11 +294,6 @@ t_qos0_only_many_streams(_Config) -> receive_messages(3) ), - ?assertMatch( - #{pubranges := [_, _, _]}, - emqx_persistent_session_ds:print_session(ClientId) - ), - Inflight1 = get_session_inflight(ConnPid), %% TODO: Kinda stupid way to verify that the runtime state is not growing. @@ -365,7 +347,7 @@ t_publish_empty_topic_levels(_Config) -> {<<"t/3/bar">>, <<"6">>} ], [emqtt:publish(Pub, Topic, Payload, ?QOS_1) || {Topic, Payload} <- Messages], - Received = receive_messages(length(Messages), 1_500), + Received = receive_messages(length(Messages)), ?assertMatch( [ #{topic := <<"t//1/">>, payload := <<"2">>}, @@ -379,6 +361,100 @@ t_publish_empty_topic_levels(_Config) -> emqtt:stop(Pub) end. +t_message_gc_too_young(_Config) -> + %% Check that GC doesn't attempt to create a new generation if there are fresh enough + %% generations around. The stability of this test relies on the default value for + %% message retention being long enough. Currently, the default is 1 hour. + ?check_trace( + ok = emqx_persistent_message_ds_gc_worker:gc(), + fun(Trace) -> + ?assertMatch([_], ?of_kind(ps_message_gc_too_early, Trace)), + ok + end + ), + ok. + +t_message_gc(Config) -> + %% Check that, after GC runs, a new generation is created, retaining messages, and + %% older messages no longer are accessible. 
+ NShards = ?config(n_shards, Config), + ?check_trace( + #{timetrap => 10_000}, + begin + %% ensure some messages are in the first generation + ?force_ordering( + #{?snk_kind := inserted_batch}, + #{?snk_kind := ps_message_gc_added_gen} + ), + Msgs0 = [ + message(<<"foo/bar">>, <<"1">>, 0), + message(<<"foo/baz">>, <<"2">>, 1) + ], + ok = emqx_ds:store_batch(?PERSISTENT_MESSAGE_DB, Msgs0), + ?tp(inserted_batch, #{}), + {ok, _} = ?block_until(#{?snk_kind := ps_message_gc_added_gen}), + + Now = emqx_message:timestamp_now(), + Msgs1 = [ + message(<<"foo/bar">>, <<"3">>, Now + 100), + message(<<"foo/baz">>, <<"4">>, Now + 101) + ], + ok = emqx_ds:store_batch(?PERSISTENT_MESSAGE_DB, Msgs1), + + {ok, _} = snabbkaffe:block_until( + ?match_n_events(NShards, #{?snk_kind := message_gc_generation_dropped}), + infinity + ), + + TopicFilter = emqx_topic:words(<<"#">>), + StartTime = 0, + Msgs = consume(TopicFilter, StartTime), + %% "1" and "2" should have been GC'ed + PresentMessages = sets:from_list( + [emqx_message:payload(Msg) || Msg <- Msgs], + [{version, 2}] + ), + ?assert( + sets:is_empty( + sets:intersection( + PresentMessages, + sets:from_list([<<"1">>, <<"2">>], [{version, 2}]) + ) + ), + #{present_messages => PresentMessages} + ), + + ok + end, + [] + ), + ok. + +t_metrics_not_dropped(_Config) -> + %% Asserts that, if only persisted sessions are subscribed to a topic being published + %% to, we don't bump the `message.dropped' metric, nor we run the equivalent hook. 
+ Sub = connect(<>, true, 30), + on_exit(fun() -> emqtt:stop(Sub) end), + Pub = connect(<>, true, 30), + on_exit(fun() -> emqtt:stop(Pub) end), + Hookpoint = 'message.dropped', + emqx_hooks:add(Hookpoint, {?MODULE, on_message_dropped, [self()]}, 1_000), + on_exit(fun() -> emqx_hooks:del(Hookpoint, {?MODULE, on_message_dropped}) end), + + DroppedBefore = emqx_metrics:val('messages.dropped'), + DroppedNoSubBefore = emqx_metrics:val('messages.dropped.no_subscribers'), + + {ok, _, [?RC_GRANTED_QOS_1]} = emqtt:subscribe(Sub, <<"t/+">>, ?QOS_1), + emqtt:publish(Pub, <<"t/ps">>, <<"payload">>, ?QOS_1), + ?assertMatch([_], receive_messages(1)), + DroppedAfter = emqx_metrics:val('messages.dropped'), + DroppedNoSubAfter = emqx_metrics:val('messages.dropped.no_subscribers'), + + ?assertEqual(DroppedBefore, DroppedAfter), + ?assertEqual(DroppedNoSubBefore, DroppedNoSubAfter), + + ok. + %% connect(ClientId, CleanStart, EI) -> @@ -419,7 +495,7 @@ consume(It) -> end. receive_messages(Count) -> - receive_messages(Count, 5_000). + receive_messages(Count, 10_000). receive_messages(Count, Timeout) -> lists:reverse(receive_messages(Count, [], Timeout)). @@ -438,9 +514,13 @@ publish(Node, Message) -> erpc:call(Node, emqx, publish, [Message]). app_specs() -> + app_specs(_Opts = #{}). + +app_specs(Opts) -> + ExtraEMQXConf = maps:get(extra_emqx_conf, Opts, ""), [ emqx_durable_storage, - {emqx, "session_persistence {enable = true}"} + {emqx, "session_persistence {enable = true}" ++ ExtraEMQXConf} ]. cluster() -> @@ -459,3 +539,16 @@ clear_db() -> mria:stop(), ok = mnesia:delete_schema([node()]), ok. + +message(Topic, Payload, PublishedAt) -> + #message{ + topic = Topic, + payload = Payload, + timestamp = PublishedAt, + id = emqx_guid:gen() + }. + +on_message_dropped(Msg, Context, Res, TestPid) -> + ErrCtx = #{msg => Msg, ctx => Context, res => Res}, + ct:pal("this hook should not be called.\n ~p", [ErrCtx]), + exit(TestPid, {hookpoint_called, ErrCtx}). 
diff --git a/apps/emqx/test/emqx_persistent_session_SUITE.erl b/apps/emqx/test/emqx_persistent_session_SUITE.erl index 09cbf306d..a5c171f67 100644 --- a/apps/emqx/test/emqx_persistent_session_SUITE.erl +++ b/apps/emqx/test/emqx_persistent_session_SUITE.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2021-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2021-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -53,7 +53,7 @@ all() -> groups() -> TCs = emqx_common_test_helpers:all(?MODULE), - TCsNonGeneric = [t_choose_impl], + TCsNonGeneric = [t_choose_impl, t_transient], TCGroups = [{group, tcp}, {group, quic}, {group, ws}], [ {persistence_disabled, TCGroups}, @@ -71,7 +71,12 @@ init_per_group(persistence_disabled, Config) -> ]; init_per_group(persistence_enabled, Config) -> [ - {emqx_config, "session_persistence { enable = true }"}, + {emqx_config, + "session_persistence {\n" + " enable = true\n" + " last_alive_update_interval = 100ms\n" + " renew_streams_interval = 100ms\n" + "}"}, {persistence, ds} | Config ]; @@ -260,7 +265,15 @@ messages(Topic, Payloads) -> messages(Topic, Payloads, ?QOS_2). messages(Topic, Payloads, QoS) -> - [#mqtt_msg{topic = Topic, payload = P, qos = QoS} || P <- Payloads]. + lists:map( + fun + (Bin) when is_binary(Bin) -> + #mqtt_msg{topic = Topic, payload = Bin, qos = QoS}; + (Msg = #mqtt_msg{}) -> + Msg#mqtt_msg{topic = Topic} + end, + Payloads + ). publish(Topic, Payload) -> publish(Topic, Payload, ?QOS_2). @@ -273,7 +286,10 @@ publish_many(Messages) -> publish_many(Messages, WaitForUnregister) -> Fun = fun(Client, Message) -> - {ok, _} = emqtt:publish(Client, Message) + case emqtt:publish(Client, Message) of + ok -> ok; + {ok, _} -> ok + end end, do_publish(Messages, Fun, WaitForUnregister). 
@@ -530,42 +546,47 @@ t_process_dies_session_expires(Config) -> %% Emulate an error in the connect process, %% or that the node of the process goes down. %% A persistent session should eventually expire. - ConnFun = ?config(conn_fun, Config), - ClientId = ?config(client_id, Config), - Topic = ?config(topic, Config), - STopic = ?config(stopic, Config), - Payload = <<"test">>, - {ok, Client1} = emqtt:start_link([ - {proto_ver, v5}, - {clientid, ClientId}, - {properties, #{'Session-Expiry-Interval' => 1}}, - {clean_start, true} - | Config - ]), - {ok, _} = emqtt:ConnFun(Client1), - {ok, _, [2]} = emqtt:subscribe(Client1, STopic, qos2), - ok = emqtt:disconnect(Client1), + ?check_trace( + begin + ConnFun = ?config(conn_fun, Config), + ClientId = ?config(client_id, Config), + Topic = ?config(topic, Config), + STopic = ?config(stopic, Config), + Payload = <<"test">>, + {ok, Client1} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 1}}, + {clean_start, true} + | Config + ]), + {ok, _} = emqtt:ConnFun(Client1), + {ok, _, [2]} = emqtt:subscribe(Client1, STopic, qos2), + ok = emqtt:disconnect(Client1), - maybe_kill_connection_process(ClientId, Config), + maybe_kill_connection_process(ClientId, Config), - ok = publish(Topic, Payload), + ok = publish(Topic, Payload), - timer:sleep(1100), + timer:sleep(1500), - {ok, Client2} = emqtt:start_link([ - {proto_ver, v5}, - {clientid, ClientId}, - {properties, #{'Session-Expiry-Interval' => 30}}, - {clean_start, false} - | Config - ]), - {ok, _} = emqtt:ConnFun(Client2), - ?assertEqual(0, client_info(session_present, Client2)), + {ok, Client2} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, false} + | Config + ]), + {ok, _} = emqtt:ConnFun(Client2), + ?assertEqual(0, client_info(session_present, Client2)), - %% We should not receive the pending message - ?assertEqual([], receive_messages(1)), + 
%% We should not receive the pending message + ?assertEqual([], receive_messages(1)), - emqtt:disconnect(Client2). + emqtt:disconnect(Client2) + end, + [] + ). t_publish_while_client_is_gone_qos1(Config) -> %% A persistent session should receive messages in its @@ -672,6 +693,7 @@ t_publish_many_while_client_is_gone_qos1(Config) -> ), NAcked = 4, + ?assert(NMsgs1 >= NAcked), [ok = emqtt:puback(Client1, PktId) || #{packet_id := PktId} <- lists:sublist(Msgs1, NAcked)], %% Ensure that PUBACKs are propagated to the channel. @@ -681,7 +703,7 @@ t_publish_many_while_client_is_gone_qos1(Config) -> maybe_kill_connection_process(ClientId, Config), Pubs2 = [ - #mqtt_msg{topic = <<"loc/3/4/5">>, payload = <<"M8">>, qos = 1}, + #mqtt_msg{topic = <<"loc/3/4/6">>, payload = <<"M8">>, qos = 1}, #mqtt_msg{topic = <<"t/100/foo">>, payload = <<"M9">>, qos = 1}, #mqtt_msg{topic = <<"t/100/foo">>, payload = <<"M10">>, qos = 1}, #mqtt_msg{topic = <<"msg/feed/friend">>, payload = <<"M11">>, qos = 1}, @@ -690,27 +712,30 @@ t_publish_many_while_client_is_gone_qos1(Config) -> ok = publish_many(Pubs2), NPubs2 = length(Pubs2), + %% Now reconnect with auto ack to make sure all streams are + %% replayed till the end: {ok, Client2} = emqtt:start_link([ {proto_ver, v5}, {clientid, ClientId}, {properties, #{'Session-Expiry-Interval' => 30}}, - {clean_start, false}, - {auto_ack, false} + {clean_start, false} | Config ]), + {ok, _} = emqtt:ConnFun(Client2), %% Try to receive _at most_ `NPubs` messages. %% There shouldn't be that much unacked messages in the replay anyway, %% but it's an easy number to pick. 
NPubs = NPubs1 + NPubs2, + Msgs2 = receive_messages(NPubs, _Timeout = 2000), NMsgs2 = length(Msgs2), ct:pal("Msgs2 = ~p", [Msgs2]), - ?assert(NMsgs2 < NPubs, Msgs2), - ?assert(NMsgs2 > NPubs2, Msgs2), + ?assert(NMsgs2 < NPubs, {NMsgs2, '<', NPubs}), + ?assert(NMsgs2 > NPubs2, {NMsgs2, '>', NPubs2}), ?assert(NMsgs2 >= NPubs - NAcked, Msgs2), NSame = NMsgs2 - NPubs2, ?assert( @@ -773,6 +798,11 @@ t_publish_many_while_client_is_gone(Config) -> %% for its subscriptions after the client dies or reconnects, in addition %% to PUBRELs for the messages it has PUBRECed. While client must send %% PUBACKs and PUBRECs in order, those orders are independent of each other. + %% + %% Developer's note: for simplicity we publish all messages to the + %% same topic, since persistent session ds may reorder messages + %% that belong to different streams, and this particular test is + %% very sensitive the order. ClientId = ?config(client_id, Config), ConnFun = ?config(conn_fun, Config), ClientOpts = [ @@ -785,20 +815,18 @@ t_publish_many_while_client_is_gone(Config) -> {ok, Client1} = emqtt:start_link([{clean_start, true} | ClientOpts]), {ok, _} = emqtt:ConnFun(Client1), - {ok, _, [?QOS_1]} = emqtt:subscribe(Client1, <<"t/+/foo">>, ?QOS_1), - {ok, _, [?QOS_2]} = emqtt:subscribe(Client1, <<"msg/feed/#">>, ?QOS_2), - {ok, _, [?QOS_2]} = emqtt:subscribe(Client1, <<"loc/+/+/+">>, ?QOS_2), + {ok, _, [?QOS_2]} = emqtt:subscribe(Client1, <<"t">>, ?QOS_2), Pubs1 = [ - #mqtt_msg{topic = <<"t/42/foo">>, payload = <<"M1">>, qos = 1}, - #mqtt_msg{topic = <<"t/42/foo">>, payload = <<"M2">>, qos = 1}, - #mqtt_msg{topic = <<"msg/feed/me">>, payload = <<"M3">>, qos = 2}, - #mqtt_msg{topic = <<"loc/1/2/42">>, payload = <<"M4">>, qos = 2}, - #mqtt_msg{topic = <<"t/100/foo">>, payload = <<"M5">>, qos = 2}, - #mqtt_msg{topic = <<"t/100/foo">>, payload = <<"M6">>, qos = 1}, - #mqtt_msg{topic = <<"loc/3/4/5">>, payload = <<"M7">>, qos = 2}, - #mqtt_msg{topic = <<"t/100/foo">>, payload = <<"M8">>, qos = 1}, 
- #mqtt_msg{topic = <<"msg/feed/me">>, payload = <<"M9">>, qos = 2} + #mqtt_msg{topic = <<"t">>, payload = <<"M1">>, qos = 1}, + #mqtt_msg{topic = <<"t">>, payload = <<"M2">>, qos = 1}, + #mqtt_msg{topic = <<"t">>, payload = <<"M3">>, qos = 2}, + #mqtt_msg{topic = <<"t">>, payload = <<"M4">>, qos = 2}, + #mqtt_msg{topic = <<"t">>, payload = <<"M5">>, qos = 2}, + #mqtt_msg{topic = <<"t">>, payload = <<"M6">>, qos = 1}, + #mqtt_msg{topic = <<"t">>, payload = <<"M7">>, qos = 2}, + #mqtt_msg{topic = <<"t">>, payload = <<"M8">>, qos = 1}, + #mqtt_msg{topic = <<"t">>, payload = <<"M9">>, qos = 2} ], ok = publish_many(Pubs1), NPubs1 = length(Pubs1), @@ -806,11 +834,12 @@ t_publish_many_while_client_is_gone(Config) -> Msgs1 = receive_messages(NPubs1), ct:pal("Msgs1 = ~p", [Msgs1]), NMsgs1 = length(Msgs1), - ?assertEqual(NPubs1, NMsgs1), + ?assertEqual(NPubs1, NMsgs1, emqx_persistent_session_ds:print_session(ClientId)), ?assertEqual( get_topicwise_order(Pubs1), - get_topicwise_order(Msgs1) + get_topicwise_order(Msgs1), + emqx_persistent_session_ds:print_session(ClientId) ), %% PUBACK every QoS 1 message. @@ -819,7 +848,7 @@ t_publish_many_while_client_is_gone(Config) -> [PktId || #{qos := 1, packet_id := PktId} <- Msgs1] ), - %% PUBREC first `NRecs` QoS 2 messages. 
+ %% PUBREC first `NRecs` QoS 2 messages (up to "M5") NRecs = 3, PubRecs1 = lists:sublist([PktId || #{qos := 2, packet_id := PktId} <- Msgs1], NRecs), lists:foreach( @@ -843,9 +872,9 @@ t_publish_many_while_client_is_gone(Config) -> maybe_kill_connection_process(ClientId, Config), Pubs2 = [ - #mqtt_msg{topic = <<"loc/3/4/5">>, payload = <<"M10">>, qos = 2}, - #mqtt_msg{topic = <<"t/100/foo">>, payload = <<"M11">>, qos = 1}, - #mqtt_msg{topic = <<"msg/feed/friend">>, payload = <<"M12">>, qos = 2} + #mqtt_msg{topic = <<"t">>, payload = <<"M10">>, qos = 2}, + #mqtt_msg{topic = <<"t">>, payload = <<"M11">>, qos = 1}, + #mqtt_msg{topic = <<"t">>, payload = <<"M12">>, qos = 2} ], ok = publish_many(Pubs2), NPubs2 = length(Pubs2), @@ -878,8 +907,8 @@ t_publish_many_while_client_is_gone(Config) -> Msgs2Dups ), - %% Now complete all yet incomplete QoS 2 message flows instead. - PubRecs2 = [PktId || #{qos := 2, packet_id := PktId} <- Msgs2], + %% Ack more messages: + PubRecs2 = lists:sublist([PktId || #{qos := 2, packet_id := PktId} <- Msgs2], 2), lists:foreach( fun(PktId) -> ok = emqtt:pubrec(Client2, PktId) end, PubRecs2 @@ -895,6 +924,7 @@ t_publish_many_while_client_is_gone(Config) -> %% PUBCOMP every PUBREL. PubComps = [PktId || {pubrel, #{packet_id := PktId}} <- PubRels1 ++ PubRels2], + ct:pal("PubComps: ~p", [PubComps]), lists:foreach( fun(PktId) -> ok = emqtt:pubcomp(Client2, PktId) end, PubComps @@ -902,19 +932,19 @@ t_publish_many_while_client_is_gone(Config) -> %% Ensure that PUBCOMPs are propagated to the channel. pong = emqtt:ping(Client2), - + %% Reconnect for the last time ok = disconnect_client(Client2), maybe_kill_connection_process(ClientId, Config), {ok, Client3} = emqtt:start_link([{clean_start, false} | ClientOpts]), {ok, _} = emqtt:ConnFun(Client3), - %% Only the last unacked QoS 1 message should be retransmitted. 
+ %% Check that we receive the rest of the messages: Msgs3 = receive_messages(NPubs, _Timeout = 2000), ct:pal("Msgs3 = ~p", [Msgs3]), ?assertMatch( - [#{topic := <<"t/100/foo">>, payload := <<"M11">>, qos := 1, dup := true}], - Msgs3 + [<<"M10">>, <<"M11">>, <<"M12">>], + [I || #{payload := I} <- Msgs3] ), ok = disconnect_client(Client3). @@ -1007,6 +1037,167 @@ t_unsubscribe(Config) -> ?assertMatch([], [Sub || {ST, _} = Sub <- emqtt:subscriptions(Client), ST =:= STopic]), ok = emqtt:disconnect(Client). +%% This testcase verifies that un-acked messages that were once sent +%% to the client are still retransmitted after the session +%% unsubscribes from the topic and reconnects. +t_unsubscribe_replay(Config) -> + ConnFun = ?config(conn_fun, Config), + TopicPrefix = ?config(topic, Config), + ClientId = atom_to_binary(?FUNCTION_NAME), + ClientOpts = [ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30, 'Receive-Maximum' => 10}}, + {max_inflight, 10} + | Config + ], + {ok, Sub} = emqtt:start_link([{clean_start, true}, {auto_ack, never} | ClientOpts]), + {ok, _} = emqtt:ConnFun(Sub), + %% 1. Make two subscriptions, one is to be deleted: + Topic1 = iolist_to_binary([TopicPrefix, $/, <<"unsub">>]), + Topic2 = iolist_to_binary([TopicPrefix, $/, <<"sub">>]), + ?assertMatch({ok, _, _}, emqtt:subscribe(Sub, Topic1, qos2)), + ?assertMatch({ok, _, _}, emqtt:subscribe(Sub, Topic2, qos2)), + %% 2. Publish 2 messages to the first and second topics each + %% (client doesn't ack them): + ok = publish(Topic1, <<"1">>, ?QOS_1), + ok = publish(Topic1, <<"2">>, ?QOS_2), + [Msg1, Msg2] = receive_messages(2), + ?assertMatch( + [ + #{payload := <<"1">>}, + #{payload := <<"2">>} + ], + [Msg1, Msg2] + ), + ok = publish(Topic2, <<"3">>, ?QOS_1), + ok = publish(Topic2, <<"4">>, ?QOS_2), + [Msg3, Msg4] = receive_messages(2), + ?assertMatch( + [ + #{payload := <<"3">>}, + #{payload := <<"4">>} + ], + [Msg3, Msg4] + ), + %% 3. 
Unsubscribe from the topic and disconnect: + ?assertMatch({ok, _, _}, emqtt:unsubscribe(Sub, Topic1)), + ok = emqtt:disconnect(Sub), + %% 5. Publish more messages to the disconnected topic: + ok = publish(Topic1, <<"5">>, ?QOS_1), + ok = publish(Topic1, <<"6">>, ?QOS_2), + %% 4. Reconnect the client. It must only receive only four + %% messages from the time when it was subscribed: + {ok, Sub1} = emqtt:start_link([{clean_start, false}, {auto_ack, true} | ClientOpts]), + ?assertMatch({ok, _}, emqtt:ConnFun(Sub1)), + %% Note: we ask for 6 messages, but expect only 4, it's + %% intentional: + ?assertMatch( + #{ + Topic1 := [<<"1">>, <<"2">>], + Topic2 := [<<"3">>, <<"4">>] + }, + get_topicwise_order(receive_messages(6, 5_000)), + debug_info(ClientId) + ), + %% 5. Now let's resubscribe, and check that the session can receive new messages: + ?assertMatch({ok, _, _}, emqtt:subscribe(Sub1, Topic1, qos2)), + ok = publish(Topic1, <<"7">>, ?QOS_0), + ok = publish(Topic1, <<"8">>, ?QOS_1), + ok = publish(Topic1, <<"9">>, ?QOS_2), + ?assertMatch( + [<<"7">>, <<"8">>, <<"9">>], + lists:map(fun get_msgpub_payload/1, receive_messages(3)) + ), + ok = emqtt:disconnect(Sub1). + +%% This testcase verifies that persistent sessions handle "transient" +%% mesages correctly. +%% +%% Transient messages are delivered to the channel directly, bypassing +%% the broker code that decides whether the messages should be +%% persisted or not, and therefore they are not persisted. +%% +%% `emqx_retainer' is an example of application that uses this +%% mechanism. +%% +%% This testcase creates the conditions when the transient messages +%% appear in the middle of the replay, to make sure the durable +%% session doesn't get confused and/or stuck if retained messages are +%% changed while the session was down. 
+t_transient(Config) -> + ConnFun = ?config(conn_fun, Config), + TopicPrefix = ?config(topic, Config), + ClientId = atom_to_binary(?FUNCTION_NAME), + ClientOpts = [ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30, 'Receive-Maximum' => 100}}, + {max_inflight, 100} + | Config + ], + Deliver = fun(Topic, Payload, QoS) -> + [Pid] = emqx_cm:lookup_channels(ClientId), + Msg = emqx_message:make(_From = <<"test">>, QoS, Topic, Payload), + Pid ! {deliver, Topic, Msg} + end, + Topic1 = <>, + Topic2 = <>, + Topic3 = <>, + %% 1. Start the client and subscribe to the topic: + {ok, Sub} = emqtt:start_link([{clean_start, true}, {auto_ack, never} | ClientOpts]), + ?assertMatch({ok, _}, emqtt:ConnFun(Sub)), + ?assertMatch({ok, _, _}, emqtt:subscribe(Sub, <>, qos2)), + %% 2. Publish regular messages: + publish(Topic1, <<"1">>, ?QOS_1), + publish(Topic1, <<"2">>, ?QOS_2), + Msgs1 = receive_messages(2), + [#{payload := <<"1">>, packet_id := PI1}, #{payload := <<"2">>, packet_id := PI2}] = Msgs1, + %% 3. Publish and recieve transient messages: + Deliver(Topic2, <<"3">>, ?QOS_0), + Deliver(Topic2, <<"4">>, ?QOS_1), + Deliver(Topic2, <<"5">>, ?QOS_2), + Msgs2 = receive_messages(3), + ?assertMatch( + [ + #{payload := <<"3">>, qos := ?QOS_0}, + #{payload := <<"4">>, qos := ?QOS_1}, + #{payload := <<"5">>, qos := ?QOS_2} + ], + Msgs2 + ), + %% 4. Publish more regular messages: + publish(Topic3, <<"6">>, ?QOS_1), + publish(Topic3, <<"7">>, ?QOS_2), + Msgs3 = receive_messages(2), + [#{payload := <<"6">>, packet_id := PI6}, #{payload := <<"7">>, packet_id := PI7}] = Msgs3, + %% 5. Reconnect the client: + ok = emqtt:disconnect(Sub), + {ok, Sub1} = emqtt:start_link([{clean_start, false}, {auto_ack, true} | ClientOpts]), + ?assertMatch({ok, _}, emqtt:ConnFun(Sub1)), + %% 6. Recieve the historic messages and check that their packet IDs didn't change: + %% Note: durable session currenty WON'T replay transient messages. 
+ ProcessMessage = fun(#{payload := P, packet_id := ID}) -> {ID, P} end, + ?assertMatch( + #{ + Topic1 := [{PI1, <<"1">>}, {PI2, <<"2">>}], + Topic3 := [{PI6, <<"6">>}, {PI7, <<"7">>}] + }, + maps:groups_from_list(fun get_msgpub_topic/1, ProcessMessage, receive_messages(7, 5_000)) + ), + %% 7. Finish off by sending messages to all the topics to make + %% sure none of the streams are blocked: + [publish(T, <<"fin">>, ?QOS_2) || T <- [Topic1, Topic2, Topic3]], + ?assertMatch( + #{ + Topic1 := [<<"fin">>], + Topic2 := [<<"fin">>], + Topic3 := [<<"fin">>] + }, + get_topicwise_order(receive_messages(3)) + ), + ok = emqtt:disconnect(Sub1). + t_multiple_subscription_matches(Config) -> ConnFun = ?config(conn_fun, Config), Topic = ?config(topic, Config), @@ -1073,10 +1264,6 @@ get_msgs_essentials(Msgs) -> pick_respective_msgs(MsgRefs, Msgs) -> [M || M <- Msgs, Ref <- MsgRefs, maps:get(packet_id, M) =:= maps:get(packet_id, Ref)]. -skip_ds_tc(Config) -> - case ?config(persistence, Config) of - ds -> - {skip, "Testcase not yet supported under 'emqx_persistent_session_ds' implementation"}; - _ -> - Config - end. +debug_info(ClientId) -> + Info = emqx_persistent_session_ds:print_session(ClientId), + ct:pal("*** State:~n~p", [Info]). diff --git a/apps/emqx/test/emqx_persistent_session_ds_state_tests.erl b/apps/emqx/test/emqx_persistent_session_ds_state_tests.erl new file mode 100644 index 000000000..61e0575a8 --- /dev/null +++ b/apps/emqx/test/emqx_persistent_session_ds_state_tests.erl @@ -0,0 +1,373 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_persistent_session_ds_state_tests). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("proper/include/proper.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-define(tab, ?MODULE). + +%%================================================================================ +%% Type declarations +%%================================================================================ + +%% Note: here `committed' != `dirty'. It means "has been committed at +%% least once since the creation", and it's used by the iteration +%% test. +-record(s, {subs = #{}, metadata = #{}, streams = #{}, seqno = #{}, committed = false}). + +-type state() :: #{emqx_persistent_session_ds:id() => #s{}}. + +%%================================================================================ +%% Properties +%%================================================================================ + +seqno_proper_test_() -> + Props = [prop_consistency()], + Opts = [{numtests, 10}, {to_file, user}, {max_size, 100}], + {timeout, 300, [?_assert(proper:quickcheck(Prop, Opts)) || Prop <- Props]}. + +prop_consistency() -> + ?FORALL( + Cmds, + commands(?MODULE), + begin + init(), + {_History, State, Result} = run_commands(?MODULE, Cmds), + clean(), + ?WHENFAIL( + io:format( + user, + "Operations: ~p~nState: ~p\nResult: ~p~n", + [Cmds, State, Result] + ), + aggregate(command_names(Cmds), Result =:= ok) + ) + end + ). 
+ +%%================================================================================ +%% Generators +%%================================================================================ + +-define(n_sessions, 10). + +session_id() -> + oneof([integer_to_binary(I) || I <- lists:seq(1, ?n_sessions)]). + +topic() -> + oneof([<<"foo">>, <<"bar">>, <<"foo/#">>, <<"//+/#">>]). + +subid() -> + oneof([[]]). + +subscription() -> + oneof([#{}]). + +session_id(S) -> + oneof(maps:keys(S)). + +batch_size() -> + range(1, ?n_sessions). + +put_metadata() -> + oneof([ + ?LET( + Val, + range(0, 100), + {last_alive_at, set_last_alive_at, Val} + ), + ?LET( + Val, + range(0, 100), + {created_at, set_created_at, Val} + ) + ]). + +get_metadata() -> + oneof([ + {last_alive_at, get_last_alive_at}, + {created_at, get_created_at} + ]). + +seqno_track() -> + range(0, 1). + +seqno() -> + range(1, 100). + +stream_id() -> + range(1, 1). + +stream() -> + oneof([#{}]). + +put_req() -> + oneof([ + ?LET( + {Id, Stream}, + {stream_id(), stream()}, + {#s.streams, put_stream, Id, Stream} + ), + ?LET( + {Track, Seqno}, + {seqno_track(), seqno()}, + {#s.seqno, put_seqno, Track, Seqno} + ) + ]). + +get_req() -> + oneof([ + {#s.streams, get_stream, stream_id()}, + {#s.seqno, get_seqno, seqno_track()} + ]). + +del_req() -> + oneof([ + {#s.streams, del_stream, stream_id()} + ]). 
+ +command(S) -> + case maps:size(S) > 0 of + true -> + frequency([ + %% Global CRUD operations: + {1, {call, ?MODULE, create_new, [session_id()]}}, + {1, {call, ?MODULE, delete, [session_id(S)]}}, + {2, {call, ?MODULE, reopen, [session_id(S)]}}, + {2, {call, ?MODULE, commit, [session_id(S)]}}, + + %% Subscriptions: + {3, + {call, ?MODULE, put_subscription, [ + session_id(S), topic(), subid(), subscription() + ]}}, + {3, {call, ?MODULE, del_subscription, [session_id(S), topic(), subid()]}}, + + %% Metadata: + {3, {call, ?MODULE, put_metadata, [session_id(S), put_metadata()]}}, + {3, {call, ?MODULE, get_metadata, [session_id(S), get_metadata()]}}, + + %% Key-value: + {3, {call, ?MODULE, gen_put, [session_id(S), put_req()]}}, + {3, {call, ?MODULE, gen_get, [session_id(S), get_req()]}}, + {3, {call, ?MODULE, gen_del, [session_id(S), del_req()]}}, + + %% Getters: + {4, {call, ?MODULE, get_subscriptions, [session_id(S)]}}, + {1, {call, ?MODULE, iterate_sessions, [batch_size()]}} + ]); + false -> + frequency([ + {1, {call, ?MODULE, create_new, [session_id()]}}, + {1, {call, ?MODULE, iterate_sessions, [batch_size()]}} + ]) + end. + +precondition(_, _) -> + true. 
+ +postcondition(S, {call, ?MODULE, iterate_sessions, [_]}, Result) -> + {Sessions, _} = lists:unzip(Result), + %% No lingering sessions: + ?assertMatch([], Sessions -- maps:keys(S)), + %% All committed sessions are visited by the iterator: + CommittedSessions = lists:sort([K || {K, #s{committed = true}} <- maps:to_list(S)]), + ?assertMatch([], CommittedSessions -- Sessions), + true; +postcondition(S, {call, ?MODULE, get_metadata, [SessionId, {MetaKey, _Fun}]}, Result) -> + #{SessionId := #s{metadata = Meta}} = S, + ?assertEqual( + maps:get(MetaKey, Meta, undefined), + Result, + #{session_id => SessionId, meta => MetaKey} + ), + true; +postcondition(S, {call, ?MODULE, gen_get, [SessionId, {Idx, Fun, Key}]}, Result) -> + #{SessionId := Record} = S, + ?assertEqual( + maps:get(Key, element(Idx, Record), undefined), + Result, + #{session_id => SessionId, key => Key, 'fun' => Fun} + ), + true; +postcondition(S, {call, ?MODULE, get_subscriptions, [SessionId]}, Result) -> + #{SessionId := #s{subs = Subs}} = S, + ?assertEqual(maps:size(Subs), emqx_topic_gbt:size(Result)), + maps:foreach( + fun({TopicFilter, Id}, Expected) -> + ?assertEqual( + Expected, + emqx_topic_gbt:lookup(TopicFilter, Id, Result, default) + ) + end, + Subs + ), + true; +postcondition(_, _, _) -> + true. 
+ +next_state(S, _V, {call, ?MODULE, create_new, [SessionId]}) -> + S#{SessionId => #s{}}; +next_state(S, _V, {call, ?MODULE, delete, [SessionId]}) -> + maps:remove(SessionId, S); +next_state(S, _V, {call, ?MODULE, put_subscription, [SessionId, TopicFilter, SubId, Subscription]}) -> + Key = {TopicFilter, SubId}, + update( + SessionId, + #s.subs, + fun(Subs) -> Subs#{Key => Subscription} end, + S + ); +next_state(S, _V, {call, ?MODULE, del_subscription, [SessionId, TopicFilter, SubId]}) -> + Key = {TopicFilter, SubId}, + update( + SessionId, + #s.subs, + fun(Subs) -> maps:remove(Key, Subs) end, + S + ); +next_state(S, _V, {call, ?MODULE, put_metadata, [SessionId, {Key, _Fun, Val}]}) -> + update( + SessionId, + #s.metadata, + fun(Map) -> Map#{Key => Val} end, + S + ); +next_state(S, _V, {call, ?MODULE, gen_put, [SessionId, {Idx, _Fun, Key, Val}]}) -> + update( + SessionId, + Idx, + fun(Map) -> Map#{Key => Val} end, + S + ); +next_state(S, _V, {call, ?MODULE, gen_del, [SessionId, {Idx, _Fun, Key}]}) -> + update( + SessionId, + Idx, + fun(Map) -> maps:remove(Key, Map) end, + S + ); +next_state(S, _V, {call, ?MODULE, commit, [SessionId]}) -> + update( + SessionId, + #s.committed, + fun(_) -> true end, + S + ); +next_state(S, _V, {call, ?MODULE, _, _}) -> + S. + +initial_state() -> + #{}. + +%%================================================================================ +%% Operations +%%================================================================================ + +create_new(SessionId) -> + put_state(SessionId, emqx_persistent_session_ds_state:create_new(SessionId)). + +delete(SessionId) -> + emqx_persistent_session_ds_state:delete(SessionId), + ets:delete(?tab, SessionId). + +commit(SessionId) -> + put_state(SessionId, emqx_persistent_session_ds_state:commit(get_state(SessionId))). + +reopen(SessionId) -> + _ = emqx_persistent_session_ds_state:commit(get_state(SessionId)), + {ok, S} = emqx_persistent_session_ds_state:open(SessionId), + put_state(SessionId, S). 
+ +put_subscription(SessionId, TopicFilter, SubId, Subscription) -> + S = emqx_persistent_session_ds_state:put_subscription( + TopicFilter, SubId, Subscription, get_state(SessionId) + ), + put_state(SessionId, S). + +del_subscription(SessionId, TopicFilter, SubId) -> + S = emqx_persistent_session_ds_state:del_subscription(TopicFilter, SubId, get_state(SessionId)), + put_state(SessionId, S). + +get_subscriptions(SessionId) -> + emqx_persistent_session_ds_state:get_subscriptions(get_state(SessionId)). + +put_metadata(SessionId, {_MetaKey, Fun, Value}) -> + S = apply(emqx_persistent_session_ds_state, Fun, [Value, get_state(SessionId)]), + put_state(SessionId, S). + +get_metadata(SessionId, {_MetaKey, Fun}) -> + apply(emqx_persistent_session_ds_state, Fun, [get_state(SessionId)]). + +gen_put(SessionId, {_Idx, Fun, Key, Value}) -> + S = apply(emqx_persistent_session_ds_state, Fun, [Key, Value, get_state(SessionId)]), + put_state(SessionId, S). + +gen_del(SessionId, {_Idx, Fun, Key}) -> + S = apply(emqx_persistent_session_ds_state, Fun, [Key, get_state(SessionId)]), + put_state(SessionId, S). + +gen_get(SessionId, {_Idx, Fun, Key}) -> + apply(emqx_persistent_session_ds_state, Fun, [Key, get_state(SessionId)]). + +iterate_sessions(BatchSize) -> + Fun = fun F(It0) -> + case emqx_persistent_session_ds_state:session_iterator_next(It0, BatchSize) of + {[], _} -> + []; + {Sessions, It} -> + Sessions ++ F(It) + end + end, + Fun(emqx_persistent_session_ds_state:make_session_iterator()). + +%%================================================================================ +%% Misc. +%%================================================================================ + +update(SessionId, Key, Fun, S) -> + maps:update_with( + SessionId, + fun(SS) -> + setelement(Key, SS, Fun(erlang:element(Key, SS))) + end, + S + ). + +get_state(SessionId) -> + case ets:lookup(?tab, SessionId) of + [{_, S}] -> + S; + [] -> + error({not_found, SessionId}) + end. 
+ +put_state(SessionId, S) -> + ets:insert(?tab, {SessionId, S}). + +init() -> + _ = ets:new(?tab, [named_table, public, {keypos, 1}]), + mria:start(), + emqx_persistent_session_ds_state:create_tables(). + +clean() -> + ets:delete(?tab), + mria:stop(), + mria_mnesia:delete_schema(). diff --git a/apps/emqx/test/emqx_proper_types.erl b/apps/emqx/test/emqx_proper_types.erl index 243a39007..d1b4b7554 100644 --- a/apps/emqx/test/emqx_proper_types.erl +++ b/apps/emqx/test/emqx_proper_types.erl @@ -114,8 +114,8 @@ clientinfo() -> {username, username()}, {is_bridge, boolean()}, {is_supuser, boolean()}, - {mountpoint, maybe(utf8())}, - {ws_cookie, maybe(list())} + {mountpoint, option(utf8())}, + {ws_cookie, option(list())} % password, % auth_result, % anonymous, @@ -496,7 +496,7 @@ pubsub() -> %% Basic Types %%-------------------------------------------------------------------- -maybe(T) -> +option(T) -> oneof([undefined, T]). socktype() -> @@ -522,7 +522,7 @@ clientid() -> utf8(). username() -> - maybe(utf8()). + option(utf8()). properties() -> map(limited_latin_atom(), binary()). diff --git a/apps/emqx/test/emqx_session_mem_SUITE.erl b/apps/emqx/test/emqx_session_mem_SUITE.erl index 20d622941..a539dde9a 100644 --- a/apps/emqx/test/emqx_session_mem_SUITE.erl +++ b/apps/emqx/test/emqx_session_mem_SUITE.erl @@ -545,6 +545,7 @@ clientinfo() -> clientinfo(#{}). 
clientinfo(Init) -> maps:merge( #{ + zone => ?MODULE, clientid => <<"clientid">>, username => <<"username">> }, diff --git a/apps/emqx/test/emqx_static_checks_data/5.5.bpapi2 b/apps/emqx/test/emqx_static_checks_data/5.5.bpapi2 deleted file mode 100644 index 6192442f1..000000000 --- a/apps/emqx/test/emqx_static_checks_data/5.5.bpapi2 +++ /dev/null @@ -1 +0,0 @@ -#{api => #{{emqx_node_rebalance_api,1} => #{calls => [{{emqx_node_rebalance_api_proto_v1,node_rebalance_stop,['Node']},{emqx_node_rebalance,stop,[]}},{{emqx_node_rebalance_api_proto_v1,node_rebalance_start,['Node','Opts']},{emqx_node_rebalance,start,['Opts']}},{{emqx_node_rebalance_api_proto_v1,node_rebalance_evacuation_stop,['Node']},{emqx_node_rebalance_evacuation,stop,[]}},{{emqx_node_rebalance_api_proto_v1,node_rebalance_evacuation_start,['Node','Opts']},{emqx_node_rebalance_evacuation,start,['Opts']}}],casts => []},{emqx_bridge,1} => #{calls => [{{emqx_bridge_proto_v1,lookup_from_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_api,lookup_from_local_node,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v1,stop_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,stop,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v1,restart_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,restart,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v1,stop_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,stop,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v1,restart_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,restart,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v1,list_bridges,['Node']},{emqx_bridge,list,[]}}],casts => []},{emqx_topic_metrics,1} => #{calls => 
[{{emqx_topic_metrics_proto_v1,reset,['Nodes','Topic']},{emqx_topic_metrics,reset,['Topic']}},{{emqx_topic_metrics_proto_v1,reset,['Nodes']},{emqx_topic_metrics,reset,[]}},{{emqx_topic_metrics_proto_v1,metrics,['Nodes','Topic']},{emqx_topic_metrics,metrics,['Topic']}},{{emqx_topic_metrics_proto_v1,metrics,['Nodes']},{emqx_topic_metrics,metrics,[]}}],casts => []},{emqx_license,1} => #{calls => [{{emqx_license_proto_v1,remote_connection_counts,['Nodes']},{emqx_license_resources,local_connection_count,[]}}],casts => []},{emqx_persistent_session_ds,1} => #{calls => [{{emqx_persistent_session_ds_proto_v1,close_all_iterators,['Nodes','DSSessionID']},{emqx_persistent_session_ds,do_ensure_all_iterators_closed,['DSSessionID']}},{{emqx_persistent_session_ds_proto_v1,close_iterator,['Nodes','IteratorID']},{emqx_persistent_session_ds,do_ensure_iterator_closed,['IteratorID']}},{{emqx_persistent_session_ds_proto_v1,open_iterator,['Nodes','TopicFilter','StartMS','IteratorID']},{emqx_persistent_session_ds,do_open_iterator,['TopicFilter','StartMS','IteratorID']}}],casts => []},{emqx,1} => #{calls => [{{emqx_proto_v1,delete_all_deactivated_alarms,['Node']},{emqx_alarm,delete_all_deactivated_alarms,[]}},{{emqx_proto_v1,deactivate_alarm,['Node','Name']},{emqx_alarm,deactivate,['Name']}},{{emqx_proto_v1,clean_pem_cache,['Node']},{ssl_pem_cache,clear,[]}},{{emqx_proto_v1,clean_authz_cache,['Node']},{emqx_authz_cache,drain_cache,[]}},{{emqx_proto_v1,clean_authz_cache,['Node','ClientId']},{emqx_authz_cache,drain_cache,['ClientId']}},{{emqx_proto_v1,get_metrics,['Node']},{emqx_metrics,all,[]}},{{emqx_proto_v1,get_stats,['Node']},{emqx_stats,getstats,[]}},{{emqx_proto_v1,get_alarms,['Node','Type']},{emqx_alarm,get_alarms,['Type']}},{{emqx_proto_v1,is_running,['Node']},{emqx,is_running,[]}}],casts => []},{emqx_mgmt_trace,1} => #{calls => 
[{{emqx_mgmt_trace_proto_v1,read_trace_file,['Node','Name','Position','Limit']},{emqx_mgmt_api_trace,read_trace_file,['Name','Position','Limit']}},{{emqx_mgmt_trace_proto_v1,trace_file,['Nodes','File']},{emqx_trace,trace_file,['File']}},{{emqx_mgmt_trace_proto_v1,get_trace_size,['Nodes']},{emqx_mgmt_api_trace,get_trace_size,[]}}],casts => []},{emqx_license,2} => #{calls => [{{emqx_license_proto_v2,remote_connection_counts,['Nodes']},{emqx_license_resources,local_connection_count,[]}}],casts => []},{emqx_management,4} => #{calls => [{{emqx_management_proto_v4,kickout_clients,['Node','ClientIds']},{emqx_mgmt,do_kickout_clients,['ClientIds']}},{{emqx_management_proto_v4,get_full_config,['Node']},{emqx_mgmt_api_configs,get_full_config,[]}},{{emqx_management_proto_v4,call_client,['Node','ClientId','Req']},{emqx_mgmt,do_call_client,['ClientId','Req']}},{{emqx_management_proto_v4,unsubscribe,['Node','ClientId','Topic']},{emqx_mgmt,do_unsubscribe,['ClientId','Topic']}},{{emqx_management_proto_v4,subscribe,['Node','ClientId','TopicTables']},{emqx_mgmt,do_subscribe,['ClientId','TopicTables']}},{{emqx_management_proto_v4,list_listeners,['Node']},{emqx_mgmt_api_listeners,do_list_listeners,[]}},{{emqx_management_proto_v4,list_subscriptions,['Node']},{emqx_mgmt,do_list_subscriptions,[]}},{{emqx_management_proto_v4,broker_info,['Nodes']},{emqx_mgmt,broker_info,[]}},{{emqx_management_proto_v4,node_info,['Nodes']},{emqx_mgmt,node_info,[]}},{{emqx_management_proto_v4,unsubscribe_batch,['Node','ClientId','Topics']},{emqx_mgmt,do_unsubscribe_batch,['ClientId','Topics']}}],casts => []},{emqx_management,1} => #{calls => 
[{{emqx_management_proto_v1,get_full_config,['Node']},{emqx_mgmt_api_configs,get_full_config,[]}},{{emqx_management_proto_v1,call_client,['Node','ClientId','Req']},{emqx_mgmt,do_call_client,['ClientId','Req']}},{{emqx_management_proto_v1,unsubscribe,['Node','ClientId','Topic']},{emqx_mgmt,do_unsubscribe,['ClientId','Topic']}},{{emqx_management_proto_v1,subscribe,['Node','ClientId','TopicTables']},{emqx_mgmt,do_subscribe,['ClientId','TopicTables']}},{{emqx_management_proto_v1,list_listeners,['Node']},{emqx_mgmt_api_listeners,do_list_listeners,[]}},{{emqx_management_proto_v1,list_subscriptions,['Node']},{emqx_mgmt,do_list_subscriptions,[]}},{{emqx_management_proto_v1,broker_info,['Node']},{emqx_mgmt,broker_info,[]}},{{emqx_management_proto_v1,node_info,['Node']},{emqx_mgmt,node_info,[]}}],casts => []},{emqx_management,2} => #{calls => [{{emqx_management_proto_v2,get_full_config,['Node']},{emqx_mgmt_api_configs,get_full_config,[]}},{{emqx_management_proto_v2,call_client,['Node','ClientId','Req']},{emqx_mgmt,do_call_client,['ClientId','Req']}},{{emqx_management_proto_v2,unsubscribe,['Node','ClientId','Topic']},{emqx_mgmt,do_unsubscribe,['ClientId','Topic']}},{{emqx_management_proto_v2,subscribe,['Node','ClientId','TopicTables']},{emqx_mgmt,do_subscribe,['ClientId','TopicTables']}},{{emqx_management_proto_v2,list_listeners,['Node']},{emqx_mgmt_api_listeners,do_list_listeners,[]}},{{emqx_management_proto_v2,list_subscriptions,['Node']},{emqx_mgmt,do_list_subscriptions,[]}},{{emqx_management_proto_v2,broker_info,['Node']},{emqx_mgmt,broker_info,[]}},{{emqx_management_proto_v2,node_info,['Node']},{emqx_mgmt,node_info,[]}},{{emqx_management_proto_v2,unsubscribe_batch,['Node','ClientId','Topics']},{emqx_mgmt,do_unsubscribe_batch,['ClientId','Topics']}}],casts => []},{emqx_bridge,4} => #{calls => 
[{{emqx_bridge_proto_v4,get_metrics_from_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_api,get_metrics_from_local_node,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v4,lookup_from_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_api,lookup_from_local_node,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v4,stop_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,stop,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v4,start_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,start,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v4,restart_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,restart,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v4,stop_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,stop,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v4,start_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,start,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v4,restart_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,restart,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v4,list_bridges_on_nodes,['Nodes']},{emqx_bridge,list,[]}}],casts => []},{emqx_cm,2} => #{calls => 
[{{emqx_cm_proto_v2,kick_session,['Action','ClientId','ChanPid']},{emqx_cm,do_kick_session,['Action','ClientId','ChanPid']}},{{emqx_cm_proto_v2,takeover_finish,['ConnMod','ChanPid']},{emqx_cm,takeover_finish,['ConnMod','ChanPid']}},{{emqx_cm_proto_v2,takeover_session,['ClientId','ChanPid']},{emqx_cm,takeover_session,['ClientId','ChanPid']}},{{emqx_cm_proto_v2,get_chann_conn_mod,['ClientId','ChanPid']},{emqx_cm,do_get_chann_conn_mod,['ClientId','ChanPid']}},{{emqx_cm_proto_v2,get_chan_info,['ClientId','ChanPid']},{emqx_cm,do_get_chan_info,['ClientId','ChanPid']}},{{emqx_cm_proto_v2,get_chan_stats,['ClientId','ChanPid']},{emqx_cm,do_get_chan_stats,['ClientId','ChanPid']}},{{emqx_cm_proto_v2,lookup_client,['Node','Key']},{emqx_cm,lookup_client,['Key']}},{{emqx_cm_proto_v2,kickout_client,['Node','ClientId']},{emqx_cm,kick_session,['ClientId']}}],casts => []},{emqx_telemetry,1} => #{calls => [{{emqx_telemetry_proto_v1,get_cluster_uuid,['Node']},{emqx_telemetry,get_cluster_uuid,[]}},{{emqx_telemetry_proto_v1,get_node_uuid,['Node']},{emqx_telemetry,get_node_uuid,[]}}],casts => []},{emqx_dashboard,1} => #{calls => [{{emqx_dashboard_proto_v1,current_rate,['Node']},{emqx_dashboard_monitor,current_rate,['Node']}},{{emqx_dashboard_proto_v1,do_sample,['Node','Latest']},{emqx_dashboard_monitor,do_sample,['Node','Latest']}}],casts => []},{emqx_node_rebalance_status,2} => #{calls => [{{emqx_node_rebalance_status_proto_v2,purge_status,['Nodes']},{emqx_node_rebalance_status,purge_status,[]}},{{emqx_node_rebalance_status_proto_v2,evacuation_status,['Nodes']},{emqx_node_rebalance_status,evacuation_status,[]}},{{emqx_node_rebalance_status_proto_v2,rebalance_status,['Nodes']},{emqx_node_rebalance_status,rebalance_status,[]}},{{emqx_node_rebalance_status_proto_v2,local_status,['Node']},{emqx_node_rebalance_status,local_status,[]}}],casts => []},{emqx_connector,1} => #{calls => 
[{{emqx_connector_proto_v1,start_connectors_to_all_nodes,['Nodes','ConnectorType','ConnectorName']},{emqx_connector_resource,start,['ConnectorType','ConnectorName']}},{{emqx_connector_proto_v1,start_connector_to_node,['Node','ConnectorType','ConnectorName']},{emqx_connector_resource,start,['ConnectorType','ConnectorName']}},{{emqx_connector_proto_v1,lookup_from_all_nodes,['Nodes','ConnectorType','ConnectorName']},{emqx_connector_api,lookup_from_local_node,['ConnectorType','ConnectorName']}},{{emqx_connector_proto_v1,list_connectors_on_nodes,['Nodes']},{emqx_connector,list,[]}}],casts => []},{emqx_broker,1} => #{calls => [{{emqx_broker_proto_v1,list_subscriptions_via_topic,['Node','Topic']},{emqx_broker,subscriptions_via_topic,['Topic']}},{{emqx_broker_proto_v1,list_client_subscriptions,['Node','ClientId']},{emqx_broker,subscriptions,['ClientId']}},{{emqx_broker_proto_v1,forward,['Node','Topic','Delivery']},{emqx_broker,dispatch,['Topic','Delivery']}}],casts => [{{emqx_broker_proto_v1,forward_async,['Node','Topic','Delivery']},{emqx_broker,dispatch,['Topic','Delivery']}}]},{emqx_gateway_http,1} => #{calls => [{{emqx_gateway_http_proto_v1,get_cluster_status,['Nodes','GwName']},{emqx_gateway_http,gateway_status,['GwName']}}],casts => []},{emqx_bridge,6} => #{calls => 
[{{emqx_bridge_proto_v6,v2_start_bridge_on_node_v6,['Node','ConfRootKey','BridgeType','BridgeName']},{emqx_bridge_v2,start,['ConfRootKey','BridgeType','BridgeName']}},{{emqx_bridge_proto_v6,v2_start_bridge_on_all_nodes_v6,['Nodes','ConfRootKey','BridgeType','BridgeName']},{emqx_bridge_v2,start,['ConfRootKey','BridgeType','BridgeName']}},{{emqx_bridge_proto_v6,v2_get_metrics_from_all_nodes_v6,['Nodes','ConfRootKey','ActionType','ActionName']},{emqx_bridge_v2_api,get_metrics_from_local_node_v6,['ConfRootKey','ActionType','ActionName']}},{{emqx_bridge_proto_v6,v2_list_bridges_on_nodes_v6,['Nodes','ConfRootKey']},{emqx_bridge_v2,list,['ConfRootKey']}},{{emqx_bridge_proto_v6,v2_lookup_from_all_nodes_v6,['Nodes','ConfRootKey','BridgeType','BridgeName']},{emqx_bridge_v2_api,lookup_from_local_node_v6,['ConfRootKey','BridgeType','BridgeName']}},{{emqx_bridge_proto_v6,get_metrics_from_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_api,get_metrics_from_local_node,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v6,lookup_from_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_api,lookup_from_local_node,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v6,stop_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,stop,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v6,start_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,start,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v6,restart_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,restart,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v6,stop_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,stop,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v6,start_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,start,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v6,restart_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,restart,['BridgeType','
BridgeName']}},{{emqx_bridge_proto_v6,list_bridges_on_nodes,['Nodes']},{emqx_bridge,list,[]}}],casts => []},{emqx,2} => #{calls => [{{emqx_proto_v2,delete_all_deactivated_alarms,['Node']},{emqx_alarm,delete_all_deactivated_alarms,[]}},{{emqx_proto_v2,deactivate_alarm,['Node','Name']},{emqx_alarm,deactivate,['Name']}},{{emqx_proto_v2,clean_pem_cache,['Node']},{ssl_pem_cache,clear,[]}},{{emqx_proto_v2,clean_authz_cache,['Node']},{emqx_authz_cache,drain_cache,[]}},{{emqx_proto_v2,clean_authz_cache,['Node','ClientId']},{emqx_authz_cache,drain_cache,['ClientId']}},{{emqx_proto_v2,get_metrics,['Node']},{emqx_metrics,all,[]}},{{emqx_proto_v2,get_stats,['Node']},{emqx_stats,getstats,[]}},{{emqx_proto_v2,get_alarms,['Node','Type']},{emqx_alarm,get_alarms,['Type']}},{{emqx_proto_v2,are_running,['Nodes']},{emqx,is_running,[]}},{{emqx_proto_v2,is_running,['Node']},{emqx,is_running,[]}}],casts => []},{emqx_ft_storage_fs_reader,1} => #{calls => [{{emqx_ft_storage_fs_reader_proto_v1,read,['Node','Pid','Bytes']},{emqx_ft_storage_fs_reader,read,['Pid','Bytes']}}],casts => []},{emqx_node_rebalance,1} => #{calls => 
[{{emqx_node_rebalance_proto_v1,disconnected_session_counts,['Nodes']},{emqx_node_rebalance,disconnected_session_count,[]}},{{emqx_node_rebalance_proto_v1,disable_rebalance_agent,['Nodes','OwnerPid']},{emqx_node_rebalance_agent,disable,['OwnerPid']}},{{emqx_node_rebalance_proto_v1,enable_rebalance_agent,['Nodes','OwnerPid']},{emqx_node_rebalance_agent,enable,['OwnerPid']}},{{emqx_node_rebalance_proto_v1,session_counts,['Nodes']},{emqx_node_rebalance,session_count,[]}},{{emqx_node_rebalance_proto_v1,connection_counts,['Nodes']},{emqx_node_rebalance,connection_count,[]}},{{emqx_node_rebalance_proto_v1,evict_sessions,['Nodes','Count','RecipientNodes','ConnState']},{emqx_eviction_agent,evict_sessions,['Count','RecipientNodes','ConnState']}},{{emqx_node_rebalance_proto_v1,evict_connections,['Nodes','Count']},{emqx_eviction_agent,evict_connections,['Count']}},{{emqx_node_rebalance_proto_v1,available_nodes,['Nodes']},{emqx_node_rebalance,is_node_available,[]}}],casts => []},{emqx_exhook,1} => #{calls => [{{emqx_exhook_proto_v1,server_hooks_metrics,['Nodes','Name']},{emqx_exhook_mgr,server_hooks_metrics,['Name']}},{{emqx_exhook_proto_v1,server_info,['Nodes','Name']},{emqx_exhook_mgr,server_info,['Name']}},{{emqx_exhook_proto_v1,all_servers_info,['Nodes']},{emqx_exhook_mgr,all_servers_info,[]}}],casts => []},{emqx_node_rebalance_api,2} => #{calls => 
[{{emqx_node_rebalance_api_proto_v2,node_rebalance_purge_stop,['Node']},{emqx_node_rebalance_purge,stop,[]}},{{emqx_node_rebalance_api_proto_v2,node_rebalance_purge_start,['Node','Opts']},{emqx_node_rebalance_purge,start,['Opts']}},{{emqx_node_rebalance_api_proto_v2,node_rebalance_stop,['Node']},{emqx_node_rebalance,stop,[]}},{{emqx_node_rebalance_api_proto_v2,node_rebalance_start,['Node','Opts']},{emqx_node_rebalance,start,['Opts']}},{{emqx_node_rebalance_api_proto_v2,node_rebalance_evacuation_stop,['Node']},{emqx_node_rebalance_evacuation,stop,[]}},{{emqx_node_rebalance_api_proto_v2,node_rebalance_evacuation_start,['Node','Opts']},{emqx_node_rebalance_evacuation,start,['Opts']}}],casts => []},{emqx_plugins,1} => #{calls => [{{emqx_plugins_proto_v1,get_tar,['Node','NameVsn','Timeout']},{emqx_plugins,get_tar,['NameVsn']}}],casts => []},{emqx_node_rebalance_status,1} => #{calls => [{{emqx_node_rebalance_status_proto_v1,evacuation_status,['Nodes']},{emqx_node_rebalance_status,evacuation_status,[]}},{{emqx_node_rebalance_status_proto_v1,rebalance_status,['Nodes']},{emqx_node_rebalance_status,rebalance_status,[]}},{{emqx_node_rebalance_status_proto_v1,local_status,['Node']},{emqx_node_rebalance_status,local_status,[]}}],casts => []},{emqx_conf,1} => #{calls => 
[{{emqx_conf_proto_v1,get_override_config_file,['Nodes']},{emqx_conf_app,get_override_config_file,[]}},{{emqx_conf_proto_v1,reset,['Node','KeyPath','Opts']},{emqx,reset_config,['KeyPath','Opts']}},{{emqx_conf_proto_v1,reset,['KeyPath','Opts']},{emqx,reset_config,['KeyPath','Opts']}},{{emqx_conf_proto_v1,remove_config,['Node','KeyPath','Opts']},{emqx,remove_config,['KeyPath','Opts']}},{{emqx_conf_proto_v1,remove_config,['KeyPath','Opts']},{emqx,remove_config,['KeyPath','Opts']}},{{emqx_conf_proto_v1,update,['Node','KeyPath','UpdateReq','Opts']},{emqx,update_config,['KeyPath','UpdateReq','Opts']}},{{emqx_conf_proto_v1,update,['KeyPath','UpdateReq','Opts']},{emqx,update_config,['KeyPath','UpdateReq','Opts']}},{{emqx_conf_proto_v1,get_all,['KeyPath']},{emqx_conf,get_node_and_config,['KeyPath']}},{{emqx_conf_proto_v1,get_config,['Node','KeyPath','Default']},{emqx,get_config,['KeyPath','Default']}},{{emqx_conf_proto_v1,get_config,['Node','KeyPath']},{emqx,get_config,['KeyPath']}}],casts => []},{emqx_prometheus,2} => #{calls => [{{emqx_prometheus_proto_v2,raw_prom_data,['Nodes','M','F','A']},{emqx_prometheus_api,lookup_from_local_nodes,['M','F','A']}},{{emqx_prometheus_proto_v2,stop,['Nodes']},{emqx_prometheus,do_stop,[]}},{{emqx_prometheus_proto_v2,start,['Nodes']},{emqx_prometheus,do_start,[]}}],casts => []},{emqx_ft_storage_exporter_fs,1} => #{calls => [{{emqx_ft_storage_exporter_fs_proto_v1,read_export_file,['Node','Filepath','CallerPid']},{emqx_ft_storage_exporter_fs_proxy,read_export_file_local,['Filepath','CallerPid']}},{{emqx_ft_storage_exporter_fs_proto_v1,list_exports,['Nodes','Query']},{emqx_ft_storage_exporter_fs_proxy,list_exports_local,['Query']}}],casts => []},{emqx_metrics,1} => #{calls => [{{emqx_metrics_proto_v1,get_metrics,['Nodes','HandlerName','MetricId','Timeout']},{emqx_metrics_worker,get_metrics,['HandlerName','MetricId']}}],casts => []},{emqx_conf,3} => #{calls => 
[{{emqx_conf_proto_v3,get_hocon_config,['Node','Key']},{emqx_conf_cli,get_config,['Key']}},{{emqx_conf_proto_v3,get_hocon_config,['Node']},{emqx_conf_cli,get_config,[]}},{{emqx_conf_proto_v3,get_override_config_file,['Nodes']},{emqx_conf_app,get_override_config_file,[]}},{{emqx_conf_proto_v3,reset,['Node','KeyPath','Opts']},{emqx,reset_config,['KeyPath','Opts']}},{{emqx_conf_proto_v3,reset,['KeyPath','Opts']},{emqx,reset_config,['KeyPath','Opts']}},{{emqx_conf_proto_v3,remove_config,['Node','KeyPath','Opts']},{emqx,remove_config,['KeyPath','Opts']}},{{emqx_conf_proto_v3,remove_config,['KeyPath','Opts']},{emqx,remove_config,['KeyPath','Opts']}},{{emqx_conf_proto_v3,update,['Node','KeyPath','UpdateReq','Opts']},{emqx,update_config,['KeyPath','UpdateReq','Opts']}},{{emqx_conf_proto_v3,update,['KeyPath','UpdateReq','Opts']},{emqx,update_config,['KeyPath','UpdateReq','Opts']}},{{emqx_conf_proto_v3,get_all,['KeyPath']},{emqx_conf,get_node_and_config,['KeyPath']}},{{emqx_conf_proto_v3,get_config,['Node','KeyPath','Default']},{emqx,get_config,['KeyPath','Default']}},{{emqx_conf_proto_v3,get_config,['Node','KeyPath']},{emqx,get_config,['KeyPath']}},{{emqx_conf_proto_v3,sync_data_from_node,['Node']},{emqx_conf_app,sync_data_from_node,[]}}],casts => []},{emqx_mgmt_cluster,2} => #{calls => [{{emqx_mgmt_cluster_proto_v2,connected_replicants,['Nodes']},{emqx_mgmt_api_cluster,connected_replicants,[]}},{{emqx_mgmt_cluster_proto_v2,invite_node,['Node','Self']},{emqx_mgmt_api_cluster,join,['Self']}}],casts => []},{emqx_retainer,2} => #{calls => [{{emqx_retainer_proto_v2,active_mnesia_indices,['Nodes']},{emqx_retainer_mnesia,active_indices,[]}},{{emqx_retainer_proto_v2,wait_dispatch_complete,['Nodes','Timeout']},{emqx_retainer_dispatcher,wait_dispatch_complete,['Timeout']}}],casts => []},{emqx_node_rebalance,3} => #{calls => 
[{{emqx_node_rebalance_proto_v3,enable_rebalance_agent,['Nodes','OwnerPid','Kind','Options']},{emqx_node_rebalance_agent,enable,['OwnerPid','Kind','Options']}},{{emqx_node_rebalance_proto_v3,purge_sessions,['Nodes','Count']},{emqx_eviction_agent,purge_sessions,['Count']}},{{emqx_node_rebalance_proto_v3,disable_rebalance_agent,['Nodes','OwnerPid','Kind']},{emqx_node_rebalance_agent,disable,['OwnerPid','Kind']}},{{emqx_node_rebalance_proto_v3,enable_rebalance_agent,['Nodes','OwnerPid','Kind']},{emqx_node_rebalance_agent,enable,['OwnerPid','Kind']}},{{emqx_node_rebalance_proto_v3,disconnected_session_counts,['Nodes']},{emqx_node_rebalance,disconnected_session_count,[]}},{{emqx_node_rebalance_proto_v3,disable_rebalance_agent,['Nodes','OwnerPid']},{emqx_node_rebalance_agent,disable,['OwnerPid']}},{{emqx_node_rebalance_proto_v3,enable_rebalance_agent,['Nodes','OwnerPid']},{emqx_node_rebalance_agent,enable,['OwnerPid']}},{{emqx_node_rebalance_proto_v3,session_counts,['Nodes']},{emqx_node_rebalance,session_count,[]}},{{emqx_node_rebalance_proto_v3,connection_counts,['Nodes']},{emqx_node_rebalance,connection_count,[]}},{{emqx_node_rebalance_proto_v3,evict_sessions,['Nodes','Count','RecipientNodes','ConnState']},{emqx_eviction_agent,evict_sessions,['Count','RecipientNodes','ConnState']}},{{emqx_node_rebalance_proto_v3,evict_connections,['Nodes','Count']},{emqx_eviction_agent,evict_connections,['Count']}},{{emqx_node_rebalance_proto_v3,available_nodes,['Nodes']},{emqx_node_rebalance,is_node_available,[]}}],casts => []},{emqx_delayed,3} => #{calls => 
[{{emqx_delayed_proto_v3,delete_delayed_messages_by_topic_name,['Nodes','TopicName']},{emqx_delayed,do_delete_delayed_messages_by_topic_name,['TopicName']}},{{emqx_delayed_proto_v3,clear_all,['Nodes']},{emqx_delayed,clear_all_local,[]}},{{emqx_delayed_proto_v3,delete_delayed_message,['Node','Id']},{emqx_delayed,delete_delayed_message,['Id']}},{{emqx_delayed_proto_v3,get_delayed_message,['Node','Id']},{emqx_delayed,get_delayed_message,['Id']}}],casts => []},{emqx_eviction_agent,2} => #{calls => [{{emqx_eviction_agent_proto_v2,all_channels_count,['Nodes','Timeout']},{emqx_eviction_agent,all_local_channels_count,[]}},{{emqx_eviction_agent_proto_v2,evict_session_channel,['Node','ClientId','ConnInfo','ClientInfo']},{emqx_eviction_agent,evict_session_channel,['ClientId','ConnInfo','ClientInfo']}}],casts => []},{emqx_ds,1} => #{calls => [{{emqx_ds_proto_v1,store_batch,['Node','DB','Shard','Batch','Options']},{emqx_ds_replication_layer,do_store_batch_v1,['DB','Shard','Batch','Options']}},{{emqx_ds_proto_v1,next,['Node','DB','Shard','Iter','BatchSize']},{emqx_ds_replication_layer,do_next_v1,['DB','Shard','Iter','BatchSize']}},{{emqx_ds_proto_v1,make_iterator,['Node','DB','Shard','Stream','TopicFilter','StartTime']},{emqx_ds_replication_layer,do_make_iterator_v1,['DB','Shard','Stream','TopicFilter','StartTime']}},{{emqx_ds_proto_v1,get_streams,['Node','DB','Shard','TopicFilter','Time']},{emqx_ds_replication_layer,do_get_streams_v1,['DB','Shard','TopicFilter','Time']}},{{emqx_ds_proto_v1,drop_db,['Node','DB']},{emqx_ds_replication_layer,do_drop_db_v1,['DB']}}],casts => []},{emqx_node_rebalance_purge,1} => #{calls => [{{emqx_node_rebalance_purge_proto_v1,stop,['Nodes']},{emqx_node_rebalance_purge,stop,[]}},{{emqx_node_rebalance_purge_proto_v1,start,['Nodes','Opts']},{emqx_node_rebalance_purge,start,['Opts']}}],casts => []},{emqx_gateway_api_listeners,1} => #{calls => 
[{{emqx_gateway_api_listeners_proto_v1,listeners_cluster_status,['Nodes','Listeners']},{emqx_gateway_api_listeners,do_listeners_cluster_status,['Listeners']}}],casts => []},{emqx_mgmt_trace,2} => #{calls => [{{emqx_mgmt_trace_proto_v2,read_trace_file,['Node','Name','Position','Limit']},{emqx_mgmt_api_trace,read_trace_file,['Name','Position','Limit']}},{{emqx_mgmt_trace_proto_v2,trace_file_detail,['Nodes','File']},{emqx_trace,trace_file_detail,['File']}},{{emqx_mgmt_trace_proto_v2,trace_file,['Nodes','File']},{emqx_trace,trace_file,['File']}},{{emqx_mgmt_trace_proto_v2,get_trace_size,['Nodes']},{emqx_mgmt_api_trace,get_trace_size,[]}}],casts => []},{emqx_slow_subs,1} => #{calls => [{{emqx_slow_subs_proto_v1,get_history,['Nodes']},{emqx_slow_subs_api,get_history,[]}},{{emqx_slow_subs_proto_v1,clear_history,['Nodes']},{emqx_slow_subs,clear_history,[]}}],casts => []},{emqx_mgmt_api_plugins,1} => #{calls => [{{emqx_mgmt_api_plugins_proto_v1,ensure_action,['Name','Action']},{emqx_mgmt_api_plugins,ensure_action,['Name','Action']}},{{emqx_mgmt_api_plugins_proto_v1,delete_package,['Name']},{emqx_mgmt_api_plugins,delete_package,['Name']}},{{emqx_mgmt_api_plugins_proto_v1,describe_package,['Name']},{emqx_mgmt_api_plugins,describe_package,['Name']}},{{emqx_mgmt_api_plugins_proto_v1,install_package,['Filename','Bin']},{emqx_mgmt_api_plugins,install_package,['Filename','Bin']}},{{emqx_mgmt_api_plugins_proto_v1,get_plugins,[]},{emqx_mgmt_api_plugins,get_plugins,[]}}],casts => []},{emqx_conf,2} => #{calls => 
[{{emqx_conf_proto_v2,get_override_config_file,['Nodes']},{emqx_conf_app,get_override_config_file,[]}},{{emqx_conf_proto_v2,reset,['Node','KeyPath','Opts']},{emqx,reset_config,['KeyPath','Opts']}},{{emqx_conf_proto_v2,reset,['KeyPath','Opts']},{emqx,reset_config,['KeyPath','Opts']}},{{emqx_conf_proto_v2,remove_config,['Node','KeyPath','Opts']},{emqx,remove_config,['KeyPath','Opts']}},{{emqx_conf_proto_v2,remove_config,['KeyPath','Opts']},{emqx,remove_config,['KeyPath','Opts']}},{{emqx_conf_proto_v2,update,['Node','KeyPath','UpdateReq','Opts']},{emqx,update_config,['KeyPath','UpdateReq','Opts']}},{{emqx_conf_proto_v2,update,['KeyPath','UpdateReq','Opts']},{emqx,update_config,['KeyPath','UpdateReq','Opts']}},{{emqx_conf_proto_v2,get_all,['KeyPath']},{emqx_conf,get_node_and_config,['KeyPath']}},{{emqx_conf_proto_v2,get_config,['Node','KeyPath','Default']},{emqx,get_config,['KeyPath','Default']}},{{emqx_conf_proto_v2,get_config,['Node','KeyPath']},{emqx,get_config,['KeyPath']}},{{emqx_conf_proto_v2,sync_data_from_node,['Node']},{emqx_conf_app,sync_data_from_node,[]}}],casts => []},{emqx_ds,2} => #{calls => 
[{{emqx_ds_proto_v2,add_generation,['Node','DB']},{emqx_ds_replication_layer,do_add_generation_v2,['DB']}},{{emqx_ds_proto_v2,update_iterator,['Node','DB','Shard','OldIter','DSKey']},{emqx_ds_replication_layer,do_update_iterator_v2,['DB','Shard','OldIter','DSKey']}},{{emqx_ds_proto_v2,store_batch,['Node','DB','Shard','Batch','Options']},{emqx_ds_replication_layer,do_store_batch_v1,['DB','Shard','Batch','Options']}},{{emqx_ds_proto_v2,next,['Node','DB','Shard','Iter','BatchSize']},{emqx_ds_replication_layer,do_next_v1,['DB','Shard','Iter','BatchSize']}},{{emqx_ds_proto_v2,make_iterator,['Node','DB','Shard','Stream','TopicFilter','StartTime']},{emqx_ds_replication_layer,do_make_iterator_v1,['DB','Shard','Stream','TopicFilter','StartTime']}},{{emqx_ds_proto_v2,get_streams,['Node','DB','Shard','TopicFilter','Time']},{emqx_ds_replication_layer,do_get_streams_v1,['DB','Shard','TopicFilter','Time']}},{{emqx_ds_proto_v2,drop_db,['Node','DB']},{emqx_ds_replication_layer,do_drop_db_v1,['DB']}}],casts => []},{emqx_shared_sub,1} => #{calls => [{{emqx_shared_sub_proto_v1,dispatch_with_ack,['Pid','Group','Topic','Msg','Timeout']},{emqx_shared_sub,do_dispatch_with_ack,['Pid','Group','Topic','Msg']}}],casts => [{{emqx_shared_sub_proto_v1,send,['Node','Pid','Topic','Msg']},{erlang,send,['Pid','Msg']}}]},{emqx_ft_storage_fs,1} => #{calls => [{{emqx_ft_storage_fs_proto_v1,list_assemblers,['Nodes','Transfer']},{emqx_ft_storage_fs_proxy,lookup_local_assembler,['Transfer']}},{{emqx_ft_storage_fs_proto_v1,pread,['Node','Transfer','Frag','Offset','Size']},{emqx_ft_storage_fs_proxy,pread_local,['Transfer','Frag','Offset','Size']}},{{emqx_ft_storage_fs_proto_v1,multilist,['Nodes','Transfer','What']},{emqx_ft_storage_fs_proxy,list_local,['Transfer','What']}}],casts => []},{emqx_cm,1} => #{calls => 
[{{emqx_cm_proto_v1,kick_session,['Action','ClientId','ChanPid']},{emqx_cm,do_kick_session,['Action','ClientId','ChanPid']}},{{emqx_cm_proto_v1,takeover_session,['ClientId','ChanPid']},{emqx_cm,takeover_session,['ClientId','ChanPid']}},{{emqx_cm_proto_v1,get_chann_conn_mod,['ClientId','ChanPid']},{emqx_cm,do_get_chann_conn_mod,['ClientId','ChanPid']}},{{emqx_cm_proto_v1,get_chan_info,['ClientId','ChanPid']},{emqx_cm,do_get_chan_info,['ClientId','ChanPid']}},{{emqx_cm_proto_v1,get_chan_stats,['ClientId','ChanPid']},{emqx_cm,do_get_chan_stats,['ClientId','ChanPid']}},{{emqx_cm_proto_v1,lookup_client,['Node','Key']},{emqx_cm,lookup_client,['Key']}},{{emqx_cm_proto_v1,kickout_client,['Node','ClientId']},{emqx_cm,kick_session,['ClientId']}}],casts => []},{emqx_authn,1} => #{calls => [{{emqx_authn_proto_v1,lookup_from_all_nodes,['Nodes','ChainName','AuthenticatorID']},{emqx_authn_api,lookup_from_local_node,['ChainName','AuthenticatorID']}}],casts => []},{emqx_resource,1} => #{calls => [{{emqx_resource_proto_v1,reset_metrics,['ResId']},{emqx_resource,reset_metrics_local,['ResId']}},{{emqx_resource_proto_v1,remove,['ResId']},{emqx_resource,remove_local,['ResId']}},{{emqx_resource_proto_v1,recreate,['ResId','ResourceType','Config','Opts']},{emqx_resource,recreate_local,['ResId','ResourceType','Config','Opts']}},{{emqx_resource_proto_v1,create_dry_run,['ResourceType','Config']},{emqx_resource,create_dry_run_local,['ResourceType','Config']}},{{emqx_resource_proto_v1,create,['ResId','Group','ResourceType','Config','Opts']},{emqx_resource,create_local,['ResId','Group','ResourceType','Config','Opts']}}],casts => []},{emqx_bridge,5} => #{calls => 
[{{emqx_bridge_proto_v5,v2_start_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_v2,start,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v5,v2_start_bridge_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_v2,start,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v5,v2_get_metrics_from_all_nodes,['Nodes','ActionType','ActionName']},{emqx_bridge_v2_api,get_metrics_from_local_node,['ActionType','ActionName']}},{{emqx_bridge_proto_v5,v2_lookup_from_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_v2_api,lookup_from_local_node,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v5,v2_list_bridges_on_nodes,['Nodes']},{emqx_bridge_v2,list,[]}},{{emqx_bridge_proto_v5,get_metrics_from_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_api,get_metrics_from_local_node,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v5,lookup_from_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_api,lookup_from_local_node,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v5,stop_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,stop,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v5,start_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,start,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v5,restart_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,restart,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v5,stop_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,stop,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v5,start_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,start,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v5,restart_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,restart,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v5,list_bridges_on_nodes,['Nodes']},{emqx_bridge,list,[]}}],casts => []},{emqx_eviction_agent,1} => #{calls => 
[{{emqx_eviction_agent_proto_v1,evict_session_channel,['Node','ClientId','ConnInfo','ClientInfo']},{emqx_eviction_agent,evict_session_channel,['ClientId','ConnInfo','ClientInfo']}}],casts => []},{emqx_authz,1} => #{calls => [{{emqx_authz_proto_v1,lookup_from_all_nodes,['Nodes','Type']},{emqx_authz_api_sources,lookup_from_local_node,['Type']}}],casts => []},{emqx_gateway_cm,1} => #{calls => [{{emqx_gateway_cm_proto_v1,cast,['GwName','ClientId','ChanPid','Req']},{emqx_gateway_cm,do_cast,['GwName','ClientId','ChanPid','Req']}},{{emqx_gateway_cm_proto_v1,call,['GwName','ClientId','ChanPid','Req']},{emqx_gateway_cm,do_call,['GwName','ClientId','ChanPid','Req']}},{{emqx_gateway_cm_proto_v1,call,['GwName','ClientId','ChanPid','Req','Timeout']},{emqx_gateway_cm,do_call,['GwName','ClientId','ChanPid','Req','Timeout']}},{{emqx_gateway_cm_proto_v1,takeover_session,['GwName','ClientId','ChanPid']},{emqx_gateway_cm,do_takeover_session,['GwName','ClientId','ChanPid']}},{{emqx_gateway_cm_proto_v1,get_chann_conn_mod,['GwName','ClientId','ChanPid']},{emqx_gateway_cm,do_get_chann_conn_mod,['GwName','ClientId','ChanPid']}},{{emqx_gateway_cm_proto_v1,kick_session,['GwName','Action','ClientId','ChanPid']},{emqx_gateway_cm,do_kick_session,['GwName','Action','ClientId','ChanPid']}},{{emqx_gateway_cm_proto_v1,set_chan_stats,['GwName','ClientId','ChanPid','Stats']},{emqx_gateway_cm,do_set_chan_stats,['GwName','ClientId','ChanPid','Stats']}},{{emqx_gateway_cm_proto_v1,get_chan_stats,['GwName','ClientId','ChanPid']},{emqx_gateway_cm,do_get_chan_stats,['GwName','ClientId','ChanPid']}},{{emqx_gateway_cm_proto_v1,set_chan_info,['GwName','ClientId','ChanPid','Infos']},{emqx_gateway_cm,do_set_chan_info,['GwName','ClientId','ChanPid','Infos']}},{{emqx_gateway_cm_proto_v1,get_chan_info,['GwName','ClientId','ChanPid']},{emqx_gateway_cm,do_get_chan_info,['GwName','ClientId','ChanPid']}},{{emqx_gateway_cm_proto_v1,lookup_by_clientid,['Nodes','GwName','ClientId']},{emqx_gateway_cm,do_lookup_by_clientid,
['GwName','ClientId']}}],casts => []},{emqx_bridge,3} => #{calls => [{{emqx_bridge_proto_v3,lookup_from_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_api,lookup_from_local_node,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v3,stop_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,stop,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v3,start_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,start,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v3,restart_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,restart,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v3,stop_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,stop,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v3,start_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,start,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v3,restart_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,restart,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v3,list_bridges_on_nodes,['Nodes']},{emqx_bridge,list,[]}},{{emqx_bridge_proto_v3,list_bridges,['Node']},{emqx_bridge,list,[]}}],casts => []},{emqx_rule_engine,1} => #{calls => [{{emqx_rule_engine_proto_v1,reset_metrics,['RuleId']},{emqx_rule_engine,reset_metrics_for_rule,['RuleId']}}],casts => []},{emqx_node_rebalance_evacuation,1} => #{calls => [{{emqx_node_rebalance_evacuation_proto_v1,available_nodes,['Nodes']},{emqx_node_rebalance_evacuation,is_node_available,[]}}],casts => []},{emqx_bridge,2} => #{calls => 
[{{emqx_bridge_proto_v2,lookup_from_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_api,lookup_from_local_node,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v2,stop_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,stop,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v2,start_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,start,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v2,restart_bridges_to_all_nodes,['Nodes','BridgeType','BridgeName']},{emqx_bridge_resource,restart,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v2,stop_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,stop,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v2,start_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,start,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v2,restart_bridge_to_node,['Node','BridgeType','BridgeName']},{emqx_bridge_resource,restart,['BridgeType','BridgeName']}},{{emqx_bridge_proto_v2,list_bridges,['Node']},{emqx_bridge,list,[]}}],casts => []},{emqx_node_rebalance,2} => #{calls => 
[{{emqx_node_rebalance_proto_v2,purge_sessions,['Nodes','Count']},{emqx_eviction_agent,purge_sessions,['Count']}},{{emqx_node_rebalance_proto_v2,disable_rebalance_agent,['Nodes','OwnerPid','Kind']},{emqx_node_rebalance_agent,disable,['OwnerPid','Kind']}},{{emqx_node_rebalance_proto_v2,enable_rebalance_agent,['Nodes','OwnerPid','Kind']},{emqx_node_rebalance_agent,enable,['OwnerPid','Kind']}},{{emqx_node_rebalance_proto_v2,disconnected_session_counts,['Nodes']},{emqx_node_rebalance,disconnected_session_count,[]}},{{emqx_node_rebalance_proto_v2,disable_rebalance_agent,['Nodes','OwnerPid']},{emqx_node_rebalance_agent,disable,['OwnerPid']}},{{emqx_node_rebalance_proto_v2,enable_rebalance_agent,['Nodes','OwnerPid']},{emqx_node_rebalance_agent,enable,['OwnerPid']}},{{emqx_node_rebalance_proto_v2,session_counts,['Nodes']},{emqx_node_rebalance,session_count,[]}},{{emqx_node_rebalance_proto_v2,connection_counts,['Nodes']},{emqx_node_rebalance,connection_count,[]}},{{emqx_node_rebalance_proto_v2,evict_sessions,['Nodes','Count','RecipientNodes','ConnState']},{emqx_eviction_agent,evict_sessions,['Count','RecipientNodes','ConnState']}},{{emqx_node_rebalance_proto_v2,evict_connections,['Nodes','Count']},{emqx_eviction_agent,evict_connections,['Count']}},{{emqx_node_rebalance_proto_v2,available_nodes,['Nodes']},{emqx_node_rebalance,is_node_available,[]}}],casts => []},{emqx_mgmt_data_backup,1} => #{calls => [{{emqx_mgmt_data_backup_proto_v1,delete_file,['Node','FileName','Timeout']},{emqx_mgmt_data_backup,delete_file,['FileName']}},{{emqx_mgmt_data_backup_proto_v1,read_file,['Node','FileName','Timeout']},{emqx_mgmt_data_backup,read_file,['FileName']}},{{emqx_mgmt_data_backup_proto_v1,import_file,['Node','FileNode','FileName','Timeout']},{emqx_mgmt_data_backup,maybe_copy_and_import,['FileNode','FileName']}},{{emqx_mgmt_data_backup_proto_v1,list_files,['Nodes','Timeout']},{emqx_mgmt_data_backup,list_files,[]}}],casts => []},{emqx_retainer,1} => #{calls => 
[{{emqx_retainer_proto_v1,wait_dispatch_complete,['Nodes','Timeout']},{emqx_retainer_dispatcher,wait_dispatch_complete,['Timeout']}}],casts => []},{emqx_delayed,2} => #{calls => [{{emqx_delayed_proto_v2,clear_all,['Nodes']},{emqx_delayed,clear_all_local,[]}},{{emqx_delayed_proto_v2,delete_delayed_message,['Node','Id']},{emqx_delayed,delete_delayed_message,['Id']}},{{emqx_delayed_proto_v2,get_delayed_message,['Node','Id']},{emqx_delayed,get_delayed_message,['Id']}}],casts => []},{emqx_mgmt_api_plugins,2} => #{calls => [{{emqx_mgmt_api_plugins_proto_v2,ensure_action,['Name','Action']},{emqx_mgmt_api_plugins,ensure_action,['Name','Action']}},{{emqx_mgmt_api_plugins_proto_v2,delete_package,['Name']},{emqx_mgmt_api_plugins,delete_package,['Name']}},{{emqx_mgmt_api_plugins_proto_v2,describe_package,['Nodes','Name']},{emqx_mgmt_api_plugins,describe_package,['Name']}},{{emqx_mgmt_api_plugins_proto_v2,install_package,['Nodes','Filename','Bin']},{emqx_mgmt_api_plugins,install_package,['Filename','Bin']}},{{emqx_mgmt_api_plugins_proto_v2,get_plugins,['Nodes']},{emqx_mgmt_api_plugins,get_plugins,[]}}],casts => []},{emqx_mgmt_cluster,1} => #{calls => [{{emqx_mgmt_cluster_proto_v1,invite_node,['Node','Self']},{emqx_mgmt_api_cluster,join,['Self']}}],casts => []},{emqx_mgmt_cluster,3} => #{calls => [{{emqx_mgmt_cluster_proto_v3,connected_replicants,['Nodes']},{emqx_mgmt_api_cluster,connected_replicants,[]}},{{emqx_mgmt_cluster_proto_v3,invite_node,['Node','Self','Timeout']},{emqx_mgmt_api_cluster,join,['Self']}}],casts => []},{emqx_prometheus,1} => #{calls => [{{emqx_prometheus_proto_v1,stop,['Nodes']},{emqx_prometheus,do_stop,[]}},{{emqx_prometheus_proto_v1,start,['Nodes']},{emqx_prometheus,do_start,[]}}],casts => []},{emqx_delayed,1} => #{calls => [{{emqx_delayed_proto_v1,delete_delayed_message,['Node','Id']},{emqx_delayed,delete_delayed_message,['Id']}},{{emqx_delayed_proto_v1,get_delayed_message,['Node','Id']},{emqx_delayed,get_delayed_message,['Id']}}],casts => 
[]},{emqx_management,3} => #{calls => [{{emqx_management_proto_v3,get_full_config,['Node']},{emqx_mgmt_api_configs,get_full_config,[]}},{{emqx_management_proto_v3,call_client,['Node','ClientId','Req']},{emqx_mgmt,do_call_client,['ClientId','Req']}},{{emqx_management_proto_v3,unsubscribe,['Node','ClientId','Topic']},{emqx_mgmt,do_unsubscribe,['ClientId','Topic']}},{{emqx_management_proto_v3,subscribe,['Node','ClientId','TopicTables']},{emqx_mgmt,do_subscribe,['ClientId','TopicTables']}},{{emqx_management_proto_v3,list_listeners,['Node']},{emqx_mgmt_api_listeners,do_list_listeners,[]}},{{emqx_management_proto_v3,list_subscriptions,['Node']},{emqx_mgmt,do_list_subscriptions,[]}},{{emqx_management_proto_v3,broker_info,['Nodes']},{emqx_mgmt,broker_info,[]}},{{emqx_management_proto_v3,node_info,['Nodes']},{emqx_mgmt,node_info,[]}},{{emqx_management_proto_v3,unsubscribe_batch,['Node','ClientId','Topics']},{emqx_mgmt,do_unsubscribe_batch,['ClientId','Topics']}}],casts => []}},release => "master",signatures => #{{emqx_retainer_proto_v1,wait_dispatch_complete,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}]},{emqx_bridge_proto_v6,restart_bridges_to_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_ft_storage_fs_proxy,pread_local,4} => {any,[any,any,any,any]},{emqx_bridge_proto_v2,stop_bridge_to_node,3} => 
{any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_node_rebalance_proto_v2,disable_rebalance_agent,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,identifier,[pid],unknown}]},{emqx_node_rebalance_proto_v2,disconnected_session_counts,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_shared_sub_proto_v1,dispatch_with_ack,5} => {any,[{c,identifier,[pid],unknown},{c,union,[{c,atom,['_'],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,binary,{8,0},unknown},{c,tuple,[{c,atom,[message],unknown},{c,binary,{8,0},unknown},any,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,map,{[],{c,atom,any,unknown},{c,atom,[false,true],unknown}},unknown},{c,map,{[{{c,atom,[allow_publish],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[peerhost],unknown},optional,{c,tuple_set,[{4,[{c,tuple,[{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer}],{4,any}}]},{8,[{c,tuple,[{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer}],{8,any}}]}],unknown}},{{c,atom,[properties],unknown},optional,{c,map,{[],{c,atom,any,unknown},any},unknown}},{{c,atom,[proto_ver],unknown},optional,{c,union,[none,{c,binary,{8,0},unknown},none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}
},{{c,atom,[protocol],unknown},optional,{c,atom,any,unknown}},{{c,atom,[username],unknown},optional,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}}],{c,atom,any,unknown},any},unknown},{c,binary,{8,0},unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,255},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown},{c,number,any,integer},any],{10,{c,atom,[message],unknown}}},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}]},{emqx_bridge_proto_v3,restart_bridges_to_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_dashboard_monitor,current_rate,1} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[badrpc],unknown},any],{2,{c,atom,[badrpc],unknown}}},{c,tuple,[{c,atom,[ok],unknown},any],{2,{c,atom,[ok],unknown}}}]}],unknown},[{c,atom,any,unknown}]},{emqx_ft_storage_exporter_fs_proxy,list_exports_local,1} => {any,[any]},{emqx_management_proto_v3,list_subscriptions,1} => {any,[{c,atom,any,unknown}]},{emqx_connector_proto_v1,list_connectors_on_nodes,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_management_proto_v2,broker_info,1} => {any,[{c,atom,any,unknown}]},{emqx_gateway_cm,do_lookup_by_clientid,2} => 
{{c,list,{any,{c,nil,[],unknown}},unknown},[{c,union,[{c,atom,any,unknown},none,none,none,{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},{c,number,any,unknown},none,none,none],unknown},any]},{emqx_persistent_session_ds,do_open_iterator,3} => {{c,tuple,[{c,atom,[error],unknown},{c,atom,[not_implemented],unknown}],{2,{c,atom,[error],unknown}}},[any,any,any]},{emqx_cm,do_get_chann_conn_mod,2} => {any,[any,any]},{emqx_persistent_session_ds_proto_v1,close_iterator,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},any]},{emqx_mgmt_data_backup,read_file,1} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,atom,any,unknown}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,binary,{8,0},unknown}],{2,{c,atom,[ok],unknown}}}]}],unknown},none,none],unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,1114111},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown}]},{emqx_node_rebalance_proto_v2,purge_sessions,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,pos_inf},integer}]},{emqx_resource,create_local,5} => 
{{c,tuple,[{c,atom,[ok],unknown},{c,map,{[{{c,atom,[added_channels],unknown},mandatory,any},{{c,atom,[callback_mode],unknown},mandatory,{c,atom,[always_sync,async_if_possible],unknown}},{{c,atom,[config],unknown},mandatory,any},{{c,atom,[error],unknown},mandatory,any},{{c,atom,[id],unknown},mandatory,{c,binary,{8,0},unknown}},{{c,atom,[mod],unknown},mandatory,{c,atom,any,unknown}},{{c,atom,[query_mode],unknown},mandatory,{c,atom,[async,no_queries,simple_async,simple_async_internal_buffer,simple_sync,simple_sync_internal_buffer,sync],unknown}},{{c,atom,[state],unknown},mandatory,any},{{c,atom,[status],unknown},mandatory,{c,atom,[connected,connecting,disconnected,stopped],unknown}}],none,none},unknown}],{2,{c,atom,[ok],unknown}}},[{c,binary,{8,0},unknown},{c,binary,{8,0},unknown},{c,atom,any,unknown},any,{c,map,{[{{c,atom,[auto_restart_interval],unknown},optional,{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,1,pos_inf},integer},none,none,none],unknown}},{{c,atom,[auto_retry_interval],unknown},optional,{c,number,any,integer}},{{c,atom,[batch_size],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[batch_time],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[health_check_interval],unknown},optional,{c,number,any,integer}},{{c,atom,[health_check_timeout],unknown},optional,{c,number,any,integer}},{{c,atom,[inflight_window],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[max_buffer_bytes],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[query_mode],unknown},optional,{c,atom,[async,no_queries,simple_async,simple_async_internal_buffer,simple_sync,simple_sync_internal_buffer,sync],unknown}},{{c,atom,[resume_interval],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[spawn_buffer_workers],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[start_after_created],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[start_timeout],unknown},option
al,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[wait_for_resource_ready],unknown},optional,{c,number,any,integer}},{{c,atom,[worker_pool_size],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}}],none,none},unknown}]},{emqx_authz_api_sources,lookup_from_local_node,1} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,tuple,[{c,atom,any,unknown},{c,union,[{c,atom,[not_found_resource],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}],{2,any}}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,tuple,[{c,atom,any,unknown},{c,atom,[connected,connecting,disconnected,stopped],unknown},{c,map,{[{{c,atom,[counters],unknown},mandatory,{c,map,{[],any,any},unknown}},{{c,atom,[gauges],unknown},mandatory,{c,map,{[],any,any},unknown}},{{c,atom,[rate],unknown},mandatory,{c,map,{[],any,any},unknown}},{{c,atom,[slides],unknown},mandatory,{c,map,{[],any,any},unknown}}],none,none},unknown},{c,map,{[{{c,atom,[counters],unknown},optional,{c,map,{[],any,any},unknown}},{{c,atom,[gauges],unknown},optional,{c,map,{[],any,any},unknown}},{{c,atom,[rate],unknown},optional,{c,map,{[],any,any},unknown}},{{c,atom,[slides],unknown},optional,{c,map,{[],any,any},unknown}}],none,none},unknown}],{4,any}}],{2,{c,atom,[ok],unknown}}}]}],unknown},[any]},{emqx_management_proto_v3,node_info,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_ft_storage_exporter_fs_proto_v1,list_exports,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,map,{[{{c,atom,[following],unknown},optional,any},{{c,atom,[limit],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[transfer],unknown},optional,{c,tuple,[{c,binary,{8,0},unknown},{c,binary,{8,0},unknown}],{2,any}}}],none,none},unknown}]},{emqx,get_config,1} => {any,[{c,list,{any,{c,nil,[],unknown}},unknown}]},{emqx_resource_proto_v1,remove,1} => {any,[{c,binary,{8,0},unknown}]},{emqx_management_proto_v2,list_listeners,1} => 
{any,[{c,atom,any,unknown}]},{emqx_broker_proto_v1,list_client_subscriptions,2} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}]},{emqx_bridge_proto_v4,restart_bridges_to_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_resource_proto_v1,reset_metrics,1} => {any,[{c,binary,{8,0},unknown}]},{emqx_delayed,clear_all_local,0} => {{c,atom,[ok],unknown},[]},{emqx_bridge_proto_v5,v2_list_bridges_on_nodes,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx,reset_config,2} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,map,{[{{c,atom,[config],unknown},optional,{c,union,[{c,atom,[undefined],unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},none,none,none,{c,map,{[],{c,atom,any,unknown},any},unknown}],unknown}},{{c,atom,[post_config_update],unknown},optional,{c,map,{[],{c,atom,any,unknown},any},unknown}},{{c,atom,[raw_config],unknown},optional,{c,union,[{c,atom,[undefined],unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},none,none,none,{c,map,{[],{c,binary,{8,0},unknown},any},unknown}],unknown}}],none,none},unknown}],{2,{c,atom,[ok],unknown}}}]}],unknown},[{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},any]},{emqx_mgmt_api_plugins,delete_package,1} => 
{{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},none,none],unknown},[{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_mgmt_data_backup_proto_v1,list_files,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}]},{emqx_mgmt_data_backup_proto_v1,import_file,4} => {any,[{c,atom,any,unknown},{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}]},{emqx_retainer_mnesia,active_indices,0} => {{c,tuple,[any,any],{2,any}},[]},{emqx_cm,do_get_chan_stats,2} => {any,[any,any]},{emqx_node_rebalance_proto_v3,disconnected_session_counts,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_mgmt_api_listeners,do_list_listeners,0} => {{c,map,{[],{c,binary,{40,32},unknown},{c,union,[{c,atom,any,unknown},none,none,none,{c,list,{{c,map,{[],any,any},unknown},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}},unknown},[]},{emqx_conf_app,get_override_config_file,0} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,map,{[{{c,atom,[msg],unknown},mandatory,any},{{c,atom,[node],unknown},mandatory,{c,atom,any,unknown}},{{c,atom,[release],unknown},mandatory,{c,list,{any,{c,nil,[],unknown}},nonempty}},{{c,atom,[wall_clock],unknown},mandatory,{c,tuple,[any,any],{2,any}}}],none,none},unknown}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},any],{2,{c,atom,[ok],unknown}}}]}],unknown},[]},{emqx_management_proto_v4,list_listeners,1} => {any,[{c,atom,any,unknown}]},{emqx_node_rebalance_proto_v2,evict_sessions,4} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,pos_inf},integer},{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,atom,[connected,connecting,disconnected,idle,reauthenticating],unknown}]},{emqx_persistent_session_ds,do_ensure_all_iterators_closed,1} => {{c,atom,[ok],unknown},[any]},{emqx_ds_replication_layer,do_make_iterator_v1,5} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,opaque,[{opaque,emqx_ds_storage_layer,iterator,0,{c,map,{[{{c,number,{int_set,[1]},integer},mandatory,{c,number,{int_set,[2]},integer}},{{c,number,{int_set,[2]},integer},mandatory,{c,number,{int_rng,0,1114111},integer}},{{c,number,{int_set,[3]},integer},mandatory,any}],none,none},unknown}}],unknown}],{2,{c,atom,[ok],unknown}}}]}],unknown},[{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,opaque,[{opaque,emqx_ds_storage_layer,stream,0,{c,map,{[{{c,number,{int_set,[1]},integer},mandatory,{c,number,{int_set,[1]},integer}},{{c,number,{int_set,[2]},integer},mandatory,{c,number,{int_rng,0,1114111},integer}},{{c,number,{int_set,[3]},integer},mandatory,any}],none,none},unknown}}],unknown},{c,list,{{c,union,[{c,atom,['','#','+'],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,pos_inf},integer}]},{emqx_authz_proto_v1,lookup_from_all_nodes,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,atom,any,unknown}]},{emqx_ft_storage_fs_reader_proto_v1,read,3} => {any,[{c,atom,any,unknown},{c,identifier,[pid],unknown},{c,number,{int_rng,1,pos_inf},integer}]},{emqx_slow_subs,clear_history,0} => {any,[]},{emqx_proto_v1,get_stats,1} => {any,[{c,atom,any,unknown}]},{emqx_cm,do_kick_session,3} => 
{{c,atom,[ok],unknown},[any,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_gateway_cm_proto_v1,get_chann_conn_mod,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_eviction_agent_proto_v2,evict_session_channel,4} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,map,{[{{c,atom,[clean_start],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[clientid],unknown},optional,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[conn_mod],unknown},mandatory,{c,atom,any,unknown}},{{c,atom,[conn_props],unknown},optional,{c,map,{[],{c,atom,any,unknown},any},unknown}},{{c,atom,[connected],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[connected_at],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[disconnected_at],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[expiry_interval],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[keepalive],unknown},optional,{c,number,{int_rng,0,1114111},integer}},{{c,atom,[peercert],unknown},optional,{c,union,[{c,atom,[nossl,undefined],unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[{c,atom,any,unknown},none,none,none,none,none,{c,tuple,any,{any,any}},none,none],unknown},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}},{{c,atom,[peername],unknown},mandatory,{c,tuple,[{c,union,[{c,atom,[local,undefined,unspec],unknown},none,none,none,none,none,{c,tuple_set,[{4,[{c,tuple,[{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer}],{4,any}}]},{8,[{c,tuple,[{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111
},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer}],{8,any}}]}],unknown},none,none],unknown},any],{2,any}}},{{c,atom,[proto_name],unknown},optional,{c,binary,{8,0},unknown}},{{c,atom,[proto_ver],unknown},optional,{c,union,[none,{c,binary,{8,0},unknown},none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}},{{c,atom,[receive_maximum],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[sockname],unknown},mandatory,{c,tuple,[{c,union,[{c,atom,[local,undefined,unspec],unknown},none,none,none,none,none,{c,tuple_set,[{4,[{c,tuple,[{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer}],{4,any}}]},{8,[{c,tuple,[{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer}],{8,any}}]}],unknown},none,none],unknown},any],{2,any}}},{{c,atom,[socktype],unknown},mandatory,{c,atom,any,unknown}},{{c,atom,[username],unknown},optional,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}}],{c,atom,any,unknown},any},unknown},{c,map,{[{{c,atom,[anonymous],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[auth_result],unknown},optional,{c,atom,[bad_authentication_method,bad_clientid_or_password,bad_username_or_password,banned,client_identifier_not_valid,not_authorized,server_busy,server_unavailable,success],unknown}},{{c,atom,[clientid],unknown},mandatory,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom
,[cn],unknown},optional,{c,binary,{8,0},unknown}},{{c,atom,[dn],unknown},optional,{c,binary,{8,0},unknown}},{{c,atom,[is_bridge],unknown},mandatory,{c,atom,[false,true],unknown}},{{c,atom,[is_superuser],unknown},mandatory,{c,atom,[false,true],unknown}},{{c,atom,[mountpoint],unknown},mandatory,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[password],unknown},optional,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[peerhost],unknown},mandatory,{c,tuple_set,[{4,[{c,tuple,[{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer}],{4,any}}]},{8,[{c,tuple,[{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer}],{8,any}}]}],unknown}},{{c,atom,[protocol],unknown},mandatory,{c,atom,any,unknown}},{{c,atom,[sockport],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[username],unknown},mandatory,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[ws_cookie],unknown},optional,{c,union,[{c,atom,[undefined],unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},none,none,none,none],unknown}},{{c,atom,[zone],unknown},mandatory,{c,atom,any,unknown}}],{c,atom,any,unknown},any},unknown}]},{emqx_mgmt_api_plugins_proto_v1,install_package,2} => {any,[{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,binary,{8,0},unknown}]},{emqx_bridge_proto_v6,list_bridges_on_nodes,1} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_management_proto_v1,call_client,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},any]},{emqx_bridge_v2_api,get_metrics_from_local_node,2} => {{c,map,{[{{c,atom,[dropped],unknown},mandatory,{c,number,any,integer}},{{c,atom,['dropped.expired'],unknown},mandatory,{c,number,any,integer}},{{c,atom,['dropped.other'],unknown},mandatory,{c,number,any,integer}},{{c,atom,['dropped.queue_full'],unknown},mandatory,{c,number,any,integer}},{{c,atom,['dropped.resource_not_found'],unknown},mandatory,{c,number,any,integer}},{{c,atom,['dropped.resource_stopped'],unknown},mandatory,{c,number,any,integer}},{{c,atom,[failed],unknown},mandatory,{c,number,any,integer}},{{c,atom,[inflight],unknown},mandatory,any},{{c,atom,[late_reply],unknown},mandatory,{c,number,any,integer}},{{c,atom,[matched],unknown},mandatory,{c,number,any,integer}},{{c,atom,[queuing],unknown},mandatory,any},{{c,atom,[rate],unknown},mandatory,any},{{c,atom,[rate_last5m],unknown},mandatory,any},{{c,atom,[rate_max],unknown},mandatory,any},{{c,atom,[received],unknown},mandatory,{c,number,any,integer}},{{c,atom,[retried],unknown},mandatory,{c,number,any,integer}},{{c,atom,[success],unknown},mandatory,{c,number,any,integer}}],none,none},unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_mgmt_trace_proto_v1,get_trace_size,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_topic_metrics,reset,1} => {any,[any]},{emqx_bridge_proto_v1,restart_bridges_to_all_nodes,3} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_management_proto_v3,subscribe,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,list,{{c,tuple,[{c,binary,{8,0},unknown},{c,map,{[{{c,atom,[nl],unknown},mandatory,{c,number,{int_set,[0,1]},integer}},{{c,atom,[qos],unknown},mandatory,{c,number,{int_set,[0,1,2]},integer}},{{c,atom,[rap],unknown},mandatory,{c,number,{int_set,[0,1]},integer}},{{c,atom,[rh],unknown},mandatory,{c,number,{int_set,[0,1,2]},integer}}],{c,atom,any,unknown},any},unknown}],{2,any}},{c,nil,[],unknown}},unknown}]},{emqx_cm_proto_v2,kick_session,3} => {any,[{c,atom,[discard,kick],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_bridge_proto_v5,v2_get_metrics_from_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_bridge_proto_v5,stop_bridge_to_node,3} => 
{any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_delayed,delete_delayed_message,1} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},{c,atom,[not_found],unknown}],{2,{c,atom,[error],unknown}}},none,none],unknown},[any]},{emqx_bridge_proto_v6,start_bridges_to_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_node_rebalance_api_proto_v2,node_rebalance_evacuation_start,2} => {any,[{c,atom,any,unknown},{c,map,{[{{c,atom,[conn_evict_rate],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[migrate_to],unknown},optional,{c,union,[{c,atom,[undefined],unknown},none,none,none,{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}},{{c,atom,[server_reference],unknown},optional,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[sess_evict_rate],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[wait_health_check],unknown},optional,{c,number,any,unknown}},{{c,atom,[wait_takeover],unknown},optional,{c,number,any,unknown}}],none,none},unknown}]},{emqx_conf_proto_v2,get_all,1} => 
{any,[{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},unknown}]},{emqx_mgmt_api_plugins_proto_v1,delete_package,1} => {any,[{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_gateway_api_listeners_proto_v1,listeners_cluster_status,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,list,{any,{c,nil,[],unknown}},unknown}]},{emqx_conf_proto_v3,get_hocon_config,2} => {any,[{c,atom,any,unknown},{c,binary,{8,0},unknown}]},{emqx_exhook_mgr,server_info,1} => {any,[any]},{emqx_management_proto_v2,call_client,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},any]},{emqx_management_proto_v4,node_info,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_gateway_cm,do_call,4} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown},any]},{emqx_cm_proto_v1,get_chann_conn_mod,2} => {any,[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_dashboard_proto_v1,do_sample,2} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,1,pos_inf},integer},none,none,none],unknown}]},{emqx_node_rebalance_api_proto_v1,node_rebalance_stop,1} => {any,[{c,atom,any,unknown}]},{emqx_conf_proto_v2,get_override_config_file,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_bridge_proto_v5,start_bridges_to_all_nodes,3} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_node_rebalance_status_proto_v2,evacuation_status,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{ssl_pem_cache,clear,0} => {any,[]},{emqx_mgmt_api_plugins,ensure_action,2} => {{c,atom,[ok],unknown},[{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,atom,[restart,start,stop],unknown}]},{emqx_conf_proto_v3,reset,2} => {any,[{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_resource_proto_v1,recreate,4} => 
{any,[{c,binary,{8,0},unknown},{c,atom,any,unknown},any,{c,map,{[{{c,atom,[auto_restart_interval],unknown},optional,{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,1,pos_inf},integer},none,none,none],unknown}},{{c,atom,[auto_retry_interval],unknown},optional,{c,number,any,integer}},{{c,atom,[batch_size],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[batch_time],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[health_check_interval],unknown},optional,{c,number,any,integer}},{{c,atom,[health_check_timeout],unknown},optional,{c,number,any,integer}},{{c,atom,[inflight_window],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[max_buffer_bytes],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[query_mode],unknown},optional,{c,atom,[async,no_queries,simple_async,simple_async_internal_buffer,simple_sync,simple_sync_internal_buffer,sync],unknown}},{{c,atom,[resume_interval],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[spawn_buffer_workers],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[start_after_created],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[start_timeout],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[wait_for_resource_ready],unknown},optional,{c,number,any,integer}},{{c,atom,[worker_pool_size],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}}],none,none},unknown}]},{emqx_conf_proto_v3,remove_config,2} => 
{any,[{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_gateway_api_listeners,do_listeners_cluster_status,1} => {{c,map,{[],any,any},unknown},[{c,list,{any,{c,nil,[],unknown}},unknown}]},{emqx_bridge_v2_api,lookup_from_local_node_v6,3} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,atom,[not_found],unknown}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},any],{2,{c,atom,[ok],unknown}}}]}],unknown},[{c,atom,[actions,sources],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_telemetry_proto_v1,get_cluster_uuid,1} => {any,[{c,atom,any,unknown}]},{emqx_management_proto_v1,unsubscribe,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,binary,{8,0},unknown}]},{emqx_resource,create_dry_run_local,2} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},none,none],unknown},[{c,atom,any,unknown},any]},{emqx_gateway_cm,do_set_chan_info,4} => {{c,atom,[false,true],unknown},[any,any,any,any]},{emqx_mgmt_api_cluster,connected_replicants,0} => 
{{c,list,{{c,tuple,[{c,atom,any,unknown},{c,atom,any,unknown},{c,identifier,[pid],unknown}],{3,any}},{c,nil,[],unknown}},unknown},[]},{emqx_mgmt,do_list_subscriptions,0} => {none,[]},{emqx_node_rebalance_api_proto_v2,node_rebalance_purge_stop,1} => {any,[{c,atom,any,unknown}]},{emqx_bridge_v2,list,1} => {any,[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_node_rebalance_status,rebalance_status,0} => {{c,tuple,[{c,atom,any,unknown},{c,union,[{c,atom,[disabled],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[enabled],unknown},{c,map,{[],any,any},unknown}],{2,{c,atom,[enabled],unknown}}},none,none],unknown}],{2,any}},[]},{emqx_bridge_proto_v5,stop_bridges_to_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_node_rebalance_status,evacuation_status,0} => 
{{c,tuple,[{c,atom,any,unknown},{c,union,[{c,atom,[disabled],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[enabled],unknown},{c,map,{[{{c,atom,[conn_evict_rate],unknown},mandatory,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[current_conns],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[current_sessions],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[initial_conns],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[initial_sessions],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[migrate_to],unknown},mandatory,{c,union,[{c,atom,[undefined],unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},none,none,none,none],unknown}},{{c,atom,[server_reference],unknown},mandatory,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[sess_evict_rate],unknown},mandatory,{c,number,{int_rng,1,pos_inf},integer}}],none,none},unknown}],{2,{c,atom,[enabled],unknown}}},none,none],unknown}],{2,any}},[]},{emqx_mgmt_trace_proto_v2,trace_file,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[{c,atom,any,unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,1114111},integer},none,none,none],unknown},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_management_proto_v3,get_full_config,1} => {any,[{c,atom,any,unknown}]},{emqx_gateway_cm,do_takeover_session,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_gateway_cm_proto_v1,set_chan_stats,4} => 
{any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown},{c,list,{{c,tuple,[{c,atom,any,unknown},any],{2,any}},{c,nil,[],unknown}},unknown}]},{emqx,get_config,2} => {any,[any,any]},{emqx_proto_v2,delete_all_deactivated_alarms,1} => {any,[{c,atom,any,unknown}]},{emqx_ds_proto_v1,make_iterator,6} => {any,[{c,atom,any,unknown},{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,opaque,[{opaque,emqx_ds_storage_layer,stream,0,{c,map,{[{{c,number,{int_set,[1]},integer},mandatory,{c,number,{int_set,[1]},integer}},{{c,number,{int_set,[2]},integer},mandatory,{c,number,{int_rng,0,1114111},integer}},{{c,number,{int_set,[3]},integer},mandatory,any}],none,none},unknown}}],unknown},{c,list,{{c,union,[{c,atom,['','#','+'],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,pos_inf},integer}]},{emqx_topic_metrics,metrics,0} => {{c,list,{{c,map,{[{{c,atom,[create_time],unknown},mandatory,any},{{c,atom,[metrics],unknown},mandatory,{c,map,{[],any,any},unknown}},{{c,atom,[reset_time],unknown},optional,any},{{c,atom,[topic],unknown},mandatory,any}],none,none},unknown},{c,nil,[],unknown}},unknown},[]},{emqx_node_rebalance_agent,enable,3} => {any,[any,any,any]},{emqx_ft_storage_fs_proto_v1,pread,5} => 
{any,[{c,atom,any,unknown},{c,tuple,[{c,binary,{8,0},unknown},{c,binary,{8,0},unknown}],{2,any}},{c,map,{[{{c,atom,[fragment],unknown},mandatory,{c,tuple_set,[{2,[{c,tuple,[{c,atom,[filemeta],unknown},{c,map,{[{{c,atom,[checksum],unknown},optional,{c,tuple,[{c,atom,any,unknown},{c,binary,{8,0},unknown}],{2,any}}},{{c,atom,[expire_at],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[name],unknown},mandatory,{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown}},{{c,atom,[segments_ttl],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[size],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[user_data],unknown},optional,{c,union,[{c,atom,[false,null,true],unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[{c,atom,[false,null,true],unknown},{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,nil,[],unknown}},unknown},{c,number,any,unknown},none,none,{c,map,{[],{c,binary,{8,0},unknown},any},unknown}],unknown},{c,nil,[],unknown}},unknown},{c,number,any,unknown},none,none,{c,map,{[],{c,binary,{8,0},unknown},{c,union,[{c,atom,[false,null,true],unknown},{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,nil,[],unknown}},unknown},{c,number,any,unknown},none,none,{c,map,{[],{c,binary,{8,0},unknown},any},unknown}],unknown}},unknown}],unknown}}],none,none},unknown}],{2,{c,atom,[filemeta],unknown}}},{c,tuple,[{c,atom,[segment],unknown},{c,map,{[{{c,atom,[offset],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[size],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}}],none,none},unknown}],{2,{c,atom,[segment],unknown}}}]}],unknown}},{{c,atom,[path],unknown},mandatory,{c,union,[{c,atom,any,unknown},none,none,none,{c,list,{{c,union,[{c,atom,any,unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,1114111},integer},none,none,none],unknown},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}},{{c,atom,[size],unknown},mandatory,{c
,number,{int_rng,0,pos_inf},integer}},{{c,atom,[timestamp],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}}],none,none},unknown},{c,number,{int_rng,0,pos_inf},integer},{c,number,{int_rng,0,pos_inf},integer}]},{emqx_ft_storage_exporter_fs_proxy,read_export_file_local,2} => {any,[any,any]},{emqx_trace,trace_file,1} => {{c,tuple_set,[{3,[{c,tuple,[{c,atom,[error],unknown},{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},{c,atom,any,unknown}],{3,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},{c,binary,{8,0},unknown}],{3,{c,atom,[ok],unknown}}}]}],unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[{c,atom,any,unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,1114111},integer},none,none,none],unknown},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_topic_metrics,reset,0} => {any,[]},{emqx_conf_proto_v1,get_config,2} => {any,[{c,atom,any,unknown},{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},unknown}]},{emqx_gateway_cm_proto_v1,takeover_session,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_cm_proto_v1,get_chan_stats,2} => {any,[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_topic_metrics_proto_v1,reset,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_mgmt_api_plugins,get_plugins,0} => {{c,tuple,[{c,atom,any,unknown},{c,list,{{c,map,{[],any,any},unknown},{c,nil,[],unknown}},unknown}],{2,any}},[]},{emqx_node_rebalance_proto_v3,disable_rebalance_agent,2} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,identifier,[pid],unknown}]},{emqx_conf_proto_v3,get_override_config_file,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_management_proto_v2,unsubscribe,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,binary,{8,0},unknown}]},{emqx_exhook_proto_v1,all_servers_info,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_cm_proto_v1,takeover_session,2} => {any,[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_management_proto_v1,subscribe,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,list,{{c,tuple,[{c,binary,{8,0},unknown},{c,map,{[{{c,atom,[nl],unknown},mandatory,{c,number,{int_set,[0,1]},integer}},{{c,atom,[qos],unknown},mandatory,{c,number,{int_set,[0,1,2]},integer}},{{c,atom,[rap],unknown},mandatory,{c,number,{int_set,[0,1]},integer}},{{c,atom,[rh],unknown},mandatory,{c,number,{int_set,[0,1,2]},integer}}],{c,atom,any,unknown},any},unknown}],{2,any}},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_proto_v1,evict_connections,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,pos_inf},integer}]},{emqx_conf_proto_v3,get_all,1} => {any,[{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_proto_v1,disconnected_session_counts,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_proto_v2,clean_authz_cache,1} => {any,[{c,atom,any,unknown}]},{emqx_prometheus_proto_v1,stop,1} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_bridge_proto_v1,list_bridges,1} => {any,[{c,atom,any,unknown}]},{emqx_bridge_proto_v5,v2_start_bridge_to_node,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_telemetry,get_cluster_uuid,0} => {any,[]},{emqx_proto_v1,deactivate_alarm,2} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}]},{emqx_node_rebalance_status,purge_status,0} => {{c,tuple,[{c,atom,any,unknown},{c,union,[{c,atom,[disabled],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[enabled],unknown},{c,map,{[{{c,atom,[current_sessions],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[initial_sessions],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[purge_rate],unknown},mandatory,{c,number,{int_rng,1,pos_inf},integer}}],none,none},unknown}],{2,{c,atom,[enabled],unknown}}},none,none],unknown}],{2,any}},[]},{emqx_resource_proto_v1,create_dry_run,2} => {any,[{c,atom,any,unknown},any]},{emqx_mgmt_data_backup,delete_file,1} => 
{{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,atom,any,unknown}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,binary,{8,0},unknown}],{2,{c,atom,[ok],unknown}}}]}],unknown},none,none],unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,1114111},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown}]},{emqx_rule_engine_proto_v1,reset_metrics,1} => {any,[{c,binary,{8,0},unknown}]},{emqx_mgmt_data_backup,list_files,0} => {{c,list,{any,{c,nil,[],unknown}},unknown},[]},{emqx_proto_v1,get_metrics,1} => {any,[{c,atom,any,unknown}]},{emqx_topic_metrics_proto_v1,metrics,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_bridge_v2,start,2} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},none,none],unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_delayed_proto_v2,get_delayed_message,2} => {any,[{c,atom,any,unknown},{c,binary,{8,0},unknown}]},{emqx_management_proto_v1,broker_info,1} => {any,[{c,atom,any,unknown}]},{emqx_connector_proto_v1,lookup_from_all_nodes,3} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_mgmt_api_plugins,describe_package,1} => {{c,tuple,[{c,atom,any,unknown},{c,list,{{c,map,{[],any,any},unknown},{c,nil,[],unknown}},unknown}],{2,any}},[{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_delayed_proto_v2,delete_delayed_message,2} => {any,[{c,atom,any,unknown},{c,binary,{8,0},unknown}]},{emqx_bridge_proto_v6,v2_get_metrics_from_all_nodes_v6,4} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,atom,[actions,sources],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_bridge_proto_v2,start_bridge_to_node,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_dashboard_proto_v1,current_rate,1} => {any,[{c,atom,any,unknown}]},{emqx_gateway_cm_proto_v1,set_chan_info,4} => 
{any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown},{c,map,{[],{c,atom,any,unknown},any},unknown}]},{emqx_node_rebalance_proto_v2,available_nodes,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_gateway_cm,do_set_chan_stats,4} => {{c,atom,[false,true],unknown},[any,any,any,any]},{emqx_mgmt_trace_proto_v2,trace_file_detail,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[{c,atom,any,unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,1114111},integer},none,none,none],unknown},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_proto_v1,delete_all_deactivated_alarms,1} => {any,[{c,atom,any,unknown}]},{emqx_connector_proto_v1,start_connector_to_node,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_bridge_v2,list,0} => {any,[]},{emqx_node_rebalance_proto_v2,evict_connections,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,pos_inf},integer}]},{emqx_conf_proto_v2,update,3} => 
{any,[{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},any,{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_slow_subs_proto_v1,clear_history,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_delayed,get_delayed_message,1} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,atom,[not_found],unknown}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,map,{[{{c,atom,[delayed_interval],unknown},mandatory,any},{{c,atom,[delayed_remaining],unknown},mandatory,{c,number,any,integer}},{{c,atom,[expected_at],unknown},mandatory,{c,binary,{8,0},unknown}},{{c,atom,[from_clientid],unknown},mandatory,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[from_username],unknown},mandatory,any},{{c,atom,[msgid],unknown},mandatory,{c,binary,{8,0},unknown}},{{c,atom,[node],unknown},mandatory,any},{{c,atom,[payload],unknown},optional,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,255},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown}},{{c,atom,[publish_at],unknown},mandatory,{c,binary,{8,0},unknown}},{{c,atom,[qos],unknown},mandatory,any},{{c,atom,[topic],unknown},m
andatory,{c,binary,{8,0},unknown}}],none,none},unknown}],{2,{c,atom,[ok],unknown}}}]}],unknown},[any]},{emqx_node_rebalance_proto_v2,disable_rebalance_agent,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,identifier,[pid],unknown},any]},{emqx_mgmt,do_unsubscribe,2} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,atom,[channel_not_found],unknown}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[unsubscribe],unknown},{c,list,{{c,tuple,[{c,union,[none,{c,binary,{8,0},unknown},none,none,none,none,{c,tuple,[any,any,any],{3,any}},none,none],unknown},{c,map,{[],any,any},unknown}],{2,any}},{c,nil,[],unknown}},nonempty}],{2,{c,atom,[unsubscribe],unknown}}}]}],unknown},[any,any]},{emqx_mgmt_api_plugins_proto_v2,install_package,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,binary,{8,0},unknown}]},{emqx_gateway_cm,do_call,5} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown},any,any]},{emqx_mgmt,do_call_client,2} => {any,[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},any]},{emqx_cm_proto_v2,lookup_client,2} => {any,[{c,atom,any,unknown},{c,tuple_set,[{2,[{c,tuple,[{c,atom,[clientid],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}],{2,{c,atom,[clientid],unknown}}},{c,tuple,[{c,atom,[username],unknown},{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}],{2,{c,atom,[username],unknown}}}]}],unknown}]},{emqx_ds_replication_layer,do_drop_db_v1,1} => {{c,atom,[ok],unknown},[{c,atom,any,unknown}]},{emqx_eviction_agent,all_local_channels_count,0} => 
{any,[]},{emqx_bridge_proto_v4,restart_bridge_to_node,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_mgmt_cluster_proto_v3,connected_replicants,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_status_proto_v1,rebalance_status,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_proto_v2,enable_rebalance_agent,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,identifier,[pid],unknown},any]},{emqx_persistent_session_ds_proto_v1,open_iterator,4} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,list,{{c,union,[{c,atom,['','#','+'],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,pos_inf},integer},any]},{emqx_cm_proto_v2,kickout_client,2} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}]},{emqx_proto_v1,clean_authz_cache,2} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}]},{emqx_node_rebalance_evacuation_proto_v1,available_nodes,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_mgmt_cluster_proto_v1,invite_node,2} => {any,[{c,atom,any,unknown},{c,atom,any,unknown}]},{emqx_prometheus_proto_v2,stop,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_ds_proto_v2,get_streams,5} => 
{any,[{c,atom,any,unknown},{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,list,{{c,union,[{c,atom,['','#','+'],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,pos_inf},integer}]},{emqx_bridge_proto_v3,start_bridges_to_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_eviction_agent,evict_sessions,3} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},{c,atom,[disabled],unknown}],{2,{c,atom,[error],unknown}}},none,none],unknown},[any,{c,union,[{c,atom,any,unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},none,none,none,none],unknown},any]},{emqx_gateway_http,gateway_status,1} => {{c,map,{[{{c,atom,[current_connections],unknown},optional,any},{{c,atom,[max_connections],unknown},optional,{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}},{{c,atom,[node],unknown},mandatory,{c,atom,any,unknown}},{{c,atom,[status],unknown},mandatory,{c,atom,[running,stopped,unloaded],unknown}}],none,none},unknown},[{c,atom,any,unknown}]},{emqx_bridge_proto_v4,get_metrics_from_all_nodes,3} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_mgmt_data_backup_proto_v1,read_file,3} => {any,[{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}]},{emqx_cm_proto_v1,get_chan_info,2} => {any,[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_delayed_proto_v3,clear_all,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_proto_v3,enable_rebalance_agent,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,identifier,[pid],unknown},any]},{emqx_conf_proto_v1,update,4} => {any,[{c,atom,any,unknown},{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},any,{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_bridge_proto_v3,list_bridges,1} => {any,[{c,atom,any,unknown}]},{emqx_management_proto_v2,unsubscribe_batch,3} => 
{any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,list,{{c,binary,{8,0},unknown},{c,nil,[],unknown}},unknown}]},{emqx_topic_metrics_proto_v1,reset,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,binary,{8,0},unknown}]},{emqx_bridge_proto_v5,lookup_from_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_conf_proto_v1,get_all,1} => {any,[{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},unknown}]},{emqx_bridge_proto_v3,stop_bridges_to_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_ds_replication_layer,do_store_batch_v1,4} => 
{{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},none,none],unknown},[{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,map,{[{{c,number,{int_set,[1]},integer},mandatory,{c,number,{int_set,[3]},integer}},{{c,number,{int_set,[2]},integer},mandatory,{c,list,{{c,tuple,[{c,atom,[message],unknown},{c,binary,{8,0},unknown},any,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,map,{[],any,any},unknown},{c,map,{[],any,any},unknown},{c,binary,{8,0},unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown},{c,number,any,integer},any],{10,{c,atom,[message],unknown}}},{c,nil,[],unknown}},unknown}}],any,any},unknown},{c,map,{[],none,none},unknown}]},{emqx_management_proto_v1,list_subscriptions,1} => {any,[{c,atom,any,unknown}]},{emqx_conf_cli,get_config,1} => {{c,union,[none,none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},{c,list,{{c,number,{int_set,"_defknotuy"},integer},{c,nil,[],unknown}},nonempty}],{2,{c,atom,[error],unknown}}},none,{c,map,{[],any,any},unknown}],unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_management_proto_v4,subscribe,3} => 
{any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,list,{{c,tuple,[{c,binary,{8,0},unknown},{c,map,{[{{c,atom,[nl],unknown},mandatory,{c,number,{int_set,[0,1]},integer}},{{c,atom,[qos],unknown},mandatory,{c,number,{int_set,[0,1,2]},integer}},{{c,atom,[rap],unknown},mandatory,{c,number,{int_set,[0,1]},integer}},{{c,atom,[rh],unknown},mandatory,{c,number,{int_set,[0,1,2]},integer}}],{c,atom,any,unknown},any},unknown}],{2,any}},{c,nil,[],unknown}},unknown}]},{emqx_trace,trace_file_detail,1} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,map,{[{{c,atom,[file],unknown},mandatory,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[{c,atom,any,unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,1114111},integer},none,none,none],unknown},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}},{{c,atom,[node],unknown},mandatory,{c,binary,{8,0},unknown}},{{c,atom,[reason],unknown},mandatory,{c,atom,any,unknown}}],none,none},unknown}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,map,{[{{c,atom,[mtime],unknown},mandatory,{c,union,[{c,atom,[undefined],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},{c,tuple,[{c,tuple,[any,any,any],{3,any}},{c,tuple,[any,any,any],{3,any}}],{2,any}},none,none],unknown}},{{c,atom,[node],unknown},mandatory,{c,binary,{8,0},unknown}},{{c,atom,[size],unknown},mandatory,{c,union,[{c,atom,[undefined],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}}],none,none},unknown}],{2,{c,atom,[ok],unknown}}}]}],unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[{c,atom,any,unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,1114111},integer},none,none,none],unknown},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_bridge_prot
o_v6,v2_start_bridge_on_node_v6,4} => {any,[{c,atom,any,unknown},{c,atom,[actions,sources],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_bridge_proto_v6,stop_bridge_to_node,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_mgmt_api_trace,get_trace_size,0} => {{c,map,{[],any,any},unknown},[]},{emqx_prometheus_proto_v1,start,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_topic_metrics_proto_v1,metrics,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,binary,{8,0},unknown}]},{emqx_retainer_proto_v2,active_mnesia_indices,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_proto_v3,evict_connections,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,pos_inf},integer}]},{emqx_stats,getstats,0} => {{c,list,{{c,tuple,any,{any,any}},{c,nil,[],unknown}},unknown},[]},{emqx_alarm,deactivate,1} => {any,[any]},{emqx_conf_proto_v2,remove_config,3} => 
{any,[{c,atom,any,unknown},{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_bridge_proto_v3,start_bridge_to_node,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_license_resources,local_connection_count,0} => {any,[]},{emqx_bridge_resource,start,2} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},none,none],unknown},[any,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,255},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown}]},{emqx_persistent_session_ds_proto_v1,close_all_iterators,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,binary,{8,0},unknown}]},{emqx_bridge_proto_v2,list_bridges,1} => {any,[{c,atom,any,unknown}]},{emqx_ds_proto_v1,drop_db,2} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,atom,any,unknown}]},{emqx_management_proto_v3,broker_info,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_proto_v3,session_counts,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_bridge_proto_v5,get_metrics_from_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_license_proto_v2,remote_connection_counts,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_proto_v1,evict_sessions,4} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,pos_inf},integer},{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,atom,[connected,connecting,disconnected,idle,reauthenticating],unknown}]},{emqx_broker_proto_v1,forward,3} => 
{any,[{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,tuple,[{c,atom,[delivery],unknown},{c,identifier,[pid],unknown},{c,tuple,[{c,atom,[message],unknown},{c,binary,{8,0},unknown},any,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,map,{[],{c,atom,any,unknown},{c,atom,[false,true],unknown}},unknown},{c,map,{[{{c,atom,[allow_publish],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[peerhost],unknown},optional,{c,tuple_set,[{4,[{c,tuple,[{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer}],{4,any}}]},{8,[{c,tuple,[{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer}],{8,any}}]}],unknown}},{{c,atom,[properties],unknown},optional,{c,map,{[],{c,atom,any,unknown},any},unknown}},{{c,atom,[proto_ver],unknown},optional,{c,union,[none,{c,binary,{8,0},unknown},none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}},{{c,atom,[protocol],unknown},optional,{c,atom,any,unknown}},{{c,atom,[username],unknown},optional,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}}],{c,atom,any,unknown},any},unknown},{c,binary,{8,0},unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,255},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown},{c,number,any,integer},any],{10,{c,atom,[messag
e],unknown}}}],{3,{c,atom,[delivery],unknown}}}]},{emqx_node_rebalance_proto_v1,connection_counts,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_bridge_proto_v1,stop_bridge_to_node,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_conf_proto_v3,remove_config,3} => {any,[{c,atom,any,unknown},{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_exhook_proto_v1,server_hooks_metrics,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,binary,{8,0},unknown}]},{emqx_prometheus_api,lookup_from_local_nodes,3} => {any,[{c,union,[{c,atom,any,unknown},none,none,none,none,none,{c,tuple,any,{any,any}},none,none],unknown},{c,atom,any,unknown},{c,list,{any,{c,nil,[],unknown}},unknown}]},{emqx_proto_v2,get_metrics,1} => {any,[{c,atom,any,unknown}]},{emqx_eviction_agent_proto_v2,all_channels_count,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}]},{emqx_bridge_proto_v4,stop_bridges_to_all_nodes,3} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_proto_v2,clean_authz_cache,2} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}]},{emqx_bridge_proto_v3,restart_bridge_to_node,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_bridge_proto_v2,stop_bridges_to_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_conf_proto_v3,update,3} => 
{any,[{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},any,{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_gateway_cm_proto_v1,call,4} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown},any]},{emqx_prometheus_proto_v2,start,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_proto_v1,session_counts,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_management_proto_v4,call_client,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},any]},{emqx_bridge_proto_v5,restart_bridges_to_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_management_proto_v4,unsubscribe,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,binary,{8,0},unknown}]},{emqx_management_proto_v3,unsubscribe_batch,3} => 
{any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,list,{{c,binary,{8,0},unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_status_proto_v2,rebalance_status,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_gateway_http_proto_v1,get_cluster_status,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,atom,any,unknown}]},{emqx_prometheus_proto_v2,raw_prom_data,4} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,atom,any,unknown},{c,atom,any,unknown},{c,list,{any,{c,nil,[],unknown}},unknown}]},{emqx,update_config,3} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,map,{[{{c,atom,[config],unknown},optional,{c,union,[{c,atom,[undefined],unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},none,none,none,{c,map,{[],{c,atom,any,unknown},any},unknown}],unknown}},{{c,atom,[post_config_update],unknown},optional,{c,map,{[],{c,atom,any,unknown},any},unknown}},{{c,atom,[raw_config],unknown},optional,{c,union,[{c,atom,[undefined],unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},none,none,none,{c,map,{[],{c,binary,{8,0},unknown},any},unknown}],unknown}}],none,none},unknown}],{2,{c,atom,[ok],unknown}}}]}],unknown},[{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},any,{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_bridge_v2,
start,3} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},none,none],unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_management_proto_v4,get_full_config,1} => {any,[{c,atom,any,unknown}]},{emqx_alarm,delete_all_deactivated_alarms,0} => {any,[]},{emqx_management_proto_v3,list_listeners,1} => {any,[{c,atom,any,unknown}]},{emqx_mgmt_api_cluster,join,1} => {{c,union,[{c,atom,[ignore,ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},none,none],unknown},[{c,atom,any,unknown}]},{emqx_cm_proto_v2,takeover_session,2} => {any,[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_bridge_proto_v3,stop_bridge_to_node,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_broker_proto_v1,forward_async,3} => 
{any,[{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,tuple,[{c,atom,[delivery],unknown},{c,identifier,[pid],unknown},{c,tuple,[{c,atom,[message],unknown},{c,binary,{8,0},unknown},any,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,map,{[],{c,atom,any,unknown},{c,atom,[false,true],unknown}},unknown},{c,map,{[{{c,atom,[allow_publish],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[peerhost],unknown},optional,{c,tuple_set,[{4,[{c,tuple,[{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer}],{4,any}}]},{8,[{c,tuple,[{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer}],{8,any}}]}],unknown}},{{c,atom,[properties],unknown},optional,{c,map,{[],{c,atom,any,unknown},any},unknown}},{{c,atom,[proto_ver],unknown},optional,{c,union,[none,{c,binary,{8,0},unknown},none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}},{{c,atom,[protocol],unknown},optional,{c,atom,any,unknown}},{{c,atom,[username],unknown},optional,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}}],{c,atom,any,unknown},any},unknown},{c,binary,{8,0},unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,255},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown},{c,number,any,integer},any],{10,{c,atom,[messag
e],unknown}}}],{3,{c,atom,[delivery],unknown}}}]},{emqx_delayed_proto_v3,delete_delayed_messages_by_topic_name,2} => {any,[{c,list,{any,{c,nil,[],unknown}},unknown},{c,binary,{8,0},unknown}]},{emqx_connector,list,0} => {any,[]},{emqx_cm_proto_v2,get_chann_conn_mod,2} => {any,[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_mgmt_api_plugins_proto_v1,ensure_action,2} => {any,[{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,atom,[restart,start,stop],unknown}]},{emqx_prometheus,do_stop,0} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},{c,atom,[not_found,restarting,running,simple_one_for_one],unknown}],{2,{c,atom,[error],unknown}}},none,none],unknown},[]},{emqx_exhook_mgr,server_hooks_metrics,1} => {any,[any]},{emqx_ds_proto_v2,update_iterator,5} => {any,[{c,atom,any,unknown},{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,opaque,[{opaque,emqx_ds_storage_layer,iterator,0,{c,map,{[{{c,number,{int_set,[1]},integer},mandatory,{c,number,{int_set,[2]},integer}},{{c,number,{int_set,[2]},integer},mandatory,{c,number,{int_rng,0,1114111},integer}},{{c,number,{int_set,[3]},integer},mandatory,any}],none,none},unknown}}],unknown},{c,binary,{8,0},unknown}]},{emqx_mgmt_api_plugins_proto_v1,describe_package,1} => {any,[{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_bridge_proto_v4,start_bridge_to_node,3} => 
{any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{erlang,send,2} => {any,[any,any]},{emqx_metrics_worker,get_metrics,2} => {{c,map,{[{{c,atom,[counters],unknown},mandatory,{c,map,{[],any,any},unknown}},{{c,atom,[gauges],unknown},mandatory,{c,map,{[],any,any},unknown}},{{c,atom,[rate],unknown},mandatory,{c,map,{[],any,any},unknown}},{{c,atom,[slides],unknown},mandatory,{c,map,{[],any,any},unknown}}],none,none},unknown},[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}]},{emqx_node_rebalance_purge_proto_v1,stop,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_mgmt_api_configs,get_full_config,0} => {{c,map,{[],any,any},unknown},[]},{emqx_ft_storage_fs_proxy,list_local,2} => {any,[any,any]},{emqx_ft_storage_fs_reader,read,2} => {any,[{c,identifier,[pid],unknown},{c,number,{int_rng,1,pos_inf},integer}]},{emqx_node_rebalance_status_proto_v2,purge_status,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_api_proto_v2,node_rebalance_start,2} => 
{any,[{c,atom,any,unknown},{c,map,{[{{c,atom,[abs_conn_threshold],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[abs_sess_threshold],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[conn_evict_rate],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[nodes],unknown},optional,{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}},{{c,atom,[rel_conn_threshold],unknown},optional,{c,number,any,unknown}},{{c,atom,[rel_sess_threshold],unknown},optional,{c,number,any,unknown}},{{c,atom,[sess_evict_rate],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[wait_health_check],unknown},optional,{c,number,any,unknown}},{{c,atom,[wait_takeover],unknown},optional,{c,number,any,unknown}}],none,none},unknown}]},{emqx_delayed_proto_v1,delete_delayed_message,2} => {any,[{c,atom,any,unknown},{c,binary,{8,0},unknown}]},{emqx_proto_v1,clean_pem_cache,1} => {any,[{c,atom,any,unknown}]},{emqx_connector_api,lookup_from_local_node,2} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,atom,[not_found],unknown}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},any],{2,{c,atom,[ok],unknown}}}]}],unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_proto_v2,get_alarms,2} => {any,[{c,atom,any,unknown},{c,atom,[activated,all,deactivated],unknown}]},{emqx_gateway_cm,do_kick_session,4} => {{c,atom,[ok],unknown},[{c,atom,any,unknown},any,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_node_rebalance_api_proto_v1,node_rebalance_evacuation_stop,1} => {any,[{c,atom,any,unknown}]},{emqx_management_proto_v2,subscribe,3} 
=> {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,list,{{c,tuple,[{c,binary,{8,0},unknown},{c,map,{[{{c,atom,[nl],unknown},mandatory,{c,number,{int_set,[0,1]},integer}},{{c,atom,[qos],unknown},mandatory,{c,number,{int_set,[0,1,2]},integer}},{{c,atom,[rap],unknown},mandatory,{c,number,{int_set,[0,1]},integer}},{{c,atom,[rh],unknown},mandatory,{c,number,{int_set,[0,1,2]},integer}}],{c,atom,any,unknown},any},unknown}],{2,any}},{c,nil,[],unknown}},unknown}]},{emqx_gateway_cm,do_get_chann_conn_mod,3} => {any,[any,any,any]},{emqx_mgmt_cluster_proto_v3,invite_node,3} => {any,[{c,atom,any,unknown},{c,atom,any,unknown},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}]},{emqx_proto_v1,clean_authz_cache,1} => {any,[{c,atom,any,unknown}]},{emqx_bridge_proto_v6,v2_list_bridges_on_nodes_v6,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,atom,[actions,sources],unknown}]},{emqx_authn_proto_v1,lookup_from_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,atom,any,unknown},{c,binary,{8,0},unknown}]},{emqx_authz_cache,drain_cache,0} => {{c,atom,[ok],unknown},[]},{emqx_ds_proto_v1,store_batch,5} => 
{any,[{c,atom,any,unknown},{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,map,{[{{c,number,{int_set,[1]},integer},mandatory,{c,number,{int_set,[3]},integer}},{{c,number,{int_set,[2]},integer},mandatory,{c,list,{{c,tuple,[{c,atom,[message],unknown},{c,binary,{8,0},unknown},any,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,map,{[],{c,atom,any,unknown},{c,atom,[false,true],unknown}},unknown},{c,map,{[{{c,atom,[allow_publish],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[peerhost],unknown},optional,{c,tuple_set,[{4,[{c,tuple,[{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer}],{4,any}}]},{8,[{c,tuple,[{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer}],{8,any}}]}],unknown}},{{c,atom,[properties],unknown},optional,{c,map,{[],{c,atom,any,unknown},any},unknown}},{{c,atom,[proto_ver],unknown},optional,{c,union,[none,{c,binary,{8,0},unknown},none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}},{{c,atom,[protocol],unknown},optional,{c,atom,any,unknown}},{{c,atom,[username],unknown},optional,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}}],{c,atom,any,unknown},any},unknown},{c,binary,{8,0},unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,255},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],un
known}},unknown},none,none,none,none],unknown},{c,number,any,integer},any],{10,{c,atom,[message],unknown}}},{c,nil,[],unknown}},unknown}}],none,none},unknown},{c,map,{[],none,none},unknown}]},{emqx_proto_v2,is_running,1} => {any,[{c,atom,any,unknown}]},{emqx_conf_proto_v2,get_config,3} => {any,[{c,atom,any,unknown},{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},unknown},any]},{emqx_mgmt_data_backup_proto_v1,delete_file,3} => {any,[{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}]},{emqx_gateway_cm,do_get_chan_info,3} => {{c,union,[{c,atom,[undefined],unknown},none,none,none,none,none,none,none,{c,map,{[{{c,atom,[node],unknown},mandatory,{c,atom,any,unknown}}],any,any},unknown}],unknown},[any,any,any]},{emqx_retainer_dispatcher,wait_dispatch_complete,1} => {{c,atom,[ok],unknown},[any]},{emqx_resource,remove_local,1} => {{c,atom,[ok],unknown},[{c,binary,{8,0},unknown}]},{emqx_shared_sub,do_dispatch_with_ack,4} => 
{{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},none,none],unknown},[{c,identifier,[pid,port],unknown},any,any,{c,tuple,[{c,atom,[message],unknown},{c,binary,{8,0},unknown},any,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,map,{[],{c,atom,any,unknown},{c,atom,[false,true],unknown}},unknown},{c,map,{[{{c,atom,[allow_publish],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[peerhost],unknown},optional,{c,tuple_set,[{4,[{c,tuple,[{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer}],{4,any}}]},{8,[{c,tuple,[{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer}],{8,any}}]}],unknown}},{{c,atom,[properties],unknown},optional,{c,map,{[],{c,atom,any,unknown},any},unknown}},{{c,atom,[proto_ver],unknown},optional,{c,union,[none,{c,binary,{8,0},unknown},none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}},{{c,atom,[protocol],unknown},optional,{c,atom,any,unknown}},{{c,atom,[username],unknown},optional,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}}],{c,atom,any,unknown},any},unknown},{c,binary,{8,0},unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,255},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,
none,none,none],unknown},{c,number,any,integer},any],{10,{c,atom,[message],unknown}}}]},{emqx_node_rebalance_status,local_status,0} => {{c,union,[{c,atom,[disabled],unknown},none,none,none,none,none,{c,tuple_set,[{2,[{c,tuple,[{c,atom,[evacuation],unknown},any],{2,{c,atom,[evacuation],unknown}}},{c,tuple,[{c,atom,[purge],unknown},any],{2,{c,atom,[purge],unknown}}},{c,tuple,[{c,atom,[rebalance],unknown},{c,map,{[],{c,atom,[connection_eviction_rate,connection_goal,coordinator_node,disconnected_session_goal,recipients,session_eviction_rate,state,stats],unknown},any},unknown}],{2,{c,atom,[rebalance],unknown}}}]}],unknown},none,none],unknown},[]},{emqx_ds_replication_layer,do_next_v1,4} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,atom,[end_of_stream],unknown}],{2,{c,atom,[ok],unknown}}}]},{3,[{c,tuple,[{c,atom,[ok],unknown},{c,opaque,[{opaque,emqx_ds_storage_layer,iterator,0,{c,map,{[{{c,number,{int_set,[1]},integer},mandatory,{c,number,{int_set,[2]},integer}},{{c,number,{int_set,[2]},integer},mandatory,{c,number,{int_rng,0,1114111},integer}},{{c,number,{int_set,[3]},integer},mandatory,any}],none,none},unknown}}],unknown},{c,list,{{c,tuple,[{c,binary,{8,0},unknown},{c,tuple,[any,any,any,any,any,any,any,any,any,any],{10,any}}],{2,any}},{c,nil,[],unknown}},unknown}],{3,{c,atom,[ok],unknown}}}]}],unknown},[{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,opaque,[{opaque,emqx_ds_storage_layer,iterator,0,{c,map,{[{{c,number,{int_set,[1]},integer},mandatory,{c,number,{int_set,[2]},integer}},{{c,number,{int_set,[2]},integer},mandatory,{c,number,{int_rng,0,1114111},integer}},{{c,number,{int_set,[3]},integer},mandatory,any}],none,none},unknown}}],unknown},{c,number,{int_rng,1,pos_inf},integer}]},{emqx_conf_proto_v3,update,4} => 
{any,[{c,atom,any,unknown},{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},any,{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_node_rebalance_api_proto_v2,node_rebalance_purge_start,2} => {any,[{c,atom,any,unknown},{c,map,{[{{c,atom,[purge_rate],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}}],none,none},unknown}]},{emqx_ds_proto_v1,next,5} => {any,[{c,atom,any,unknown},{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,opaque,[{opaque,emqx_ds_storage_layer,iterator,0,{c,map,{[{{c,number,{int_set,[1]},integer},mandatory,{c,number,{int_set,[2]},integer}},{{c,number,{int_set,[2]},integer},mandatory,{c,number,{int_rng,0,1114111},integer}},{{c,number,{int_set,[3]},integer},mandatory,any}],none,none},unknown}}],unknown},{c,number,{int_rng,1,pos_inf},integer}]},{emqx_proto_v1,get_alarms,2} => {any,[{c,atom,any,unknown},{c,atom,[activated,all,deactivated],unknown}]},{emqx_proto_v2,are_running,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_bridge_proto_v1,stop_bridges_to_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_cm_proto_v1,kickout_client,2} => 
{any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}]},{emqx_gateway_cm_proto_v1,cast,4} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown},any]},{emqx_delayed_proto_v3,delete_delayed_message,2} => {any,[{c,atom,any,unknown},{c,binary,{8,0},unknown}]},{emqx_mgmt_api_plugins_proto_v2,describe_package,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_eviction_agent,purge_sessions,1} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},{c,atom,[disabled],unknown}],{2,{c,atom,[error],unknown}}},none,none],unknown},[any]},{emqx_slow_subs_proto_v1,get_history,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_delayed_proto_v1,get_delayed_message,2} => {any,[{c,atom,any,unknown},{c,binary,{8,0},unknown}]},{emqx_conf,get_node_and_config,1} => {{c,tuple,[{c,atom,any,unknown},any],{2,any}},[{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},unknown}]},{emqx_proto_v1,is_running,1} => {any,[{c,atom,any,unknown}]},{emqx_node_rebalance_proto_v3,available_nodes,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_broker,subscriptions_via_topic,1} => {{c,list,{any,{c,nil,[],unknown}},unknown},[any]},{emqx_persistent_session_ds,do_ensure_iterator_closed,1} => {{c,atom,[ok],unknown},[any]},{emqx_bridge_resource,stop,2} => 
{{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},none,none],unknown},[any,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,255},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown}]},{emqx_management_proto_v3,unsubscribe,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,binary,{8,0},unknown}]},{emqx_mgmt_trace_proto_v1,trace_file,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[{c,atom,any,unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,1114111},integer},none,none,none],unknown},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_conf_proto_v1,reset,2} => {any,[{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_proto_v2,get_stats,1} => {any,[{c,atom,any,unknown}]},{emqx_conf_cli,get_config,0} => 
{{c,map,{[],any,any},unknown},[]},{emqx_ft_storage_exporter_fs_proto_v1,read_export_file,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},none,none,none,{c,list,{{c,union,[{c,atom,any,unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,1114111},integer},none,none,none],unknown},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_node_rebalance_proto_v3,purge_sessions,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,pos_inf},integer}]},{emqx_management_proto_v2,list_subscriptions,1} => {any,[{c,atom,any,unknown}]},{emqx_telemetry_proto_v1,get_node_uuid,1} => {any,[{c,atom,any,unknown}]},{emqx_broker,subscriptions,1} => {{c,list,{{c,tuple,[any,any],{2,any}},{c,nil,[],unknown}},unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,{c,identifier,[pid],unknown},none,none,none,none,none],unknown}]},{emqx_slow_subs_api,get_history,0} => {{c,list,{{c,map,{[{{c,atom,[clientid],unknown},mandatory,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[last_update_time],unknown},mandatory,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[node],unknown},mandatory,{c,atom,any,unknown}},{{c,atom,[timespan],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[topic],unknown},mandatory,{c,binary,{8,0},unknown}}],none,none},unknown},{c,nil,[],unknown}},unknown},[]},{emqx_gateway_cm_proto_v1,lookup_by_clientid,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}]},{emqx_ds_replication_layer,do_get_streams_v1,4} => 
{{c,list,{{c,tuple,[{c,number,{int_rng,0,1114111},integer},{c,opaque,[{opaque,emqx_ds_storage_layer,stream,0,{c,map,{[{{c,number,{int_set,[1]},integer},mandatory,{c,number,{int_set,[1]},integer}},{{c,number,{int_set,[2]},integer},mandatory,{c,number,{int_rng,0,1114111},integer}},{{c,number,{int_set,[3]},integer},mandatory,any}],none,none},unknown}}],unknown}],{2,any}},{c,nil,[],unknown}},unknown},[{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,list,{{c,union,[{c,atom,['','#','+'],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,pos_inf},integer}]},{emqx_plugins,get_tar,1} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,binary,{8,0},unknown}],{2,{c,atom,[ok],unknown}}}]}],unknown},[{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,1114111},integer},none,none,none],unknown}]},{emqx_conf_proto_v1,get_config,3} => {any,[{c,atom,any,unknown},{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},unknown},any]},{emqx_connector_proto_v1,start_connectors_to_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_conf_proto_v3,get_config,3} => 
{any,[{c,atom,any,unknown},{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},unknown},any]},{emqx_prometheus,do_start,0} => {{c,atom,[ok],unknown},[]},{emqx_gateway_cm_proto_v1,get_chan_stats,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_node_rebalance_api_proto_v2,node_rebalance_stop,1} => {any,[{c,atom,any,unknown}]},{emqx_node_rebalance_status_proto_v1,evacuation_status,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_topic_metrics,metrics,1} => {{c,union,[none,none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},{c,atom,[topic_not_found],unknown}],{2,{c,atom,[error],unknown}}},none,{c,map,{[{{c,atom,[create_time],unknown},mandatory,any},{{c,atom,[metrics],unknown},mandatory,{c,map,{[],any,any},unknown}},{{c,atom,[reset_time],unknown},optional,any},{{c,atom,[topic],unknown},mandatory,any}],none,none},unknown}],unknown},[any]},{emqx_cm_proto_v1,kick_session,3} => {any,[{c,atom,[discard,kick],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_broker_proto_v1,list_subscriptions_via_topic,2} => {any,[{c,atom,any,unknown},{c,binary,{8,0},unknown}]},{emqx_node_rebalance_proto_v2,connection_counts,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_metrics_proto_v1,get_metrics,4} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}]},{emqx_management_proto_v2,get_full_config,1} => 
{any,[{c,atom,any,unknown}]},{emqx_connector_resource,start,2} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},none,none],unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,255},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,255},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown}]},{emqx_bridge_proto_v1,restart_bridge_to_node,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_cm,takeover_finish,2} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,atom,[noproc,timeout,unexpected_exception],unknown}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},any],{2,{c,atom,[ok],unknown}}}]}],unknown},none,none],unknown},[{c,atom,any,unknown},{c,identifier,[pid],unknown}]},{emqx_bridge_proto_v6,v2_lookup_from_all_nodes_v6,4} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,atom,[actions,sources],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_gateway_cm_proto_v1,get_chan_info,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_retainer_proto_v2,wait_dispatch_complete,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}]},{emqx_bridge_proto_v4,stop_bridge_to_node,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_mgmt_api_plugins_proto_v2,delete_package,1} => {any,[{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_node_rebalance_proto_v1,available_nodes,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_mgmt_api_plugins_proto_v2,ensure_action,2} => {any,[{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,atom,[restart,start,stop],unknown}]},{emqx_node_rebalance_api_proto_v1,node_rebalance_evacuation_start,2} => 
{any,[{c,atom,any,unknown},{c,map,{[{{c,atom,[conn_evict_rate],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[migrate_to],unknown},optional,{c,union,[{c,atom,[undefined],unknown},none,none,none,{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}},{{c,atom,[server_reference],unknown},optional,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[sess_evict_rate],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[wait_health_check],unknown},optional,{c,number,any,unknown}},{{c,atom,[wait_takeover],unknown},optional,{c,number,any,unknown}}],none,none},unknown}]},{emqx_cm,lookup_client,1} => {{c,list,{any,{c,nil,[],unknown}},unknown},[{c,tuple_set,[{2,[{c,tuple,[{c,atom,[clientid],unknown},any],{2,{c,atom,[clientid],unknown}}},{c,tuple,[{c,atom,[username],unknown},any],{2,{c,atom,[username],unknown}}}]}],unknown}]},{emqx_bridge_proto_v6,start_bridge_to_node,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_proto_v2,clean_pem_cache,1} => {any,[{c,atom,any,unknown}]},{emqx_dashboard_monitor,do_sample,2} => {any,[{c,atom,any,unknown},any]},{emqx_broker,dispatch,2} => 
{{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,atom,[no_subscribers,not_running],unknown}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,number,{int_rng,0,pos_inf},integer}],{2,{c,atom,[ok],unknown}}}]}],unknown},[{c,binary,{8,0},unknown},{c,tuple,[{c,atom,[delivery],unknown},{c,identifier,[pid],unknown},{c,tuple,[{c,atom,[message],unknown},{c,binary,{8,0},unknown},any,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,map,{[],{c,atom,any,unknown},{c,atom,[false,true],unknown}},unknown},{c,map,{[{{c,atom,[allow_publish],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[peerhost],unknown},optional,{c,tuple_set,[{4,[{c,tuple,[any,any,any,any],{4,any}}]},{8,[{c,tuple,[any,any,any,any,any,any,any,any],{8,any}}]}],unknown}},{{c,atom,[properties],unknown},optional,{c,map,{[],any,any},unknown}},{{c,atom,[proto_ver],unknown},optional,{c,union,[none,{c,binary,{8,0},unknown},none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}},{{c,atom,[protocol],unknown},optional,{c,atom,any,unknown}},{{c,atom,[username],unknown},optional,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}}],{c,atom,any,unknown},any},unknown},{c,binary,{8,0},unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,255},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown},{c,number,any,integer},any],{10,{c,atom,[message],unknown}}}],{3,{c,atom,[delivery],unknown}}}]},{emqx_gateway_cm,do_get_chan_stats,3} => {any,[any,any,any]},{emqx_bridge_proto_v4,list_bridges_on_nodes,1} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance,is_node_available,0} => {{c,atom,any,unknown},[]},{emqx_cm_proto_v2,get_chan_info,2} => {any,[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_conf_proto_v1,reset,3} => {any,[{c,atom,any,unknown},{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_management_proto_v1,node_info,1} => {any,[{c,atom,any,unknown}]},{emqx_resource,reset_metrics_local,1} => {{c,atom,[ok],unknown},[{c,binary,{8,0},unknown}]},{emqx_mgmt,do_subscribe,2} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,atom,[channel_not_found],unknown}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[subscribe],unknown},any],{2,{c,atom,[subscribe],unknown}}}]}],unknown},[any,any]},{emqx_mgmt_trace_proto_v1,read_trace_file,4} => {any,[{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,number,{int_rng,0,pos_inf},integer},{c,number,{int_rng,0,pos_inf},integer}]},{emqx_ft_storage_fs_proto_v1,multilist,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,tuple,[{c,binary,{8,0},unknown},{c,binary,{8,0},unknown}],{2,any}},{c,atom,[fragment,result],unknown}]},{emqx,is_running,0} => {{c,atom,[false,true],unknown},[]},{emqx_eviction_agent_proto_v1,evict_session_channel,4} => 
{any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,map,{[{{c,atom,[clean_start],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[clientid],unknown},optional,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[conn_mod],unknown},mandatory,{c,atom,any,unknown}},{{c,atom,[conn_props],unknown},optional,{c,map,{[],{c,atom,any,unknown},any},unknown}},{{c,atom,[connected],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[connected_at],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[disconnected_at],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[expiry_interval],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[keepalive],unknown},optional,{c,number,{int_rng,0,1114111},integer}},{{c,atom,[peercert],unknown},optional,{c,union,[{c,atom,[nossl,undefined],unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[{c,atom,any,unknown},none,none,none,none,none,{c,tuple,any,{any,any}},none,none],unknown},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}},{{c,atom,[peername],unknown},mandatory,{c,tuple,[{c,union,[{c,atom,[local,undefined,unspec],unknown},none,none,none,none,none,{c,tuple_set,[{4,[{c,tuple,[{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer}],{4,any}}]},{8,[{c,tuple,[{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer}],{8,any}}]}],unknown},none,none],unknown},any],{2,any}}},{{c,atom,[proto_name],unknown},optional,{c,binary,{8,0},unknown}},{{c,atom,[proto_ver],unknown},optional,{c,union,[none,{c,bin
ary,{8,0},unknown},none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}},{{c,atom,[receive_maximum],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[sockname],unknown},mandatory,{c,tuple,[{c,union,[{c,atom,[local,undefined,unspec],unknown},none,none,none,none,none,{c,tuple_set,[{4,[{c,tuple,[{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer}],{4,any}}]},{8,[{c,tuple,[{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer}],{8,any}}]}],unknown},none,none],unknown},any],{2,any}}},{{c,atom,[socktype],unknown},mandatory,{c,atom,any,unknown}},{{c,atom,[username],unknown},optional,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}}],{c,atom,any,unknown},any},unknown},{c,map,{[{{c,atom,[anonymous],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[auth_result],unknown},optional,{c,atom,[bad_authentication_method,bad_clientid_or_password,bad_username_or_password,banned,client_identifier_not_valid,not_authorized,server_busy,server_unavailable,success],unknown}},{{c,atom,[clientid],unknown},mandatory,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[cn],unknown},optional,{c,binary,{8,0},unknown}},{{c,atom,[dn],unknown},optional,{c,binary,{8,0},unknown}},{{c,atom,[is_bridge],unknown},mandatory,{c,atom,[false,true],unknown}},{{c,atom,[is_superuser],unknown},mandatory,{c,atom,[false,true],unknown}},{{c,atom,[mountpoint],unknown},mandatory,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[password],unkn
own},optional,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[peerhost],unknown},mandatory,{c,tuple_set,[{4,[{c,tuple,[{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer}],{4,any}}]},{8,[{c,tuple,[{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer}],{8,any}}]}],unknown}},{{c,atom,[protocol],unknown},mandatory,{c,atom,any,unknown}},{{c,atom,[sockport],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[username],unknown},mandatory,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[ws_cookie],unknown},optional,{c,union,[{c,atom,[undefined],unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},none,none,none,none],unknown}},{{c,atom,[zone],unknown},mandatory,{c,atom,any,unknown}}],{c,atom,any,unknown},any},unknown}]},{emqx_resource_proto_v1,create,5} => 
{any,[{c,binary,{8,0},unknown},{c,binary,{8,0},unknown},{c,atom,any,unknown},any,{c,map,{[{{c,atom,[auto_restart_interval],unknown},optional,{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,1,pos_inf},integer},none,none,none],unknown}},{{c,atom,[auto_retry_interval],unknown},optional,{c,number,any,integer}},{{c,atom,[batch_size],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[batch_time],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[health_check_interval],unknown},optional,{c,number,any,integer}},{{c,atom,[health_check_timeout],unknown},optional,{c,number,any,integer}},{{c,atom,[inflight_window],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[max_buffer_bytes],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[query_mode],unknown},optional,{c,atom,[async,no_queries,simple_async,simple_async_internal_buffer,simple_sync,simple_sync_internal_buffer,sync],unknown}},{{c,atom,[resume_interval],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[spawn_buffer_workers],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[start_after_created],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[start_timeout],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[wait_for_resource_ready],unknown},optional,{c,number,any,integer}},{{c,atom,[worker_pool_size],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}}],none,none},unknown}]},{emqx_ds_proto_v2,next,5} => {any,[{c,atom,any,unknown},{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,opaque,[{opaque,emqx_ds_storage_layer,iterator,0,{c,map,{[{{c,number,{int_set,[1]},integer},mandatory,{c,number,{int_set,[2]},integer}},{{c,number,{int_set,[2]},integer},mandatory,{c,number,{int_rng,0,1114111},integer}},{{c,number,{int_set,[3]},integer},mandatory,any}],none,none},unknown}}],unknown},{c,number,{int_rng,1,pos_inf},integer}]},{emqx_cm_proto_v1,lookup_client,2} => 
{any,[{c,atom,any,unknown},{c,tuple_set,[{2,[{c,tuple,[{c,atom,[clientid],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}],{2,{c,atom,[clientid],unknown}}},{c,tuple,[{c,atom,[username],unknown},{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}],{2,{c,atom,[username],unknown}}}]}],unknown}]},{emqx_node_rebalance_proto_v3,evict_sessions,4} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,pos_inf},integer},{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,atom,[connected,connecting,disconnected,idle,reauthenticating],unknown}]},{emqx_bridge_resource,restart,2} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},none,none],unknown},[any,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,255},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown}]},{emqx_node_rebalance,start,1} => {any,[{c,map,{[],any,any},unknown}]},{emqx_bridge_proto_v6,lookup_from_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_mgmt_trace_proto_v2,read_trace_file,4} => 
{any,[{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,number,{int_rng,0,pos_inf},integer},{c,number,{int_rng,0,pos_inf},integer}]},{emqx_mgmt_api_plugins_proto_v1,get_plugins,0} => {any,[]},{emqx_gateway_cm,do_cast,4} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown},any]},{emqx_bridge_proto_v2,lookup_from_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_ds_proto_v1,get_streams,5} => {any,[{c,atom,any,unknown},{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,list,{{c,union,[{c,atom,['','#','+'],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,pos_inf},integer}]},{emqx_conf_proto_v1,remove_config,2} => {any,[{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_bridge_proto_v5,v2_start_bridge_to_all_nodes,3} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_conf_proto_v2,sync_data_from_node,1} => {any,[{c,atom,any,unknown}]},{emqx_node_rebalance,session_count,0} => {{c,tuple,[{c,atom,[ok],unknown},any],{2,{c,atom,[ok],unknown}}},[]},{emqx_rule_engine,reset_metrics_for_rule,1} => {{c,atom,[ok],unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}]},{emqx_resource,recreate_local,4} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,atom,[not_found,updating_to_incorrect_resource_type],unknown}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,map,{[{{c,atom,[added_channels],unknown},mandatory,any},{{c,atom,[callback_mode],unknown},mandatory,{c,atom,[always_sync,async_if_possible],unknown}},{{c,atom,[config],unknown},mandatory,any},{{c,atom,[error],unknown},mandatory,any},{{c,atom,[id],unknown},mandatory,{c,binary,{8,0},unknown}},{{c,atom,[mod],unknown},mandatory,{c,atom,any,unknown}},{{c,atom,[query_mode],unknown},mandatory,{c,atom,[async,no_queries,simple_async,simple_async_internal_buffer,simple_sync,simple_sync_internal_buffer,sync],unknown}},{{c,atom,[state],unknown},mandatory,any},{{c,atom,[status],unknown},mandatory,{c,atom,[connected,connecting,disconnected,stopped],unknown}}],none,none},unknown}],{2,{c,atom,[ok],unknown}}}]}],unknown},[{c,binary,{8,0},unknown},{c,atom,any,unknown},any,{c,map,{[{{c,atom,[auto_restart_interval],unknown},optional,{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,1,pos_inf},integer},none,none,none],unknown}},{{c,atom,[auto_retry_interval],unknown},optional,{c,number,any,inte
ger}},{{c,atom,[batch_size],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[batch_time],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[health_check_interval],unknown},optional,{c,number,any,integer}},{{c,atom,[health_check_timeout],unknown},optional,{c,number,any,integer}},{{c,atom,[inflight_window],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[max_buffer_bytes],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[query_mode],unknown},optional,{c,atom,[async,no_queries,simple_async,simple_async_internal_buffer,simple_sync,simple_sync_internal_buffer,sync],unknown}},{{c,atom,[resume_interval],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[spawn_buffer_workers],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[start_after_created],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[start_timeout],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[wait_for_resource_ready],unknown},optional,{c,number,any,integer}},{{c,atom,[worker_pool_size],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}}],none,none},unknown}]},{emqx_delayed_proto_v3,get_delayed_message,2} => {any,[{c,atom,any,unknown},{c,binary,{8,0},unknown}]},{emqx_node_rebalance_proto_v3,connection_counts,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_proto_v2,enable_rebalance_agent,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,identifier,[pid],unknown}]},{emqx_node_rebalance_proto_v3,disable_rebalance_agent,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,identifier,[pid],unknown},any]},{emqx_mgmt,node_info,0} => 
{{c,map,{[{{c,atom,[connections],unknown},mandatory,any},{{c,atom,[edition],unknown},mandatory,{c,binary,{0,80},unknown}},{{c,atom,[live_connections],unknown},mandatory,any},{{c,atom,[load1],unknown},optional,{c,number,any,float}},{{c,atom,[load15],unknown},optional,{c,number,any,float}},{{c,atom,[load5],unknown},optional,{c,number,any,float}},{{c,atom,[log_path],unknown},mandatory,{c,binary,{8,0},unknown}},{{c,atom,[max_fds],unknown},mandatory,any},{{c,atom,[memory_total],unknown},mandatory,{c,number,any,unknown}},{{c,atom,[memory_used],unknown},mandatory,{c,number,any,integer}},{{c,atom,[node],unknown},mandatory,{c,atom,any,unknown}},{{c,atom,[node_status],unknown},mandatory,{c,atom,[running],unknown}},{{c,atom,[otp_release],unknown},mandatory,{c,binary,{8,0},unknown}},{{c,atom,[process_available],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[process_used],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[role],unknown},mandatory,{c,atom,[core,replicant],unknown}},{{c,atom,[sys_path],unknown},mandatory,{c,binary,{8,0},unknown}},{{c,atom,[uptime],unknown},mandatory,any},{{c,atom,[version],unknown},mandatory,{c,binary,{8,0},unknown}}],none,none},unknown},[]},{emqx_ft_storage_fs_proxy,lookup_local_assembler,1} => {any,[any]},{emqx_cm_proto_v2,get_chan_stats,2} => {any,[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_bridge_api,lookup_from_local_node,2} => 
{{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,atom,[not_bridge_v1_compatible,not_found],unknown}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},any],{2,{c,atom,[ok],unknown}}}]},{3,[{c,tuple,[{c,atom,[ok],unknown},{c,atom,[actions,sources],unknown},{c,map,{[{{c,atom,[error],unknown},mandatory,{c,union,[none,{c,binary,{8,0},unknown},none,none,none,none,{c,tuple_set,[{3,[{c,tuple,[{c,atom,[error],unknown},{c,binary,{8,0},unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown}],{3,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[incomplete],unknown},{c,binary,{8,0},unknown},{c,binary,{8,0},unknown}],{3,{c,atom,[incomplete],unknown}}}]}],unknown},none,none],unknown}},{{c,atom,[name],unknown},mandatory,{c,binary,{8,0},unknown}},{{c,atom,[raw_config],unknown},mandatory,{c,map,{[],any,any},unknown}},{{c,atom,[resource_data],unknown},mandatory,{c,map,{[{{c,atom,[added_channels],unknown},optional,any},{{c,atom,[callback_mode],unknown},optional,{c,atom,[always_sync,async_if_possible],unknown}},{{c,atom,[config],unknown},optional,any},{{c,atom,[error],unknown},optional,any},{{c,atom,[id],unknown},optional,{c,binary,{8,0},unknown}},{{c,atom,[mod],unknown},optional,{c,atom,any,unknown}},{{c,atom,[query_mode],unknown},optional,{c,atom,[async,no_queries,simple_async,simple_async_internal_buffer,simple_sync,simple_sync_internal_buffer,sync],unknown}},{{c,atom,[state],unknown},optional,any},{{c,atom,[status],unknown},optional,{c,atom,[connected,connecting,disconnected,stopped],unknown}}],none,none},unknown}},{{c,atom,[status],unknown},mandatory,{c,atom,[connected,connecting,disconnected,stopped],unknown}},{{c,atom,[type],unknown},mandatory,{c,binary,{8,0},unknown}}],none,none},unknown}],{3,{c,atom,[ok],unknown}}}]}],unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,numb
er,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_ds_replication_layer,do_update_iterator_v2,4} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,opaque,[{opaque,emqx_ds_storage_layer,iterator,0,{c,map,{[{{c,number,{int_set,[1]},integer},mandatory,{c,number,{int_set,[2]},integer}},{{c,number,{int_set,[2]},integer},mandatory,{c,number,{int_rng,0,1114111},integer}},{{c,number,{int_set,[3]},integer},mandatory,any}],none,none},unknown}}],unknown}],{2,{c,atom,[ok],unknown}}}]}],unknown},[{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,opaque,[{opaque,emqx_ds_storage_layer,iterator,0,{c,map,{[{{c,number,{int_set,[1]},integer},mandatory,{c,number,{int_set,[2]},integer}},{{c,number,{int_set,[2]},integer},mandatory,{c,number,{int_rng,0,1114111},integer}},{{c,number,{int_set,[3]},integer},mandatory,any}],none,none},unknown}}],unknown},{c,binary,{8,0},unknown}]},{emqx_node_rebalance_evacuation,start,1} => {any,[{c,map,{[],any,any},unknown}]},{emqx_node_rebalance_proto_v1,enable_rebalance_agent,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,identifier,[pid],unknown}]},{emqx_metrics,all,0} => {{c,list,{{c,tuple,[any,{c,number,any,integer}],{2,any}},{c,nil,[],unknown}},unknown},[]},{emqx_bridge_proto_v5,v2_lookup_from_all_nodes,3} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_ft_storage_fs_proto_v1,list_assemblers,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,tuple,[{c,binary,{8,0},unknown},{c,binary,{8,0},unknown}],{2,any}}]},{emqx_node_rebalance_agent,disable,2} => {any,[any,any]},{emqx_bridge_proto_v2,restart_bridges_to_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_authn_api,lookup_from_local_node,2} => 
{{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,tuple,[{c,atom,any,unknown},{c,union,[{c,atom,[not_found_resource],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}],{2,any}}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,tuple,[{c,atom,any,unknown},{c,atom,[connected,connecting,disconnected,stopped],unknown},{c,map,{[{{c,atom,[counters],unknown},mandatory,{c,map,{[],any,any},unknown}},{{c,atom,[gauges],unknown},mandatory,{c,map,{[],any,any},unknown}},{{c,atom,[rate],unknown},mandatory,{c,map,{[],any,any},unknown}},{{c,atom,[slides],unknown},mandatory,{c,map,{[],any,any},unknown}}],none,none},unknown},{c,map,{[{{c,atom,[counters],unknown},optional,{c,map,{[],any,any},unknown}},{{c,atom,[gauges],unknown},optional,{c,map,{[],any,any},unknown}},{{c,atom,[rate],unknown},optional,{c,map,{[],any,any},unknown}},{{c,atom,[slides],unknown},optional,{c,map,{[],any,any},unknown}}],none,none},unknown}],{4,any}}],{2,{c,atom,[ok],unknown}}}]}],unknown},[{c,atom,any,unknown},{c,binary,{8,0},unknown}]},{emqx_bridge_proto_v4,start_bridges_to_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_conf_proto_v1,remove_config,3} => 
{any,[{c,atom,any,unknown},{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_management_proto_v4,broker_info,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_conf_proto_v2,reset,2} => {any,[{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_eviction_agent,evict_connections,1} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},{c,atom,[disabled],unknown}],{2,{c,atom,[error],unknown}}},none,none],unknown},[any]},{emqx_bridge_proto_v3,lookup_from_all_nodes,3} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_conf_proto_v2,get_config,2} => {any,[{c,atom,any,unknown},{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_purge_proto_v1,start,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,map,{[{{c,atom,[purge_rate],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}}],none,none},unknown}]},{emqx_eviction_agent,evict_session_channel,3} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,union,[{c,atom,[undefined],unknown},none,none,{c,identifier,[pid],unknown},none,none,none,none,none],unknown}],{2,{c,atom,[ok],unknown}}}]},{3,[{c,tuple,[{c,atom,[ok],unknown},{c,union,[{c,atom,[undefined],unknown},none,none,{c,identifier,[pid],unknown},none,none,none,none,none],unknown},any],{3,{c,atom,[ok],unknown}}}]}],unknown},[any,{c,map,{[{{c,atom,[clean_start],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[clientid],unknown},optional,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[conn_mod],unknown},mandatory,{c,atom,any,unknown}},{{c,atom,[conn_props],unknown},optional,{c,map,{[],{c,atom,any,unknown},any},unknown}},{{c,atom,[connected],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[connected_at],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[disconnected_at],unknown},optional,{c,number,{int_rng,0
,pos_inf},integer}},{{c,atom,[expiry_interval],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[keepalive],unknown},optional,{c,number,{int_rng,0,1114111},integer}},{{c,atom,[peercert],unknown},optional,{c,union,[{c,atom,[nossl,undefined],unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[{c,atom,any,unknown},none,none,none,none,none,{c,tuple,any,{any,any}},none,none],unknown},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}},{{c,atom,[peername],unknown},mandatory,{c,tuple,[{c,union,[{c,atom,[local,undefined,unspec],unknown},none,none,none,none,none,{c,tuple_set,[{4,[{c,tuple,[{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer}],{4,any}}]},{8,[{c,tuple,[{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer}],{8,any}}]}],unknown},none,none],unknown},any],{2,any}}},{{c,atom,[proto_name],unknown},optional,{c,binary,{8,0},unknown}},{{c,atom,[proto_ver],unknown},optional,{c,union,[none,{c,binary,{8,0},unknown},none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}},{{c,atom,[receive_maximum],unknown},optional,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[sockname],unknown},mandatory,{c,tuple,[{c,union,[{c,atom,[local,undefined,unspec],unknown},none,none,none,none,none,{c,tuple_set,[{4,[{c,tuple,[{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer}],{4,any}}]},{8,[{c,tuple,[{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_
rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer}],{8,any}}]}],unknown},none,none],unknown},any],{2,any}}},{{c,atom,[socktype],unknown},mandatory,{c,atom,any,unknown}},{{c,atom,[username],unknown},optional,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}}],{c,atom,any,unknown},any},unknown},{c,map,{[{{c,atom,[anonymous],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[auth_result],unknown},optional,{c,atom,[bad_authentication_method,bad_clientid_or_password,bad_username_or_password,banned,client_identifier_not_valid,not_authorized,server_busy,server_unavailable,success],unknown}},{{c,atom,[clientid],unknown},mandatory,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[cn],unknown},optional,{c,binary,{8,0},unknown}},{{c,atom,[dn],unknown},optional,{c,binary,{8,0},unknown}},{{c,atom,[is_bridge],unknown},mandatory,{c,atom,[false,true],unknown}},{{c,atom,[is_superuser],unknown},mandatory,{c,atom,[false,true],unknown}},{{c,atom,[mountpoint],unknown},mandatory,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[password],unknown},optional,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[peerhost],unknown},mandatory,{c,tuple_set,[{4,[{c,tuple,[{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer}],{4,any}}]},{8,[{c,tuple,[{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer}],{8,any}}]}],unknown}},{{c,atom,[protocol],un
known},mandatory,{c,atom,any,unknown}},{{c,atom,[sockport],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[username],unknown},mandatory,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}},{{c,atom,[ws_cookie],unknown},optional,{c,union,[{c,atom,[undefined],unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},none,none,none,none],unknown}},{{c,atom,[zone],unknown},mandatory,{c,atom,any,unknown}}],{c,atom,any,unknown},any},unknown}]},{emqx_management_proto_v1,list_listeners,1} => {any,[{c,atom,any,unknown}]},{emqx_node_rebalance_agent,enable,2} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},{c,atom,[eviction_agent_busy,invalid_coordinator],unknown}],{2,{c,atom,[error],unknown}}},none,none],unknown},[{c,identifier,[pid],unknown},any]},{emqx_mgmt_cluster_proto_v2,connected_replicants,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_bridge_proto_v5,restart_bridge_to_node,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_bridge_proto_v2,start_bridges_to_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_shared_sub_proto_v1,send,4} => 
{any,[{c,atom,any,unknown},{c,identifier,[pid],unknown},{c,binary,{8,0},unknown},any]},{emqx_mgmt,do_unsubscribe_batch,2} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,atom,[channel_not_found],unknown}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[unsubscribe],unknown},{c,list,{{c,tuple,[any,any],{2,any}},{c,nil,[],unknown}},unknown}],{2,{c,atom,[unsubscribe],unknown}}}]}],unknown},[any,any]},{emqx_management_proto_v4,unsubscribe_batch,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,list,{{c,binary,{8,0},unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_api_proto_v1,node_rebalance_start,2} => {any,[{c,atom,any,unknown},{c,map,{[{{c,atom,[abs_conn_threshold],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[abs_sess_threshold],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[conn_evict_rate],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[nodes],unknown},optional,{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}},{{c,atom,[rel_conn_threshold],unknown},optional,{c,number,any,unknown}},{{c,atom,[rel_sess_threshold],unknown},optional,{c,number,any,unknown}},{{c,atom,[sess_evict_rate],unknown},optional,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[wait_health_check],unknown},optional,{c,number,any,unknown}},{{c,atom,[wait_takeover],unknown},optional,{c,number,any,unknown}}],none,none},unknown}]},{emqx_bridge_v2_api,lookup_from_local_node,2} => 
{{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},{c,atom,[not_found],unknown}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},any],{2,{c,atom,[ok],unknown}}}]}],unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_node_rebalance_purge,start,1} => {any,[{c,map,{[],any,any},unknown}]},{emqx_bridge_proto_v6,get_metrics_from_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_management_proto_v3,call_client,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},any]},{emqx_node_rebalance,disconnected_session_count,0} => {{c,tuple,[{c,atom,[ok],unknown},any],{2,{c,atom,[ok],unknown}}},[]},{emqx_bridge_proto_v2,restart_bridge_to_node,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_mgmt_trace_proto_v2,get_trace_size,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_cm_proto_v2,takeover_finish,2} => 
{any,[{c,atom,any,unknown},{c,identifier,[pid],unknown}]},{emqx_conf_proto_v1,get_override_config_file,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_proto_v3,enable_rebalance_agent,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,identifier,[pid],unknown}]},{emqx_conf_proto_v2,update,4} => {any,[{c,atom,any,unknown},{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},any,{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_ds_proto_v2,make_iterator,6} => {any,[{c,atom,any,unknown},{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,opaque,[{opaque,emqx_ds_storage_layer,stream,0,{c,map,{[{{c,number,{int_set,[1]},integer},mandatory,{c,number,{int_set,[1]},integer}},{{c,number,{int_set,[2]},integer},mandatory,{c,number,{int_rng,0,1114111},integer}},{{c,number,{int_set,[3]},integer},mandatory,any}],none,none},unknown}}],unknown},{c,list,{{c,union,[{c,atom,['','#','+'],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,pos_inf},integer}]},{emqx_exhook_proto_v1,server_info,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,binary,{8,0},unknown}]},{emqx_conf_proto_v3,reset,3} => 
{any,[{c,atom,any,unknown},{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_delayed,do_delete_delayed_messages_by_topic_name,1} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},{c,atom,[not_found],unknown}],{2,{c,atom,[error],unknown}}},none,none],unknown},[{c,binary,{8,0},unknown}]},{emqx_node_rebalance_purge,stop,0} => {any,[]},{emqx_mgmt_api_trace,read_trace_file,3} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[eof],unknown},{c,union,[{c,atom,[undefined],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}],{2,{c,atom,[eof],unknown}}},{c,tuple,[{c,atom,[error],unknown},{c,union,[{c,atom,any,unknown},none,none,none,none,none,{c,tuple,[{c,atom,[no_translation],unknown},{c,atom,[unicode],unknown},{c,atom,[latin1],unknown}],{3,{c,atom,[no_translation],unknown}}},none,none],unknown}],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}],{2,{c,atom,[ok],unknown}}}]}],unknown},[{c,binary,{8,0},unknown},any,any]},{emqx_mgmt_api_plugins_proto_v2,get_plugins,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_conf_proto_v3,get_hocon_config,1} => {any,[{c,atom,any,unknown}]},{emqx_proto_v2,deactivate_alarm,2} => 
{any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}]},{emqx_delayed_proto_v2,clear_all,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_cm,do_get_chan_info,2} => {any,[any,any]},{emqx_node_rebalance_agent,enable,1} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},{c,atom,[eviction_agent_busy,invalid_coordinator],unknown}],{2,{c,atom,[error],unknown}}},none,none],unknown},[{c,identifier,[pid],unknown}]},{emqx_mgmt,broker_info,0} => {{c,map,{[{{c,atom,[node],unknown},mandatory,{c,atom,any,unknown}},{{c,atom,[node_status],unknown},mandatory,{c,atom,[running],unknown}},{{c,atom,[otp_release],unknown},mandatory,{c,binary,{8,0},unknown}}],any,any},unknown},[]},{emqx_telemetry,get_node_uuid,0} => {any,[]},{emqx_management_proto_v1,get_full_config,1} => {any,[{c,atom,any,unknown}]},{emqx_bridge_proto_v3,list_bridges_on_nodes,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_api_proto_v2,node_rebalance_evacuation_stop,1} => {any,[{c,atom,any,unknown}]},{emqx_gateway_cm_proto_v1,call,5} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown},any,{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}]},{emqx_mgmt_data_backup,maybe_copy_and_import,2} => 
{{c,tuple_set,[{2,[{c,tuple,[{c,atom,[bardrpc],unknown},any],{2,{c,atom,[bardrpc],unknown}}},{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,map,{[{{c,atom,[config_errors],unknown},mandatory,{c,map,{[],{c,list,{any,{c,nil,[],unknown}},unknown},{c,tuple,[any,any],{2,any}}},unknown}},{{c,atom,[db_errors],unknown},mandatory,{c,map,{[],{c,atom,any,unknown},{c,tuple,[any,any],{2,any}}},unknown}}],none,none},unknown}],{2,{c,atom,[ok],unknown}}}]}],unknown},[any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_license_proto_v1,remote_connection_counts,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_cm,kick_session,1} => {{c,atom,[ok],unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}]},{emqx_bridge_proto_v5,start_bridge_to_node,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_conf_proto_v1,update,3} => 
{any,[{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},any,{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_ds_replication_layer,do_add_generation_v2,1} => {{c,atom,[ok],unknown},[{c,atom,any,unknown}]},{emqx_bridge_v2_api,get_metrics_from_local_node_v6,3} => {{c,map,{[{{c,atom,[dropped],unknown},mandatory,{c,number,any,integer}},{{c,atom,['dropped.expired'],unknown},mandatory,{c,number,any,integer}},{{c,atom,['dropped.other'],unknown},mandatory,{c,number,any,integer}},{{c,atom,['dropped.queue_full'],unknown},mandatory,{c,number,any,integer}},{{c,atom,['dropped.resource_not_found'],unknown},mandatory,{c,number,any,integer}},{{c,atom,['dropped.resource_stopped'],unknown},mandatory,{c,number,any,integer}},{{c,atom,[failed],unknown},mandatory,{c,number,any,integer}},{{c,atom,[inflight],unknown},mandatory,any},{{c,atom,[late_reply],unknown},mandatory,{c,number,any,integer}},{{c,atom,[matched],unknown},mandatory,{c,number,any,integer}},{{c,atom,[queuing],unknown},mandatory,any},{{c,atom,[rate],unknown},mandatory,any},{{c,atom,[rate_last5m],unknown},mandatory,any},{{c,atom,[rate_max],unknown},mandatory,any},{{c,atom,[received],unknown},mandatory,{c,number,any,integer}},{{c,atom,[retried],unknown},mandatory,{c,number,any,integer}},{{c,atom,[success],unknown},mandatory,{c,number,any,integer}}],none,none},unknown},[{c,atom,[actions,sources],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknow
n},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_ds_proto_v2,drop_db,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,atom,any,unknown}]},{emqx,remove_config,2} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,map,{[{{c,atom,[config],unknown},optional,{c,union,[{c,atom,[undefined],unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},none,none,none,{c,map,{[],{c,atom,any,unknown},any},unknown}],unknown}},{{c,atom,[post_config_update],unknown},optional,{c,map,{[],{c,atom,any,unknown},any},unknown}},{{c,atom,[raw_config],unknown},optional,{c,union,[{c,atom,[undefined],unknown},none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},none,none,none,{c,map,{[],{c,binary,{8,0},unknown},any},unknown}],unknown}}],none,none},unknown}],{2,{c,atom,[ok],unknown}}}]}],unknown},[{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_ds_proto_v2,add_generation,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,atom,any,unknown}]},{emqx_node_rebalance_evacuation,is_node_available,0} => {{c,atom,any,unknown},[]},{emqx_node_rebalance_agent,disable,1} => 
{{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},{c,atom,[already_disabled,invalid_coordinator],unknown}],{2,{c,atom,[error],unknown}}},none,none],unknown},[{c,identifier,[pid],unknown}]},{emqx_node_rebalance_proto_v3,enable_rebalance_agent,4} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,identifier,[pid],unknown},any,{c,map,{[{{c,atom,[allow_connections],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_plugins_proto_v1,get_tar,3} => {any,[{c,atom,any,unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,1114111},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}]},{emqx_mgmt_api_plugins,install_package,2} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},{c,map,{[],any,any},unknown}],{2,{c,atom,[error],unknown}}},none,none],unknown},[{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,none,none,none,{c,list,{any,{c,nil,[],unknown}},unknown},{c,number,{int_rng,0,1114111},integer},none,none,none],unknown},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,255},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown}]},{emqx_ds_proto_v2,store_batch,5} => 
{any,[{c,atom,any,unknown},{c,atom,any,unknown},{c,binary,{8,0},unknown},{c,map,{[{{c,number,{int_set,[1]},integer},mandatory,{c,number,{int_set,[3]},integer}},{{c,number,{int_set,[2]},integer},mandatory,{c,list,{{c,tuple,[{c,atom,[message],unknown},{c,binary,{8,0},unknown},any,{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,map,{[],{c,atom,any,unknown},{c,atom,[false,true],unknown}},unknown},{c,map,{[{{c,atom,[allow_publish],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[peerhost],unknown},optional,{c,tuple_set,[{4,[{c,tuple,[{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer},{c,number,{int_rng,0,255},integer}],{4,any}}]},{8,[{c,tuple,[{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer},{c,number,{int_rng,0,1114111},integer}],{8,any}}]}],unknown}},{{c,atom,[properties],unknown},optional,{c,map,{[],{c,atom,any,unknown},any},unknown}},{{c,atom,[proto_ver],unknown},optional,{c,union,[none,{c,binary,{8,0},unknown},none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown}},{{c,atom,[protocol],unknown},optional,{c,atom,any,unknown}},{{c,atom,[username],unknown},optional,{c,union,[{c,atom,[undefined],unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}}],{c,atom,any,unknown},any},unknown},{c,binary,{8,0},unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,255},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],un
known}},unknown},none,none,none,none],unknown},{c,number,any,integer},any],{10,{c,atom,[message],unknown}}},{c,nil,[],unknown}},unknown}}],none,none},unknown},{c,map,{[],none,none},unknown}]},{emqx_bridge,list,0} => {{c,list,{any,{c,nil,[],unknown}},unknown},[]},{emqx_mgmt,do_kickout_clients,1} => {{c,atom,[ok],unknown},[{c,list,{any,{c,nil,[],unknown}},unknown}]},{emqx_bridge_proto_v5,list_bridges_on_nodes,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_bridge_proto_v6,restart_bridge_to_node,3} => {any,[{c,atom,any,unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_conf_proto_v3,sync_data_from_node,1} => {any,[{c,atom,any,unknown}]},{emqx_mgmt_cluster_proto_v2,invite_node,2} => {any,[{c,atom,any,unknown},{c,atom,any,unknown}]},{emqx_conf_proto_v3,get_config,2} => {any,[{c,atom,any,unknown},{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_evacuation,stop,0} => {any,[]},{emqx_management_proto_v4,list_subscriptions,1} => {any,[{c,atom,any,unknown}]},{emqx_node_rebalance,stop,0} => {any,[]},{emqx_bridge_proto_v4,lookup_from_all_nodes,3} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_node_rebalance_status_proto_v1,local_status,1} => {any,[{c,atom,any,unknown}]},{emqx_node_rebalance_proto_v2,session_counts,1} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown}]},{emqx_node_rebalance_status_proto_v2,local_status,1} => {any,[{c,atom,any,unknown}]},{emqx_bridge_proto_v6,v2_start_bridge_on_all_nodes_v6,4} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,atom,[actions,sources],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_bridge_proto_v1,lookup_from_all_nodes,3} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_gateway_cm_proto_v1,kick_session,4} => {any,[{c,atom,any,unknown},{c,atom,[discard,kick],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,identifier,[pid],unknown}]},{emqx_conf_proto_v2,remove_config,2} => 
{any,[{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_bridge_api,get_metrics_from_local_node,2} => {{c,map,{[{{c,atom,[dropped],unknown},mandatory,{c,number,any,integer}},{{c,atom,['dropped.expired'],unknown},mandatory,{c,number,any,integer}},{{c,atom,['dropped.other'],unknown},mandatory,{c,number,any,integer}},{{c,atom,['dropped.queue_full'],unknown},mandatory,{c,number,any,integer}},{{c,atom,['dropped.resource_not_found'],unknown},mandatory,{c,number,any,integer}},{{c,atom,['dropped.resource_stopped'],unknown},mandatory,{c,number,any,integer}},{{c,atom,[failed],unknown},mandatory,{c,number,any,integer}},{{c,atom,[inflight],unknown},mandatory,any},{{c,atom,[late_reply],unknown},mandatory,{c,number,any,integer}},{{c,atom,[matched],unknown},mandatory,{c,number,any,integer}},{{c,atom,[queuing],unknown},mandatory,any},{{c,atom,[rate],unknown},mandatory,any},{{c,atom,[rate_last5m],unknown},mandatory,any},{{c,atom,[rate_max],unknown},mandatory,any},{{c,atom,[received],unknown},mandatory,{c,number,any,integer}},{{c,atom,[retried],unknown},mandatory,{c,number,any,integer}},{{c,atom,[success],unknown},mandatory,{c,number,any,integer}}],none,none},unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,255},integer},
none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,list,{any,{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},{c,number,{int_rng,0,255},integer},none,none,none],unknown},{c,union,[none,{c,binary,{8,0},unknown},none,none,{c,nil,[],unknown},none,none,none,none],unknown}},unknown},none,none,none,none],unknown}]},{emqx_authz_cache,drain_cache,1} => {{c,union,[{c,atom,[ok],unknown},none,none,none,none,none,{c,tuple,[{c,atom,[error],unknown},{c,atom,[not_found],unknown}],{2,{c,atom,[error],unknown}}},none,none],unknown},[{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown}]},{emqx_node_rebalance_proto_v1,disable_rebalance_agent,2} => {any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,identifier,[pid],unknown}]},{emqx_conf_proto_v2,reset,3} => {any,[{c,atom,any,unknown},{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,nil,[],unknown}},nonempty},{c,map,{[{{c,atom,[lazy_evaluator],unknown},optional,{c,function,{{c,product,[{c,function,{any,any},unknown}],unknown},any},unknown}},{{c,atom,[override_to],unknown},optional,{c,atom,[cluster,local],unknown}},{{c,atom,[persistent],unknown},optional,{c,atom,[false,true],unknown}},{{c,atom,[rawconf_with_defaults],unknown},optional,{c,atom,[false,true],unknown}}],none,none},unknown}]},{emqx_bridge_proto_v6,stop_bridges_to_all_nodes,3} => 
{any,[{c,list,{{c,atom,any,unknown},{c,nil,[],unknown}},unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,{c,list,{{c,number,{int_rng,0,255},integer},{c,nil,[],unknown}},unknown},none,none,none,none],unknown}]},{emqx_cm,takeover_session,2} => {{c,union,[{c,atom,[none,ok,undefined],unknown},none,none,none,{c,list,{{c,tuple,[{c,atom,any,unknown},any],{2,any}},{c,nil,[],unknown}},unknown},none,{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[expired],unknown},{c,union,[none,none,none,none,none,none,{c,tuple,[{c,atom,[session],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},any,{c,atom,[false,true],unknown},{c,map,{[],any,any},unknown},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown},{c,atom,[false,true],unknown},{c,opaque,[{opaque,emqx_inflight,inflight,0,{c,tuple,[any,any,any],{3,any}}}],unknown},{c,tuple,[any,any,any,any,any,any,any,any,any,any,any],{11,any}},{c,number,{int_rng,1,1114111},integer},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown},{c,map,{[],any,any},unknown},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown},{c,number,{int_rng,1,pos_inf},integer}],{15,{c,atom,[session],unknown}}},none,{c,map,{[{{c,atom,[conninfo],unknown},mandatory,{c,map,{[],any,any},unknown}},{{c,atom,[created_at],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[id],unknown},mandatory,{c,binary,{8,0},unknown}},{{c,atom,[in
flight],unknown},mandatory,{c,opaque,[{opaque,emqx_persistent_message_ds_replayer,inflight,0,{c,tuple,[any,any,any,any],{4,any}}}],unknown}},{{c,atom,[last_alive_at],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[props],unknown},mandatory,{c,map,{[],any,any},unknown}},{{c,atom,[receive_maximum],unknown},mandatory,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[subscriptions],unknown},mandatory,{c,opaque,[{opaque,emqx_topic_gbt,t,2,{c,opaque,[{opaque,gb_trees,tree,2,{c,tuple,[any,any],{2,any}}}],unknown}}],unknown}},{{c,atom,[timer_bump_last_alive_at],unknown},optional,{c,identifier,[reference],unknown}},{{c,atom,[timer_get_streams],unknown},optional,{c,identifier,[reference],unknown}},{{c,atom,[timer_pull],unknown},optional,{c,identifier,[reference],unknown}}],none,none},unknown}],unknown}],{2,{c,atom,[expired],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,union,[none,none,none,none,{c,list,{{c,tuple,[any,any,any],{3,any}},{c,nil,[],unknown}},unknown},none,none,none,{c,map,{[],any,any},unknown}],unknown}],{2,{c,atom,[ok],unknown}}},{c,tuple,[{c,atom,[persistent],unknown},{c,union,[none,none,none,none,none,none,{c,tuple,[{c,atom,[session],unknown},{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},any,{c,atom,[false,true],unknown},{c,map,{[],any,any},unknown},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown},{c,atom,[false,true],unknown},{c,opaque,[{opaque,emqx_inflight,inflight,0,{c,tuple,[any,any,any],{3,any}}}],unknown},{c,tuple,[any,any,any,any,any,any,any,any,any,any,any],{11,any}},{c,number,{int_rng,1,1114111},integer},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown},{c,map,{[],any,any},unknown},{c,union,[{c,atom,[infinity],unknown},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown},{c,union,[{c,atom,[infinity],un
known},none,none,none,none,{c,number,{int_rng,0,pos_inf},integer},none,none,none],unknown},{c,number,{int_rng,1,pos_inf},integer}],{15,{c,atom,[session],unknown}}},none,{c,map,{[{{c,atom,[conninfo],unknown},mandatory,{c,map,{[],any,any},unknown}},{{c,atom,[created_at],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[id],unknown},mandatory,{c,binary,{8,0},unknown}},{{c,atom,[inflight],unknown},mandatory,{c,opaque,[{opaque,emqx_persistent_message_ds_replayer,inflight,0,{c,tuple,[any,any,any,any],{4,any}}}],unknown}},{{c,atom,[last_alive_at],unknown},mandatory,{c,number,{int_rng,0,pos_inf},integer}},{{c,atom,[props],unknown},mandatory,{c,map,{[],any,any},unknown}},{{c,atom,[receive_maximum],unknown},mandatory,{c,number,{int_rng,1,pos_inf},integer}},{{c,atom,[subscriptions],unknown},mandatory,{c,opaque,[{opaque,emqx_topic_gbt,t,2,{c,opaque,[{opaque,gb_trees,tree,2,{c,tuple,[any,any],{2,any}}}],unknown}}],unknown}},{{c,atom,[timer_bump_last_alive_at],unknown},optional,{c,identifier,[reference],unknown}},{{c,atom,[timer_get_streams],unknown},optional,{c,identifier,[reference],unknown}},{{c,atom,[timer_pull],unknown},optional,{c,identifier,[reference],unknown}}],none,none},unknown}],unknown}],{2,{c,atom,[persistent],unknown}}}]},{4,[{c,tuple,[{c,atom,[living],unknown},{c,atom,any,unknown},{c,identifier,[pid],unknown},any],{4,{c,atom,[living],unknown}}}]}],unknown},none,{c,map,{[],{c,atom,any,unknown},any},unknown}],unknown},[any,any]},{emqx_node_rebalance,connection_count,0} => {{c,tuple,[{c,atom,[ok],unknown},any],{2,{c,atom,[ok],unknown}}},[]},{emqx_conf_app,sync_data_from_node,0} => {{c,tuple_set,[{2,[{c,tuple,[{c,atom,[error],unknown},any],{2,{c,atom,[error],unknown}}},{c,tuple,[{c,atom,[ok],unknown},{c,binary,{8,0},unknown}],{2,{c,atom,[ok],unknown}}}]}],unknown},[]},{emqx_management_proto_v4,kickout_clients,2} => 
{any,[{c,atom,any,unknown},{c,list,{{c,union,[{c,atom,any,unknown},{c,binary,{8,0},unknown},none,none,none,none,none,none,none],unknown},{c,nil,[],unknown}},unknown}]},{emqx_alarm,get_alarms,1} => {any,[{c,atom,[activated,all,deactivated],unknown}]},{emqx_management_proto_v2,node_info,1} => {any,[{c,atom,any,unknown}]},{emqx_exhook_mgr,all_servers_info,0} => {any,[]}}}. diff --git a/apps/emqx/test/emqx_trie_SUITE.erl b/apps/emqx/test/emqx_trie_SUITE.erl index 06696b9ed..f9b060aba 100644 --- a/apps/emqx/test/emqx_trie_SUITE.erl +++ b/apps/emqx/test/emqx_trie_SUITE.erl @@ -21,6 +21,7 @@ -include_lib("emqx/include/emqx.hrl"). -include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). all() -> [ @@ -43,14 +44,14 @@ end_per_group(_, _) -> ok. init_per_suite(Config) -> - application:load(emqx), - ok = ekka:start(), - Config. + Apps = emqx_cth_suite:start( + [{emqx, #{override_env => [{boot_modules, [broker]}]}}], + #{work_dir => emqx_cth_suite:work_dir(Config)} + ), + [{suite_apps, Apps} | Config]. -end_per_suite(_Config) -> - ekka:stop(), - mria:stop(), - mria_mnesia:delete_schema(). +end_per_suite(Config) -> + ok = emqx_cth_suite:stop(?config(suite_apps, Config)). init_per_testcase(_TestCase, Config) -> clear_tables(), @@ -185,7 +186,8 @@ t_delete3(_) -> ?assertEqual([], ?TRIE:match(<<"sensor">>)), ?assertEqual([], ?TRIE:lookup_topic(<<"sensor/+">>, ?TRIE)). -clear_tables() -> emqx_trie:clear_tables(). +clear_tables() -> + emqx_trie:clear_tables(). trans(Fun) -> mria:transaction(?ROUTE_SHARD, Fun). 
diff --git a/apps/emqx/test/emqx_ws_connection_SUITE.erl b/apps/emqx/test/emqx_ws_connection_SUITE.erl index 83224958e..97a1ca672 100644 --- a/apps/emqx/test/emqx_ws_connection_SUITE.erl +++ b/apps/emqx/test/emqx_ws_connection_SUITE.erl @@ -532,7 +532,7 @@ t_parse_incoming_frame_error(_) -> {incoming, {frame_error, #{ header_type := _, - hint := malformed_packet + cause := malformed_packet }}} ], Packets diff --git a/apps/emqx_audit/BSL.txt b/apps/emqx_audit/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_audit/BSL.txt +++ b/apps/emqx_audit/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. -Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_auth/include/emqx_authz.hrl b/apps/emqx_auth/include/emqx_authz.hrl index 9af795a82..4d3fc75dc 100644 --- a/apps/emqx_auth/include/emqx_authz.hrl +++ b/apps/emqx_auth/include/emqx_authz.hrl @@ -28,6 +28,7 @@ -define(CMD_APPEND, append). -define(CMD_MOVE, move). -define(CMD_MERGE, merge). +-define(CMD_REORDER, reorder). -define(CMD_MOVE_FRONT, front). -define(CMD_MOVE_REAR, rear). 
diff --git a/apps/emqx_auth/src/emqx_auth.app.src b/apps/emqx_auth/src/emqx_auth.app.src index 769929cc9..548af740e 100644 --- a/apps/emqx_auth/src/emqx_auth.app.src +++ b/apps/emqx_auth/src/emqx_auth.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_auth, [ {description, "EMQX Authentication and authorization"}, - {vsn, "0.2.1"}, + {vsn, "0.2.2"}, {modules, []}, {registered, [emqx_auth_sup]}, {applications, [ diff --git a/apps/emqx_auth/src/emqx_authn/emqx_authn_api.erl b/apps/emqx_auth/src/emqx_authn/emqx_authn_api.erl index 1b299fa64..07584c76e 100644 --- a/apps/emqx_auth/src/emqx_authn/emqx_authn_api.erl +++ b/apps/emqx_auth/src/emqx_authn/emqx_authn_api.erl @@ -32,6 +32,8 @@ -define(INTERNAL_ERROR, 'INTERNAL_ERROR'). -define(CONFIG, emqx_authn_config). +-define(join(List), lists:join(", ", List)). + % Swagger -define(API_TAGS_GLOBAL, [<<"Authentication">>]). @@ -40,7 +42,8 @@ -export([ api_spec/0, paths/0, - schema/1 + schema/1, + namespace/0 ]). -export([ @@ -56,6 +59,7 @@ listener_authenticator/2, listener_authenticator_status/2, authenticator_position/2, + authenticators_order/2, listener_authenticator_position/2, authenticator_users/2, authenticator_user/2, @@ -92,6 +96,8 @@ -elvis([{elvis_style, god_modules, disable}]). +namespace() -> undefined. + api_spec() -> emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}). @@ -102,7 +108,8 @@ paths() -> "/authentication/:id/status", "/authentication/:id/position/:position", "/authentication/:id/users", - "/authentication/:id/users/:user_id" + "/authentication/:id/users/:user_id", + "/authentication/order" %% hide listener authn api since 5.1.0 %% "/listeners/:listener_id/authentication", @@ -118,7 +125,8 @@ roots() -> request_user_create, request_user_update, response_user, - response_users + response_users, + request_authn_order ]. 
fields(request_user_create) -> @@ -137,7 +145,16 @@ fields(response_user) -> {is_superuser, mk(boolean(), #{default => false, required => false})} ]; fields(response_users) -> - paginated_list_type(ref(response_user)). + paginated_list_type(ref(response_user)); +fields(request_authn_order) -> + [ + {id, + mk(binary(), #{ + desc => ?DESC(param_auth_id), + required => true, + example => "password_based:built_in_database" + })} + ]. schema("/authentication") -> #{ @@ -218,7 +235,7 @@ schema("/authentication/:id/status") -> parameters => [param_auth_id()], responses => #{ 200 => emqx_dashboard_swagger:schema_with_examples( - hoconsc:ref(emqx_authn_schema, "metrics_status_fields"), + ref(emqx_authn_schema, "metrics_status_fields"), status_metrics_example() ), 404 => error_codes([?NOT_FOUND], <<"Not Found">>), @@ -313,7 +330,7 @@ schema("/listeners/:listener_id/authentication/:id/status") -> parameters => [param_listener_id(), param_auth_id()], responses => #{ 200 => emqx_dashboard_swagger:schema_with_examples( - hoconsc:ref(emqx_authn_schema, "metrics_status_fields"), + ref(emqx_authn_schema, "metrics_status_fields"), status_metrics_example() ), 400 => error_codes([?BAD_REQUEST], <<"Bad Request">>) @@ -530,6 +547,22 @@ schema("/listeners/:listener_id/authentication/:id/users/:user_id") -> 404 => error_codes([?NOT_FOUND], <<"Not Found">>) } } + }; +schema("/authentication/order") -> + #{ + 'operationId' => authenticators_order, + put => #{ + tags => ?API_TAGS_GLOBAL, + description => ?DESC(authentication_order_put), + 'requestBody' => mk( + hoconsc:array(ref(?MODULE, request_authn_order)), + #{} + ), + responses => #{ + 204 => <<"Authenticators order updated">>, + 400 => error_codes([?BAD_REQUEST], <<"Bad Request">>) + } + } }. param_auth_id() -> @@ -670,6 +703,17 @@ listener_authenticator_status( end ). 
+authenticators_order(put, #{body := AuthnOrder}) -> + AuthnIdsOrder = [Id || #{<<"id">> := Id} <- AuthnOrder], + case update_config([authentication], {reorder_authenticators, AuthnIdsOrder}) of + {ok, _} -> + {204}; + {error, {_PrePostConfigUpdate, ?CONFIG, Reason}} -> + serialize_error(Reason); + {error, Reason} -> + serialize_error(Reason) + end. + authenticator_position( put, #{bindings := #{id := AuthenticatorID, position := Position}} @@ -1253,6 +1297,21 @@ serialize_error({unknown_authn_type, Type}) -> code => <<"BAD_REQUEST">>, message => binfmt("Unknown type '~p'", [Type]) }}; +serialize_error(#{not_found := NotFound, not_reordered := NotReordered}) -> + NotFoundFmt = "Authenticators: ~ts are not found", + NotReorderedFmt = "No positions are specified for authenticators: ~ts", + Msg = + case {NotFound, NotReordered} of + {[_ | _], []} -> + binfmt(NotFoundFmt, [?join(NotFound)]); + {[], [_ | _]} -> + binfmt(NotReorderedFmt, [?join(NotReordered)]); + _ -> + binfmt(NotFoundFmt ++ ", " ++ NotReorderedFmt, [ + ?join(NotFound), ?join(NotReordered) + ]) + end, + {400, #{code => <<"BAD_REQUEST">>, message => Msg}}; serialize_error(Reason) -> {400, #{ code => <<"BAD_REQUEST">>, diff --git a/apps/emqx_auth/src/emqx_authn/emqx_authn_config.erl b/apps/emqx_auth/src/emqx_authn/emqx_authn_config.erl index 70f0f31a4..cb55428b2 100644 --- a/apps/emqx_auth/src/emqx_authn/emqx_authn_config.erl +++ b/apps/emqx_auth/src/emqx_authn/emqx_authn_config.erl @@ -135,6 +135,8 @@ do_pre_config_update(_, {move_authenticator, _ChainName, AuthenticatorID, Positi do_pre_config_update(ConfPath, {merge_authenticators, NewConfig}, OldConfig) -> MergeConfig = merge_authenticators(OldConfig, NewConfig), do_pre_config_update(ConfPath, MergeConfig, OldConfig); +do_pre_config_update(_ConfPath, {reorder_authenticators, NewOrder}, OldConfig) -> + reorder_authenticators(NewOrder, OldConfig); do_pre_config_update(_, OldConfig, OldConfig) -> {ok, OldConfig}; do_pre_config_update(ConfPath, NewConfig, 
_OldConfig) -> @@ -194,6 +196,15 @@ do_post_config_update( _AppEnvs ) -> emqx_authn_chains:move_authenticator(ChainName, AuthenticatorID, Position); +do_post_config_update( + ConfPath, + {reorder_authenticators, NewOrder}, + _NewConfig, + _OldConfig, + _AppEnvs +) -> + ChainName = chain_name(ConfPath), + ok = emqx_authn_chains:reorder_authenticator(ChainName, NewOrder); do_post_config_update(_, _UpdateReq, OldConfig, OldConfig, _AppEnvs) -> ok; do_post_config_update(ConfPath, _UpdateReq, NewConfig0, OldConfig0, _AppEnvs) -> @@ -389,6 +400,24 @@ merge_authenticators(OriginConf0, NewConf0) -> ), lists:reverse(OriginConf1) ++ NewConf1. +reorder_authenticators(NewOrder, OldConfig) -> + OldConfigWithIds = [{authenticator_id(Auth), Auth} || Auth <- OldConfig], + reorder_authenticators(NewOrder, OldConfigWithIds, [], []). + +reorder_authenticators([], [] = _RemConfigWithIds, ReorderedConfig, [] = _NotFoundIds) -> + {ok, lists:reverse(ReorderedConfig)}; +reorder_authenticators([], RemConfigWithIds, _ReorderedConfig, NotFoundIds) -> + {error, #{not_found => NotFoundIds, not_reordered => [Id || {Id, _} <- RemConfigWithIds]}}; +reorder_authenticators([Id | RemOrder], RemConfigWithIds, ReorderedConfig, NotFoundIds) -> + case lists:keytake(Id, 1, RemConfigWithIds) of + {value, {_Id, Auth}, RemConfigWithIds1} -> + reorder_authenticators( + RemOrder, RemConfigWithIds1, [Auth | ReorderedConfig], NotFoundIds + ); + false -> + reorder_authenticators(RemOrder, RemConfigWithIds, ReorderedConfig, [Id | NotFoundIds]) + end. + -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -compile(nowarn_export_all). diff --git a/apps/emqx_auth/src/emqx_authz/emqx_authz.erl b/apps/emqx_auth/src/emqx_authz/emqx_authz.erl index 5bc5e88df..d1253b516 100644 --- a/apps/emqx_auth/src/emqx_authz/emqx_authz.erl +++ b/apps/emqx_auth/src/emqx_authz/emqx_authz.erl @@ -36,6 +36,7 @@ lookup/0, lookup/1, move/2, + reorder/1, update/2, merge/1, merge_local/2, @@ -64,6 +65,8 @@ maybe_write_files/1 ]). 
+-import(emqx_utils_conv, [bin/1]). + -type default_result() :: allow | deny. -type authz_result_value() :: #{result := allow | deny, from => _}. @@ -181,6 +184,9 @@ move(Type, Position) -> ?CONF_KEY_PATH, {?CMD_MOVE, type(Type), Position} ). +reorder(SourcesOrder) -> + emqx_authz_utils:update_config(?CONF_KEY_PATH, {?CMD_REORDER, SourcesOrder}). + update({?CMD_REPLACE, Type}, Sources) -> emqx_authz_utils:update_config(?CONF_KEY_PATH, {{?CMD_REPLACE, type(Type)}, Sources}); update({?CMD_DELETE, Type}, Sources) -> @@ -258,6 +264,8 @@ do_pre_config_update({?CMD_REPLACE, Sources}, _OldSources) -> NSources = lists:map(fun maybe_write_source_files/1, Sources), ok = check_dup_types(NSources), NSources; +do_pre_config_update({?CMD_REORDER, NewSourcesOrder}, OldSources) -> + reorder_sources(NewSourcesOrder, OldSources); do_pre_config_update({Op, Source}, Sources) -> throw({bad_request, #{op => Op, source => Source, sources => Sources}}). @@ -290,6 +298,16 @@ do_post_config_update(?CONF_KEY_PATH, {{?CMD_DELETE, Type}, _RawNewSource}, _Sou Front ++ Rear; do_post_config_update(?CONF_KEY_PATH, {?CMD_REPLACE, _RawNewSources}, Sources) -> overwrite_entire_sources(Sources); +do_post_config_update(?CONF_KEY_PATH, {?CMD_REORDER, NewSourcesOrder}, _Sources) -> + OldSources = lookup(), + lists:map( + fun(Type) -> + Type1 = type(Type), + {value, Val} = lists:search(fun(S) -> type(S) =:= Type1 end, OldSources), + Val + end, + NewSourcesOrder + ); do_post_config_update(?ROOT_KEY, Conf, Conf) -> #{sources := Sources} = Conf, Sources; @@ -729,6 +747,29 @@ type_take(Type, Sources) -> throw:{not_found_source, Type} -> not_found end. +reorder_sources(NewOrder, OldSources) -> + NewOrder1 = lists:map(fun type/1, NewOrder), + OldSourcesWithTypes = [{type(Source), Source} || Source <- OldSources], + reorder_sources(NewOrder1, OldSourcesWithTypes, [], []). 
+ +reorder_sources([], [] = _RemSourcesWithTypes, ReorderedSources, [] = _NotFoundTypes) -> + lists:reverse(ReorderedSources); +reorder_sources([], RemSourcesWithTypes, _ReorderedSources, NotFoundTypes) -> + {error, #{ + not_found => NotFoundTypes, not_reordered => [bin(Type) || {Type, _} <- RemSourcesWithTypes] + }}; +reorder_sources([Type | RemOrder], RemSourcesWithTypes, ReorderedSources, NotFoundTypes) -> + case lists:keytake(Type, 1, RemSourcesWithTypes) of + {value, {_Type, Source}, RemSourcesWithTypes1} -> + reorder_sources( + RemOrder, RemSourcesWithTypes1, [Source | ReorderedSources], NotFoundTypes + ); + false -> + reorder_sources(RemOrder, RemSourcesWithTypes, ReorderedSources, [ + bin(Type) | NotFoundTypes + ]) + end. + -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -compile(nowarn_export_all). diff --git a/apps/emqx_auth/src/emqx_authz/emqx_authz_api_sources.erl b/apps/emqx_auth/src/emqx_authz/emqx_authz_api_sources.erl index 00345a108..0af910d18 100644 --- a/apps/emqx_auth/src/emqx_authz/emqx_authz_api_sources.erl +++ b/apps/emqx_auth/src/emqx_authz/emqx_authz_api_sources.erl @@ -27,6 +27,8 @@ -define(BAD_REQUEST, 'BAD_REQUEST'). -define(NOT_FOUND, 'NOT_FOUND'). +-define(join(List), lists:join(", ", List)). + -export([ get_raw_sources/0, get_raw_source/1, @@ -39,13 +41,15 @@ api_spec/0, paths/0, schema/1, - fields/1 + fields/1, + namespace/0 ]). -export([ sources/2, source/2, source_move/2, + sources_order/2, aggregate_metrics/1 ]). @@ -53,6 +57,9 @@ -define(TAGS, [<<"Authorization">>]). +namespace() -> + undefined. + api_spec() -> emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}). @@ -61,7 +68,8 @@ paths() -> "/authorization/sources", "/authorization/sources/:type", "/authorization/sources/:type/status", - "/authorization/sources/:type/move" + "/authorization/sources/:type/move", + "/authorization/sources/order" ]. 
fields(sources) -> @@ -77,6 +85,15 @@ fields(position) -> in => body } )} + ]; +fields(request_sources_order) -> + [ + {type, + mk(enum(emqx_authz_schema:source_types()), #{ + desc => ?DESC(source_type), + required => true, + example => "file" + })} ]. %%-------------------------------------------------------------------- @@ -196,6 +213,22 @@ schema("/authorization/sources/:type/move") -> 404 => emqx_dashboard_swagger:error_codes([?NOT_FOUND], <<"Not Found">>) } } + }; +schema("/authorization/sources/order") -> + #{ + 'operationId' => sources_order, + put => #{ + tags => ?TAGS, + description => ?DESC(authorization_sources_order_put), + 'requestBody' => mk( + hoconsc:array(ref(?MODULE, request_sources_order)), + #{} + ), + responses => #{ + 204 => <<"Authorization sources order updated">>, + 400 => emqx_dashboard_swagger:error_codes([?BAD_REQUEST], <<"Bad Request">>) + } + } }. %%-------------------------------------------------------------------- @@ -317,6 +350,30 @@ source_move(post, #{bindings := #{type := Type}, body := #{<<"position">> := Pos end ). +sources_order(put, #{body := AuthzOrder}) -> + SourcesOrder = [Type || #{<<"type">> := Type} <- AuthzOrder], + case emqx_authz:reorder(SourcesOrder) of + {ok, _} -> + {204}; + {error, {_PrePostConfUpd, _, #{not_found := NotFound, not_reordered := NotReordered}}} -> + NotFoundFmt = "Authorization sources: ~ts are not found", + NotReorderedFmt = "No positions are specified for authorization sources: ~ts", + Msg = + case {NotFound, NotReordered} of + {[_ | _], []} -> + binfmt(NotFoundFmt, [?join(NotFound)]); + {[], [_ | _]} -> + binfmt(NotReorderedFmt, [?join(NotReordered)]); + _ -> + binfmt(NotFoundFmt ++ ", " ++ NotReorderedFmt, [ + ?join(NotFound), ?join(NotReordered) + ]) + end, + {400, #{code => <<"BAD_REQUEST">>, message => Msg}}; + {error, Reason} -> + {400, #{code => <<"BAD_REQUEST">>, message => bin(Reason)}} + end. 
+ %%-------------------------------------------------------------------- %% Internal functions %%-------------------------------------------------------------------- @@ -556,7 +613,9 @@ position_example() -> } }. -bin(Term) -> erlang:iolist_to_binary(io_lib:format("~p", [Term])). +bin(Term) -> binfmt("~p", [Term]). + +binfmt(Fmt, Args) -> iolist_to_binary(io_lib:format(Fmt, Args)). status_metrics_example() -> #{ diff --git a/apps/emqx_auth/src/emqx_authz/emqx_authz_schema.erl b/apps/emqx_auth/src/emqx_authz/emqx_authz_schema.erl index 426c7a9f6..8e82ea061 100644 --- a/apps/emqx_auth/src/emqx_authz/emqx_authz_schema.erl +++ b/apps/emqx_auth/src/emqx_authz/emqx_authz_schema.erl @@ -22,7 +22,8 @@ -export([ roots/0, fields/1, - desc/1 + desc/1, + namespace/0 ]). -export([ @@ -41,6 +42,10 @@ authz_common_fields/1 ]). +-ifdef(TEST). +-export([source_schema_mods/0]). +-endif. + -define(AUTHZ_MODS_PT_KEY, {?MODULE, authz_schema_mods}). %%-------------------------------------------------------------------- @@ -65,6 +70,8 @@ roots() -> []. +namespace() -> undefined. + fields(?CONF_NS) -> emqx_schema:authz_fields() ++ authz_fields(); fields("metrics_status_fields") -> diff --git a/apps/emqx_auth/test/emqx_authn/emqx_authn_api_SUITE.erl b/apps/emqx_auth/test/emqx_authn/emqx_authn_api_SUITE.erl index cceab0d54..fbd0bb9e4 100644 --- a/apps/emqx_auth/test/emqx_authn/emqx_authn_api_SUITE.erl +++ b/apps/emqx_auth/test/emqx_authn/emqx_authn_api_SUITE.erl @@ -124,6 +124,111 @@ t_authenticator_fail(_) -> t_authenticator_position(_) -> test_authenticator_position([]). 
+t_authenticators_reorder(_) -> + AuthenticatorConfs = [ + emqx_authn_test_lib:http_example(), + %% Disabling an authenticator must not affect the requested order + (emqx_authn_test_lib:jwt_example())#{enable => false}, + emqx_authn_test_lib:built_in_database_example() + ], + lists:foreach( + fun(Conf) -> + {ok, 200, _} = request( + post, + uri([?CONF_NS]), + Conf + ) + end, + AuthenticatorConfs + ), + ?assertAuthenticatorsMatch( + [ + #{<<"mechanism">> := <<"password_based">>, <<"backend">> := <<"http">>}, + #{<<"mechanism">> := <<"jwt">>, <<"enable">> := false}, + #{<<"mechanism">> := <<"password_based">>, <<"backend">> := <<"built_in_database">>} + ], + [?CONF_NS] + ), + + OrderUri = uri([?CONF_NS, "order"]), + + %% Invalid moves + + %% Bad schema + {ok, 400, _} = request( + put, + OrderUri, + [ + #{<<"not-id">> => <<"password_based:http">>}, + #{<<"not-id">> => <<"jwt">>} + ] + ), + + %% Partial order + {ok, 400, _} = request( + put, + OrderUri, + [ + #{<<"id">> => <<"password_based:http">>}, + #{<<"id">> => <<"jwt">>} + ] + ), + + %% Not found authenticators + {ok, 400, _} = request( + put, + OrderUri, + [ + #{<<"id">> => <<"password_based:http">>}, + #{<<"id">> => <<"jwt">>}, + #{<<"id">> => <<"password_based:built_in_database">>}, + #{<<"id">> => <<"password_based:mongodb">>} + ] + ), + + %% Both partial and not found errors + {ok, 400, _} = request( + put, + OrderUri, + [ + #{<<"id">> => <<"password_based:http">>}, + #{<<"id">> => <<"password_based:built_in_database">>}, + #{<<"id">> => <<"password_based:mongodb">>} + ] + ), + + %% Duplicates + {ok, 400, _} = request( + put, + OrderUri, + [ + #{<<"id">> => <<"password_based:http">>}, + #{<<"id">> => <<"password_based:built_in_database">>}, + #{<<"id">> => <<"jwt">>}, + #{<<"id">> => <<"password_based:http">>} + ] + ), + + %% Valid moves + {ok, 204, _} = request( + put, + OrderUri, + [ + #{<<"id">> => <<"password_based:built_in_database">>}, + #{<<"id">> => <<"jwt">>}, + #{<<"id">> => 
<<"password_based:http">>} + ] + ), + + ?assertAuthenticatorsMatch( + [ + #{<<"mechanism">> := <<"password_based">>, <<"backend">> := <<"built_in_database">>}, + #{<<"mechanism">> := <<"jwt">>, <<"enable">> := false}, + #{<<"mechanism">> := <<"password_based">>, <<"backend">> := <<"http">>} + ], + [?CONF_NS] + ). + %t_listener_authenticators(_) -> % test_authenticators(["listeners", ?TCP_DEFAULT]). diff --git a/apps/emqx_auth/test/emqx_authz/emqx_authz_SUITE.erl b/apps/emqx_auth/test/emqx_authz/emqx_authz_SUITE.erl index e0ef906ad..746f3daad 100644 --- a/apps/emqx_auth/test/emqx_authz/emqx_authz_SUITE.erl +++ b/apps/emqx_auth/test/emqx_authz/emqx_authz_SUITE.erl @@ -292,16 +292,19 @@ t_update_source(_) -> t_replace_all(_) -> RootKey = [<<"authorization">>], Conf = emqx:get_raw_config(RootKey), - emqx_authz_utils:update_config(RootKey, Conf#{ - <<"sources">> => [ - ?SOURCE_FILE1, - ?SOURCE_REDIS, - ?SOURCE_POSTGRESQL, - ?SOURCE_MYSQL, - ?SOURCE_MONGODB, - ?SOURCE_HTTP - ] - }), + ?assertMatch( + {ok, _}, + emqx_authz_utils:update_config(RootKey, Conf#{ + <<"sources">> => [ + ?SOURCE_FILE1, + ?SOURCE_REDIS, + ?SOURCE_POSTGRESQL, + ?SOURCE_MYSQL, + ?SOURCE_MONGODB, + ?SOURCE_HTTP + ] + }) + ), %% config ?assertMatch( [ @@ -561,7 +564,7 @@ t_publish_last_will_testament_banned_client_connecting(_Config) -> %% Now we ban the client while it is connected. Now = erlang:system_time(second), - Who = {username, Username}, + Who = emqx_banned:who(username, Username), emqx_banned:create(#{ who => Who, by => <<"test">>, diff --git a/apps/emqx_auth/test/emqx_authz/emqx_authz_api_sources_SUITE.erl b/apps/emqx_auth/test/emqx_authz/emqx_authz_api_sources_SUITE.erl index e9dfa6a7b..246594777 100644 --- a/apps/emqx_auth/test/emqx_authz/emqx_authz_api_sources_SUITE.erl +++ b/apps/emqx_auth/test/emqx_authz/emqx_authz_api_sources_SUITE.erl @@ -29,7 +29,7 @@ -define(PGSQL_HOST, "pgsql"). -define(REDIS_SINGLE_HOST, "redis"). 
--define(SOURCE_REDIS1, #{ +-define(SOURCE_HTTP, #{ <<"type">> => <<"http">>, <<"enable">> => true, <<"url">> => <<"https://fake.com:443/acl?username=", ?PH_USERNAME/binary>>, @@ -74,7 +74,7 @@ <<"ssl">> => #{<<"enable">> => false}, <<"query">> => <<"abcb">> }). --define(SOURCE_REDIS2, #{ +-define(SOURCE_REDIS, #{ <<"type">> => <<"redis">>, <<"enable">> => true, <<"servers">> => <>, @@ -188,10 +188,10 @@ t_api(_) -> {ok, 204, _} = request(post, uri(["authorization", "sources"]), Source) end || Source <- lists:reverse([ - ?SOURCE_MONGODB, ?SOURCE_MYSQL, ?SOURCE_POSTGRESQL, ?SOURCE_REDIS2, ?SOURCE_FILE + ?SOURCE_MONGODB, ?SOURCE_MYSQL, ?SOURCE_POSTGRESQL, ?SOURCE_REDIS, ?SOURCE_FILE ]) ], - {ok, 204, _} = request(post, uri(["authorization", "sources"]), ?SOURCE_REDIS1), + {ok, 204, _} = request(post, uri(["authorization", "sources"]), ?SOURCE_HTTP), {ok, 200, Result2} = request(get, uri(["authorization", "sources"]), []), Sources = get_sources(Result2), @@ -211,7 +211,7 @@ t_api(_) -> {ok, 204, _} = request( put, uri(["authorization", "sources", "http"]), - ?SOURCE_REDIS1#{<<"enable">> := false} + ?SOURCE_HTTP#{<<"enable">> := false} ), {ok, 200, Result3} = request(get, uri(["authorization", "sources", "http"]), []), ?assertMatch( @@ -338,7 +338,7 @@ t_api(_) -> {ok, 204, _} = request( put, uri(["authorization", "sources", "redis"]), - ?SOURCE_REDIS2#{ + ?SOURCE_REDIS#{ <<"servers">> := [ <<"192.168.1.100:6379">>, <<"192.168.1.100:6380">> @@ -503,7 +503,7 @@ t_api(_) -> t_source_move(_) -> {ok, _} = emqx_authz:update(replace, [ - ?SOURCE_REDIS1, ?SOURCE_MONGODB, ?SOURCE_MYSQL, ?SOURCE_POSTGRESQL, ?SOURCE_REDIS2 + ?SOURCE_HTTP, ?SOURCE_MONGODB, ?SOURCE_MYSQL, ?SOURCE_POSTGRESQL, ?SOURCE_REDIS ]), ?assertMatch( [ @@ -582,6 +582,123 @@ t_source_move(_) -> ok. 
+t_sources_reorder(_) -> + %% Disabling an auth source must not affect the requested order + MongoDbDisabled = (?SOURCE_MONGODB)#{<<"enable">> => false}, + {ok, _} = emqx_authz:update(replace, [ + ?SOURCE_HTTP, MongoDbDisabled, ?SOURCE_MYSQL, ?SOURCE_POSTGRESQL, ?SOURCE_REDIS + ]), + ?assertMatch( + [ + #{type := http}, + #{type := mongodb}, + #{type := mysql}, + #{type := postgresql}, + #{type := redis} + ], + emqx_authz:lookup() + ), + + OrderUri = uri(["authorization", "sources", "order"]), + + %% Valid moves + {ok, 204, _} = request( + put, + OrderUri, + [ + #{<<"type">> => <<"redis">>}, + #{<<"type">> => <<"http">>}, + #{<<"type">> => <<"postgresql">>}, + #{<<"type">> => <<"mysql">>}, + #{<<"type">> => <<"mongodb">>} + ] + ), + ?assertMatch( + [ + #{type := redis}, + #{type := http}, + #{type := postgresql}, + #{type := mysql}, + #{type := mongodb, enable := false} + ], + emqx_authz:lookup() + ), + + %% Invalid moves + + %% Bad schema + {ok, 400, _} = request( + put, + OrderUri, + [#{<<"not-type">> => <<"redis">>}] + ), + {ok, 400, _} = request( + put, + OrderUri, + [ + #{<<"type">> => <<"unkonw">>}, + #{<<"type">> => <<"redis">>}, + #{<<"type">> => <<"http">>}, + #{<<"type">> => <<"postgresql">>}, + #{<<"type">> => <<"mysql">>}, + #{<<"type">> => <<"mongodb">>} + ] + ), + + %% Partial order + {ok, 400, _} = request( + put, + OrderUri, + [ + #{<<"type">> => <<"redis">>}, + #{<<"type">> => <<"http">>}, + #{<<"type">> => <<"postgresql">>}, + #{<<"type">> => <<"mysql">>} + ] + ), + + %% Not found authenticators + {ok, 400, _} = request( + put, + OrderUri, + [ + #{<<"type">> => <<"redis">>}, + #{<<"type">> => <<"http">>}, + #{<<"type">> => <<"postgresql">>}, + #{<<"type">> => <<"mysql">>}, + #{<<"type">> => <<"mongodb">>}, + #{<<"type">> => <<"built_in_database">>}, + #{<<"type">> => <<"file">>} + ] + ), + + %% Both partial and not found errors + {ok, 400, _} = request( + put, + OrderUri, + [ + #{<<"type">> => <<"redis">>}, + #{<<"type">> => <<"http">>}, + 
#{<<"type">> => <<"postgresql">>}, + #{<<"type">> => <<"mysql">>}, + #{<<"type">> => <<"built_in_database">>} + ] + ), + + %% Duplicates + {ok, 400, _} = request( + put, + OrderUri, + [ + #{<<"type">> => <<"redis">>}, + #{<<"type">> => <<"http">>}, + #{<<"type">> => <<"postgresql">>}, + #{<<"type">> => <<"mysql">>}, + #{<<"type">> => <<"mongodb">>}, + #{<<"type">> => <<"http">>} + ] + ). + t_aggregate_metrics(_) -> Metrics = #{ 'emqx@node1.emqx.io' => #{ diff --git a/apps/emqx_auth_ldap/BSL.txt b/apps/emqx_auth_ldap/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_auth_ldap/BSL.txt +++ b/apps/emqx_auth_ldap/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. -Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_auth_mnesia/src/emqx_auth_mnesia.app.src b/apps/emqx_auth_mnesia/src/emqx_auth_mnesia.app.src index 28b8d7535..a91849d24 100644 --- a/apps/emqx_auth_mnesia/src/emqx_auth_mnesia.app.src +++ b/apps/emqx_auth_mnesia/src/emqx_auth_mnesia.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_auth_mnesia, [ {description, "EMQX Buitl-in Database Authentication and Authorization"}, - {vsn, "0.1.3"}, + {vsn, "0.1.4"}, {registered, []}, {mod, {emqx_auth_mnesia_app, []}}, {applications, [ diff --git a/apps/emqx_auth_mnesia/src/emqx_auth_mnesia_app.erl b/apps/emqx_auth_mnesia/src/emqx_auth_mnesia_app.erl index d08d38e10..5b3d1c6c8 100644 --- a/apps/emqx_auth_mnesia/src/emqx_auth_mnesia_app.erl +++ b/apps/emqx_auth_mnesia/src/emqx_auth_mnesia_app.erl @@ -25,6 +25,7 @@ start(_StartType, _StartArgs) -> ok = emqx_authz_mnesia:init_tables(), ok = emqx_authn_mnesia:init_tables(), + ok = emqx_authn_scram_mnesia:init_tables(), ok = emqx_authz:register_source(?AUTHZ_TYPE, 
emqx_authz_mnesia), ok = emqx_authn:register_provider(?AUTHN_TYPE_SIMPLE, emqx_authn_mnesia), ok = emqx_authn:register_provider(?AUTHN_TYPE_SCRAM, emqx_authn_scram_mnesia), diff --git a/apps/emqx_auth_mnesia/src/emqx_authn_mnesia.erl b/apps/emqx_auth_mnesia/src/emqx_authn_mnesia.erl index 8cbd8f35e..e5cad0005 100644 --- a/apps/emqx_auth_mnesia/src/emqx_authn_mnesia.erl +++ b/apps/emqx_auth_mnesia/src/emqx_authn_mnesia.erl @@ -55,7 +55,7 @@ do_update_user/3 ]). --export([mnesia/1, init_tables/0]). +-export([init_tables/0]). -export([backup_tables/0]). @@ -69,8 +69,6 @@ is_superuser :: boolean() }). --boot_mnesia({mnesia, [boot]}). - -define(TAB, ?MODULE). -define(AUTHN_QSCHEMA, [ {<<"like_user_id">>, binary}, @@ -83,8 +81,8 @@ %%------------------------------------------------------------------------------ %% @doc Create or replicate tables. --spec mnesia(boot | copy) -> ok. -mnesia(boot) -> +-spec create_tables() -> [mria:table()]. +create_tables() -> ok = mria:create_table(?TAB, [ {rlog_shard, ?AUTHN_SHARD}, {type, ordered_set}, @@ -92,12 +90,13 @@ mnesia(boot) -> {record_name, user_info}, {attributes, record_info(fields, user_info)}, {storage_properties, [{ets, [{read_concurrency, true}]}]} - ]). + ]), + [?TAB]. %% Init -spec init_tables() -> ok. init_tables() -> - ok = mria_rlog:wait_for_shards([?AUTHN_SHARD], infinity). + ok = mria:wait_for_tables(create_tables()). %%------------------------------------------------------------------------------ %% Data backup diff --git a/apps/emqx_auth_mnesia/src/emqx_authn_scram_mnesia.erl b/apps/emqx_auth_mnesia/src/emqx_authn_scram_mnesia.erl index a66ae5786..705924ea4 100644 --- a/apps/emqx_auth_mnesia/src/emqx_authn_scram_mnesia.erl +++ b/apps/emqx_auth_mnesia/src/emqx_authn_scram_mnesia.erl @@ -65,9 +65,7 @@ -type user_group() :: binary(). --export([mnesia/1]). - --boot_mnesia({mnesia, [boot]}). +-export([init_tables/0]). 
-record(user_info, { user_id, @@ -84,8 +82,8 @@ %%------------------------------------------------------------------------------ %% @doc Create or replicate tables. --spec mnesia(boot | copy) -> ok. -mnesia(boot) -> +-spec create_tables() -> [mria:table()]. +create_tables() -> ok = mria:create_table(?TAB, [ {rlog_shard, ?AUTHN_SHARD}, {type, ordered_set}, @@ -93,7 +91,12 @@ mnesia(boot) -> {record_name, user_info}, {attributes, record_info(fields, user_info)}, {storage_properties, [{ets, [{read_concurrency, true}]}]} - ]). + ]), + [?TAB]. + +-spec init_tables() -> ok. +init_tables() -> + mria:wait_for_tables(create_tables()). %%------------------------------------------------------------------------------ %% Data backup diff --git a/apps/emqx_auth_mnesia/src/emqx_authz_api_mnesia.erl b/apps/emqx_auth_mnesia/src/emqx_authz_api_mnesia.erl index 5fc1ec280..cab596da3 100644 --- a/apps/emqx_auth_mnesia/src/emqx_authz_api_mnesia.erl +++ b/apps/emqx_auth_mnesia/src/emqx_authz_api_mnesia.erl @@ -35,7 +35,8 @@ api_spec/0, paths/0, schema/1, - fields/1 + fields/1, + namespace/0 ]). %% operation funs @@ -69,6 +70,8 @@ -define(PUT_MAP_EXAMPLE, in_put_requestBody). -define(POST_ARRAY_EXAMPLE, in_post_requestBody). +namespace() -> undefined. + api_spec() -> emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}). diff --git a/apps/emqx_auth_mnesia/src/emqx_authz_mnesia.erl b/apps/emqx_auth_mnesia/src/emqx_authz_mnesia.erl index 27000b7a3..d1a40d5cd 100644 --- a/apps/emqx_auth_mnesia/src/emqx_authz_mnesia.erl +++ b/apps/emqx_auth_mnesia/src/emqx_authz_mnesia.erl @@ -56,7 +56,6 @@ %% Management API -export([ - mnesia/1, init_tables/0, store_rules/2, purge_rules/0, @@ -74,17 +73,16 @@ -compile(nowarn_export_all). -endif. --boot_mnesia({mnesia, [boot]}). - --spec mnesia(boot | copy) -> ok. -mnesia(boot) -> +-spec create_tables() -> [mria:table()]. 
+create_tables() -> ok = mria:create_table(?ACL_TABLE, [ {type, ordered_set}, {rlog_shard, ?ACL_SHARDED}, {storage, disc_copies}, {attributes, record_info(fields, ?ACL_TABLE)}, {storage_properties, [{ets, [{read_concurrency, true}]}]} - ]). + ]), + [?ACL_TABLE]. %%-------------------------------------------------------------------- %% emqx_authz callbacks @@ -138,7 +136,7 @@ backup_tables() -> [?ACL_TABLE]. %% Init -spec init_tables() -> ok. init_tables() -> - ok = mria_rlog:wait_for_shards([?ACL_SHARDED], infinity). + ok = mria:wait_for_tables(create_tables()). %% @doc Update authz rules -spec store_rules(who(), rules()) -> ok. diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index d80050191..5a62ae139 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -39,6 +39,7 @@ transform_bridge_v1_config_to_action_config/4, action_convert_from_connector/3 ]). +-export([clean_cache/0]). -callback bridge_v1_type_name() -> atom() @@ -77,7 +78,7 @@ ]). %% ==================================================================== -%% Hadcoded list of info modules for actions +%% HardCoded list of info modules for actions %% TODO: Remove this list once we have made sure that all relevants %% apps are loaded before this module is called. 
%% ==================================================================== @@ -89,16 +90,27 @@ hard_coded_action_info_modules_ee() -> emqx_bridge_confluent_producer_action_info, emqx_bridge_gcp_pubsub_producer_action_info, emqx_bridge_kafka_action_info, + emqx_bridge_kinesis_action_info, + emqx_bridge_hstreamdb_action_info, emqx_bridge_matrix_action_info, emqx_bridge_mongodb_action_info, + emqx_bridge_oracle_action_info, + emqx_bridge_rocketmq_action_info, emqx_bridge_influxdb_action_info, + emqx_bridge_cassandra_action_info, + emqx_bridge_clickhouse_action_info, emqx_bridge_mysql_action_info, emqx_bridge_pgsql_action_info, emqx_bridge_syskeeper_action_info, emqx_bridge_timescale_action_info, emqx_bridge_redis_action_info, emqx_bridge_iotdb_action_info, - emqx_bridge_es_action_info + emqx_bridge_es_action_info, + emqx_bridge_opents_action_info, + emqx_bridge_rabbitmq_action_info, + emqx_bridge_greptimedb_action_info, + emqx_bridge_tdengine_action_info, + emqx_bridge_s3_action_info ]. -else. hard_coded_action_info_modules_ee() -> @@ -307,6 +319,9 @@ build_cache() -> persistent_term:put(internal_emqx_action_persistent_term_info_key(), ActionInfoMap), ActionInfoMap. +clean_cache() -> + persistent_term:erase(internal_emqx_action_persistent_term_info_key()). 
+ action_info_modules() -> ActionInfoModules = [ action_info_modules(App) diff --git a/apps/emqx_bridge/src/emqx_bridge.app.src b/apps/emqx_bridge/src/emqx_bridge.app.src index cadbf35a0..9ef567f23 100644 --- a/apps/emqx_bridge/src/emqx_bridge.app.src +++ b/apps/emqx_bridge/src/emqx_bridge.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_bridge, [ {description, "EMQX bridges"}, - {vsn, "0.1.33"}, + {vsn, "0.1.34"}, {registered, [emqx_bridge_sup]}, {mod, {emqx_bridge_app, []}}, {applications, [ diff --git a/apps/emqx_bridge/src/emqx_bridge_app.erl b/apps/emqx_bridge/src/emqx_bridge_app.erl index 321f59f28..285102aa9 100644 --- a/apps/emqx_bridge/src/emqx_bridge_app.erl +++ b/apps/emqx_bridge/src/emqx_bridge_app.erl @@ -44,6 +44,7 @@ stop(_State) -> emqx_conf:remove_handler(?TOP_LELVE_HDLR_PATH), ok = emqx_bridge:unload(), ok = emqx_bridge_v2:unload(), + emqx_action_info:clean_cache(), ok. -if(?EMQX_RELEASE_EDITION == ee). diff --git a/apps/emqx_bridge/src/emqx_bridge_v2_api.erl b/apps/emqx_bridge/src/emqx_bridge_v2_api.erl index 6dcd24355..8f852e391 100644 --- a/apps/emqx_bridge/src/emqx_bridge_v2_api.erl +++ b/apps/emqx_bridge/src/emqx_bridge_v2_api.erl @@ -747,7 +747,7 @@ schema("/source_types") -> %%------------------------------------------------------------------------------ handle_list(ConfRootKey) -> - Nodes = emqx:running_nodes(), + Nodes = nodes_supporting_bpapi_version(6), NodeReplies = emqx_bridge_proto_v6:v2_list_bridges_on_nodes_v6(Nodes, ConfRootKey), case is_ok(NodeReplies) of {ok, NodeBridges} -> @@ -942,7 +942,7 @@ is_ok(ResL) -> %% bridge helpers -spec lookup_from_all_nodes(emqx_bridge_v2:root_cfg_key(), _, _, _) -> _. lookup_from_all_nodes(ConfRootKey, BridgeType, BridgeName, SuccCode) -> - Nodes = emqx:running_nodes(), + Nodes = nodes_supporting_bpapi_version(6), case is_ok( emqx_bridge_proto_v6:v2_lookup_from_all_nodes_v6( @@ -959,7 +959,7 @@ lookup_from_all_nodes(ConfRootKey, BridgeType, BridgeName, SuccCode) -> end. 
get_metrics_from_all_nodes(ConfRootKey, Type, Name) -> - Nodes = emqx:running_nodes(), + Nodes = nodes_supporting_bpapi_version(6), Result = maybe_unwrap( emqx_bridge_proto_v6:v2_get_metrics_from_all_nodes_v6(Nodes, ConfRootKey, Type, Name) ), @@ -1058,6 +1058,16 @@ supported_versions(_Call) -> bpapi_version_range(6, 6). bpapi_version_range(From, To) -> lists:seq(From, To). +nodes_supporting_bpapi_version(Vsn) -> + [ + N + || N <- emqx:running_nodes(), + case emqx_bpapi:supported_version(N, ?BPAPI_NAME) of + undefined -> false; + NVsn when is_number(NVsn) -> NVsn >= Vsn + end + ]. + maybe_unwrap({error, not_implemented}) -> {error, not_implemented}; maybe_unwrap(RpcMulticallResult) -> diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl index cc4c6eb01..3822c7a34 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl @@ -13,7 +13,8 @@ examples/1, resource_type/1, bridge_impl_module/1, - fields/1 + fields/1, + namespace/0 ]). api_schemas(Method) -> @@ -139,6 +140,8 @@ bridge_impl_module(azure_event_hub_producer) -> bridge_impl_module(_BridgeType) -> undefined. +namespace() -> undefined. + fields(bridges) -> [ {hstreamdb, diff --git a/apps/emqx_bridge/test/emqx_bridge_v2_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_v2_SUITE.erl index ba631f71a..37cb8aef3 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v2_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v2_SUITE.erl @@ -221,6 +221,32 @@ t_create_remove(_) -> ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge), ok. 
+t_create_disabled_bridge(_) -> + Config = #{<<"connector">> := Connector} = bridge_config(), + Disable = Config#{<<"enable">> => false}, + BridgeType = bridge_type(), + {ok, _} = emqx_bridge_v2:create(BridgeType, my_enable_bridge, Config), + {ok, _} = emqx_bridge_v2:create(BridgeType, my_disable_bridge, Disable), + ConnectorId = emqx_connector_resource:resource_id(con_type(), Connector), + ?assertMatch( + [ + {_, #{ + enable := true, + connector := Connector, + bridge_type := _ + }}, + {_, #{ + enable := false, + connector := Connector, + bridge_type := _ + }} + ], + emqx_bridge_v2:get_channels_for_connector(ConnectorId) + ), + ok = emqx_bridge_v2:remove(bridge_type(), my_enable_bridge), + ok = emqx_bridge_v2:remove(bridge_type(), my_disable_bridge), + ok. + t_list(_) -> [] = emqx_bridge_v2:list(), {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, bridge_config()), diff --git a/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl index 4f98baebf..d56b45a17 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl @@ -106,7 +106,6 @@ -define(KAFKA_BRIDGE_UPDATE(Name, Connector), maps:without([<<"name">>, <<"type">>], ?KAFKA_BRIDGE(Name, Connector)) ). --define(KAFKA_BRIDGE_UPDATE(Name), ?KAFKA_BRIDGE_UPDATE(Name, ?ACTION_CONNECTOR_NAME)). -define(SOURCE_TYPE_STR, "mqtt"). -define(SOURCE_TYPE, <>). @@ -1477,7 +1476,7 @@ t_cluster_later_join_metrics(Config) -> ?assertMatch( {ok, 200, #{ <<"metrics">> := #{<<"success">> := _}, - <<"node_metrics">> := [#{<<"metrics">> := #{}}, #{<<"metrics">> := #{}} | _] + <<"node_metrics">> := [#{<<"metrics">> := #{}} | _] }}, request_json(get, uri([?ACTIONS_ROOT, ActionID, "metrics"]), Config) ), @@ -1512,3 +1511,47 @@ t_raw_config_response_defaults(Config) -> ) ), ok. 
+ +t_older_version_nodes_in_cluster(matrix) -> + [ + [cluster, actions], + [cluster, sources] + ]; +t_older_version_nodes_in_cluster(Config) -> + [_, Kind | _] = group_path(Config), + PrimaryNode = ?config(node, Config), + OtherNode = maybe_get_other_node(Config), + ?assertNotEqual(OtherNode, PrimaryNode), + Name = atom_to_binary(?FUNCTION_NAME), + ?check_trace( + begin + #{api_root_key := APIRootKey} = get_common_values(Kind, Name), + erpc:call(PrimaryNode, fun() -> + meck:new(emqx_bpapi, [no_history, passthrough, no_link]), + meck:expect(emqx_bpapi, supported_version, fun(N, Api) -> + case N =:= OtherNode of + true -> 1; + false -> meck:passthrough([N, Api]) + end + end) + end), + erpc:call(OtherNode, fun() -> + meck:new(emqx_bridge_v2, [no_history, passthrough, no_link]), + meck:expect(emqx_bridge_v2, list, fun(_ConfRootKey) -> + error(should_not_be_called) + end) + end), + ?assertMatch( + {ok, 200, _}, + request_json( + get, + uri([APIRootKey]), + Config + ) + ), + ok + end, + [] + ), + + ok. diff --git a/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl b/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl index 7fef33115..6e731cb80 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl @@ -119,11 +119,13 @@ delete_all_connectors() -> ). %% test helpers -parse_and_check(BridgeType, BridgeName, ConfigString) -> - {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), - hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), - #{<<"bridges">> := #{BridgeType := #{BridgeName := BridgeConfig}}} = RawConf, - BridgeConfig. +parse_and_check(Type, Name, InnerConfigMap0) -> + TypeBin = emqx_utils_conv:bin(Type), + RawConf = #{<<"actions">> => #{TypeBin => #{Name => InnerConfigMap0}}}, + #{<<"actions">> := #{TypeBin := #{Name := InnerConfigMap}}} = hocon_tconf:check_plain( + emqx_bridge_v2_schema, RawConf, #{required => false, atom_key => false} + ), + InnerConfigMap. 
bridge_id(Config) -> BridgeType = ?config(bridge_type, Config), diff --git a/apps/emqx_bridge_azure_event_hub/BSL.txt b/apps/emqx_bridge_azure_event_hub/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_azure_event_hub/BSL.txt +++ b/apps/emqx_bridge_azure_event_hub/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. -Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_azure_event_hub/rebar.config b/apps/emqx_bridge_azure_event_hub/rebar.config index 76e1ae0ef..269239620 100644 --- a/apps/emqx_bridge_azure_event_hub/rebar.config +++ b/apps/emqx_bridge_azure_event_hub/rebar.config @@ -2,8 +2,8 @@ {erl_opts, [debug_info]}. {deps, [ - {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.9.1"}}}, - {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.3"}}}, + {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.10.2"}}}, + {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.5"}}}, {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.1"}}}, {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}}, {snappyer, "1.2.9"}, diff --git a/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_v2_SUITE.erl b/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_v2_SUITE.erl index 9661004d0..0a3d75427 100644 --- a/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_v2_SUITE.erl +++ b/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_v2_SUITE.erl @@ -212,10 +212,7 @@ serde_roundtrip(InnerConfigMap0) -> InnerConfigMap. 
parse_and_check_bridge_config(InnerConfigMap, Name) -> - TypeBin = ?BRIDGE_TYPE_BIN, - RawConf = #{<<"bridges">> => #{TypeBin => #{Name => InnerConfigMap}}}, - hocon_tconf:check_plain(emqx_bridge_v2_schema, RawConf, #{required => false, atom_key => false}), - InnerConfigMap. + emqx_bridge_v2_testlib:parse_and_check(?BRIDGE_TYPE_BIN, Name, InnerConfigMap). shared_secret_path() -> os:getenv("CI_SHARED_SECRET_PATH", "/var/lib/secret"). diff --git a/apps/emqx_bridge_cassandra/BSL.txt b/apps/emqx_bridge_cassandra/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_cassandra/BSL.txt +++ b/apps/emqx_bridge_cassandra/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. -Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_cassandra/rebar.config b/apps/emqx_bridge_cassandra/rebar.config index c0a72fef9..e98146d78 100644 --- a/apps/emqx_bridge_cassandra/rebar.config +++ b/apps/emqx_bridge_cassandra/rebar.config @@ -2,7 +2,7 @@ {erl_opts, [debug_info]}. 
{deps, [ - {ecql, {git, "https://github.com/emqx/ecql.git", {tag, "v0.5.2"}}}, + {ecql, {git, "https://github.com/emqx/ecql.git", {tag, "v0.6.1"}}}, {emqx_connector, {path, "../../apps/emqx_connector"}}, {emqx_resource, {path, "../../apps/emqx_resource"}}, {emqx_bridge, {path, "../../apps/emqx_bridge"}} diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src index 97be100d2..aa8290b98 100644 --- a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_cassandra, [ {description, "EMQX Enterprise Cassandra Bridge"}, - {vsn, "0.1.6"}, + {vsn, "0.2.0"}, {registered, []}, {applications, [ kernel, @@ -8,7 +8,7 @@ emqx_resource, ecql ]}, - {env, []}, + {env, [{emqx_action_info_modules, [emqx_bridge_cassandra_action_info]}]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl index 2724b7c09..83268cab5 100644 --- a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl @@ -12,11 +12,17 @@ %% schema examples -export([ - conn_bridge_examples/1, values/2, fields/2 ]). +%% Examples +-export([ + bridge_v2_examples/1, + conn_bridge_examples/1, + connector_examples/1 +]). + %% schema -export([ namespace/0, @@ -26,10 +32,13 @@ ]). -define(DEFAULT_CQL, << - "insert into mqtt_msg(topic, msgid, sender, qos, payload, arrived, retain) " - "values (${topic}, ${id}, ${clientid}, ${qos}, ${payload}, ${timestamp}, ${flags.retain})" + "insert into mqtt_msg(msgid, topic, qos, payload, arrived) " + "values (${id}, ${topic}, ${qos}, ${payload}, ${timestamp})" >>). +-define(CONNECTOR_TYPE, cassandra). +-define(ACTION_TYPE, cassandra). 
+ %%-------------------------------------------------------------------- %% schema examples @@ -43,6 +52,41 @@ conn_bridge_examples(Method) -> } ]. +bridge_v2_examples(Method) -> + ParamsExample = #{ + parameters => #{ + cql => ?DEFAULT_CQL + } + }, + [ + #{ + <<"cassandra">> => #{ + summary => <<"Cassandra Action">>, + value => emqx_bridge_v2_schema:action_values( + Method, cassandra, cassandra, ParamsExample + ) + } + } + ]. + +connector_examples(Method) -> + [ + #{ + <<"cassandra">> => #{ + summary => <<"Cassandra Connector">>, + value => emqx_connector_schema:connector_values( + Method, cassandra, #{ + servers => <<"127.0.0.1:9042">>, + keyspace => <<"mqtt">>, + username => <<"root">>, + password => <<"******">>, + pool_size => 8 + } + ) + } + } + ]. + %% no difference in get/post/put method values(_Method, Type) -> #{ @@ -73,14 +117,47 @@ namespace() -> "bridge_cassa". roots() -> []. +fields("config_connector") -> + emqx_connector_schema:common_fields() ++ + emqx_bridge_cassandra_connector:fields("connector") ++ + emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts); +fields(action) -> + {cassandra, + mk( + hoconsc:map(name, ref(?MODULE, cassandra_action)), + #{desc => <<"Cassandra Action Config">>, required => false} + )}; +fields(cassandra_action) -> + emqx_bridge_v2_schema:make_producer_action_schema( + mk(ref(?MODULE, action_parameters), #{ + required => true, desc => ?DESC(action_parameters) + }) + ); +fields(action_parameters) -> + [ + cql_field() + ]; +fields(connector_resource_opts) -> + emqx_connector_schema:resource_opts_fields(); +fields(Field) when + Field == "get_connector"; + Field == "put_connector"; + Field == "post_connector" +-> + Fields = + emqx_bridge_cassandra_connector:fields("connector") ++ + emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts), + emqx_connector_schema:api_fields(Field, ?CONNECTOR_TYPE, Fields); +fields(Field) when + Field == "get_bridge_v2"; + Field == "post_bridge_v2"; + Field 
== "put_bridge_v2" +-> + emqx_bridge_v2_schema:api_fields(Field, ?ACTION_TYPE, fields(cassandra_action)); fields("config") -> [ + cql_field(), {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, - {cql, - mk( - binary(), - #{desc => ?DESC("cql_template"), default => ?DEFAULT_CQL, format => <<"sql">>} - )}, {local_topic, mk( binary(), @@ -99,8 +176,23 @@ fields("get") -> fields("post", Type) -> [type_field(Type), name_field() | fields("config")]. +cql_field() -> + {cql, + mk( + binary(), + #{desc => ?DESC("cql_template"), default => ?DEFAULT_CQL, format => <<"sql">>} + )}. + desc("config") -> ?DESC("desc_config"); +desc(cassandra_action) -> + ?DESC(cassandra_action); +desc(action_parameters) -> + ?DESC(action_parameters); +desc("config_connector") -> + ?DESC("desc_config"); +desc(connector_resource_opts) -> + ?DESC(emqx_resource_schema, "resource_opts"); desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> ["Configuration for Cassandra using `", string:to_upper(Method), "` method."]; desc(_) -> diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_action_info.erl b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_action_info.erl new file mode 100644 index 000000000..14db7cf50 --- /dev/null +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_action_info.erl @@ -0,0 +1,62 @@ +-module(emqx_bridge_cassandra_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_config_to_action_config/2, + bridge_v1_config_to_connector_config/1, + connector_action_config_to_bridge_v1_config/2, + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +-import(emqx_utils_conv, [bin/1]). + +-define(SCHEMA_MODULE, emqx_bridge_cassandra). 
+ +bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName) -> + ActionTopLevelKeys = schema_keys(cassandra_action), + ActionParametersKeys = schema_keys(action_parameters), + ActionKeys = ActionTopLevelKeys ++ ActionParametersKeys, + ActionConfig = make_config_map(ActionKeys, ActionParametersKeys, BridgeV1Config), + emqx_utils_maps:update_if_present( + <<"resource_opts">>, + fun emqx_bridge_v2_schema:project_to_actions_resource_opts/1, + ActionConfig#{<<"connector">> => ConnectorName} + ). + +bridge_v1_config_to_connector_config(BridgeV1Config) -> + ActionTopLevelKeys = schema_keys(cassandra_action), + ActionParametersKeys = schema_keys(action_parameters), + ActionKeys = ActionTopLevelKeys ++ ActionParametersKeys, + ConnectorTopLevelKeys = schema_keys("config_connector"), + ConnectorKeys = maps:keys(BridgeV1Config) -- (ActionKeys -- ConnectorTopLevelKeys), + ConnConfig0 = maps:with(ConnectorKeys, BridgeV1Config), + emqx_utils_maps:update_if_present( + <<"resource_opts">>, + fun emqx_connector_schema:project_to_connector_resource_opts/1, + ConnConfig0 + ). + +connector_action_config_to_bridge_v1_config(ConnectorRawConf, ActionRawConf) -> + RawConf = emqx_action_info:connector_action_config_to_bridge_v1_config( + ConnectorRawConf, ActionRawConf + ), + maps:without([<<"cassandra_type">>], RawConf). + +bridge_v1_type_name() -> cassandra. + +action_type_name() -> cassandra. + +connector_type_name() -> cassandra. + +schema_module() -> ?SCHEMA_MODULE. + +make_config_map(PickKeys, IndentKeys, Config) -> + Conf0 = maps:with(PickKeys, Config), + emqx_utils_maps:indent(<<"parameters">>, IndentKeys, Conf0). + +schema_keys(Name) -> + [bin(Key) || Key <- proplists:get_keys(?SCHEMA_MODULE:fields(Name))]. 
diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl index c6bc7098c..c9d3246d3 100644 --- a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl @@ -14,13 +14,17 @@ -include_lib("snabbkaffe/include/snabbkaffe.hrl"). %% schema --export([roots/0, fields/1]). +-export([roots/0, fields/1, desc/1, namespace/0]). %% callbacks of behaviour emqx_resource -export([ callback_mode/0, on_start/2, on_stop/2, + on_add_channel/4, + on_remove_channel/3, + on_get_channel_status/3, + on_get_channels/1, on_query/3, on_query_async/4, on_batch_query/3, @@ -39,16 +43,10 @@ -export([do_get_status/1]). --type prepares() :: #{atom() => binary()}. --type params_tokens() :: #{atom() => list()}. - -type state() :: #{ pool_name := binary(), - prepare_cql := prepares(), - params_tokens := params_tokens(), - %% returned by ecql:prepare/2 - prepare_statement := binary() + channels := #{} }. -define(DEFAULT_SERVER_OPTION, #{default_port => ?CASSANDRA_DEFAULT_PORT}). @@ -56,13 +54,17 @@ %%-------------------------------------------------------------------- %% schema +namespace() -> cassandra. + roots() -> [{config, #{type => hoconsc:ref(?MODULE, config)}}]. fields(config) -> cassandra_db_fields() ++ emqx_connector_schema_lib:ssl_fields() ++ - emqx_connector_schema_lib:prepare_statement_fields(). + emqx_connector_schema_lib:prepare_statement_fields(); +fields("connector") -> + cassandra_db_fields() ++ emqx_connector_schema_lib:ssl_fields(). cassandra_db_fields() -> [ @@ -83,6 +85,11 @@ keyspace(desc) -> ?DESC("keyspace"); keyspace(required) -> true; keyspace(_) -> undefined. +desc(config) -> + ?DESC("config"); +desc("connector") -> + ?DESC("connector"). 
+ %%-------------------------------------------------------------------- %% callbacks for emqx_resource @@ -130,10 +137,9 @@ on_start( false -> [] end, - State = parse_prepare_cql(Config), case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of ok -> - {ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})}; + {ok, #{pool_name => InstId, channels => #{}}}; {error, Reason} -> ?tp( cassandra_connector_start_failed, @@ -149,23 +155,49 @@ on_stop(InstId, _State) -> }), emqx_resource_pool:stop(InstId). +on_add_channel(_InstId, #{channels := Channs} = OldState, ChannId, ChannConf0) -> + #{parameters := #{cql := CQL}} = ChannConf0, + {PrepareCQL, ParamsTokens} = emqx_placeholder:preproc_sql(CQL, '?'), + ParsedCql = #{ + prepare_key => make_prepare_key(ChannId), + prepare_cql => PrepareCQL, + params_tokens => ParamsTokens + }, + NewChanns = Channs#{ChannId => #{parsed_cql => ParsedCql, prepare_result => not_prepared}}, + {ok, OldState#{channels => NewChanns}}. + +on_remove_channel(_InstanceId, #{channels := Channels} = State, ChannId) -> + NewState = State#{channels => maps:remove(ChannId, Channels)}, + {ok, NewState}. + +on_get_channel_status(InstanceId, ChannId, #{channels := Channels, pool_name := PoolName} = State) -> + case on_get_status(InstanceId, State) of + connected -> + #{parsed_cql := ParsedCql} = maps:get(ChannId, Channels), + case prepare_cql_to_cassandra(ParsedCql, PoolName) of + {ok, _} -> connected; + {error, Reason} -> {connecting, Reason} + end; + _ -> + connecting + end. + +on_get_channels(InstanceId) -> + emqx_bridge_v2:get_channels_for_connector(InstanceId). + -type request() :: % emqx_bridge.erl - {send_message, Params :: map()} + {ChannId :: binary(), Params :: map()} % common query - | {query, SQL :: binary()} - | {query, SQL :: binary(), Params :: map()}. + | {query, CQL :: binary()} + | {query, CQL :: binary(), Params :: map()}. 
-spec on_query( emqx_resource:resource_id(), request(), state() ) -> ok | {ok, ecql:cql_result()} | {error, {recoverable_error | unrecoverable_error, term()}}. -on_query( - InstId, - Request, - State -) -> +on_query(InstId, Request, State) -> do_single_query(InstId, Request, sync, State). -spec on_query_async( @@ -174,21 +206,11 @@ on_query( {function(), list()}, state() ) -> ok | {error, {recoverable_error | unrecoverable_error, term()}}. -on_query_async( - InstId, - Request, - Callback, - State -) -> +on_query_async(InstId, Request, Callback, State) -> do_single_query(InstId, Request, {async, Callback}, State). -do_single_query( - InstId, - Request, - Async, - #{pool_name := PoolName} = State -) -> - {Type, PreparedKeyOrSQL, Params} = parse_request_to_cql(Request), +do_single_query(InstId, Request, Async, #{pool_name := PoolName} = State) -> + {Type, PreparedKeyOrCQL, Params} = parse_request_to_cql(Request), ?tp( debug, cassandra_connector_received_cql_query, @@ -196,12 +218,12 @@ do_single_query( connector => InstId, type => Type, params => Params, - prepared_key_or_cql => PreparedKeyOrSQL, + prepared_key_or_cql => PreparedKeyOrCQL, state => State } ), - {PreparedKeyOrSQL1, Data} = proc_cql_params(Type, PreparedKeyOrSQL, Params, State), - Res = exec_cql_query(InstId, PoolName, Type, Async, PreparedKeyOrSQL1, Data), + {PreparedKeyOrCQL1, Data} = proc_cql_params(Type, PreparedKeyOrCQL, Params, State), + Res = exec_cql_query(InstId, PoolName, Type, Async, PreparedKeyOrCQL1, Data), handle_result(Res). -spec on_batch_query( @@ -209,11 +231,7 @@ do_single_query( [request()], state() ) -> ok | {error, {recoverable_error | unrecoverable_error, term()}}. -on_batch_query( - InstId, - Requests, - State -) -> +on_batch_query(InstId, Requests, State) -> do_batch_query(InstId, Requests, sync, State). -spec on_batch_query_async( @@ -222,25 +240,15 @@ on_batch_query( {function(), list()}, state() ) -> ok | {error, {recoverable_error | unrecoverable_error, term()}}. 
-on_batch_query_async( - InstId, - Requests, - Callback, - State -) -> +on_batch_query_async(InstId, Requests, Callback, State) -> do_batch_query(InstId, Requests, {async, Callback}, State). -do_batch_query( - InstId, - Requests, - Async, - #{pool_name := PoolName} = State -) -> +do_batch_query(InstId, Requests, Async, #{pool_name := PoolName} = State) -> CQLs = lists:map( fun(Request) -> - {Type, PreparedKeyOrSQL, Params} = parse_request_to_cql(Request), - proc_cql_params(Type, PreparedKeyOrSQL, Params, State) + {Type, PreparedKeyOrCQL, Params} = parse_request_to_cql(Request), + proc_cql_params(Type, PreparedKeyOrCQL, Params, State) end, Requests ), @@ -256,26 +264,38 @@ do_batch_query( Res = exec_cql_batch_query(InstId, PoolName, Async, CQLs), handle_result(Res). -parse_request_to_cql({send_message, Params}) -> - {prepared_query, _Key = send_message, Params}; -parse_request_to_cql({query, SQL}) -> - parse_request_to_cql({query, SQL, #{}}); -parse_request_to_cql({query, SQL, Params}) -> - {query, SQL, Params}. +parse_request_to_cql({query, CQL}) -> + {query, CQL, #{}}; +parse_request_to_cql({query, CQL, Params}) -> + {query, CQL, Params}; +parse_request_to_cql({ChannId, Params}) -> + {prepared_query, ChannId, Params}. -proc_cql_params( - prepared_query, - PreparedKey0, - Params, - #{prepare_statement := Prepares, params_tokens := ParamsTokens} -) -> - %% assert - _PreparedKey = maps:get(PreparedKey0, Prepares), - Tokens = maps:get(PreparedKey0, ParamsTokens), - {PreparedKey0, assign_type_for_params(emqx_placeholder:proc_sql(Tokens, Params))}; -proc_cql_params(query, SQL, Params, _State) -> - {SQL1, Tokens} = emqx_placeholder:preproc_sql(SQL, '?'), - {SQL1, assign_type_for_params(emqx_placeholder:proc_sql(Tokens, Params))}. 
+proc_cql_params(prepared_query, ChannId, Params, #{channels := Channs}) -> + #{ + parsed_cql := #{ + prepare_key := PrepareKey, + params_tokens := ParamsTokens + } + } = maps:get(ChannId, Channs), + {PrepareKey, assign_type_for_params(proc_sql(ParamsTokens, Params))}; +proc_cql_params(query, CQL, Params, _State) -> + {CQL1, Tokens} = emqx_placeholder:preproc_sql(CQL, '?'), + {CQL1, assign_type_for_params(proc_sql(Tokens, Params))}. + +proc_sql(Tokens, Params) -> + VarTrans = fun + (null) -> null; + (X) -> emqx_placeholder:sql_data(X) + end, + emqx_placeholder:proc_tmpl( + Tokens, + Params, + #{ + return => rawlist, + var_trans => VarTrans + } + ). exec_cql_query(InstId, PoolName, Type, Async, PreparedKey, Data) when Type == query; Type == prepared_query @@ -314,38 +334,15 @@ exec_cql_batch_query(InstId, PoolName, Async, CQLs) -> exec(PoolName, Query) -> ecpool:pick_and_do(PoolName, Query, no_handover). -on_get_status(_InstId, #{pool_name := PoolName} = State) -> +on_get_status(_InstId, #{pool_name := PoolName}) -> case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of - true -> - case do_check_prepares(State) of - ok -> - connected; - {ok, NState} -> - %% return new state with prepared statements - {connected, NState}; - false -> - %% do not log error, it is logged in prepare_cql_to_conn - connecting - end; - false -> - connecting + true -> connected; + false -> connecting end. do_get_status(Conn) -> ok == element(1, ecql:query(Conn, "SELECT cluster_name FROM system.local")). -do_check_prepares(#{prepare_cql := Prepares}) when is_map(Prepares) -> - ok; -do_check_prepares(State = #{pool_name := PoolName, prepare_cql := {error, Prepares}}) -> - %% retry to prepare - case prepare_cql(Prepares, PoolName) of - {ok, Sts} -> - %% remove the error - {ok, State#{prepare_cql => Prepares, prepare_statement := Sts}}; - _Error -> - false - end. 
- %%-------------------------------------------------------------------- %% callbacks query @@ -394,88 +391,50 @@ conn_opts([Opt | Opts], Acc) -> %%-------------------------------------------------------------------- %% prepare - -%% XXX: hardcode -%% note: the `cql` param is passed by emqx_bridge_cassandra -parse_prepare_cql(#{cql := SQL}) -> - parse_prepare_cql([{send_message, SQL}], #{}, #{}); -parse_prepare_cql(_) -> - #{prepare_cql => #{}, params_tokens => #{}}. - -parse_prepare_cql([{Key, H} | T], Prepares, Tokens) -> - {PrepareSQL, ParamsTokens} = emqx_placeholder:preproc_sql(H, '?'), - parse_prepare_cql( - T, Prepares#{Key => PrepareSQL}, Tokens#{Key => ParamsTokens} - ); -parse_prepare_cql([], Prepares, Tokens) -> - #{ - prepare_cql => Prepares, - params_tokens => Tokens - }. - -init_prepare(State = #{prepare_cql := Prepares, pool_name := PoolName}) -> - case maps:size(Prepares) of - 0 -> - State; - _ -> - case prepare_cql(Prepares, PoolName) of - {ok, Sts} -> - State#{prepare_statement := Sts}; - Error -> - ?tp( - error, - cassandra_prepare_cql_failed, - #{prepares => Prepares, reason => Error} - ), - %% mark the prepare_cql as failed - State#{prepare_cql => {error, Prepares}} - end - end. - -prepare_cql(Prepares, PoolName) when is_map(Prepares) -> - prepare_cql(maps:to_list(Prepares), PoolName); -prepare_cql(Prepares, PoolName) -> - case do_prepare_cql(Prepares, PoolName) of - {ok, _Sts} = Ok -> +prepare_cql_to_cassandra(ParsedCql, PoolName) -> + case prepare_cql_to_cassandra(ecpool:workers(PoolName), ParsedCql, #{}) of + {ok, Statement} -> %% prepare for reconnect - ecpool:add_reconnect_callback(PoolName, {?MODULE, prepare_cql_to_conn, [Prepares]}), - Ok; + ecpool:add_reconnect_callback(PoolName, {?MODULE, prepare_cql_to_conn, [ParsedCql]}), + {ok, Statement}; Error -> + ?tp( + error, + cassandra_prepare_cql_failed, + #{parsed_cql => ParsedCql, reason => Error} + ), Error end. 
-do_prepare_cql(Prepares, PoolName) -> - do_prepare_cql(ecpool:workers(PoolName), Prepares, #{}). - -do_prepare_cql([{_Name, Worker} | T], Prepares, _LastSts) -> +prepare_cql_to_cassandra([{_Name, Worker} | T], ParsedCql, _LastSts) -> {ok, Conn} = ecpool_worker:client(Worker), - case prepare_cql_to_conn(Conn, Prepares) of - {ok, Sts} -> - do_prepare_cql(T, Prepares, Sts); + case prepare_cql_to_conn(Conn, ParsedCql) of + {ok, Statement} -> + prepare_cql_to_cassandra(T, ParsedCql, Statement); Error -> Error end; -do_prepare_cql([], _Prepares, LastSts) -> +prepare_cql_to_cassandra([], _ParsedCql, LastSts) -> {ok, LastSts}. -prepare_cql_to_conn(Conn, Prepares) -> - prepare_cql_to_conn(Conn, Prepares, #{}). - -prepare_cql_to_conn(Conn, [], Statements) when is_pid(Conn) -> {ok, Statements}; -prepare_cql_to_conn(Conn, [{Key, SQL} | PrepareList], Statements) when is_pid(Conn) -> - ?SLOG(info, #{msg => "cassandra_prepare_cql", name => Key, prepare_cql => SQL}), - case ecql:prepare(Conn, Key, SQL) of +prepare_cql_to_conn(Conn, #{prepare_key := PrepareKey, prepare_cql := PrepareCQL}) when + is_pid(Conn) +-> + ?SLOG(info, #{ + msg => "cassandra_prepare_cql", prepare_key => PrepareKey, prepare_cql => PrepareCQL + }), + case ecql:prepare(Conn, PrepareKey, PrepareCQL) of {ok, Statement} -> - prepare_cql_to_conn(Conn, PrepareList, Statements#{Key => Statement}); - {error, Error} = Other -> + {ok, Statement}; + {error, Reason} = Error -> ?SLOG(error, #{ msg => "cassandra_prepare_cql_failed", worker_pid => Conn, - name => Key, - prepare_cql => SQL, - error => Error + name => PrepareKey, + prepare_cql => PrepareCQL, + reason => Reason }), - Other + Error end. handle_result({error, disconnected}) -> @@ -513,3 +472,6 @@ maybe_assign_type(V) when is_integer(V) -> maybe_assign_type(V) when is_float(V) -> {double, V}; maybe_assign_type(V) -> V. + +make_prepare_key(ChannId) -> + ChannId. 
diff --git a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl index 9df219296..18d6993b3 100644 --- a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl +++ b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl @@ -11,6 +11,16 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). +%% To run this test locally: +%% ./scripts/ct/run.sh --app apps/emqx_bridge_cassandra --only-up +%% PROFILE=emqx-enterprise PROXY_HOST=localhost CASSA_TLS_HOST=localhost \ +%% CASSA_TLS_PORT=19142 CASSA_TCP_HOST=localhost CASSA_TCP_NO_AUTH_HOST=localhost \ +%% CASSA_TCP_PORT=19042 CASSA_TCP_NO_AUTH_PORT=19043 \ +%% ./rebar3 ct --name 'test@127.0.0.1' -v --suite \ +%% apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE + +-import(emqx_common_test_helpers, [on_exit/1]). + % SQL definitions -define(SQL_BRIDGE, "insert into mqtt_msg_test(topic, payload, arrived) " @@ -121,12 +131,15 @@ init_per_group(_Group, Config) -> Config. end_per_group(Group, Config) when - Group == without_batch; Group == without_batch + Group == with_batch; + Group == without_batch -> connect_and_drop_table(Config), ProxyHost = ?config(proxy_host, Config), ProxyPort = ?config(proxy_port, Config), emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + Apps = ?config(apps, Config), + emqx_cth_suite:stop(Apps), ok; end_per_group(_Group, _Config) -> ok. @@ -152,6 +165,7 @@ end_per_testcase(_Testcase, Config) -> ok = snabbkaffe:stop(), connect_and_clear_table(Config), delete_bridge(Config), + emqx_common_test_helpers:call_janitor(), ok. 
%%------------------------------------------------------------------------------ @@ -169,19 +183,32 @@ common_init(Config0) -> ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), - % Ensure EE bridge module is loaded - ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), - _ = emqx_bridge_enterprise:module_info(), - emqx_mgmt_api_test_util:init_suite(), + Apps = emqx_cth_suite:start( + [ + emqx, + emqx_conf, + emqx_bridge_cassandra, + emqx_bridge, + emqx_rule_engine, + emqx_management, + {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"} + ], + #{work_dir => emqx_cth_suite:work_dir(Config0)} + ), + {ok, _Api} = emqx_common_test_http:create_default_app(), % Connect to cassnadra directly and create the table catch connect_and_drop_table(Config0), connect_and_create_table(Config0), {Name, CassaConf} = cassa_config(BridgeType, Config0), Config = [ + {apps, Apps}, {cassa_config, CassaConf}, {cassa_bridge_type, BridgeType}, {cassa_name, Name}, + {bridge_type, BridgeType}, + {bridge_name, Name}, + {bridge_config, CassaConf}, {proxy_host, ProxyHost}, {proxy_port, ProxyPort} | Config0 @@ -293,17 +320,24 @@ send_message(Config, Payload) -> query_resource(Config, Request) -> Name = ?config(cassa_name, Config), BridgeType = ?config(cassa_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - emqx_resource:query(ResourceID, Request, #{timeout => 1_000}). + BridgeV2Id = emqx_bridge_v2:id(BridgeType, Name), + ConnectorResId = emqx_connector_resource:resource_id(BridgeType, Name), + emqx_resource:query(BridgeV2Id, Request, #{ + timeout => 1_000, connector_resource_id => ConnectorResId + }). query_resource_async(Config, Request) -> Name = ?config(cassa_name, Config), BridgeType = ?config(cassa_bridge_type, Config), Ref = alias([reply]), AsyncReplyFun = fun(Result) -> Ref ! 
{result, Ref, Result} end, - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - Return = emqx_resource:query(ResourceID, Request, #{ - timeout => 500, async_reply_fun => {AsyncReplyFun, []} + BridgeV2Id = emqx_bridge_v2:id(BridgeType, Name), + ConnectorResId = emqx_connector_resource:resource_id(BridgeType, Name), + Return = emqx_resource:query(BridgeV2Id, Request, #{ + timeout => 500, + async_reply_fun => {AsyncReplyFun, []}, + connector_resource_id => ConnectorResId, + query_mode => async }), {Return, Ref}. @@ -345,13 +379,19 @@ connect_direct_cassa(Config) -> % These funs connect and then stop the cassandra connection connect_and_create_table(Config) -> + connect_and_create_table(Config, ?SQL_CREATE_TABLE). + +connect_and_create_table(Config, SQL) -> with_direct_conn(Config, fun(Conn) -> - {ok, _} = ecql:query(Conn, ?SQL_CREATE_TABLE) + {ok, _} = ecql:query(Conn, SQL) end). connect_and_drop_table(Config) -> + connect_and_drop_table(Config, ?SQL_DROP_TABLE). + +connect_and_drop_table(Config, SQL) -> with_direct_conn(Config, fun(Conn) -> - {ok, _} = ecql:query(Conn, ?SQL_DROP_TABLE) + {ok, _} = ecql:query(Conn, SQL) end). connect_and_clear_table(Config) -> @@ -360,8 +400,11 @@ connect_and_clear_table(Config) -> end). connect_and_get_payload(Config) -> + connect_and_get_payload(Config, ?SQL_SELECT). + +connect_and_get_payload(Config, SQL) -> with_direct_conn(Config, fun(Conn) -> - {ok, {_Keyspace, _ColsSpec, [[Result]]}} = ecql:query(Conn, ?SQL_SELECT), + {ok, {_Keyspace, _ColsSpec, [[Result]]}} = ecql:query(Conn, SQL), Result end). @@ -670,3 +713,48 @@ t_nasty_sql_string(Config) -> %% XXX: why ok instead of {ok, AffectedLines}? ?assertEqual(ok, send_message(Config, Message)), ?assertEqual(Payload, connect_and_get_payload(Config)). 
+ +t_insert_null_into_int_column(Config) -> + BridgeType = ?config(bridge_type, Config), + connect_and_create_table( + Config, + << + "CREATE TABLE mqtt.mqtt_msg_test2 (\n" + " topic text,\n" + " payload text,\n" + " arrived timestamp,\n" + " x int,\n" + " PRIMARY KEY (topic)\n" + ")" + >> + ), + on_exit(fun() -> connect_and_drop_table(Config, "DROP TABLE mqtt.mqtt_msg_test2") end), + {ok, {{_, 201, _}, _, _}} = + emqx_bridge_testlib:create_bridge_api( + Config, + #{ + <<"cql">> => << + "insert into mqtt_msg_test2(topic, payload, x, arrived) " + "values (${topic}, ${payload}, ${x}, ${timestamp})" + >> + } + ), + RuleTopic = <<"t/c">>, + Opts = #{ + sql => <<"select *, first(jq('null', payload)) as x from \"", RuleTopic/binary, "\"">> + }, + {ok, _} = emqx_bridge_testlib:create_rule_and_action_http(BridgeType, RuleTopic, Config, Opts), + + Payload = <<"{}">>, + Msg = emqx_message:make(RuleTopic, Payload), + {_, {ok, _}} = + ?wait_async_action( + emqx:publish(Msg), + #{?snk_kind := cassandra_connector_query_return}, + 10_000 + ), + + %% Would return `1853189228' if it encodes `null' as an integer... + ?assertEqual(null, connect_and_get_payload(Config, "select x from mqtt.mqtt_msg_test2")), + + ok. diff --git a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl index de306e3f0..50d82397a 100644 --- a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl +++ b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl @@ -14,19 +14,15 @@ -include_lib("emqx/include/emqx.hrl"). -include_lib("stdlib/include/assert.hrl"). -%% Cassandra servers are defined at `.ci/docker-compose-file/docker-compose-cassandra.yaml` -%% You can change it to `127.0.0.1`, if you run this SUITE locally --define(CASSANDRA_HOST, "cassandra"). --define(CASSANDRA_HOST_NOAUTH, "cassandra_noauth"). --define(CASSANDRA_RESOURCE_MOD, emqx_bridge_cassandra_connector). 
+%% To run this test locally: +%% ./scripts/ct/run.sh --app apps/emqx_bridge_cassandra --only-up +%% PROFILE=emqx-enterprise PROXY_HOST=localhost CASSA_TLS_HOST=localhost \ +%% CASSA_TLS_PORT=9142 CASSA_TCP_HOST=localhost CASSA_TCP_NO_AUTH_HOST=localhost \ +%% CASSA_TCP_PORT=19042 CASSA_TCP_NO_AUTH_PORT=19043 \ +%% ./rebar3 ct --name 'test@127.0.0.1' -v --suite \ +%% apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE -%% This test SUITE requires a running cassandra instance. If you don't want to -%% bring up the whole CI infrastuctucture with the `scripts/ct/run.sh` script -%% you can create a cassandra instance with the following command (execute it -%% from root of the EMQX directory.). You also need to set ?CASSANDRA_HOST and -%% ?CASSANDRA_PORT to appropriate values. -%% -%% sudo docker run --rm -d --name cassandra --network host cassandra:3.11.14 +-define(CASSANDRA_RESOURCE_MOD, emqx_bridge_cassandra_connector). %% Cassandra default username & password once enable `authenticator: PasswordAuthenticator` %% in cassandra config @@ -45,14 +41,14 @@ groups() -> {noauth, [t_lifecycle]} ]. -cassandra_servers(CassandraHost) -> +cassandra_servers(CassandraHost, CassandraPort) -> lists:map( fun(#{hostname := Host, port := Port}) -> {Host, Port} end, emqx_schema:parse_servers( - iolist_to_binary([CassandraHost, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]), - #{default_port => ?CASSANDRA_DEFAULT_PORT} + iolist_to_binary([CassandraHost, ":", erlang:integer_to_list(CassandraPort)]), + #{default_port => CassandraPort} ) ). @@ -63,25 +59,30 @@ init_per_suite(Config) -> Config. 
init_per_group(Group, Config) -> - {CassandraHost, AuthOpts} = + {CassandraHost, CassandraPort, AuthOpts} = case Group of auth -> - {?CASSANDRA_HOST, [{username, ?CASSA_USERNAME}, {password, ?CASSA_PASSWORD}]}; + TcpHost = os:getenv("CASSA_TCP_HOST", "toxiproxy"), + TcpPort = list_to_integer(os:getenv("CASSA_TCP_PORT", "9042")), + {TcpHost, TcpPort, [{username, ?CASSA_USERNAME}, {password, ?CASSA_PASSWORD}]}; noauth -> - {?CASSANDRA_HOST_NOAUTH, []} + TcpHost = os:getenv("CASSA_TCP_NO_AUTH_HOST", "toxiproxy"), + TcpPort = list_to_integer(os:getenv("CASSA_TCP_NO_AUTH_PORT", "9043")), + {TcpHost, TcpPort, []} end, - case emqx_common_test_helpers:is_tcp_server_available(CassandraHost, ?CASSANDRA_DEFAULT_PORT) of + case emqx_common_test_helpers:is_tcp_server_available(CassandraHost, CassandraPort) of true -> %% keyspace `mqtt` must be created in advance {ok, Conn} = ecql:connect([ - {nodes, cassandra_servers(CassandraHost)}, + {nodes, cassandra_servers(CassandraHost, CassandraPort)}, {keyspace, "mqtt"} | AuthOpts ]), ecql:close(Conn), [ {cassa_host, CassandraHost}, + {cassa_port, CassandraPort}, {cassa_auth_opts, AuthOpts} | Config ]; @@ -212,6 +213,7 @@ create_local_resource(ResourceId, CheckedConfig) -> cassandra_config(Config) -> Host = ?config(cassa_host, Config), + Port = ?config(cassa_port, Config), AuthOpts = maps:from_list(?config(cassa_auth_opts, Config)), CassConfig = AuthOpts#{ @@ -223,7 +225,7 @@ cassandra_config(Config) -> "~s:~b", [ Host, - ?CASSANDRA_DEFAULT_PORT + Port ] ) ) diff --git a/apps/emqx_bridge_clickhouse/BSL.txt b/apps/emqx_bridge_clickhouse/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_clickhouse/BSL.txt +++ b/apps/emqx_bridge_clickhouse/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. 
-Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src index 85c035be1..d96d06375 100644 --- a/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src +++ b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_clickhouse, [ {description, "EMQX Enterprise ClickHouse Bridge"}, - {vsn, "0.2.4"}, + {vsn, "0.3.0"}, {registered, []}, {applications, [ kernel, @@ -8,7 +8,7 @@ emqx_resource, clickhouse ]}, - {env, []}, + {env, [{emqx_action_info_modules, [emqx_bridge_clickhouse_action_info]}]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.erl b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.erl index deca42154..1c7e786d8 100644 --- a/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.erl +++ b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.erl @@ -9,8 +9,11 @@ -import(hoconsc, [mk/2, enum/1, ref/2]). +%% Examples -export([ - conn_bridge_examples/1 + bridge_v2_examples/1, + conn_bridge_examples/1, + connector_examples/1 ]). -export([ @@ -20,12 +23,10 @@ desc/1 ]). --define(DEFAULT_SQL, - <<"INSERT INTO mqtt_test(payload, arrived) VALUES ('${payload}', ${timestamp})">> -). - +-define(DEFAULT_SQL, <<"INSERT INTO messages(data, arrived) VALUES ('${payload}', ${timestamp})">>). -define(DEFAULT_BATCH_VALUE_SEPARATOR, <<", ">>). - +-define(CONNECTOR_TYPE, clickhouse). +-define(ACTION_TYPE, clickhouse). %% ------------------------------------------------------------------------------------------------- %% Callback used by HTTP API %% ------------------------------------------------------------------------------------------------- @@ -40,6 +41,42 @@ conn_bridge_examples(Method) -> } ]. 
+bridge_v2_examples(Method) -> + ParamsExample = #{ + parameters => #{ + batch_value_separator => ?DEFAULT_BATCH_VALUE_SEPARATOR, + sql => ?DEFAULT_SQL + } + }, + [ + #{ + <<"clickhouse">> => #{ + summary => <<"ClickHouse Action">>, + value => emqx_bridge_v2_schema:action_values( + Method, clickhouse, clickhouse, ParamsExample + ) + } + } + ]. + +connector_examples(Method) -> + [ + #{ + <<"clickhouse">> => #{ + summary => <<"ClickHouse Connector">>, + value => emqx_connector_schema:connector_values( + Method, clickhouse, #{ + url => <<"http://localhost:8123">>, + database => <<"mqtt">>, + pool_size => 8, + username => <<"default">>, + password => <<"******">> + } + ) + } + } + ]. + values(_Method, Type) -> #{ enable => true, @@ -71,19 +108,49 @@ namespace() -> "bridge_clickhouse". roots() -> []. +fields("config_connector") -> + emqx_connector_schema:common_fields() ++ + emqx_bridge_clickhouse_connector:fields(config) ++ + emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts); +fields(action) -> + {clickhouse, + mk( + hoconsc:map(name, ref(?MODULE, clickhouse_action)), + #{desc => <<"ClickHouse Action Config">>, required => false} + )}; +fields(clickhouse_action) -> + emqx_bridge_v2_schema:make_producer_action_schema( + mk(ref(?MODULE, action_parameters), #{ + required => true, desc => ?DESC(action_parameters) + }) + ); +fields(action_parameters) -> + [ + sql_field(), + batch_value_separator_field() + ]; +fields(connector_resource_opts) -> + emqx_connector_schema:resource_opts_fields(); +fields(Field) when + Field == "get_connector"; + Field == "put_connector"; + Field == "post_connector" +-> + Fields = + emqx_bridge_clickhouse_connector:fields(config) ++ + emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts), + emqx_connector_schema:api_fields(Field, ?CONNECTOR_TYPE, Fields); +fields(Field) when + Field == "get_bridge_v2"; + Field == "post_bridge_v2"; + Field == "put_bridge_v2" +-> + emqx_bridge_v2_schema:api_fields(Field, 
?ACTION_TYPE, fields(clickhouse_action)); fields("config") -> [ {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, - {sql, - mk( - binary(), - #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>} - )}, - {batch_value_separator, - mk( - binary(), - #{desc => ?DESC("batch_value_separator"), default => ?DEFAULT_BATCH_VALUE_SEPARATOR} - )}, + sql_field(), + batch_value_separator_field(), {local_topic, mk( binary(), @@ -112,6 +179,28 @@ fields("get") -> fields("post", Type) -> [type_field(Type), name_field() | fields("config")]. +sql_field() -> + {sql, + mk( + binary(), + #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>} + )}. + +batch_value_separator_field() -> + {batch_value_separator, + mk( + binary(), + #{desc => ?DESC("batch_value_separator"), default => ?DEFAULT_BATCH_VALUE_SEPARATOR} + )}. + +desc(clickhouse_action) -> + ?DESC(clickhouse_action); +desc(action_parameters) -> + ?DESC(action_parameters); +desc("config_connector") -> + ?DESC("desc_config"); +desc(connector_resource_opts) -> + ?DESC(emqx_resource_schema, "resource_opts"); desc("config") -> ?DESC("desc_config"); desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> diff --git a/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse_action_info.erl b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse_action_info.erl new file mode 100644 index 000000000..066569f7e --- /dev/null +++ b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse_action_info.erl @@ -0,0 +1,62 @@ +-module(emqx_bridge_clickhouse_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_config_to_action_config/2, + bridge_v1_config_to_connector_config/1, + connector_action_config_to_bridge_v1_config/2, + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +-import(emqx_utils_conv, [bin/1]). + +-define(SCHEMA_MODULE, emqx_bridge_clickhouse). 
+ +bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName) -> + ActionTopLevelKeys = schema_keys(clickhouse_action), + ActionParametersKeys = schema_keys(action_parameters), + ActionKeys = ActionTopLevelKeys ++ ActionParametersKeys, + ActionConfig = make_config_map(ActionKeys, ActionParametersKeys, BridgeV1Config), + emqx_utils_maps:update_if_present( + <<"resource_opts">>, + fun emqx_bridge_v2_schema:project_to_actions_resource_opts/1, + ActionConfig#{<<"connector">> => ConnectorName} + ). + +bridge_v1_config_to_connector_config(BridgeV1Config) -> + ActionTopLevelKeys = schema_keys(clickhouse_action), + ActionParametersKeys = schema_keys(action_parameters), + ActionKeys = ActionTopLevelKeys ++ ActionParametersKeys, + ConnectorTopLevelKeys = schema_keys("config_connector"), + ConnectorKeys = maps:keys(BridgeV1Config) -- (ActionKeys -- ConnectorTopLevelKeys), + ConnConfig0 = maps:with(ConnectorKeys, BridgeV1Config), + emqx_utils_maps:update_if_present( + <<"resource_opts">>, + fun emqx_connector_schema:project_to_connector_resource_opts/1, + ConnConfig0 + ). + +connector_action_config_to_bridge_v1_config(ConnectorRawConf, ActionRawConf) -> + RawConf = emqx_action_info:connector_action_config_to_bridge_v1_config( + ConnectorRawConf, ActionRawConf + ), + maps:without([<<"clickhouse_type">>], RawConf). + +bridge_v1_type_name() -> clickhouse. + +action_type_name() -> clickhouse. + +connector_type_name() -> clickhouse. + +schema_module() -> ?SCHEMA_MODULE. + +make_config_map(PickKeys, IndentKeys, Config) -> + Conf0 = maps:with(PickKeys, Config), + emqx_utils_maps:indent(<<"parameters">>, IndentKeys, Conf0). + +schema_keys(Name) -> + [bin(Key) || Key <- proplists:get_keys(?SCHEMA_MODULE:fields(Name))]. 
diff --git a/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse_connector.erl b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse_connector.erl index 8f575dd8d..e7327b56c 100644 --- a/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse_connector.erl +++ b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse_connector.erl @@ -23,7 +23,8 @@ -export([ roots/0, fields/1, - values/1 + values/1, + namespace/0 ]). %% callbacks for behaviour emqx_resource @@ -31,6 +32,10 @@ callback_mode/0, on_start/2, on_stop/2, + on_add_channel/4, + on_remove_channel/3, + on_get_channel_status/3, + on_get_channels/1, on_query/3, on_batch_query/3, on_get_status/2 @@ -61,6 +66,7 @@ -type state() :: #{ + channels => #{binary() => templates()}, templates := templates(), pool_name := binary(), connect_timeout := pos_integer() @@ -72,6 +78,8 @@ %% Configuration and default values %%===================================================================== +namespace() -> clickhouse. + roots() -> [{config, #{type => hoconsc:ref(?MODULE, config)}}]. @@ -152,10 +160,9 @@ on_start( {pool, InstanceID} ], try - Templates = prepare_sql_templates(Config), State = #{ + channels => #{}, pool_name => InstanceID, - templates => Templates, connect_timeout => ConnectTimeout }, case emqx_resource_pool:start(InstanceID, ?MODULE, Options) of @@ -192,10 +199,8 @@ prepare_sql_templates(#{ sql := Template, batch_value_separator := Separator }) -> - InsertTemplate = - emqx_placeholder:preproc_tmpl(Template), - BulkExtendInsertTemplate = - prepare_sql_bulk_extend_template(Template, Separator), + InsertTemplate = emqx_placeholder:preproc_tmpl(Template), + BulkExtendInsertTemplate = prepare_sql_bulk_extend_template(Template, Separator), #{ send_message_template => InsertTemplate, extend_send_message_template => BulkExtendInsertTemplate @@ -282,6 +287,27 @@ on_stop(InstanceID, _State) -> }), emqx_resource_pool:stop(InstanceID). 
+%% ------------------------------------------------------------------- +%% channel related emqx_resouce callbacks +%% ------------------------------------------------------------------- +on_add_channel(_InstId, #{channels := Channs} = OldState, ChannId, ChannConf0) -> + #{parameters := ParamConf} = ChannConf0, + NewChanns = Channs#{ChannId => #{templates => prepare_sql_templates(ParamConf)}}, + {ok, OldState#{channels => NewChanns}}. + +on_remove_channel(_InstanceId, #{channels := Channels} = State, ChannId) -> + NewState = State#{channels => maps:remove(ChannId, Channels)}, + {ok, NewState}. + +on_get_channel_status(InstanceId, _ChannId, State) -> + case on_get_status(InstanceId, State) of + {connected, _} -> connected; + {disconnected, _, _} -> disconnected + end. + +on_get_channels(InstanceId) -> + emqx_bridge_v2:get_channels_for_connector(InstanceId). + %% ------------------------------------------------------------------- %% on_get_status emqx_resouce callback and related functions %% ------------------------------------------------------------------- @@ -336,8 +362,8 @@ do_get_status(PoolName, Timeout) -> -spec on_query (resource_id(), Request, resource_state()) -> query_result() when - Request :: {RequestType, Data}, - RequestType :: send_message, + Request :: {ChannId, Data}, + ChannId :: binary(), Data :: map(); (resource_id(), Request, resource_state()) -> query_result() when Request :: {RequestType, SQL}, @@ -358,12 +384,20 @@ on_query( }), %% Have we got a query or data to fit into an SQL template? SimplifiedRequestType = query_type(RequestType), - #{templates := Templates} = State, + Templates = get_templates(RequestType, State), SQL = get_sql(SimplifiedRequestType, Templates, DataOrSQL), ClickhouseResult = execute_sql_in_clickhouse_server(PoolName, SQL), transform_and_log_clickhouse_result(ClickhouseResult, ResourceID, SQL). 
-get_sql(send_message, #{send_message_template := PreparedSQL}, Data) -> +get_templates(ChannId, State) -> + case maps:find(channels, State) of + {ok, Channels} -> + maps:get(templates, maps:get(ChannId, Channels, #{}), #{}); + error -> + #{} + end. + +get_sql(channel_message, #{send_message_template := PreparedSQL}, Data) -> emqx_placeholder:proc_tmpl(PreparedSQL, Data); get_sql(_, _, SQL) -> SQL. @@ -373,24 +407,21 @@ query_type(sql) -> query_type(query) -> query; %% Data that goes to bridges use the prepared template -query_type(send_message) -> - send_message. +query_type(ChannId) when is_binary(ChannId) -> + channel_message. %% ------------------------------------------------------------------- %% on_batch_query emqx_resouce callback and related functions %% ------------------------------------------------------------------- -spec on_batch_query(resource_id(), BatchReq, resource_state()) -> query_result() when - BatchReq :: nonempty_list({'send_message', map()}). + BatchReq :: nonempty_list({binary(), map()}). -on_batch_query( - ResourceID, - BatchReq, - #{pool_name := PoolName, templates := Templates} = _State -) -> - %% Currently we only support batch requests with the send_message key - {Keys, ObjectsToInsert} = lists:unzip(BatchReq), - ensure_keys_are_of_type_send_message(Keys), +on_batch_query(ResourceID, BatchReq, #{pool_name := PoolName} = State) -> + %% Currently we only support batch requests with a binary ChannId + {[ChannId | _] = Keys, ObjectsToInsert} = lists:unzip(BatchReq), + ensure_channel_messages(Keys), + Templates = get_templates(ChannId, State), %% Create batch insert SQL statement SQL = objects_to_sql(ObjectsToInsert, Templates), %% Do the actual query in the database @@ -398,22 +429,16 @@ on_batch_query( %% Transform the result to a better format transform_and_log_clickhouse_result(ResultFromClickhouse, ResourceID, SQL). 
-ensure_keys_are_of_type_send_message(Keys) -> - case lists:all(fun is_send_message_atom/1, Keys) of +ensure_channel_messages(Keys) -> + case lists:all(fun is_binary/1, Keys) of true -> ok; false -> erlang:error( - {unrecoverable_error, - <<"Unexpected type for batch message (Expected send_message)">>} + {unrecoverable_error, <<"Unexpected type for batch message (Expected channel-id)">>} ) end. -is_send_message_atom(send_message) -> - true; -is_send_message_atom(_) -> - false. - objects_to_sql( [FirstObject | RemainingObjects] = _ObjectsToInsert, #{ diff --git a/apps/emqx_bridge_clickhouse/test/emqx_bridge_clickhouse_SUITE.erl b/apps/emqx_bridge_clickhouse/test/emqx_bridge_clickhouse_SUITE.erl index 8cfc24882..d83321d27 100644 --- a/apps/emqx_bridge_clickhouse/test/emqx_bridge_clickhouse_SUITE.erl +++ b/apps/emqx_bridge_clickhouse/test/emqx_bridge_clickhouse_SUITE.erl @@ -9,6 +9,7 @@ -define(APP, emqx_bridge_clickhouse). -define(CLICKHOUSE_HOST, "clickhouse"). +-define(CLICKHOUSE_PORT, "8123"). -include_lib("emqx_connector/include/emqx_connector.hrl"). %% See comment in @@ -20,9 +21,9 @@ %%------------------------------------------------------------------------------ init_per_suite(Config) -> - case - emqx_common_test_helpers:is_tcp_server_available(?CLICKHOUSE_HOST, ?CLICKHOUSE_DEFAULT_PORT) - of + Host = clickhouse_host(), + Port = list_to_integer(clickhouse_port()), + case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of true -> emqx_common_test_helpers:render_and_load_app_config(emqx_conf), ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), @@ -114,13 +115,15 @@ sql_drop_table() -> sql_create_database() -> "CREATE DATABASE IF NOT EXISTS mqtt". +clickhouse_host() -> + os:getenv("CLICKHOUSE_HOST", ?CLICKHOUSE_HOST). +clickhouse_port() -> + os:getenv("CLICKHOUSE_PORT", ?CLICKHOUSE_PORT). 
+ clickhouse_url() -> - erlang:iolist_to_binary([ - <<"http://">>, - ?CLICKHOUSE_HOST, - ":", - erlang:integer_to_list(?CLICKHOUSE_DEFAULT_PORT) - ]). + Host = clickhouse_host(), + Port = clickhouse_port(), + erlang:iolist_to_binary(["http://", Host, ":", Port]). clickhouse_config(Config) -> SQL = maps:get(sql, Config, sql_insert_template_for_bridge()), diff --git a/apps/emqx_bridge_clickhouse/test/emqx_bridge_clickhouse_connector_SUITE.erl b/apps/emqx_bridge_clickhouse/test/emqx_bridge_clickhouse_connector_SUITE.erl index e1d3149db..e9eb6c7a2 100644 --- a/apps/emqx_bridge_clickhouse/test/emqx_bridge_clickhouse_connector_SUITE.erl +++ b/apps/emqx_bridge_clickhouse/test/emqx_bridge_clickhouse_connector_SUITE.erl @@ -13,7 +13,6 @@ -include_lib("common_test/include/ct.hrl"). -define(APP, emqx_bridge_clickhouse). --define(CLICKHOUSE_HOST, "clickhouse"). -define(CLICKHOUSE_RESOURCE_MOD, emqx_bridge_clickhouse_connector). -define(CLICKHOUSE_PASSWORD, "public"). @@ -39,25 +38,17 @@ all() -> groups() -> []. -clickhouse_url() -> - erlang:iolist_to_binary([ - <<"http://">>, - ?CLICKHOUSE_HOST, - ":", - erlang:integer_to_list(?CLICKHOUSE_DEFAULT_PORT) - ]). 
- init_per_suite(Config) -> - case - emqx_common_test_helpers:is_tcp_server_available(?CLICKHOUSE_HOST, ?CLICKHOUSE_DEFAULT_PORT) - of + Host = emqx_bridge_clickhouse_SUITE:clickhouse_host(), + Port = list_to_integer(emqx_bridge_clickhouse_SUITE:clickhouse_port()), + case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of true -> ok = emqx_common_test_helpers:start_apps([emqx_conf]), ok = emqx_connector_test_helpers:start_apps([emqx_resource, ?APP]), %% Create the db table {ok, Conn} = clickhouse:start_link([ - {url, clickhouse_url()}, + {url, emqx_bridge_clickhouse_SUITE:clickhouse_url()}, {user, <<"default">>}, {key, ?CLICKHOUSE_PASSWORD}, {pool, tmp_pool} @@ -205,15 +196,7 @@ clickhouse_config(Overrides) -> username => <<"default">>, password => <<?CLICKHOUSE_PASSWORD>>, pool_size => 8, - url => iolist_to_binary( - io_lib:format( - "http://~s:~b", - [ - ?CLICKHOUSE_HOST, - ?CLICKHOUSE_DEFAULT_PORT - ] - ) - ), + url => emqx_bridge_clickhouse_SUITE:clickhouse_url(), connect_timeout => <<"10s">> }, #{<<"config">> => maps:merge(Config, Overrides)}. diff --git a/apps/emqx_bridge_confluent/BSL.txt b/apps/emqx_bridge_confluent/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_confluent/BSL.txt +++ b/apps/emqx_bridge_confluent/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. -Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_confluent/rebar.config b/apps/emqx_bridge_confluent/rebar.config index 5e4719106..0519e39c9 100644 --- a/apps/emqx_bridge_confluent/rebar.config +++ b/apps/emqx_bridge_confluent/rebar.config @@ -2,8 +2,8 @@ {erl_opts, [debug_info]}.
{deps, [ - {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.9.1"}}}, - {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.3"}}}, + {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.10.2"}}}, + {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.5"}}}, {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.1"}}}, {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}}, {snappyer, "1.2.9"}, diff --git a/apps/emqx_bridge_confluent/test/emqx_bridge_confluent_producer_SUITE.erl b/apps/emqx_bridge_confluent/test/emqx_bridge_confluent_producer_SUITE.erl index 420da1275..f530749ac 100644 --- a/apps/emqx_bridge_confluent/test/emqx_bridge_confluent_producer_SUITE.erl +++ b/apps/emqx_bridge_confluent/test/emqx_bridge_confluent_producer_SUITE.erl @@ -212,10 +212,7 @@ serde_roundtrip(InnerConfigMap0) -> InnerConfigMap. parse_and_check_bridge_config(InnerConfigMap, Name) -> - TypeBin = ?ACTION_TYPE_BIN, - RawConf = #{<<"bridges">> => #{TypeBin => #{Name => InnerConfigMap}}}, - hocon_tconf:check_plain(emqx_bridge_v2_schema, RawConf, #{required => false, atom_key => false}), - InnerConfigMap. + emqx_bridge_v2_testlib:parse_and_check(?ACTION_TYPE_BIN, Name, InnerConfigMap). shared_secret_path() -> os:getenv("CI_SHARED_SECRET_PATH", "/var/lib/secret"). diff --git a/apps/emqx_bridge_dynamo/BSL.txt b/apps/emqx_bridge_dynamo/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_dynamo/BSL.txt +++ b/apps/emqx_bridge_dynamo/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. 
-Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src index a4b372056..a0e8e2f19 100644 --- a/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src +++ b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_dynamo, [ {description, "EMQX Enterprise Dynamo Bridge"}, - {vsn, "0.1.4"}, + {vsn, "0.1.5"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector.erl b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector.erl index 9cdb8886c..0739df747 100644 --- a/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector.erl +++ b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector.erl @@ -12,7 +12,7 @@ -include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include_lib("hocon/include/hoconsc.hrl"). --export([roots/0, fields/1]). +-export([roots/0, fields/1, namespace/0]). %% `emqx_resource' API -export([ @@ -32,6 +32,9 @@ %%===================================================================== %% Hocon schema + +namespace() -> dynamodb. + roots() -> [{config, #{type => hoconsc:ref(?MODULE, config)}}]. diff --git a/apps/emqx_bridge_es/BSL.txt b/apps/emqx_bridge_es/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_es/BSL.txt +++ b/apps/emqx_bridge_es/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education.
-Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_gcp_pubsub/BSL.txt b/apps/emqx_bridge_gcp_pubsub/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_gcp_pubsub/BSL.txt +++ b/apps/emqx_bridge_gcp_pubsub/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. -Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_greptimedb/BSL.txt b/apps/emqx_bridge_greptimedb/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_greptimedb/BSL.txt +++ b/apps/emqx_bridge_greptimedb/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. -Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_greptimedb/rebar.config b/apps/emqx_bridge_greptimedb/rebar.config index 170ced1e7..bb37de16e 100644 --- a/apps/emqx_bridge_greptimedb/rebar.config +++ b/apps/emqx_bridge_greptimedb/rebar.config @@ -6,7 +6,7 @@ {emqx_connector, {path, "../../apps/emqx_connector"}}, {emqx_resource, {path, "../../apps/emqx_resource"}}, {emqx_bridge, {path, "../../apps/emqx_bridge"}}, - {greptimedb, {git, "https://github.com/GreptimeTeam/greptimedb-client-erl", {tag, "v0.1.6"}}} + {greptimedb, {git, "https://github.com/GreptimeTeam/greptimedb-client-erl", {tag, "v0.1.7"}}} ]}. {plugins, [rebar3_path_deps]}. {project_plugins, [erlfmt]}. 
diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src index 0875d13ba..357db5c96 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_greptimedb, [ {description, "EMQX GreptimeDB Bridge"}, - {vsn, "0.1.7"}, + {vsn, "0.1.8"}, {registered, []}, {applications, [ kernel, @@ -8,7 +8,7 @@ emqx_resource, greptimedb ]}, - {env, []}, + {env, [{emqx_action_info_modules, [emqx_bridge_greptimedb_action_info]}]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl index f5ae714d7..cf3586c73 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl @@ -10,10 +10,6 @@ -import(hoconsc, [mk/2, enum/1, ref/2]). --export([ - conn_bridge_examples/1 -]). - -export([ namespace/0, roots/0, @@ -21,6 +17,16 @@ desc/1 ]). +%% Examples +-export([ + bridge_v2_examples/1, + conn_bridge_examples/1, + connector_examples/1 +]). + +-define(CONNECTOR_TYPE, greptimedb). +-define(ACTION_TYPE, greptimedb). + %% ------------------------------------------------------------------------------------------------- %% api @@ -29,44 +35,67 @@ conn_bridge_examples(Method) -> #{ <<"greptimedb">> => #{ summary => <<"Greptimedb HTTP API V2 Bridge">>, - value => values("greptimedb", Method) + value => bridge_v1_values(Method) } } ]. 
-values(Protocol, get) -> - values(Protocol, post); -values("greptimedb", post) -> - SupportUint = <<"uint_value=${payload.uint_key}u,">>, - TypeOpts = #{ - bucket => <<"example_bucket">>, - org => <<"examlpe_org">>, - token => <<"example_token">>, - server => <<"127.0.0.1:4001">> +bridge_v2_examples(Method) -> + ParamsExample = #{ + parameters => #{ + write_syntax => write_syntax_value(), precision => ms + } }, - values(common, "greptimedb", SupportUint, TypeOpts); -values(Protocol, put) -> - values(Protocol, post). + [ + #{ + <<"greptimedb">> => #{ + summary => <<"GreptimeDB Action">>, + value => emqx_bridge_v2_schema:action_values( + Method, greptimedb, greptimedb, ParamsExample + ) + } + } + ]. -values(common, Protocol, SupportUint, TypeOpts) -> - CommonConfigs = #{ - type => list_to_atom(Protocol), +connector_examples(Method) -> + [ + #{ + <<"greptimedb">> => #{ + summary => <<"GreptimeDB Connector">>, + value => emqx_connector_schema:connector_values( + Method, greptimedb, connector_values(Method) + ) + } + } + ]. + +bridge_v1_values(_Method) -> + #{ + type => greptimedb, name => <<"demo">>, enable => true, local_topic => <<"local/topic/#">>, - write_syntax => - <<"${topic},clientid=${clientid}", " ", "payload=${payload},", - "${clientid}_int_value=${payload.int_key}i,", SupportUint/binary, - "bool=${payload.bool}">>, + write_syntax => write_syntax_value(), precision => ms, resource_opts => #{ batch_size => 100, batch_time => <<"20ms">> }, + username => <<"example_username">>, + password => <<"******">>, + dbname => <<"example_db">>, server => <<"127.0.0.1:4001">>, ssl => #{enable => false} - }, - maps:merge(TypeOpts, CommonConfigs). + }. + +connector_values(Method) -> + maps:without([write_syntax, precision], bridge_v1_values(Method)). + +write_syntax_value() -> + <<"${topic},clientid=${clientid}", " ", "payload=${payload},", + "${clientid}_int_value=${payload.int_key}i,", + "uint_value=${payload.uint_key}u," + "bool=${payload.bool}">>. 
%% ------------------------------------------------------------------------------------------------- %% Hocon Schema Definitions @@ -80,11 +109,50 @@ fields("put_grpc_v1") -> method_fields(put, greptimedb); fields("get_grpc_v1") -> method_fields(get, greptimedb); -fields(Type) when - Type == greptimedb --> +fields(greptimedb = Type) -> greptimedb_bridge_common_fields() ++ - connector_fields(Type). + connector_fields(Type); +%% Actions +fields(action) -> + {greptimedb, + mk( + hoconsc:map(name, ref(?MODULE, greptimedb_action)), + #{desc => <<"GreptimeDB Action Config">>, required => false} + )}; +fields(greptimedb_action) -> + emqx_bridge_v2_schema:make_producer_action_schema( + mk(ref(?MODULE, action_parameters), #{ + required => true, desc => ?DESC(action_parameters) + }) + ); +fields(action_parameters) -> + [ + {write_syntax, fun write_syntax/1}, + emqx_bridge_greptimedb_connector:precision_field() + ]; +%% Connectors +fields("config_connector") -> + emqx_connector_schema:common_fields() ++ + emqx_bridge_greptimedb_connector:fields("connector") ++ + emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts); +fields(connector_resource_opts) -> + emqx_connector_schema:resource_opts_fields(); +fields(Field) when + Field == "get_connector"; + Field == "put_connector"; + Field == "post_connector" +-> + Fields = + emqx_bridge_greptimedb_connector:fields("connector") ++ + emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts), + emqx_connector_schema:api_fields(Field, ?CONNECTOR_TYPE, Fields); +%% Bridge v2 +fields(Field) when + Field == "get_bridge_v2"; + Field == "post_bridge_v2"; + Field == "put_bridge_v2" +-> + emqx_bridge_v2_schema:api_fields(Field, ?ACTION_TYPE, fields(greptimedb_action)).
method_fields(post, ConnectorType) -> greptimedb_bridge_common_fields() ++ @@ -122,6 +190,14 @@ desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> ["Configuration for Greptimedb using `", string:to_upper(Method), "` method."]; desc(greptimedb) -> ?DESC(emqx_bridge_greptimedb_connector, "greptimedb"); +desc(greptimedb_action) -> + ?DESC(greptimedb_action); +desc(action_parameters) -> + ?DESC(action_parameters); +desc("config_connector") -> + ?DESC("desc_config"); +desc(connector_resource_opts) -> + ?DESC(emqx_resource_schema, "resource_opts"); desc(_) -> undefined. diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_action_info.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_action_info.erl new file mode 100644 index 000000000..c128e7101 --- /dev/null +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_action_info.erl @@ -0,0 +1,58 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_greptimedb_action_info). + +-behaviour(emqx_action_info). + +-export([ + action_type_name/0, + bridge_v1_config_to_action_config/2, + bridge_v1_config_to_connector_config/1, + bridge_v1_type_name/0, + connector_action_config_to_bridge_v1_config/2, + connector_type_name/0, + schema_module/0 +]). + +-import(emqx_utils_conv, [bin/1]). + +-define(SCHEMA_MODULE, emqx_bridge_greptimedb). +-define(GREPTIMEDB_TYPE, greptimedb). + +action_type_name() -> ?GREPTIMEDB_TYPE. +bridge_v1_type_name() -> ?GREPTIMEDB_TYPE. +connector_type_name() -> ?GREPTIMEDB_TYPE. + +schema_module() -> ?SCHEMA_MODULE. 
+ +bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName) -> + ActionTopLevelKeys = schema_keys(greptimedb_action), + ActionParametersKeys = schema_keys(action_parameters), + ActionKeys = ActionTopLevelKeys ++ ActionParametersKeys, + ActionConfig = make_config_map(ActionKeys, ActionParametersKeys, BridgeV1Config), + emqx_utils_maps:update_if_present( + <<"resource_opts">>, + fun emqx_bridge_v2_schema:project_to_actions_resource_opts/1, + ActionConfig#{<<"connector">> => ConnectorName} + ). + +bridge_v1_config_to_connector_config(BridgeV1Config) -> + ConnectorKeys = schema_keys("config_connector"), + emqx_utils_maps:update_if_present( + <<"resource_opts">>, + fun emqx_connector_schema:project_to_connector_resource_opts/1, + maps:with(ConnectorKeys, BridgeV1Config) + ). + +connector_action_config_to_bridge_v1_config(ConnectorRawConf, ActionRawConf) -> + emqx_action_info:connector_action_config_to_bridge_v1_config( + ConnectorRawConf, ActionRawConf + ). + +make_config_map(PickKeys, IndentKeys, Config) -> + Conf0 = maps:with(PickKeys, Config), + emqx_utils_maps:indent(<<"parameters">>, IndentKeys, Conf0). + +schema_keys(Name) -> + [bin(Key) || Key <- proplists:get_keys(?SCHEMA_MODULE:fields(Name))]. diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl index af42dac52..0016af463 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl @@ -4,7 +4,7 @@ -module(emqx_bridge_greptimedb_connector). -include_lib("emqx_connector/include/emqx_connector.hrl"). - +-include_lib("emqx_resource/include/emqx_resource.hrl"). -include_lib("hocon/include/hoconsc.hrl"). -include_lib("typerefl/include/types.hrl"). -include_lib("emqx/include/logger.hrl"). 
@@ -19,6 +19,10 @@ callback_mode/0, on_start/2, on_stop/2, + on_add_channel/4, + on_remove_channel/3, + on_get_channel_status/3, + on_get_channels/1, on_query/3, on_batch_query/3, on_query_async/4, @@ -34,6 +38,8 @@ desc/1 ]). +-export([precision_field/0]). + %% only for test -ifdef(TEST). -export([is_unrecoverable_error/1]). @@ -62,6 +68,38 @@ %% resource callback callback_mode() -> async_if_possible. +on_add_channel( + _InstanceId, + #{channels := Channels} = OldState, + ChannelId, + #{parameters := Parameters} = ChannelConfig0 +) -> + #{write_syntax := WriteSyntaxTmpl} = Parameters, + Precision = maps:get(precision, Parameters, ms), + ChannelConfig = maps:merge( + Parameters, + ChannelConfig0#{ + precision => Precision, + write_syntax => to_config(WriteSyntaxTmpl, Precision) + } + ), + {ok, OldState#{ + channels => Channels#{ChannelId => ChannelConfig} + }}. + +on_remove_channel(_InstanceId, #{channels := Channels} = State, ChannelId) -> + NewState = State#{channels => maps:remove(ChannelId, Channels)}, + {ok, NewState}. + +on_get_channel_status(InstanceId, _ChannelId, State) -> + case on_get_status(InstanceId, State) of + ?status_connected -> ?status_connected; + _ -> ?status_connecting + end. + +on_get_channels(InstanceId) -> + emqx_bridge_v2:get_channels_for_connector(InstanceId). + on_start(InstId, Config) -> %% InstID as pool would be handled by greptimedb client %% so there is no need to allocate pool_name here @@ -78,8 +116,13 @@ on_stop(InstId, _State) -> ok end. 
-on_query(InstId, {send_message, Data}, _State = #{write_syntax := SyntaxLines, client := Client}) -> - case data_to_points(Data, SyntaxLines) of +on_query(InstId, {Channel, Message}, State) -> + #{ + channels := #{Channel := #{write_syntax := SyntaxLines}}, + client := Client, + dbname := DbName + } = State, + case data_to_points(Message, DbName, SyntaxLines) of {ok, Points} -> ?tp( greptimedb_connector_send_query, @@ -97,8 +140,13 @@ on_query(InstId, {send_message, Data}, _State = #{write_syntax := SyntaxLines, c %% Once a Batched Data trans to points failed. %% This batch query failed -on_batch_query(InstId, BatchData, _State = #{write_syntax := SyntaxLines, client := Client}) -> - case parse_batch_data(InstId, BatchData, SyntaxLines) of +on_batch_query(InstId, [{Channel, _} | _] = BatchData, State) -> + #{ + channels := #{Channel := #{write_syntax := SyntaxLines}}, + client := Client, + dbname := DbName + } = State, + case parse_batch_data(InstId, DbName, BatchData, SyntaxLines) of {ok, Points} -> ?tp( greptimedb_connector_send_query, @@ -113,13 +161,13 @@ on_batch_query(InstId, BatchData, _State = #{write_syntax := SyntaxLines, client {error, {unrecoverable_error, Reason}} end. -on_query_async( - InstId, - {send_message, Data}, - {ReplyFun, Args}, - _State = #{write_syntax := SyntaxLines, client := Client} -) -> - case data_to_points(Data, SyntaxLines) of +on_query_async(InstId, {Channel, Message}, {ReplyFun, Args}, State) -> + #{ + channels := #{Channel := #{write_syntax := SyntaxLines}}, + client := Client, + dbname := DbName + } = State, + case data_to_points(Message, DbName, SyntaxLines) of {ok, Points} -> ?tp( greptimedb_connector_send_query, @@ -135,13 +183,13 @@ on_query_async( Err end. 
-on_batch_query_async( - InstId, - BatchData, - {ReplyFun, Args}, - #{write_syntax := SyntaxLines, client := Client} -) -> - case parse_batch_data(InstId, BatchData, SyntaxLines) of +on_batch_query_async(InstId, [{Channel, _} | _] = BatchData, {ReplyFun, Args}, State) -> + #{ + channels := #{Channel := #{write_syntax := SyntaxLines}}, + client := Client, + dbname := DbName + } = State, + case parse_batch_data(InstId, DbName, BatchData, SyntaxLines) of {ok, Points} -> ?tp( greptimedb_connector_send_query, @@ -159,9 +207,9 @@ on_batch_query_async( on_get_status(_InstId, #{client := Client}) -> case greptimedb:is_alive(Client) of true -> - connected; + ?status_connected; false -> - disconnected + ?status_disconnected end. %% ------------------------------------------------------------------------------------------------- @@ -179,22 +227,36 @@ roots() -> }} ]. +fields("connector") -> + [server_field()] ++ + credentials_fields() ++ + emqx_connector_schema_lib:ssl_fields(); +%% ============ begin: schema for old bridge configs ============ fields(common) -> [ - {server, server()}, - {precision, - %% The greptimedb only supports these 4 precision - mk(enum([ns, us, ms, s]), #{ - required => false, default => ms, desc => ?DESC("precision") - })} + server_field(), + precision_field() ]; fields(greptimedb) -> fields(common) ++ - [ - {dbname, mk(binary(), #{required => true, desc => ?DESC("dbname")})}, - {username, mk(binary(), #{desc => ?DESC("username")})}, - {password, emqx_schema_secret:mk(#{desc => ?DESC("password")})} - ] ++ emqx_connector_schema_lib:ssl_fields(). + credentials_fields() ++ + emqx_connector_schema_lib:ssl_fields(). +%% ============ end: schema for old bridge configs ============ + +desc(common) -> + ?DESC("common"); +desc(greptimedb) -> + ?DESC("greptimedb"). + +precision_field() -> + {precision, + %% The greptimedb only supports these 4 precision + mk(enum([ns, us, ms, s]), #{ + required => false, default => ms, desc => ?DESC("precision") + })}. 
+ +server_field() -> + {server, server()}. server() -> Meta = #{ @@ -205,10 +267,12 @@ server() -> }, emqx_schema:servers_sc(Meta, ?GREPTIMEDB_HOST_OPTIONS). -desc(common) -> - ?DESC("common"); -desc(greptimedb) -> - ?DESC("greptimedb"). +credentials_fields() -> + [ + {dbname, mk(binary(), #{required => true, desc => ?DESC("dbname")})}, + {username, mk(binary(), #{desc => ?DESC("username")})}, + {password, emqx_schema_secret:mk(#{desc => ?DESC("password")})} + ]. %% ------------------------------------------------------------------------------------------------- %% internal functions @@ -243,9 +307,8 @@ start_client(InstId, Config) -> do_start_client( InstId, ClientConfig, - Config = #{write_syntax := Lines} + Config ) -> - Precision = maps:get(precision, Config, ms), case greptimedb:start_client(ClientConfig) of {ok, Client} -> case greptimedb:is_alive(Client, true) of @@ -253,7 +316,7 @@ do_start_client( State = #{ client => Client, dbname => proplists:get_value(dbname, ClientConfig, ?DEFAULT_DB), - write_syntax => to_config(Lines, Precision) + channels => #{} }, ?SLOG(info, #{ msg => "starting_greptimedb_connector_success", @@ -314,8 +377,7 @@ client_config( {pool, InstId}, {pool_type, random}, {auto_reconnect, ?AUTO_RECONNECT_S}, - {gprc_options, grpc_config()}, - {timeunit, maps:get(precision, Config, ms)} + {gprc_options, grpc_config()} ] ++ protocol_config(Config). 
protocol_config( @@ -469,10 +531,10 @@ to_maps_config(K, V, Res) -> %% ------------------------------------------------------------------------------------------------- %% Tags & Fields Data Trans -parse_batch_data(InstId, BatchData, SyntaxLines) -> +parse_batch_data(InstId, DbName, BatchData, SyntaxLines) -> {Points, Errors} = lists:foldl( - fun({send_message, Data}, {ListOfPoints, ErrAccIn}) -> - case data_to_points(Data, SyntaxLines) of + fun({_, Data}, {ListOfPoints, ErrAccIn}) -> + case data_to_points(Data, DbName, SyntaxLines) of {ok, Points} -> {[Points | ListOfPoints], ErrAccIn}; {error, ErrorPoints} -> @@ -496,21 +558,25 @@ parse_batch_data(InstId, BatchData, SyntaxLines) -> {error, points_trans_failed} end. --spec data_to_points(map(), [ - #{ - fields := [{binary(), binary()}], - measurement := binary(), - tags := [{binary(), binary()}], - timestamp := emqx_placeholder:tmpl_token() | integer(), - precision := {From :: ts_precision(), To :: ts_precision()} - } -]) -> {ok, [map()]} | {error, term()}. -data_to_points(Data, SyntaxLines) -> - lines_to_points(Data, SyntaxLines, [], []). +-spec data_to_points( + map(), + binary(), + [ + #{ + fields := [{binary(), binary()}], + measurement := binary(), + tags := [{binary(), binary()}], + timestamp := emqx_placeholder:tmpl_token() | integer(), + precision := {From :: ts_precision(), To :: ts_precision()} + } + ] +) -> {ok, [map()]} | {error, term()}. +data_to_points(Data, DbName, SyntaxLines) -> + lines_to_points(Data, DbName, SyntaxLines, [], []). %% When converting multiple rows data into Greptimedb Line Protocol, they are considered to be strongly correlated. %% And once a row fails to convert, all of them are considered to have failed. 
-lines_to_points(_, [], Points, ErrorPoints) -> +lines_to_points(_Data, _DbName, [], Points, ErrorPoints) -> case ErrorPoints of [] -> {ok, Points}; @@ -518,23 +584,27 @@ lines_to_points(_, [], Points, ErrorPoints) -> %% ignore trans succeeded points {error, ErrorPoints} end; -lines_to_points(Data, [#{timestamp := Ts} = Item | Rest], ResultPointsAcc, ErrorPointsAcc) when +lines_to_points( + Data, DbName, [#{timestamp := Ts} = Item | Rest], ResultPointsAcc, ErrorPointsAcc +) when is_list(Ts) -> TransOptions = #{return => rawlist, var_trans => fun data_filter/1}, case parse_timestamp(emqx_placeholder:proc_tmpl(Ts, Data, TransOptions)) of {ok, TsInt} -> Item1 = Item#{timestamp => TsInt}, - continue_lines_to_points(Data, Item1, Rest, ResultPointsAcc, ErrorPointsAcc); + continue_lines_to_points(Data, DbName, Item1, Rest, ResultPointsAcc, ErrorPointsAcc); {error, BadTs} -> - lines_to_points(Data, Rest, ResultPointsAcc, [ + lines_to_points(Data, DbName, Rest, ResultPointsAcc, [ {error, {bad_timestamp, BadTs}} | ErrorPointsAcc ]) end; -lines_to_points(Data, [#{timestamp := Ts} = Item | Rest], ResultPointsAcc, ErrorPointsAcc) when +lines_to_points( + Data, DbName, [#{timestamp := Ts} = Item | Rest], ResultPointsAcc, ErrorPointsAcc +) when is_integer(Ts) -> - continue_lines_to_points(Data, Item, Rest, ResultPointsAcc, ErrorPointsAcc). + continue_lines_to_points(Data, DbName, Item, Rest, ResultPointsAcc, ErrorPointsAcc). parse_timestamp([TsInt]) when is_integer(TsInt) -> {ok, TsInt}; @@ -546,30 +616,32 @@ parse_timestamp([TsBin]) -> {error, TsBin} end. -continue_lines_to_points(Data, Item, Rest, ResultPointsAcc, ErrorPointsAcc) -> - case line_to_point(Data, Item) of +continue_lines_to_points(Data, DbName, Item, Rest, ResultPointsAcc, ErrorPointsAcc) -> + case line_to_point(Data, DbName, Item) of {_, [#{fields := Fields}]} when map_size(Fields) =:= 0 -> %% greptimedb client doesn't like empty field maps... 
ErrorPointsAcc1 = [{error, no_fields} | ErrorPointsAcc], - lines_to_points(Data, Rest, ResultPointsAcc, ErrorPointsAcc1); + lines_to_points(Data, DbName, Rest, ResultPointsAcc, ErrorPointsAcc1); Point -> - lines_to_points(Data, Rest, [Point | ResultPointsAcc], ErrorPointsAcc) + lines_to_points(Data, DbName, Rest, [Point | ResultPointsAcc], ErrorPointsAcc) end. line_to_point( Data, + DbName, #{ measurement := Measurement, tags := Tags, fields := Fields, timestamp := Ts, - precision := Precision + precision := {_, ToPrecision} = Precision } = Item ) -> {_, EncodedTags} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Tags), {_, EncodedFields} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Fields), TableName = emqx_placeholder:proc_tmpl(Measurement, Data), - {TableName, [ + Metric = #{dbname => DbName, table => TableName, timeunit => ToPrecision}, + {Metric, [ maps:without([precision, measurement], Item#{ tags => EncodedTags, fields => EncodedFields, diff --git a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl index 73223892d..fb6639b68 100644 --- a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl +++ b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl @@ -452,10 +452,7 @@ t_start_ok(Config) -> [#{points := [Point0]}] = Trace, {Measurement, [Point]} = Point0, ct:pal("sent point: ~p", [Point]), - ?assertMatch( - <<_/binary>>, - Measurement - ), + ?assertMatch(#{dbname := _, table := _, timeunit := _}, Measurement), ?assertMatch( #{ fields := #{}, @@ -481,7 +478,6 @@ t_start_stop(Config) -> BridgeName = ?config(bridge_name, Config), BridgeConfig = ?config(bridge_config, Config), StopTracePoint = greptimedb_client_stopped, - ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName), ?check_trace( begin ProbeRes0 = emqx_bridge_testlib:probe_bridge_api( @@ -491,6 +487,7 @@ t_start_stop(Config) -> ), ?assertMatch({ok, {{_, 204, _}, 
_Headers, _Body}}, ProbeRes0), ?assertMatch({ok, _}, emqx_bridge:create(BridgeType, BridgeName, BridgeConfig)), + ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName), %% Since the connection process is async, we give it some time to %% stabilize and avoid flakiness. @@ -554,6 +551,7 @@ t_start_stop(Config) -> ok end, fun(Trace) -> + ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName), %% one for probe, two for real ?assertMatch( [_, #{instance_id := ResourceId}, #{instance_id := ResourceId}], @@ -568,10 +566,7 @@ t_start_already_started(Config) -> Type = greptimedb_type_bin(?config(greptimedb_type, Config)), Name = ?config(greptimedb_name, Config), GreptimedbConfigString = ?config(greptimedb_config_string, Config), - ?assertMatch( - {ok, _}, - create_bridge(Config) - ), + ?assertMatch({ok, _}, create_bridge(Config)), ResourceId = resource_id(Config), TypeAtom = binary_to_atom(Type), NameAtom = binary_to_atom(Name), @@ -1036,7 +1031,6 @@ t_missing_field(Config) -> ok. 
t_authentication_error_on_send_message(Config0) -> - ResourceId = resource_id(Config0), QueryMode = proplists:get_value(query_mode, Config0, sync), GreptimedbType = ?config(greptimedb_type, Config0), GreptimeConfig0 = proplists:get_value(greptimedb_config, Config0), @@ -1055,6 +1049,7 @@ t_authentication_error_on_send_message(Config0) -> end, fun() -> {ok, _} = create_bridge(Config), + ResourceId = resource_id(Config0), ?retry( _Sleep = 1_000, _Attempts = 10, diff --git a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_connector_SUITE.erl b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_connector_SUITE.erl index a4acf5b4e..bb8bca17d 100644 --- a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_connector_SUITE.erl +++ b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_connector_SUITE.erl @@ -65,7 +65,7 @@ t_lifecycle(Config) -> Port = ?config(greptimedb_tcp_port, Config), perform_lifecycle_check( <<"emqx_bridge_greptimedb_connector_SUITE">>, - greptimedb_config(Host, Port) + greptimedb_connector_config(Host, Port) ). 
perform_lifecycle_check(PoolName, InitialConfig) -> @@ -75,6 +75,7 @@ perform_lifecycle_check(PoolName, InitialConfig) -> % expects this FullConfig = CheckedConfig#{write_syntax => greptimedb_write_syntax()}, {ok, #{ + id := ResourceId, state := #{client := #{pool := ReturnedPoolName}} = State, status := InitialStatus }} = emqx_resource:create_local( @@ -92,8 +93,13 @@ perform_lifecycle_check(PoolName, InitialConfig) -> }} = emqx_resource:get_instance(PoolName), ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + %% install actions to the connector + ActionConfig = greptimedb_action_config(), + ChannelId = <<"test_channel">>, + ?assertEqual(ok, emqx_resource_manager:add_channel(ResourceId, ChannelId, ActionConfig)), + ?assertMatch(#{status := connected}, emqx_resource:channel_health_check(ResourceId, ChannelId)), % % Perform query as further check that the resource is working as expected - ?assertMatch({ok, _}, emqx_resource:query(PoolName, test_query())), + ?assertMatch({ok, _}, emqx_resource:query(PoolName, test_query(ChannelId))), ?assertEqual(ok, emqx_resource:stop(PoolName)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. @@ -115,7 +121,9 @@ perform_lifecycle_check(PoolName, InitialConfig) -> {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = emqx_resource:get_instance(PoolName), ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), - ?assertMatch({ok, _}, emqx_resource:query(PoolName, test_query())), + ?assertEqual(ok, emqx_resource_manager:add_channel(ResourceId, ChannelId, ActionConfig)), + ?assertMatch(#{status := connected}, emqx_resource:channel_health_check(ResourceId, ChannelId)), + ?assertMatch({ok, _}, emqx_resource:query(PoolName, test_query(ChannelId))), % Stop and remove the resource in one go. 
?assertEqual(ok, emqx_resource:remove_local(PoolName)), ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), @@ -126,7 +134,7 @@ perform_lifecycle_check(PoolName, InitialConfig) -> % %% Helpers % %%------------------------------------------------------------------------------ -greptimedb_config(Host, Port) -> +greptimedb_connector_config(Host, Port) -> Server = list_to_binary(io_lib:format("~s:~b", [Host, Port])), ResourceConfig = #{ <<"dbname">> => <<"public">>, @@ -136,6 +144,14 @@ greptimedb_config(Host, Port) -> }, #{<<"config">> => ResourceConfig}. +greptimedb_action_config() -> + #{ + parameters => #{ + write_syntax => greptimedb_write_syntax(), + precision => ms + } + }. + greptimedb_write_syntax() -> [ #{ @@ -146,8 +162,8 @@ greptimedb_write_syntax() -> } ]. -test_query() -> - {send_message, #{ +test_query(ChannelId) -> + {ChannelId, #{ <<"clientid">> => <<"something">>, <<"payload">> => #{bool => true}, <<"topic">> => <<"connector_test">>, diff --git a/apps/emqx_bridge_hstreamdb/BSL.txt b/apps/emqx_bridge_hstreamdb/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_hstreamdb/BSL.txt +++ b/apps/emqx_bridge_hstreamdb/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. -Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_hstreamdb/rebar.config b/apps/emqx_bridge_hstreamdb/rebar.config index eab7bcb3f..c2e3194ac 100644 --- a/apps/emqx_bridge_hstreamdb/rebar.config +++ b/apps/emqx_bridge_hstreamdb/rebar.config @@ -3,7 +3,7 @@ {erl_opts, [debug_info]}. 
{deps, [ {hstreamdb_erl, - {git, "https://github.com/hstreamdb/hstreamdb_erl.git", {tag, "0.4.5+v0.16.1"}}}, + {git, "https://github.com/hstreamdb/hstreamdb_erl.git", {tag, "0.5.18+v0.18.1"}}}, {emqx, {path, "../../apps/emqx"}}, {emqx_utils, {path, "../../apps/emqx_utils"}} ]}. diff --git a/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.app.src b/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.app.src index f9825e3dd..84c09fe3a 100644 --- a/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.app.src +++ b/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_hstreamdb, [ {description, "EMQX Enterprise HStreamDB Bridge"}, - {vsn, "0.1.3"}, + {vsn, "0.1.4"}, {registered, []}, {applications, [ kernel, @@ -8,7 +8,7 @@ emqx_resource, hstreamdb_erl ]}, - {env, []}, + {env, [{emqx_action_info_modules, [emqx_bridge_hstreamdb_action_info]}]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.erl b/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.erl index 7052e0120..694f459e0 100644 --- a/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.erl +++ b/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.erl @@ -1,4 +1,4 @@ -%%-------------------------------------------------------------------- +%%-------------------------------------------------------------------- %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- -module(emqx_bridge_hstreamdb). @@ -6,10 +6,12 @@ -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). --import(hoconsc, [mk/2, enum/1, ref/2]). +-import(hoconsc, [mk/2, enum/1]). -export([ - conn_bridge_examples/1 + conn_bridge_examples/1, + bridge_v2_examples/1, + connector_examples/1 ]). -export([ @@ -19,6 +21,11 @@ desc/1 ]). +-define(CONNECTOR_TYPE, hstreamdb). +-define(ACTION_TYPE, ?CONNECTOR_TYPE).
+-define(DEFAULT_GRPC_TIMEOUT_RAW, <<"30s">>). +-define(DEFAULT_GRPC_FLUSH_TIMEOUT_RAW, <<"10s">>). + %% ------------------------------------------------------------------------------------------------- %% api @@ -27,16 +34,16 @@ conn_bridge_examples(Method) -> #{ <<"hstreamdb">> => #{ summary => <<"HStreamDB Bridge">>, - value => values(Method) + value => conn_bridge_example_values(Method) } } ]. -values(get) -> - values(post); -values(put) -> - values(post); -values(post) -> +conn_bridge_example_values(get) -> + conn_bridge_example_values(post); +conn_bridge_example_values(put) -> + conn_bridge_example_values(post); +conn_bridge_example_values(post) -> #{ type => <<"hstreamdb">>, name => <<"demo">>, @@ -55,15 +62,143 @@ values(post) -> }, ssl => #{enable => false} }; -values(_) -> +conn_bridge_example_values(_) -> #{}. +connector_examples(Method) -> + [ + #{ + <<"hstreamdb">> => + #{ + summary => <<"HStreamDB Connector">>, + value => emqx_connector_schema:connector_values( + Method, ?CONNECTOR_TYPE, connector_values() + ) + } + } + ]. + +connector_values() -> + #{ + <<"url">> => <<"http://127.0.0.1:6570">>, + <<"grpc_timeout">> => <<"30s">>, + <<"ssl">> => + #{ + <<"enable">> => false, + <<"verify">> => <<"verify_peer">> + }, + <<"resource_opts">> => + #{ + <<"health_check_interval">> => <<"15s">>, + <<"start_timeout">> => <<"5s">> + } + }. + +bridge_v2_examples(Method) -> + [ + #{ + <<"hstreamdb">> => + #{ + summary => <<"HStreamDB Action">>, + value => emqx_bridge_v2_schema:action_values( + Method, ?ACTION_TYPE, ?CONNECTOR_TYPE, action_values() + ) + } + } + ]. + +action_values() -> + #{ + <<"parameters">> => #{ + <<"partition_key">> => <<"hej">>, + <<"record_template">> => <<"${payload}">>, + <<"stream">> => <<"mqtt_message">>, + <<"aggregation_pool_size">> => 8, + <<"writer_pool_size">> => 8 + } + }. 
+ %% ------------------------------------------------------------------------------------------------- %% Hocon Schema Definitions namespace() -> "bridge_hstreamdb". roots() -> []. +fields(Field) when + Field == "get_connector"; + Field == "put_connector"; + Field == "post_connector" +-> + Fields = + fields(connector_fields) ++ + emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts), + emqx_connector_schema:api_fields(Field, ?CONNECTOR_TYPE, Fields); +fields(Field) when + Field == "get_bridge_v2"; + Field == "post_bridge_v2"; + Field == "put_bridge_v2" +-> + emqx_bridge_v2_schema:api_fields(Field, ?ACTION_TYPE, fields(hstreamdb_action)); +fields(action) -> + {?ACTION_TYPE, + hoconsc:mk( + hoconsc:map(name, hoconsc:ref(?MODULE, hstreamdb_action)), + #{ + desc => <<"HStreamDB Action Config">>, + required => false + } + )}; +fields(hstreamdb_action) -> + emqx_bridge_v2_schema:make_producer_action_schema( + hoconsc:mk( + hoconsc:ref(?MODULE, action_parameters), + #{ + required => true, + desc => ?DESC("action_parameters") + } + ) + ); +fields(action_parameters) -> + [ + {stream, + mk(binary(), #{ + required => true, desc => ?DESC(emqx_bridge_hstreamdb_connector, "stream_name") + })}, + + {partition_key, + mk(binary(), #{ + required => false, desc => ?DESC(emqx_bridge_hstreamdb_connector, "partition_key") + })}, + + {grpc_flush_timeout, fun grpc_flush_timeout/1}, + {record_template, + mk(binary(), #{default => <<"${payload}">>, desc => ?DESC("record_template")})}, + {aggregation_pool_size, + mk(integer(), #{default => 8, desc => ?DESC("aggregation_pool_size")})}, + {max_batches, mk(integer(), #{default => 500, desc => ?DESC("max_batches")})}, + {writer_pool_size, mk(integer(), #{default => 8, desc => ?DESC("writer_pool_size")})}, + {batch_size, mk(integer(), #{default => 100, desc => ?DESC("batch_size")})}, + {batch_interval, + mk(emqx_schema:timeout_duration_ms(), #{ + default => <<"500ms">>, desc => ?DESC("batch_interval") + })} + ]; 
+fields(connector_fields) -> + [ + {url, + mk(binary(), #{ + required => true, + desc => ?DESC(emqx_bridge_hstreamdb_connector, "url"), + default => <<"http://127.0.0.1:6570">> + })}, + {grpc_timeout, fun grpc_timeout/1} + ] ++ emqx_connector_schema_lib:ssl_fields(); +fields("config_connector") -> + emqx_connector_schema:common_fields() ++ + fields(connector_fields) ++ + emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts); +fields(connector_resource_opts) -> + emqx_connector_schema:resource_opts_fields(); fields("config") -> hstream_bridge_common_fields() ++ connector_fields(); @@ -80,6 +215,18 @@ fields("put") -> hstream_bridge_common_fields() ++ connector_fields(). +grpc_timeout(type) -> emqx_schema:timeout_duration_ms(); +grpc_timeout(desc) -> ?DESC(emqx_bridge_hstreamdb_connector, "grpc_timeout"); +grpc_timeout(default) -> ?DEFAULT_GRPC_TIMEOUT_RAW; +grpc_timeout(required) -> false; +grpc_timeout(_) -> undefined. + +grpc_flush_timeout(type) -> emqx_schema:timeout_duration_ms(); +grpc_flush_timeout(desc) -> ?DESC("grpc_flush_timeout"); +grpc_flush_timeout(default) -> ?DEFAULT_GRPC_FLUSH_TIMEOUT_RAW; +grpc_flush_timeout(required) -> false; +grpc_flush_timeout(_) -> undefined. + hstream_bridge_common_fields() -> emqx_bridge_schema:common_bridge_fields() ++ [ @@ -97,6 +244,16 @@ desc("config") -> ?DESC("desc_config"); desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> ["Configuration for HStreamDB bridge using `", string:to_upper(Method), "` method."]; +desc("creation_opts") -> + ?DESC(emqx_resource_schema, "creation_opts"); +desc("config_connector") -> + ?DESC("config_connector"); +desc(hstreamdb_action) -> + ?DESC("hstreamdb_action"); +desc(action_parameters) -> + ?DESC("action_parameters"); +desc(connector_resource_opts) -> + ?DESC(emqx_resource_schema, "resource_opts"); desc(_) -> undefined. 
diff --git a/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb_action_info.erl b/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb_action_info.erl new file mode 100644 index 000000000..7aa6565fa --- /dev/null +++ b/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb_action_info.erl @@ -0,0 +1,89 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_hstreamdb_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0, + bridge_v1_config_to_connector_config/1, + bridge_v1_config_to_action_config/2, + connector_action_config_to_bridge_v1_config/2 +]). + +bridge_v1_type_name() -> hstreamdb. + +action_type_name() -> hstreamdb. + +connector_type_name() -> hstreamdb. + +schema_module() -> emqx_bridge_hstreamdb. + +bridge_v1_config_to_connector_config(BridgeV1Conf) -> + ConnectorSchema = emqx_bridge_hstreamdb:fields(connector_fields), + ConnectorAtomKeys = lists:foldl(fun({K, _}, Acc) -> [K | Acc] end, [], ConnectorSchema), + ConnectorBinKeys = [atom_to_binary(K) || K <- ConnectorAtomKeys] ++ [<<"resource_opts">>], + ConnectorConfig0 = maps:with(ConnectorBinKeys, BridgeV1Conf), + emqx_utils_maps:update_if_present( + <<"resource_opts">>, + fun emqx_connector_schema:project_to_connector_resource_opts/1, + ConnectorConfig0 + ). 
+ +bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName) -> + Config0 = emqx_action_info:transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, emqx_bridge_hstreamdb, "config_connector" + ), + %% Remove fields no longer relevant for the action + Config1 = lists:foldl( + fun(Field, Acc) -> + emqx_utils_maps:deep_remove(Field, Acc) + end, + Config0, + [ + [<<"parameters">>, <<"pool_size">>], + [<<"parameters">>, <<"direction">>] + ] + ), + %% Move pool_size to aggregation_pool_size and writer_pool_size + PoolSize = maps:get(<<"pool_size">>, BridgeV1Conf, 8), + Config2 = emqx_utils_maps:deep_put( + [<<"parameters">>, <<"aggregation_pool_size">>], + Config1, + PoolSize + ), + Config3 = emqx_utils_maps:deep_put( + [<<"parameters">>, <<"writer_pool_size">>], + Config2, + PoolSize + ), + Config3. + +connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) -> + BridgeV1Config1 = maps:remove(<<"connector">>, ActionConfig), + BridgeV1Config2 = emqx_utils_maps:deep_merge(ConnectorConfig, BridgeV1Config1), + BridgeV1Config3 = maps:remove(<<"parameters">>, BridgeV1Config2), + %% Pick out pool_size from aggregation_pool_size + PoolSize = emqx_utils_maps:deep_get( + [<<"parameters">>, <<"aggregation_pool_size">>], ActionConfig, 8 + ), + BridgeV1Config4 = maps:put(<<"pool_size">>, PoolSize, BridgeV1Config3), + + %% Move the fields stream, partition_key and record_template from + %% parameters in ActionConfig to the top level in BridgeV1Config + lists:foldl( + fun(Field, Acc) -> + emqx_utils_maps:deep_put( + [Field], + Acc, + emqx_utils_maps:deep_get([<<"parameters">>, Field], ActionConfig, <<>>) + ) + end, + BridgeV1Config4, + [<<"stream">>, <<"partition_key">>, <<"record_template">>] + ). 
diff --git a/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb_connector.erl b/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb_connector.erl index fdb80b1e1..8413e5ecd 100644 --- a/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb_connector.erl +++ b/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb_connector.erl @@ -7,8 +7,9 @@ -include_lib("typerefl/include/types.hrl"). -include_lib("emqx/include/logger.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). --import(hoconsc, [mk/2, enum/1]). +-import(hoconsc, [mk/2]). -behaviour(emqx_resource). @@ -19,7 +20,11 @@ on_stop/2, on_query/3, on_batch_query/3, - on_get_status/2 + on_get_status/2, + on_add_channel/4, + on_remove_channel/3, + on_get_channels/1, + on_get_channel_status/3 ]). -export([ @@ -38,67 +43,132 @@ -define(DEFAULT_GRPC_TIMEOUT, timer:seconds(30)). -define(DEFAULT_GRPC_TIMEOUT_RAW, <<"30s">>). +-define(DEFAULT_GRPC_FLUSH_TIMEOUT, 10000). +-define(DEFAULT_MAX_BATCHES, 500). +-define(DEFAULT_BATCH_INTERVAL, 500). +-define(DEFAULT_AGG_POOL_SIZE, 8). +-define(DEFAULT_WRITER_POOL_SIZE, 8). %% ------------------------------------------------------------------------------------------------- %% resource callback callback_mode() -> always_sync. on_start(InstId, Config) -> - start_client(InstId, Config). + try + do_on_start(InstId, Config) + catch + E:R:S -> + Error = #{ + msg => "start_hstreamdb_connector_error", + connector => InstId, + error => E, + reason => R, + stack => S + }, + ?SLOG(error, Error), + {error, Error} + end. 
+ +on_add_channel( + _InstId, + #{ + installed_channels := InstalledChannels, + client_options := ClientOptions + } = OldState, + ChannelId, + ChannelConfig +) -> + %{ok, ChannelState} = create_channel_state(ChannelId, PoolName, ChannelConfig), + Parameters0 = maps:get(parameters, ChannelConfig), + Parameters = Parameters0#{client_options => ClientOptions}, + PartitionKey = emqx_placeholder:preproc_tmpl(maps:get(partition_key, Parameters, <<"">>)), + try + ChannelState = #{ + producer => start_producer(ChannelId, Parameters), + record_template => record_template(Parameters), + partition_key => PartitionKey + }, + NewInstalledChannels = maps:put(ChannelId, ChannelState, InstalledChannels), + %% Update state + NewState = OldState#{installed_channels => NewInstalledChannels}, + {ok, NewState} + catch + Error:Reason:Stack -> + {error, {Error, Reason, Stack}} + end. + +on_remove_channel( + _InstId, + #{ + installed_channels := InstalledChannels + } = OldState, + ChannelId +) -> + #{ + producer := Producer + } = maps:get(ChannelId, InstalledChannels), + _ = hstreamdb:stop_producer(Producer), + NewInstalledChannels = maps:remove(ChannelId, InstalledChannels), + %% Update state + NewState = OldState#{installed_channels => NewInstalledChannels}, + {ok, NewState}. + +on_get_channel_status( + _ResId, + _ChannelId, + _State +) -> + ?status_connected. + +on_get_channels(ResId) -> + emqx_bridge_v2:get_channels_for_connector(ResId). on_stop(InstId, _State) -> - case emqx_resource:get_allocated_resources(InstId) of - #{?hstreamdb_client := #{client := Client, producer := Producer}} -> - StopClientRes = hstreamdb:stop_client(Client), - StopProducerRes = hstreamdb:stop_producer(Producer), - ?SLOG(info, #{ - msg => "stop_hstreamdb_connector", - connector => InstId, - client => Client, - producer => Producer, - stop_client => StopClientRes, - stop_producer => StopProducerRes - }); - _ -> - ok - end. + ?tp( + hstreamdb_connector_on_stop, + #{instance_id => InstId} + ). 
-define(FAILED_TO_APPLY_HRECORD_TEMPLATE, {error, {unrecoverable_error, failed_to_apply_hrecord_template}} ). on_query( - _InstId, - {send_message, Data}, - _State = #{ - producer := Producer, partition_key := PartitionKey, record_template := HRecordTemplate - } + InstId, + {ChannelID, Data}, + #{installed_channels := Channels} = _State ) -> + #{ + producer := Producer, partition_key := PartitionKey, record_template := HRecordTemplate + } = maps:get(ChannelID, Channels), try to_record(PartitionKey, HRecordTemplate, Data) of - Record -> append_record(Producer, Record, false) + Record -> append_record(InstId, Producer, Record, false) catch _:_ -> ?FAILED_TO_APPLY_HRECORD_TEMPLATE end. on_batch_query( - _InstId, - BatchList, - _State = #{ - producer := Producer, partition_key := PartitionKey, record_template := HRecordTemplate - } + InstId, + [{ChannelID, _Data} | _] = BatchList, + #{installed_channels := Channels} = _State ) -> + #{ + producer := Producer, partition_key := PartitionKey, record_template := HRecordTemplate + } = maps:get(ChannelID, Channels), try to_multi_part_records(PartitionKey, HRecordTemplate, BatchList) of - Records -> append_record(Producer, Records, true) + Records -> append_record(InstId, Producer, Records, true) catch _:_ -> ?FAILED_TO_APPLY_HRECORD_TEMPLATE end. -on_get_status(_InstId, #{client := Client}) -> - case is_alive(Client) of - true -> - connected; - false -> - disconnected +on_get_status(_InstId, State) -> + case check_status(State) of + ok -> + ?status_connected; + Error -> + %% We set it to ?status_connecting so that the channels are not deleted. + %% The producers in the channels contains buffers so we don't want to delete them. + {?status_connecting, State, Error} end. 
%% ------------------------------------------------------------------------------------------------- @@ -140,142 +210,152 @@ desc(config) -> %% ------------------------------------------------------------------------------------------------- %% internal functions -start_client(InstId, Config) -> - try - do_start_client(InstId, Config) - catch - E:R:S -> - Error = #{ - msg => "start_hstreamdb_connector_error", - connector => InstId, - error => E, - reason => R, - stack => S - }, - ?SLOG(error, Error), - {error, Error} - end. -do_start_client(InstId, Config = #{url := Server, pool_size := PoolSize, ssl := SSL}) -> +do_on_start(InstId, Config) -> ?SLOG(info, #{ msg => "starting_hstreamdb_connector_client", connector => InstId, config => Config }), - ClientName = client_name(InstId), + {ok, _} = application:ensure_all_started(hstreamdb_erl), + ClientOptions = client_options(Config), + State = #{ + client_options => ClientOptions, + installed_channels => #{} + }, + case check_status(State) of + ok -> + ?SLOG(info, #{ + msg => "hstreamdb_connector_client_started", + connector => InstId + }), + {ok, State}; + Error -> + ?tp( + hstreamdb_connector_start_failed, + #{error => client_not_alive} + ), + ?SLOG(error, #{ + msg => "hstreamdb_connector_client_not_alive", + connector => InstId, + error => Error + }), + {error, {connect_failed, Error}} + end. 
+ +client_options(Config = #{url := ServerURL, ssl := SSL}) -> + GRPCTimeout = maps:get(<<"grpc_timeout">>, Config, ?DEFAULT_GRPC_TIMEOUT), + EnableSSL = maps:get(enable, SSL), RpcOpts = - case maps:get(enable, SSL) of + case EnableSSL of false -> - #{pool_size => PoolSize}; + #{pool_size => 1}; true -> #{ - pool_size => PoolSize, + pool_size => 1, gun_opts => #{ transport => tls, - transport_opts => emqx_tls_lib:to_client_opts(SSL) + transport_opts => + emqx_tls_lib:to_client_opts(SSL) } } end, - ClientOptions = [ - {url, binary_to_list(Server)}, - {rpc_options, RpcOpts} - ], - case hstreamdb:start_client(ClientName, ClientOptions) of + ClientOptions = #{ + url => to_string(ServerURL), + grpc_timeout => GRPCTimeout, + rpc_options => RpcOpts + }, + ClientOptions. + +check_status(ConnectorState) -> + try start_client(ConnectorState) of {ok, Client} -> - case is_alive(Client) of - true -> - ?SLOG(info, #{ - msg => "hstreamdb_connector_client_started", - connector => InstId, - client => Client - }), - start_producer(InstId, Client, Config); - _ -> - ?tp( - hstreamdb_connector_start_failed, - #{error => client_not_alive} - ), - ?SLOG(error, #{ - msg => "hstreamdb_connector_client_not_alive", - connector => InstId - }), - {error, connect_failed} - end; - {error, {already_started, Pid}} -> - ?SLOG(info, #{ - msg => "starting_hstreamdb_connector_client_find_old_client_restart_client", - old_client_pid => Pid, - old_client_name => ClientName - }), - _ = hstreamdb:stop_client(ClientName), - start_client(InstId, Config); + check_status_with_client(Client); + {error, _} = StartClientError -> + StartClientError + catch + ErrorType:Reason:_ST -> + {error, {ErrorType, Reason}} + end. + +check_status_with_client(Client) -> + try hstreamdb_client:echo(Client) of + ok -> ok; + {error, _} = ErrorEcho -> ErrorEcho + after + _ = hstreamdb:stop_client(Client) + end. 
+ +start_client(Opts) -> + ClientOptions = maps:get(client_options, Opts), + case hstreamdb:start_client(ClientOptions) of + {ok, Client} -> + {ok, Client}; {error, Error} -> - ?SLOG(error, #{ - msg => "hstreamdb_connector_client_failed", - connector => InstId, - reason => Error - }), {error, Error} end. -is_alive(Client) -> - hstreamdb_client:echo(Client) =:= ok. - start_producer( - InstId, - Client, - Options = #{stream := Stream, pool_size := PoolSize} + ActionId, + #{ + stream := Stream, + batch_size := BatchSize, + batch_interval := Interval + } = Opts ) -> - %% TODO: change these batch options after we have better disk cache. - BatchSize = maps:get(batch_size, Options, 100), - Interval = maps:get(batch_interval, Options, 1000), - ProducerOptions = [ - {stream, Stream}, - {callback, {?MODULE, on_flush_result, []}}, - {max_records, BatchSize}, - {interval, Interval}, - {pool_size, PoolSize}, - {grpc_timeout, maps:get(grpc_timeout, Options, ?DEFAULT_GRPC_TIMEOUT)} - ], - Name = produce_name(InstId), - ?SLOG(info, #{ - msg => "starting_hstreamdb_connector_producer", - connector => InstId - }), - case hstreamdb:start_producer(Client, Name, ProducerOptions) of - {ok, Producer} -> - ?SLOG(info, #{ - msg => "hstreamdb_connector_producer_started" - }), - State = #{ - client => Client, - producer => Producer, - enable_batch => maps:get(enable_batch, Options, false), - partition_key => emqx_placeholder:preproc_tmpl( - maps:get(partition_key, Options, <<"">>) - ), - record_template => record_template(Options) - }, - ok = emqx_resource:allocate_resource(InstId, ?hstreamdb_client, #{ - client => Client, producer => Producer - }), - {ok, State}; - {error, {already_started, Pid}} -> - ?SLOG(info, #{ - msg => - "starting_hstreamdb_connector_producer_find_old_producer_restart_producer", - old_producer_pid => Pid, - old_producer_name => Name - }), - _ = hstreamdb:stop_producer(Name), - start_producer(InstId, Client, Options); + MaxBatches = maps:get(max_batches, Opts, 
?DEFAULT_MAX_BATCHES), + AggPoolSize = maps:get(aggregation_pool_size, Opts, ?DEFAULT_AGG_POOL_SIZE), + WriterPoolSize = maps:get(writer_pool_size, Opts, ?DEFAULT_WRITER_POOL_SIZE), + GRPCTimeout = maps:get(grpc_flush_timeout, Opts, ?DEFAULT_GRPC_FLUSH_TIMEOUT), + ClientOptions = maps:get(client_options, Opts), + ProducerOptions = #{ + stream => to_string(Stream), + buffer_options => #{ + interval => Interval, + callback => {?MODULE, on_flush_result, [ActionId]}, + max_records => BatchSize, + max_batches => MaxBatches + }, + buffer_pool_size => AggPoolSize, + writer_options => #{ + grpc_timeout => GRPCTimeout + }, + writer_pool_size => WriterPoolSize, + client_options => ClientOptions + }, + Name = produce_name(ActionId), + ensure_start_producer(Name, ProducerOptions). + +ensure_start_producer(ProducerName, ProducerOptions) -> + case hstreamdb:start_producer(ProducerName, ProducerOptions) of + ok -> + ok; + {error, {already_started, _Pid}} -> + %% HStreamDB producer already started, restart it + _ = hstreamdb:stop_producer(ProducerName), + %% the pool might have been leaked after relup + _ = ecpool:stop_sup_pool(ProducerName), + ok = hstreamdb:start_producer(ProducerName, ProducerOptions); + {error, { + {shutdown, + {failed_to_start_child, {pool_sup, Pool}, + {shutdown, + {failed_to_start_child, worker_sup, + {shutdown, {failed_to_start_child, _, {badarg, _}}}}}}}, + _ + }} -> + %% HStreamDB producer was not properly cleared, restart it + %% the badarg error in gproc maybe caused by the pool is leaked after relup + _ = ecpool:stop_sup_pool(Pool), + ok = hstreamdb:start_producer(ProducerName, ProducerOptions); {error, Reason} -> - ?SLOG(error, #{ - msg => "starting_hstreamdb_connector_producer_failed", - reason => Reason - }), - {error, Reason} - end. + %% HStreamDB start producer failed + throw({start_producer_failed, Reason}) + end, + ProducerName. + +produce_name(ActionId) -> + list_to_binary("backend_hstream_producer:" ++ to_string(ActionId)). 
to_record(PartitionKeyTmpl, HRecordTmpl, Data) -> PartitionKey = emqx_placeholder:proc_tmpl(PartitionKeyTmpl, Data), @@ -289,43 +369,46 @@ to_record(PartitionKey, RawRecord) -> to_multi_part_records(PartitionKeyTmpl, HRecordTmpl, BatchList) -> lists:map( - fun({send_message, Data}) -> + fun({_, Data}) -> to_record(PartitionKeyTmpl, HRecordTmpl, Data) end, BatchList ). -append_record(Producer, MultiPartsRecords, MaybeBatch) when is_list(MultiPartsRecords) -> +append_record(ResourceId, Producer, MultiPartsRecords, MaybeBatch) when + is_list(MultiPartsRecords) +-> lists:foreach( - fun(Record) -> append_record(Producer, Record, MaybeBatch) end, MultiPartsRecords + fun(Record) -> append_record(ResourceId, Producer, Record, MaybeBatch) end, + MultiPartsRecords ); -append_record(Producer, Record, MaybeBatch) when is_tuple(Record) -> - do_append_records(Producer, Record, MaybeBatch). +append_record(ResourceId, Producer, Record, MaybeBatch) when is_tuple(Record) -> + do_append_records(ResourceId, Producer, Record, MaybeBatch). %% TODO: only sync request supported. implement async request later. -do_append_records(Producer, Record, true = IsBatch) -> +do_append_records(ResourceId, Producer, Record, true = IsBatch) -> Result = hstreamdb:append(Producer, Record), - handle_result(Result, Record, IsBatch); -do_append_records(Producer, Record, false = IsBatch) -> + handle_result(ResourceId, Result, Record, IsBatch); +do_append_records(ResourceId, Producer, Record, false = IsBatch) -> Result = hstreamdb:append_flush(Producer, Record), - handle_result(Result, Record, IsBatch). + handle_result(ResourceId, Result, Record, IsBatch). 
-handle_result(ok = Result, Record, IsBatch) -> - handle_result({ok, Result}, Record, IsBatch); -handle_result({ok, Result}, Record, IsBatch) -> +handle_result(ResourceId, ok = Result, Record, IsBatch) -> + handle_result(ResourceId, {ok, Result}, Record, IsBatch); +handle_result(ResourceId, {ok, Result}, Record, IsBatch) -> ?tp( hstreamdb_connector_query_append_return, - #{result => Result, is_batch => IsBatch} + #{result => Result, is_batch => IsBatch, instance_id => ResourceId} ), ?SLOG(debug, #{ msg => "hstreamdb_producer_sync_append_success", record => Record, is_batch => IsBatch }); -handle_result({error, Reason} = Err, Record, IsBatch) -> +handle_result(ResourceId, {error, Reason} = Err, Record, IsBatch) -> ?tp( hstreamdb_connector_query_append_return, - #{error => Reason, is_batch => IsBatch} + #{error => Reason, is_batch => IsBatch, instance_id => ResourceId} ), ?SLOG(error, #{ msg => "hstreamdb_producer_sync_append_failed", @@ -335,12 +418,6 @@ handle_result({error, Reason} = Err, Record, IsBatch) -> }), Err. -client_name(InstId) -> - "client:" ++ to_string(InstId). - -produce_name(ActionId) -> - list_to_atom("producer:" ++ to_string(ActionId)). - record_template(#{record_template := RawHRecordTemplate}) -> emqx_placeholder:preproc_tmpl(RawHRecordTemplate); record_template(_) -> diff --git a/apps/emqx_bridge_hstreamdb/test/emqx_bridge_hstreamdb_SUITE.erl b/apps/emqx_bridge_hstreamdb/test/emqx_bridge_hstreamdb_SUITE.erl index 4d165c03d..1ac489334 100644 --- a/apps/emqx_bridge_hstreamdb/test/emqx_bridge_hstreamdb_SUITE.erl +++ b/apps/emqx_bridge_hstreamdb/test/emqx_bridge_hstreamdb_SUITE.erl @@ -117,16 +117,21 @@ end_per_suite(_Config) -> ok. 
init_per_testcase(t_to_hrecord_failed, Config) -> + init_per_testcase_common(), meck:new([hstreamdb], [passthrough, no_history, no_link]), meck:expect(hstreamdb, to_record, fun(_, _, _) -> error(trans_to_hrecord_failed) end), Config; init_per_testcase(_Testcase, Config) -> + init_per_testcase_common(), %% drop stream and will create a new one in common_init/1 %% TODO: create a new stream for each test case delete_bridge(Config), snabbkaffe:start_trace(), Config. +init_per_testcase_common() -> + emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(). + end_per_testcase(t_to_hrecord_failed, _Config) -> meck:unload([hstreamdb]); end_per_testcase(_Testcase, Config) -> @@ -301,7 +306,10 @@ t_simple_query(Config) -> {ok, _}, create_bridge(Config) ), - Requests = gen_batch_req(BatchSize), + Type = ?config(hstreamdb_bridge_type, Config), + Name = ?config(hstreamdb_name, Config), + ActionId = emqx_bridge_v2:id(Type, Name), + Requests = gen_batch_req(BatchSize, ActionId), ?check_trace( begin ?wait_async_action( @@ -351,6 +359,24 @@ t_to_hrecord_failed(Config) -> end, ok. +%% Connector Action Tests + +t_action_on_get_status(Config) -> + emqx_bridge_v2_testlib:t_on_get_status(Config, #{failure_status => connecting}). + +t_action_create_via_http(Config) -> + emqx_bridge_v2_testlib:t_create_via_http(Config). + +t_action_sync_query(Config) -> + MakeMessageFun = fun() -> rand_data() end, + IsSuccessCheck = fun(Result) -> ?assertEqual(ok, Result) end, + TracePoint = hstreamdb_connector_query_append_return, + emqx_bridge_v2_testlib:t_sync_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint). + +t_action_start_stop(Config) -> + StopTracePoint = hstreamdb_connector_on_stop, + emqx_bridge_v2_testlib:t_start_stop(Config, StopTracePoint). 
+ %%------------------------------------------------------------------------------ %% Helper fns %%------------------------------------------------------------------------------ @@ -362,6 +388,10 @@ common_init(ConfigT) -> URL = "http://" ++ Host ++ ":" ++ RawPort, Config0 = [ + {bridge_type, <<"hstreamdb">>}, + {bridge_name, <<"my_hstreamdb_action">>}, + {connector_type, <<"hstreamdb">>}, + {connector_name, <<"my_hstreamdb_connector">>}, {hstreamdb_host, Host}, {hstreamdb_port, Port}, {hstreamdb_url, URL}, @@ -393,6 +423,8 @@ common_init(ConfigT) -> {hstreamdb_config, HStreamDBConf}, {hstreamdb_bridge_type, BridgeType}, {hstreamdb_name, Name}, + {bridge_config, action_config(Config0)}, + {connector_config, connector_config(Config0)}, {proxy_host, ProxyHost}, {proxy_port, ProxyPort} | Config0 @@ -424,7 +456,7 @@ hstreamdb_config(BridgeType, Config) -> " resource_opts = {\n" %% always sync " query_mode = sync\n" - " request_ttl = 500ms\n" + " request_ttl = 10000ms\n" " batch_size = ~b\n" " worker_pool_size = ~b\n" " }\n" @@ -443,6 +475,45 @@ hstreamdb_config(BridgeType, Config) -> ), {Name, parse_and_check(ConfigString, BridgeType, Name)}. +action_config(Config) -> + ConnectorName = ?config(connector_name, Config), + BatchSize = batch_size(Config), + #{ + <<"connector">> => ConnectorName, + <<"enable">> => true, + <<"parameters">> => + #{ + <<"aggregation_pool_size">> => ?POOL_SIZE, + <<"record_template">> => ?RECORD_TEMPLATE, + <<"stream">> => ?STREAM, + <<"writer_pool_size">> => ?POOL_SIZE + }, + <<"resource_opts">> => + #{ + <<"batch_size">> => BatchSize, + <<"health_check_interval">> => <<"15s">>, + <<"inflight_window">> => 100, + <<"max_buffer_bytes">> => <<"256MB">>, + <<"query_mode">> => <<"sync">>, + <<"request_ttl">> => <<"45s">>, + <<"worker_pool_size">> => ?POOL_SIZE + } + }. 
+ +connector_config(Config) -> + Port = integer_to_list(?config(hstreamdb_port, Config)), + URL = "http://" ++ ?config(hstreamdb_host, Config) ++ ":" ++ Port, + #{ + <<"url">> => URL, + <<"ssl">> => + #{<<"enable">> => false, <<"verify">> => <<"verify_peer">>}, + <<"grpc_timeout">> => <<"30s">>, + <<"resource_opts">> => #{ + <<"health_check_interval">> => <<"15s">>, + <<"start_timeout">> => <<"5s">> + } + }. + parse_and_check(ConfigString, BridgeType, Name) -> {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), @@ -454,10 +525,10 @@ parse_and_check(ConfigString, BridgeType, Name) -> -define(CONN_ATTEMPTS, 10). default_options(Config) -> - [ - {url, ?config(hstreamdb_url, Config)}, - {rpc_options, ?RPC_OPTIONS} - ]. + #{ + url => ?config(hstreamdb_url, Config), + rpc_options => ?RPC_OPTIONS + }. connect_direct_hstream(Name, Config) -> client(Name, Config, ?CONN_ATTEMPTS). @@ -511,8 +582,9 @@ send_message(Config, Data) -> query_resource(Config, Request) -> Name = ?config(hstreamdb_name, Config), BridgeType = ?config(hstreamdb_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - emqx_resource:query(ResourceID, Request, #{timeout => 1_000}). + ID = emqx_bridge_v2:id(BridgeType, Name), + ResID = emqx_connector_resource:resource_id(BridgeType, Name), + emqx_resource:query(ID, Request, #{timeout => 1_000, connector_resource_id => ResID}). restart_resource(Config) -> BridgeName = ?config(hstreamdb_name, Config), @@ -526,8 +598,16 @@ resource_id(Config) -> BridgeType = ?config(hstreamdb_bridge_type, Config), _ResourceID = emqx_bridge_resource:resource_id(BridgeType, BridgeName). +action_id(Config) -> + ActionName = ?config(hstreamdb_name, Config), + ActionType = ?config(hstreamdb_bridge_type, Config), + _ActionID = emqx_bridge_v2:id(ActionType, ActionName). 
+ health_check_resource_ok(Config) -> - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(resource_id(Config))). + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(resource_id(Config))), + ActionName = ?config(hstreamdb_name, Config), + ActionType = ?config(hstreamdb_bridge_type, Config), + ?assertMatch(#{status := connected}, emqx_bridge_v2:health_check(ActionType, ActionName)). health_check_resource_down(Config) -> case emqx_resource_manager:health_check(resource_id(Config)) of @@ -539,6 +619,19 @@ health_check_resource_down(Config) -> ?assert( false, lists:flatten(io_lib:format("invalid health check result:~p~n", [Other])) ) + end, + ActionName = ?config(hstreamdb_name, Config), + ActionType = ?config(hstreamdb_bridge_type, Config), + #{status := StatusV2} = emqx_bridge_v2:health_check(ActionType, ActionName), + case StatusV2 of + disconnected -> + ok; + connecting -> + ok; + OtherV2 -> + ?assert( + false, lists:flatten(io_lib:format("invalid health check result:~p~n", [OtherV2])) + ) end. 
% These funs start and then stop the hstreamdb connection @@ -548,22 +641,36 @@ connect_and_create_stream(Config) -> Client, ?STREAM, ?REPLICATION_FACTOR, ?BACKLOG_RETENTION_SECOND, ?SHARD_COUNT ) ), - %% force write to stream to make it created and ready to be written data for rest cases - ProducerOptions = [ - {pool_size, 4}, - {stream, ?STREAM}, - {callback, fun(_) -> ok end}, - {max_records, 10}, - {interval, 1000} - ], + %% force write to stream to make it created and ready to be written data for test cases + ProducerOptions = #{ + stream => ?STREAM, + buffer_options => #{ + interval => 1000, + callback => {?MODULE, on_flush_result, [<<"WHAT">>]}, + max_records => 1, + max_batches => 1 + }, + buffer_pool_size => 1, + writer_options => #{ + grpc_timeout => 100 + }, + writer_pool_size => 1, + client_options => default_options(Config) + }, + ?WITH_CLIENT( begin - {ok, Producer} = hstreamdb:start_producer(Client, test_producer, ProducerOptions), - _ = hstreamdb:append_flush(Producer, hstreamdb:to_record([], raw, rand_payload())), - _ = hstreamdb:stop_producer(Producer) + ok = hstreamdb:start_producer(test_producer, ProducerOptions), + _ = hstreamdb:append_flush(test_producer, hstreamdb:to_record([], raw, rand_payload())), + _ = hstreamdb:stop_producer(test_producer) end ). +on_flush_result({{flush, _Stream, _Records}, {ok, _Resp}}) -> + ok; +on_flush_result({{flush, _Stream, _Records}, {error, _Reason}}) -> + ok. + connect_and_delete_stream(Config) -> ?WITH_CLIENT( _ = hstreamdb_client:delete_stream(Client, ?STREAM) @@ -593,11 +700,11 @@ rand_payload() -> temperature => rand:uniform(40), humidity => rand:uniform(100) }). 
-gen_batch_req(Count) when +gen_batch_req(Count, ActionId) when is_integer(Count) andalso Count > 0 -> - [{send_message, rand_data()} || _Val <- lists:seq(1, Count)]; -gen_batch_req(Count) -> + [{ActionId, rand_data()} || _Val <- lists:seq(1, Count)]; +gen_batch_req(Count, _ActionId) -> ct:pal("Gen batch requests failed with unexpected Count: ~p", [Count]). str(List) when is_list(List) -> diff --git a/apps/emqx_bridge_http/src/emqx_bridge_http_connector.erl b/apps/emqx_bridge_http/src/emqx_bridge_http_connector.erl index ac9b18ace..20f2e168b 100644 --- a/apps/emqx_bridge_http/src/emqx_bridge_http_connector.erl +++ b/apps/emqx_bridge_http/src/emqx_bridge_http_connector.erl @@ -252,6 +252,7 @@ start_pool(PoolName, PoolOpts) -> {error, {already_started, _}} -> ?SLOG(warning, #{ msg => "emqx_connector_on_start_already_started", + connector => PoolName, pool_name => PoolName }), ok; @@ -510,8 +511,8 @@ resolve_pool_worker(#{pool_name := PoolName} = State, Key) -> on_get_channels(ResId) -> emqx_bridge_v2:get_channels_for_connector(ResId). 
-on_get_status(_InstId, #{pool_name := PoolName, connect_timeout := Timeout} = State) -> - case do_get_status(PoolName, Timeout) of +on_get_status(InstId, #{pool_name := InstId, connect_timeout := Timeout} = State) -> + case do_get_status(InstId, Timeout) of ok -> connected; {error, still_connecting} -> @@ -527,12 +528,7 @@ do_get_status(PoolName, Timeout) -> case ehttpc:health_check(Worker, Timeout) of ok -> ok; - {error, Reason} = Error -> - ?SLOG(error, #{ - msg => "http_connector_get_status_failed", - reason => redact(Reason), - worker => Worker - }), + {error, _} = Error -> Error end end, @@ -543,14 +539,20 @@ do_get_status(PoolName, Timeout) -> case [E || {error, _} = E <- Results] of [] -> ok; - Errors -> - hd(Errors) + [{error, Reason} | _] -> + ?SLOG(info, #{ + msg => "health_check_failed", + reason => redact(Reason), + connector => PoolName + }), + {error, Reason} end catch exit:timeout -> - ?SLOG(error, #{ - msg => "http_connector_pmap_failed", - reason => timeout + ?SLOG(info, #{ + msg => "health_check_failed", + reason => timeout, + connector => PoolName }), {error, timeout} end. diff --git a/apps/emqx_bridge_influxdb/BSL.txt b/apps/emqx_bridge_influxdb/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_influxdb/BSL.txt +++ b/apps/emqx_bridge_influxdb/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. 
-Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb_action_info.erl b/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb_action_info.erl index 00a6c5510..5864daf50 100644 --- a/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb_action_info.erl +++ b/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb_action_info.erl @@ -1,3 +1,6 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- -module(emqx_bridge_influxdb_action_info). -behaviour(emqx_action_info). diff --git a/apps/emqx_bridge_iotdb/BSL.txt b/apps/emqx_bridge_iotdb/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_iotdb/BSL.txt +++ b/apps/emqx_bridge_iotdb/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. -Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_iotdb/test/emqx_bridge_iotdb_impl_SUITE.erl b/apps/emqx_bridge_iotdb/test/emqx_bridge_iotdb_impl_SUITE.erl index 1093993b2..b33370d45 100644 --- a/apps/emqx_bridge_iotdb/test/emqx_bridge_iotdb_impl_SUITE.erl +++ b/apps/emqx_bridge_iotdb/test/emqx_bridge_iotdb_impl_SUITE.erl @@ -165,7 +165,8 @@ bridge_config(TestCase, Config) -> Version ] ), - {Name, ConfigString, emqx_bridge_v2_testlib:parse_and_check(Type, Name, ConfigString)}. + {ok, InnerConfigMap} = hocon:binary(ConfigString), + {Name, ConfigString, emqx_bridge_v2_testlib:parse_and_check(Type, Name, InnerConfigMap)}. 
make_iotdb_payload(DeviceId, Measurement, Type, Value) -> #{ diff --git a/apps/emqx_bridge_kafka/BSL.txt b/apps/emqx_bridge_kafka/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_kafka/BSL.txt +++ b/apps/emqx_bridge_kafka/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. -Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_kafka/rebar.config b/apps/emqx_bridge_kafka/rebar.config index e71ccea9f..7c98bf571 100644 --- a/apps/emqx_bridge_kafka/rebar.config +++ b/apps/emqx_bridge_kafka/rebar.config @@ -2,8 +2,8 @@ {erl_opts, [debug_info]}. {deps, [ - {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.9.1"}}}, - {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.3"}}}, + {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.10.2"}}}, + {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.5"}}}, {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.1"}}}, {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}}, {snappyer, "1.2.9"}, diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src index ffdb7eb20..a504a42d8 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_bridge_kafka, [ {description, "EMQX Enterprise Kafka Bridge"}, - {vsn, "0.2.1"}, + {vsn, "0.2.2"}, {registered, [emqx_bridge_kafka_consumer_sup]}, {applications, [ kernel, diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl 
b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl index b1032ff6b..be2c124e3 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl @@ -188,6 +188,7 @@ values(producer_values) -> ], kafka_header_value_encode_mode => none, max_inflight => 10, + partitions_limit => all_partitions, buffer => #{ mode => <<"hybrid">>, per_partition_limit => <<"2GB">>, @@ -414,6 +415,14 @@ fields(producer_kafka_opts) -> desc => ?DESC(partition_count_refresh_interval) } )}, + {partitions_limit, + mk( + hoconsc:union([all_partitions, pos_integer()]), + #{ + default => <<"all_partitions">>, + desc => ?DESC(partitions_limit) + } + )}, {max_inflight, mk( pos_integer(), diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl index 459e259d2..d5d8ddd35 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl @@ -135,6 +135,7 @@ create_producers_for_bridge_v2( KafkaHeadersTokens = preproc_kafka_headers(maps:get(kafka_headers, KafkaConfig, undefined)), KafkaExtHeadersTokens = preproc_ext_headers(maps:get(kafka_ext_headers, KafkaConfig, [])), KafkaHeadersValEncodeMode = maps:get(kafka_header_value_encode_mode, KafkaConfig, none), + MaxPartitions = maps:get(partitions_limit, KafkaConfig, all_partitions), #{name := BridgeName} = emqx_bridge_v2:parse_id(BridgeV2Id), TestIdStart = string:find(BridgeV2Id, ?TEST_ID_PREFIX), IsDryRun = @@ -144,7 +145,7 @@ create_producers_for_bridge_v2( _ -> string:equal(TestIdStart, InstId) end, - ok = check_topic_and_leader_connections(ClientId, KafkaTopic), + ok = check_topic_and_leader_connections(ClientId, KafkaTopic, MaxPartitions), WolffProducerConfig = producers_config( BridgeType, BridgeName, KafkaConfig, IsDryRun, BridgeV2Id ), @@ -166,7 +167,8 @@ create_producers_for_bridge_v2( kafka_config => KafkaConfig, headers_tokens => 
KafkaHeadersTokens, ext_headers_tokens => KafkaExtHeadersTokens, - headers_val_encode_mode => KafkaHeadersValEncodeMode + headers_val_encode_mode => KafkaHeadersValEncodeMode, + partitions_limit => MaxPartitions }}; {error, Reason2} -> ?SLOG(error, #{ @@ -376,6 +378,8 @@ on_query_async( ), do_send_msg(async, KafkaMessage, Producers, AsyncReplyFn) catch + error:{invalid_partition_count, _Count, _Partitioner} -> + {error, invalid_partition_count}; throw:{bad_kafka_header, _} = Error -> ?tp( emqx_bridge_kafka_impl_producer_async_query_failed, @@ -517,9 +521,9 @@ on_get_channel_status( %% `?status_disconnected' will make resource manager try to restart the producers / %% connector, thus potentially dropping data held in wolff producer's replayq. The %% only exception is if the topic does not exist ("unhealthy target"). - #{kafka_topic := KafkaTopic} = maps:get(ChannelId, Channels), + #{kafka_topic := KafkaTopic, partitions_limit := MaxPartitions} = maps:get(ChannelId, Channels), try - ok = check_topic_and_leader_connections(ClientId, KafkaTopic), + ok = check_topic_and_leader_connections(ClientId, KafkaTopic, MaxPartitions), ?status_connected catch throw:{unhealthy_target, Msg} -> @@ -528,11 +532,11 @@ on_get_channel_status( {?status_connecting, {K, E}} end. -check_topic_and_leader_connections(ClientId, KafkaTopic) -> +check_topic_and_leader_connections(ClientId, KafkaTopic, MaxPartitions) -> case wolff_client_sup:find_client(ClientId) of {ok, Pid} -> ok = check_topic_status(ClientId, Pid, KafkaTopic), - ok = check_if_healthy_leaders(ClientId, Pid, KafkaTopic); + ok = check_if_healthy_leaders(ClientId, Pid, KafkaTopic, MaxPartitions); {error, no_such_client} -> throw(#{ reason => cannot_find_kafka_client, @@ -562,9 +566,9 @@ check_client_connectivity(ClientId) -> {error, {find_client, Reason}} end. 
-check_if_healthy_leaders(ClientId, ClientPid, KafkaTopic) when is_pid(ClientPid) -> +check_if_healthy_leaders(ClientId, ClientPid, KafkaTopic, MaxPartitions) when is_pid(ClientPid) -> Leaders = - case wolff_client:get_leader_connections(ClientPid, KafkaTopic) of + case wolff_client:get_leader_connections(ClientPid, KafkaTopic, MaxPartitions) of {ok, LeadersToCheck} -> %% Kafka is considered healthy as long as any of the partition leader is reachable. lists:filtermap( @@ -584,7 +588,8 @@ check_if_healthy_leaders(ClientId, ClientPid, KafkaTopic) when is_pid(ClientPid) throw(#{ error => no_connected_partition_leader, kafka_client => ClientId, - kafka_topic => KafkaTopic + kafka_topic => KafkaTopic, + partitions_limit => MaxPartitions }); _ -> ok @@ -619,6 +624,7 @@ producers_config(BridgeType, BridgeName, Input, IsDryRun, BridgeV2Id) -> required_acks := RequiredAcks, partition_count_refresh_interval := PCntRefreshInterval, max_inflight := MaxInflight, + partitions_limit := MaxPartitions, buffer := #{ mode := BufferMode0, per_partition_limit := PerPartitionLimit, @@ -652,7 +658,8 @@ producers_config(BridgeType, BridgeName, Input, IsDryRun, BridgeV2Id) -> max_batch_bytes => MaxBatchBytes, max_send_ahead => MaxInflight - 1, compression => Compression, - telemetry_meta_data => #{bridge_id => BridgeV2Id} + telemetry_meta_data => #{bridge_id => BridgeV2Id}, + max_partitions => MaxPartitions }. %% Wolff API is a batch API. diff --git a/apps/emqx_bridge_kinesis/BSL.txt b/apps/emqx_bridge_kinesis/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_kinesis/BSL.txt +++ b/apps/emqx_bridge_kinesis/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. 
-Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.app.src b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.app.src index 74d7dc94f..2e59fa8b2 100644 --- a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.app.src +++ b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.app.src @@ -1,13 +1,13 @@ {application, emqx_bridge_kinesis, [ {description, "EMQX Enterprise Amazon Kinesis Bridge"}, - {vsn, "0.1.3"}, + {vsn, "0.1.4"}, {registered, []}, {applications, [ kernel, stdlib, erlcloud ]}, - {env, []}, + {env, [{emqx_action_info_modules, [emqx_bridge_kinesis_action_info]}]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.erl b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.erl index 14e197113..6c273e217 100644 --- a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.erl +++ b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.erl @@ -3,6 +3,7 @@ %%-------------------------------------------------------------------- -module(emqx_bridge_kinesis). + -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). @@ -15,9 +16,14 @@ ]). -export([ - conn_bridge_examples/1 + bridge_v2_examples/1, + conn_bridge_examples/1, + connector_examples/1 ]). +-define(CONNECTOR_TYPE, kinesis). +-define(ACTION_TYPE, ?CONNECTOR_TYPE). + %%------------------------------------------------------------------------------------------------- %% `hocon_schema' API %%------------------------------------------------------------------------------------------------- @@ -28,6 +34,36 @@ namespace() -> roots() -> []. 
+fields(Field) when + Field == "get_connector"; + Field == "put_connector"; + Field == "post_connector" +-> + Fields = + fields(connector_config) ++ + emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts), + emqx_connector_schema:api_fields(Field, ?CONNECTOR_TYPE, Fields); +fields(action) -> + {?ACTION_TYPE, + hoconsc:mk( + hoconsc:map(name, hoconsc:ref(?MODULE, kinesis_action)), + #{ + desc => <<"Kinesis Action Config">>, + required => false + } + )}; +fields(action_parameters) -> + proplists:delete(local_topic, fields(producer)); +fields(kinesis_action) -> + emqx_bridge_v2_schema:make_producer_action_schema( + hoconsc:mk( + hoconsc:ref(?MODULE, action_parameters), + #{ + required => true, + desc => ?DESC("action_parameters") + } + ) + ); fields("config_producer") -> emqx_bridge_schema:common_bridge_fields() ++ fields("resource_opts") ++ @@ -105,13 +141,6 @@ fields(producer) -> desc => ?DESC("payload_template") } )}, - {local_topic, - sc( - binary(), - #{ - desc => ?DESC("local_topic") - } - )}, {stream_name, sc( binary(), @@ -128,33 +157,110 @@ fields(producer) -> desc => ?DESC("partition_key") } )} + ] ++ fields(local_topic); +fields(local_topic) -> + [ + {local_topic, + sc( + binary(), + #{ + desc => ?DESC("local_topic") + } + )} ]; fields("get_producer") -> emqx_bridge_schema:status_fields() ++ fields("post_producer"); fields("post_producer") -> [type_field_producer(), name_field() | fields("config_producer")]; fields("put_producer") -> - fields("config_producer"). + fields("config_producer"); +fields("config_connector") -> + emqx_connector_schema:common_fields() ++ + fields(connector_config) ++ + emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts); +fields(connector_resource_opts) -> + emqx_connector_schema:resource_opts_fields(); +fields(Field) when + Field == "get_bridge_v2"; + Field == "post_bridge_v2"; + Field == "put_bridge_v2" +-> + emqx_bridge_v2_schema:api_fields(Field, ?ACTION_TYPE, fields(kinesis_action)). 
desc("config_producer") -> ?DESC("desc_config"); desc("creation_opts") -> ?DESC(emqx_resource_schema, "creation_opts"); +desc("config_connector") -> + ?DESC("config_connector"); +desc(kinesis_action) -> + ?DESC("kinesis_action"); +desc(action_parameters) -> + ?DESC("action_parameters"); +desc(connector_resource_opts) -> + ?DESC(emqx_resource_schema, "resource_opts"); desc(_) -> undefined. -conn_bridge_examples(Method) -> +conn_bridge_examples(_Method) -> [ #{ <<"kinesis_producer">> => #{ summary => <<"Amazon Kinesis Producer Bridge">>, - value => values(producer, Method) + value => conn_bridge_values() } } ]. -values(producer, _Method) -> +connector_examples(Method) -> + [ + #{ + <<"kinesis">> => + #{ + summary => <<"Kinesis Connector">>, + value => emqx_connector_schema:connector_values( + Method, ?CONNECTOR_TYPE, connector_values() + ) + } + } + ]. + +connector_values() -> #{ + <<"aws_access_key_id">> => <<"your_access_key">>, + <<"aws_secret_access_key">> => <<"aws_secret_key">>, + <<"endpoint">> => <<"http://localhost:4566">>, + <<"max_retries">> => 2, + <<"pool_size">> => 8 + }. + +bridge_v2_examples(Method) -> + [ + #{ + <<"kinesis">> => + #{ + summary => <<"Kinesis Action">>, + value => emqx_bridge_v2_schema:action_values( + Method, ?ACTION_TYPE, ?CONNECTOR_TYPE, action_values() + ) + } + } + ]. + +action_values() -> + #{ + parameters => #{ + <<"partition_key">> => <<"any_key">>, + <<"payload_template">> => <<"${.}">>, + <<"stream_name">> => <<"my_stream">> + } + }. 
+ +conn_bridge_values() -> + #{ + enable => true, + type => kinesis_producer, + name => <<"foo">>, aws_access_key_id => <<"aws_access_key_id">>, aws_secret_access_key => <<"******">>, endpoint => <<"https://kinesis.us-east-1.amazonaws.com">>, diff --git a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_action_info.erl b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_action_info.erl new file mode 100644 index 000000000..7987315e4 --- /dev/null +++ b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_action_info.erl @@ -0,0 +1,22 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_kinesis_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +bridge_v1_type_name() -> kinesis_producer. + +action_type_name() -> kinesis. + +connector_type_name() -> kinesis. + +schema_module() -> emqx_bridge_kinesis. diff --git a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_connector_client.erl b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_connector_client.erl index 959b539a0..518a9b668 100644 --- a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_connector_client.erl +++ b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_connector_client.erl @@ -11,9 +11,7 @@ -behaviour(gen_server). -type state() :: #{ - instance_id := resource_id(), - partition_key := binary(), - stream_name := binary() + instance_id := resource_id() }. -type record() :: {Data :: binary(), PartitionKey :: binary()}. @@ -23,7 +21,8 @@ -export([ start_link/1, connection_status/1, - query/2 + connection_status/2, + query/3 ]). %% gen_server callbacks @@ -56,8 +55,16 @@ connection_status(Pid) -> {error, timeout} end. -query(Pid, Records) -> - gen_server:call(Pid, {query, Records}, infinity). 
+connection_status(Pid, StreamName) -> + try + gen_server:call(Pid, {connection_status, StreamName}, ?HEALTH_CHECK_TIMEOUT) + catch + _:_ -> + {error, timeout} + end. + +query(Pid, Records, StreamName) -> + gen_server:call(Pid, {query, Records, StreamName}, infinity). %%-------------------------------------------------------------------- %% @doc @@ -72,13 +79,12 @@ start_link(Options) -> %%%=================================================================== %% Initialize kinesis connector --spec init(emqx_bridge_kinesis_impl_producer:config()) -> {ok, state()}. +-spec init(emqx_bridge_kinesis_impl_producer:config_connector()) -> + {ok, state()} | {stop, Reason :: term()}. init(#{ aws_access_key_id := AwsAccessKey, aws_secret_access_key := AwsSecretAccessKey, endpoint := Endpoint, - partition_key := PartitionKey, - stream_name := StreamName, max_retries := MaxRetries, instance_id := InstanceId }) -> @@ -93,9 +99,7 @@ init(#{ } ), State = #{ - instance_id => InstanceId, - partition_key => PartitionKey, - stream_name => StreamName + instance_id => InstanceId }, %% TODO: teach `erlcloud` to to accept 0-arity closures as passwords. ok = erlcloud_config:configure( @@ -124,18 +128,19 @@ init(#{ {stop, Reason} end. 
-handle_call(connection_status, _From, #{stream_name := StreamName} = State) -> +handle_call({connection_status, StreamName}, _From, State) -> + Status = get_status(StreamName), + {reply, Status, State}; +handle_call(connection_status, _From, State) -> Status = - case erlcloud_kinesis:describe_stream(StreamName) of - {ok, _} -> + case erlcloud_kinesis:list_streams() of + {ok, _ListStreamsResult} -> {ok, connected}; - {error, {<<"ResourceNotFoundException">>, _}} -> - {error, unhealthy_target}; Error -> {error, Error} end, {reply, Status, State}; -handle_call({query, Records}, _From, #{stream_name := StreamName} = State) -> +handle_call({query, Records, StreamName}, _From, State) -> Result = do_query(StreamName, Records), {reply, Result, State}; handle_call(_Request, _From, State) -> @@ -158,6 +163,16 @@ code_change(_OldVsn, State, _Extra) -> %%% Internal functions %%%=================================================================== +get_status(StreamName) -> + case erlcloud_kinesis:describe_stream(StreamName) of + {ok, _} -> + {ok, connected}; + {error, {<<"ResourceNotFoundException">>, _}} -> + {error, unhealthy_target}; + Error -> + {error, Error} + end. + -spec do_query(binary(), [record()]) -> {ok, jsx:json_term() | binary()} | {error, {unrecoverable_error, term()}} diff --git a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_impl_producer.erl b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_impl_producer.erl index decf3e83b..10049a2a9 100644 --- a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_impl_producer.erl +++ b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_impl_producer.erl @@ -13,27 +13,20 @@ "Kinesis stream is invalid. Please check if the stream exist in Kinesis account." ). 
--type config() :: #{ +-type config_connector() :: #{ aws_access_key_id := binary(), aws_secret_access_key := emqx_secret:t(binary()), endpoint := binary(), - stream_name := binary(), - partition_key := binary(), - payload_template := binary(), max_retries := non_neg_integer(), pool_size := non_neg_integer(), instance_id => resource_id(), any() => term() }. --type templates() :: #{ - partition_key := list(), - send_message := list() -}. -type state() :: #{ pool_name := resource_id(), - templates := templates() + installed_channels := map() }. --export_type([config/0]). +-export_type([config_connector/0]). %% `emqx_resource' API -export([ @@ -42,7 +35,11 @@ on_stop/2, on_query/3, on_batch_query/3, - on_get_status/2 + on_get_status/2, + on_add_channel/4, + on_remove_channel/3, + on_get_channels/1, + on_get_channel_status/3 ]). -export([ @@ -55,7 +52,7 @@ callback_mode() -> always_sync. --spec on_start(resource_id(), config()) -> {ok, state()} | {error, term()}. +-spec on_start(resource_id(), config_connector()) -> {ok, state()} | {error, term()}. on_start( InstanceId, #{ @@ -72,10 +69,9 @@ on_start( {config, Config}, {pool_size, PoolSize} ], - Templates = parse_template(Config), State = #{ pool_name => InstanceId, - templates => Templates + installed_channels => #{} }, case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of @@ -92,7 +88,9 @@ on_stop(InstanceId, _State) -> emqx_resource_pool:stop(InstanceId). -spec on_get_status(resource_id(), state()) -> - connected | disconnected | {disconnected, state(), {unhealthy_target, string()}}. + ?status_connected + | ?status_disconnected + | {?status_disconnected, state(), {unhealthy_target, string()}}. 
on_get_status(_InstanceId, #{pool_name := Pool} = State) -> case emqx_resource_pool:health_check_workers( @@ -103,15 +101,15 @@ on_get_status(_InstanceId, #{pool_name := Pool} = State) -> ) of {ok, Values} -> - AllOk = lists:all(fun(S) -> S =:= {ok, connected} end, Values), + AllOk = lists:all(fun(S) -> S =:= {ok, ?status_connected} end, Values), case AllOk of true -> - connected; + ?status_connected; false -> Unhealthy = lists:any(fun(S) -> S =:= {error, unhealthy_target} end, Values), case Unhealthy of - true -> {disconnected, State, {unhealthy_target, ?TOPIC_MESSAGE}}; - false -> disconnected + true -> {?status_disconnected, State, {unhealthy_target, ?TOPIC_MESSAGE}}; + false -> ?status_disconnected end end; {error, Reason} -> @@ -120,34 +118,114 @@ on_get_status(_InstanceId, #{pool_name := Pool} = State) -> state => State, reason => Reason }), - disconnected + ?status_disconnected end. +on_add_channel( + _InstId, + #{ + installed_channels := InstalledChannels + } = OldState, + ChannelId, + ChannelConfig +) -> + {ok, ChannelState} = create_channel_state(ChannelConfig), + NewInstalledChannels = maps:put(ChannelId, ChannelState, InstalledChannels), + %% Update state + NewState = OldState#{installed_channels => NewInstalledChannels}, + {ok, NewState}. + +create_channel_state( + #{parameters := Parameters} = _ChannelConfig +) -> + #{ + stream_name := StreamName, + partition_key := PartitionKey + } = Parameters, + {ok, #{ + templates => parse_template(Parameters), + stream_name => StreamName, + partition_key => PartitionKey + }}. + +on_remove_channel( + _InstId, + #{ + installed_channels := InstalledChannels + } = OldState, + ChannelId +) -> + NewInstalledChannels = maps:remove(ChannelId, InstalledChannels), + %% Update state + NewState = OldState#{installed_channels => NewInstalledChannels}, + {ok, NewState}. 
+ +on_get_channel_status( + _ResId, + ChannelId, + #{ + pool_name := PoolName, + installed_channels := Channels + } = State +) -> + #{stream_name := StreamName} = maps:get(ChannelId, Channels), + case + emqx_resource_pool:health_check_workers( + PoolName, + {emqx_bridge_kinesis_connector_client, connection_status, [StreamName]}, + ?HEALTH_CHECK_TIMEOUT, + #{return_values => true} + ) + of + {ok, Values} -> + AllOk = lists:all(fun(S) -> S =:= {ok, ?status_connected} end, Values), + case AllOk of + true -> + ?status_connected; + false -> + Unhealthy = lists:any(fun(S) -> S =:= {error, unhealthy_target} end, Values), + case Unhealthy of + true -> {?status_disconnected, {unhealthy_target, ?TOPIC_MESSAGE}}; + false -> ?status_disconnected + end + end; + {error, Reason} -> + ?SLOG(error, #{ + msg => "kinesis_producer_get_status_failed", + state => State, + reason => Reason + }), + ?status_disconnected + end. + +on_get_channels(ResId) -> + emqx_bridge_v2:get_channels_for_connector(ResId). + -spec on_query( resource_id(), - {send_message, map()}, + {channel_id(), map()}, state() ) -> {ok, map()} | {error, {recoverable_error, term()}} | {error, term()}. -on_query(ResourceId, {send_message, Message}, State) -> - Requests = [{send_message, Message}], +on_query(ResourceId, {ChannelId, Message}, State) -> + Requests = [{ChannelId, Message}], ?tp(emqx_bridge_kinesis_impl_producer_sync_query, #{message => Message}), - do_send_requests_sync(ResourceId, Requests, State). + do_send_requests_sync(ResourceId, Requests, State, ChannelId). -spec on_batch_query( resource_id(), - [{send_message, map()}], + [{channel_id(), map()}], state() ) -> {ok, map()} | {error, {recoverable_error, term()}} | {error, term()}. 
%% we only support batch insert -on_batch_query(ResourceId, [{send_message, _} | _] = Requests, State) -> +on_batch_query(ResourceId, [{ChannelId, _} | _] = Requests, State) -> ?tp(emqx_bridge_kinesis_impl_producer_sync_batch_query, #{requests => Requests}), - do_send_requests_sync(ResourceId, Requests, State). + do_send_requests_sync(ResourceId, Requests, State, ChannelId). connect(Opts) -> Options = proplists:get_value(config, Opts), @@ -159,8 +237,9 @@ connect(Opts) -> -spec do_send_requests_sync( resource_id(), - [{send_message, map()}], - state() + [{channel_id(), map()}], + state(), + channel_id() ) -> {ok, jsx:json_term() | binary()} | {error, {recoverable_error, term()}} @@ -171,12 +250,20 @@ connect(Opts) -> do_send_requests_sync( InstanceId, Requests, - #{pool_name := PoolName, templates := Templates} + #{ + pool_name := PoolName, + installed_channels := InstalledChannels + } = _State, + ChannelId ) -> + #{ + templates := Templates, + stream_name := StreamName + } = maps:get(ChannelId, InstalledChannels), Records = render_records(Requests, Templates), Result = ecpool:pick_and_do( PoolName, - {emqx_bridge_kinesis_connector_client, query, [Records]}, + {emqx_bridge_kinesis_connector_client, query, [Records, StreamName]}, no_handover ), handle_result(Result, Requests, InstanceId). @@ -239,7 +326,7 @@ render_records(Items, Templates) -> render_messages([], _Templates, RenderedMsgs) -> RenderedMsgs; render_messages( - [{send_message, Msg} | Others], + [{_, Msg} | Others], {MsgTemplate, PartitionKeyTemplate} = Templates, RenderedMsgs ) -> diff --git a/apps/emqx_bridge_kinesis/test/emqx_bridge_kinesis_impl_producer_SUITE.erl b/apps/emqx_bridge_kinesis/test/emqx_bridge_kinesis_impl_producer_SUITE.erl index 61b354ea3..04a084462 100644 --- a/apps/emqx_bridge_kinesis/test/emqx_bridge_kinesis_impl_producer_SUITE.erl +++ b/apps/emqx_bridge_kinesis/test/emqx_bridge_kinesis_impl_producer_SUITE.erl @@ -13,6 +13,7 @@ -define(BRIDGE_TYPE, kinesis_producer). 
-define(BRIDGE_TYPE_BIN, <<"kinesis_producer">>). +-define(BRIDGE_V2_TYPE_BIN, <<"kinesis">>). -define(KINESIS_PORT, 4566). -define(KINESIS_ACCESS_KEY, "aws_access_key_id"). -define(KINESIS_SECRET_KEY, "aws_secret_access_key"). @@ -48,7 +49,7 @@ init_per_suite(Config) -> [ {proxy_host, ProxyHost}, {proxy_port, ProxyPort}, - {kinesis_port, ?KINESIS_PORT}, + {kinesis_port, list_to_integer(os:getenv("KINESIS_PORT", integer_to_list(?KINESIS_PORT)))}, {kinesis_secretfile, SecretFile}, {proxy_name, ProxyName} | Config @@ -116,7 +117,7 @@ generate_config(Config0) -> } ), ErlcloudConfig = erlcloud_kinesis:new("access_key", "secret", Host, Port, Scheme ++ "://"), - ResourceId = emqx_bridge_resource:resource_id(?BRIDGE_TYPE_BIN, Name), + ResourceId = connector_resource_id(?BRIDGE_V2_TYPE_BIN, Name), BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, Name), [ {kinesis_name, Name}, @@ -129,6 +130,9 @@ generate_config(Config0) -> | Config0 ]. +connector_resource_id(BridgeType, Name) -> + <<"connector:", BridgeType/binary, ":", Name/binary>>. + kinesis_config(Config) -> QueryMode = proplists:get_value(query_mode, Config, async), Scheme = proplists:get_value(connection_scheme, Config, "http"), @@ -505,7 +509,7 @@ t_start_failed_then_fix(Config) -> ProxyPort = ?config(proxy_port, Config), ProxyHost = ?config(proxy_host, Config), ProxyName = ?config(proxy_name, Config), - ResourceId = ?config(resource_id, Config), + Name = ?config(kinesis_name, Config), emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> ct:sleep(1000), ?wait_async_action( @@ -517,7 +521,7 @@ t_start_failed_then_fix(Config) -> ?retry( _Sleep1 = 1_000, _Attempts1 = 30, - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ?assertMatch(#{status := connected}, emqx_bridge_v2:health_check(?BRIDGE_V2_TYPE_BIN, Name)) ), ok. @@ -538,40 +542,58 @@ t_stop(Config) -> ok. 
t_get_status_ok(Config) -> - ResourceId = ?config(resource_id, Config), + Name = ?config(kinesis_name, Config), {ok, _} = create_bridge(Config), - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)), + ?assertMatch(#{status := connected}, emqx_bridge_v2:health_check(?BRIDGE_V2_TYPE_BIN, Name)), ok. t_create_unhealthy(Config) -> delete_stream(Config), - ResourceId = ?config(resource_id, Config), + Name = ?config(kinesis_name, Config), {ok, _} = create_bridge(Config), - ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)), ?assertMatch( - {ok, _, #{error := {unhealthy_target, _}}}, - emqx_resource_manager:lookup_cached(ResourceId) + #{ + status := disconnected, + error := {unhealthy_target, _} + }, + emqx_bridge_v2:health_check(?BRIDGE_V2_TYPE_BIN, Name) ), ok. t_get_status_unhealthy(Config) -> - delete_stream(Config), - ResourceId = ?config(resource_id, Config), + Name = ?config(kinesis_name, Config), {ok, _} = create_bridge(Config), - ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)), ?assertMatch( - {ok, _, #{error := {unhealthy_target, _}}}, - emqx_resource_manager:lookup_cached(ResourceId) + #{ + status := connected + }, + emqx_bridge_v2:health_check(?BRIDGE_V2_TYPE_BIN, Name) + ), + delete_stream(Config), + ?retry( + 100, + 100, + fun() -> + ?assertMatch( + #{ + status := disconnected, + error := {unhealthy_target, _} + }, + emqx_bridge_v2:health_check(?BRIDGE_V2_TYPE_BIN, Name) + ) + end ), ok. 
t_publish_success(Config) -> ResourceId = ?config(resource_id, Config), TelemetryTable = ?config(telemetry_table, Config), + Name = ?config(kinesis_name, Config), ?assertMatch({ok, _}, create_bridge(Config)), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - assert_empty_metrics(ResourceId), + ActionId = emqx_bridge_v2:id(?BRIDGE_V2_TYPE_BIN, Name), + assert_empty_metrics(ActionId), ShardIt = get_shard_iterator(Config), Payload = <<"payload">>, Message = emqx_message:make(?TOPIC, Payload), @@ -590,7 +612,7 @@ t_publish_success(Config) -> retried => 0, success => 1 }, - ResourceId + ActionId ), Record = wait_record(Config, ShardIt, 100, 10), ?assertEqual(Payload, proplists:get_value(<<"Data">>, Record)), @@ -599,6 +621,7 @@ t_publish_success(Config) -> t_publish_success_with_template(Config) -> ResourceId = ?config(resource_id, Config), TelemetryTable = ?config(telemetry_table, Config), + Name = ?config(kinesis_name, Config), Overrides = #{ <<"payload_template">> => <<"${payload.data}">>, @@ -607,7 +630,8 @@ t_publish_success_with_template(Config) -> ?assertMatch({ok, _}, create_bridge(Config, Overrides)), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - assert_empty_metrics(ResourceId), + ActionId = emqx_bridge_v2:id(?BRIDGE_V2_TYPE_BIN, Name), + assert_empty_metrics(ActionId), ShardIt = get_shard_iterator(Config), Payload = <<"{\"key\":\"my_key\", \"data\":\"my_data\"}">>, Message = emqx_message:make(?TOPIC, Payload), @@ -626,7 +650,7 @@ t_publish_success_with_template(Config) -> retried => 0, success => 1 }, - ResourceId + ActionId ), Record = wait_record(Config, ShardIt, 100, 10), ?assertEqual(<<"my_data">>, proplists:get_value(<<"Data">>, Record)), @@ -635,10 +659,12 @@ t_publish_success_with_template(Config) -> 
t_publish_multiple_msgs_success(Config) -> ResourceId = ?config(resource_id, Config), TelemetryTable = ?config(telemetry_table, Config), + Name = ?config(kinesis_name, Config), ?assertMatch({ok, _}, create_bridge(Config)), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - assert_empty_metrics(ResourceId), + ActionId = emqx_bridge_v2:id(?BRIDGE_V2_TYPE_BIN, Name), + assert_empty_metrics(ActionId), ShardIt = get_shard_iterator(Config), lists:foreach( fun(I) -> @@ -675,17 +701,19 @@ t_publish_multiple_msgs_success(Config) -> retried => 0, success => 10 }, - ResourceId + ActionId ), ok. t_publish_unhealthy(Config) -> ResourceId = ?config(resource_id, Config), TelemetryTable = ?config(telemetry_table, Config), + Name = ?config(kinesis_name, Config), ?assertMatch({ok, _}, create_bridge(Config)), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - assert_empty_metrics(ResourceId), + ActionId = emqx_bridge_v2:id(?BRIDGE_V2_TYPE_BIN, Name), + assert_empty_metrics(ActionId), ShardIt = get_shard_iterator(Config), Payload = <<"payload">>, Message = emqx_message:make(?TOPIC, Payload), @@ -709,22 +737,26 @@ t_publish_unhealthy(Config) -> retried => 0, success => 0 }, - ResourceId + ActionId ), - ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)), ?assertMatch( - {ok, _, #{error := {unhealthy_target, _}}}, - emqx_resource_manager:lookup_cached(ResourceId) + #{ + status := disconnected, + error := {unhealthy_target, _} + }, + emqx_bridge_v2:health_check(?BRIDGE_V2_TYPE_BIN, Name) ), ok. 
t_publish_big_msg(Config) -> ResourceId = ?config(resource_id, Config), TelemetryTable = ?config(telemetry_table, Config), + Name = ?config(kinesis_name, Config), ?assertMatch({ok, _}, create_bridge(Config)), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - assert_empty_metrics(ResourceId), + ActionId = emqx_bridge_v2:id(?BRIDGE_V2_TYPE_BIN, Name), + assert_empty_metrics(ActionId), % Maximum size is 1MB. Using 1MB + 1 here. Payload = binary:copy(<<"a">>, 1 * 1024 * 1024 + 1), Message = emqx_message:make(?TOPIC, Payload), @@ -743,7 +775,7 @@ t_publish_big_msg(Config) -> retried => 0, success => 0 }, - ResourceId + ActionId ), ok. @@ -754,15 +786,20 @@ t_publish_connection_down(Config0) -> ProxyName = ?config(proxy_name, Config), ResourceId = ?config(resource_id, Config), TelemetryTable = ?config(telemetry_table, Config), + Name = ?config(kinesis_name, Config), ?assertMatch({ok, _}, create_bridge(Config)), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), ?retry( _Sleep1 = 1_000, _Attempts1 = 30, - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ?assertMatch( + #{status := connected}, + emqx_bridge_v2:health_check(?BRIDGE_V2_TYPE_BIN, Name) + ) ), emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - assert_empty_metrics(ResourceId), + ActionId = emqx_bridge_v2:id(?BRIDGE_V2_TYPE_BIN, Name), + assert_empty_metrics(ActionId), ShardIt = get_shard_iterator(Config), Payload = <<"payload">>, Message = emqx_message:make(?TOPIC, Payload), @@ -784,7 +821,10 @@ t_publish_connection_down(Config0) -> ?retry( _Sleep3 = 1_000, _Attempts3 = 20, - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ?assertMatch( + #{status := connected}, + emqx_bridge_v2:health_check(?BRIDGE_V2_TYPE_BIN, Name) + ) ), Record = wait_record(Config, ShardIt, 2000, 10), %% to avoid 
test flakiness @@ -802,7 +842,7 @@ t_publish_connection_down(Config0) -> success => 1, retried_success => 1 }, - ResourceId + ActionId ), Data = proplists:get_value(<<"Data">>, Record), ?assertEqual(Payload, Data), @@ -880,9 +920,11 @@ t_empty_payload_template(Config) -> ResourceId = ?config(resource_id, Config), TelemetryTable = ?config(telemetry_table, Config), Removes = [<<"payload_template">>], + Name = ?config(kinesis_name, Config), ?assertMatch({ok, _}, create_bridge(Config, #{}, Removes)), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), + ActionId = emqx_bridge_v2:id(?BRIDGE_V2_TYPE_BIN, Name), assert_empty_metrics(ResourceId), ShardIt = get_shard_iterator(Config), Payload = <<"payload">>, @@ -902,7 +944,7 @@ t_empty_payload_template(Config) -> retried => 0, success => 1 }, - ResourceId + ActionId ), Record = wait_record(Config, ShardIt, 100, 10), Data = proplists:get_value(<<"Data">>, Record), diff --git a/apps/emqx_bridge_matrix/BSL.txt b/apps/emqx_bridge_matrix/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_matrix/BSL.txt +++ b/apps/emqx_bridge_matrix/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. -Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_mongodb/BSL.txt b/apps/emqx_bridge_mongodb/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_mongodb/BSL.txt +++ b/apps/emqx_bridge_mongodb/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. 
-Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_mongodb/test/emqx_bridge_v2_mongodb_SUITE.erl b/apps/emqx_bridge_mongodb/test/emqx_bridge_v2_mongodb_SUITE.erl index 879b1d375..991d78ff1 100644 --- a/apps/emqx_bridge_mongodb/test/emqx_bridge_v2_mongodb_SUITE.erl +++ b/apps/emqx_bridge_mongodb/test/emqx_bridge_v2_mongodb_SUITE.erl @@ -197,10 +197,7 @@ serde_roundtrip(InnerConfigMap0) -> InnerConfigMap. parse_and_check_bridge_config(InnerConfigMap, Name) -> - TypeBin = ?BRIDGE_TYPE_BIN, - RawConf = #{<<"bridges">> => #{TypeBin => #{Name => InnerConfigMap}}}, - hocon_tconf:check_plain(emqx_bridge_v2_schema, RawConf, #{required => false, atom_key => false}), - InnerConfigMap. + emqx_bridge_v2_testlib:parse_and_check(?BRIDGE_TYPE_BIN, Name, InnerConfigMap). shared_secret_path() -> os:getenv("CI_SHARED_SECRET_PATH", "/var/lib/secret"). diff --git a/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt.app.src b/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt.app.src index 0c00a0d59..6fbc0edde 100644 --- a/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt.app.src +++ b/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_bridge_mqtt, [ {description, "EMQX MQTT Broker Bridge"}, - {vsn, "0.1.8"}, + {vsn, "0.1.9"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt_msg.erl b/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt_msg.erl index 48cae70d7..e09866429 100644 --- a/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt_msg.erl +++ b/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt_msg.erl @@ -16,6 +16,8 @@ -module(emqx_bridge_mqtt_msg). +-include_lib("emqx/include/emqx_mqtt.hrl"). + -export([parse/1]). -export([render/2]). 
@@ -66,8 +68,8 @@ render( #{ topic => render_string(TopicToken, Msg), payload => render_payload(Vars, Msg), - qos => render_simple_var(QoSToken, Msg), - retain => render_simple_var(RetainToken, Msg) + qos => render_simple_var(QoSToken, Msg, ?QOS_0), + retain => render_simple_var(RetainToken, Msg, false) }. render_payload(From, MapMsg) -> @@ -80,16 +82,23 @@ do_render_payload(Tks, Msg) -> %% Replace a string contains vars to another string in which the placeholders are replace by the %% corresponding values. For example, given "a: ${var}", if the var=1, the result string will be: -%% "a: 1". +%% "a: 1". Undefined vars will be replaced by empty strings. render_string(Tokens, Data) when is_list(Tokens) -> - emqx_placeholder:proc_tmpl(Tokens, Data, #{return => full_binary}); + emqx_placeholder:proc_tmpl(Tokens, Data, #{ + return => full_binary, var_trans => fun undefined_as_empty/1 + }); render_string(Val, _Data) -> Val. +undefined_as_empty(undefined) -> + <<>>; +undefined_as_empty(Val) -> + emqx_utils_conv:bin(Val). + %% Replace a simple var to its value. For example, given "${var}", if the var=1, then the result %% value will be an integer 1. -render_simple_var(Tokens, Data) when is_list(Tokens) -> +render_simple_var(Tokens, Data, Default) when is_list(Tokens) -> [Var] = emqx_placeholder:proc_tmpl(Tokens, Data, #{return => rawlist}), - Var; -render_simple_var(Val, _Data) -> + emqx_maybe:define(Var, Default); +render_simple_var(Val, _Data, _Default) -> Val. diff --git a/apps/emqx_bridge_mqtt/test/emqx_bridge_mqtt_SUITE.erl b/apps/emqx_bridge_mqtt/test/emqx_bridge_mqtt_SUITE.erl index 807fba3c9..c6850ab8e 100644 --- a/apps/emqx_bridge_mqtt/test/emqx_bridge_mqtt_SUITE.erl +++ b/apps/emqx_bridge_mqtt/test/emqx_bridge_mqtt_SUITE.erl @@ -836,6 +836,40 @@ t_egress_mqtt_bridge_with_rules(_) -> {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []), {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []). 
+t_egress_mqtt_bridge_with_dummy_rule(_) -> + BridgeIDEgress = create_bridge( + ?SERVER_CONF#{ + <<"name">> => ?BRIDGE_NAME_EGRESS, + <<"egress">> => ?EGRESS_CONF + } + ), + + {ok, 201, Rule} = request( + post, + uri(["rules"]), + #{ + <<"name">> => <<"A_rule_send_empty_messages_to_a_sink_mqtt_bridge">>, + <<"enable">> => true, + <<"actions">> => [BridgeIDEgress], + %% select something useless from what a message cannot be composed + <<"sql">> => <<"SELECT x from \"t/1\"">> + } + ), + #{<<"id">> := RuleId} = emqx_utils_json:decode(Rule), + + %% PUBLISH a message to the rule. + Payload = <<"hi">>, + RuleTopic = <<"t/1">>, + RemoteTopic = <>, + emqx:subscribe(RemoteTopic), + timer:sleep(100), + emqx:publish(emqx_message:make(RuleTopic, Payload)), + %% we should receive a message on the "remote" broker, with specified topic + assert_mqtt_msg_received(RemoteTopic, <<>>), + + {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []), + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []). + t_mqtt_conn_bridge_egress_reconnect(_) -> %% then we add a mqtt connector, using POST BridgeIDEgress = create_bridge( diff --git a/apps/emqx_bridge_mqtt/test/emqx_bridge_mqtt_v2_subscriber_SUITE.erl b/apps/emqx_bridge_mqtt/test/emqx_bridge_mqtt_v2_subscriber_SUITE.erl index 62e0e4f51..b9097b9c3 100644 --- a/apps/emqx_bridge_mqtt/test/emqx_bridge_mqtt_v2_subscriber_SUITE.erl +++ b/apps/emqx_bridge_mqtt/test/emqx_bridge_mqtt_v2_subscriber_SUITE.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -77,6 +77,7 @@ init_per_testcase(TestCase, Config) -> ]. 
end_per_testcase(_TestCase, _Config) -> + snabbkaffe:stop(), emqx_common_test_helpers:call_janitor(), emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(), ok. diff --git a/apps/emqx_bridge_mysql/BSL.txt b/apps/emqx_bridge_mysql/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_mysql/BSL.txt +++ b/apps/emqx_bridge_mysql/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. -Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_opents/BSL.txt b/apps/emqx_bridge_opents/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_opents/BSL.txt +++ b/apps/emqx_bridge_opents/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. 
-Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src b/apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src index 5e3b2f585..2469acaa8 100644 --- a/apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_opents, [ {description, "EMQX Enterprise OpenTSDB Bridge"}, - {vsn, "0.1.3"}, + {vsn, "0.1.4"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl b/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl index cfb12453d..d38ed8eb4 100644 --- a/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl @@ -1,16 +1,19 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- -module(emqx_bridge_opents). +-include_lib("emqx/include/logger.hrl"). -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). -include_lib("emqx_resource/include/emqx_resource.hrl"). --import(hoconsc, [mk/2, enum/1, ref/2]). +-import(hoconsc, [mk/2, enum/1, ref/2, array/1]). -export([ - conn_bridge_examples/1 + conn_bridge_examples/1, + bridge_v2_examples/1, + default_data_template/0 ]). -export([ @@ -20,8 +23,11 @@ desc/1 ]). +-define(CONNECTOR_TYPE, opents). +-define(ACTION_TYPE, ?CONNECTOR_TYPE). 
+ %% ------------------------------------------------------------------------------------------------- -%% api +%% v1 examples conn_bridge_examples(Method) -> [ #{ @@ -34,7 +40,7 @@ conn_bridge_examples(Method) -> values(_Method) -> #{ - enable => true, + enabledb => true, type => opents, name => <<"foo">>, server => <<"http://127.0.0.1:4242">>, @@ -50,7 +56,37 @@ values(_Method) -> }. %% ------------------------------------------------------------------------------------------------- -%% Hocon Schema Definitions +%% v2 examples +bridge_v2_examples(Method) -> + [ + #{ + <<"opents">> => #{ + summary => <<"OpenTSDB Action">>, + value => emqx_bridge_v2_schema:action_values( + Method, ?ACTION_TYPE, ?CONNECTOR_TYPE, action_values() + ) + } + } + ]. + +action_values() -> + #{ + parameters => #{ + data => default_data_template() + } + }. + +default_data_template() -> + [ + #{ + metric => <<"${metric}">>, + tags => <<"${tags}">>, + value => <<"${value}">> + } + ]. + +%% ------------------------------------------------------------------------------------------------- +%% V1 Schema Definitions namespace() -> "bridge_opents". roots() -> []. @@ -65,10 +101,115 @@ fields("post") -> fields("put") -> fields("config"); fields("get") -> - emqx_bridge_schema:status_fields() ++ fields("post"). 
+ emqx_bridge_schema:status_fields() ++ fields("post"); +%% ------------------------------------------------------------------------------------------------- +%% V2 Schema Definitions + +fields(action) -> + {opents, + mk( + hoconsc:map(name, ref(?MODULE, action_config)), + #{ + desc => <<"OpenTSDB Action Config">>, + required => false + } + )}; +fields(action_config) -> + emqx_bridge_v2_schema:make_producer_action_schema( + mk( + ref(?MODULE, action_parameters), + #{ + required => true, desc => ?DESC("action_parameters") + } + ) + ); +fields(action_parameters) -> + [ + {data, + mk( + array(ref(?MODULE, action_parameters_data)), + #{ + desc => ?DESC("action_parameters_data"), + default => [] + } + )} + ]; +fields(action_parameters_data) -> + TagsError = fun(Data) -> + ?SLOG(warning, #{ + msg => "invalid_tags_template", + path => "opents.parameters.data.tags", + data => Data + }), + false + end, + [ + {timestamp, + mk( + binary(), + #{ + desc => ?DESC("config_parameters_timestamp"), + required => false + } + )}, + {metric, + mk( + binary(), + #{ + required => true, + desc => ?DESC("config_parameters_metric") + } + )}, + {tags, + mk( + hoconsc:union([map(), binary()]), + #{ + required => true, + desc => ?DESC("config_parameters_tags"), + validator => fun + (Tmpl) when is_binary(Tmpl) -> + case emqx_placeholder:preproc_tmpl(Tmpl) of + [{var, _}] -> + true; + _ -> + TagsError(Tmpl) + end; + (Map) when is_map(Map) -> + case maps:size(Map) >= 1 of + true -> + true; + _ -> + TagsError(Map) + end; + (Any) -> + TagsError(Any) + end + } + )}, + {value, + mk( + hoconsc:union([integer(), float(), binary()]), + #{ + required => true, + desc => ?DESC("config_parameters_value") + } + )} + ]; +fields("post_bridge_v2") -> + emqx_bridge_schema:type_and_name_fields(enum([opents])) ++ fields(action_config); +fields("put_bridge_v2") -> + fields(action_config); +fields("get_bridge_v2") -> + emqx_bridge_schema:status_fields() ++ fields("post_bridge_v2"). 
desc("config") -> ?DESC("desc_config"); +desc(action_config) -> + ?DESC("desc_config"); +desc(action_parameters) -> + ?DESC("action_parameters"); +desc(action_parameters_data) -> + ?DESC("action_parameters_data"); desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> ["Configuration for OpenTSDB using `", string:to_upper(Method), "` method."]; desc(_) -> diff --git a/apps/emqx_bridge_opents/src/emqx_bridge_opents_action_info.erl b/apps/emqx_bridge_opents/src/emqx_bridge_opents_action_info.erl new file mode 100644 index 000000000..4c4c9568c --- /dev/null +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents_action_info.erl @@ -0,0 +1,71 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_opents_action_info). + +-behaviour(emqx_action_info). + +-elvis([{elvis_style, invalid_dynamic_call, disable}]). + +%% behaviour callbacks +-export([ + action_type_name/0, + bridge_v1_config_to_action_config/2, + bridge_v1_config_to_connector_config/1, + bridge_v1_type_name/0, + connector_action_config_to_bridge_v1_config/2, + connector_type_name/0, + schema_module/0 +]). + +-import(emqx_utils_conv, [bin/1]). + +-define(ACTION_TYPE, opents). +-define(SCHEMA_MODULE, emqx_bridge_opents). + +action_type_name() -> ?ACTION_TYPE. +bridge_v1_type_name() -> ?ACTION_TYPE. +connector_type_name() -> ?ACTION_TYPE. + +schema_module() -> ?SCHEMA_MODULE. + +connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) -> + MergedConfig = + emqx_utils_maps:deep_merge( + maps:without( + [<<"description">>, <<"local_topic">>, <<"connector">>, <<"data">>], + emqx_utils_maps:unindent(<<"parameters">>, ActionConfig) + ), + ConnectorConfig + ), + BridgeV1Keys = schema_keys("config"), + maps:with(BridgeV1Keys, MergedConfig). 
+ +bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName) -> + ActionTopLevelKeys = schema_keys(action_config), + ActionParametersKeys = schema_keys(action_parameters), + ActionKeys = ActionTopLevelKeys ++ ActionParametersKeys, + ActionConfig = make_config_map(ActionKeys, ActionParametersKeys, BridgeV1Config), + emqx_utils_maps:update_if_present( + <<"resource_opts">>, + fun emqx_bridge_v2_schema:project_to_actions_resource_opts/1, + ActionConfig#{<<"connector">> => ConnectorName} + ). + +bridge_v1_config_to_connector_config(BridgeV1Config) -> + ConnectorKeys = schema_keys(emqx_bridge_opents_connector, "config_connector"), + emqx_utils_maps:update_if_present( + <<"resource_opts">>, + fun emqx_connector_schema:project_to_connector_resource_opts/1, + maps:with(ConnectorKeys, BridgeV1Config) + ). + +make_config_map(PickKeys, IndentKeys, Config) -> + Conf0 = maps:with(PickKeys, Config#{<<"data">> => []}), + emqx_utils_maps:indent(<<"parameters">>, IndentKeys, Conf0). + +schema_keys(Name) -> + schema_keys(?SCHEMA_MODULE, Name). + +schema_keys(Mod, Name) -> + [bin(Key) || Key <- proplists:get_keys(Mod:fields(Name))]. diff --git a/apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl b/apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl index 9271abe15..af4cba951 100644 --- a/apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- -module(emqx_bridge_opents_connector). @@ -12,7 +12,7 @@ -include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include_lib("hocon/include/hoconsc.hrl"). --export([roots/0, fields/1]). +-export([namespace/0, roots/0, fields/1, desc/1]). 
%% `emqx_resource' API -export([ @@ -21,15 +21,25 @@ on_stop/2, on_query/3, on_batch_query/3, - on_get_status/2 + on_get_status/2, + on_add_channel/4, + on_remove_channel/3, + on_get_channels/1, + on_get_channel_status/3 ]). +-export([connector_examples/1]). + -export([connect/1]). -import(hoconsc, [mk/2, enum/1, ref/2]). +-define(CONNECTOR_TYPE, opents). + +namespace() -> "opents_connector". + %%===================================================================== -%% Hocon schema +%% V1 Hocon schema roots() -> [{config, #{type => hoconsc:ref(?MODULE, config)}}]. @@ -40,8 +50,61 @@ fields(config) -> {summary, mk(boolean(), #{default => true, desc => ?DESC("summary")})}, {details, mk(boolean(), #{default => false, desc => ?DESC("details")})}, {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1} + ]; +%%===================================================================== +%% V2 Hocon schema + +fields("config_connector") -> + emqx_connector_schema:common_fields() ++ + proplists_without([auto_reconnect], fields(config)) ++ + emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts); +fields(connector_resource_opts) -> + emqx_connector_schema:resource_opts_fields(); +fields("post") -> + emqx_connector_schema:type_and_name_fields(enum([opents])) ++ fields("config_connector"); +fields("put") -> + fields("config_connector"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"). + +desc(config) -> + ?DESC("desc_config"); +desc(connector_resource_opts) -> + ?DESC(emqx_resource_schema, "resource_opts"); +desc("config_connector") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for OpenTSDB using `", string:to_upper(Method), "` method."]; +desc(_) -> + undefined. + +proplists_without(Keys, List) -> + [El || El = {K, _} <- List, not lists:member(K, Keys)]. 
+ +%%===================================================================== +%% V2 examples +connector_examples(Method) -> + [ + #{ + <<"opents">> => + #{ + summary => <<"OpenTSDB Connector">>, + value => emqx_connector_schema:connector_values( + Method, ?CONNECTOR_TYPE, connector_example_values() + ) + } + } ]. +connector_example_values() -> + #{ + name => <<"opents_connector">>, + type => opents, + enable => true, + server => <<"http://localhost:4242/">>, + pool_size => 8 + }. + %%======================================================================================== %% `emqx_resource' API %%======================================================================================== @@ -56,8 +119,7 @@ on_start( server := Server, pool_size := PoolSize, summary := Summary, - details := Details, - resource_opts := #{batch_size := BatchSize} + details := Details } = Config ) -> ?SLOG(info, #{ @@ -70,11 +132,10 @@ on_start( {server, to_str(Server)}, {summary, Summary}, {details, Details}, - {max_batch_size, BatchSize}, {pool_size, PoolSize} ], - State = #{pool_name => InstanceId, server => Server}, + State = #{pool_name => InstanceId, server => Server, channels => #{}}, case opentsdb_connectivity(Server) of ok -> case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of @@ -93,6 +154,7 @@ on_stop(InstanceId, _State) -> msg => "stopping_opents_connector", connector => InstanceId }), + ?tp(opents_bridge_stopped, #{instance_id => InstanceId}), emqx_resource_pool:stop(InstanceId). on_query(InstanceId, Request, State) -> @@ -101,10 +163,14 @@ on_query(InstanceId, Request, State) -> on_batch_query( InstanceId, BatchReq, - State + #{channels := Channels} = State ) -> - Datas = [format_opentsdb_msg(Msg) || {_Key, Msg} <- BatchReq], - do_query(InstanceId, Datas, State). + case try_render_messages(BatchReq, Channels) of + {ok, Datas} -> + do_query(InstanceId, Datas, State); + Error -> + Error + end. 
on_get_status(_InstanceId, #{server := Server}) -> Result = @@ -117,6 +183,39 @@ on_get_status(_InstanceId, #{server := Server}) -> end, Result. +on_add_channel( + _InstanceId, + #{channels := Channels} = OldState, + ChannelId, + #{ + parameters := #{data := Data} = Parameter + } +) -> + case maps:is_key(ChannelId, Channels) of + true -> + {error, already_exists}; + _ -> + Channel = Parameter#{ + data := preproc_data_template(Data) + }, + Channels2 = Channels#{ChannelId => Channel}, + {ok, OldState#{channels := Channels2}} + end. + +on_remove_channel(_InstanceId, #{channels := Channels} = OldState, ChannelId) -> + {ok, OldState#{channels => maps:remove(ChannelId, Channels)}}. + +on_get_channels(InstanceId) -> + emqx_bridge_v2:get_channels_for_connector(InstanceId). + +on_get_channel_status(InstanceId, ChannelId, #{channels := Channels} = State) -> + case maps:is_key(ChannelId, Channels) of + true -> + on_get_status(InstanceId, State); + _ -> + {error, not_exists} + end. + %%======================================================================================== %% Helper fns %%======================================================================================== @@ -127,6 +226,9 @@ do_query(InstanceId, Query, #{pool_name := PoolName} = State) -> "opents_connector_received", #{connector => InstanceId, query => Query, state => State} ), + + ?tp(opents_bridge_on_query, #{instance_id => InstanceId}), + Result = ecpool:pick_and_do(PoolName, {opentsdb, put, [Query]}, no_handover), case Result of @@ -172,17 +274,112 @@ opentsdb_connectivity(Server) -> end, emqx_connector_lib:http_connectivity(SvrUrl, ?HTTP_CONNECT_TIMEOUT). 
-format_opentsdb_msg(Msg) -> - maps:with( - [ - timestamp, - metric, - tags, - value, - <<"timestamp">>, - <<"metric">>, - <<"tags">>, - <<"value">> - ], - Msg +try_render_messages([{ChannelId, _} | _] = BatchReq, Channels) -> + case maps:find(ChannelId, Channels) of + {ok, Channel} -> + {ok, + lists:foldl( + fun({_, Message}, Acc) -> + render_channel_message(Message, Channel, Acc) + end, + [], + BatchReq + )}; + _ -> + {error, {unrecoverable_error, {invalid_channel_id, ChannelId}}} + end. + +render_channel_message(Msg, #{data := DataList}, Acc) -> + RawOpts = #{return => rawlist, var_trans => fun(X) -> X end}, + lists:foldl( + fun(#{metric := MetricTk, tags := TagsTk, value := ValueTk} = Data, InAcc) -> + MetricVal = emqx_placeholder:proc_tmpl(MetricTk, Msg), + + TagsVal = + case TagsTk of + [tags | TagTkList] -> + maps:from_list([ + { + emqx_placeholder:proc_tmpl(TagName, Msg), + emqx_placeholder:proc_tmpl(TagValue, Msg) + } + || {TagName, TagValue} <- TagTkList + ]); + TagsTks -> + case emqx_placeholder:proc_tmpl(TagsTks, Msg, RawOpts) of + [undefined] -> + #{}; + [Any] -> + Any + end + end, + + ValueVal = + case ValueTk of + [_] -> + %% just one element, maybe is a variable or a plain text + %% we should keep it as it is + erlang:hd(emqx_placeholder:proc_tmpl(ValueTk, Msg, RawOpts)); + Tks when is_list(Tks) -> + emqx_placeholder:proc_tmpl(Tks, Msg); + Raw -> + %% not a token list, just a raw value + Raw + end, + Base = #{metric => MetricVal, tags => TagsVal, value => ValueVal}, + [ + case maps:get(timestamp, Data, undefined) of + undefined -> + Base; + TimestampTk -> + Base#{timestamp => emqx_placeholder:proc_tmpl(TimestampTk, Msg)} + end + | InAcc + ] + end, + Acc, + DataList + ). 
+ +preproc_data_template([]) -> + preproc_data_template(emqx_bridge_opents:default_data_template()); +preproc_data_template(DataList) -> + lists:map( + fun(#{tags := Tags, value := Value} = Data) -> + Data2 = maps:without([tags, value], Data), + Template = maps:map( + fun(_Key, Val) -> + emqx_placeholder:preproc_tmpl(Val) + end, + Data2 + ), + + TagsTk = + case Tags of + Tmpl when is_binary(Tmpl) -> + emqx_placeholder:preproc_tmpl(Tmpl); + Map when is_map(Map) -> + [ + tags + | [ + { + emqx_placeholder:preproc_tmpl(emqx_utils_conv:bin(TagName)), + emqx_placeholder:preproc_tmpl(TagValue) + } + || {TagName, TagValue} <- maps:to_list(Map) + ] + ] + end, + + ValueTk = + case Value of + Text when is_binary(Text) -> + emqx_placeholder:preproc_tmpl(Text); + Raw -> + Raw + end, + + Template#{tags => TagsTk, value => ValueTk} + end, + DataList ). diff --git a/apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl b/apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl index 3632ce786..23d5ee077 100644 --- a/apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl +++ b/apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- -module(emqx_bridge_opents_SUITE). @@ -12,7 +12,8 @@ -include_lib("snabbkaffe/include/snabbkaffe.hrl"). % DB defaults --define(BATCH_SIZE, 10). +-define(BRIDGE_TYPE_BIN, <<"opents">>). +-define(APPS, [opentsdb, emqx_bridge, emqx_resource, emqx_rule_engine, emqx_bridge_opents_SUITE]). %%------------------------------------------------------------------------------ %% CT boilerplate @@ -20,95 +21,34 @@ all() -> [ - {group, with_batch}, - {group, without_batch} + {group, default} ]. 
groups() -> - TCs = emqx_common_test_helpers:all(?MODULE), + AllTCs = emqx_common_test_helpers:all(?MODULE), [ - {with_batch, TCs}, - {without_batch, TCs} + {default, AllTCs} ]. -init_per_group(with_batch, Config0) -> - Config = [{batch_size, ?BATCH_SIZE} | Config0], - common_init(Config); -init_per_group(without_batch, Config0) -> - Config = [{batch_size, 1} | Config0], - common_init(Config); -init_per_group(_Group, Config) -> - Config. - -end_per_group(Group, Config) when Group =:= with_batch; Group =:= without_batch -> - ProxyHost = ?config(proxy_host, Config), - ProxyPort = ?config(proxy_port, Config), - emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), - ok; -end_per_group(_Group, _Config) -> - ok. - init_per_suite(Config) -> - Config. + emqx_bridge_v2_testlib:init_per_suite(Config, ?APPS). -end_per_suite(_Config) -> - emqx_mgmt_api_test_util:end_suite(), - ok = emqx_common_test_helpers:stop_apps([opentsdb, emqx_bridge, emqx_resource, emqx_conf]), - ok. +end_per_suite(Config) -> + emqx_bridge_v2_testlib:end_per_suite(Config). -init_per_testcase(_Testcase, Config) -> - delete_bridge(Config), - snabbkaffe:start_trace(), - Config. - -end_per_testcase(_Testcase, Config) -> - ProxyHost = ?config(proxy_host, Config), - ProxyPort = ?config(proxy_port, Config), - emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), - ok = snabbkaffe:stop(), - delete_bridge(Config), - ok. 
- -%%------------------------------------------------------------------------------ -%% Helper fns -%%------------------------------------------------------------------------------ - -common_init(ConfigT) -> - Host = os:getenv("OPENTS_HOST", "toxiproxy"), +init_per_group(default, Config0) -> + Host = os:getenv("OPENTS_HOST", "toxiproxy.emqx.net"), Port = list_to_integer(os:getenv("OPENTS_PORT", "4242")), - - Config0 = [ - {opents_host, Host}, - {opents_port, Port}, - {proxy_name, "opents"} - | ConfigT - ], - - BridgeType = proplists:get_value(bridge_type, Config0, <<"opents">>), + ProxyName = "opents", case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of true -> - % Setup toxiproxy - ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), - ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), - emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), - % Ensure enterprise bridge module is loaded - ok = emqx_common_test_helpers:start_apps([ - emqx_conf, emqx_resource, emqx_bridge - ]), - _ = application:ensure_all_started(opentsdb), - _ = emqx_bridge_enterprise:module_info(), - emqx_mgmt_api_test_util:init_suite(), - {Name, OpenTSConf} = opents_config(BridgeType, Config0), - Config = - [ - {opents_config, OpenTSConf}, - {opents_bridge_type, BridgeType}, - {opents_name, Name}, - {proxy_host, ProxyHost}, - {proxy_port, ProxyPort} - | Config0 - ], - Config; + Config = emqx_bridge_v2_testlib:init_per_group(default, ?BRIDGE_TYPE_BIN, Config0), + [ + {bridge_host, Host}, + {bridge_port, Port}, + {proxy_name, ProxyName} + | Config + ]; false -> case os:getenv("IS_CI") of "yes" -> @@ -116,244 +56,152 @@ common_init(ConfigT) -> _ -> {skip, no_opents} end - end. 
- -opents_config(BridgeType, Config) -> - Port = integer_to_list(?config(opents_port, Config)), - Server = "http://" ++ ?config(opents_host, Config) ++ ":" ++ Port, - Name = atom_to_binary(?MODULE), - BatchSize = ?config(batch_size, Config), - ConfigString = - io_lib:format( - "bridges.~s.~s {\n" - " enable = true\n" - " server = ~p\n" - " resource_opts = {\n" - " request_ttl = 500ms\n" - " batch_size = ~b\n" - " query_mode = sync\n" - " }\n" - "}", - [ - BridgeType, - Name, - Server, - BatchSize - ] - ), - {Name, parse_and_check(ConfigString, BridgeType, Name)}. - -parse_and_check(ConfigString, BridgeType, Name) -> - {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), - hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), - #{<<"bridges">> := #{BridgeType := #{Name := Config}}} = RawConf, + end; +init_per_group(_Group, Config) -> Config. -create_bridge(Config) -> - create_bridge(Config, _Overrides = #{}). +end_per_group(default, Config) -> + emqx_bridge_v2_testlib:end_per_group(Config), + ok; +end_per_group(_Group, _Config) -> + ok. -create_bridge(Config, Overrides) -> - BridgeType = ?config(opents_bridge_type, Config), - Name = ?config(opents_name, Config), - Config0 = ?config(opents_config, Config), - Config1 = emqx_utils_maps:deep_merge(Config0, Overrides), - emqx_bridge:create(BridgeType, Name, Config1). 
+init_per_testcase(TestCase, Config0) -> + Type = ?config(bridge_type, Config0), + UniqueNum = integer_to_binary(erlang:unique_integer()), + Name = << + (atom_to_binary(TestCase))/binary, UniqueNum/binary + >>, + {_ConfigString, ConnectorConfig} = connector_config(Name, Config0), + {_, ActionConfig} = action_config(Name, Config0), + Config = [ + {connector_type, Type}, + {connector_name, Name}, + {connector_config, ConnectorConfig}, + {bridge_type, Type}, + {bridge_name, Name}, + {bridge_config, ActionConfig} + | Config0 + ], + emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(), + ok = snabbkaffe:start_trace(), + Config. -delete_bridge(Config) -> - BridgeType = ?config(opents_bridge_type, Config), - Name = ?config(opents_name, Config), - emqx_bridge:remove(BridgeType, Name). - -create_bridge_http(Params) -> - Path = emqx_mgmt_api_test_util:api_path(["bridges"]), - AuthHeader = emqx_mgmt_api_test_util:auth_header_(), - case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of - {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; - Error -> Error - end. - -send_message(Config, Payload) -> - Name = ?config(opents_name, Config), - BridgeType = ?config(opents_bridge_type, Config), - BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name), - emqx_bridge:send_message(BridgeID, Payload). - -query_resource(Config, Request) -> - query_resource(Config, Request, 1_000). - -query_resource(Config, Request, Timeout) -> - Name = ?config(opents_name, Config), - BridgeType = ?config(opents_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - emqx_resource:query(ResourceID, Request, #{timeout => Timeout}). +end_per_testcase(TestCase, Config) -> + emqx_bridge_v2_testlib:end_per_testcase(TestCase, Config). 
%%------------------------------------------------------------------------------ -%% Testcases +%% Helper fns %%------------------------------------------------------------------------------ -t_setup_via_config_and_publish(Config) -> - ?assertMatch( - {ok, _}, - create_bridge(Config) - ), - SentData = make_data(), - ?check_trace( - begin - {_, {ok, #{result := Result}}} = - ?wait_async_action( - send_message(Config, SentData), - #{?snk_kind := buffer_worker_flush_ack}, - 2_000 - ), - ?assertMatch( - {ok, 200, #{failed := 0, success := 1}}, Result - ), - ok - end, - fun(Trace0) -> - Trace = ?of_kind(opents_connector_query_return, Trace0), - ?assertMatch([#{result := {ok, 200, #{failed := 0, success := 1}}}], Trace), - ok - end - ), - ok. +action_config(Name, Config) -> + Type = ?config(bridge_type, Config), + ConfigString = + io_lib:format( + "actions.~s.~s {\n" + " enable = true\n" + " connector = \"~s\"\n" + " parameters = {\n" + " data = []\n" + " }\n" + "}\n", + [ + Type, + Name, + Name + ] + ), + ct:pal("ActionConfig:~ts~n", [ConfigString]), + {ConfigString, parse_action_and_check(ConfigString, Type, Name)}. -t_setup_via_http_api_and_publish(Config) -> - BridgeType = ?config(opents_bridge_type, Config), - Name = ?config(opents_name, Config), - OpentsConfig0 = ?config(opents_config, Config), - OpentsConfig = OpentsConfig0#{ - <<"name">> => Name, - <<"type">> => BridgeType - }, - ?assertMatch( - {ok, _}, - create_bridge_http(OpentsConfig) - ), - SentData = make_data(), - ?check_trace( - begin - Request = {send_message, SentData}, - Res0 = query_resource(Config, Request, 2_500), - ?assertMatch( - {ok, 200, #{failed := 0, success := 1}}, Res0 - ), - ok - end, - fun(Trace0) -> - Trace = ?of_kind(opents_connector_query_return, Trace0), - ?assertMatch([#{result := {ok, 200, #{failed := 0, success := 1}}}], Trace), - ok - end - ), - ok. 
+connector_config(Name, Config) -> + Host = ?config(bridge_host, Config), + Port = ?config(bridge_port, Config), + Type = ?config(bridge_type, Config), + ServerURL = opents_server_url(Host, Port), + ConfigString = + io_lib:format( + "connectors.~s.~s {\n" + " enable = true\n" + " server = \"~s\"\n" + "}\n", + [ + Type, + Name, + ServerURL + ] + ), + ct:pal("ConnectorConfig:~ts~n", [ConfigString]), + {ConfigString, parse_connector_and_check(ConfigString, Type, Name)}. -t_get_status(Config) -> - ?assertMatch( - {ok, _}, - create_bridge(Config) - ), +parse_action_and_check(ConfigString, BridgeType, Name) -> + parse_and_check(ConfigString, emqx_bridge_schema, <<"actions">>, BridgeType, Name). - Name = ?config(opents_name, Config), - BridgeType = ?config(opents_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), +parse_connector_and_check(ConfigString, ConnectorType, Name) -> + parse_and_check( + ConfigString, emqx_connector_schema, <<"connectors">>, ConnectorType, Name + ). +%% emqx_utils_maps:safe_atom_key_map(Config). - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)), - ok. +parse_and_check(ConfigString, SchemaMod, RootKey, Type0, Name) -> + Type = to_bin(Type0), + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + hocon_tconf:check_plain(SchemaMod, RawConf, #{required => false, atom_key => false}), + #{RootKey := #{Type := #{Name := Config}}} = RawConf, + Config. -t_create_disconnected(Config) -> - BridgeType = proplists:get_value(bridge_type, Config, <<"opents">>), - Config1 = lists:keyreplace(opents_port, 1, Config, {opents_port, 61234}), - {_Name, OpenTSConf} = opents_config(BridgeType, Config1), +to_bin(List) when is_list(List) -> + unicode:characters_to_binary(List, utf8); +to_bin(Atom) when is_atom(Atom) -> + erlang:atom_to_binary(Atom); +to_bin(Bin) when is_binary(Bin) -> + Bin. 
- Config2 = lists:keyreplace(opents_config, 1, Config1, {opents_config, OpenTSConf}), - ?assertMatch({ok, _}, create_bridge(Config2)), +opents_server_url(Host, Port) -> + iolist_to_binary([ + "http://", + Host, + ":", + integer_to_binary(Port) + ]). - Name = ?config(opents_name, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceID)), - ok. +is_success_check({ok, 200, #{failed := Failed}}) -> + ?assertEqual(0, Failed); +is_success_check(Ret) -> + ?assert(false, Ret). -t_write_failure(Config) -> - ProxyName = ?config(proxy_name, Config), - ProxyPort = ?config(proxy_port, Config), - ProxyHost = ?config(proxy_host, Config), - {ok, _} = create_bridge(Config), - SentData = make_data(), - emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> - {_, {ok, #{result := Result}}} = - ?wait_async_action( - send_message(Config, SentData), - #{?snk_kind := buffer_worker_flush_ack}, - 2_000 - ), - ?assertMatch({error, _}, Result), - ok - end), - ok. +is_error_check(Result) -> + ?assertMatch({error, {400, #{failed := 1}}}, Result). 
-t_write_timeout(Config) -> - ProxyName = ?config(proxy_name, Config), - ProxyPort = ?config(proxy_port, Config), - ProxyHost = ?config(proxy_host, Config), - {ok, _} = create_bridge( - Config, - #{ - <<"resource_opts">> => #{ - <<"request_ttl">> => <<"500ms">>, - <<"resume_interval">> => <<"100ms">>, - <<"health_check_interval">> => <<"100ms">> +opentds_query(Config, Metric) -> + Path = <<"/api/query">>, + Opts = #{return_all => true}, + Body = #{ + start => <<"1h-ago">>, + queries => [ + #{ + aggregator => <<"last">>, + metric => Metric, + tags => #{ + host => <<"*">> + } } - } - ), - SentData = make_data(), - emqx_common_test_helpers:with_failure( - timeout, ProxyName, ProxyHost, ProxyPort, fun() -> - ?assertMatch( - {error, {resource_error, #{reason := timeout}}}, - query_resource(Config, {send_message, SentData}) - ) - end - ), - ok. + ], + showTSUID => false, + showQuery => false, + delete => false + }, + opentsdb_request(Config, Path, Body, Opts). -t_missing_data(Config) -> - ?assertMatch( - {ok, _}, - create_bridge(Config) - ), - {_, {ok, #{result := Result}}} = - ?wait_async_action( - send_message(Config, #{}), - #{?snk_kind := buffer_worker_flush_ack}, - 2_000 - ), - ?assertMatch( - {error, {400, #{failed := 1, success := 0}}}, - Result - ), - ok. +opentsdb_request(Config, Path, Body) -> + opentsdb_request(Config, Path, Body, #{}). -t_bad_data(Config) -> - ?assertMatch( - {ok, _}, - create_bridge(Config) - ), - Data = maps:without([metric], make_data()), - {_, {ok, #{result := Result}}} = - ?wait_async_action( - send_message(Config, Data), - #{?snk_kind := buffer_worker_flush_ack}, - 2_000 - ), - - ?assertMatch( - {error, {400, #{failed := 1, success := 0}}}, Result - ), - ok. - -make_data() -> - make_data(<<"cpu">>, 12). 
+opentsdb_request(Config, Path, Body, Opts) -> + Host = ?config(bridge_host, Config), + Port = ?config(bridge_port, Config), + ServerURL = opents_server_url(Host, Port), + URL = <>, + emqx_mgmt_api_test_util:request_api(post, URL, [], [], Body, Opts). make_data(Metric, Value) -> #{ @@ -363,3 +211,221 @@ make_data(Metric, Value) -> }, value => Value }. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_query_simple(Config) -> + Metric = <<"t_query_simple">>, + Value = 12, + MakeMessageFun = fun() -> make_data(Metric, Value) end, + ok = emqx_bridge_v2_testlib:t_sync_query( + Config, MakeMessageFun, fun is_success_check/1, opents_bridge_on_query + ), + {ok, {{_, 200, _}, _, IoTDBResult}} = opentds_query(Config, Metric), + QResult = emqx_utils_json:decode(IoTDBResult), + ?assertMatch( + [ + #{ + <<"metric">> := Metric, + <<"dps">> := _ + } + ], + QResult + ), + [#{<<"dps">> := Dps}] = QResult, + ?assertMatch([Value | _], maps:values(Dps)). + +t_create_via_http(Config) -> + emqx_bridge_v2_testlib:t_create_via_http(Config). + +t_start_stop(Config) -> + emqx_bridge_v2_testlib:t_start_stop(Config, opents_bridge_stopped). + +t_on_get_status(Config) -> + emqx_bridge_v2_testlib:t_on_get_status(Config, #{failure_status => connecting}). + +t_query_invalid_data(Config) -> + Metric = <<"t_query_invalid_data">>, + Value = 12, + MakeMessageFun = fun() -> maps:remove(value, make_data(Metric, Value)) end, + ok = emqx_bridge_v2_testlib:t_sync_query( + Config, MakeMessageFun, fun is_error_check/1, opents_bridge_on_query + ). 
+ +t_tags_validator(Config) -> + %% Create without data configured + ?assertMatch({ok, _}, emqx_bridge_v2_testlib:create_bridge(Config)), + + ?assertMatch( + {ok, _}, + emqx_bridge_v2_testlib:update_bridge_api(Config, #{ + <<"parameters">> => #{ + <<"data">> => [ + #{ + <<"metric">> => <<"${metric}">>, + <<"tags">> => <<"${tags}">>, + <<"value">> => <<"${payload.value}">> + } + ] + } + }) + ), + + ?assertMatch( + {error, _}, + emqx_bridge_v2_testlib:update_bridge_api(Config, #{ + <<"parameters">> => #{ + <<"data">> => [ + #{ + <<"metric">> => <<"${metric}">>, + <<"tags">> => <<"text">>, + <<"value">> => <<"${payload.value}">> + } + ] + } + }) + ). + +t_raw_int_value(Config) -> + raw_value_test(<<"t_raw_int_value">>, 42, Config). + +t_raw_float_value(Config) -> + raw_value_test(<<"t_raw_float_value">>, 42.5, Config). + +t_list_tags(Config) -> + ?assertMatch({ok, _}, emqx_bridge_v2_testlib:create_bridge(Config)), + ResourceId = emqx_bridge_v2_testlib:resource_id(Config), + BridgeId = emqx_bridge_v2_testlib:bridge_id(Config), + ?retry( + _Sleep = 1_000, + _Attempts = 10, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + + ?assertMatch( + {ok, _}, + emqx_bridge_v2_testlib:update_bridge_api(Config, #{ + <<"parameters">> => #{ + <<"data">> => [ + #{ + <<"metric">> => <<"${metric}">>, + <<"tags">> => #{<<"host">> => <<"valueA">>}, + value => <<"${value}">> + } + ] + } + }) + ), + + Metric = <<"t_list_tags">>, + Value = 12, + MakeMessageFun = fun() -> make_data(Metric, Value) end, + + is_success_check( + emqx_resource:simple_sync_query(ResourceId, {BridgeId, MakeMessageFun()}) + ), + + {ok, {{_, 200, _}, _, IoTDBResult}} = opentds_query(Config, Metric), + QResult = emqx_utils_json:decode(IoTDBResult), + ?assertMatch( + [ + #{ + <<"metric">> := Metric, + <<"tags">> := #{<<"host">> := <<"valueA">>} + } + ], + QResult + ). 
+ +t_list_tags_with_var(Config) -> + ?assertMatch({ok, _}, emqx_bridge_v2_testlib:create_bridge(Config)), + ResourceId = emqx_bridge_v2_testlib:resource_id(Config), + BridgeId = emqx_bridge_v2_testlib:bridge_id(Config), + ?retry( + _Sleep = 1_000, + _Attempts = 10, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + + ?assertMatch( + {ok, _}, + emqx_bridge_v2_testlib:update_bridge_api(Config, #{ + <<"parameters">> => #{ + <<"data">> => [ + #{ + <<"metric">> => <<"${metric}">>, + <<"tags">> => #{<<"host">> => <<"${value}">>}, + value => <<"${value}">> + } + ] + } + }) + ), + + Metric = <<"t_list_tags_with_var">>, + Value = 12, + MakeMessageFun = fun() -> make_data(Metric, Value) end, + + is_success_check( + emqx_resource:simple_sync_query(ResourceId, {BridgeId, MakeMessageFun()}) + ), + + {ok, {{_, 200, _}, _, IoTDBResult}} = opentds_query(Config, Metric), + QResult = emqx_utils_json:decode(IoTDBResult), + ?assertMatch( + [ + #{ + <<"metric">> := Metric, + <<"tags">> := #{<<"host">> := <<"12">>} + } + ], + QResult + ). 
+ +raw_value_test(Metric, RawValue, Config) -> + ?assertMatch({ok, _}, emqx_bridge_v2_testlib:create_bridge(Config)), + ResourceId = emqx_bridge_v2_testlib:resource_id(Config), + BridgeId = emqx_bridge_v2_testlib:bridge_id(Config), + ?retry( + _Sleep = 1_000, + _Attempts = 10, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + + ?assertMatch( + {ok, _}, + emqx_bridge_v2_testlib:update_bridge_api(Config, #{ + <<"parameters">> => #{ + <<"data">> => [ + #{ + <<"metric">> => <<"${metric}">>, + <<"tags">> => <<"${tags}">>, + <<"value">> => RawValue + } + ] + } + }) + ), + + Value = 12, + MakeMessageFun = fun() -> make_data(Metric, Value) end, + + is_success_check( + emqx_resource:simple_sync_query(ResourceId, {BridgeId, MakeMessageFun()}) + ), + + {ok, {{_, 200, _}, _, IoTDBResult}} = opentds_query(Config, Metric), + QResult = emqx_utils_json:decode(IoTDBResult), + ?assertMatch( + [ + #{ + <<"metric">> := Metric, + <<"dps">> := _ + } + ], + QResult + ), + [#{<<"dps">> := Dps}] = QResult, + ?assertMatch([RawValue | _], maps:values(Dps)). diff --git a/apps/emqx_bridge_oracle/BSL.txt b/apps/emqx_bridge_oracle/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_oracle/BSL.txt +++ b/apps/emqx_bridge_oracle/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. 
-Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src index d68c6ca9a..39b606d5f 100644 --- a/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src +++ b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_oracle, [ {description, "EMQX Enterprise Oracle Database Bridge"}, - {vsn, "0.1.4"}, + {vsn, "0.1.5"}, {registered, []}, {applications, [ kernel, @@ -8,7 +8,7 @@ emqx_resource, emqx_oracle ]}, - {env, []}, + {env, [{emqx_action_info_modules, [emqx_bridge_oracle_action_info]}]}, {modules, []}, {links, []} diff --git a/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl index 15b2be575..e4cb2c3b4 100644 --- a/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl +++ b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl @@ -9,7 +9,9 @@ -include_lib("emqx_resource/include/emqx_resource.hrl"). -export([ - conn_bridge_examples/1 + bridge_v2_examples/1, + conn_bridge_examples/1, + connector_examples/1 ]). -export([ @@ -20,22 +22,25 @@ config_validator/1 ]). +-define(CONNECTOR_TYPE, oracle). +-define(ACTION_TYPE, ?CONNECTOR_TYPE). + -define(DEFAULT_SQL, << "insert into t_mqtt_msgs(msgid, topic, qos, payload) " "values (${id}, ${topic}, ${qos}, ${payload})" >>). -conn_bridge_examples(Method) -> +conn_bridge_examples(_Method) -> [ #{ <<"oracle">> => #{ summary => <<"Oracle Database Bridge">>, - value => values(Method) + value => conn_bridge_examples_values() } } ]. -values(_Method) -> +conn_bridge_examples_values() -> #{ enable => true, type => oracle, @@ -58,6 +63,54 @@ values(_Method) -> } }. 
+connector_examples(Method) -> + [ + #{ + <<"oracle">> => + #{ + summary => <<"Oracle Connector">>, + value => emqx_connector_schema:connector_values( + Method, ?CONNECTOR_TYPE, connector_values() + ) + } + } + ]. + +connector_values() -> + #{ + <<"username">> => <<"system">>, + <<"password">> => <<"oracle">>, + <<"server">> => <<"127.0.0.1:1521">>, + <<"service_name">> => <<"XE">>, + <<"sid">> => <<"XE">>, + <<"pool_size">> => 8, + <<"resource_opts">> => + #{ + <<"health_check_interval">> => <<"15s">>, + <<"start_timeout">> => <<"5s">> + } + }. + +bridge_v2_examples(Method) -> + [ + #{ + <<"oracle">> => + #{ + summary => <<"Oracle Action">>, + value => emqx_bridge_v2_schema:action_values( + Method, ?ACTION_TYPE, ?CONNECTOR_TYPE, action_values() + ) + } + } + ]. + +action_values() -> + #{ + parameters => #{ + <<"sql">> => ?DEFAULT_SQL + } + }. + %% ------------------------------------------------------------------------------------------------- %% Hocon Schema Definitions @@ -65,6 +118,54 @@ namespace() -> "bridge_oracle". roots() -> []. 
+fields(Field) when + Field == "get_connector"; + Field == "put_connector"; + Field == "post_connector" +-> + Fields = + fields(connector_fields) ++ + emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts), + emqx_connector_schema:api_fields(Field, ?CONNECTOR_TYPE, Fields); +fields(Field) when + Field == "get_bridge_v2"; + Field == "post_bridge_v2"; + Field == "put_bridge_v2" +-> + emqx_bridge_v2_schema:api_fields(Field, ?ACTION_TYPE, fields(oracle_action)); +fields(action) -> + {?ACTION_TYPE, + hoconsc:mk( + hoconsc:map(name, hoconsc:ref(?MODULE, oracle_action)), + #{ + desc => <<"Oracle Action Config">>, + required => false + } + )}; +fields(oracle_action) -> + emqx_bridge_v2_schema:make_producer_action_schema( + hoconsc:mk( + hoconsc:ref(?MODULE, action_parameters), + #{ + required => true, + desc => ?DESC("action_parameters") + } + ) + ); +fields(action_parameters) -> + [ + {sql, + hoconsc:mk( + binary(), + #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>} + )} + ]; +fields("config_connector") -> + emqx_connector_schema:common_fields() ++ + fields(connector_fields) ++ + emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts); +fields(connector_resource_opts) -> + emqx_connector_schema:resource_opts_fields(); fields("config") -> [ {enable, @@ -83,8 +184,10 @@ fields("config") -> #{desc => ?DESC("local_topic"), default => undefined} )} ] ++ emqx_resource_schema:fields("resource_opts") ++ - (emqx_oracle_schema:fields(config) -- - emqx_connector_schema_lib:prepare_statement_fields()); + fields(connector_fields); +fields(connector_fields) -> + (emqx_oracle_schema:fields(config) -- + emqx_connector_schema_lib:prepare_statement_fields()); fields("post") -> fields("post", oracle); fields("put") -> @@ -97,6 +200,16 @@ fields("post", Type) -> desc("config") -> ?DESC("desc_config"); +desc("creation_opts") -> + ?DESC(emqx_resource_schema, "creation_opts"); +desc("config_connector") -> + 
?DESC("config_connector"); +desc(oracle_action) -> + ?DESC("oracle_action"); +desc(action_parameters) -> + ?DESC("action_parameters"); +desc(connector_resource_opts) -> + ?DESC(emqx_resource_schema, "resource_opts"); desc(_) -> undefined. @@ -116,5 +229,5 @@ config_validator(#{<<"server">> := Server} = Config) when not is_map_key(<<"service_name">>, Config) -> {error, "neither SID nor Service Name was set"}; -config_validator(_) -> +config_validator(_Config) -> ok. diff --git a/apps/emqx_bridge_oracle/src/emqx_bridge_oracle_action_info.erl b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle_action_info.erl new file mode 100644 index 000000000..561b798bd --- /dev/null +++ b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle_action_info.erl @@ -0,0 +1,22 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_oracle_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +bridge_v1_type_name() -> oracle. + +action_type_name() -> oracle. + +connector_type_name() -> oracle. + +schema_module() -> emqx_bridge_oracle. diff --git a/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl b/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl index 878ae2e1d..608d81bec 100644 --- a/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl +++ b/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl @@ -267,7 +267,12 @@ parse_and_check(ConfigString, Name) -> resource_id(Config) -> Type = ?BRIDGE_TYPE_BIN, Name = ?config(oracle_name, Config), - emqx_bridge_resource:resource_id(Type, Name). + <<"connector:", Type/binary, ":", Name/binary>>. + +action_id(Config) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + emqx_bridge_v2:id(Type, Name). 
bridge_id(Config) -> Type = ?BRIDGE_TYPE_BIN, @@ -378,6 +383,7 @@ create_rule_and_action_http(Config) -> t_sync_query(Config) -> ResourceId = resource_id(Config), + Name = ?config(oracle_name, Config), ?check_trace( begin reset_table(Config), @@ -387,6 +393,18 @@ t_sync_query(Config) -> _Attempts = 20, ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) ), + ?retry( + _Sleep1 = 1_000, + _Attempts1 = 30, + ?assertMatch( + #{status := connected}, + emqx_bridge_v2:health_check( + ?BRIDGE_TYPE_BIN, + Name + ) + ) + ), + ActionId = action_id(Config), MsgId = erlang:unique_integer(), Params = #{ topic => ?config(mqtt_topic, Config), @@ -394,7 +412,7 @@ t_sync_query(Config) -> payload => ?config(oracle_name, Config), retain => true }, - Message = {send_message, Params}, + Message = {ActionId, Params}, ?assertEqual( {ok, [{affected_rows, 1}]}, emqx_resource:simple_sync_query(ResourceId, Message) ), @@ -409,7 +427,7 @@ t_batch_sync_query(Config) -> ProxyHost = ?config(proxy_host, Config), ProxyName = ?config(proxy_name, Config), ResourceId = resource_id(Config), - BridgeId = bridge_id(Config), + Name = ?config(oracle_name, Config), ?check_trace( begin reset_table(Config), @@ -419,6 +437,17 @@ t_batch_sync_query(Config) -> _Attempts = 30, ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) ), + ?retry( + _Sleep = 1_000, + _Attempts = 30, + ?assertMatch( + #{status := connected}, + emqx_bridge_v2:health_check( + ?BRIDGE_TYPE_BIN, + Name + ) + ) + ), MsgId = erlang:unique_integer(), Params = #{ topic => ?config(mqtt_topic, Config), @@ -431,9 +460,9 @@ t_batch_sync_query(Config) -> % be sent async as callback_mode is set to async_if_possible. 
emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> ct:sleep(1000), - emqx_bridge:send_message(BridgeId, Params), - emqx_bridge:send_message(BridgeId, Params), - emqx_bridge:send_message(BridgeId, Params), + emqx_bridge_v2:send_message(?BRIDGE_TYPE_BIN, Name, Params, #{}), + emqx_bridge_v2:send_message(?BRIDGE_TYPE_BIN, Name, Params, #{}), + emqx_bridge_v2:send_message(?BRIDGE_TYPE_BIN, Name, Params, #{}), ok end), % Wait for reconnection. @@ -442,6 +471,17 @@ t_batch_sync_query(Config) -> _Attempts = 30, ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) ), + ?retry( + _Sleep = 1_000, + _Attempts = 30, + ?assertMatch( + #{status := connected}, + emqx_bridge_v2:health_check( + ?BRIDGE_TYPE_BIN, + Name + ) + ) + ), ?retry( _Sleep = 1_000, _Attempts = 30, @@ -506,6 +546,17 @@ t_start_stop(Config) -> _Attempts = 20, ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) ), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertMatch( + #{status := connected}, + emqx_bridge_v2:health_check( + ?BRIDGE_TYPE_BIN, + OracleName + ) + ) + ), %% Check that the bridge probe API doesn't leak atoms. 
ProbeRes0 = probe_bridge_api( @@ -554,6 +605,7 @@ t_probe_with_nested_tokens(Config) -> t_message_with_nested_tokens(Config) -> BridgeId = bridge_id(Config), ResourceId = resource_id(Config), + Name = ?config(oracle_name, Config), reset_table(Config), ?assertMatch( {ok, _}, @@ -568,6 +620,17 @@ t_message_with_nested_tokens(Config) -> _Attempts = 20, ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) ), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertMatch( + #{status := connected}, + emqx_bridge_v2:health_check( + ?BRIDGE_TYPE_BIN, + Name + ) + ) + ), MsgId = erlang:unique_integer(), Data = binary_to_list(?config(oracle_name, Config)), Params = #{ @@ -600,6 +663,7 @@ t_on_get_status(Config) -> ProxyPort = ?config(proxy_port, Config), ProxyHost = ?config(proxy_host, Config), ProxyName = ?config(proxy_name, Config), + Name = ?config(oracle_name, Config), ResourceId = resource_id(Config), reset_table(Config), ?assertMatch({ok, _}, create_bridge(Config)), @@ -612,13 +676,23 @@ t_on_get_status(Config) -> ), emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> ct:sleep(500), - ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)) + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)), + ?assertMatch( + #{status := disconnected}, + emqx_bridge_v2:health_check(?BRIDGE_TYPE_BIN, Name) + ) end), %% Check that it recovers itself. ?retry( _Sleep = 1_000, _Attempts = 20, - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + begin + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)), + ?assertMatch( + #{status := connected}, + emqx_bridge_v2:health_check(?BRIDGE_TYPE_BIN, Name) + ) + end ), ok. 
@@ -664,6 +738,7 @@ t_missing_table(Config) -> begin drop_table_if_exists(Config), ?assertMatch({ok, _}, create_bridge_api(Config)), + ActionId = emqx_bridge_v2:id(?BRIDGE_TYPE_BIN, ?config(oracle_name, Config)), ?retry( _Sleep = 1_000, _Attempts = 20, @@ -679,7 +754,7 @@ t_missing_table(Config) -> payload => ?config(oracle_name, Config), retain => true }, - Message = {send_message, Params}, + Message = {ActionId, Params}, ?assertMatch( {error, {resource_error, #{reason := not_connected}}}, emqx_resource:simple_sync_query(ResourceId, Message) @@ -698,6 +773,7 @@ t_table_removed(Config) -> begin reset_table(Config), ?assertMatch({ok, _}, create_bridge_api(Config)), + ActionId = emqx_bridge_v2:id(?BRIDGE_TYPE_BIN, ?config(oracle_name, Config)), ?retry( _Sleep = 1_000, _Attempts = 20, @@ -711,7 +787,7 @@ t_table_removed(Config) -> payload => ?config(oracle_name, Config), retain => true }, - Message = {send_message, Params}, + Message = {ActionId, Params}, ?assertEqual( {error, {unrecoverable_error, {942, "ORA-00942: table or view does not exist\n"}}}, emqx_resource:simple_sync_query(ResourceId, Message) diff --git a/apps/emqx_bridge_pgsql/BSL.txt b/apps/emqx_bridge_pgsql/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_pgsql/BSL.txt +++ b/apps/emqx_bridge_pgsql/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. 
-Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_pgsql/test/emqx_bridge_v2_pgsql_SUITE.erl b/apps/emqx_bridge_pgsql/test/emqx_bridge_v2_pgsql_SUITE.erl index d077cece7..cf84bebab 100644 --- a/apps/emqx_bridge_pgsql/test/emqx_bridge_v2_pgsql_SUITE.erl +++ b/apps/emqx_bridge_pgsql/test/emqx_bridge_v2_pgsql_SUITE.erl @@ -193,10 +193,7 @@ serde_roundtrip(InnerConfigMap0) -> InnerConfigMap. parse_and_check_bridge_config(InnerConfigMap, Name) -> - TypeBin = ?BRIDGE_TYPE_BIN, - RawConf = #{<<"bridges">> => #{TypeBin => #{Name => InnerConfigMap}}}, - hocon_tconf:check_plain(emqx_bridge_v2_schema, RawConf, #{required => false, atom_key => false}), - InnerConfigMap. + emqx_bridge_v2_testlib:parse_and_check(?BRIDGE_TYPE_BIN, Name, InnerConfigMap). make_message() -> ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), diff --git a/apps/emqx_bridge_pulsar/BSL.txt b/apps/emqx_bridge_pulsar/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_pulsar/BSL.txt +++ b/apps/emqx_bridge_pulsar/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. -Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_rabbitmq/BSL.txt b/apps/emqx_bridge_rabbitmq/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_rabbitmq/BSL.txt +++ b/apps/emqx_bridge_rabbitmq/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. 
-Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_rabbitmq/rebar.config b/apps/emqx_bridge_rabbitmq/rebar.config index a2f072e48..a6af22040 100644 --- a/apps/emqx_bridge_rabbitmq/rebar.config +++ b/apps/emqx_bridge_rabbitmq/rebar.config @@ -1,6 +1,6 @@ %% -*- mode: erlang; -*- -{erl_opts, [debug_info]}. +{erl_opts, [debug_info, {feature, maybe_expr, enable}]}. {deps, [ %% The following two are dependencies of rabbit_common {thoas, {git, "https://github.com/emqx/thoas.git", {tag, "v1.0.0"}}}, diff --git a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.app.src b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.app.src index 2e1ec3444..a885cc6bc 100644 --- a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.app.src +++ b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.app.src @@ -1,7 +1,8 @@ {application, emqx_bridge_rabbitmq, [ {description, "EMQX Enterprise RabbitMQ Bridge"}, - {vsn, "0.1.7"}, + {vsn, "0.1.8"}, {registered, []}, + {mod, {emqx_bridge_rabbitmq_app, []}}, {applications, [ kernel, stdlib, diff --git a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.erl b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.erl index 608e0a669..d9d182aa9 100644 --- a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.erl +++ b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- -module(emqx_bridge_rabbitmq). @@ -22,7 +22,7 @@ ]). 
%% ------------------------------------------------------------------------------------------------- -%% Callback used by HTTP API +%% Callback used by HTTP API v1 %% ------------------------------------------------------------------------------------------------- conn_bridge_examples(Method) -> @@ -78,7 +78,7 @@ fields("config") -> {local_topic, mk( binary(), - #{desc => ?DESC("local_topic"), default => undefined} + #{desc => ?DESC("local_topic")} )}, {resource_opts, mk( diff --git a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_action_info.erl b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_action_info.erl new file mode 100644 index 000000000..cd7d340de --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_action_info.erl @@ -0,0 +1,77 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_rabbitmq_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0, + bridge_v1_config_to_connector_config/1, + bridge_v1_config_to_action_config/2, + is_source/0, + is_action/0 +]). + +-define(SCHEMA_MODULE, emqx_bridge_rabbitmq_pubsub_schema). +-import(emqx_utils_conv, [bin/1]). + +bridge_v1_type_name() -> rabbitmq. 
+ +action_type_name() -> rabbitmq. + +connector_type_name() -> rabbitmq. + +schema_module() -> ?SCHEMA_MODULE. + +is_source() -> true. +is_action() -> true. + +bridge_v1_config_to_connector_config(BridgeV1Config) -> + ActionTopLevelKeys = schema_keys(?SCHEMA_MODULE:fields(publisher_action)), + ActionParametersKeys = schema_keys(?SCHEMA_MODULE:fields(action_parameters)), + ActionKeys = ActionTopLevelKeys ++ ActionParametersKeys, + ConnectorTopLevelKeys = schema_keys( + emqx_bridge_rabbitmq_connector_schema:fields("config_connector") + ), + ConnectorKeys = (maps:keys(BridgeV1Config) -- (ActionKeys -- ConnectorTopLevelKeys)), + ConnectorConfig0 = maps:with(ConnectorKeys, BridgeV1Config), + emqx_utils_maps:update_if_present( + <<"resource_opts">>, + fun emqx_connector_schema:project_to_connector_resource_opts/1, + ConnectorConfig0 + ). + +bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName) -> + ActionTopLevelKeys = schema_keys(?SCHEMA_MODULE:fields(publisher_action)), + ActionParametersKeys = schema_keys(?SCHEMA_MODULE:fields(action_parameters)), + ActionKeys = ActionTopLevelKeys ++ ActionParametersKeys, + ActionConfig0 = make_config_map(ActionKeys, ActionParametersKeys, BridgeV1Config), + emqx_utils_maps:update_if_present( + <<"resource_opts">>, + fun emqx_bridge_v2_schema:project_to_actions_resource_opts/1, + ActionConfig0#{<<"connector">> => ConnectorName} + ). + +schema_keys(Schema) -> + [bin(Key) || {Key, _} <- Schema]. + +make_config_map(PickKeys, IndentKeys, Config) -> + Conf0 = maps:with(PickKeys, Config), + emqx_utils_maps:indent(<<"parameters">>, IndentKeys, Conf0). 
diff --git a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_app.erl b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_app.erl new file mode 100644 index 000000000..e43a70620 --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_app.erl @@ -0,0 +1,26 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_bridge_rabbitmq_app). + +-behaviour(application). + +-export([start/2, stop/1]). + +start(_StartType, _StartArgs) -> + emqx_bridge_rabbitmq_sup:start_link(). + +stop(_State) -> + ok. diff --git a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_connector.erl b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_connector.erl index 2e4074f79..134ba15b6 100644 --- a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_connector.erl +++ b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_connector.erl @@ -1,9 +1,10 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- -module(emqx_bridge_rabbitmq_connector). +-feature(maybe_expr, enable). 
-include_lib("emqx_connector/include/emqx_connector.hrl"). -include_lib("emqx_resource/include/emqx_resource.hrl"). -include_lib("typerefl/include/types.hrl"). @@ -22,17 +23,16 @@ %% hocon_schema callbacks -export([namespace/0, roots/0, fields/1]). -%% HTTP API callbacks --export([values/1]). - %% emqx_resource callbacks -export([ - %% Required callbacks on_start/2, + on_add_channel/4, + on_remove_channel/3, + on_get_channels/1, on_stop/2, callback_mode/0, - %% Optional callbacks on_get_status/2, + on_get_channel_status/3, on_query/3, on_batch_query/3 ]). @@ -41,142 +41,18 @@ -export([connect/1]). %% Internal callbacks --export([publish_messages/3]). +-export([publish_messages/4]). namespace() -> "rabbitmq". +%% bridge v1 roots() -> [{config, #{type => hoconsc:ref(?MODULE, config)}}]. +%% bridge v1 called by emqx_bridge_rabbitmq fields(config) -> - [ - {server, - hoconsc:mk( - typerefl:binary(), - #{ - default => <<"localhost">>, - desc => ?DESC("server") - } - )}, - {port, - hoconsc:mk( - emqx_schema:port_number(), - #{ - default => 5672, - desc => ?DESC("server") - } - )}, - {username, - hoconsc:mk( - typerefl:binary(), - #{ - required => true, - desc => ?DESC("username") - } - )}, - {password, emqx_connector_schema_lib:password_field(#{required => true})}, - {pool_size, - hoconsc:mk( - typerefl:pos_integer(), - #{ - default => 8, - desc => ?DESC("pool_size") - } - )}, - {timeout, - hoconsc:mk( - emqx_schema:timeout_duration_ms(), - #{ - default => <<"5s">>, - desc => ?DESC("timeout") - } - )}, - {wait_for_publish_confirmations, - hoconsc:mk( - boolean(), - #{ - default => true, - desc => ?DESC("wait_for_publish_confirmations") - } - )}, - {publish_confirmation_timeout, - hoconsc:mk( - emqx_schema:timeout_duration_ms(), - #{ - default => <<"30s">>, - desc => ?DESC("timeout") - } - )}, - - {virtual_host, - hoconsc:mk( - typerefl:binary(), - #{ - default => <<"/">>, - desc => ?DESC("virtual_host") - } - )}, - {heartbeat, - hoconsc:mk( - 
emqx_schema:timeout_duration_ms(), - #{ - default => <<"30s">>, - desc => ?DESC("heartbeat") - } - )}, - %% Things related to sending messages to RabbitMQ - {exchange, - hoconsc:mk( - typerefl:binary(), - #{ - required => true, - desc => ?DESC("exchange") - } - )}, - {routing_key, - hoconsc:mk( - typerefl:binary(), - #{ - required => true, - desc => ?DESC("routing_key") - } - )}, - {delivery_mode, - hoconsc:mk( - hoconsc:enum([non_persistent, persistent]), - #{ - default => non_persistent, - desc => ?DESC("delivery_mode") - } - )}, - {payload_template, - hoconsc:mk( - binary(), - #{ - default => <<"${.}">>, - desc => ?DESC("payload_template") - } - )} - ] ++ emqx_connector_schema_lib:ssl_fields(). - -values(post) -> - maps:merge(values(put), #{name => <<"connector">>}); -values(get) -> - values(post); -values(put) -> - #{ - server => <<"localhost">>, - port => 5672, - enable => true, - pool_size => 8, - type => rabbitmq, - username => <<"guest">>, - password => <<"******">>, - routing_key => <<"my_routing_key">>, - payload_template => <<"">> - }; -values(_) -> - #{}. + emqx_bridge_rabbitmq_connector_schema:fields(connector) ++ + emqx_bridge_rabbitmq_pubsub_schema:fields(action_parameters). %% =================================================================== %% Callbacks defined in emqx_resource @@ -186,127 +62,84 @@ values(_) -> callback_mode() -> always_sync. -%% emqx_resource callback - -%% emqx_resource callback called when the resource is started - --spec on_start(resource_id(), term()) -> {ok, resource_state()} | {error, _}. 
-on_start( - InstanceID, - #{ - pool_size := PoolSize, - payload_template := PayloadTemplate, - delivery_mode := InitialDeliveryMode - } = InitialConfig -) -> - DeliveryMode = - case InitialDeliveryMode of - non_persistent -> 1; - persistent -> 2 - end, - Config = InitialConfig#{ - delivery_mode => DeliveryMode - }, +on_start(InstanceID, Config) -> ?SLOG(info, #{ msg => "starting_rabbitmq_connector", connector => InstanceID, config => emqx_utils:redact(Config) }), + init_secret(), Options = [ {config, Config}, - %% The pool_size is read by ecpool and decides the number of workers in - %% the pool - {pool_size, PoolSize}, + {pool_size, maps:get(pool_size, Config)}, {pool, InstanceID} ], - ProcessedTemplate = emqx_placeholder:preproc_tmpl(PayloadTemplate), - State = #{ - poolname => InstanceID, - processed_payload_template => ProcessedTemplate, - config => Config - }, - %% Initialize RabbitMQ's secret library so that the password is encrypted - %% in the log files. - case credentials_obfuscation:secret() of - ?PENDING_SECRET -> - Bytes = crypto:strong_rand_bytes(128), - %% The password can appear in log files if we don't do this - credentials_obfuscation:set_secret(Bytes); - _ -> - %% Already initialized - ok - end, case emqx_resource_pool:start(InstanceID, ?MODULE, Options) of ok -> - {ok, State}; + {ok, #{channels => #{}}}; {error, Reason} -> - ?SLOG(info, #{ + ?SLOG(error, #{ msg => "rabbitmq_connector_start_failed", - error_reason => Reason, + reason => Reason, config => emqx_utils:redact(Config) }), {error, Reason} end. -%% emqx_resource callback called when the resource is stopped - --spec on_stop(resource_id(), resource_state()) -> term(). 
-on_stop( - ResourceID, - _State +on_add_channel( + InstanceId, + #{channels := Channels} = State, + ChannelId, + Config ) -> + case maps:is_key(ChannelId, Channels) of + true -> + {error, already_exists}; + false -> + ProcParam = preproc_parameter(Config), + case make_channel(InstanceId, ChannelId, ProcParam) of + {ok, RabbitChannels} -> + Channel = #{param => ProcParam, rabbitmq => RabbitChannels}, + NewChannels = maps:put(ChannelId, Channel, Channels), + {ok, State#{channels => NewChannels}}; + {error, Error} -> + ?SLOG(error, #{ + msg => "failed_to_start_rabbitmq_channel", + instance_id => InstanceId, + params => emqx_utils:redact(Config), + error => Error + }), + {error, Error} + end + end. + +on_remove_channel(_InstanceId, #{channels := Channels} = State, ChannelId) -> + try_unsubscribe(ChannelId, Channels), + {ok, State#{channels => maps:remove(ChannelId, Channels)}}. + +on_get_channels(InstanceId) -> + emqx_bridge_v2:get_channels_for_connector(InstanceId). + +on_stop(ResourceID, _State) -> ?SLOG(info, #{ msg => "stopping_rabbitmq_connector", connector => ResourceID }), - stop_clients_and_pool(ResourceID). - -stop_clients_and_pool(PoolName) -> - Workers = [Worker || {_WorkerName, Worker} <- ecpool:workers(PoolName)], - Clients = [ - begin - {ok, Client} = ecpool_worker:client(Worker), - Client - end - || Worker <- Workers - ], - %% We need to stop the pool before stopping the workers as the pool monitors the workers - StopResult = emqx_resource_pool:stop(PoolName), - lists:foreach(fun stop_worker/1, Clients), - StopResult. - -stop_worker({Channel, Connection}) -> - amqp_channel:close(Channel), - amqp_connection:close(Connection). - -%% This is the callback function that is called by ecpool when the pool is -%% started + lists:foreach( + fun({_Name, Worker}) -> + case ecpool_worker:client(Worker) of + {ok, Conn} -> amqp_connection:close(Conn); + _ -> ok + end + end, + ecpool:workers(ResourceID) + ), + emqx_resource_pool:stop(ResourceID). 
+%% This is the callback function that is called by ecpool -spec connect(term()) -> {ok, {pid(), pid()}, map()} | {error, term()}. connect(Options) -> Config = proplists:get_value(config, Options), - try - create_rabbitmq_connection_and_channel(Config) - catch - _:{error, Reason} -> - ?SLOG(error, #{ - msg => "rabbitmq_connector_connection_failed", - error_type => error, - error_reason => Reason, - config => emqx_utils:redact(Config) - }), - {error, Reason}; - Type:Reason -> - ?SLOG(error, #{ - msg => "rabbitmq_connector_connection_failed", - error_type => Type, - error_reason => Reason, - config => emqx_utils:redact(Config) - }), - {error, Reason} - end. - -create_rabbitmq_connection_and_channel(Config) -> #{ server := Host, port := Port, @@ -314,237 +147,164 @@ create_rabbitmq_connection_and_channel(Config) -> password := WrappedPassword, timeout := Timeout, virtual_host := VirtualHost, - heartbeat := Heartbeat, - wait_for_publish_confirmations := WaitForPublishConfirmations + heartbeat := Heartbeat } = Config, %% TODO: teach `amqp` to accept 0-arity closures as passwords. 
Password = emqx_secret:unwrap(WrappedPassword), - SSLOptions = - case maps:get(ssl, Config, #{}) of - #{enable := true} = SSLOpts -> - emqx_tls_lib:to_client_opts(SSLOpts); - _ -> - none - end, - RabbitMQConnectionOptions = + RabbitMQConnOptions = #amqp_params_network{ - host = erlang:binary_to_list(Host), + host = Host, port = Port, - ssl_options = SSLOptions, + ssl_options = to_ssl_options(Config), username = Username, password = Password, connection_timeout = Timeout, virtual_host = VirtualHost, heartbeat = Heartbeat }, - {ok, RabbitMQConnection} = - case amqp_connection:start(RabbitMQConnectionOptions) of - {ok, Connection} -> - {ok, Connection}; - {error, Reason} -> - erlang:error({error, Reason}) - end, - {ok, RabbitMQChannel} = - case amqp_connection:open_channel(RabbitMQConnection) of - {ok, Channel} -> - {ok, Channel}; - {error, OpenChannelErrorReason} -> - erlang:error({error, OpenChannelErrorReason}) - end, - %% We need to enable confirmations if we want to wait for them - case WaitForPublishConfirmations of - true -> - case amqp_channel:call(RabbitMQChannel, #'confirm.select'{}) of - #'confirm.select_ok'{} -> - ok; - Error -> - ConfirmModeErrorReason = - erlang:iolist_to_binary( - io_lib:format( - "Could not enable RabbitMQ confirmation mode ~p", - [Error] - ) - ), - erlang:error({error, ConfirmModeErrorReason}) - end; - false -> - ok - end, - {ok, {RabbitMQConnection, RabbitMQChannel}, #{ - supervisees => [RabbitMQConnection, RabbitMQChannel] - }}. - -%% emqx_resource callback called to check the status of the resource + case amqp_connection:start(RabbitMQConnOptions) of + {ok, RabbitMQConn} -> + {ok, RabbitMQConn}; + {error, Reason} -> + ?SLOG(error, #{ + msg => "rabbitmq_connector_connection_failed", + reason => Reason, + config => emqx_utils:redact(Config) + }), + {error, Reason} + end. -spec on_get_status(resource_id(), term()) -> {connected, resource_state()} | {disconnected, resource_state(), binary()}. 
-on_get_status( - _InstId, - #{ - poolname := PoolName - } = State -) -> - Workers = [Worker || {_WorkerName, Worker} <- ecpool:workers(PoolName)], - Clients = [ - begin - {ok, Client} = ecpool_worker:client(Worker), - Client - end - || Worker <- Workers - ], - CheckResults = [ - check_worker(Client) - || Client <- Clients - ], - Connected = length(CheckResults) > 0 andalso lists:all(fun(R) -> R end, CheckResults), - case Connected of - true -> - {connected, State}; - false -> - {disconnected, State, <<"not_connected">>} - end; -on_get_status( - _InstId, - State -) -> - {disconnect, State, <<"not_connected: no connection pool in state">>}. +on_get_status(PoolName, #{channels := Channels} = State) -> + ChannelNum = maps:size(Channels), + Conns = get_rabbitmq_connections(PoolName), + Check = + lists:all( + fun(Conn) -> + [{num_channels, ActualNum}] = amqp_connection:info(Conn, [num_channels]), + ChannelNum >= ActualNum + end, + Conns + ), + case Check andalso Conns =/= [] of + true -> {connected, State}; + false -> {disconnected, State, <<"not_connected">>} + end. -check_worker({Channel, Connection}) -> - erlang:is_process_alive(Channel) andalso erlang:is_process_alive(Connection). +on_get_channel_status(_InstanceId, ChannelId, #{channels := Channels}) -> + case emqx_utils_maps:deep_find([ChannelId, rabbitmq], Channels) of + {ok, RabbitMQ} -> + case lists:all(fun is_process_alive/1, maps:values(RabbitMQ)) of + true -> connected; + false -> {error, not_connected} + end; + _ -> + {error, not_exists} + end. -%% emqx_resource callback that is called when a non-batch query is received - --spec on_query(resource_id(), Request, resource_state()) -> query_result() when - Request :: {RequestType, Data}, - RequestType :: send_message, - Data :: map(). 
-on_query( - ResourceID, - {RequestType, Data}, - #{ - poolname := PoolName, - processed_payload_template := PayloadTemplate, - config := Config - } = State -) -> +on_query(ResourceID, {ChannelId, Data} = MsgReq, State) -> ?SLOG(debug, #{ msg => "rabbitmq_connector_received_query", connector => ResourceID, - type => RequestType, + channel => ChannelId, data => Data, state => emqx_utils:redact(State) }), - MessageData = format_data(PayloadTemplate, Data), - Res = ecpool:pick_and_do( - PoolName, - {?MODULE, publish_messages, [Config, [MessageData]]}, - no_handover - ), - handle_result(Res). + #{channels := Channels} = State, + case maps:find(ChannelId, Channels) of + {ok, #{param := ProcParam, rabbitmq := RabbitMQ}} -> + Res = ecpool:pick_and_do( + ResourceID, + {?MODULE, publish_messages, [RabbitMQ, ProcParam, [MsgReq]]}, + no_handover + ), + handle_result(Res); + error -> + {error, {unrecoverable_error, {invalid_message_tag, ChannelId}}} + end. -%% emqx_resource callback that is called when a batch query is received - --spec on_batch_query(resource_id(), BatchReq, resource_state()) -> query_result() when - BatchReq :: nonempty_list({'send_message', map()}). 
-on_batch_query( - ResourceID, - BatchReq, - State -) -> +on_batch_query(ResourceID, [{ChannelId, _Data} | _] = Batch, State) -> ?SLOG(debug, #{ msg => "rabbitmq_connector_received_batch_query", connector => ResourceID, - data => BatchReq, + data => Batch, state => emqx_utils:redact(State) }), - %% Currently we only support batch requests with the send_message key - {Keys, MessagesToInsert} = lists:unzip(BatchReq), - ensure_keys_are_of_type_send_message(Keys), - %% Pick out the payload template - #{ - processed_payload_template := PayloadTemplate, - poolname := PoolName, - config := Config - } = State, - %% Create batch payload - FormattedMessages = [ - format_data(PayloadTemplate, Data) - || Data <- MessagesToInsert - ], - %% Publish the messages - Res = ecpool:pick_and_do( - PoolName, - {?MODULE, publish_messages, [Config, FormattedMessages]}, - no_handover - ), - handle_result(Res). + #{channels := Channels} = State, + case maps:find(ChannelId, Channels) of + {ok, #{param := ProcParam, rabbitmq := RabbitMQ}} -> + Res = ecpool:pick_and_do( + ResourceID, + {?MODULE, publish_messages, [RabbitMQ, ProcParam, Batch]}, + no_handover + ), + handle_result(Res); + error -> + {error, {unrecoverable_error, {invalid_message_tag, ChannelId}}} + end. 
publish_messages( - {_Connection, Channel}, + Conn, + RabbitMQ, #{ delivery_mode := DeliveryMode, + payload_template := PayloadTmpl, routing_key := RoutingKey, exchange := Exchange, wait_for_publish_confirmations := WaitForPublishConfirmations, publish_confirmation_timeout := PublishConfirmationTimeout - } = _Config, + }, Messages ) -> - MessageProperties = #'P_basic'{ - headers = [], - delivery_mode = DeliveryMode - }, - Method = #'basic.publish'{ - exchange = Exchange, - routing_key = RoutingKey - }, - _ = [ - amqp_channel:cast( - Channel, - Method, - #amqp_msg{ - payload = Message, - props = MessageProperties - } - ) - || Message <- Messages - ], - case WaitForPublishConfirmations of - true -> - case amqp_channel:wait_for_confirms(Channel, PublishConfirmationTimeout) of - true -> - ok; - false -> - erlang:error( - {recoverable_error, - <<"RabbitMQ: Got NACK when waiting for message acknowledgment.">>} - ); - timeout -> - erlang:error( - {recoverable_error, - <<"RabbitMQ: Timeout when waiting for message acknowledgment.">>} + case maps:find(Conn, RabbitMQ) of + {ok, Channel} -> + MessageProperties = #'P_basic'{ + headers = [], + delivery_mode = DeliveryMode + }, + Method = #'basic.publish'{ + exchange = Exchange, + routing_key = RoutingKey + }, + lists:foreach( + fun({_, MsgRaw}) -> + amqp_channel:cast( + Channel, + Method, + #amqp_msg{ + payload = format_data(PayloadTmpl, MsgRaw), + props = MessageProperties + } ) + end, + Messages + ), + case WaitForPublishConfirmations of + true -> + case amqp_channel:wait_for_confirms(Channel, PublishConfirmationTimeout) of + true -> + ok; + false -> + erlang:error( + {recoverable_error, + <<"RabbitMQ: Got NACK when waiting for message acknowledgment.">>} + ); + timeout -> + erlang:error( + {recoverable_error, + <<"RabbitMQ: Timeout when waiting for message acknowledgment.">>} + ) + end; + false -> + ok end; - false -> - ok - end. 
- -ensure_keys_are_of_type_send_message(Keys) -> - case lists:all(fun is_send_message_atom/1, Keys) of - true -> - ok; - false -> + error -> erlang:error( - {unrecoverable_error, - <<"Unexpected type for batch message (Expected send_message)">>} + {recoverable_error, {<<"RabbitMQ: channel_not_found">>, Conn, RabbitMQ}} ) end. -is_send_message_atom(send_message) -> - true; -is_send_message_atom(_) -> - false. - format_data([], Msg) -> emqx_utils_json:encode(Msg); format_data(Tokens, Msg) -> @@ -554,3 +314,105 @@ handle_result({error, ecpool_empty}) -> {error, {recoverable_error, ecpool_empty}}; handle_result(Res) -> Res. + +make_channel(PoolName, ChannelId, Params) -> + Conns = get_rabbitmq_connections(PoolName), + make_channel(Conns, PoolName, ChannelId, Params, #{}). + +make_channel([], _PoolName, _ChannelId, _Param, Acc) -> + {ok, Acc}; +make_channel([Conn | Conns], PoolName, ChannelId, Params, Acc) -> + maybe + {ok, RabbitMQChannel} ?= amqp_connection:open_channel(Conn), + ok ?= try_confirm_channel(Params, RabbitMQChannel), + ok ?= try_subscribe(Params, RabbitMQChannel, PoolName, ChannelId), + NewAcc = Acc#{Conn => RabbitMQChannel}, + make_channel(Conns, PoolName, ChannelId, Params, NewAcc) + end. + +%% We need to enable confirmations if we want to wait for them +try_confirm_channel(#{wait_for_publish_confirmations := true}, Channel) -> + case amqp_channel:call(Channel, #'confirm.select'{}) of + #'confirm.select_ok'{} -> + ok; + Error -> + Reason = + iolist_to_binary( + io_lib:format( + "Could not enable RabbitMQ confirmation mode ~p", + [Error] + ) + ), + {error, Reason} + end; +try_confirm_channel(#{wait_for_publish_confirmations := false}, _Channel) -> + ok. + +%% Initialize Rabbitmq's secret library so that the password is encrypted +%% in the log files. 
+init_secret() -> + case credentials_obfuscation:secret() of + ?PENDING_SECRET -> + Bytes = crypto:strong_rand_bytes(128), + %% The password can appear in log files if we don't do this + credentials_obfuscation:set_secret(Bytes); + _ -> + %% Already initialized + ok + end. + +preproc_parameter(#{config_root := actions, parameters := Parameter}) -> + #{ + payload_template := PayloadTemplate, + delivery_mode := InitialDeliveryMode + } = Parameter, + Parameter#{ + delivery_mode => delivery_mode(InitialDeliveryMode), + payload_template => emqx_placeholder:preproc_tmpl(PayloadTemplate), + config_root => actions + }; +preproc_parameter(#{config_root := sources, parameters := Parameter, hookpoints := Hooks}) -> + Parameter#{hookpoints => Hooks, config_root => sources}. + +delivery_mode(non_persistent) -> 1; +delivery_mode(persistent) -> 2. + +to_ssl_options(#{ssl := #{enable := true} = SSLOpts}) -> + emqx_tls_lib:to_client_opts(SSLOpts); +to_ssl_options(_) -> + none. + +get_rabbitmq_connections(PoolName) -> + lists:filtermap( + fun({_Name, Worker}) -> + case ecpool_worker:client(Worker) of + {ok, Conn} -> {true, Conn}; + _ -> false + end + end, + ecpool:workers(PoolName) + ). + +try_subscribe( + #{queue := Queue, no_ack := NoAck, config_root := sources} = Params, + RabbitChan, + PoolName, + ChannelId +) -> + WorkState = {RabbitChan, PoolName, Params}, + {ok, ConsumePid} = emqx_bridge_rabbitmq_sup:ensure_started(ChannelId, WorkState), + BasicConsume = #'basic.consume'{queue = Queue, no_ack = NoAck}, + #'basic.consume_ok'{consumer_tag = _} = + amqp_channel:subscribe(RabbitChan, BasicConsume, ConsumePid), + ok; +try_subscribe(#{config_root := actions}, _RabbitChan, _PoolName, _ChannelId) -> + ok. 
+ +try_unsubscribe(ChannelId, Channels) -> + case emqx_utils_maps:deep_find([ChannelId, rabbitmq], Channels) of + {ok, RabbitMQ} -> + lists:foreach(fun(Pid) -> catch amqp_channel:close(Pid) end, maps:values(RabbitMQ)), + emqx_bridge_rabbitmq_sup:ensure_deleted(ChannelId); + _ -> + ok + end. diff --git a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_connector_schema.erl b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_connector_schema.erl new file mode 100644 index 000000000..02b5ae61c --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_connector_schema.erl @@ -0,0 +1,141 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_rabbitmq_connector_schema). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-define(TYPE, rabbitmq). + +-export([roots/0, fields/1, desc/1, namespace/0]). +-export([connector_examples/1, connector_example_values/0]). + +%%====================================================================================== +%% Hocon Schema Definitions +namespace() -> ?TYPE. + +roots() -> []. 
+ +fields("config_connector") -> + emqx_bridge_schema:common_bridge_fields() ++ + fields(connector) ++ + emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts); +fields(connector) -> + [ + {server, + ?HOCON( + string(), + #{ + default => <<"localhost">>, + desc => ?DESC("server") + } + )}, + {port, + ?HOCON( + emqx_schema:port_number(), + #{ + default => 5672, + desc => ?DESC("server") + } + )}, + {username, + ?HOCON( + binary(), + #{ + required => true, + desc => ?DESC("username") + } + )}, + {password, emqx_connector_schema_lib:password_field(#{required => true})}, + {pool_size, + ?HOCON( + pos_integer(), + #{ + default => 8, + desc => ?DESC("pool_size") + } + )}, + {timeout, + ?HOCON( + emqx_schema:timeout_duration_ms(), + #{ + default => <<"5s">>, + desc => ?DESC("timeout") + } + )}, + {virtual_host, + ?HOCON( + binary(), + #{ + default => <<"/">>, + desc => ?DESC("virtual_host") + } + )}, + {heartbeat, + ?HOCON( + emqx_schema:timeout_duration_ms(), + #{ + default => <<"30s">>, + desc => ?DESC("heartbeat") + } + )} + ] ++ + emqx_connector_schema_lib:ssl_fields(); +fields(connector_resource_opts) -> + emqx_connector_schema:resource_opts_fields(); +fields("post") -> + emqx_connector_schema:type_and_name_fields(?TYPE) ++ fields("config_connector"); +fields("put") -> + fields("config_connector"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("config_connector"). + +desc("config_connector") -> + ?DESC("config_connector"); +desc(connector_resource_opts) -> + ?DESC(connector_resource_opts); +desc(_) -> + undefined. + +connector_examples(Method) -> + [ + #{ + <<"rabbitmq">> => + #{ + summary => <<"Rabbitmq Connector">>, + value => emqx_connector_schema:connector_values( + Method, ?TYPE, connector_example_values() + ) + } + } + ]. 
+ +connector_example_values() -> + #{ + name => <<"rabbitmq_connector">>, + type => rabbitmq, + enable => true, + server => <<"127.0.0.1">>, + port => 5672, + username => <<"guest">>, + password => <<"******">>, + pool_size => 8, + timeout => <<"5s">>, + virtual_host => <<"/">>, + heartbeat => <<"30s">>, + ssl => #{enable => false} + }. diff --git a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_pubsub_schema.erl b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_pubsub_schema.erl new file mode 100644 index 000000000..3fb00632c --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_pubsub_schema.erl @@ -0,0 +1,246 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_bridge_rabbitmq_pubsub_schema). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-export([roots/0, fields/1, desc/1, namespace/0]). + +-export([ + bridge_v2_examples/1, + source_examples/1 +]). + +-define(ACTION_TYPE, rabbitmq). +-define(SOURCE_TYPE, rabbitmq). +-define(CONNECTOR_SCHEMA, emqx_bridge_rabbitmq_connector_schema). + +%%====================================================================================== +%% Hocon Schema Definitions +namespace() -> "bridge_rabbitmq". + +roots() -> []. 
+ +fields(action) -> + {rabbitmq, + ?HOCON( + ?MAP(name, ?R_REF(publisher_action)), + #{ + desc => <<"RabbitMQ Action Config">>, + required => false + } + )}; +fields(publisher_action) -> + emqx_bridge_v2_schema:make_producer_action_schema( + ?HOCON( + ?R_REF(action_parameters), + #{ + required => true, + desc => ?DESC(action_parameters) + } + ), + #{resource_opts_ref => ?R_REF(action_resource_opts)} + ); +fields(action_parameters) -> + [ + {wait_for_publish_confirmations, + hoconsc:mk( + boolean(), + #{ + default => true, + desc => ?DESC(?CONNECTOR_SCHEMA, "wait_for_publish_confirmations") + } + )}, + {publish_confirmation_timeout, + hoconsc:mk( + emqx_schema:timeout_duration_ms(), + #{ + default => <<"30s">>, + desc => ?DESC(?CONNECTOR_SCHEMA, "timeout") + } + )}, + {exchange, + hoconsc:mk( + typerefl:binary(), + #{ + required => true, + desc => ?DESC(?CONNECTOR_SCHEMA, "exchange") + } + )}, + {routing_key, + hoconsc:mk( + typerefl:binary(), + #{ + required => true, + desc => ?DESC(?CONNECTOR_SCHEMA, "routing_key") + } + )}, + {delivery_mode, + hoconsc:mk( + hoconsc:enum([non_persistent, persistent]), + #{ + default => non_persistent, + desc => ?DESC(?CONNECTOR_SCHEMA, "delivery_mode") + } + )}, + {payload_template, + hoconsc:mk( + binary(), + #{ + default => <<"">>, + desc => ?DESC(?CONNECTOR_SCHEMA, "payload_template") + } + )} + ]; +fields(source) -> + {rabbitmq, + ?HOCON( + hoconsc:map(name, ?R_REF(subscriber_source)), + #{ + desc => <<"MQTT Subscriber Source Config">>, + required => false + } + )}; +fields(subscriber_source) -> + emqx_bridge_v2_schema:make_consumer_action_schema( + ?HOCON( + ?R_REF(source_parameters), + #{ + required => true, + desc => ?DESC("source_parameters") + } + ) + ); +fields(source_parameters) -> + [ + {queue, + ?HOCON( + binary(), + #{ + required => true, + desc => ?DESC("source_queue") + } + )}, + {wait_for_publish_confirmations, + hoconsc:mk( + boolean(), + #{ + default => true, + desc => ?DESC(?CONNECTOR_SCHEMA, 
"wait_for_publish_confirmations") + } + )}, + {no_ack, + ?HOCON( + boolean(), + #{ + required => false, + default => true, + desc => ?DESC("source_no_ack") + } + )} + ]; +fields(action_resource_opts) -> + emqx_bridge_v2_schema:action_resource_opts_fields(); +fields(source_resource_opts) -> + emqx_bridge_v2_schema:source_resource_opts_fields(); +fields(Field) when + Field == "get_bridge_v2"; + Field == "post_bridge_v2"; + Field == "put_bridge_v2" +-> + emqx_bridge_v2_schema:api_fields(Field, ?ACTION_TYPE, fields(publisher_action)); +fields(Field) when + Field == "get_source"; + Field == "post_source"; + Field == "put_source" +-> + emqx_bridge_v2_schema:api_fields(Field, ?SOURCE_TYPE, fields(subscriber_source)); +fields(What) -> + error({emqx_bridge_mqtt_pubsub_schema, missing_field_handler, What}). +%% v2: api schema +%% The parameter equals to +%% `get_bridge_v2`, `post_bridge_v2`, `put_bridge_v2` from emqx_bridge_v2_schema:api_schema/1 +%% `get_connector`, `post_connector`, `put_connector` from emqx_connector_schema:api_schema/1 +%%-------------------------------------------------------------------- +%% v1/v2 + +desc("config") -> + ?DESC("desc_config"); +desc(action_resource_opts) -> + ?DESC(emqx_resource_schema, "creation_opts"); +desc(source_resource_opts) -> + ?DESC(emqx_resource_schema, "creation_opts"); +desc(action_parameters) -> + ?DESC(action_parameters); +desc(source_parameters) -> + ?DESC(source_parameters); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for WebHook using `", string:to_upper(Method), "` method."]; +desc("http_action") -> + ?DESC("desc_config"); +desc("parameters_opts") -> + ?DESC("config_parameters_opts"); +desc(publisher_action) -> + ?DESC(publisher_action); +desc(subscriber_source) -> + ?DESC(subscriber_source); +desc(_) -> + undefined. 
+ +bridge_v2_examples(Method) -> + [ + #{ + <<"rabbitmq">> => #{ + summary => <<"RabbitMQ Producer Action">>, + value => emqx_bridge_v2_schema:action_values( + Method, + _ActionType = ?ACTION_TYPE, + _ConnectorType = rabbitmq, + #{ + parameters => #{ + wait_for_publish_confirmations => true, + publish_confirmation_timeout => <<"30s">>, + exchange => <<"test_exchange">>, + routing_key => <<"/">>, + delivery_mode => <<"non_persistent">>, + payload_template => <<"${.payload}">> + } + } + ) + } + } + ]. + +source_examples(Method) -> + [ + #{ + <<"rabbitmq">> => #{ + summary => <<"RabbitMQ Subscriber Source">>, + value => emqx_bridge_v2_schema:source_values( + Method, + _SourceType = ?SOURCE_TYPE, + _ConnectorType = rabbitmq, + #{ + parameters => #{ + queue => <<"test_queue">>, + no_ack => true + } + } + ) + } + } + ]. diff --git a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_source_sup.erl b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_source_sup.erl new file mode 100644 index 000000000..e15e258dc --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_source_sup.erl @@ -0,0 +1,32 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_rabbitmq_source_sup). + +-behaviour(supervisor). +%% API +-export([start_link/0]). +-export([init/1]). + +start_link() -> + supervisor:start_link(?MODULE, []). + +init([]) -> + SupFlags = #{ + strategy => simple_one_for_one, + intensity => 100, + period => 10 + }, + {ok, {SupFlags, [worker_spec()]}}. + +worker_spec() -> + Mod = emqx_bridge_rabbitmq_source_worker, + #{ + id => Mod, + start => {Mod, start_link, []}, + restart => transient, + shutdown => brutal_kill, + type => worker, + modules => [Mod] + }. 
diff --git a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_source_worker.erl b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_source_worker.erl new file mode 100644 index 000000000..d0d43641b --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_source_worker.erl @@ -0,0 +1,104 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_rabbitmq_source_worker). + +-behaviour(gen_server). + +-export([start_link/1]). +-export([ + init/1, + handle_continue/2, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2 +]). + +-include_lib("amqp_client/include/amqp_client.hrl"). + +start_link(Args) -> + gen_server:start_link(?MODULE, Args, []). + +init({_RabbitChannel, _InstanceId, _Params} = State) -> + {ok, State, {continue, confirm_ok}}. + +handle_continue(confirm_ok, State) -> + receive + #'basic.consume_ok'{} -> {noreply, State} + end. + +handle_call(_Request, _From, State) -> + {reply, ok, State}. + +handle_cast(_Request, State) -> + {noreply, State}. + +handle_info( + {#'basic.deliver'{delivery_tag = Tag}, #amqp_msg{ + payload = Payload, + props = PBasic + }}, + {Channel, InstanceId, Params} = State +) -> + Message = to_map(PBasic, Payload), + #{hookpoints := Hooks, no_ack := NoAck} = Params, + lists:foreach(fun(Hook) -> emqx_hooks:run(Hook, [Message]) end, Hooks), + (NoAck =:= false) andalso + amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag}), + emqx_resource_metrics:received_inc(InstanceId), + {noreply, State}; +handle_info(#'basic.cancel_ok'{}, State) -> + {stop, normal, State}; +handle_info(_Info, State) -> + {noreply, State}. 
+ +to_map(PBasic, Payload) -> + #'P_basic'{ + content_type = ContentType, + content_encoding = ContentEncoding, + headers = Headers, + delivery_mode = DeliveryMode, + priority = Priority, + correlation_id = CorrelationId, + reply_to = ReplyTo, + expiration = Expiration, + message_id = MessageId, + timestamp = Timestamp, + type = Type, + user_id = UserId, + app_id = AppId, + cluster_id = ClusterId + } = PBasic, + Message = #{ + <<"payload">> => make_payload(Payload), + <<"content_type">> => ContentType, + <<"content_encoding">> => ContentEncoding, + <<"headers">> => make_headers(Headers), + <<"delivery_mode">> => DeliveryMode, + <<"priority">> => Priority, + <<"correlation_id">> => CorrelationId, + <<"reply_to">> => ReplyTo, + <<"expiration">> => Expiration, + <<"message_id">> => MessageId, + <<"timestamp">> => Timestamp, + <<"type">> => Type, + <<"user_id">> => UserId, + <<"app_id">> => AppId, + <<"cluster_id">> => ClusterId + }, + maps:filtermap(fun(_K, V) -> V =/= undefined andalso V =/= <<"undefined">> end, Message). + +terminate(_Reason, _State) -> + ok. + +make_headers(undefined) -> + undefined; +make_headers(Headers) when is_list(Headers) -> + maps:from_list([{Key, Value} || {Key, _Type, Value} <- Headers]). + +make_payload(Payload) -> + case emqx_utils_json:safe_decode(Payload, [return_maps]) of + {ok, Map} -> Map; + {error, _} -> Payload + end. diff --git a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_sup.erl b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_sup.erl new file mode 100644 index 000000000..b81052269 --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_sup.erl @@ -0,0 +1,76 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_bridge_rabbitmq_sup). + +-feature(maybe_expr, enable). +-behaviour(supervisor). + +-export([ensure_started/2]). +-export([ensure_deleted/1]). +-export([start_link/0]). +-export([init/1]). + +-define(BRIDGE_SUP, ?MODULE). + +ensure_started(SuperId, Config) -> + {ok, SuperPid} = ensure_supervisor_started(SuperId), + case supervisor:start_child(SuperPid, [Config]) of + {ok, WorkPid} -> + {ok, WorkPid}; + {error, {already_started, WorkPid}} -> + {ok, WorkPid}; + {error, Error} -> + {error, Error} + end. + +ensure_deleted(SuperId) -> + maybe + Pid = erlang:whereis(?BRIDGE_SUP), + true ?= Pid =/= undefined, + ok ?= supervisor:terminate_child(Pid, SuperId), + ok ?= supervisor:delete_child(Pid, SuperId) + else + false -> ok; + {error, not_found} -> ok; + Error -> Error + end. + +ensure_supervisor_started(Id) -> + SupervisorSpec = + #{ + id => Id, + start => {emqx_bridge_rabbitmq_source_sup, start_link, []}, + restart => permanent, + type => supervisor + }, + case supervisor:start_child(?BRIDGE_SUP, SupervisorSpec) of + {ok, Pid} -> + {ok, Pid}; + {error, {already_started, Pid}} -> + {ok, Pid} + end. + +start_link() -> + supervisor:start_link({local, ?BRIDGE_SUP}, ?MODULE, []). + +init([]) -> + SupFlags = #{ + strategy => one_for_one, + intensity => 50, + period => 10 + }, + ChildSpecs = [], + {ok, {SupFlags, ChildSpecs}}. 
diff --git a/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_SUITE.erl b/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_SUITE.erl deleted file mode 100644 index 0ae7af9fc..000000000 --- a/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_SUITE.erl +++ /dev/null @@ -1,426 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. -%%-------------------------------------------------------------------- - --module(emqx_bridge_rabbitmq_SUITE). - --compile(nowarn_export_all). --compile(export_all). - --include_lib("emqx_connector/include/emqx_connector.hrl"). --include_lib("eunit/include/eunit.hrl"). --include_lib("stdlib/include/assert.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - -%% See comment in -%% apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_connector_SUITE.erl for how to -%% run this without bringing up the whole CI infrastucture - -rabbit_mq_host() -> - <<"rabbitmq">>. - -rabbit_mq_port() -> - 5672. - -rabbit_mq_exchange() -> - <<"messages">>. - -rabbit_mq_queue() -> - <<"test_queue">>. - -rabbit_mq_routing_key() -> - <<"test_routing_key">>. - -get_channel_connection(Config) -> - proplists:get_value(channel_connection, Config). - -%%------------------------------------------------------------------------------ -%% Common Test Setup, Teardown and Testcase List -%%------------------------------------------------------------------------------ - -all() -> - [ - {group, tcp}, - {group, tls} - ]. - -groups() -> - AllTCs = emqx_common_test_helpers:all(?MODULE), - [ - {tcp, AllTCs}, - {tls, AllTCs} - ]. - -init_per_suite(Config) -> - Config. - -end_per_suite(_Config) -> - ok. 
- -init_per_group(tcp, Config) -> - RabbitMQHost = os:getenv("RABBITMQ_PLAIN_HOST", "rabbitmq"), - RabbitMQPort = list_to_integer(os:getenv("RABBITMQ_PLAIN_PORT", "5672")), - case emqx_common_test_helpers:is_tcp_server_available(RabbitMQHost, RabbitMQPort) of - true -> - Config1 = common_init_per_group(#{ - host => RabbitMQHost, port => RabbitMQPort, tls => false - }), - Config1 ++ Config; - false -> - case os:getenv("IS_CI") of - "yes" -> - throw(no_rabbitmq); - _ -> - {skip, no_rabbitmq} - end - end; -init_per_group(tls, Config) -> - RabbitMQHost = os:getenv("RABBITMQ_TLS_HOST", "rabbitmq"), - RabbitMQPort = list_to_integer(os:getenv("RABBITMQ_TLS_PORT", "5671")), - case emqx_common_test_helpers:is_tcp_server_available(RabbitMQHost, RabbitMQPort) of - true -> - Config1 = common_init_per_group(#{ - host => RabbitMQHost, port => RabbitMQPort, tls => true - }), - Config1 ++ Config; - false -> - case os:getenv("IS_CI") of - "yes" -> - throw(no_rabbitmq); - _ -> - {skip, no_rabbitmq} - end - end; -init_per_group(_Group, Config) -> - Config. - -common_init_per_group(Opts) -> - emqx_common_test_helpers:render_and_load_app_config(emqx_conf), - ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), - ok = emqx_connector_test_helpers:start_apps([emqx_resource]), - {ok, _} = application:ensure_all_started(emqx_connector), - {ok, _} = application:ensure_all_started(amqp_client), - emqx_mgmt_api_test_util:init_suite(), - ChannelConnection = setup_rabbit_mq_exchange_and_queue(Opts), - [{channel_connection, ChannelConnection}]. 
- -setup_rabbit_mq_exchange_and_queue(#{host := RabbitMQHost, port := RabbitMQPort, tls := UseTLS}) -> - SSLOptions = - case UseTLS of - false -> - none; - true -> - CertsDir = filename:join([ - emqx_common_test_helpers:proj_root(), - ".ci", - "docker-compose-file", - "certs" - ]), - emqx_tls_lib:to_client_opts( - #{ - enable => true, - cacertfile => filename:join([CertsDir, "ca.crt"]), - certfile => filename:join([CertsDir, "client.pem"]), - keyfile => filename:join([CertsDir, "client.key"]) - } - ) - end, - %% Create an exachange and a queue - {ok, Connection} = - amqp_connection:start(#amqp_params_network{ - host = RabbitMQHost, - port = RabbitMQPort, - ssl_options = SSLOptions - }), - {ok, Channel} = amqp_connection:open_channel(Connection), - %% Create an exchange - #'exchange.declare_ok'{} = - amqp_channel:call( - Channel, - #'exchange.declare'{ - exchange = rabbit_mq_exchange(), - type = <<"topic">> - } - ), - %% Create a queue - #'queue.declare_ok'{} = - amqp_channel:call( - Channel, - #'queue.declare'{queue = rabbit_mq_queue()} - ), - %% Bind the queue to the exchange - #'queue.bind_ok'{} = - amqp_channel:call( - Channel, - #'queue.bind'{ - queue = rabbit_mq_queue(), - exchange = rabbit_mq_exchange(), - routing_key = rabbit_mq_routing_key() - } - ), - #{ - connection => Connection, - channel => Channel - }. - -end_per_group(_Group, Config) -> - #{ - connection := Connection, - channel := Channel - } = get_channel_connection(Config), - emqx_mgmt_api_test_util:end_suite(), - ok = emqx_common_test_helpers:stop_apps([emqx_conf]), - ok = emqx_connector_test_helpers:stop_apps([emqx_resource]), - _ = application:stop(emqx_connector), - _ = application:stop(emqx_bridge), - %% Close the channel - ok = amqp_channel:close(Channel), - %% Close the connection - ok = amqp_connection:close(Connection). - -init_per_testcase(_, Config) -> - Config. - -end_per_testcase(_, _Config) -> - ok. 
- -rabbitmq_config(Config) -> - %%SQL = maps:get(sql, Config, sql_insert_template_for_bridge()), - BatchSize = maps:get(batch_size, Config, 1), - BatchTime = maps:get(batch_time_ms, Config, 0), - Name = atom_to_binary(?MODULE), - Server = maps:get(server, Config, rabbit_mq_host()), - Port = maps:get(port, Config, rabbit_mq_port()), - Template = maps:get(payload_template, Config, <<"">>), - ConfigString = - io_lib:format( - "bridges.rabbitmq.~s {\n" - " enable = true\n" - " server = \"~s\"\n" - " port = ~p\n" - " username = \"guest\"\n" - " password = \"guest\"\n" - " routing_key = \"~s\"\n" - " exchange = \"~s\"\n" - " payload_template = \"~s\"\n" - " resource_opts = {\n" - " batch_size = ~b\n" - " batch_time = ~bms\n" - " }\n" - "}\n", - [ - Name, - Server, - Port, - rabbit_mq_routing_key(), - rabbit_mq_exchange(), - Template, - BatchSize, - BatchTime - ] - ), - ct:pal(ConfigString), - parse_and_check(ConfigString, <<"rabbitmq">>, Name). - -parse_and_check(ConfigString, BridgeType, Name) -> - {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), - hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), - #{<<"bridges">> := #{BridgeType := #{Name := RetConfig}}} = RawConf, - RetConfig. - -make_bridge(Config) -> - Type = <<"rabbitmq">>, - Name = atom_to_binary(?MODULE), - BridgeConfig = rabbitmq_config(Config), - {ok, _} = emqx_bridge:create( - Type, - Name, - BridgeConfig - ), - emqx_bridge_resource:bridge_id(Type, Name). - -delete_bridge() -> - Type = <<"rabbitmq">>, - Name = atom_to_binary(?MODULE), - ok = emqx_bridge:remove(Type, Name). 
- -%%------------------------------------------------------------------------------ -%% Test Cases -%%------------------------------------------------------------------------------ - -t_make_delete_bridge(_Config) -> - make_bridge(#{}), - %% Check that the new brige is in the list of bridges - Bridges = emqx_bridge:list(), - Name = atom_to_binary(?MODULE), - IsRightName = - fun - (#{name := BName}) when BName =:= Name -> - true; - (_) -> - false - end, - ?assert(lists:any(IsRightName, Bridges)), - delete_bridge(), - BridgesAfterDelete = emqx_bridge:list(), - ?assertNot(lists:any(IsRightName, BridgesAfterDelete)), - ok. - -t_make_delete_bridge_non_existing_server(_Config) -> - make_bridge(#{server => <<"non_existing_server">>, port => 3174}), - %% Check that the new brige is in the list of bridges - Bridges = emqx_bridge:list(), - Name = atom_to_binary(?MODULE), - IsRightName = - fun - (#{name := BName}) when BName =:= Name -> - true; - (_) -> - false - end, - ?assert(lists:any(IsRightName, Bridges)), - delete_bridge(), - BridgesAfterDelete = emqx_bridge:list(), - ?assertNot(lists:any(IsRightName, BridgesAfterDelete)), - ok. - -t_send_message_query(Config) -> - BridgeID = make_bridge(#{batch_size => 1}), - Payload = #{<<"key">> => 42, <<"data">> => <<"RabbitMQ">>, <<"timestamp">> => 10000}, - %% This will use the SQL template included in the bridge - emqx_bridge:send_message(BridgeID, Payload), - %% Check that the data got to the database - ?assertEqual(Payload, receive_simple_test_message(Config)), - delete_bridge(), - ok. 
- -t_send_message_query_with_template(Config) -> - BridgeID = make_bridge(#{ - batch_size => 1, - payload_template => - << - "{" - " \\\"key\\\": ${key}," - " \\\"data\\\": \\\"${data}\\\"," - " \\\"timestamp\\\": ${timestamp}," - " \\\"secret\\\": 42" - "}" - >> - }), - Payload = #{ - <<"key">> => 7, - <<"data">> => <<"RabbitMQ">>, - <<"timestamp">> => 10000 - }, - emqx_bridge:send_message(BridgeID, Payload), - %% Check that the data got to the database - ExpectedResult = Payload#{ - <<"secret">> => 42 - }, - ?assertEqual(ExpectedResult, receive_simple_test_message(Config)), - delete_bridge(), - ok. - -t_send_simple_batch(Config) -> - BridgeConf = - #{ - batch_size => 100 - }, - BridgeID = make_bridge(BridgeConf), - Payload = #{<<"key">> => 42, <<"data">> => <<"RabbitMQ">>, <<"timestamp">> => 10000}, - emqx_bridge:send_message(BridgeID, Payload), - ?assertEqual(Payload, receive_simple_test_message(Config)), - delete_bridge(), - ok. - -t_send_simple_batch_with_template(Config) -> - BridgeConf = - #{ - batch_size => 100, - payload_template => - << - "{" - " \\\"key\\\": ${key}," - " \\\"data\\\": \\\"${data}\\\"," - " \\\"timestamp\\\": ${timestamp}," - " \\\"secret\\\": 42" - "}" - >> - }, - BridgeID = make_bridge(BridgeConf), - Payload = #{ - <<"key">> => 7, - <<"data">> => <<"RabbitMQ">>, - <<"timestamp">> => 10000 - }, - emqx_bridge:send_message(BridgeID, Payload), - ExpectedResult = Payload#{ - <<"secret">> => 42 - }, - ?assertEqual(ExpectedResult, receive_simple_test_message(Config)), - delete_bridge(), - ok. 
- -t_heavy_batching(Config) -> - NumberOfMessages = 20000, - BridgeConf = #{ - batch_size => 10173, - batch_time_ms => 50 - }, - BridgeID = make_bridge(BridgeConf), - SendMessage = fun(Key) -> - Payload = #{ - <<"key">> => Key - }, - emqx_bridge:send_message(BridgeID, Payload) - end, - [SendMessage(Key) || Key <- lists:seq(1, NumberOfMessages)], - AllMessages = lists:foldl( - fun(_, Acc) -> - Message = receive_simple_test_message(Config), - #{<<"key">> := Key} = Message, - Acc#{Key => true} - end, - #{}, - lists:seq(1, NumberOfMessages) - ), - ?assertEqual(NumberOfMessages, maps:size(AllMessages)), - delete_bridge(), - ok. - -receive_simple_test_message(Config) -> - #{channel := Channel} = get_channel_connection(Config), - #'basic.consume_ok'{consumer_tag = ConsumerTag} = - amqp_channel:call( - Channel, - #'basic.consume'{ - queue = rabbit_mq_queue() - } - ), - receive - %% This is the first message received - #'basic.consume_ok'{} -> - ok - end, - receive - {#'basic.deliver'{delivery_tag = DeliveryTag}, Content} -> - %% Ack the message - amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = DeliveryTag}), - %% Cancel the consumer - #'basic.cancel_ok'{consumer_tag = ConsumerTag} = - amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = ConsumerTag}), - emqx_utils_json:decode(Content#amqp_msg.payload) - end. - -rabbitmq_config() -> - Config = - #{ - server => rabbit_mq_host(), - port => 5672, - exchange => rabbit_mq_exchange(), - routing_key => rabbit_mq_routing_key() - }, - #{<<"config">> => Config}. - -test_data() -> - #{<<"msg_field">> => <<"Hello">>}. 
diff --git a/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_connector_SUITE.erl b/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_connector_SUITE.erl index 689c39dc5..56cdc8b0d 100644 --- a/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_connector_SUITE.erl +++ b/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_connector_SUITE.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2022-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- -module(emqx_bridge_rabbitmq_connector_SUITE). @@ -12,109 +12,45 @@ -include_lib("stdlib/include/assert.hrl"). -include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). +-import(emqx_bridge_rabbitmq_test_utils, [ + rabbit_mq_exchange/0, + rabbit_mq_routing_key/0, + rabbit_mq_queue/0, + rabbit_mq_host/0, + rabbit_mq_port/0, + get_rabbitmq/1, + ssl_options/1, + get_channel_connection/1, + parse_and_check/4, + receive_message_from_rabbitmq/1 +]). %% This test SUITE requires a running RabbitMQ instance. If you don't want to -%% bring up the whole CI infrastuctucture with the `scripts/ct/run.sh` script +%% bring up the whole CI infrastructure with the `scripts/ct/run.sh` script %% you can create a clickhouse instance with the following command. %% 5672 is the default port for AMQP 0-9-1 and 15672 is the default port for -%% the HTTP managament interface. +%% the HTTP management interface. %% %% docker run -it --rm --name rabbitmq -p 127.0.0.1:5672:5672 -p 127.0.0.1:15672:15672 rabbitmq:3.11-management -rabbit_mq_host() -> - <<"rabbitmq">>. - -rabbit_mq_port() -> - 5672. - -rabbit_mq_password() -> - <<"guest">>. - -rabbit_mq_exchange() -> - <<"test_exchange">>. - -rabbit_mq_queue() -> - <<"test_queue">>. - -rabbit_mq_routing_key() -> - <<"test_routing_key">>. 
- all() -> - emqx_common_test_helpers:all(?MODULE). + [ + {group, tcp}, + {group, tls} + ]. -init_per_suite(Config) -> - case - emqx_common_test_helpers:is_tcp_server_available( - erlang:binary_to_list(rabbit_mq_host()), rabbit_mq_port() - ) - of - true -> - Apps = emqx_cth_suite:start( - [emqx_conf, emqx_connector, emqx_bridge_rabbitmq], - #{work_dir => emqx_cth_suite:work_dir(Config)} - ), - ChannelConnection = setup_rabbit_mq_exchange_and_queue(), - [{channel_connection, ChannelConnection}, {suite_apps, Apps} | Config]; - false -> - case os:getenv("IS_CI") of - "yes" -> - throw(no_rabbitmq); - _ -> - {skip, no_rabbitmq} - end - end. +groups() -> + AllTCs = emqx_common_test_helpers:all(?MODULE), + [ + {tcp, AllTCs}, + {tls, AllTCs} + ]. -setup_rabbit_mq_exchange_and_queue() -> - %% Create an exachange and a queue - {ok, Connection} = - amqp_connection:start(#amqp_params_network{ - host = erlang:binary_to_list(rabbit_mq_host()), - port = rabbit_mq_port() - }), - {ok, Channel} = amqp_connection:open_channel(Connection), - %% Create an exchange - #'exchange.declare_ok'{} = - amqp_channel:call( - Channel, - #'exchange.declare'{ - exchange = rabbit_mq_exchange(), - type = <<"topic">> - } - ), - %% Create a queue - #'queue.declare_ok'{} = - amqp_channel:call( - Channel, - #'queue.declare'{queue = rabbit_mq_queue()} - ), - %% Bind the queue to the exchange - #'queue.bind_ok'{} = - amqp_channel:call( - Channel, - #'queue.bind'{ - queue = rabbit_mq_queue(), - exchange = rabbit_mq_exchange(), - routing_key = rabbit_mq_routing_key() - } - ), - #{ - connection => Connection, - channel => Channel - }. +init_per_group(Group, Config) -> + emqx_bridge_rabbitmq_test_utils:init_per_group(Group, Config). -get_channel_connection(Config) -> - proplists:get_value(channel_connection, Config). 
- -end_per_suite(Config) -> - #{ - connection := Connection, - channel := Channel - } = get_channel_connection(Config), - %% Close the channel - ok = amqp_channel:close(Channel), - %% Close the connection - ok = amqp_connection:close(Connection), - ok = emqx_cth_suite:stop(?config(suite_apps, Config)). +end_per_group(Group, Config) -> + emqx_bridge_rabbitmq_test_utils:end_per_group(Group, Config). % %%------------------------------------------------------------------------------ % %% Testcases @@ -122,7 +58,7 @@ end_per_suite(Config) -> t_lifecycle(Config) -> perform_lifecycle_check( - erlang:atom_to_binary(?MODULE), + erlang:atom_to_binary(?FUNCTION_NAME), rabbitmq_config(), Config ). @@ -144,12 +80,10 @@ t_start_passfile(Config) -> ). perform_lifecycle_check(ResourceID, InitialConfig, TestConfig) -> - #{ - channel := Channel - } = get_channel_connection(TestConfig), CheckedConfig = check_config(InitialConfig), #{ - state := #{poolname := PoolName} = State, + id := PoolName, + state := State, status := InitialStatus } = create_local_resource(ResourceID, CheckedConfig), ?assertEqual(InitialStatus, connected), @@ -161,7 +95,7 @@ perform_lifecycle_check(ResourceID, InitialConfig, TestConfig) -> emqx_resource:get_instance(ResourceID), ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceID)), %% Perform query as further check that the resource is working as expected - perform_query(ResourceID, Channel), + perform_query(ResourceID, TestConfig), ?assertEqual(ok, emqx_resource:stop(ResourceID)), %% Resource will be listed still, but state will be changed and healthcheck will fail %% as the worker no longer exists. 
@@ -183,7 +117,7 @@ perform_lifecycle_check(ResourceID, InitialConfig, TestConfig) -> emqx_resource:get_instance(ResourceID), ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceID)), %% Check that everything is working again by performing a query - perform_query(ResourceID, Channel), + perform_query(ResourceID, TestConfig), % Stop and remove the resource in one go. ?assertEqual(ok, emqx_resource:remove_local(ResourceID)), ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), @@ -211,34 +145,16 @@ create_local_resource(ResourceID, CheckedConfig) -> perform_query(PoolName, Channel) -> %% Send message to queue: - ok = emqx_resource:query(PoolName, {query, test_data()}), + ActionConfig = rabbitmq_action_config(), + ChannelId = <<"test_channel">>, + ?assertEqual(ok, emqx_resource_manager:add_channel(PoolName, ChannelId, ActionConfig)), + ok = emqx_resource:query(PoolName, {ChannelId, payload()}), %% Get the message from queue: - ok = receive_simple_test_message(Channel). - -receive_simple_test_message(Channel) -> - #'basic.consume_ok'{consumer_tag = ConsumerTag} = - amqp_channel:call( - Channel, - #'basic.consume'{ - queue = rabbit_mq_queue() - } - ), - receive - %% This is the first message received - #'basic.consume_ok'{} -> - ok - end, - receive - {#'basic.deliver'{delivery_tag = DeliveryTag}, Content} -> - Expected = test_data(), - ?assertEqual(Expected, emqx_utils_json:decode(Content#amqp_msg.payload)), - %% Ack the message - amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = DeliveryTag}), - %% Cancel the consumer - #'basic.cancel_ok'{consumer_tag = ConsumerTag} = - amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = ConsumerTag}), - ok - end. + SendData = test_data(), + RecvData = receive_message_from_rabbitmq(Channel), + ?assertMatch(SendData, RecvData), + ?assertEqual(ok, emqx_resource_manager:remove_channel(PoolName, ChannelId)), + ok. rabbitmq_config() -> rabbitmq_config(#{}). 
@@ -255,5 +171,24 @@ rabbitmq_config(Overrides) -> }, #{<<"config">> => maps:merge(Config, Overrides)}. +payload() -> + #{<<"payload">> => test_data()}. + test_data() -> - #{<<"msg_field">> => <<"Hello">>}. + #{<<"Hello">> => <<"World">>}. + +rabbitmq_action_config() -> + #{ + config_root => actions, + parameters => #{ + delivery_mode => non_persistent, + exchange => rabbit_mq_exchange(), + payload_template => <<"${.payload}">>, + publish_confirmation_timeout => 30000, + routing_key => rabbit_mq_routing_key(), + wait_for_publish_confirmations => true + } + }. + +rabbit_mq_password() -> + <<"guest">>. diff --git a/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_test_utils.erl b/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_test_utils.erl new file mode 100644 index 000000000..47df47976 --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_test_utils.erl @@ -0,0 +1,203 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_rabbitmq_test_utils). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("stdlib/include/assert.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). 
+ +init_per_group(tcp, Config) -> + RabbitMQHost = os:getenv("RABBITMQ_PLAIN_HOST", "rabbitmq"), + RabbitMQPort = list_to_integer(os:getenv("RABBITMQ_PLAIN_PORT", "5672")), + case emqx_common_test_helpers:is_tcp_server_available(RabbitMQHost, RabbitMQPort) of + true -> + Config1 = common_init_per_group(#{ + host => RabbitMQHost, port => RabbitMQPort, tls => false + }), + Config1 ++ Config; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_rabbitmq); + _ -> + {skip, no_rabbitmq} + end + end; +init_per_group(tls, Config) -> + RabbitMQHost = os:getenv("RABBITMQ_TLS_HOST", "rabbitmq"), + RabbitMQPort = list_to_integer(os:getenv("RABBITMQ_TLS_PORT", "5671")), + case emqx_common_test_helpers:is_tcp_server_available(RabbitMQHost, RabbitMQPort) of + true -> + Config1 = common_init_per_group(#{ + host => RabbitMQHost, port => RabbitMQPort, tls => true + }), + Config1 ++ Config; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_rabbitmq); + _ -> + {skip, no_rabbitmq} + end + end; +init_per_group(_Group, Config) -> + Config. + +common_init_per_group(Opts) -> + emqx_common_test_helpers:render_and_load_app_config(emqx_conf), + ok = emqx_common_test_helpers:start_apps([ + emqx_conf, emqx_bridge, emqx_bridge_rabbitmq, emqx_rule_engine + ]), + ok = emqx_connector_test_helpers:start_apps([emqx_resource]), + {ok, _} = application:ensure_all_started(emqx_connector), + {ok, _} = application:ensure_all_started(amqp_client), + emqx_mgmt_api_test_util:init_suite(), + #{host := Host, port := Port, tls := UseTLS} = Opts, + ChannelConnection = setup_rabbit_mq_exchange_and_queue(Host, Port, UseTLS), + [ + {channel_connection, ChannelConnection}, + {rabbitmq, #{server => Host, port => Port, tls => UseTLS}} + ]. 
+ +setup_rabbit_mq_exchange_and_queue(Host, Port, UseTLS) -> + SSLOptions = + case UseTLS of + false -> none; + true -> emqx_tls_lib:to_client_opts(ssl_options(UseTLS)) + end, + %% Create an exchange and a queue + {ok, Connection} = + amqp_connection:start(#amqp_params_network{ + host = Host, + port = Port, + ssl_options = SSLOptions + }), + {ok, Channel} = amqp_connection:open_channel(Connection), + %% Create an exchange + #'exchange.declare_ok'{} = + amqp_channel:call( + Channel, + #'exchange.declare'{ + exchange = rabbit_mq_exchange(), + type = <<"topic">> + } + ), + %% Create a queue + #'queue.declare_ok'{} = + amqp_channel:call( + Channel, + #'queue.declare'{queue = rabbit_mq_queue()} + ), + %% Bind the queue to the exchange + #'queue.bind_ok'{} = + amqp_channel:call( + Channel, + #'queue.bind'{ + queue = rabbit_mq_queue(), + exchange = rabbit_mq_exchange(), + routing_key = rabbit_mq_routing_key() + } + ), + #{ + connection => Connection, + channel => Channel + }. + +end_per_group(_Group, Config) -> + #{ + connection := Connection, + channel := Channel + } = get_channel_connection(Config), + amqp_channel:call(Channel, #'queue.purge'{queue = rabbit_mq_queue()}), + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_conf, emqx_bridge_rabbitmq, emqx_rule_engine]), + ok = emqx_connector_test_helpers:stop_apps([emqx_resource]), + _ = application:stop(emqx_connector), + _ = application:stop(emqx_bridge), + %% Close the channel + ok = amqp_channel:close(Channel), + %% Close the connection + ok = amqp_connection:close(Connection). + +rabbit_mq_host() -> + list_to_binary(os:getenv("RABBITMQ_PLAIN_HOST", "rabbitmq")). + +rabbit_mq_port() -> + list_to_integer(os:getenv("RABBITMQ_PLAIN_PORT", "5672")). + +rabbit_mq_exchange() -> + <<"messages">>. + +rabbit_mq_queue() -> + <<"test_queue">>. + +rabbit_mq_routing_key() -> + <<"test_routing_key">>. + +get_rabbitmq(Config) -> + proplists:get_value(rabbitmq, Config). 
+ +get_channel_connection(Config) -> + proplists:get_value(channel_connection, Config). + +ssl_options(true) -> + CertsDir = filename:join([ + emqx_common_test_helpers:proj_root(), + ".ci", + "docker-compose-file", + "certs" + ]), + #{ + enable => true, + cacertfile => filename:join([CertsDir, "ca.crt"]), + certfile => filename:join([CertsDir, "client.pem"]), + keyfile => filename:join([CertsDir, "client.key"]) + }; +ssl_options(false) -> + #{ + enable => false + }. + +parse_and_check(Key, Mod, Conf, Name) -> + ConfStr = hocon_pp:do(Conf, #{}), + ct:pal(ConfStr), + {ok, RawConf} = hocon:binary(ConfStr, #{format => map}), + hocon_tconf:check_plain(Mod, RawConf, #{required => false, atom_key => false}), + #{Key := #{<<"rabbitmq">> := #{Name := RetConf}}} = RawConf, + RetConf. + +receive_message_from_rabbitmq(Config) -> + #{channel := Channel} = get_channel_connection(Config), + #'basic.consume_ok'{consumer_tag = ConsumerTag} = + amqp_channel:call( + Channel, + #'basic.consume'{ + queue = rabbit_mq_queue() + } + ), + receive + %% This is the first message received + #'basic.consume_ok'{} -> + ok + end, + receive + {#'basic.deliver'{delivery_tag = DeliveryTag}, Content} -> + %% Ack the message + amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = DeliveryTag}), + %% Cancel the consumer + #'basic.cancel_ok'{consumer_tag = ConsumerTag} = + amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = ConsumerTag}), + Payload = Content#amqp_msg.payload, + case emqx_utils_json:safe_decode(Payload, [return_maps]) of + {ok, Msg} -> Msg; + {error, _} -> ?assert(false, {"Failed to decode the message", Payload}) + end + after 5000 -> + ?assert(false, "Did not receive message within 5 second") + end. 
diff --git a/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_v1_SUITE.erl b/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_v1_SUITE.erl new file mode 100644 index 000000000..48756c616 --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_v1_SUITE.erl @@ -0,0 +1,221 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_rabbitmq_v1_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("stdlib/include/assert.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +%% See comment in +%% apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_connector_SUITE.erl for how to +%% run this without bringing up the whole CI infrastructure +-define(TYPE, <<"rabbitmq">>). +-import(emqx_bridge_rabbitmq_test_utils, [ + rabbit_mq_exchange/0, + rabbit_mq_routing_key/0, + rabbit_mq_queue/0, + rabbit_mq_host/0, + rabbit_mq_port/0, + get_rabbitmq/1, + ssl_options/1, + get_channel_connection/1, + parse_and_check/4, + receive_message_from_rabbitmq/1 +]). +%%------------------------------------------------------------------------------ +%% Common Test Setup, Tear down and Testcase List +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, tcp}, + {group, tls} + ]. + +groups() -> + AllTCs = emqx_common_test_helpers:all(?MODULE), + [ + {tcp, AllTCs}, + {tls, AllTCs} + ]. + +init_per_group(Group, Config) -> + emqx_bridge_rabbitmq_test_utils:init_per_group(Group, Config). + +end_per_group(Group, Config) -> + emqx_bridge_rabbitmq_test_utils:end_per_group(Group, Config). 
+ +create_bridge(Name, Config) -> + BridgeConfig = rabbitmq_config(Config), + {ok, _} = emqx_bridge:create(?TYPE, Name, BridgeConfig), + emqx_bridge_resource:bridge_id(?TYPE, Name). + +delete_bridge(Name) -> + ok = emqx_bridge:remove(?TYPE, Name). + +%%------------------------------------------------------------------------------ +%% Test Cases +%%------------------------------------------------------------------------------ + +t_create_delete_bridge(Config) -> + Name = atom_to_binary(?FUNCTION_NAME), + RabbitMQ = get_rabbitmq(Config), + create_bridge(Name, RabbitMQ), + Bridges = emqx_bridge:list(), + Any = fun(#{name := BName}) -> BName =:= Name end, + ?assert(lists:any(Any, Bridges), Bridges), + ok = delete_bridge(Name), + BridgesAfterDelete = emqx_bridge:list(), + ?assertNot(lists:any(Any, BridgesAfterDelete), BridgesAfterDelete), + ok. + +t_create_delete_bridge_non_existing_server(_Config) -> + Name = atom_to_binary(?FUNCTION_NAME), + create_bridge(Name, #{server => <<"non_existing_server">>, port => 3174}), + %% Check that the new bridge is in the list of bridges + Bridges = emqx_bridge:list(), + Any = fun(#{name := BName}) -> BName =:= Name end, + ?assert(lists:any(Any, Bridges)), + ok = delete_bridge(Name), + BridgesAfterDelete = emqx_bridge:list(), + ?assertNot(lists:any(Any, BridgesAfterDelete)), + ok. + +t_send_message_query(Config) -> + Name = atom_to_binary(?FUNCTION_NAME), + RabbitMQ = get_rabbitmq(Config), + BridgeID = create_bridge(Name, RabbitMQ#{batch_size => 1}), + Payload = #{<<"key">> => 42, <<"data">> => <<"RabbitMQ">>, <<"timestamp">> => 10000}, + %% This will use the SQL template included in the bridge + emqx_bridge:send_message(BridgeID, Payload), + %% Check that the data got to the database + ?assertEqual(Payload, receive_message_from_rabbitmq(Config)), + ok = delete_bridge(Name), + ok. 
+ +t_send_message_query_with_template(Config) -> + Name = atom_to_binary(?FUNCTION_NAME), + RabbitMQ = get_rabbitmq(Config), + BridgeID = create_bridge(Name, RabbitMQ#{ + batch_size => 1, + payload_template => payload_template() + }), + Payload = #{ + <<"key">> => 7, + <<"data">> => <<"RabbitMQ">>, + <<"timestamp">> => 10000 + }, + emqx_bridge:send_message(BridgeID, Payload), + %% Check that the data got to the database + ExpectedResult = Payload#{ + <<"secret">> => 42 + }, + ?assertEqual(ExpectedResult, receive_message_from_rabbitmq(Config)), + ok = delete_bridge(Name), + ok. + +t_send_simple_batch(Config) -> + Name = atom_to_binary(?FUNCTION_NAME), + RabbitMQ = get_rabbitmq(Config), + BridgeConf = RabbitMQ#{batch_size => 100}, + BridgeID = create_bridge(Name, BridgeConf), + Payload = #{<<"key">> => 42, <<"data">> => <<"RabbitMQ">>, <<"timestamp">> => 10000}, + emqx_bridge:send_message(BridgeID, Payload), + ?assertEqual(Payload, receive_message_from_rabbitmq(Config)), + ok = delete_bridge(Name), + ok. + +t_send_simple_batch_with_template(Config) -> + Name = atom_to_binary(?FUNCTION_NAME), + RabbitMQ = get_rabbitmq(Config), + BridgeConf = + RabbitMQ#{ + batch_size => 100, + payload_template => payload_template() + }, + BridgeID = create_bridge(Name, BridgeConf), + Payload = #{ + <<"key">> => 7, + <<"data">> => <<"RabbitMQ">>, + <<"timestamp">> => 10000 + }, + emqx_bridge:send_message(BridgeID, Payload), + ExpectedResult = Payload#{<<"secret">> => 42}, + ?assertEqual(ExpectedResult, receive_message_from_rabbitmq(Config)), + ok = delete_bridge(Name), + ok. 
+ +t_heavy_batching(Config) -> + Name = atom_to_binary(?FUNCTION_NAME), + NumberOfMessages = 20000, + RabbitMQ = get_rabbitmq(Config), + BridgeConf = RabbitMQ#{ + batch_size => 10173, + batch_time_ms => 50 + }, + BridgeID = create_bridge(Name, BridgeConf), + SendMessage = fun(Key) -> + Payload = #{<<"key">> => Key}, + emqx_bridge:send_message(BridgeID, Payload) + end, + [SendMessage(Key) || Key <- lists:seq(1, NumberOfMessages)], + AllMessages = lists:foldl( + fun(_, Acc) -> + Message = receive_message_from_rabbitmq(Config), + #{<<"key">> := Key} = Message, + Acc#{Key => true} + end, + #{}, + lists:seq(1, NumberOfMessages) + ), + ?assertEqual(NumberOfMessages, maps:size(AllMessages)), + ok = delete_bridge(Name), + ok. + +rabbitmq_config(Config) -> + UseTLS = maps:get(tls, Config, false), + BatchSize = maps:get(batch_size, Config, 1), + BatchTime = maps:get(batch_time_ms, Config, 0), + Name = atom_to_binary(?MODULE), + Server = maps:get(server, Config, rabbit_mq_host()), + Port = maps:get(port, Config, rabbit_mq_port()), + Template = maps:get(payload_template, Config, <<"">>), + Bridge = + #{ + <<"bridges">> => #{ + <<"rabbitmq">> => #{ + Name => #{ + <<"enable">> => true, + <<"ssl">> => ssl_options(UseTLS), + <<"server">> => Server, + <<"port">> => Port, + <<"username">> => <<"guest">>, + <<"password">> => <<"guest">>, + <<"routing_key">> => rabbit_mq_routing_key(), + <<"exchange">> => rabbit_mq_exchange(), + <<"payload_template">> => Template, + <<"resource_opts">> => #{ + <<"batch_size">> => BatchSize, + <<"batch_time">> => BatchTime + } + } + } + } + }, + parse_and_check(<<"bridges">>, emqx_bridge_schema, Bridge, Name). + +payload_template() -> + << + "{" + " \"key\": ${key}," + " \"data\": \"${data}\"," + " \"timestamp\": ${timestamp}," + " \"secret\": 42" + "}" + >>. 
diff --git a/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_v2_SUITE.erl b/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_v2_SUITE.erl new file mode 100644 index 000000000..8b11f732a --- /dev/null +++ b/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_v2_SUITE.erl @@ -0,0 +1,261 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_rabbitmq_v2_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("stdlib/include/assert.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-import(emqx_bridge_rabbitmq_test_utils, [ + rabbit_mq_exchange/0, + rabbit_mq_routing_key/0, + rabbit_mq_queue/0, + rabbit_mq_host/0, + rabbit_mq_port/0, + get_rabbitmq/1, + get_tls/1, + ssl_options/1, + get_channel_connection/1, + parse_and_check/4, + receive_message_from_rabbitmq/1 +]). +-import(emqx_common_test_helpers, [on_exit/1]). + +-define(TYPE, <<"rabbitmq">>). + +all() -> + [ + {group, tcp}, + {group, tls} + ]. + +groups() -> + AllTCs = emqx_common_test_helpers:all(?MODULE), + [ + {tcp, AllTCs}, + {tls, AllTCs} + ]. + +init_per_group(Group, Config) -> + Config1 = emqx_bridge_rabbitmq_test_utils:init_per_group(Group, Config), + Name = atom_to_binary(?MODULE), + create_connector(Name, get_rabbitmq(Config1)), + Config1. + +end_per_group(Group, Config) -> + Name = atom_to_binary(?MODULE), + delete_connector(Name), + emqx_bridge_rabbitmq_test_utils:end_per_group(Group, Config). 
+ +rabbitmq_connector(Config) -> + UseTLS = maps:get(tls, Config, false), + Name = atom_to_binary(?MODULE), + Server = maps:get(server, Config, rabbit_mq_host()), + Port = maps:get(port, Config, rabbit_mq_port()), + Connector = #{ + <<"connectors">> => #{ + <<"rabbitmq">> => #{ + Name => #{ + <<"enable">> => true, + <<"ssl">> => ssl_options(UseTLS), + <<"server">> => Server, + <<"port">> => Port, + <<"username">> => <<"guest">>, + <<"password">> => <<"guest">> + } + } + } + }, + parse_and_check(<<"connectors">>, emqx_connector_schema, Connector, Name). + +rabbitmq_source() -> + Name = atom_to_binary(?MODULE), + Source = #{ + <<"sources">> => #{ + <<"rabbitmq">> => #{ + Name => #{ + <<"enable">> => true, + <<"connector">> => Name, + <<"parameters">> => #{ + <<"no_ack">> => true, + <<"queue">> => rabbit_mq_queue(), + <<"wait_for_publish_confirmations">> => true + } + } + } + } + }, + parse_and_check(<<"sources">>, emqx_bridge_v2_schema, Source, Name). + +rabbitmq_action() -> + Name = atom_to_binary(?MODULE), + Action = #{ + <<"actions">> => #{ + <<"rabbitmq">> => #{ + Name => #{ + <<"connector">> => Name, + <<"enable">> => true, + <<"parameters">> => #{ + <<"exchange">> => rabbit_mq_exchange(), + <<"payload_template">> => <<"${.payload}">>, + <<"routing_key">> => rabbit_mq_routing_key(), + <<"delivery_mode">> => <<"non_persistent">>, + <<"publish_confirmation_timeout">> => <<"30s">>, + <<"wait_for_publish_confirmations">> => true + } + } + } + } + }, + parse_and_check(<<"actions">>, emqx_bridge_v2_schema, Action, Name). + +create_connector(Name, Config) -> + Connector = rabbitmq_connector(Config), + {ok, _} = emqx_connector:create(?TYPE, Name, Connector). + +delete_connector(Name) -> + ok = emqx_connector:remove(?TYPE, Name). + +create_source(Name) -> + Source = rabbitmq_source(), + {ok, _} = emqx_bridge_v2:create(sources, ?TYPE, Name, Source). + +delete_source(Name) -> + ok = emqx_bridge_v2:remove(sources, ?TYPE, Name). 
+ +create_action(Name) -> + Action = rabbitmq_action(), + {ok, _} = emqx_bridge_v2:create(actions, ?TYPE, Name, Action). + +delete_action(Name) -> + ok = emqx_bridge_v2:remove(actions, ?TYPE, Name). + +%%------------------------------------------------------------------------------ +%% Test Cases +%%------------------------------------------------------------------------------ + +t_source(Config) -> + Name = atom_to_binary(?FUNCTION_NAME), + create_source(Name), + Sources = emqx_bridge_v2:list(sources), + Any = fun(#{name := BName}) -> BName =:= Name end, + ?assert(lists:any(Any, Sources), Sources), + Topic = <<"tesldkafd">>, + {ok, #{id := RuleId}} = emqx_rule_engine:create_rule( + #{ + sql => <<"select * from \"$bridges/rabbitmq:", Name/binary, "\"">>, + id => atom_to_binary(?FUNCTION_NAME), + actions => [ + #{ + args => #{ + topic => Topic, + mqtt_properties => #{}, + payload => <<"${payload}">>, + qos => 0, + retain => false, + user_properties => [] + }, + function => republish + } + ], + description => <<"bridge_v2 republish rule">> + } + ), + on_exit(fun() -> emqx_rule_engine:delete_rule(RuleId) end), + {ok, C1} = emqtt:start_link([{clean_start, true}]), + {ok, _} = emqtt:connect(C1), + {ok, #{}, [0]} = emqtt:subscribe(C1, Topic, [{qos, 0}, {rh, 0}]), + send_test_message_to_rabbitmq(Config), + PayloadBin = emqx_utils_json:encode(payload()), + ?assertMatch( + [ + #{ + dup := false, + properties := undefined, + topic := Topic, + qos := 0, + payload := PayloadBin, + retain := false + } + ], + receive_messages(1) + ), + ok = emqtt:disconnect(C1), + ok = delete_source(Name), + SourcesAfterDelete = emqx_bridge_v2:list(sources), + ?assertNot(lists:any(Any, SourcesAfterDelete), SourcesAfterDelete), + ok. 
+ +t_action(Config) -> + Name = atom_to_binary(?FUNCTION_NAME), + create_action(Name), + Actions = emqx_bridge_v2:list(actions), + Any = fun(#{name := BName}) -> BName =:= Name end, + ?assert(lists:any(Any, Actions), Actions), + Topic = <<"lkadfdaction">>, + {ok, #{id := RuleId}} = emqx_rule_engine:create_rule( + #{ + sql => <<"select * from \"", Topic/binary, "\"">>, + id => atom_to_binary(?FUNCTION_NAME), + actions => [<<"rabbitmq:", Name/binary>>], + description => <<"bridge_v2 send msg to rabbitmq action">> + } + ), + on_exit(fun() -> emqx_rule_engine:delete_rule(RuleId) end), + {ok, C1} = emqtt:start_link([{clean_start, true}]), + {ok, _} = emqtt:connect(C1), + Payload = payload(), + PayloadBin = emqx_utils_json:encode(Payload), + {ok, _} = emqtt:publish(C1, Topic, #{}, PayloadBin, [{qos, 1}, {retain, false}]), + Msg = receive_message_from_rabbitmq(Config), + ?assertMatch(Payload, Msg), + ok = emqtt:disconnect(C1), + ok = delete_action(Name), + ActionsAfterDelete = emqx_bridge_v2:list(actions), + ?assertNot(lists:any(Any, ActionsAfterDelete), ActionsAfterDelete), + ok. + +receive_messages(Count) -> + receive_messages(Count, []). +receive_messages(0, Msgs) -> + Msgs; +receive_messages(Count, Msgs) -> + receive + {publish, Msg} -> + ct:log("Msg: ~p ~n", [Msg]), + receive_messages(Count - 1, [Msg | Msgs]); + Other -> + ct:log("Other Msg: ~p~n", [Other]), + receive_messages(Count, Msgs) + after 2000 -> + Msgs + end. + +payload() -> + #{<<"key">> => 42, <<"data">> => <<"RabbitMQ">>, <<"timestamp">> => 10000}. + +send_test_message_to_rabbitmq(Config) -> + #{channel := Channel} = get_channel_connection(Config), + MessageProperties = #'P_basic'{ + headers = [], + delivery_mode = 1 + }, + Method = #'basic.publish'{ + exchange = rabbit_mq_exchange(), + routing_key = rabbit_mq_routing_key() + }, + amqp_channel:cast( + Channel, + Method, + #amqp_msg{ + payload = emqx_utils_json:encode(payload()), + props = MessageProperties + } + ), + ok. 
diff --git a/apps/emqx_bridge_redis/BSL.txt b/apps/emqx_bridge_redis/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_redis/BSL.txt +++ b/apps/emqx_bridge_redis/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. -Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_redis/test/emqx_bridge_v2_redis_SUITE.erl b/apps/emqx_bridge_redis/test/emqx_bridge_v2_redis_SUITE.erl index 18cbc126d..cfcbc8e92 100644 --- a/apps/emqx_bridge_redis/test/emqx_bridge_v2_redis_SUITE.erl +++ b/apps/emqx_bridge_redis/test/emqx_bridge_v2_redis_SUITE.erl @@ -259,10 +259,7 @@ serde_roundtrip(InnerConfigMap0) -> InnerConfigMap. parse_and_check_bridge_config(InnerConfigMap, Name) -> - TypeBin = ?BRIDGE_TYPE_BIN, - RawConf = #{<<"bridges">> => #{TypeBin => #{Name => InnerConfigMap}}}, - hocon_tconf:check_plain(emqx_bridge_v2_schema, RawConf, #{required => false, atom_key => false}), - InnerConfigMap. + emqx_bridge_v2_testlib:parse_and_check(?BRIDGE_TYPE_BIN, Name, InnerConfigMap). make_message() -> ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), diff --git a/apps/emqx_bridge_rocketmq/BSL.txt b/apps/emqx_bridge_rocketmq/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_rocketmq/BSL.txt +++ b/apps/emqx_bridge_rocketmq/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. 
-Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src index 38c00e7ee..564e36a88 100644 --- a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src +++ b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src @@ -1,9 +1,9 @@ {application, emqx_bridge_rocketmq, [ {description, "EMQX Enterprise RocketMQ Bridge"}, - {vsn, "0.1.4"}, + {vsn, "0.1.5"}, {registered, []}, {applications, [kernel, stdlib, emqx_resource, rocketmq]}, - {env, []}, + {env, [{emqx_action_info_modules, [emqx_bridge_rocketmq_action_info]}]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.erl b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.erl index b3149fa99..c40473ee5 100644 --- a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.erl +++ b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.erl @@ -8,12 +8,7 @@ -include_lib("emqx_bridge/include/emqx_bridge.hrl"). -include_lib("emqx_resource/include/emqx_resource.hrl"). --import(hoconsc, [mk/2, enum/1, ref/2]). - --export([ - conn_bridge_examples/1, - values/1 -]). +-import(hoconsc, [mk/2, enum/1]). -export([ namespace/0, @@ -22,6 +17,14 @@ desc/1 ]). +-export([ + bridge_v2_examples/1, + connector_examples/1, + conn_bridge_examples/1 +]). + +-define(CONNECTOR_TYPE, rocketmq). +-define(ACTION_TYPE, ?CONNECTOR_TYPE). -define(DEFAULT_TEMPLATE, <<>>). -define(DEFFAULT_REQ_TIMEOUT, <<"15s">>). @@ -33,14 +36,14 @@ conn_bridge_examples(Method) -> #{ <<"rocketmq">> => #{ summary => <<"RocketMQ Bridge">>, - value => values(Method) + value => conn_bridge_example_values(Method) } } ]. 
-values(get) -> - values(post); -values(post) -> +conn_bridge_example_values(get) -> + conn_bridge_example_values(post); +conn_bridge_example_values(post) -> #{ enable => true, type => rocketmq, @@ -58,15 +61,143 @@ values(post) -> max_buffer_bytes => ?DEFAULT_BUFFER_BYTES } }; -values(put) -> - values(post). +conn_bridge_example_values(put) -> + conn_bridge_example_values(post). + +connector_examples(Method) -> + [ + #{ + <<"rocketmq">> => + #{ + summary => <<"RocketMQ Connector">>, + value => emqx_connector_schema:connector_values( + Method, ?CONNECTOR_TYPE, connector_values() + ) + } + } + ]. + +connector_values() -> + #{ + <<"enable">> => true, + <<"servers">> => <<"127.0.0.1:9876">>, + <<"pool_size">> => 8, + <<"resource_opts">> => #{ + <<"health_check_interval">> => <<"15s">>, + <<"start_after_created">> => true, + <<"start_timeout">> => <<"5s">> + } + }. + +bridge_v2_examples(Method) -> + [ + #{ + <<"rocketmq">> => + #{ + summary => <<"RocketMQ Action">>, + value => emqx_bridge_v2_schema:action_values( + Method, ?ACTION_TYPE, ?CONNECTOR_TYPE, action_values() + ) + } + } + ]. + +action_values() -> + #{ + <<"parameters">> => #{ + <<"topic">> => <<"TopicTest">>, + <<"template">> => ?DEFAULT_TEMPLATE, + <<"refresh_interval">> => <<"3s">>, + <<"send_buffer">> => <<"1024KB">>, + <<"sync_timeout">> => <<"3s">> + } + }. %% ------------------------------------------------------------------------------------------------- %% Hocon Schema Definitions -namespace() -> "bridge_rocketmq". + +namespace() -> "rocketmq". roots() -> []. 
+fields(Field) when + Field == "get_connector"; + Field == "put_connector"; + Field == "post_connector" +-> + emqx_connector_schema:api_fields( + Field, + ?CONNECTOR_TYPE, + fields("config_connector") -- emqx_connector_schema:common_fields() + ); +fields(Field) when + Field == "get_bridge_v2"; + Field == "post_bridge_v2"; + Field == "put_bridge_v2" +-> + emqx_bridge_v2_schema:api_fields(Field, ?ACTION_TYPE, fields(rocketmq_action)); +fields(action) -> + {?ACTION_TYPE, + hoconsc:mk( + hoconsc:map(name, hoconsc:ref(?MODULE, rocketmq_action)), + #{ + desc => <<"RocketMQ Action Config">>, + required => false + } + )}; +fields(rocketmq_action) -> + emqx_bridge_v2_schema:make_producer_action_schema( + hoconsc:mk( + hoconsc:ref(?MODULE, action_parameters), + #{ + required => true, + desc => ?DESC("action_parameters") + } + ) + ); +fields(action_parameters) -> + Parameters = + [ + {template, + mk( + binary(), + #{desc => ?DESC("template"), default => ?DEFAULT_TEMPLATE} + )} + ] ++ emqx_bridge_rocketmq_connector:fields(config), + lists:foldl( + fun(Key, Acc) -> + proplists:delete(Key, Acc) + end, + Parameters, + [ + servers, + pool_size, + auto_reconnect, + access_key, + secret_key, + security_token + ] + ); +fields("config_connector") -> + Config = + emqx_connector_schema:common_fields() ++ + emqx_bridge_rocketmq_connector:fields(config) ++ + emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts), + lists:foldl( + fun(Key, Acc) -> + proplists:delete(Key, Acc) + end, + Config, + [ + topic, + sync_timeout, + refresh_interval, + send_buffer, + auto_reconnect + ] + ); +fields(connector_resource_opts) -> + emqx_connector_schema:resource_opts_fields(); fields("config") -> [ {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, @@ -94,6 +225,14 @@ desc("config") -> ?DESC("desc_config"); desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> ["Configuration for RocketMQ using `", string:to_upper(Method), "` method."]; 
+desc("config_connector") -> + ?DESC("config_connector"); +desc(rocketmq_action) -> + ?DESC("rocketmq_action"); +desc(action_parameters) -> + ?DESC("action_parameters"); +desc(connector_resource_opts) -> + ?DESC(emqx_resource_schema, "resource_opts"); desc(_) -> undefined. diff --git a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq_action_info.erl b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq_action_info.erl new file mode 100644 index 000000000..f3a7ab1a3 --- /dev/null +++ b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq_action_info.erl @@ -0,0 +1,22 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_rocketmq_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +bridge_v1_type_name() -> rocketmq. + +action_type_name() -> rocketmq. + +connector_type_name() -> rocketmq. + +schema_module() -> emqx_bridge_rocketmq. diff --git a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq_connector.erl b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq_connector.erl index 81045ade4..a5bfa6437 100644 --- a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq_connector.erl +++ b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq_connector.erl @@ -12,7 +12,7 @@ -include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include_lib("hocon/include/hoconsc.hrl"). --export([roots/0, fields/1]). +-export([roots/0, fields/1, namespace/0]). %% `emqx_resource' API -export([ @@ -21,10 +21,14 @@ on_stop/2, on_query/3, on_batch_query/3, - on_get_status/2 + on_get_status/2, + on_add_channel/4, + on_remove_channel/3, + on_get_channels/1, + on_get_channel_status/3 ]). --import(hoconsc, [mk/2, enum/1, ref/2]). +-import(hoconsc, [mk/2]). 
-define(ROCKETMQ_HOST_OPTIONS, #{ default_port => 9876 @@ -32,6 +36,9 @@ %%===================================================================== %% Hocon schema + +namespace() -> rocketmq. + roots() -> [{config, #{type => hoconsc:ref(?MODULE, config)}}]. @@ -82,7 +89,12 @@ callback_mode() -> always_sync. on_start( InstanceId, - #{servers := BinServers, topic := Topic, sync_timeout := SyncTimeout} = Config + #{ + servers := BinServers, + access_key := AccessKey, + secret_key := SecretKey, + security_token := SecurityToken + } = Config ) -> ?SLOG(info, #{ msg => "starting_rocketmq_connector", @@ -94,18 +106,13 @@ on_start( emqx_schema:parse_servers(BinServers, ?ROCKETMQ_HOST_OPTIONS) ), ClientId = client_id(InstanceId), - TopicTks = emqx_placeholder:preproc_tmpl(Topic), - #{acl_info := AclInfo} = ProducerOpts = make_producer_opts(Config), - ClientCfg = #{acl_info => AclInfo}, - Templates = parse_template(Config), + ACLInfo = acl_info(AccessKey, SecretKey, SecurityToken), + ClientCfg = #{acl_info => ACLInfo}, State = #{ client_id => ClientId, - topic => Topic, - topic_tokens => TopicTks, - sync_timeout => SyncTimeout, - templates => Templates, - producers_opts => ProducerOpts + acl_info => ACLInfo, + installed_channels => #{} }, ok = emqx_resource:allocate_resource(InstanceId, client_id, ClientId), @@ -123,6 +130,64 @@ on_start( {error, Reason} end. +on_add_channel( + _InstId, + #{ + installed_channels := InstalledChannels, + acl_info := ACLInfo + } = OldState, + ChannelId, + ChannelConfig +) -> + {ok, ChannelState} = create_channel_state(ChannelConfig, ACLInfo), + NewInstalledChannels = maps:put(ChannelId, ChannelState, InstalledChannels), + %% Update state + NewState = OldState#{installed_channels => NewInstalledChannels}, + {ok, NewState}. 
+ +create_channel_state( + #{parameters := Conf} = _ChannelConfig, + ACLInfo +) -> + #{ + topic := Topic, + sync_timeout := SyncTimeout + } = Conf, + TopicTks = emqx_placeholder:preproc_tmpl(Topic), + ProducerOpts = make_producer_opts(Conf, ACLInfo), + Templates = parse_template(Conf), + State = #{ + topic => Topic, + topic_tokens => TopicTks, + templates => Templates, + sync_timeout => SyncTimeout, + acl_info => ACLInfo, + producers_opts => ProducerOpts + }, + {ok, State}. + +on_remove_channel( + _InstId, + #{ + installed_channels := InstalledChannels + } = OldState, + ChannelId +) -> + NewInstalledChannels = maps:remove(ChannelId, InstalledChannels), + %% Update state + NewState = OldState#{installed_channels => NewInstalledChannels}, + {ok, NewState}. + +on_get_channel_status( + _ResId, + _ChannelId, + _State +) -> + ?status_connected. + +on_get_channels(ResId) -> + emqx_bridge_v2:get_channels_for_connector(ResId). + on_stop(InstanceId, _State) -> ?SLOG(info, #{ msg => "stopping_rocketmq_connector", @@ -144,7 +209,7 @@ on_query(InstanceId, Query, State) -> do_query(InstanceId, Query, send_sync, State). %% We only support batch inserts and all messages must have the same topic -on_batch_query(InstanceId, [{send_message, _Msg} | _] = Query, State) -> +on_batch_query(InstanceId, [{_ChannelId, _Msg} | _] = Query, State) -> do_query(InstanceId, Query, batch_send_sync, State); on_batch_query(_InstanceId, Query, _State) -> {error, {unrecoverable_error, {invalid_request, Query}}}. @@ -154,11 +219,11 @@ on_get_status(_InstanceId, #{client_id := ClientId}) -> {ok, Pid} -> status_result(rocketmq_client:get_status(Pid)); _ -> - connecting + ?status_connecting end. -status_result(_Status = true) -> connected; -status_result(_Status) -> connecting. +status_result(_Status = true) -> ?status_connected; +status_result(_Status) -> ?status_connecting. 
%%======================================================================================== %% Helper fns @@ -169,11 +234,8 @@ do_query( Query, QueryFunc, #{ - templates := Templates, client_id := ClientId, - topic_tokens := TopicTks, - producers_opts := ProducerOpts, - sync_timeout := RequestTimeout + installed_channels := Channels } = State ) -> ?TRACE( @@ -181,6 +243,13 @@ do_query( "rocketmq_connector_received", #{connector => InstanceId, query => Query, state => State} ), + ChannelId = get_channel_id(Query), + #{ + topic_tokens := TopicTks, + templates := Templates, + sync_timeout := RequestTimeout, + producers_opts := ProducerOpts + } = maps:get(ChannelId, Channels), TopicKey = get_topic_key(Query, TopicTks), Data = apply_template(Query, Templates), @@ -209,6 +278,9 @@ do_query( Result end. +get_channel_id({ChannelId, _}) -> ChannelId; +get_channel_id([{ChannelId, _} | _]) -> ChannelId. + safe_do_produce(InstanceId, QueryFunc, ClientId, TopicKey, Data, ProducerOpts, RequestTimeout) -> try Producers = get_producers(InstanceId, ClientId, TopicKey, ProducerOpts), @@ -275,14 +347,11 @@ is_sensitive_key(_) -> make_producer_opts( #{ - access_key := AccessKey, - secret_key := SecretKey, - security_token := SecurityToken, send_buffer := SendBuff, refresh_interval := RefreshInterval - } + }, + ACLInfo ) -> - ACLInfo = acl_info(AccessKey, SecretKey, SecurityToken), #{ tcp_opts => [{sndbuf, SendBuff}], ref_topic_route_interval => RefreshInterval, diff --git a/apps/emqx_bridge_rocketmq/test/emqx_bridge_rocketmq_SUITE.erl b/apps/emqx_bridge_rocketmq/test/emqx_bridge_rocketmq_SUITE.erl index 1a5133b84..4a0a5a862 100644 --- a/apps/emqx_bridge_rocketmq/test/emqx_bridge_rocketmq_SUITE.erl +++ b/apps/emqx_bridge_rocketmq/test/emqx_bridge_rocketmq_SUITE.erl @@ -196,14 +196,15 @@ create_bridge_http(Params) -> send_message(Config, Payload) -> Name = ?GET_CONFIG(rocketmq_name, Config), BridgeType = ?GET_CONFIG(rocketmq_bridge_type, Config), - BridgeID = 
emqx_bridge_resource:bridge_id(BridgeType, Name), - emqx_bridge:send_message(BridgeID, Payload). + ActionId = emqx_bridge_v2:id(BridgeType, Name), + emqx_bridge_v2:query(BridgeType, Name, {ActionId, Payload}, #{}). query_resource(Config, Request) -> Name = ?GET_CONFIG(rocketmq_name, Config), BridgeType = ?GET_CONFIG(rocketmq_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - emqx_resource:query(ResourceID, Request, #{timeout => 500}). + ID = emqx_bridge_v2:id(BridgeType, Name), + ResID = emqx_connector_resource:resource_id(BridgeType, Name), + emqx_resource:query(ID, Request, #{timeout => 500, connector_resource_id => ResID}). %%------------------------------------------------------------------------------ %% Testcases @@ -273,6 +274,7 @@ t_get_status(Config) -> ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)), + ?assertMatch(#{status := connected}, emqx_bridge_v2:health_check(BridgeType, Name)), ok. t_simple_query(Config) -> @@ -280,7 +282,10 @@ t_simple_query(Config) -> {ok, _}, create_bridge(Config) ), - Request = {send_message, #{message => <<"Hello">>}}, + Type = ?GET_CONFIG(rocketmq_bridge_type, Config), + Name = ?GET_CONFIG(rocketmq_name, Config), + ActionId = emqx_bridge_v2:id(Type, Name), + Request = {ActionId, #{message => <<"Hello">>}}, Result = query_resource(Config, Request), ?assertEqual(ok, Result), ok. diff --git a/apps/emqx_durable_storage/BSL.txt b/apps/emqx_bridge_s3/BSL.txt similarity index 99% rename from apps/emqx_durable_storage/BSL.txt rename to apps/emqx_bridge_s3/BSL.txt index 2374e6ce2..f0cd31c6f 100644 --- a/apps/emqx_durable_storage/BSL.txt +++ b/apps/emqx_bridge_s3/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. 
-Change Date: 2027-06-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_s3/README.md b/apps/emqx_bridge_s3/README.md new file mode 100644 index 000000000..ac542b468 --- /dev/null +++ b/apps/emqx_bridge_s3/README.md @@ -0,0 +1,16 @@ +# EMQX S3 Bridge + +This application provides connector and action implementations for the EMQX to integrate with Amazon S3 compatible storage services as part of the EMQX data integration pipelines. +Users can leverage [EMQX Rule Engine](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) to create rules that publish message data to S3 storage service. + +## Documentation + +Refer to [Rules engine](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) for the EMQX rules engine introduction. + +## Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + +## License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_s3/docker-ct b/apps/emqx_bridge_s3/docker-ct new file mode 100644 index 000000000..a5a001815 --- /dev/null +++ b/apps/emqx_bridge_s3/docker-ct @@ -0,0 +1,2 @@ +minio +toxiproxy diff --git a/apps/emqx_bridge_s3/rebar.config b/apps/emqx_bridge_s3/rebar.config new file mode 100644 index 000000000..51bf0e0b6 --- /dev/null +++ b/apps/emqx_bridge_s3/rebar.config @@ -0,0 +1,6 @@ +%% -*- mode: erlang; -*- + +{erl_opts, [debug_info]}. +{deps, [ + {emqx_resource, {path, "../../apps/emqx_resource"}} +]}. 
diff --git a/apps/emqx_bridge_s3/src/emqx_bridge_s3.app.src b/apps/emqx_bridge_s3/src/emqx_bridge_s3.app.src new file mode 100644 index 000000000..0047b5e51 --- /dev/null +++ b/apps/emqx_bridge_s3/src/emqx_bridge_s3.app.src @@ -0,0 +1,17 @@ +{application, emqx_bridge_s3, [ + {description, "EMQX Enterprise S3 Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + erlcloud, + emqx_resource, + emqx_s3 + ]}, + {env, [ + {emqx_action_info_modules, [emqx_bridge_s3_action_info]} + ]}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_s3/src/emqx_bridge_s3.erl b/apps/emqx_bridge_s3/src/emqx_bridge_s3.erl new file mode 100644 index 000000000..eff5282db --- /dev/null +++ b/apps/emqx_bridge_s3/src/emqx_bridge_s3.erl @@ -0,0 +1,217 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_s3). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include("emqx_bridge_s3.hrl"). + +-behaviour(hocon_schema). +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-export([ + bridge_v2_examples/1, + connector_examples/1 +]). + +%%------------------------------------------------------------------------------------------------- +%% `hocon_schema' API +%%------------------------------------------------------------------------------------------------- + +namespace() -> + "bridge_s3". + +roots() -> + []. 
+ +fields(Field) when + Field == "get_connector"; + Field == "put_connector"; + Field == "post_connector" +-> + emqx_connector_schema:api_fields(Field, ?CONNECTOR, fields(s3_connector_config)); +fields(Field) when + Field == "get_bridge_v2"; + Field == "put_bridge_v2"; + Field == "post_bridge_v2" +-> + emqx_bridge_v2_schema:api_fields(Field, ?ACTION, fields(?ACTION)); +fields(action) -> + {?ACTION, + hoconsc:mk( + hoconsc:map(name, hoconsc:ref(?MODULE, ?ACTION)), + #{ + desc => <<"S3 Action Config">>, + required => false + } + )}; +fields("config_connector") -> + lists:append([ + emqx_connector_schema:common_fields(), + fields(s3_connector_config), + emqx_connector_schema:resource_opts_ref(?MODULE, s3_connector_resource_opts) + ]); +fields(?ACTION) -> + emqx_bridge_v2_schema:make_producer_action_schema( + hoconsc:mk( + ?R_REF(s3_upload_parameters), + #{ + required => true, + desc => ?DESC(s3_upload) + } + ), + #{ + resource_opts_ref => ?R_REF(s3_action_resource_opts) + } + ); +fields(s3_connector_config) -> + emqx_s3_schema:fields(s3_client); +fields(s3_upload_parameters) -> + emqx_s3_schema:fields(s3_upload) ++ + [ + {content, + hoconsc:mk( + string(), + #{ + required => false, + default => <<"${.}">>, + desc => ?DESC(s3_object_content) + } + )} + ]; +fields(s3_action_resource_opts) -> + UnsupportedOpts = [batch_size, batch_time], + lists:filter( + fun({N, _}) -> not lists:member(N, UnsupportedOpts) end, + emqx_bridge_v2_schema:action_resource_opts_fields() + ); +fields(s3_connector_resource_opts) -> + CommonOpts = emqx_connector_schema:common_resource_opts_subfields(), + lists:filter( + fun({N, _}) -> lists:member(N, CommonOpts) end, + emqx_connector_schema:resource_opts_fields() + ). 
+ +desc("config_connector") -> + ?DESC(config_connector); +desc(?ACTION) -> + ?DESC(s3_upload); +desc(s3_upload) -> + ?DESC(s3_upload); +desc(s3_upload_parameters) -> + ?DESC(s3_upload_parameters); +desc(s3_action_resource_opts) -> + ?DESC(emqx_resource_schema, resource_opts); +desc(s3_connector_resource_opts) -> + ?DESC(emqx_resource_schema, resource_opts); +desc(_Name) -> + undefined. + +%% Examples + +bridge_v2_examples(Method) -> + [ + #{ + <<"s3">> => #{ + summary => <<"S3 Simple Upload">>, + value => action_example(Method) + } + } + ]. + +action_example(post) -> + maps:merge( + action_example(put), + #{ + type => atom_to_binary(?ACTION), + name => <<"my_s3_action">> + } + ); +action_example(get) -> + maps:merge( + action_example(put), + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + } + ); +action_example(put) -> + #{ + enable => true, + connector => <<"my_s3_connector">>, + description => <<"My action">>, + parameters => #{ + bucket => <<"${clientid}">>, + key => <<"${topic}">>, + content => <<"${payload}">>, + acl => <<"public_read">> + }, + resource_opts => #{ + query_mode => <<"sync">>, + inflight_window => 10 + } + }. + +connector_examples(Method) -> + [ + #{ + <<"s3_aws">> => #{ + summary => <<"S3 Connector">>, + value => connector_example(Method) + } + } + ]. 
+ +connector_example(get) -> + maps:merge( + connector_example(put), + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + } + ); +connector_example(post) -> + maps:merge( + connector_example(put), + #{ + type => atom_to_binary(?CONNECTOR), + name => <<"my_s3_connector">> + } + ); +connector_example(put) -> + #{ + enable => true, + description => <<"My S3 connector">>, + host => <<"s3.eu-east-1.amazonaws.com">>, + port => 443, + access_key_id => <<"ACCESS">>, + secret_access_key => <<"SECRET">>, + transport_options => #{ + ssl => #{ + enable => true, + verify => <<"verify_peer">> + }, + connect_timeout => <<"1s">>, + request_timeout => <<"60s">>, + pool_size => 4, + max_retries => 1, + enable_pipelining => 1 + } + }. diff --git a/apps/emqx_bridge_s3/src/emqx_bridge_s3.hrl b/apps/emqx_bridge_s3/src/emqx_bridge_s3.hrl new file mode 100644 index 000000000..6d500d056 --- /dev/null +++ b/apps/emqx_bridge_s3/src/emqx_bridge_s3.hrl @@ -0,0 +1,11 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-ifndef(__EMQX_BRIDGE_S3_HRL__). +-define(__EMQX_BRIDGE_S3_HRL__, true). + +-define(ACTION, s3). +-define(CONNECTOR, s3). + +-endif. diff --git a/apps/emqx_bridge_s3/src/emqx_bridge_s3_action_info.erl b/apps/emqx_bridge_s3/src/emqx_bridge_s3_action_info.erl new file mode 100644 index 000000000..646173bf4 --- /dev/null +++ b/apps/emqx_bridge_s3/src/emqx_bridge_s3_action_info.erl @@ -0,0 +1,19 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_s3_action_info). + +-behaviour(emqx_action_info). 
+ +-export([ + action_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +action_type_name() -> s3. + +connector_type_name() -> s3. + +schema_module() -> emqx_bridge_s3. diff --git a/apps/emqx_bridge_s3/src/emqx_bridge_s3_connector.erl b/apps/emqx_bridge_s3/src/emqx_bridge_s3_connector.erl new file mode 100644 index 000000000..a072c0464 --- /dev/null +++ b/apps/emqx_bridge_s3/src/emqx_bridge_s3_connector.erl @@ -0,0 +1,224 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_s3_connector). + +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/trace.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-behaviour(emqx_resource). +-export([ + callback_mode/0, + on_start/2, + on_stop/2, + on_add_channel/4, + on_remove_channel/3, + on_get_channels/1, + on_query/3, + % on_batch_query/3, + on_get_status/2, + on_get_channel_status/3 +]). + +-type config() :: #{ + access_key_id => string(), + secret_access_key => emqx_secret:t(string()), + host := string(), + port := pos_integer(), + transport_options => emqx_s3:transport_options() +}. + +-type channel_config() :: #{ + parameters := #{ + bucket := string(), + key := string(), + content := string(), + acl => emqx_s3:acl() + } +}. + +-type channel_state() :: #{ + bucket := emqx_template:str(), + key := emqx_template:str(), + upload_options => emqx_s3_client:upload_options() +}. + +-type state() :: #{ + pool_name := resource_id(), + pool_pid => pid(), + client_config := emqx_s3_client:config(), + channels := #{channel_id() => channel_state()} +}. + +%% + +-spec callback_mode() -> callback_mode(). +callback_mode() -> + always_sync. + +%% Management + +-spec on_start(_InstanceId :: resource_id(), config()) -> + {ok, state()} | {error, _Reason}. 
+on_start(InstId, Config) -> + PoolName = InstId, + S3Config = Config#{url_expire_time => 0}, + State = #{ + pool_name => PoolName, + client_config => emqx_s3_profile_conf:client_config(S3Config, PoolName), + channels => #{} + }, + HttpConfig = emqx_s3_profile_conf:http_config(Config), + case ehttpc_sup:start_pool(PoolName, HttpConfig) of + {ok, Pid} -> + ?SLOG(info, #{msg => "s3_connector_start_http_pool_success", pool_name => PoolName}), + {ok, State#{pool_pid => Pid}}; + {error, Reason} = Error -> + ?SLOG(error, #{ + msg => "s3_connector_start_http_pool_fail", + pool_name => PoolName, + http_config => HttpConfig, + reason => Reason + }), + Error + end. + +-spec on_stop(_InstanceId :: resource_id(), state()) -> + ok. +on_stop(InstId, _State = #{pool_name := PoolName}) -> + case ehttpc_sup:stop_pool(PoolName) of + ok -> + ?tp(s3_bridge_stopped, #{instance_id => InstId}), + ok; + {error, Reason} -> + ?SLOG(error, #{ + msg => "s3_connector_http_pool_stop_fail", + pool_name => PoolName, + reason => Reason + }), + ok + end. + +-spec on_get_status(_InstanceId :: resource_id(), state()) -> + health_check_status(). +on_get_status(_InstId, State = #{client_config := Config}) -> + try erlcloud_s3:list_buckets(emqx_s3_client:aws_config(Config)) of + Props when is_list(Props) -> + ?status_connected + catch + error:{aws_error, {http_error, _Code, _, Reason}} -> + {?status_disconnected, State, Reason}; + error:{aws_error, {socket_error, Reason}} -> + {?status_disconnected, State, Reason} + end. + +-spec on_add_channel(_InstanceId :: resource_id(), state(), channel_id(), channel_config()) -> + {ok, state()} | {error, _Reason}. +on_add_channel(_InstId, State = #{channels := Channels}, ChannelId, Config) -> + ChannelState = init_channel_state(Config), + {ok, State#{channels => Channels#{ChannelId => ChannelState}}}. + +-spec on_remove_channel(_InstanceId :: resource_id(), state(), channel_id()) -> + {ok, state()}. 
+on_remove_channel(_InstId, State = #{channels := Channels}, ChannelId) -> + {ok, State#{channels => maps:remove(ChannelId, Channels)}}. + +-spec on_get_channels(_InstanceId :: resource_id()) -> + [_ChannelConfig]. +on_get_channels(InstId) -> + emqx_bridge_v2:get_channels_for_connector(InstId). + +-spec on_get_channel_status(_InstanceId :: resource_id(), channel_id(), state()) -> + channel_status(). +on_get_channel_status(_InstId, ChannelId, #{channels := Channels}) -> + case maps:get(ChannelId, Channels, undefined) of + _ChannelState = #{} -> + %% TODO + %% Since bucket name may be templated, we can't really provide any + %% additional information regarding the channel health. + ?status_connected; + undefined -> + ?status_disconnected + end. + +init_channel_state(#{parameters := Parameters}) -> + #{ + bucket => emqx_template:parse(maps:get(bucket, Parameters)), + key => emqx_template:parse(maps:get(key, Parameters)), + content => emqx_template:parse(maps:get(content, Parameters)), + upload_options => #{ + acl => maps:get(acl, Parameters, undefined) + } + }. + +%% Queries + +-type query() :: {_Tag :: channel_id(), _Data :: emqx_jsonish:t()}. + +-spec on_query(_InstanceId :: resource_id(), query(), state()) -> + {ok, _Result} | {error, _Reason}. +on_query(InstId, {Tag, Data}, #{client_config := Config, channels := Channels}) -> + case maps:get(Tag, Channels, undefined) of + ChannelState = #{} -> + run_simple_upload(InstId, Data, ChannelState, Config); + undefined -> + {error, {unrecoverable_error, {invalid_message_tag, Tag}}} + end. 
+ +run_simple_upload( + InstId, + Data, + #{ + bucket := BucketTemplate, + key := KeyTemplate, + content := ContentTemplate, + upload_options := UploadOpts + }, + Config +) -> + Bucket = render_bucket(BucketTemplate, Data), + Client = emqx_s3_client:create(Bucket, Config), + Key = render_key(KeyTemplate, Data), + Content = render_content(ContentTemplate, Data), + case emqx_s3_client:put_object(Client, Key, UploadOpts, Content) of + ok -> + ?tp(s3_bridge_connector_upload_ok, #{ + instance_id => InstId, + bucket => Bucket, + key => Key + }), + ok; + {error, Reason} -> + {error, map_error(Reason)} + end. + +map_error({socket_error, _} = Reason) -> + {recoverable_error, Reason}; +map_error(Reason = {aws_error, Status, _, _Body}) when Status >= 500 -> + %% https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList + {recoverable_error, Reason}; +map_error(Reason) -> + {unrecoverable_error, Reason}. + +render_bucket(Template, Data) -> + case emqx_template:render(Template, {emqx_jsonish, Data}) of + {Result, []} -> + iolist_to_string(Result); + {_, Errors} -> + erlang:error({unrecoverable_error, {bucket_undefined, Errors}}) + end. + +render_key(Template, Data) -> + %% NOTE: ignoring errors here, missing variables will be rendered as `"undefined"`. + {Result, _Errors} = emqx_template:render(Template, {emqx_jsonish, Data}), + iolist_to_string(Result). + +render_content(Template, Data) -> + %% NOTE: ignoring errors here, missing variables will be rendered as `"undefined"`. + {Result, _Errors} = emqx_template:render(Template, {emqx_jsonish, Data}), + Result. + +iolist_to_string(IOList) -> + unicode:characters_to_list(IOList). 
diff --git a/apps/emqx_bridge_s3/test/emqx_bridge_s3_SUITE.erl b/apps/emqx_bridge_s3/test/emqx_bridge_s3_SUITE.erl new file mode 100644 index 000000000..da9787911 --- /dev/null +++ b/apps/emqx_bridge_s3/test/emqx_bridge_s3_SUITE.erl @@ -0,0 +1,202 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_s3_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/test_macros.hrl"). + +-import(emqx_utils_conv, [bin/1]). + +%% See `emqx_bridge_s3.hrl`. +-define(BRIDGE_TYPE, <<"s3">>). +-define(CONNECTOR_TYPE, <<"s3">>). + +-define(PROXY_NAME, "minio_tcp"). +-define(CONTENT_TYPE, "application/x-emqx-payload"). + +%% CT Setup + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + % Setup toxiproxy + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + _ = emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + Apps = emqx_cth_suite:start( + [ + emqx, + emqx_conf, + emqx_connector, + emqx_bridge_s3, + emqx_bridge, + emqx_rule_engine, + emqx_management, + {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"} + ], + #{work_dir => emqx_cth_suite:work_dir(Config)} + ), + {ok, _} = emqx_common_test_http:create_default_app(), + [ + {apps, Apps}, + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {proxy_name, ?PROXY_NAME} + | Config + ]. + +end_per_suite(Config) -> + ok = emqx_cth_suite:stop(?config(apps, Config)). 
+ +%% Testcases + +init_per_testcase(TestCase, Config) -> + ct:timetrap(timer:seconds(30)), + ok = snabbkaffe:start_trace(), + Name = iolist_to_binary(io_lib:format("~s~p", [TestCase, erlang:unique_integer()])), + ConnectorConfig = connector_config(Name, Config), + ActionConfig = action_config(Name, Name), + [ + {connector_type, ?CONNECTOR_TYPE}, + {connector_name, Name}, + {connector_config, ConnectorConfig}, + {bridge_type, ?BRIDGE_TYPE}, + {bridge_name, Name}, + {bridge_config, ActionConfig} + | Config + ]. + +end_per_testcase(_TestCase, _Config) -> + ok = snabbkaffe:stop(), + ok. + +connector_config(Name, _Config) -> + BaseConf = emqx_s3_test_helpers:base_raw_config(tcp), + parse_and_check_config(<<"connectors">>, ?CONNECTOR_TYPE, Name, #{ + <<"enable">> => true, + <<"description">> => <<"S3 Connector">>, + <<"host">> => maps:get(<<"host">>, BaseConf), + <<"port">> => maps:get(<<"port">>, BaseConf), + <<"access_key_id">> => maps:get(<<"access_key_id">>, BaseConf), + <<"secret_access_key">> => maps:get(<<"secret_access_key">>, BaseConf), + <<"transport_options">> => #{ + <<"headers">> => #{ + <<"content-type">> => <> + }, + <<"connect_timeout">> => <<"500ms">>, + <<"request_timeout">> => <<"1s">>, + <<"pool_size">> => 4, + <<"max_retries">> => 0, + <<"enable_pipelining">> => 1 + } + }). 
+ +action_config(Name, ConnectorId) -> + parse_and_check_config(<<"actions">>, ?BRIDGE_TYPE, Name, #{ + <<"enable">> => true, + <<"connector">> => ConnectorId, + <<"parameters">> => #{ + <<"bucket">> => <<"${clientid}">>, + <<"key">> => <<"${topic}">>, + <<"content">> => <<"${payload}">>, + <<"acl">> => <<"public_read">> + }, + <<"resource_opts">> => #{ + <<"buffer_mode">> => <<"memory_only">>, + <<"buffer_seg_bytes">> => <<"10MB">>, + <<"health_check_interval">> => <<"3s">>, + <<"inflight_window">> => 40, + <<"max_buffer_bytes">> => <<"256MB">>, + <<"metrics_flush_interval">> => <<"1s">>, + <<"query_mode">> => <<"sync">>, + <<"request_ttl">> => <<"60s">>, + <<"resume_interval">> => <<"3s">>, + <<"worker_pool_size">> => <<"4">> + } + }). + +parse_and_check_config(Root, Type, Name, ConfigIn) -> + Schema = + case Root of + <<"connectors">> -> emqx_connector_schema; + <<"actions">> -> emqx_bridge_v2_schema + end, + #{Root := #{Type := #{Name := Config}}} = + hocon_tconf:check_plain( + Schema, + #{Root => #{Type => #{Name => ConfigIn}}}, + #{required => false, atom_key => false} + ), + ct:pal("parsed config: ~p", [Config]), + ConfigIn. + +t_start_stop(Config) -> + emqx_bridge_v2_testlib:t_start_stop(Config, s3_bridge_stopped). + +t_create_via_http(Config) -> + emqx_bridge_v2_testlib:t_create_via_http(Config). + +t_on_get_status(Config) -> + emqx_bridge_v2_testlib:t_on_get_status(Config, #{}). + +t_sync_query(Config) -> + Bucket = emqx_s3_test_helpers:unique_bucket(), + Topic = "a/b/c", + Payload = rand:bytes(1024), + AwsConfig = emqx_s3_test_helpers:aws_config(tcp), + ok = erlcloud_s3:create_bucket(Bucket, AwsConfig), + ok = emqx_bridge_v2_testlib:t_sync_query( + Config, + fun() -> mk_message(Bucket, Topic, Payload) end, + fun(Res) -> ?assertMatch(ok, Res) end, + s3_bridge_connector_upload_ok + ), + ?assertMatch( + #{ + content := Payload, + content_type := ?CONTENT_TYPE + }, + maps:from_list(erlcloud_s3:get_object(Bucket, Topic, AwsConfig)) + ). 
+ +t_query_retry_recoverable(Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + BridgeName = ?config(bridge_name, Config), + Bucket = emqx_s3_test_helpers:unique_bucket(), + Topic = "d/e/f", + Payload = rand:bytes(1024), + AwsConfig = emqx_s3_test_helpers:aws_config(tcp), + ok = erlcloud_s3:create_bucket(Bucket, AwsConfig), + %% Create a bridge with the sample configuration. + ?assertMatch( + {ok, _Bridge}, + emqx_bridge_v2_testlib:create_bridge(Config) + ), + %% Simulate recoverable failure. + _ = emqx_common_test_helpers:enable_failure(timeout, ?PROXY_NAME, ProxyHost, ProxyPort), + _ = timer:apply_after( + _Timeout = 5000, + emqx_common_test_helpers, + heal_failure, + [timeout, ?PROXY_NAME, ProxyHost, ProxyPort] + ), + Message = mk_message(Bucket, Topic, Payload), + %% Verify that the message is sent eventually. + ok = emqx_bridge_v2:send_message(?BRIDGE_TYPE, BridgeName, Message, #{}), + ?assertMatch( + #{content := Payload}, + maps:from_list(erlcloud_s3:get_object(Bucket, Topic, AwsConfig)) + ). + +mk_message(ClientId, Topic, Payload) -> + Message = emqx_message:make(bin(ClientId), bin(Topic), Payload), + {Event, _} = emqx_rule_events:eventmsg_publish(Message), + Event. diff --git a/apps/emqx_bridge_sqlserver/BSL.txt b/apps/emqx_bridge_sqlserver/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_sqlserver/BSL.txt +++ b/apps/emqx_bridge_sqlserver/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. 
-Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.app.src b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.app.src index 331f9c29f..bddf212e3 100644 --- a/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.app.src +++ b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_sqlserver, [ {description, "EMQX Enterprise SQL Server Bridge"}, - {vsn, "0.1.5"}, + {vsn, "0.1.6"}, {registered, []}, {applications, [kernel, stdlib, emqx_resource, odbc]}, {env, []}, diff --git a/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver_connector.erl b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver_connector.erl index a87e71e31..e9e77ba6b 100644 --- a/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver_connector.erl +++ b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver_connector.erl @@ -24,7 +24,8 @@ %% Hocon config schema exports -export([ roots/0, - fields/1 + fields/1, + namespace/0 ]). %% callbacks for behaviour emqx_resource @@ -132,6 +133,8 @@ %% Configuration and default values %%==================================================================== +namespace() -> sqlserver. + roots() -> [{config, #{type => hoconsc:ref(?MODULE, config)}}]. diff --git a/apps/emqx_bridge_syskeeper/BSL.txt b/apps/emqx_bridge_syskeeper/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_syskeeper/BSL.txt +++ b/apps/emqx_bridge_syskeeper/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. 
-Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_tdengine/BSL.txt b/apps/emqx_bridge_tdengine/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_tdengine/BSL.txt +++ b/apps/emqx_bridge_tdengine/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. -Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src index 5375a6ba9..898a3211d 100644 --- a/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src +++ b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_tdengine, [ {description, "EMQX Enterprise TDEngine Bridge"}, - {vsn, "0.1.6"}, + {vsn, "0.1.7"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.erl b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.erl index da170e943..3025ff55e 100644 --- a/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.erl +++ b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.erl @@ -9,35 +9,21 @@ -import(hoconsc, [mk/2, enum/1, ref/2]). --export([ - conn_bridge_examples/1, - values/1 -]). - --export([ - namespace/0, - roots/0, - fields/1, - desc/1 -]). +-export([conn_bridge_examples/1, values/1, bridge_v2_examples/1]). +-export([namespace/0, roots/0, fields/1, desc/1]). 
-define(DEFAULT_SQL, << - "insert into t_mqtt_msg(ts, msgid, mqtt_topic, qos, payload, arrived) " - "values (${ts}, '${id}', '${topic}', ${qos}, '${payload}', ${timestamp})" + "insert into t_mqtt_msg(ts, msgid, mqtt_topic, qos, payload, " + "arrived) values (${ts}, '${id}', '${topic}', ${qos}, '${payload}', " + "${timestamp})" >>). +-define(CONNECTOR_TYPE, tdengine). +-define(ACTION_TYPE, ?CONNECTOR_TYPE). %% ------------------------------------------------------------------------------------------------- -%% api - +%% v1 examples conn_bridge_examples(Method) -> - [ - #{ - <<"tdengine">> => #{ - summary => <<"TDengine Bridge">>, - value => values(Method) - } - } - ]. + [#{<<"tdengine">> => #{summary => <<"TDengine Bridge">>, value => values(Method)}}]. values(_Method) -> #{ @@ -51,21 +37,46 @@ values(_Method) -> password => <<"******">>, sql => ?DEFAULT_SQL, local_topic => <<"local/topic/#">>, - resource_opts => #{ - worker_pool_size => 8, - health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, - batch_size => ?DEFAULT_BATCH_SIZE, - batch_time => ?DEFAULT_BATCH_TIME, - query_mode => sync, - max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + resource_opts => + #{ + worker_pool_size => 8, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => sync, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. + +%% ------------------------------------------------------------------------------------------------- +%% v2 examples +bridge_v2_examples(Method) -> + [ + #{ + <<"tdengine">> => #{ + summary => <<"TDengine Action">>, + value => emqx_bridge_v2_schema:action_values( + Method, ?ACTION_TYPE, ?CONNECTOR_TYPE, action_values() + ) + } + } + ]. + +action_values() -> + #{ + parameters => #{ + database => <<"mqtt">>, + sql => ?DEFAULT_SQL } }. 
%% ------------------------------------------------------------------------------------------------- -%% Hocon Schema Definitions -namespace() -> "bridge_tdengine". +%% v1 Hocon Schema Definitions +namespace() -> + "bridge_tdengine". -roots() -> []. +roots() -> + []. fields("config") -> [ @@ -73,24 +84,68 @@ fields("config") -> {sql, mk( binary(), - #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>} + #{ + desc => ?DESC("sql_template"), + default => ?DEFAULT_SQL, + format => <<"sql">> + } )}, - {local_topic, - mk( - binary(), - #{desc => ?DESC("local_topic"), default => undefined} - )} - ] ++ emqx_resource_schema:fields("resource_opts") ++ + {local_topic, mk(binary(), #{desc => ?DESC("local_topic"), default => undefined})} + ] ++ + emqx_resource_schema:fields("resource_opts") ++ emqx_bridge_tdengine_connector:fields(config); fields("post") -> [type_field(), name_field() | fields("config")]; fields("put") -> fields("config"); fields("get") -> - emqx_bridge_schema:status_fields() ++ fields("post"). 
+ emqx_bridge_schema:status_fields() ++ fields("post"); +%% ------------------------------------------------------------------------------------------------- +%% v2 Hocon Schema Definitions +fields(action) -> + {tdengine, + mk( + hoconsc:map(name, ref(?MODULE, action_config)), + #{ + desc => <<"TDengine Action Config">>, + required => false + } + )}; +fields(action_config) -> + emqx_bridge_v2_schema:make_producer_action_schema( + mk( + ref(?MODULE, action_parameters), + #{ + required => true, desc => ?DESC("action_parameters") + } + ) + ); +fields(action_parameters) -> + [ + {database, fun emqx_connector_schema_lib:database/1}, + {sql, + mk( + binary(), + #{ + desc => ?DESC("sql_template"), + default => ?DEFAULT_SQL, + format => <<"sql">> + } + )} + ]; +fields("post_bridge_v2") -> + emqx_bridge_schema:type_and_name_fields(enum([tdengine])) ++ fields(action_config); +fields("put_bridge_v2") -> + fields(action_config); +fields("get_bridge_v2") -> + emqx_bridge_schema:status_fields() ++ fields("post_bridge_v2"). desc("config") -> ?DESC("desc_config"); +desc(action_config) -> + ?DESC("desc_config"); +desc(action_parameters) -> + ?DESC("action_parameters"); desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> ["Configuration for TDengine using `", string:to_upper(Method), "` method."]; desc(_) -> diff --git a/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine_action_info.erl b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine_action_info.erl new file mode 100644 index 000000000..e545b8826 --- /dev/null +++ b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine_action_info.erl @@ -0,0 +1,28 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023-2024 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_tdengine_action_info). + +-behaviour(emqx_action_info). 
+ +-export([ + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +-define(ACTION_TYPE, tdengine). +-define(SCHEMA_MODULE, emqx_bridge_tdengine). + +bridge_v1_type_name() -> + ?ACTION_TYPE. + +action_type_name() -> + ?ACTION_TYPE. + +connector_type_name() -> + ?ACTION_TYPE. + +schema_module() -> + ?SCHEMA_MODULE. diff --git a/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine_connector.erl b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine_connector.erl index 522007cbc..ce9d84b4f 100644 --- a/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine_connector.erl +++ b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine_connector.erl @@ -11,7 +11,7 @@ -include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include_lib("hocon/include/hoconsc.hrl"). --export([roots/0, fields/1]). +-export([namespace/0, roots/0, fields/1, desc/1]). %% `emqx_resource' API -export([ @@ -20,9 +20,15 @@ on_stop/2, on_query/3, on_batch_query/3, - on_get_status/2 + on_get_status/2, + on_add_channel/4, + on_remove_channel/3, + on_get_channels/1, + on_get_channel_status/3 ]). +-export([connector_examples/1]). + -export([connect/1, do_get_status/1, execute/3, do_batch_insert/4]). -import(hoconsc, [mk/2, enum/1, ref/2]). @@ -31,26 +37,61 @@ default_port => 6041 }). +-define(CONNECTOR_TYPE, tdengine). + +namespace() -> "tdengine_connector". + %%===================================================================== -%% Hocon schema +%% V1 Hocon schema roots() -> [{config, #{type => hoconsc:ref(?MODULE, config)}}]. 
fields(config) -> + base_config(true); +%%===================================================================== +%% V2 Hocon schema + +fields("config_connector") -> + emqx_connector_schema:common_fields() ++ + base_config(false) ++ + emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts); +fields(connector_resource_opts) -> + emqx_connector_schema:resource_opts_fields(); +fields("post") -> + emqx_connector_schema:type_and_name_fields(enum([tdengine])) ++ fields("config_connector"); +fields("put") -> + fields("config_connector"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"). + +base_config(HasDatabase) -> [ {server, server()} - | adjust_fields(emqx_connector_schema_lib:relational_db_fields()) + | adjust_fields(emqx_connector_schema_lib:relational_db_fields(), HasDatabase) ]. -adjust_fields(Fields) -> - lists:map( +desc(config) -> + ?DESC("desc_config"); +desc(connector_resource_opts) -> + ?DESC(emqx_resource_schema, "resource_opts"); +desc("config_connector") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for TDengine using `", string:to_upper(Method), "` method."]; +desc(_) -> + undefined. + +adjust_fields(Fields, HasDatabase) -> + lists:filtermap( fun ({username, OrigUsernameFn}) -> - {username, add_default_fn(OrigUsernameFn, <<"root">>)}; + {true, {username, add_default_fn(OrigUsernameFn, <<"root">>)}}; ({password, _}) -> - {password, emqx_connector_schema_lib:password_field(#{required => true})}; - (Field) -> - Field + {true, {password, emqx_connector_schema_lib:password_field(#{required => true})}}; + ({database, _}) -> + HasDatabase; + (_Field) -> + true end, Fields ). @@ -65,6 +106,32 @@ server() -> Meta = #{desc => ?DESC("server")}, emqx_schema:servers_sc(Meta, ?TD_HOST_OPTIONS). 
+%%===================================================================== +%% V2 Hocon schema +connector_examples(Method) -> + [ + #{ + <<"tdengine">> => + #{ + summary => <<"TDengine Connector">>, + value => emqx_connector_schema:connector_values( + Method, ?CONNECTOR_TYPE, connector_example_values() + ) + } + } + ]. + +connector_example_values() -> + #{ + name => <<"tdengine_connector">>, + type => tdengine, + enable => true, + server => <<"127.0.0.1:6041">>, + pool_size => 8, + username => <<"root">>, + password => <<"******">> + }. + %%======================================================================================== %% `emqx_resource' API %%======================================================================================== @@ -93,11 +160,10 @@ on_start( {username, Username}, {password, Password}, {pool_size, PoolSize}, - {pool, binary_to_atom(InstanceId, utf8)} + {pool, InstanceId} ], - Prepares = parse_prepare_sql(Config), - State = Prepares#{pool_name => InstanceId, query_opts => query_opts(Config)}, + State = #{pool_name => InstanceId, channels => #{}}, case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of ok -> {ok, State}; @@ -110,34 +176,33 @@ on_stop(InstanceId, _State) -> msg => "stopping_tdengine_connector", connector => InstanceId }), + ?tp(tdengine_connector_stop, #{instance_id => InstanceId}), emqx_resource_pool:stop(InstanceId). 
-on_query(InstanceId, {query, SQL}, State) -> - do_query(InstanceId, SQL, State); -on_query(InstanceId, {Key, Data}, #{insert_tokens := InsertTksMap} = State) -> - case maps:find(Key, InsertTksMap) of - {ok, Tokens} when is_map(Data) -> - SQL = emqx_placeholder:proc_tmpl(Tokens, Data), - do_query(InstanceId, SQL, State); +on_query(InstanceId, {ChannelId, Data}, #{channels := Channels} = State) -> + case maps:find(ChannelId, Channels) of + {ok, #{insert := Tokens, opts := Opts}} -> + Query = emqx_placeholder:proc_tmpl(Tokens, Data), + do_query_job(InstanceId, {?MODULE, execute, [Query, Opts]}, State); _ -> - {error, {unrecoverable_error, invalid_request}} + {error, {unrecoverable_error, {invalid_channel_id, InstanceId}}} end. %% aggregate the batch queries to one SQL is a heavy job, we should put it in the worker process on_batch_query( InstanceId, - [{Key, _Data = #{}} | _] = BatchReq, - #{batch_tokens := BatchTksMap, query_opts := Opts} = State + [{ChannelId, _Data = #{}} | _] = BatchReq, + #{channels := Channels} = State ) -> - case maps:find(Key, BatchTksMap) of - {ok, Tokens} -> + case maps:find(ChannelId, Channels) of + {ok, #{batch := Tokens, opts := Opts}} -> do_query_job( InstanceId, {?MODULE, do_batch_insert, [Tokens, BatchReq, Opts]}, State ); _ -> - {error, {unrecoverable_error, batch_prepare_not_implemented}} + {error, {unrecoverable_error, {invalid_channel_id, InstanceId}}} end; on_batch_query(InstanceId, BatchReq, State) -> LogMeta = #{connector => InstanceId, request => BatchReq, state => State}, @@ -157,13 +222,46 @@ do_get_status(Conn) -> status_result(_Status = true) -> connected; status_result(_Status = false) -> connecting. 
+on_add_channel( + _InstanceId, + #{channels := Channels} = OldState, + ChannelId, + #{ + parameters := #{database := Database, sql := SQL} + } +) -> + case maps:is_key(ChannelId, Channels) of + true -> + {error, already_exists}; + _ -> + case parse_prepare_sql(SQL) of + {ok, Result} -> + Opts = [{db_name, Database}], + Channels2 = Channels#{ChannelId => Result#{opts => Opts}}, + {ok, OldState#{channels := Channels2}}; + Error -> + Error + end + end. + +on_remove_channel(_InstanceId, #{channels := Channels} = OldState, ChannelId) -> + {ok, OldState#{channels => maps:remove(ChannelId, Channels)}}. + +on_get_channels(InstanceId) -> + emqx_bridge_v2:get_channels_for_connector(InstanceId). + +on_get_channel_status(InstanceId, ChannelId, #{channels := Channels} = State) -> + case maps:is_key(ChannelId, Channels) of + true -> + on_get_status(InstanceId, State); + _ -> + {error, not_exists} + end. + %%======================================================================================== %% Helper fns %%======================================================================================== -do_query(InstanceId, Query, #{query_opts := Opts} = State) -> - do_query_job(InstanceId, {?MODULE, execute, [Query, Opts]}, State). - do_query_job(InstanceId, Job, #{pool_name := PoolName} = State) -> ?TRACE( "QUERY", @@ -171,12 +269,11 @@ do_query_job(InstanceId, Job, #{pool_name := PoolName} = State) -> #{connector => InstanceId, job => Job, state => State} ), Result = ecpool:pick_and_do(PoolName, Job, no_handover), - case Result of {error, Reason} -> ?tp( tdengine_connector_query_return, - #{error => Reason} + #{instance_id => InstanceId, error => Reason} ), ?SLOG(error, #{ msg => "tdengine_connector_do_query_failed", @@ -193,7 +290,7 @@ do_query_job(InstanceId, Job, #{pool_name := PoolName} = State) -> _ -> ?tp( tdengine_connector_query_return, - #{result => Result} + #{instance_id => InstanceId, result => Result} ), Result end. 
@@ -221,49 +318,23 @@ connect(Opts) -> NOpts = [{password, emqx_secret:unwrap(Secret)} | OptsRest], tdengine:start_link(NOpts). -query_opts(#{database := Database} = _Opts) -> - [{db_name, Database}]. - -parse_prepare_sql(Config) -> - SQL = - case maps:get(sql, Config, undefined) of - undefined -> #{}; - Template -> #{send_message => Template} - end, - - parse_batch_prepare_sql(maps:to_list(SQL), #{}, #{}). - -parse_batch_prepare_sql([{Key, H} | T], InsertTksMap, BatchTksMap) -> - case emqx_utils_sql:get_statement_type(H) of - select -> - parse_batch_prepare_sql(T, InsertTksMap, BatchTksMap); +parse_prepare_sql(SQL) -> + case emqx_utils_sql:get_statement_type(SQL) of insert -> - InsertTks = emqx_placeholder:preproc_tmpl(H), - H1 = string:trim(H, trailing, ";"), - case split_insert_sql(H1) of + InsertTks = emqx_placeholder:preproc_tmpl(SQL), + SQL1 = string:trim(SQL, trailing, ";"), + case split_insert_sql(SQL1) of [_InsertPart, BatchDesc] -> BatchTks = emqx_placeholder:preproc_tmpl(BatchDesc), - parse_batch_prepare_sql( - T, - InsertTksMap#{Key => InsertTks}, - BatchTksMap#{Key => BatchTks} - ); + {ok, #{insert => InsertTks, batch => BatchTks}}; Result -> - ?SLOG(error, #{msg => "split_sql_failed", sql => H, result => Result}), - parse_batch_prepare_sql(T, InsertTksMap, BatchTksMap) + {error, #{msg => "split_sql_failed", sql => SQL, result => Result}} end; Type when is_atom(Type) -> - ?SLOG(error, #{msg => "detect_sql_type_unsupported", sql => H, type => Type}), - parse_batch_prepare_sql(T, InsertTksMap, BatchTksMap); + {error, #{msg => "detect_sql_type_unsupported", sql => SQL, type => Type}}; {error, Reason} -> - ?SLOG(error, #{msg => "detect_sql_type_failed", sql => H, reason => Reason}), - parse_batch_prepare_sql(T, InsertTksMap, BatchTksMap) - end; -parse_batch_prepare_sql([], InsertTksMap, BatchTksMap) -> - #{ - insert_tokens => InsertTksMap, - batch_tokens => BatchTksMap - }. + {error, #{msg => "detect_sql_type_failed", sql => SQL, reason => Reason}} + end. 
to_bin(List) when is_list(List) -> unicode:characters_to_binary(List, utf8). diff --git a/apps/emqx_bridge_tdengine/test/emqx_bridge_tdengine_SUITE.erl b/apps/emqx_bridge_tdengine/test/emqx_bridge_tdengine_SUITE.erl index 92ad3a611..511518bda 100644 --- a/apps/emqx_bridge_tdengine/test/emqx_bridge_tdengine_SUITE.erl +++ b/apps/emqx_bridge_tdengine/test/emqx_bridge_tdengine_SUITE.erl @@ -54,6 +54,11 @@ ok = tdengine:stop(Con) ). +-define(BRIDGE_TYPE_BIN, <<"tdengine">>). +-define(APPS, [ + hackney, tdengine, emqx_bridge, emqx_resource, emqx_rule_engine, emqx_bridge_tdengine +]). + %%------------------------------------------------------------------------------ %% CT boilerplate %%------------------------------------------------------------------------------ @@ -66,16 +71,21 @@ all() -> groups() -> TCs = emqx_common_test_helpers:all(?MODULE), - NonBatchCases = [t_write_timeout], MustBatchCases = [t_batch_insert, t_auto_create_batch_insert], BatchingGroups = [{group, with_batch}, {group, without_batch}], [ {async, BatchingGroups}, {sync, BatchingGroups}, - {with_batch, TCs -- NonBatchCases}, + {with_batch, TCs}, {without_batch, TCs -- MustBatchCases} ]. +init_per_suite(Config) -> + emqx_bridge_v2_testlib:init_per_suite(Config, ?APPS). + +end_per_suite(Config) -> + emqx_bridge_v2_testlib:end_per_suite(Config). + init_per_group(async, Config) -> [{query_mode, async} | Config]; init_per_group(sync, Config) -> @@ -89,36 +99,37 @@ init_per_group(without_batch, Config0) -> init_per_group(_Group, Config) -> Config. -end_per_group(Group, Config) when Group =:= with_batch; Group =:= without_batch -> - connect_and_drop_table(Config), - ProxyHost = ?config(proxy_host, Config), - ProxyPort = ?config(proxy_port, Config), - emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), +end_per_group(default, Config) -> + emqx_bridge_v2_testlib:end_per_group(Config), ok; end_per_group(_Group, _Config) -> ok. 
-init_per_suite(Config) -> +init_per_testcase(TestCase, Config0) -> + connect_and_clear_table(Config0), + Type = ?config(bridge_type, Config0), + UniqueNum = integer_to_binary(erlang:unique_integer()), + Name = << + (atom_to_binary(TestCase))/binary, UniqueNum/binary + >>, + {_ConfigString, ConnectorConfig} = connector_config(Name, Config0), + {_, ActionConfig} = action_config(TestCase, Name, Config0), + Config = [ + {connector_type, Type}, + {connector_name, Name}, + {connector_config, ConnectorConfig}, + {bridge_type, Type}, + {bridge_name, Name}, + {bridge_config, ActionConfig} + | Config0 + ], + emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(), + ok = snabbkaffe:start_trace(), Config. -end_per_suite(_Config) -> - emqx_mgmt_api_test_util:end_suite(), - ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]), - ok. - -init_per_testcase(_Testcase, Config) -> +end_per_testcase(TestCase, Config) -> + emqx_bridge_v2_testlib:end_per_testcase(TestCase, Config), connect_and_clear_table(Config), - delete_bridge(Config), - snabbkaffe:start_trace(), - Config. - -end_per_testcase(_Testcase, Config) -> - ProxyHost = ?config(proxy_host, Config), - ProxyPort = ?config(proxy_port, Config), - emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), - connect_and_clear_table(Config), - ok = snabbkaffe:stop(), - delete_bridge(Config), ok. 
%%------------------------------------------------------------------------------ @@ -132,34 +143,14 @@ common_init(ConfigT) -> Config0 = [ {td_host, Host}, {td_port, Port}, - {proxy_name, "tdengine_restful"}, - {template, ?SQL_BRIDGE} + {proxy_name, "tdengine_restful"} | ConfigT ], - BridgeType = proplists:get_value(bridge_type, Config0, <<"tdengine">>), case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of true -> - % Setup toxiproxy - ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), - ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), - emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), - % Ensure enterprise bridge module is loaded - ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge, tdengine]), - _ = emqx_bridge_enterprise:module_info(), - emqx_mgmt_api_test_util:init_suite(), - % Connect to tdengine directly and create the table - connect_and_create_table(Config0), - {Name, TDConf} = tdengine_config(BridgeType, Config0), - Config = - [ - {tdengine_config, TDConf}, - {tdengine_bridge_type, BridgeType}, - {tdengine_name, Name}, - {proxy_host, ProxyHost}, - {proxy_port, ProxyPort} - | Config0 - ], + Config = emqx_bridge_v2_testlib:init_per_group(default, ?BRIDGE_TYPE_BIN, Config0), + connect_and_create_table(Config), Config; false -> case os:getenv("IS_CI") of @@ -170,97 +161,100 @@ common_init(ConfigT) -> end end. 
-tdengine_config(BridgeType, Config) -> - Port = integer_to_list(?config(td_port, Config)), - Server = ?config(td_host, Config) ++ ":" ++ Port, - Name = atom_to_binary(?MODULE), +action_config(TestCase, Name, Config) -> + Type = ?config(bridge_type, Config), BatchSize = case ?config(enable_batch, Config) of true -> ?BATCH_SIZE; false -> 1 end, QueryMode = ?config(query_mode, Config), - Template = ?config(template, Config), ConfigString = io_lib:format( - "bridges.~s.~s {\n" + "actions.~s.~s {\n" " enable = true\n" - " server = ~p\n" - " database = ~p\n" - " username = ~p\n" - " password = ~p\n" - " sql = ~p\n" + " connector = \"~s\"\n" + " parameters = {\n" + " database = ~p\n" + " sql = ~p\n" + " }\n" " resource_opts = {\n" " request_ttl = 500ms\n" " batch_size = ~b\n" " query_mode = ~s\n" " }\n" - "}", + "}\n", [ - BridgeType, + Type, + Name, Name, - Server, ?TD_DATABASE, - ?TD_USERNAME, - ?TD_PASSWORD, - Template, + case TestCase of + Auto when + Auto =:= t_auto_create_simple_insert; Auto =:= t_auto_create_batch_insert + -> + ?AUTO_CREATE_BRIDGE; + _ -> + ?SQL_BRIDGE + end, BatchSize, QueryMode ] ), - {Name, parse_and_check(ConfigString, BridgeType, Name)}. + ct:pal("ActionConfig:~ts~n", [ConfigString]), + {ConfigString, parse_action_and_check(ConfigString, Type, Name)}. -parse_and_check(ConfigString, BridgeType, Name) -> +connector_config(Name, Config) -> + Host = ?config(td_host, Config), + Port = ?config(td_port, Config), + Type = ?config(bridge_type, Config), + Server = Host ++ ":" ++ integer_to_list(Port), + ConfigString = + io_lib:format( + "connectors.~s.~s {\n" + " enable = true\n" + " server = \"~s\"\n" + " username = ~p\n" + " password = ~p\n" + "}\n", + [ + Type, + Name, + Server, + ?TD_USERNAME, + ?TD_PASSWORD + ] + ), + ct:pal("ConnectorConfig:~ts~n", [ConfigString]), + {ConfigString, parse_connector_and_check(ConfigString, Type, Name)}. 
+ +parse_action_and_check(ConfigString, BridgeType, Name) -> + parse_and_check(ConfigString, emqx_bridge_schema, <<"actions">>, BridgeType, Name). + +parse_connector_and_check(ConfigString, ConnectorType, Name) -> + parse_and_check( + ConfigString, emqx_connector_schema, <<"connectors">>, ConnectorType, Name + ). + +parse_and_check(ConfigString, SchemaMod, RootKey, Type0, Name) -> + Type = to_bin(Type0), {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), - hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), - #{<<"bridges">> := #{BridgeType := #{Name := Config}}} = RawConf, + hocon_tconf:check_plain(SchemaMod, RawConf, #{required => false, atom_key => false}), + #{RootKey := #{Type := #{Name := Config}}} = RawConf, Config. -create_bridge(Config) -> - create_bridge(Config, _Overrides = #{}). - -create_bridge(Config, Overrides) -> - BridgeType = ?config(tdengine_bridge_type, Config), - Name = ?config(tdengine_name, Config), - TDConfig0 = ?config(tdengine_config, Config), - TDConfig = emqx_utils_maps:deep_merge(TDConfig0, Overrides), - emqx_bridge:create(BridgeType, Name, TDConfig). - -delete_bridge(Config) -> - BridgeType = ?config(tdengine_bridge_type, Config), - Name = ?config(tdengine_name, Config), - emqx_bridge:remove(BridgeType, Name). - -create_bridge_http(Params) -> - Path = emqx_mgmt_api_test_util:api_path(["bridges"]), - AuthHeader = emqx_mgmt_api_test_util:auth_header_(), - case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of - {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; - Error -> Error - end. +to_bin(List) when is_list(List) -> + unicode:characters_to_binary(List, utf8); +to_bin(Atom) when is_atom(Atom) -> + erlang:atom_to_binary(Atom); +to_bin(Bin) when is_binary(Bin) -> + Bin. 
send_message(Config, Payload) -> - Name = ?config(tdengine_name, Config), - BridgeType = ?config(tdengine_bridge_type, Config), - BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name), - emqx_bridge:send_message(BridgeID, Payload). - -query_resource(Config, Request) -> - Name = ?config(tdengine_name, Config), - BridgeType = ?config(tdengine_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - emqx_resource:query(ResourceID, Request, #{timeout => 1_000}). - -query_resource_async(Config, Request) -> - Name = ?config(tdengine_name, Config), - BridgeType = ?config(tdengine_bridge_type, Config), - Ref = alias([reply]), - AsyncReplyFun = fun(Result) -> Ref ! {result, Ref, Result} end, - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - Return = emqx_resource:query(ResourceID, Request, #{ - timeout => 500, async_reply_fun => {AsyncReplyFun, []} - }), - {Return, Ref}. + BridgeType = ?config(bridge_type, Config), + Name = ?config(bridge_name, Config), + ct:print(">>> Name:~p~n BridgeType:~p~n", [Name, BridgeType]), + emqx_bridge_v2:send_message(BridgeType, Name, Payload, #{}). receive_result(Ref, Timeout) -> receive @@ -287,17 +281,13 @@ connect_direct_tdengine(Config) -> % These funs connect and then stop the tdengine connection connect_and_create_table(Config) -> ?WITH_CON(begin + _ = directly_query(Con, ?SQL_DROP_TABLE), + _ = directly_query(Con, ?SQL_DROP_STABLE), {ok, _} = directly_query(Con, ?SQL_CREATE_DATABASE, []), {ok, _} = directly_query(Con, ?SQL_CREATE_TABLE), {ok, _} = directly_query(Con, ?SQL_CREATE_STABLE) end). -connect_and_drop_table(Config) -> - ?WITH_CON(begin - {ok, _} = directly_query(Con, ?SQL_DROP_TABLE), - {ok, _} = directly_query(Con, ?SQL_DROP_STABLE) - end). - connect_and_clear_table(Config) -> ?WITH_CON({ok, _} = directly_query(Con, ?SQL_DELETE)). @@ -322,275 +312,53 @@ directly_query(Con, Query) -> directly_query(Con, Query, QueryOpts) -> tdengine:insert(Con, Query, QueryOpts). 
+is_success_check(Result) -> + ?assertMatch({ok, #{<<"code">> := 0}}, Result). + +to_str(Atom) when is_atom(Atom) -> + erlang:atom_to_list(Atom). + %%------------------------------------------------------------------------------ %% Testcases %%------------------------------------------------------------------------------ -t_setup_via_config_and_publish(Config) -> - ?assertMatch( - {ok, _}, - create_bridge(Config) - ), - SentData = #{payload => ?PAYLOAD, timestamp => 1668602148000, second_ts => 1668602148010}, - ?check_trace( - begin - {_, {ok, #{result := Result}}} = - ?wait_async_action( - send_message(Config, SentData), - #{?snk_kind := buffer_worker_flush_ack}, - 2_000 - ), - ?assertMatch( - {ok, #{<<"code">> := 0, <<"rows">> := 1}}, Result - ), - ?assertMatch( - [[?PAYLOAD], [?PAYLOAD]], - connect_and_get_payload(Config) - ), - ok - end, - fun(Trace0) -> - Trace = ?of_kind(tdengine_connector_query_return, Trace0), - ?assertMatch([#{result := {ok, #{<<"code">> := 0, <<"rows">> := 1}}}], Trace), - ok - end - ), - ok. +t_create_via_http(Config) -> + emqx_bridge_v2_testlib:t_create_via_http(Config). -t_setup_via_http_api_and_publish(Config) -> - BridgeType = ?config(tdengine_bridge_type, Config), - Name = ?config(tdengine_name, Config), - QueryMode = ?config(query_mode, Config), - TDengineConfig0 = ?config(tdengine_config, Config), - TDengineConfig = TDengineConfig0#{ - <<"name">> => Name, - <<"type">> => BridgeType - }, - ?assertMatch( - {ok, _}, - create_bridge_http(TDengineConfig) - ), +t_on_get_status(Config) -> + emqx_bridge_v2_testlib:t_on_get_status(Config, #{failure_status => connecting}). 
- SentData = #{payload => ?PAYLOAD, timestamp => 1668602148000, second_ts => 1668602148010}, - ?check_trace( - begin - Request = {send_message, SentData}, - Res0 = - case QueryMode of - sync -> - query_resource(Config, Request); - async -> - {_, Ref} = query_resource_async(Config, Request), - {ok, Res} = receive_result(Ref, 2_000), - Res - end, +t_start_stop(Config) -> + emqx_bridge_v2_testlib:t_start_stop(Config, tdengine_connector_stop). - ?assertMatch( - {ok, #{<<"code">> := 0, <<"rows">> := 1}}, Res0 - ), - ?assertMatch( - [[?PAYLOAD], [?PAYLOAD]], - connect_and_get_payload(Config) - ), - ok - end, - fun(Trace0) -> - Trace = ?of_kind(tdengine_connector_query_return, Trace0), - ?assertMatch([#{result := {ok, #{<<"code">> := 0, <<"rows">> := 1}}}], Trace), - ok - end - ), - ok. - -t_get_status(Config) -> - ?assertMatch( - {ok, _}, - create_bridge(Config) - ), - ProxyPort = ?config(proxy_port, Config), - ProxyHost = ?config(proxy_host, Config), - ProxyName = ?config(proxy_name, Config), - - Name = ?config(tdengine_name, Config), - BridgeType = ?config(tdengine_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)), - emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> +t_invalid_data(Config) -> + MakeMessageFun = fun() -> #{} end, + IsSuccessCheck = fun(Result) -> ?assertMatch( - {ok, Status} when Status =:= disconnected orelse Status =:= connecting, - emqx_resource_manager:health_check(ResourceID) + {error, #{ + <<"code">> := 534, + <<"desc">> := _ + }}, + Result ) - end), - ok. 
- -t_write_failure(Config) -> - ProxyName = ?config(proxy_name, Config), - ProxyPort = ?config(proxy_port, Config), - ProxyHost = ?config(proxy_host, Config), - {ok, _} = create_bridge(Config), - SentData = #{payload => ?PAYLOAD, timestamp => 1668602148000, second_ts => 1668602148010}, - emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> - {_, {ok, #{result := Result}}} = - ?wait_async_action( - send_message(Config, SentData), - #{?snk_kind := buffer_worker_flush_ack}, - 2_000 - ), - case Result of - {error, Reason} when Reason =:= econnrefused; Reason =:= closed -> - ok; - _ -> - throw({unexpected, Result}) - end, - ok - end), - ok. - -% This test doesn't work with batch enabled since it is not possible -% to set the timeout directly for batch queries -t_write_timeout(Config) -> - ProxyName = ?config(proxy_name, Config), - ProxyPort = ?config(proxy_port, Config), - ProxyHost = ?config(proxy_host, Config), - QueryMode = ?config(query_mode, Config), - {ok, _} = create_bridge( - Config, - #{ - <<"resource_opts">> => #{ - <<"request_ttl">> => <<"500ms">>, - <<"resume_interval">> => <<"100ms">>, - <<"health_check_interval">> => <<"100ms">> - } - } - ), - SentData = #{payload => ?PAYLOAD, timestamp => 1668602148000, second_ts => 1668602148010}, - %% FIXME: TDengine connector hangs indefinetily during - %% `call_query' while the connection is unresponsive. Should add - %% a timeout to `APPLY_RESOURCE' in buffer worker?? - case QueryMode of - sync -> - emqx_common_test_helpers:with_failure( - timeout, ProxyName, ProxyHost, ProxyPort, fun() -> - ?assertMatch( - {error, {resource_error, #{reason := timeout}}}, - query_resource(Config, {send_message, SentData}) - ) - end - ); - async -> - ct:comment("tdengine connector hangs the buffer worker forever") end, - ok. 
- -t_simple_sql_query(Config) -> - EnableBatch = ?config(enable_batch, Config), - ?assertMatch( - {ok, _}, - create_bridge(Config) + ok = emqx_bridge_v2_testlib:t_sync_query( + Config, MakeMessageFun, IsSuccessCheck, tdengine_connector_query_return ), - Request = {query, <<"SELECT 1 AS T">>}, - {_, {ok, #{result := Result}}} = - ?wait_async_action( - query_resource(Config, Request), - #{?snk_kind := buffer_worker_flush_ack}, - 2_000 - ), - case EnableBatch of - true -> - ?assertEqual({error, {unrecoverable_error, invalid_request}}, Result); - false -> - ?assertMatch({ok, #{<<"code">> := 0, <<"data">> := [[1]]}}, Result) - end, + ok. -t_missing_data(Config) -> - ?assertMatch( - {ok, _}, - create_bridge(Config) - ), - {_, {ok, #{result := Result}}} = - ?wait_async_action( - send_message(Config, #{}), - #{?snk_kind := buffer_worker_flush_ack}, - 2_000 - ), - ?assertMatch( - {error, #{ - <<"code">> := 534, - <<"desc">> := _ - }}, - Result - ), - ok. - -t_bad_sql_parameter(Config) -> - ?assertMatch( - {ok, _}, - create_bridge(Config) - ), - Request = {send_message, <<"">>}, - {_, {ok, #{result := Result}}} = - ?wait_async_action( - query_resource(Config, Request), - #{?snk_kind := buffer_worker_flush_ack}, - 2_000 - ), - - ?assertMatch({error, {unrecoverable_error, invalid_request}}, Result), - ok. - -%% TODO -%% For supporting to generate a subtable name by mixing prefixes/suffixes with placeholders, -%% the SQL quote(escape) is removed now, -%% we should introduce a new syntax for placeholders to allow some vars to keep unquote. -%% t_nasty_sql_string(Config) -> -%% ?assertMatch( -%% {ok, _}, -%% create_bridge(Config) -%% ), -%% % NOTE -%% % Column `payload` has BINARY type, so we would certainly like to test it -%% % with `lists:seq(1, 127)`, but: -%% % 1. There's no way to insert zero byte in an SQL string, seems that TDengine's -%% % parser[1] has no escaping sequence for it so a zero byte probably confuses -%% % interpreter somewhere down the line. -%% % 2. 
Bytes > 127 come back as U+FFFDs (i.e. replacement characters) in UTF-8 for -%% % some reason. -%% % -%% % [1]: https://github.com/taosdata/TDengine/blob/066cb34a/source/libs/parser/src/parUtil.c#L279-L301 -%% Payload = list_to_binary(lists:seq(1, 127)), -%% Message = #{payload => Payload, timestamp => erlang:system_time(millisecond)}, -%% {_, {ok, #{result := Result}}} = -%% ?wait_async_action( -%% send_message(Config, Message), -%% #{?snk_kind := buffer_worker_flush_ack}, -%% 2_000 -%% ), -%% ?assertMatch( -%% {ok, #{<<"code">> := 0, <<"rows">> := 1}}, -%% Result -%% ), -%% ?assertEqual( -%% Payload, -%% connect_and_get_payload(Config) -%% ). - t_simple_insert(Config) -> connect_and_clear_table(Config), - ?assertMatch( - {ok, _}, - create_bridge(Config) + + MakeMessageFun = fun() -> + #{payload => ?PAYLOAD, timestamp => 1668602148000, second_ts => 1668602148010} + end, + + ok = emqx_bridge_v2_testlib:t_sync_query( + Config, MakeMessageFun, fun is_success_check/1, tdengine_connector_query_return ), - SentData = #{payload => ?PAYLOAD, timestamp => 1668602148000, second_ts => 1668602148010}, - Request = {send_message, SentData}, - {_, {ok, #{result := _Result}}} = - ?wait_async_action( - query_resource(Config, Request), - #{?snk_kind := buffer_worker_flush_ack}, - 2_000 - ), ?assertMatch( [[?PAYLOAD], [?PAYLOAD]], connect_and_get_payload(Config) @@ -598,10 +366,7 @@ t_simple_insert(Config) -> t_batch_insert(Config) -> connect_and_clear_table(Config), - ?assertMatch( - {ok, _}, - create_bridge(Config) - ), + ?assertMatch({ok, _}, emqx_bridge_v2_testlib:create_bridge(Config)), Size = 5, Ts = erlang:system_time(millisecond), @@ -612,8 +377,7 @@ t_batch_insert(Config) -> SentData = #{ payload => ?PAYLOAD, timestamp => Ts + Idx, second_ts => Ts + Idx + 5000 }, - Request = {send_message, SentData}, - query_resource(Config, Request) + send_message(Config, SentData) end, lists:seq(1, Size) ), @@ -632,27 +396,22 @@ t_batch_insert(Config) -> ) ). 
-t_auto_create_simple_insert(Config0) -> +t_auto_create_simple_insert(Config) -> ClientId = to_str(?FUNCTION_NAME), - Config = get_auto_create_config(Config0), - ?assertMatch( - {ok, _}, - create_bridge(Config) + + MakeMessageFun = fun() -> + #{ + payload => ?PAYLOAD, + timestamp => 1668602148000, + second_ts => 1668602148000 + 100, + clientid => ClientId + } + end, + + ok = emqx_bridge_v2_testlib:t_sync_query( + Config, MakeMessageFun, fun is_success_check/1, tdengine_connector_query_return ), - SentData = #{ - payload => ?PAYLOAD, - timestamp => 1668602148000, - second_ts => 1668602148000 + 100, - clientid => ClientId - }, - Request = {send_message, SentData}, - {_, {ok, #{result := _Result}}} = - ?wait_async_action( - query_resource(Config, Request), - #{?snk_kind := buffer_worker_flush_ack}, - 2_000 - ), ?assertMatch( [[?PAYLOAD]], connect_and_query(Config, "SELECT payload FROM " ++ ClientId) @@ -673,15 +432,10 @@ t_auto_create_simple_insert(Config0) -> connect_and_query(Config, "DROP TABLE test_" ++ ClientId) ). -t_auto_create_batch_insert(Config0) -> +t_auto_create_batch_insert(Config) -> ClientId1 = "client1", ClientId2 = "client2", - Config = get_auto_create_config(Config0), - - ?assertMatch( - {ok, _}, - create_bridge(Config) - ), + ?assertMatch({ok, _}, emqx_bridge_v2_testlib:create_bridge(Config)), Size1 = 2, Size2 = 3, @@ -699,8 +453,7 @@ t_auto_create_batch_insert(Config0) -> second_ts => Ts + Idx + Offset + 5000, clientid => ClientId }, - Request = {send_message, SentData}, - query_resource(Config, Request) + send_message(Config, SentData) end, lists:seq(1, Size) ) @@ -738,17 +491,3 @@ t_auto_create_batch_insert(Config0) -> end, [ClientId1, ClientId2, "test_" ++ ClientId1, "test_" ++ ClientId2] ). - -to_bin(List) when is_list(List) -> - unicode:characters_to_binary(List, utf8); -to_bin(Bin) when is_binary(Bin) -> - Bin. - -to_str(Atom) when is_atom(Atom) -> - erlang:atom_to_list(Atom). 
- -get_auto_create_config(Config0) -> - Config = lists:keyreplace(template, 1, Config0, {template, ?AUTO_CREATE_BRIDGE}), - BridgeType = proplists:get_value(bridge_type, Config, <<"tdengine">>), - {_Name, TDConf} = tdengine_config(BridgeType, Config), - lists:keyreplace(tdengine_config, 1, Config, {tdengine_config, TDConf}). diff --git a/apps/emqx_bridge_timescale/BSL.txt b/apps/emqx_bridge_timescale/BSL.txt index 0acc0e696..f0cd31c6f 100644 --- a/apps/emqx_bridge_timescale/BSL.txt +++ b/apps/emqx_bridge_timescale/BSL.txt @@ -7,7 +7,7 @@ Licensed Work: EMQX Enterprise Edition Additional Use Grant: Students and educators are granted right to copy, modify, and create derivative work for research or education. -Change Date: 2027-02-01 +Change Date: 2028-01-26 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, diff --git a/apps/emqx_conf/src/emqx_cluster_rpc.erl b/apps/emqx_conf/src/emqx_cluster_rpc.erl index 8ad30dc74..21bf96806 100644 --- a/apps/emqx_conf/src/emqx_cluster_rpc.erl +++ b/apps/emqx_conf/src/emqx_cluster_rpc.erl @@ -17,7 +17,7 @@ -behaviour(gen_server). %% API --export([start_link/0, mnesia/1]). +-export([start_link/0, create_tables/0]). %% Note: multicall functions are statically checked by %% `emqx_bapi_trans' and `emqx_bpapi_static_checks' modules. Don't @@ -65,8 +65,6 @@ -export_type([tnx_id/0, succeed_num/0]). --boot_mnesia({mnesia, [boot]}). - -include_lib("emqx/include/logger.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include("emqx_conf.hrl"). 
@@ -99,7 +97,8 @@ %%%=================================================================== %%% API %%%=================================================================== -mnesia(boot) -> + +create_tables() -> ok = mria:create_table(?CLUSTER_MFA, [ {type, ordered_set}, {rlog_shard, ?CLUSTER_RPC_SHARD}, @@ -113,7 +112,11 @@ mnesia(boot) -> {storage, disc_copies}, {record_name, cluster_rpc_commit}, {attributes, record_info(fields, cluster_rpc_commit)} - ]). + ]), + [ + ?CLUSTER_MFA, + ?CLUSTER_COMMIT + ]. start_link() -> start_link(node(), ?MODULE, get_retry_ms()). diff --git a/apps/emqx_conf/src/emqx_conf.app.src b/apps/emqx_conf/src/emqx_conf.app.src index 25a936a7a..dedb0c3c6 100644 --- a/apps/emqx_conf/src/emqx_conf.app.src +++ b/apps/emqx_conf/src/emqx_conf.app.src @@ -1,9 +1,9 @@ {application, emqx_conf, [ {description, "EMQX configuration management"}, - {vsn, "0.1.34"}, + {vsn, "0.1.35"}, {registered, []}, {mod, {emqx_conf_app, []}}, - {applications, [kernel, stdlib, emqx_ctl]}, + {applications, [kernel, stdlib]}, {env, []}, {modules, []} ]}. diff --git a/apps/emqx_conf/src/emqx_conf.erl b/apps/emqx_conf/src/emqx_conf.erl index 0a8339ddd..0df5711e0 100644 --- a/apps/emqx_conf/src/emqx_conf.erl +++ b/apps/emqx_conf/src/emqx_conf.erl @@ -158,7 +158,6 @@ dump_schema(Dir, SchemaModule) -> ok = emqx_dashboard_desc_cache:init(), lists:foreach( fun(Lang) -> - ok = gen_config_md(Dir, SchemaModule, Lang), ok = gen_schema_json(Dir, SchemaModule, Lang) end, ["en", "zh"] @@ -468,14 +467,6 @@ bridge_schema_json() -> SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => Version}, gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo). -%% TODO: remove it and also remove hocon_md.erl and friends. -%% markdown generation from schema is a failure and we are moving to an interactive -%% viewer like swagger UI. 
-gen_config_md(Dir, SchemaModule, Lang) -> - SchemaMdFile = filename:join([Dir, "config-" ++ Lang ++ ".md"]), - io:format(user, "===< Generating: ~s~n", [SchemaMdFile]), - ok = gen_doc(SchemaMdFile, SchemaModule, Lang). - %% @doc return the root schema module. -spec schema_module() -> module(). schema_module() -> @@ -515,19 +506,6 @@ make_desc_resolver(Lang) -> unicode:characters_to_binary(Desc) end. --spec gen_doc(file:name_all(), module(), string()) -> ok. -gen_doc(File, SchemaModule, Lang) -> - Version = emqx_release:version(), - Title = - "# " ++ emqx_release:description() ++ " Configuration\n\n" ++ - "", - BodyFile = filename:join([rel, "emqx_conf.template." ++ Lang ++ ".md"]), - {ok, Body} = file:read_file(BodyFile), - Resolver = make_desc_resolver(Lang), - Opts = #{title => Title, body => Body, desc_resolver => Resolver}, - Doc = hocon_schema_md:gen(SchemaModule, Opts), - file:write_file(File, Doc). - gen_api_schema_json_iodata(SchemaMod, SchemaInfo) -> emqx_dashboard_swagger:gen_api_schema_json_iodata( SchemaMod, diff --git a/apps/emqx_conf/src/emqx_conf_app.erl b/apps/emqx_conf/src/emqx_conf_app.erl index 74a7a8f2e..654a6bfe4 100644 --- a/apps/emqx_conf/src/emqx_conf_app.erl +++ b/apps/emqx_conf/src/emqx_conf_app.erl @@ -27,6 +27,7 @@ -include("emqx_conf.hrl"). 
start(_StartType, _StartArgs) -> + ok = mria:wait_for_tables(emqx_cluster_rpc:create_tables()), try ok = init_conf() catch diff --git a/apps/emqx_conf/src/emqx_conf_cli.erl b/apps/emqx_conf/src/emqx_conf_cli.erl index d519e2e05..3a1261b30 100644 --- a/apps/emqx_conf/src/emqx_conf_cli.erl +++ b/apps/emqx_conf/src/emqx_conf_cli.erl @@ -89,6 +89,10 @@ admins(["skip", Node0]) -> emqx_cluster_rpc:skip_failed_commit(Node), status(); admins(["tnxid", TnxId0]) -> + %% changed to 'inspect' in 5.6 + %% TODO: delete this clause in 5.7 + admins(["inspect", TnxId0]); +admins(["inspect", TnxId0]) -> TnxId = list_to_integer(TnxId0), print(emqx_cluster_rpc:query(TnxId)); admins(["fast_forward"]) -> @@ -145,12 +149,14 @@ usage_conf() -> usage_sync() -> [ - {"conf cluster_sync status", "Show cluster config sync status summary"}, - {"conf cluster_sync skip [node]", "Increase one commit on specific node"}, - {"conf cluster_sync tnxid ", - "Display detailed information of the config change transaction at TnxId"}, - {"conf cluster_sync fast_forward [node] [tnx_id]", - "Fast-forward config change transaction to tnx_id on the given node." + {"conf cluster_sync status", "Show cluster config sync status summary for all nodes."}, + {"conf cluster_sync inspect ", + "Inspect detailed information of the config change transaction at the given commit ID"}, + {"conf cluster_sync skip [node]", + "Increment the (currently failing) commit on the given node.\n" + "WARNING: This results in inconsistent configs among the clustered nodes."}, + {"conf cluster_sync fast_forward [node] ", + "Fast-forward config change to the given commit ID on the given node.\n" "WARNING: This results in inconsistent configs among the clustered nodes."} ]. 
diff --git a/apps/emqx_conf/src/emqx_conf_schema.erl b/apps/emqx_conf/src/emqx_conf_schema.erl index bc0c97fee..ea35988bd 100644 --- a/apps/emqx_conf/src/emqx_conf_schema.erl +++ b/apps/emqx_conf/src/emqx_conf_schema.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2021-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2021-2024 EMQ Technologies Co., Ltd. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -42,6 +42,8 @@ %% internal exports for `emqx_enterprise_schema' only. -export([ensure_unicode_path/2, convert_rotation/2, log_handler_common_confs/2]). +-define(DEFAULT_NODE_NAME, <<"emqx@127.0.0.1">>). + %% Static apps which merge their configs into the merged emqx.conf %% The list can not be made a dynamic read at run-time as it is used %% by nodetool to generate app.