Compare commits

..

2 Commits

Author SHA1 Message Date
JianBo He 1e72838ddf chore: bump Dashboard vsn 2024-07-30 08:23:44 +08:00
JianBo He 419623fc63 chore: bump vsn 2024-07-30 08:23:44 +08:00
659 changed files with 6149 additions and 33207 deletions

View File

@ -1,24 +0,0 @@
version: '3.9'
services:
azurite:
container_name: azurite
image: mcr.microsoft.com/azure-storage/azurite:3.30.0
restart: always
expose:
- "10000"
# ports:
# - "10000:10000"
networks:
- emqx_bridge
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:10000"]
interval: 30s
timeout: 5s
retries: 4
command:
- azurite-blob
- "--blobHost"
- 0.0.0.0
- "-d"
- debug.log

View File

@ -1,30 +0,0 @@
version: '3.9'
services:
couchbase:
container_name: couchbase
hostname: couchbase
image: ghcr.io/emqx/couchbase:1.0.0
restart: always
expose:
- 8091-8093
# ports:
# - "8091-8093:8091-8093"
networks:
- emqx_bridge
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8093/admin/ping"]
interval: 30s
timeout: 5s
retries: 4
environment:
- CLUSTER=localhost
- USER=admin
- PASS=public
- PORT=8091
- RAMSIZEMB=2048
- RAMSIZEINDEXMB=512
- RAMSIZEFTSMB=512
- BUCKETS=mqtt
- BUCKETSIZES=100
- AUTOREBALANCE=true

View File

@ -10,7 +10,7 @@ services:
nofile: 1024 nofile: 1024
image: openldap image: openldap
#ports: #ports:
# - "389:389" # - 389:389
volumes: volumes:
- ./certs/ca.crt:/etc/certs/ca.crt - ./certs/ca.crt:/etc/certs/ca.crt
restart: always restart: always

View File

@ -215,17 +215,5 @@
"listen": "0.0.0.0:9200", "listen": "0.0.0.0:9200",
"upstream": "elasticsearch:9200", "upstream": "elasticsearch:9200",
"enabled": true "enabled": true
},
{
"name": "azurite_plain",
"listen": "0.0.0.0:10000",
"upstream": "azurite:10000",
"enabled": true
},
{
"name": "couchbase",
"listen": "0.0.0.0:8093",
"upstream": "couchbase:8093",
"enabled": true
} }
] ]

View File

@ -1,18 +1,18 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, http_server, [ {application, http_server,
{description, "An HTTP server application"}, [{description, "An HTTP server application"},
{vsn, "0.2.0"}, {vsn, "0.2.0"},
{registered, []}, {registered, []},
% {mod, {http_server_app, []}}, % {mod, {http_server_app, []}},
{modules, []}, {modules, []},
{applications, [ {applications,
kernel, [kernel,
stdlib, stdlib,
minirest minirest
]}, ]},
{env, []}, {env,[]},
{modules, []}, {modules, []},
{licenses, ["Apache 2.0"]}, {licenses, ["Apache 2.0"]},
{links, []} {links, []}
]}. ]}.

View File

@ -51,7 +51,7 @@ runs:
echo "SELF_HOSTED=false" >> $GITHUB_OUTPUT echo "SELF_HOSTED=false" >> $GITHUB_OUTPUT
;; ;;
esac esac
- uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 - uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1
id: cache id: cache
if: steps.prepare.outputs.SELF_HOSTED != 'true' if: steps.prepare.outputs.SELF_HOSTED != 'true'
with: with:

View File

@ -1 +0,0 @@
*/.github/*

View File

@ -27,7 +27,7 @@ jobs:
ELIXIR_VSN: ${{ steps.env.outputs.ELIXIR_VSN }} ELIXIR_VSN: ${{ steps.env.outputs.ELIXIR_VSN }}
BUILDER: ${{ steps.env.outputs.BUILDER }} BUILDER: ${{ steps.env.outputs.BUILDER }}
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
ref: ${{ github.event.inputs.ref }} ref: ${{ github.event.inputs.ref }}
- name: Set up environment - name: Set up environment
@ -52,7 +52,7 @@ jobs:
contents: read contents: read
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
ref: ${{ github.event.inputs.ref }} ref: ${{ github.event.inputs.ref }}
fetch-depth: 0 fetch-depth: 0
@ -136,7 +136,7 @@ jobs:
contents: read contents: read
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Work around https://github.com/actions/checkout/issues/766 - name: Work around https://github.com/actions/checkout/issues/766
@ -152,7 +152,7 @@ jobs:
echo "PROFILE=${PROFILE}" | tee -a .env echo "PROFILE=${PROFILE}" | tee -a .env
echo "PKG_VSN=$(./pkg-vsn.sh ${PROFILE})" | tee -a .env echo "PKG_VSN=$(./pkg-vsn.sh ${PROFILE})" | tee -a .env
zip -ryq -x@.github/workflows/.zipignore $PROFILE.zip . zip -ryq -x@.github/workflows/.zipignore $PROFILE.zip .
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
with: with:
name: ${{ matrix.profile }} name: ${{ matrix.profile }}
path: ${{ matrix.profile }}.zip path: ${{ matrix.profile }}.zip

View File

@ -35,7 +35,7 @@ jobs:
BUILD_FROM: ${{ steps.env.outputs.BUILD_FROM }} BUILD_FROM: ${{ steps.env.outputs.BUILD_FROM }}
RUN_FROM: ${{ steps.env.outputs.BUILD_FROM }} RUN_FROM: ${{ steps.env.outputs.BUILD_FROM }}
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
ref: ${{ github.event.inputs.ref }} ref: ${{ github.event.inputs.ref }}
- name: Set up environment - name: Set up environment
@ -65,7 +65,7 @@ jobs:
contents: read contents: read
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
ref: ${{ github.event.inputs.ref }} ref: ${{ github.event.inputs.ref }}
fetch-depth: 0 fetch-depth: 0
@ -147,7 +147,7 @@ jobs:
contents: read contents: read
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
ref: ${{ github.event.inputs.ref }} ref: ${{ github.event.inputs.ref }}
fetch-depth: 0 fetch-depth: 0
@ -163,7 +163,7 @@ jobs:
echo "PROFILE=${PROFILE}" | tee -a .env echo "PROFILE=${PROFILE}" | tee -a .env
echo "PKG_VSN=$(./pkg-vsn.sh ${PROFILE})" | tee -a .env echo "PKG_VSN=$(./pkg-vsn.sh ${PROFILE})" | tee -a .env
zip -ryq -x@.github/workflows/.zipignore $PROFILE.zip . zip -ryq -x@.github/workflows/.zipignore $PROFILE.zip .
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
with: with:
name: ${{ matrix.profile }} name: ${{ matrix.profile }}
path: ${{ matrix.profile }}.zip path: ${{ matrix.profile }}.zip

View File

@ -75,7 +75,7 @@ jobs:
- arm64 - arm64
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
ref: ${{ github.event.inputs.ref }} ref: ${{ github.event.inputs.ref }}
- run: git config --global --add safe.directory "$PWD" - run: git config --global --add safe.directory "$PWD"
@ -83,7 +83,7 @@ jobs:
id: build id: build
run: | run: |
make ${{ matrix.profile }}-tgz make ${{ matrix.profile }}-tgz
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
with: with:
name: "${{ matrix.profile }}-${{ matrix.arch }}.tar.gz" name: "${{ matrix.profile }}-${{ matrix.arch }}.tar.gz"
path: "_packages/emqx*/emqx-*.tar.gz" path: "_packages/emqx*/emqx-*.tar.gz"
@ -107,10 +107,10 @@ jobs:
- ["${{ inputs.profile }}-elixir", "${{ inputs.profile == 'emqx' && 'docker.io,public.ecr.aws' || 'docker.io' }}"] - ["${{ inputs.profile }}-elixir", "${{ inputs.profile == 'emqx' && 'docker.io,public.ecr.aws' || 'docker.io' }}"]
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
ref: ${{ github.event.inputs.ref }} ref: ${{ github.event.inputs.ref }}
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
pattern: "${{ matrix.profile[0] }}-*.tar.gz" pattern: "${{ matrix.profile[0] }}-*.tar.gz"
path: _packages path: _packages
@ -122,25 +122,24 @@ jobs:
run: | run: |
ls -lR _packages/$PROFILE ls -lR _packages/$PROFILE
mv _packages/$PROFILE/*.tar.gz ./ mv _packages/$PROFILE/*.tar.gz ./
- name: Enable containerd image store on Docker Engine - name: Enable containerd image store on Docker Engine
run: | run: |
echo "$(sudo cat /etc/docker/daemon.json | jq '. += {"features": {"containerd-snapshotter": true}}')" > daemon.json echo "$(jq '. += {"features": {"containerd-snapshotter": true}}' /etc/docker/daemon.json)" > daemon.json
sudo mv daemon.json /etc/docker/daemon.json sudo mv daemon.json /etc/docker/daemon.json
sudo systemctl restart docker sudo systemctl restart docker
- uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
- uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 # v3.2.0
- name: Login to hub.docker.com - name: Login to hub.docker.com
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
if: inputs.publish && contains(matrix.profile[1], 'docker.io') if: inputs.publish && contains(matrix.profile[1], 'docker.io')
with: with:
username: ${{ secrets.DOCKER_HUB_USER }} username: ${{ secrets.DOCKER_HUB_USER }}
password: ${{ secrets.DOCKER_HUB_TOKEN }} password: ${{ secrets.DOCKER_HUB_TOKEN }}
- name: Login to AWS ECR - name: Login to AWS ECR
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
if: inputs.publish && contains(matrix.profile[1], 'public.ecr.aws') if: inputs.publish && contains(matrix.profile[1], 'public.ecr.aws')
with: with:
registry: public.ecr.aws registry: public.ecr.aws

View File

@ -26,7 +26,7 @@ jobs:
- emqx-enterprise-elixir - emqx-enterprise-elixir
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Set up environment - name: Set up environment
id: env id: env
run: | run: |
@ -51,7 +51,7 @@ jobs:
if: always() if: always()
run: | run: |
docker save $_EMQX_DOCKER_IMAGE_TAG | gzip > $EMQX_NAME-docker-$PKG_VSN.tar.gz docker save $_EMQX_DOCKER_IMAGE_TAG | gzip > $EMQX_NAME-docker-$PKG_VSN.tar.gz
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
with: with:
name: "${{ env.EMQX_NAME }}-docker" name: "${{ env.EMQX_NAME }}-docker"
path: "${{ env.EMQX_NAME }}-docker-${{ env.PKG_VSN }}.tar.gz" path: "${{ env.EMQX_NAME }}-docker-${{ env.PKG_VSN }}.tar.gz"

View File

@ -82,7 +82,7 @@ jobs:
- ${{ inputs.otp_vsn }} - ${{ inputs.otp_vsn }}
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
ref: ${{ github.event.inputs.ref }} ref: ${{ github.event.inputs.ref }}
fetch-depth: 0 fetch-depth: 0
@ -95,7 +95,7 @@ jobs:
apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }} apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }} apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }} apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: success() if: success()
with: with:
name: ${{ matrix.profile }}-${{ matrix.os }}-${{ matrix.otp }} name: ${{ matrix.profile }}-${{ matrix.os }}-${{ matrix.otp }}
@ -113,11 +113,13 @@ jobs:
- ubuntu24.04 - ubuntu24.04
- ubuntu22.04 - ubuntu22.04
- ubuntu20.04 - ubuntu20.04
- ubuntu18.04
- debian12 - debian12
- debian11 - debian11
- debian10 - debian10
- el9 - el9
- el8 - el8
- el7
- amzn2 - amzn2
- amzn2023 - amzn2023
arch: arch:
@ -145,7 +147,7 @@ jobs:
shell: bash shell: bash
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
ref: ${{ github.event.inputs.ref }} ref: ${{ github.event.inputs.ref }}
fetch-depth: 0 fetch-depth: 0
@ -180,7 +182,7 @@ jobs:
--builder $BUILDER \ --builder $BUILDER \
--elixir $IS_ELIXIR \ --elixir $IS_ELIXIR \
--pkgtype pkg --pkgtype pkg
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
with: with:
name: ${{ matrix.profile }}-${{ matrix.os }}-${{ matrix.arch }}${{ matrix.with_elixir == 'yes' && '-elixir' || '' }}-${{ matrix.builder }}-${{ matrix.otp }}-${{ matrix.elixir }} name: ${{ matrix.profile }}-${{ matrix.os }}-${{ matrix.arch }}${{ matrix.with_elixir == 'yes' && '-elixir' || '' }}-${{ matrix.builder }}-${{ matrix.otp }}-${{ matrix.elixir }}
path: _packages/${{ matrix.profile }}/ path: _packages/${{ matrix.profile }}/
@ -198,7 +200,7 @@ jobs:
profile: profile:
- ${{ inputs.profile }} - ${{ inputs.profile }}
steps: steps:
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
pattern: "${{ matrix.profile }}-*" pattern: "${{ matrix.profile }}-*"
path: packages/${{ matrix.profile }} path: packages/${{ matrix.profile }}

View File

@ -23,7 +23,6 @@ jobs:
profile: profile:
- ['emqx', 'master'] - ['emqx', 'master']
- ['emqx', 'release-57'] - ['emqx', 'release-57']
- ['emqx', 'release-58']
os: os:
- ubuntu22.04 - ubuntu22.04
- amzn2023 - amzn2023
@ -38,7 +37,7 @@ jobs:
shell: bash shell: bash
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
ref: ${{ matrix.profile[1] }} ref: ${{ matrix.profile[1] }}
fetch-depth: 0 fetch-depth: 0
@ -54,14 +53,14 @@ jobs:
- name: build pkg - name: build pkg
run: | run: |
./scripts/buildx.sh --profile "$PROFILE" --pkgtype pkg --builder "$BUILDER" ./scripts/buildx.sh --profile "$PROFILE" --pkgtype pkg --builder "$BUILDER"
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: success() if: success()
with: with:
name: ${{ matrix.profile[0] }}-${{ matrix.profile[1] }}-${{ matrix.os }} name: ${{ matrix.profile[0] }}-${{ matrix.os }}-${{ github.ref_name }}
path: _packages/${{ matrix.profile[0] }}/ path: _packages/${{ matrix.profile[0] }}/
retention-days: 7 retention-days: 7
- name: Send notification to Slack - name: Send notification to Slack
uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0 uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0
if: failure() if: failure()
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
@ -84,7 +83,7 @@ jobs:
- macos-14-arm64 - macos-14-arm64
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
ref: ${{ matrix.branch }} ref: ${{ matrix.branch }}
fetch-depth: 0 fetch-depth: 0
@ -102,14 +101,14 @@ jobs:
apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }} apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }} apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }} apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: success() if: success()
with: with:
name: ${{ matrix.profile }}-${{ matrix.os }} name: ${{ matrix.profile }}-${{ matrix.os }}
path: _packages/${{ matrix.profile }}/ path: _packages/${{ matrix.profile }}/
retention-days: 7 retention-days: 7
- name: Send notification to Slack - name: Send notification to Slack
uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0 uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0
if: failure() if: failure()
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}

View File

@ -32,7 +32,7 @@ jobs:
- ["emqx-enterprise", "erlang", "x64"] - ["emqx-enterprise", "erlang", "x64"]
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
fetch-depth: 0 fetch-depth: 0
- name: build tgz - name: build tgz
@ -41,13 +41,13 @@ jobs:
- name: build pkg - name: build pkg
run: | run: |
./scripts/buildx.sh --profile $PROFILE --pkgtype pkg --elixir $ELIXIR --arch $ARCH ./scripts/buildx.sh --profile $PROFILE --pkgtype pkg --elixir $ELIXIR --arch $ARCH
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with: with:
name: "${{ matrix.profile[0] }}-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}" name: "${{ matrix.profile[0] }}-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}"
path: _packages/${{ matrix.profile[0] }}/* path: _packages/${{ matrix.profile[0] }}/*
retention-days: 7 retention-days: 7
compression-level: 0 compression-level: 0
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
with: with:
name: "${{ matrix.profile[0] }}-schema-dump-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}" name: "${{ matrix.profile[0] }}-schema-dump-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}"
path: | path: |
@ -69,7 +69,7 @@ jobs:
EMQX_NAME: ${{ matrix.profile }} EMQX_NAME: ${{ matrix.profile }}
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Set up environment - name: Set up environment
id: env id: env
run: | run: |
@ -84,7 +84,7 @@ jobs:
apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }} apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }} apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }} apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
with: with:
name: ${{ matrix.os }} name: ${{ matrix.os }}
path: _packages/**/* path: _packages/**/*

View File

@ -22,7 +22,7 @@ jobs:
profile: profile:
- emqx-enterprise - emqx-enterprise
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- run: git config --global --add safe.directory "$GITHUB_WORKSPACE" - run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
- run: make ensure-rebar3 - run: make ensure-rebar3
- run: ./scripts/check-deps-integrity.escript - run: ./scripts/check-deps-integrity.escript
@ -37,7 +37,7 @@ jobs:
- run: ./scripts/check-elixir-deps-discrepancies.exs - run: ./scripts/check-elixir-deps-discrepancies.exs
- run: ./scripts/check-elixir-applications.exs - run: ./scripts/check-elixir-applications.exs
- name: Upload produced lock files - name: Upload produced lock files
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: failure() if: failure()
with: with:
name: ${{ matrix.profile }}_produced_lock_files name: ${{ matrix.profile }}_produced_lock_files

View File

@ -24,14 +24,13 @@ jobs:
branch: branch:
- master - master
- release-57 - release-57
- release-58
language: language:
- cpp - cpp
- python - python
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
ref: ${{ matrix.branch }} ref: ${{ matrix.branch }}

View File

@ -7,6 +7,9 @@ on:
# run hourly # run hourly
- cron: "0 * * * *" - cron: "0 * * * *"
workflow_dispatch: workflow_dispatch:
inputs:
ref:
required: false
permissions: permissions:
contents: read contents: read
@ -14,21 +17,14 @@ permissions:
jobs: jobs:
rerun-failed-jobs: rerun-failed-jobs:
if: github.repository_owner == 'emqx' if: github.repository_owner == 'emqx'
runs-on: ubuntu-latest runs-on: ubuntu-22.04
permissions: permissions:
checks: read checks: read
actions: write actions: write
strategy:
fail-fast: false
matrix:
ref:
- master
- release-57
- release-58
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
ref: ${{ matrix.ref }} ref: ${{ github.event.inputs.ref || 'master' }}
- name: run script - name: run script
shell: bash shell: bash

View File

@ -32,7 +32,7 @@ jobs:
PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }} PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }}
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
fetch-depth: 0 fetch-depth: 0
ref: ${{ github.event.inputs.ref }} ref: ${{ github.event.inputs.ref }}
@ -52,7 +52,7 @@ jobs:
id: package_file id: package_file
run: | run: |
echo "PACKAGE_FILE=$(find _packages/emqx -name 'emqx-*.deb' | head -n 1 | xargs basename)" >> $GITHUB_OUTPUT echo "PACKAGE_FILE=$(find _packages/emqx -name 'emqx-*.deb' | head -n 1 | xargs basename)" >> $GITHUB_OUTPUT
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
with: with:
name: emqx-ubuntu20.04 name: emqx-ubuntu20.04
path: _packages/emqx/${{ steps.package_file.outputs.PACKAGE_FILE }} path: _packages/emqx/${{ steps.package_file.outputs.PACKAGE_FILE }}
@ -72,17 +72,17 @@ jobs:
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-west-1 aws-region: eu-west-1
- name: Checkout tf-emqx-performance-test - name: Checkout tf-emqx-performance-test
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
repository: emqx/tf-emqx-performance-test repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test path: tf-emqx-performance-test
ref: v0.2.3 ref: v0.2.3
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
name: emqx-ubuntu20.04 name: emqx-ubuntu20.04
path: tf-emqx-performance-test/ path: tf-emqx-performance-test/
- name: Setup Terraform - name: Setup Terraform
uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 # v3.1.1 uses: hashicorp/setup-terraform@a1502cd9e758c50496cc9ac5308c4843bcd56d36 # v3.0.0
with: with:
terraform_wrapper: false terraform_wrapper: false
- name: run scenario - name: run scenario
@ -105,7 +105,7 @@ jobs:
terraform destroy -auto-approve terraform destroy -auto-approve
aws s3 sync --exclude '*' --include '*.tar.gz' s3://$TF_VAR_s3_bucket_name/$TF_VAR_bench_id . aws s3 sync --exclude '*' --include '*.tar.gz' s3://$TF_VAR_s3_bucket_name/$TF_VAR_bench_id .
- name: Send notification to Slack - name: Send notification to Slack
uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0 uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0
with: with:
payload-file-path: "./tf-emqx-performance-test/slack-payload.json" payload-file-path: "./tf-emqx-performance-test/slack-payload.json"
- name: terraform destroy - name: terraform destroy
@ -113,13 +113,13 @@ jobs:
working-directory: ./tf-emqx-performance-test working-directory: ./tf-emqx-performance-test
run: | run: |
terraform destroy -auto-approve terraform destroy -auto-approve
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: success() if: success()
with: with:
name: metrics name: metrics
path: | path: |
"./tf-emqx-performance-test/*.tar.gz" "./tf-emqx-performance-test/*.tar.gz"
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: failure() if: failure()
with: with:
name: terraform name: terraform
@ -143,17 +143,17 @@ jobs:
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-west-1 aws-region: eu-west-1
- name: Checkout tf-emqx-performance-test - name: Checkout tf-emqx-performance-test
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
repository: emqx/tf-emqx-performance-test repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test path: tf-emqx-performance-test
ref: v0.2.3 ref: v0.2.3
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
name: emqx-ubuntu20.04 name: emqx-ubuntu20.04
path: tf-emqx-performance-test/ path: tf-emqx-performance-test/
- name: Setup Terraform - name: Setup Terraform
uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 # v3.1.1 uses: hashicorp/setup-terraform@a1502cd9e758c50496cc9ac5308c4843bcd56d36 # v3.0.0
with: with:
terraform_wrapper: false terraform_wrapper: false
- name: run scenario - name: run scenario
@ -176,7 +176,7 @@ jobs:
terraform destroy -auto-approve terraform destroy -auto-approve
aws s3 sync --exclude '*' --include '*.tar.gz' s3://$TF_VAR_s3_bucket_name/$TF_VAR_bench_id . aws s3 sync --exclude '*' --include '*.tar.gz' s3://$TF_VAR_s3_bucket_name/$TF_VAR_bench_id .
- name: Send notification to Slack - name: Send notification to Slack
uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0 uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0
with: with:
payload-file-path: "./tf-emqx-performance-test/slack-payload.json" payload-file-path: "./tf-emqx-performance-test/slack-payload.json"
- name: terraform destroy - name: terraform destroy
@ -184,13 +184,13 @@ jobs:
working-directory: ./tf-emqx-performance-test working-directory: ./tf-emqx-performance-test
run: | run: |
terraform destroy -auto-approve terraform destroy -auto-approve
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: success() if: success()
with: with:
name: metrics name: metrics
path: | path: |
"./tf-emqx-performance-test/*.tar.gz" "./tf-emqx-performance-test/*.tar.gz"
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: failure() if: failure()
with: with:
name: terraform name: terraform
@ -215,17 +215,17 @@ jobs:
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-west-1 aws-region: eu-west-1
- name: Checkout tf-emqx-performance-test - name: Checkout tf-emqx-performance-test
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
repository: emqx/tf-emqx-performance-test repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test path: tf-emqx-performance-test
ref: v0.2.3 ref: v0.2.3
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
name: emqx-ubuntu20.04 name: emqx-ubuntu20.04
path: tf-emqx-performance-test/ path: tf-emqx-performance-test/
- name: Setup Terraform - name: Setup Terraform
uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 # v3.1.1 uses: hashicorp/setup-terraform@a1502cd9e758c50496cc9ac5308c4843bcd56d36 # v3.0.0
with: with:
terraform_wrapper: false terraform_wrapper: false
- name: run scenario - name: run scenario
@ -249,7 +249,7 @@ jobs:
terraform destroy -auto-approve terraform destroy -auto-approve
aws s3 sync --exclude '*' --include '*.tar.gz' s3://$TF_VAR_s3_bucket_name/$TF_VAR_bench_id . aws s3 sync --exclude '*' --include '*.tar.gz' s3://$TF_VAR_s3_bucket_name/$TF_VAR_bench_id .
- name: Send notification to Slack - name: Send notification to Slack
uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0 uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0
with: with:
payload-file-path: "./tf-emqx-performance-test/slack-payload.json" payload-file-path: "./tf-emqx-performance-test/slack-payload.json"
- name: terraform destroy - name: terraform destroy
@ -257,13 +257,13 @@ jobs:
working-directory: ./tf-emqx-performance-test working-directory: ./tf-emqx-performance-test
run: | run: |
terraform destroy -auto-approve terraform destroy -auto-approve
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: success() if: success()
with: with:
name: metrics name: metrics
path: | path: |
"./tf-emqx-performance-test/*.tar.gz" "./tf-emqx-performance-test/*.tar.gz"
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: failure() if: failure()
with: with:
name: terraform name: terraform
@ -289,17 +289,17 @@ jobs:
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-west-1 aws-region: eu-west-1
- name: Checkout tf-emqx-performance-test - name: Checkout tf-emqx-performance-test
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
repository: emqx/tf-emqx-performance-test repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test path: tf-emqx-performance-test
ref: v0.2.3 ref: v0.2.3
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
name: emqx-ubuntu20.04 name: emqx-ubuntu20.04
path: tf-emqx-performance-test/ path: tf-emqx-performance-test/
- name: Setup Terraform - name: Setup Terraform
uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 # v3.1.1 uses: hashicorp/setup-terraform@a1502cd9e758c50496cc9ac5308c4843bcd56d36 # v3.0.0
with: with:
terraform_wrapper: false terraform_wrapper: false
- name: run scenario - name: run scenario
@ -322,7 +322,7 @@ jobs:
terraform destroy -auto-approve terraform destroy -auto-approve
aws s3 sync --exclude '*' --include '*.tar.gz' s3://$TF_VAR_s3_bucket_name/$TF_VAR_bench_id . aws s3 sync --exclude '*' --include '*.tar.gz' s3://$TF_VAR_s3_bucket_name/$TF_VAR_bench_id .
- name: Send notification to Slack - name: Send notification to Slack
uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0 uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0
with: with:
payload-file-path: "./tf-emqx-performance-test/slack-payload.json" payload-file-path: "./tf-emqx-performance-test/slack-payload.json"
- name: terraform destroy - name: terraform destroy
@ -330,13 +330,13 @@ jobs:
working-directory: ./tf-emqx-performance-test working-directory: ./tf-emqx-performance-test
run: | run: |
terraform destroy -auto-approve terraform destroy -auto-approve
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: success() if: success()
with: with:
name: metrics name: metrics
path: | path: |
"./tf-emqx-performance-test/*.tar.gz" "./tf-emqx-performance-test/*.tar.gz"
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: failure() if: failure()
with: with:
name: terraform name: terraform

View File

@ -36,7 +36,7 @@ jobs:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ secrets.AWS_DEFAULT_REGION }} aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
ref: ${{ github.event.inputs.tag }} ref: ${{ github.event.inputs.tag }}
- name: Detect profile - name: Detect profile
@ -106,12 +106,16 @@ jobs:
push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-arm64.deb" push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-arm64.deb"
push "debian/bookworm" "packages/$PROFILE-$VERSION-debian12-amd64.deb" push "debian/bookworm" "packages/$PROFILE-$VERSION-debian12-amd64.deb"
push "debian/bookworm" "packages/$PROFILE-$VERSION-debian12-arm64.deb" push "debian/bookworm" "packages/$PROFILE-$VERSION-debian12-arm64.deb"
push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-amd64.deb"
push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-arm64.deb"
push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-amd64.deb" push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-amd64.deb"
push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-arm64.deb" push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-arm64.deb"
push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-amd64.deb" push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-amd64.deb"
push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-arm64.deb" push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-arm64.deb"
push "ubuntu/noble" "packages/$PROFILE-$VERSION-ubuntu24.04-amd64.deb" push "ubuntu/noble" "packages/$PROFILE-$VERSION-ubuntu24.04-amd64.deb"
push "ubuntu/noble" "packages/$PROFILE-$VERSION-ubuntu24.04-arm64.deb" push "ubuntu/noble" "packages/$PROFILE-$VERSION-ubuntu24.04-arm64.deb"
push "el/7" "packages/$PROFILE-$VERSION-el7-amd64.rpm"
push "el/7" "packages/$PROFILE-$VERSION-el7-arm64.rpm"
push "el/8" "packages/$PROFILE-$VERSION-el8-amd64.rpm" push "el/8" "packages/$PROFILE-$VERSION-el8-amd64.rpm"
push "el/8" "packages/$PROFILE-$VERSION-el8-arm64.rpm" push "el/8" "packages/$PROFILE-$VERSION-el8-arm64.rpm"
push "el/9" "packages/$PROFILE-$VERSION-el9-amd64.rpm" push "el/9" "packages/$PROFILE-$VERSION-el9-amd64.rpm"
@ -131,7 +135,7 @@ jobs:
checks: write checks: write
actions: write actions: write
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- name: trigger re-run of app versions check on open PRs - name: trigger re-run of app versions check on open PRs
shell: bash shell: bash
env: env:

View File

@ -25,7 +25,7 @@ jobs:
- emqx - emqx
- emqx-enterprise - emqx-enterprise
steps: steps:
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
name: ${{ matrix.profile }} name: ${{ matrix.profile }}
- name: extract artifact - name: extract artifact
@ -39,10 +39,10 @@ jobs:
- name: print erlang log - name: print erlang log
if: failure() if: failure()
run: | run: |
cat _build/${{ matrix.profile }}/rel/emqx/log/erlang.log.* cat _build/${{ matrix.profile }}/rel/emqx/logs/erlang.log.*
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: failure() if: failure()
with: with:
name: conftest-logs-${{ matrix.profile }} name: conftest-logs-${{ matrix.profile }}
path: _build/${{ matrix.profile }}/rel/emqx/log path: _build/${{ matrix.profile }}/rel/emqx/logs
retention-days: 7 retention-days: 7

View File

@ -28,14 +28,14 @@ jobs:
EMQX_IMAGE_OLD_VERSION_TAG: ${{ matrix.profile[1] }} EMQX_IMAGE_OLD_VERSION_TAG: ${{ matrix.profile[1] }}
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Set up environment - name: Set up environment
id: env id: env
run: | run: |
source env.sh source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh "$EMQX_NAME") PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh "$EMQX_NAME")
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV" echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with: with:
name: ${{ env.EMQX_NAME }}-docker name: ${{ env.EMQX_NAME }}-docker
path: /tmp path: /tmp
@ -69,6 +69,7 @@ jobs:
shell: bash shell: bash
env: env:
EMQX_NAME: ${{ matrix.profile }} EMQX_NAME: ${{ matrix.profile }}
_EMQX_TEST_DB_BACKEND: ${{ matrix.cluster_db_backend }}
strategy: strategy:
fail-fast: false fail-fast: false
@ -77,20 +78,18 @@ jobs:
- emqx - emqx
- emqx-enterprise - emqx-enterprise
- emqx-elixir - emqx-elixir
cluster_db_backend:
- mnesia
- rlog
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Set up environment - name: Set up environment
id: env id: env
run: | run: |
source env.sh source env.sh
if [ "$EMQX_NAME" = "emqx-enterprise" ]; then
_EMQX_TEST_DB_BACKEND='rlog'
else
_EMQX_TEST_DB_BACKEND='mnesia'
fi
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh "$EMQX_NAME") PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh "$EMQX_NAME")
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV" echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with: with:
name: ${{ env.EMQX_NAME }}-docker name: ${{ env.EMQX_NAME }}-docker
path: /tmp path: /tmp

View File

@ -37,7 +37,7 @@ jobs:
matrix: ${{ steps.matrix.outputs.matrix }} matrix: ${{ steps.matrix.outputs.matrix }}
skip: ${{ steps.matrix.outputs.skip }} skip: ${{ steps.matrix.outputs.skip }}
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
fetch-depth: 0 fetch-depth: 0
- name: prepare test matrix - name: prepare test matrix
@ -72,7 +72,7 @@ jobs:
run: run:
shell: bash shell: bash
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
fetch-depth: 0 fetch-depth: 0
- name: run - name: run
@ -95,7 +95,7 @@ jobs:
echo "Suites: $SUITES" echo "Suites: $SUITES"
./rebar3 as standalone_test ct --name 'test@127.0.0.1' -v --readable=true --suite="$SUITES" ./rebar3 as standalone_test ct --name 'test@127.0.0.1' -v --readable=true --suite="$SUITES"
fi fi
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: failure() if: failure()
with: with:
name: logs-emqx-app-tests-${{ matrix.type }} name: logs-emqx-app-tests-${{ matrix.type }}

View File

@ -34,7 +34,7 @@ jobs:
- ssl1.3 - ssl1.3
- ssl1.2 - ssl1.2
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
path: source path: source
- name: Set up environment - name: Set up environment
@ -44,7 +44,7 @@ jobs:
source env.sh source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh "$EMQX_NAME") PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh "$EMQX_NAME")
echo "EMQX_TAG=$PKG_VSN" >> "$GITHUB_ENV" echo "EMQX_TAG=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with: with:
name: "${{ env.EMQX_NAME }}-docker" name: "${{ env.EMQX_NAME }}-docker"
path: /tmp path: /tmp
@ -164,7 +164,7 @@ jobs:
fi fi
sleep 1; sleep 1;
done done
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
repository: emqx/paho.mqtt.testing repository: emqx/paho.mqtt.testing
ref: develop-5.0 ref: develop-5.0

View File

@ -12,7 +12,7 @@ jobs:
steps: steps:
- name: Cache Jmeter - name: Cache Jmeter
id: cache-jmeter id: cache-jmeter
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1
with: with:
path: /tmp/apache-jmeter.tgz path: /tmp/apache-jmeter.tgz
key: apache-jmeter-5.4.3.tgz key: apache-jmeter-5.4.3.tgz
@ -31,7 +31,7 @@ jobs:
else else
wget --no-verbose --no-check-certificate -O /tmp/apache-jmeter.tgz $ARCHIVE_URL wget --no-verbose --no-check-certificate -O /tmp/apache-jmeter.tgz $ARCHIVE_URL
fi fi
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
with: with:
name: apache-jmeter.tgz name: apache-jmeter.tgz
path: /tmp/apache-jmeter.tgz path: /tmp/apache-jmeter.tgz
@ -51,14 +51,14 @@ jobs:
needs: jmeter_artifact needs: jmeter_artifact
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Set up environment - name: Set up environment
id: env id: env
run: | run: |
source env.sh source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx) PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV" echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
name: emqx-docker name: emqx-docker
path: /tmp path: /tmp
@ -95,7 +95,7 @@ jobs:
echo "check logs failed" echo "check logs failed"
exit 1 exit 1
fi fi
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: always() if: always()
with: with:
name: jmeter_logs-advanced_feat-${{ matrix.scripts_type }} name: jmeter_logs-advanced_feat-${{ matrix.scripts_type }}
@ -120,14 +120,14 @@ jobs:
needs: jmeter_artifact needs: jmeter_artifact
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Set up environment - name: Set up environment
id: env id: env
run: | run: |
source env.sh source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx) PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV" echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
name: emqx-docker name: emqx-docker
path: /tmp path: /tmp
@ -175,7 +175,7 @@ jobs:
if: failure() if: failure()
run: | run: |
docker compose -f .ci/docker-compose-file/docker-compose-emqx-cluster.yaml logs --no-color > ./jmeter_logs/emqx.log docker compose -f .ci/docker-compose-file/docker-compose-emqx-cluster.yaml logs --no-color > ./jmeter_logs/emqx.log
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: always() if: always()
with: with:
name: jmeter_logs-pgsql_authn_authz-${{ matrix.scripts_type }}_${{ matrix.pgsql_tag }} name: jmeter_logs-pgsql_authn_authz-${{ matrix.scripts_type }}_${{ matrix.pgsql_tag }}
@ -197,14 +197,14 @@ jobs:
needs: jmeter_artifact needs: jmeter_artifact
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Set up environment - name: Set up environment
id: env id: env
run: | run: |
source env.sh source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx) PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV" echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
name: emqx-docker name: emqx-docker
path: /tmp path: /tmp
@ -248,7 +248,7 @@ jobs:
echo "check logs failed" echo "check logs failed"
exit 1 exit 1
fi fi
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: always() if: always()
with: with:
name: jmeter_logs-mysql_authn_authz-${{ matrix.scripts_type }}_${{ matrix.mysql_tag }} name: jmeter_logs-mysql_authn_authz-${{ matrix.scripts_type }}_${{ matrix.mysql_tag }}
@ -266,14 +266,14 @@ jobs:
needs: jmeter_artifact needs: jmeter_artifact
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Set up environment - name: Set up environment
id: env id: env
run: | run: |
source env.sh source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx) PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV" echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
name: emqx-docker name: emqx-docker
path: /tmp path: /tmp
@ -313,7 +313,7 @@ jobs:
echo "check logs failed" echo "check logs failed"
exit 1 exit 1
fi fi
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: always() if: always()
with: with:
name: jmeter_logs-JWT_authn-${{ matrix.scripts_type }} name: jmeter_logs-JWT_authn-${{ matrix.scripts_type }}
@ -332,14 +332,14 @@ jobs:
needs: jmeter_artifact needs: jmeter_artifact
steps: steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Set up environment - name: Set up environment
id: env id: env
run: | run: |
source env.sh source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx) PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV" echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
name: emqx-docker name: emqx-docker
path: /tmp path: /tmp
@ -370,7 +370,7 @@ jobs:
echo "check logs failed" echo "check logs failed"
exit 1 exit 1
fi fi
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: always() if: always()
with: with:
name: jmeter_logs-built_in_database_authn_authz-${{ matrix.scripts_type }} name: jmeter_logs-built_in_database_authn_authz-${{ matrix.scripts_type }}

View File

@ -25,7 +25,7 @@ jobs:
run: run:
shell: bash shell: bash
steps: steps:
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
name: emqx-enterprise name: emqx-enterprise
- name: extract artifact - name: extract artifact
@ -45,7 +45,7 @@ jobs:
run: | run: |
export PROFILE='emqx-enterprise' export PROFILE='emqx-enterprise'
make emqx-enterprise-tgz make emqx-enterprise-tgz
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
name: Upload built emqx and test scenario name: Upload built emqx and test scenario
with: with:
name: relup_tests_emqx_built name: relup_tests_emqx_built
@ -72,10 +72,10 @@ jobs:
run: run:
shell: bash shell: bash
steps: steps:
- uses: erlef/setup-beam@b9c58b0450cd832ccdb3c17cc156a47065d2114f # v1.18.1 - uses: erlef/setup-beam@2f0cc07b4b9bea248ae098aba9e1a8a1de5ec24c # v1.17.5
with: with:
otp-version: 26.2.5 otp-version: 26.2.5
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
repository: hawk/lux repository: hawk/lux
ref: lux-2.8.1 ref: lux-2.8.1
@ -88,7 +88,7 @@ jobs:
./configure ./configure
make make
echo "$(pwd)/bin" >> $GITHUB_PATH echo "$(pwd)/bin" >> $GITHUB_PATH
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
name: Download built emqx and test scenario name: Download built emqx and test scenario
with: with:
name: relup_tests_emqx_built name: relup_tests_emqx_built
@ -111,7 +111,7 @@ jobs:
docker logs node2.emqx.io | tee lux_logs/emqx2.log docker logs node2.emqx.io | tee lux_logs/emqx2.log
exit 1 exit 1
fi fi
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
name: Save debug data name: Save debug data
if: failure() if: failure()
with: with:

View File

@ -46,7 +46,7 @@ jobs:
contents: read contents: read
steps: steps:
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
name: ${{ matrix.profile }} name: ${{ matrix.profile }}
@ -90,7 +90,7 @@ jobs:
contents: read contents: read
steps: steps:
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
name: ${{ matrix.profile }} name: ${{ matrix.profile }}
- name: extract artifact - name: extract artifact
@ -133,7 +133,7 @@ jobs:
if: failure() if: failure()
run: tar -czf logs.tar.gz _build/test/logs run: tar -czf logs.tar.gz _build/test/logs
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: failure() if: failure()
with: with:
name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-sg${{ matrix.suitegroup }} name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-sg${{ matrix.suitegroup }}
@ -164,7 +164,7 @@ jobs:
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-sg${{ matrix.suitegroup }} CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-sg${{ matrix.suitegroup }}
steps: steps:
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
name: ${{ matrix.profile }} name: ${{ matrix.profile }}
- name: extract artifact - name: extract artifact
@ -193,7 +193,7 @@ jobs:
if: failure() if: failure()
run: tar -czf logs.tar.gz _build/test/logs run: tar -czf logs.tar.gz _build/test/logs
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: failure() if: failure()
with: with:
name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-sg${{ matrix.suitegroup }} name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-sg${{ matrix.suitegroup }}
@ -216,7 +216,7 @@ jobs:
steps: steps:
- name: Coveralls finished - name: Coveralls finished
if: github.repository == 'emqx/emqx' if: github.repository == 'emqx/emqx'
uses: coverallsapp/github-action@643bc377ffa44ace6394b2b5d0d3950076de9f63 # v2.3.0 uses: coverallsapp/github-action@3dfc5567390f6fa9267c0ee9c251e4c8c3f18949 # v2.2.3
with: with:
parallel-finished: true parallel-finished: true
git-branch: ${{ github.ref }} git-branch: ${{ github.ref }}

View File

@ -25,12 +25,12 @@ jobs:
steps: steps:
- name: "Checkout code" - name: "Checkout code"
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
persist-credentials: false persist-credentials: false
- name: "Run analysis" - name: "Run analysis"
uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0 uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1
with: with:
results_file: results.sarif results_file: results.sarif
results_format: sarif results_format: sarif
@ -40,7 +40,7 @@ jobs:
publish_results: true publish_results: true
- name: "Upload artifact" - name: "Upload artifact"
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5 uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
with: with:
name: SARIF file name: SARIF file
path: results.sarif path: results.sarif

View File

@ -19,7 +19,7 @@ jobs:
- emqx-enterprise - emqx-enterprise
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }} runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
steps: steps:
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
pattern: "${{ matrix.profile }}-schema-dump-*-x64" pattern: "${{ matrix.profile }}-schema-dump-*-x64"
merge-multiple: true merge-multiple: true

View File

@ -30,14 +30,14 @@ jobs:
include: ${{ fromJson(inputs.ct-matrix) }} include: ${{ fromJson(inputs.ct-matrix) }}
container: "${{ inputs.builder }}" container: "${{ inputs.builder }}"
steps: steps:
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with: with:
name: ${{ matrix.profile }} name: ${{ matrix.profile }}
- name: extract artifact - name: extract artifact
run: | run: |
unzip -o -q ${{ matrix.profile }}.zip unzip -o -q ${{ matrix.profile }}.zip
git config --global --add safe.directory "$GITHUB_WORKSPACE" git config --global --add safe.directory "$GITHUB_WORKSPACE"
- uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 - uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1
with: with:
path: "emqx_dialyzer_${{ matrix.profile }}_plt" path: "emqx_dialyzer_${{ matrix.profile }}_plt"
key: rebar3-dialyzer-plt-${{ matrix.profile }}-${{ hashFiles('rebar.*', 'apps/*/rebar.*') }} key: rebar3-dialyzer-plt-${{ matrix.profile }}-${{ hashFiles('rebar.*', 'apps/*/rebar.*') }}

View File

@ -1,88 +0,0 @@
name: Sync release branch
concurrency:
group: sync-release-branch-${{ github.event_name }}-${{ github.ref }}
cancel-in-progress: true
on:
schedule:
- cron: '0 2 * * *'
workflow_dispatch:
permissions:
contents: read
jobs:
create-pr:
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
strategy:
fail-fast: false
matrix:
branch:
- release-57
env:
SYNC_BRANCH: ${{ matrix.branch }}
defaults:
run:
shell: bash
permissions:
contents: write
pull-requests: write
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
fetch-depth: 0
- name: create new branch
run: |
set -euxo pipefail
NEW_BRANCH_NAME=sync-${SYNC_BRANCH}-$(date +"%Y%m%d-%H%M%S")
echo "NEW_BRANCH_NAME=${NEW_BRANCH_NAME}" >> $GITHUB_ENV
git config --global user.name "${GITHUB_ACTOR}"
git config --global user.email "${GITHUB_ACTOR}@users.noreply.github.com"
git checkout -b ${NEW_BRANCH_NAME}
git merge origin/${SYNC_BRANCH} 2>&1 | tee merge.log
git push origin ${NEW_BRANCH_NAME}:${NEW_BRANCH_NAME}
- name: create pull request
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
set -euxo pipefail
for pr in $(gh pr list --state open --base master --label sync-release-branch --search "Sync ${SYNC_BRANCH} in:title" --repo ${{ github.repository }} --json number --jq '.[] | .number'); do
gh pr close $pr --repo ${{ github.repository }} --delete-branch || true
done
gh pr create --title "Sync ${SYNC_BRANCH}" --body "Sync ${SYNC_BRANCH}" --base master --head ${NEW_BRANCH_NAME} --label sync-release-branch --repo ${{ github.repository }}
- name: Send notification to Slack
if: failure()
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
run: |
awk '{printf "%s\\n", $0}' merge.log > merge.log.1
cat <<EOF > payload.json
{
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "Automatic sync of ${SYNC_BRANCH} branch failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
}
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "\`\`\`$(cat merge.log.1)\`\`\`"
}
}
]
}
EOF
curl -X POST -H 'Content-type: application/json' --data @payload.json "$SLACK_WEBHOOK_URL"

View File

@ -23,7 +23,7 @@ jobs:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ secrets.AWS_DEFAULT_REGION }} aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with: with:
ref: ${{ github.event.inputs.tag }} ref: ${{ github.event.inputs.tag }}
- name: Detect profile - name: Detect profile

View File

@ -10,8 +10,8 @@ include env.sh
# Dashboard version # Dashboard version
# from https://github.com/emqx/emqx-dashboard5 # from https://github.com/emqx/emqx-dashboard5
export EMQX_DASHBOARD_VERSION ?= v1.10.0-beta.1 export EMQX_DASHBOARD_VERSION ?= v1.9.1
export EMQX_EE_DASHBOARD_VERSION ?= e1.8.0-beta.1 export EMQX_EE_DASHBOARD_VERSION ?= e1.7.2-beta.4
export EMQX_RELUP ?= true export EMQX_RELUP ?= true
export EMQX_REL_FORM ?= tgz export EMQX_REL_FORM ?= tgz
@ -28,8 +28,6 @@ CT_COVER_EXPORT_PREFIX ?= $(PROFILE)
export REBAR_GIT_CLONE_OPTIONS += --depth=1 export REBAR_GIT_CLONE_OPTIONS += --depth=1
ELIXIR_COMMON_DEPS := ensure-hex ensure-mix-rebar3 ensure-mix-rebar
.PHONY: default .PHONY: default
default: $(REBAR) $(PROFILE) default: $(REBAR) $(PROFILE)
@ -60,12 +58,8 @@ ensure-mix-rebar3: $(REBAR)
ensure-mix-rebar: $(REBAR) ensure-mix-rebar: $(REBAR)
@mix local.rebar --if-missing --force @mix local.rebar --if-missing --force
.PHONY: elixir-common-deps
elixir-common-deps: $(ELIXIR_COMMON_DEPS)
.PHONY: mix-deps-get .PHONY: mix-deps-get
mix-deps-get: elixir-common-deps mix-deps-get: $(ELIXIR_COMMON_DEPS)
@mix deps.get @mix deps.get
.PHONY: eunit .PHONY: eunit
@ -244,7 +238,7 @@ $(foreach zt,$(ALL_ZIPS),$(eval $(call download-relup-packages,$(zt))))
## relup target is to create relup instructions ## relup target is to create relup instructions
.PHONY: $(REL_PROFILES:%=%-relup) .PHONY: $(REL_PROFILES:%=%-relup)
define gen-relup-target define gen-relup-target
$1-relup: $(COMMON_DEPS) $1-relup: $1-relup-downloads $(COMMON_DEPS)
@$(BUILD) $1 relup @$(BUILD) $1 relup
endef endef
ALL_TGZS = $(REL_PROFILES) ALL_TGZS = $(REL_PROFILES)
@ -253,7 +247,7 @@ $(foreach zt,$(ALL_TGZS),$(eval $(call gen-relup-target,$(zt))))
## tgz target is to create a release package .tar.gz with relup ## tgz target is to create a release package .tar.gz with relup
.PHONY: $(REL_PROFILES:%=%-tgz) .PHONY: $(REL_PROFILES:%=%-tgz)
define gen-tgz-target define gen-tgz-target
$1-tgz: $(COMMON_DEPS) $1-tgz: $1-relup
@$(BUILD) $1 tgz @$(BUILD) $1 tgz
endef endef
ALL_TGZS = $(REL_PROFILES) ALL_TGZS = $(REL_PROFILES)
@ -316,20 +310,10 @@ $(foreach tt,$(ALL_ELIXIR_TGZS),$(eval $(call gen-elixir-tgz-target,$(tt))))
.PHONY: fmt .PHONY: fmt
fmt: $(REBAR) fmt: $(REBAR)
@find . \( -name '*.app.src' -o \ @$(SCRIPTS)/erlfmt -w 'apps/*/{src,include,priv,test,integration_test}/**/*.{erl,hrl,app.src,eterm}'
-name '*.erl' -o \ @$(SCRIPTS)/erlfmt -w 'apps/*/rebar.config' 'apps/emqx/rebar.config.script' '.ci/fvt_tests/http_server/rebar.config'
-name '*.hrl' -o \ @$(SCRIPTS)/erlfmt -w 'rebar.config' 'rebar.config.erl'
-name 'rebar.config' -o \ @$(SCRIPTS)/erlfmt -w 'scripts/*.escript' 'bin/*.escript' 'bin/nodetool'
-name '*.eterm' -o \
-name '*.escript' \) \
-not -path '*/_build/*' \
-not -path '*/deps/*' \
-not -path '*/_checkouts/*' \
-type f \
| xargs $(SCRIPTS)/erlfmt -w
@$(SCRIPTS)/erlfmt -w 'apps/emqx/rebar.config.script'
@$(SCRIPTS)/erlfmt -w 'elvis.config'
@$(SCRIPTS)/erlfmt -w 'bin/nodetool'
@mix format @mix format
.PHONY: clean-test-cluster-config .PHONY: clean-test-cluster-config

View File

@ -45,10 +45,6 @@
). ).
-define(assertReceive(PATTERN, TIMEOUT), -define(assertReceive(PATTERN, TIMEOUT),
?assertReceive(PATTERN, TIMEOUT, #{})
).
-define(assertReceive(PATTERN, TIMEOUT, EXTRA),
(fun() -> (fun() ->
receive receive
X__V = PATTERN -> X__V X__V = PATTERN -> X__V
@ -58,8 +54,7 @@
{module, ?MODULE}, {module, ?MODULE},
{line, ?LINE}, {line, ?LINE},
{expression, (??PATTERN)}, {expression, (??PATTERN)},
{mailbox, ?drainMailbox()}, {mailbox, ?drainMailbox()}
{extra_info, EXTRA}
]} ]}
) )
end end

View File

@ -65,20 +65,9 @@
%% Route %% Route
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-record(share_dest, {
session_id :: emqx_session:session_id(),
group :: emqx_types:group()
}).
-record(route, { -record(route, {
topic :: binary(), topic :: binary(),
dest :: dest :: node() | {binary(), node()} | emqx_session:session_id()
node()
| {binary(), node()}
| emqx_session:session_id()
%% One session can also have multiple subscriptions to the same topic through different groups
| #share_dest{}
| emqx_external_broker:dest()
}). }).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------

View File

@ -683,7 +683,6 @@ end).
-define(FRAME_PARSE_ERROR, frame_parse_error). -define(FRAME_PARSE_ERROR, frame_parse_error).
-define(FRAME_SERIALIZE_ERROR, frame_serialize_error). -define(FRAME_SERIALIZE_ERROR, frame_serialize_error).
-define(THROW_FRAME_ERROR(Reason), erlang:throw({?FRAME_PARSE_ERROR, Reason})). -define(THROW_FRAME_ERROR(Reason), erlang:throw({?FRAME_PARSE_ERROR, Reason})).
-define(THROW_SERIALIZE_ERROR(Reason), erlang:throw({?FRAME_SERIALIZE_ERROR, Reason})). -define(THROW_SERIALIZE_ERROR(Reason), erlang:throw({?FRAME_SERIALIZE_ERROR, Reason})).

View File

@ -32,7 +32,7 @@
%% `apps/emqx/src/bpapi/README.md' %% `apps/emqx/src/bpapi/README.md'
%% Opensource edition %% Opensource edition
-define(EMQX_RELEASE_CE, "5.8.0-alpha.1"). -define(EMQX_RELEASE_CE, "5.7.2-prvbuild.1").
%% Enterprise edition %% Enterprise edition
-define(EMQX_RELEASE_EE, "5.8.0-alpha.1"). -define(EMQX_RELEASE_EE, "5.7.2-prvbuild.1").

View File

@ -20,11 +20,4 @@
-define(IS_SESSION_IMPL_MEM(S), (is_tuple(S) andalso element(1, S) =:= session)). -define(IS_SESSION_IMPL_MEM(S), (is_tuple(S) andalso element(1, S) =:= session)).
-define(IS_SESSION_IMPL_DS(S), (is_map_key(id, S))). -define(IS_SESSION_IMPL_DS(S), (is_map_key(id, S))).
%% (Erlang) messages that a connection process should forward to the
%% session handler.
-record(session_message, {
message :: term()
}).
-define(session_message(MSG), #session_message{message = MSG}).
-endif. -endif.

View File

@ -86,6 +86,5 @@
{'SOURCE_ERROR', <<"Source error">>}, {'SOURCE_ERROR', <<"Source error">>},
{'UPDATE_FAILED', <<"Update failed">>}, {'UPDATE_FAILED', <<"Update failed">>},
{'REST_FAILED', <<"Reset source or config failed">>}, {'REST_FAILED', <<"Reset source or config failed">>},
{'CLIENT_NOT_RESPONSE', <<"Client not responding">>}, {'CLIENT_NOT_RESPONSE', <<"Client not responding">>}
{'UNSUPPORTED_MEDIA_TYPE', <<"Unsupported media type">>}
]). ]).

View File

@ -30,10 +30,7 @@
logger:log( logger:log(
Level, Level,
(Data), (Data),
maps:merge(Meta, #{ Meta
mfa => {?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY},
line => ?LINE
})
); );
false -> false ->
ok ok
@ -91,7 +88,7 @@
?_DO_TRACE(Tag, Msg, Meta), ?_DO_TRACE(Tag, Msg, Meta),
?SLOG( ?SLOG(
Level, Level,
(Meta)#{msg => Msg, tag => Tag}, (emqx_trace_formatter:format_meta_map(Meta))#{msg => Msg, tag => Tag},
#{is_trace => false} #{is_trace => false}
) )
end). end).

View File

@ -25,16 +25,11 @@ all() ->
emqx_common_test_helpers:all(?MODULE). emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) -> init_per_suite(Config) ->
case emqx_ds_test_helpers:skip_if_norepl() of TCApps = emqx_cth_suite:start(
false -> app_specs(),
TCApps = emqx_cth_suite:start( #{work_dir => emqx_cth_suite:work_dir(Config)}
app_specs(), ),
#{work_dir => emqx_cth_suite:work_dir(Config)} [{tc_apps, TCApps} | Config].
),
[{tc_apps, TCApps} | Config];
Yes ->
Yes
end.
end_per_suite(Config) -> end_per_suite(Config) ->
TCApps = ?config(tc_apps, Config), TCApps = ?config(tc_apps, Config),
@ -163,7 +158,7 @@ mk_clientid(Prefix, ID) ->
restart_node(Node, NodeSpec) -> restart_node(Node, NodeSpec) ->
?tp(will_restart_node, #{}), ?tp(will_restart_node, #{}),
emqx_cth_cluster:restart(NodeSpec), emqx_cth_cluster:restart(Node, NodeSpec),
wait_nodeup(Node), wait_nodeup(Node),
?tp(restarted_node, #{}), ?tp(restarted_node, #{}),
ok. ok.
@ -258,7 +253,7 @@ t_session_subscription_idempotency(Config) ->
ok ok
end, end,
fun(_Trace) -> fun(Trace) ->
Session = session_open(Node1, ClientId), Session = session_open(Node1, ClientId),
?assertMatch( ?assertMatch(
#{SubTopicFilter := #{}}, #{SubTopicFilter := #{}},
@ -331,7 +326,7 @@ t_session_unsubscription_idempotency(Config) ->
ok ok
end, end,
fun(_Trace) -> fun(Trace) ->
Session = session_open(Node1, ClientId), Session = session_open(Node1, ClientId),
?assertEqual( ?assertEqual(
#{}, #{},

View File

@ -8,7 +8,7 @@ defmodule EMQX.MixProject do
app: :emqx, app: :emqx,
version: "0.1.0", version: "0.1.0",
build_path: "../../_build", build_path: "../../_build",
erlc_paths: erlc_paths(), erlc_paths: UMP.erlc_paths(),
erlc_options: [ erlc_options: [
{:i, "src"} {:i, "src"}
| UMP.erlc_options() | UMP.erlc_options()
@ -36,9 +36,8 @@ defmodule EMQX.MixProject do
def deps() do def deps() do
## FIXME!!! go though emqx.app.src and add missing stuff... ## FIXME!!! go though emqx.app.src and add missing stuff...
[ [
{:emqx_mix_utils, in_umbrella: true, runtime: false},
{:emqx_utils, in_umbrella: true}, {:emqx_utils, in_umbrella: true},
{:emqx_ds_backends, in_umbrella: true}, # {:emqx_ds_backends, in_umbrella: true},
UMP.common_dep(:gproc), UMP.common_dep(:gproc),
UMP.common_dep(:gen_rpc), UMP.common_dep(:gen_rpc),
@ -54,15 +53,6 @@ defmodule EMQX.MixProject do
] ++ UMP.quicer_dep() ] ++ UMP.quicer_dep()
end end
defp erlc_paths() do
paths = UMP.erlc_paths()
if UMP.test_env?() do
["integration_test" | paths]
else
paths
end
end
defp extra_dirs() do defp extra_dirs() do
dirs = ["src", "etc"] dirs = ["src", "etc"]
if UMP.test_env?() do if UMP.test_env?() do

View File

@ -10,14 +10,12 @@
{emqx_bridge,5}. {emqx_bridge,5}.
{emqx_bridge,6}. {emqx_bridge,6}.
{emqx_broker,1}. {emqx_broker,1}.
{emqx_cluster_link,1}.
{emqx_cm,1}. {emqx_cm,1}.
{emqx_cm,2}. {emqx_cm,2}.
{emqx_cm,3}. {emqx_cm,3}.
{emqx_conf,1}. {emqx_conf,1}.
{emqx_conf,2}. {emqx_conf,2}.
{emqx_conf,3}. {emqx_conf,3}.
{emqx_conf,4}.
{emqx_connector,1}. {emqx_connector,1}.
{emqx_dashboard,1}. {emqx_dashboard,1}.
{emqx_delayed,1}. {emqx_delayed,1}.
@ -27,7 +25,6 @@
{emqx_ds,2}. {emqx_ds,2}.
{emqx_ds,3}. {emqx_ds,3}.
{emqx_ds,4}. {emqx_ds,4}.
{emqx_ds_shared_sub,1}.
{emqx_eviction_agent,1}. {emqx_eviction_agent,1}.
{emqx_eviction_agent,2}. {emqx_eviction_agent,2}.
{emqx_eviction_agent,3}. {emqx_eviction_agent,3}.
@ -50,7 +47,6 @@
{emqx_mgmt_api_plugins,1}. {emqx_mgmt_api_plugins,1}.
{emqx_mgmt_api_plugins,2}. {emqx_mgmt_api_plugins,2}.
{emqx_mgmt_api_plugins,3}. {emqx_mgmt_api_plugins,3}.
{emqx_mgmt_api_relup,1}.
{emqx_mgmt_cluster,1}. {emqx_mgmt_cluster,1}.
{emqx_mgmt_cluster,2}. {emqx_mgmt_cluster,2}.
{emqx_mgmt_cluster,3}. {emqx_mgmt_cluster,3}.

View File

@ -24,18 +24,18 @@
{deps, [ {deps, [
{emqx_utils, {path, "../emqx_utils"}}, {emqx_utils, {path, "../emqx_utils"}},
{emqx_durable_storage, {path, "../emqx_durable_storage"}}, {emqx_durable_storage, {path, "../emqx_durable_storage"}},
{emqx_ds_backends, {path, "../emqx_ds_backends"}},
{lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}}, {lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}},
{gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}}, {gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}},
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.12.0"}}}, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.11.3"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.19.5"}}}, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.19.5"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.1"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.43.2"}}}, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.43.1"}}},
{emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}}, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}},
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}}, {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
{snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.10"}}} {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.10"}}},
{ra, "2.7.3"}
]}. ]}.
{plugins, [{rebar3_proper, "0.12.1"}, rebar3_path_deps]}. {plugins, [{rebar3_proper, "0.12.1"}, rebar3_path_deps]}.

View File

@ -24,8 +24,7 @@ IsQuicSupp = fun() ->
end, end,
Bcrypt = {bcrypt, {git, "https://github.com/emqx/erlang-bcrypt.git", {tag, "0.6.0"}}}, Bcrypt = {bcrypt, {git, "https://github.com/emqx/erlang-bcrypt.git", {tag, "0.6.0"}}},
Quicer = Quicer = {quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.313"}}}.
{quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.500"}}}.
Dialyzer = fun(Config) -> Dialyzer = fun(Config) ->
{dialyzer, OldDialyzerConfig} = lists:keyfind(dialyzer, 1, Config), {dialyzer, OldDialyzerConfig} = lists:keyfind(dialyzer, 1, Config),

View File

@ -237,29 +237,27 @@ log_formatter(HandlerName, Conf) ->
_ -> _ ->
conf_get("formatter", Conf) conf_get("formatter", Conf)
end, end,
TsFormat = timestamp_format(Conf), TsFormat = timstamp_format(Conf),
WithMfa = conf_get("with_mfa", Conf),
PayloadEncode = conf_get("payload_encode", Conf, text), PayloadEncode = conf_get("payload_encode", Conf, text),
do_formatter( do_formatter(
Format, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, WithMfa, PayloadEncode Format, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, PayloadEncode
). ).
%% auto | epoch | rfc3339 %% auto | epoch | rfc3339
timestamp_format(Conf) -> timstamp_format(Conf) ->
conf_get("timestamp_format", Conf). conf_get("timestamp_format", Conf).
%% helpers %% helpers
do_formatter(json, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, WithMfa, PayloadEncode) -> do_formatter(json, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, PayloadEncode) ->
{emqx_logger_jsonfmt, #{ {emqx_logger_jsonfmt, #{
chars_limit => CharsLimit, chars_limit => CharsLimit,
single_line => SingleLine, single_line => SingleLine,
time_offset => TimeOffSet, time_offset => TimeOffSet,
depth => Depth, depth => Depth,
timestamp_format => TsFormat, timestamp_format => TsFormat,
with_mfa => WithMfa,
payload_encode => PayloadEncode payload_encode => PayloadEncode
}}; }};
do_formatter(text, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, WithMfa, PayloadEncode) -> do_formatter(text, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, PayloadEncode) ->
{emqx_logger_textfmt, #{ {emqx_logger_textfmt, #{
template => ["[", level, "] ", msg, "\n"], template => ["[", level, "] ", msg, "\n"],
chars_limit => CharsLimit, chars_limit => CharsLimit,
@ -267,7 +265,6 @@ do_formatter(text, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, WithMfa,
time_offset => TimeOffSet, time_offset => TimeOffSet,
depth => Depth, depth => Depth,
timestamp_format => TsFormat, timestamp_format => TsFormat,
with_mfa => WithMfa,
payload_encode => PayloadEncode payload_encode => PayloadEncode
}}. }}.

View File

@ -2,7 +2,7 @@
{application, emqx, [ {application, emqx, [
{id, "emqx"}, {id, "emqx"},
{description, "EMQX Core"}, {description, "EMQX Core"},
{vsn, "5.3.4"}, {vsn, "5.3.3"},
{modules, []}, {modules, []},
{registered, []}, {registered, []},
{applications, [ {applications, [
@ -18,7 +18,7 @@
sasl, sasl,
lc, lc,
hocon, hocon,
emqx_ds_backends, emqx_durable_storage,
bcrypt, bcrypt,
pbkdf2, pbkdf2,
emqx_http_lib, emqx_http_lib,

View File

@ -33,7 +33,6 @@
-export([ -export([
check/1, check/1,
check_clientid/1,
create/1, create/1,
look_up/1, look_up/1,
delete/1, delete/1,
@ -118,10 +117,6 @@ check(ClientInfo) ->
do_check({peerhost, maps:get(peerhost, ClientInfo, undefined)}) orelse do_check({peerhost, maps:get(peerhost, ClientInfo, undefined)}) orelse
do_check_rules(ClientInfo). do_check_rules(ClientInfo).
-spec check_clientid(emqx_types:clientid()) -> boolean().
check_clientid(ClientId) ->
do_check({clientid, ClientId}) orelse do_check_rules(#{clientid => ClientId}).
-spec format(emqx_types:banned()) -> map(). -spec format(emqx_types:banned()) -> map().
format(#banned{ format(#banned{
who = Who0, who = Who0,

View File

@ -244,24 +244,11 @@ publish(Msg) when is_record(Msg, message) ->
topic => Topic topic => Topic
}), }),
[]; [];
Msg1 = #message{} -> Msg1 = #message{topic = Topic} ->
do_publish(Msg1); PersistRes = persist_publish(Msg1),
Msgs when is_list(Msgs) -> route(aggre(emqx_router:match_routes(Topic)), delivery(Msg1), PersistRes)
do_publish_many(Msgs)
end. end.
do_publish_many([]) ->
[];
do_publish_many([Msg | T]) ->
do_publish(Msg) ++ do_publish_many(T).
do_publish(#message{topic = Topic} = Msg) ->
PersistRes = persist_publish(Msg),
Routes = aggre(emqx_router:match_routes(Topic)),
Delivery = delivery(Msg),
RouteRes = route(Routes, Delivery, PersistRes),
do_forward_external(Delivery, RouteRes).
persist_publish(Msg) -> persist_publish(Msg) ->
case emqx_persistent_message:persist(Msg) of case emqx_persistent_message:persist(Msg) of
ok -> ok ->
@ -345,9 +332,6 @@ aggre([], false, Acc) ->
aggre([], true, Acc) -> aggre([], true, Acc) ->
lists:usort(Acc). lists:usort(Acc).
do_forward_external(Delivery, RouteRes) ->
emqx_external_broker:forward(Delivery) ++ RouteRes.
%% @doc Forward message to another node. %% @doc Forward message to another node.
-spec forward( -spec forward(
node(), emqx_types:topic() | emqx_types:share(), emqx_types:delivery(), RpcMode :: sync | async node(), emqx_types:topic() | emqx_types:share(), emqx_types:delivery(), RpcMode :: sync | async
@ -659,27 +643,19 @@ maybe_delete_route(Topic) ->
sync_route(Action, Topic, ReplyTo) -> sync_route(Action, Topic, ReplyTo) ->
EnabledOn = emqx_config:get([broker, routing, batch_sync, enable_on]), EnabledOn = emqx_config:get([broker, routing, batch_sync, enable_on]),
Res = case EnabledOn of
case EnabledOn of all ->
all -> push_sync_route(Action, Topic, ReplyTo);
push_sync_route(Action, Topic, ReplyTo); none ->
none -> regular_sync_route(Action, Topic);
regular_sync_route(Action, Topic); Role ->
Role -> case Role =:= mria_config:whoami() of
case Role =:= mria_config:whoami() of true ->
true -> push_sync_route(Action, Topic, ReplyTo);
push_sync_route(Action, Topic, ReplyTo); false ->
false -> regular_sync_route(Action, Topic)
regular_sync_route(Action, Topic) end
end end.
end,
_ = external_sync_route(Action, Topic),
Res.
external_sync_route(add, Topic) ->
emqx_external_broker:add_route(Topic);
external_sync_route(delete, Topic) ->
emqx_external_broker:delete_route(Topic).
push_sync_route(Action, Topic, Opts) -> push_sync_route(Action, Topic, Opts) ->
emqx_router_syncer:push(Action, Topic, node(), Opts). emqx_router_syncer:push(Action, Topic, node(), Opts).

View File

@ -47,7 +47,7 @@ init([]) ->
router_syncer_pool, router_syncer_pool,
hash, hash,
PoolSize, PoolSize,
{emqx_router_syncer, start_link_pooled, []} {emqx_router_syncer, start_link, []}
]), ]),
%% Shared subscription %% Shared subscription

View File

@ -19,7 +19,6 @@
-include("emqx.hrl"). -include("emqx.hrl").
-include("emqx_channel.hrl"). -include("emqx_channel.hrl").
-include("emqx_session.hrl").
-include("emqx_mqtt.hrl"). -include("emqx_mqtt.hrl").
-include("emqx_access_control.hrl"). -include("emqx_access_control.hrl").
-include("logger.hrl"). -include("logger.hrl").
@ -146,9 +145,7 @@
-type replies() :: emqx_types:packet() | reply() | [reply()]. -type replies() :: emqx_types:packet() | reply() | [reply()].
-define(IS_MQTT_V5, #channel{conninfo = #{proto_ver := ?MQTT_PROTO_V5}}). -define(IS_MQTT_V5, #channel{conninfo = #{proto_ver := ?MQTT_PROTO_V5}}).
-define(IS_CONNECTED_OR_REAUTHENTICATING(ConnState),
((ConnState == connected) orelse (ConnState == reauthenticating))
).
-define(IS_COMMON_SESSION_TIMER(N), -define(IS_COMMON_SESSION_TIMER(N),
((N == retry_delivery) orelse (N == expire_awaiting_rel)) ((N == retry_delivery) orelse (N == expire_awaiting_rel))
). ).
@ -237,7 +234,7 @@ caps(#channel{clientinfo = #{zone := Zone}}) ->
-spec init(emqx_types:conninfo(), opts()) -> channel(). -spec init(emqx_types:conninfo(), opts()) -> channel().
init( init(
ConnInfo = #{ ConnInfo = #{
peername := {PeerHost, PeerPort} = PeerName, peername := {PeerHost, PeerPort},
sockname := {_Host, SockPort} sockname := {_Host, SockPort}
}, },
#{ #{
@ -261,9 +258,6 @@ init(
listener => ListenerId, listener => ListenerId,
protocol => Protocol, protocol => Protocol,
peerhost => PeerHost, peerhost => PeerHost,
%% We copy peername to clientinfo because some event contexts only have access
%% to client info (e.g.: authn/authz).
peername => PeerName,
peerport => PeerPort, peerport => PeerPort,
sockport => SockPort, sockport => SockPort,
clientid => undefined, clientid => undefined,
@ -339,7 +333,7 @@ take_conn_info_fields(Fields, ClientInfo, ConnInfo) ->
| {shutdown, Reason :: term(), channel()} | {shutdown, Reason :: term(), channel()}
| {shutdown, Reason :: term(), replies(), channel()}. | {shutdown, Reason :: term(), replies(), channel()}.
handle_in(?CONNECT_PACKET(), Channel = #channel{conn_state = ConnState}) when handle_in(?CONNECT_PACKET(), Channel = #channel{conn_state = ConnState}) when
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState) ConnState =:= connected orelse ConnState =:= reauthenticating
-> ->
handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel); handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel);
handle_in(?CONNECT_PACKET(), Channel = #channel{conn_state = connecting}) -> handle_in(?CONNECT_PACKET(), Channel = #channel{conn_state = connecting}) ->
@ -569,8 +563,29 @@ handle_in(
process_disconnect(ReasonCode, Properties, NChannel); process_disconnect(ReasonCode, Properties, NChannel);
handle_in(?AUTH_PACKET(), Channel) -> handle_in(?AUTH_PACKET(), Channel) ->
handle_out(disconnect, ?RC_IMPLEMENTATION_SPECIFIC_ERROR, Channel); handle_out(disconnect, ?RC_IMPLEMENTATION_SPECIFIC_ERROR, Channel);
handle_in({frame_error, Reason}, Channel) -> handle_in({frame_error, Reason}, Channel = #channel{conn_state = idle}) ->
handle_frame_error(Reason, Channel); shutdown(shutdown_count(frame_error, Reason), Channel);
handle_in(
{frame_error, #{cause := frame_too_large} = R}, Channel = #channel{conn_state = connecting}
) ->
shutdown(
shutdown_count(frame_error, R), ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE), Channel
);
handle_in({frame_error, Reason}, Channel = #channel{conn_state = connecting}) ->
shutdown(shutdown_count(frame_error, Reason), ?CONNACK_PACKET(?RC_MALFORMED_PACKET), Channel);
handle_in(
{frame_error, #{cause := frame_too_large}}, Channel = #channel{conn_state = ConnState}
) when
ConnState =:= connected orelse ConnState =:= reauthenticating
->
handle_out(disconnect, {?RC_PACKET_TOO_LARGE, frame_too_large}, Channel);
handle_in({frame_error, Reason}, Channel = #channel{conn_state = ConnState}) when
ConnState =:= connected orelse ConnState =:= reauthenticating
->
handle_out(disconnect, {?RC_MALFORMED_PACKET, Reason}, Channel);
handle_in({frame_error, Reason}, Channel = #channel{conn_state = disconnected}) ->
?SLOG(error, #{msg => "malformed_mqtt_message", reason => Reason}),
{ok, Channel};
handle_in(Packet, Channel) -> handle_in(Packet, Channel) ->
?SLOG(error, #{msg => "disconnecting_due_to_unexpected_message", packet => Packet}), ?SLOG(error, #{msg => "disconnecting_due_to_unexpected_message", packet => Packet}),
handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel). handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel).
@ -1002,68 +1017,6 @@ not_nacked({deliver, _Topic, Msg}) ->
true true
end. end.
%%--------------------------------------------------------------------
%% Handle Frame Error
%%--------------------------------------------------------------------
handle_frame_error(
Reason = #{cause := frame_too_large},
Channel = #channel{conn_state = ConnState, conninfo = ConnInfo}
) when
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
->
ShutdownCount = shutdown_count(frame_error, Reason),
case proto_ver(Reason, ConnInfo) of
?MQTT_PROTO_V5 ->
handle_out(disconnect, {?RC_PACKET_TOO_LARGE, frame_too_large}, Channel);
_ ->
shutdown(ShutdownCount, Channel)
end;
%% Only send CONNACK with reason code `frame_too_large` for MQTT-v5.0 when connecting,
%% otherwise DONOT send any CONNACK or DISCONNECT packet.
handle_frame_error(
Reason,
Channel = #channel{conn_state = ConnState, conninfo = ConnInfo}
) when
is_map(Reason) andalso
(ConnState == idle orelse ConnState == connecting)
->
ShutdownCount = shutdown_count(frame_error, Reason),
ProtoVer = proto_ver(Reason, ConnInfo),
NChannel = Channel#channel{conninfo = ConnInfo#{proto_ver => ProtoVer}},
case ProtoVer of
?MQTT_PROTO_V5 ->
shutdown(ShutdownCount, ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE), NChannel);
_ ->
shutdown(ShutdownCount, NChannel)
end;
handle_frame_error(
Reason,
Channel = #channel{conn_state = connecting}
) ->
shutdown(
shutdown_count(frame_error, Reason),
?CONNACK_PACKET(?RC_MALFORMED_PACKET),
Channel
);
handle_frame_error(
Reason,
Channel = #channel{conn_state = ConnState}
) when
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
->
handle_out(
disconnect,
{?RC_MALFORMED_PACKET, Reason},
Channel
);
handle_frame_error(
Reason,
Channel = #channel{conn_state = disconnected}
) ->
?SLOG(error, #{msg => "malformed_mqtt_message", reason => Reason}),
{ok, Channel}.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Handle outgoing packet %% Handle outgoing packet
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -1332,7 +1285,7 @@ handle_info(
session = Session session = Session
} }
) when ) when
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState) ConnState =:= connected orelse ConnState =:= reauthenticating
-> ->
{Intent, Session1} = session_disconnect(ClientInfo, ConnInfo, Session), {Intent, Session1} = session_disconnect(ClientInfo, ConnInfo, Session),
Channel1 = ensure_disconnected(Reason, maybe_publish_will_msg(sock_closed, Channel)), Channel1 = ensure_disconnected(Reason, maybe_publish_will_msg(sock_closed, Channel)),
@ -1362,9 +1315,6 @@ handle_info({'DOWN', Ref, process, Pid, Reason}, Channel) ->
[] -> {ok, Channel}; [] -> {ok, Channel};
Msgs -> {ok, Msgs, Channel} Msgs -> {ok, Msgs, Channel}
end; end;
handle_info(?session_message(Message), #channel{session = Session} = Channel) ->
NSession = emqx_session:handle_info(Message, Session),
{ok, Channel#channel{session = NSession}};
handle_info(Info, Channel) -> handle_info(Info, Channel) ->
?SLOG(error, #{msg => "unexpected_info", info => Info}), ?SLOG(error, #{msg => "unexpected_info", info => Info}),
{ok, Channel}. {ok, Channel}.
@ -1799,7 +1749,7 @@ maybe_add_cert(Map, #channel{conninfo = ConnInfo}) ->
maybe_add_cert(Map, ConnInfo); maybe_add_cert(Map, ConnInfo);
maybe_add_cert(Map, #{peercert := PeerCert}) when is_binary(PeerCert) -> maybe_add_cert(Map, #{peercert := PeerCert}) when is_binary(PeerCert) ->
%% NOTE: it's raw binary at this point, %% NOTE: it's raw binary at this point,
%% encoding to PEM (base64) is done lazy in emqx_auth_utils:render_var %% encoding to PEM (base64) is done lazy in emqx_authn_utils:render_var
Map#{cert_pem => PeerCert}; Map#{cert_pem => PeerCert};
maybe_add_cert(Map, _) -> maybe_add_cert(Map, _) ->
Map. Map.
@ -2679,7 +2629,8 @@ save_alias(outbound, AliasId, Topic, TopicAliases = #{outbound := Aliases}) ->
NAliases = maps:put(Topic, AliasId, Aliases), NAliases = maps:put(Topic, AliasId, Aliases),
TopicAliases#{outbound => NAliases}. TopicAliases#{outbound => NAliases}.
-compile({inline, [reply/2, shutdown/2, shutdown/3]}). -compile({inline, [reply/2, shutdown/2, shutdown/3, sp/1, flag/1]}).
reply(Reply, Channel) -> reply(Reply, Channel) ->
{reply, Reply, Channel}. {reply, Reply, Channel}.
@ -2715,13 +2666,13 @@ disconnect_and_shutdown(
?IS_MQTT_V5 = ?IS_MQTT_V5 =
#channel{conn_state = ConnState} #channel{conn_state = ConnState}
) when ) when
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState) ConnState =:= connected orelse ConnState =:= reauthenticating
-> ->
NChannel = ensure_disconnected(Reason, Channel), NChannel = ensure_disconnected(Reason, Channel),
shutdown(Reason, Reply, ?DISCONNECT_PACKET(reason_code(Reason)), NChannel); shutdown(Reason, Reply, ?DISCONNECT_PACKET(reason_code(Reason)), NChannel);
%% mqtt v3/v4 connected sessions %% mqtt v3/v4 connected sessions
disconnect_and_shutdown(Reason, Reply, Channel = #channel{conn_state = ConnState}) when disconnect_and_shutdown(Reason, Reply, Channel = #channel{conn_state = ConnState}) when
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState) ConnState =:= connected orelse ConnState =:= reauthenticating
-> ->
NChannel = ensure_disconnected(Reason, Channel), NChannel = ensure_disconnected(Reason, Channel),
shutdown(Reason, Reply, NChannel); shutdown(Reason, Reply, NChannel);
@ -2764,13 +2715,6 @@ is_durable_session(#channel{session = Session}) ->
false false
end. end.
proto_ver(#{proto_ver := ProtoVer}, _ConnInfo) ->
ProtoVer;
proto_ver(_Reason, #{proto_ver := ProtoVer}) ->
ProtoVer;
proto_ver(_, _) ->
?MQTT_PROTO_V4.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% For CT tests %% For CT tests
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------

View File

@ -499,14 +499,15 @@ fill_defaults(RawConf, Opts) ->
). ).
-spec fill_defaults(module(), raw_config(), hocon_tconf:opts()) -> map(). -spec fill_defaults(module(), raw_config(), hocon_tconf:opts()) -> map().
fill_defaults(SchemaMod, RawConf = #{<<"durable_storage">> := Ds}, Opts) -> fill_defaults(_SchemaMod, RawConf = #{<<"durable_storage">> := _}, _) ->
%% FIXME: kludge to prevent `emqx_config' module from filling in %% FIXME: kludge to prevent `emqx_config' module from filling in
%% the default values for backends and layouts. These records are %% the default values for backends and layouts. These records are
%% inside unions, and adding default values there will add %% inside unions, and adding default values there will add
%% incompatible fields. %% incompatible fields.
RawConf1 = maps:remove(<<"durable_storage">>, RawConf), %%
Conf = fill_defaults(SchemaMod, RawConf1, Opts), %% Note: this function is called for each individual conf root, so
Conf#{<<"durable_storage">> => Ds}; %% this clause only affects this particular subtree.
RawConf;
fill_defaults(SchemaMod, RawConf, Opts0) -> fill_defaults(SchemaMod, RawConf, Opts0) ->
Opts = maps:merge(#{required => false, make_serializable => true}, Opts0), Opts = maps:merge(#{required => false, make_serializable => true}, Opts0),
hocon_tconf:check_plain( hocon_tconf:check_plain(

View File

@ -173,9 +173,7 @@
system_code_change/4 system_code_change/4
]} ]}
). ).
-dialyzer({no_missing_calls, [handle_msg/2]}).
-ifndef(BUILD_WITHOUT_QUIC).
-spec start_link -spec start_link
(esockd:transport(), esockd:socket(), emqx_channel:opts()) -> (esockd:transport(), esockd:socket(), emqx_channel:opts()) ->
{ok, pid()}; {ok, pid()};
@ -185,9 +183,6 @@
emqx_quic_connection:cb_state() emqx_quic_connection:cb_state()
) -> ) ->
{ok, pid()}. {ok, pid()}.
-else.
-spec start_link(esockd:transport(), esockd:socket(), emqx_channel:opts()) -> {ok, pid()}.
-endif.
start_link(Transport, Socket, Options) -> start_link(Transport, Socket, Options) ->
Args = [self(), Transport, Socket, Options], Args = [self(), Transport, Socket, Options],
@ -328,7 +323,7 @@ init_state(
max_size => emqx_config:get_zone_conf(Zone, [mqtt, max_packet_size]) max_size => emqx_config:get_zone_conf(Zone, [mqtt, max_packet_size])
}, },
ParseState = emqx_frame:initial_parse_state(FrameOpts), ParseState = emqx_frame:initial_parse_state(FrameOpts),
Serialize = emqx_frame:initial_serialize_opts(FrameOpts), Serialize = emqx_frame:serialize_opts(),
%% Init Channel %% Init Channel
Channel = emqx_channel:init(ConnInfo, Opts), Channel = emqx_channel:init(ConnInfo, Opts),
GcState = GcState =
@ -473,17 +468,19 @@ cancel_stats_timer(State) ->
process_msg([], State) -> process_msg([], State) ->
{ok, State}; {ok, State};
process_msg([Msg | More], State) -> process_msg([Msg | More], State) ->
try handle_msg(Msg, State) of try
ok -> case handle_msg(Msg, State) of
process_msg(More, State); ok ->
{ok, NState} -> process_msg(More, State);
process_msg(More, NState); {ok, NState} ->
{ok, Msgs, NState} -> process_msg(More, NState);
process_msg(append_msg(More, Msgs), NState); {ok, Msgs, NState} ->
{stop, Reason, NState} -> process_msg(append_msg(More, Msgs), NState);
{stop, Reason, NState}; {stop, Reason, NState} ->
{stop, Reason} -> {stop, Reason, NState};
{stop, Reason, State} {stop, Reason} ->
{stop, Reason, State}
end
catch catch
exit:normal -> exit:normal ->
{stop, normal, State}; {stop, normal, State};
@ -569,10 +566,12 @@ handle_msg({Closed, _Sock}, State) when
handle_msg({Passive, _Sock}, State) when handle_msg({Passive, _Sock}, State) when
Passive == tcp_passive; Passive == ssl_passive; Passive =:= quic_passive Passive == tcp_passive; Passive == ssl_passive; Passive =:= quic_passive
-> ->
%% In Stats
Pubs = emqx_pd:reset_counter(incoming_pubs), Pubs = emqx_pd:reset_counter(incoming_pubs),
Bytes = emqx_pd:reset_counter(incoming_bytes), Bytes = emqx_pd:reset_counter(incoming_bytes),
InStats = #{cnt => Pubs, oct => Bytes},
%% Run GC and Check OOM %% Run GC and Check OOM
NState1 = check_oom(Pubs, Bytes, run_gc(Pubs, Bytes, State)), NState1 = check_oom(run_gc(InStats, State)),
handle_info(activate_socket, NState1); handle_info(activate_socket, NState1);
handle_msg( handle_msg(
Deliver = {deliver, _Topic, _Msg}, Deliver = {deliver, _Topic, _Msg},
@ -783,8 +782,7 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) ->
input_bytes => Data, input_bytes => Data,
parsed_packets => Packets parsed_packets => Packets
}), }),
NState = enrich_state(Reason, State), {[{frame_error, Reason} | Packets], State};
{[{frame_error, Reason} | Packets], NState};
error:Reason:Stacktrace -> error:Reason:Stacktrace ->
?LOG(error, #{ ?LOG(error, #{
at_state => emqx_frame:describe_state(ParseState), at_state => emqx_frame:describe_state(ParseState),
@ -901,7 +899,8 @@ sent(#state{listener = {Type, Listener}} = State) ->
true -> true ->
Pubs = emqx_pd:reset_counter(outgoing_pubs), Pubs = emqx_pd:reset_counter(outgoing_pubs),
Bytes = emqx_pd:reset_counter(outgoing_bytes), Bytes = emqx_pd:reset_counter(outgoing_bytes),
{ok, check_oom(Pubs, Bytes, run_gc(Pubs, Bytes, State))}; OutStats = #{cnt => Pubs, oct => Bytes},
{ok, check_oom(run_gc(OutStats, State))};
false -> false ->
{ok, State} {ok, State}
end. end.
@ -994,23 +993,17 @@ check_limiter(
Data, Data,
WhenOk, WhenOk,
Msgs, Msgs,
#state{channel = Channel, limiter_timer = undefined, limiter = Limiter} = State #state{limiter_timer = undefined, limiter = Limiter} = State
) -> ) ->
case emqx_limiter_container:check_list(Needs, Limiter) of case emqx_limiter_container:check_list(Needs, Limiter) of
{ok, Limiter2} -> {ok, Limiter2} ->
WhenOk(Data, Msgs, State#state{limiter = Limiter2}); WhenOk(Data, Msgs, State#state{limiter = Limiter2});
{pause, Time, Limiter2} -> {pause, Time, Limiter2} ->
?SLOG_THROTTLE( ?SLOG(debug, #{
warning, msg => "pause_time_due_to_rate_limit",
#{ needs => Needs,
msg => socket_receive_paused_by_rate_limit, time_in_ms => Time
paused_ms => Time }),
},
#{
tag => "RATE",
clientid => emqx_channel:info(clientid, Channel)
}
),
Retry = #retry{ Retry = #retry{
types = [Type || {_, Type} <- Needs], types = [Type || {_, Type} <- Needs],
@ -1044,7 +1037,7 @@ check_limiter(
%% try to perform a retry %% try to perform a retry
-spec retry_limiter(state()) -> _. -spec retry_limiter(state()) -> _.
retry_limiter(#state{channel = Channel, limiter = Limiter} = State) -> retry_limiter(#state{limiter = Limiter} = State) ->
#retry{types = Types, data = Data, next = Next} = #retry{types = Types, data = Data, next = Next} =
emqx_limiter_container:get_retry_context(Limiter), emqx_limiter_container:get_retry_context(Limiter),
case emqx_limiter_container:retry_list(Types, Limiter) of case emqx_limiter_container:retry_list(Types, Limiter) of
@ -1058,17 +1051,11 @@ retry_limiter(#state{channel = Channel, limiter = Limiter} = State) ->
} }
); );
{pause, Time, Limiter2} -> {pause, Time, Limiter2} ->
?SLOG_THROTTLE( ?SLOG(debug, #{
warning, msg => "pause_time_due_to_rate_limit",
#{ types => Types,
msg => socket_receive_paused_by_rate_limit, time_in_ms => Time
paused_ms => Time }),
},
#{
tag => "RATE",
clientid => emqx_channel:info(clientid, Channel)
}
),
TRef = start_timer(Time, limit_timeout), TRef = start_timer(Time, limit_timeout),
@ -1081,36 +1068,25 @@ retry_limiter(#state{channel = Channel, limiter = Limiter} = State) ->
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Run GC and Check OOM %% Run GC and Check OOM
run_gc(Pubs, Bytes, State = #state{gc_state = GcSt, zone = Zone}) -> run_gc(Stats, State = #state{gc_state = GcSt, zone = Zone}) ->
case case
?ENABLED(GcSt) andalso not emqx_olp:backoff_gc(Zone) andalso ?ENABLED(GcSt) andalso not emqx_olp:backoff_gc(Zone) andalso
emqx_gc:run(Pubs, Bytes, GcSt) emqx_gc:run(Stats, GcSt)
of of
false -> State; false -> State;
{_IsGC, GcSt1} -> State#state{gc_state = GcSt1} {_IsGC, GcSt1} -> State#state{gc_state = GcSt1}
end. end.
check_oom(Pubs, Bytes, State = #state{channel = Channel}) -> check_oom(State = #state{channel = Channel}) ->
ShutdownPolicy = emqx_config:get_zone_conf( ShutdownPolicy = emqx_config:get_zone_conf(
emqx_channel:info(zone, Channel), [force_shutdown] emqx_channel:info(zone, Channel), [force_shutdown]
), ),
?tp(debug, check_oom, #{policy => ShutdownPolicy}),
case emqx_utils:check_oom(ShutdownPolicy) of case emqx_utils:check_oom(ShutdownPolicy) of
{shutdown, Reason} -> {shutdown, Reason} ->
%% triggers terminate/2 callback immediately %% triggers terminate/2 callback immediately
?tp(warning, check_oom_shutdown, #{
policy => ShutdownPolicy,
incoming_pubs => Pubs,
incoming_bytes => Bytes,
shutdown => Reason
}),
erlang:exit({shutdown, Reason}); erlang:exit({shutdown, Reason});
Result -> _ ->
?tp(debug, check_oom_ok, #{
policy => ShutdownPolicy,
incoming_pubs => Pubs,
incoming_bytes => Bytes,
result => Result
}),
ok ok
end, end,
State. State.
@ -1228,12 +1204,6 @@ inc_counter(Key, Inc) ->
_ = emqx_pd:inc_counter(Key, Inc), _ = emqx_pd:inc_counter(Key, Inc),
ok. ok.
enrich_state(#{parse_state := NParseState}, State) ->
Serialize = emqx_frame:serialize_opts(NParseState),
State#state{parse_state = NParseState, serialize = Serialize};
enrich_state(_, State) ->
State.
set_tcp_keepalive({quic, _Listener}) -> set_tcp_keepalive({quic, _Listener}) ->
ok; ok;
set_tcp_keepalive({Type, Id}) -> set_tcp_keepalive({Type, Id}) ->

View File

@ -18,7 +18,7 @@
-module(emqx_ds_schema). -module(emqx_ds_schema).
%% API: %% API:
-export([schema/0, translate_builtin_raft/1, translate_builtin_local/1]). -export([schema/0, translate_builtin/1]).
%% Behavior callbacks: %% Behavior callbacks:
-export([fields/1, desc/1, namespace/0]). -export([fields/1, desc/1, namespace/0]).
@ -32,51 +32,42 @@
%% Type declarations %% Type declarations
%%================================================================================ %%================================================================================
-ifndef(EMQX_RELEASE_EDITION).
-define(EMQX_RELEASE_EDITION, ce).
-endif.
-if(?EMQX_RELEASE_EDITION == ee).
-define(DEFAULT_BACKEND, builtin_raft).
-define(BUILTIN_BACKENDS, [ref(builtin_raft), ref(builtin_local)]).
-else.
-define(DEFAULT_BACKEND, builtin_local).
-define(BUILTIN_BACKENDS, [ref(builtin_local)]).
-endif.
%%================================================================================ %%================================================================================
%% API %% API
%%================================================================================ %%================================================================================
translate_builtin_raft( translate_builtin(
Backend = #{ Backend = #{
backend := builtin_raft, backend := builtin,
n_shards := NShards, n_shards := NShards,
n_sites := NSites, n_sites := NSites,
replication_factor := ReplFactor, replication_factor := ReplFactor,
layout := Layout layout := Layout
} }
) -> ) ->
Storage =
case Layout of
#{
type := wildcard_optimized,
bits_per_topic_level := BitsPerTopicLevel,
epoch_bits := EpochBits,
topic_index_bytes := TIBytes
} ->
{emqx_ds_storage_bitfield_lts, #{
bits_per_topic_level => BitsPerTopicLevel,
topic_index_bytes => TIBytes,
epoch_bits => EpochBits
}};
#{type := reference} ->
{emqx_ds_storage_reference, #{}}
end,
#{ #{
backend => builtin_raft, backend => builtin,
n_shards => NShards, n_shards => NShards,
n_sites => NSites, n_sites => NSites,
replication_factor => ReplFactor, replication_factor => ReplFactor,
replication_options => maps:get(replication_options, Backend, #{}), replication_options => maps:get(replication_options, Backend, #{}),
storage => translate_layout(Layout) storage => Storage
}.
translate_builtin_local(
#{
backend := builtin_local,
n_shards := NShards,
layout := Layout
}
) ->
#{
backend => builtin_local,
n_shards => NShards,
storage => translate_layout(Layout)
}. }.
%%================================================================================ %%================================================================================
@ -92,24 +83,24 @@ schema() ->
ds_schema(#{ ds_schema(#{
default => default =>
#{ #{
<<"backend">> => ?DEFAULT_BACKEND <<"backend">> => builtin
}, },
importance => ?IMPORTANCE_MEDIUM, importance => ?IMPORTANCE_MEDIUM,
desc => ?DESC(messages) desc => ?DESC(messages)
})} })}
]. ].
fields(builtin_local) -> fields(builtin) ->
%% Schema for the builtin_raft backend: %% Schema for the builtin backend:
[ [
{backend, {backend,
sc( sc(
builtin_local, builtin,
#{ #{
'readOnly' => true, 'readOnly' => true,
default => builtin_local, default => builtin,
importance => ?IMPORTANCE_MEDIUM, importance => ?IMPORTANCE_MEDIUM,
desc => ?DESC(backend_type) desc => ?DESC(builtin_backend)
} }
)}, )},
{'_config_handler', {'_config_handler',
@ -117,32 +108,27 @@ fields(builtin_local) ->
{module(), atom()}, {module(), atom()},
#{ #{
'readOnly' => true, 'readOnly' => true,
default => {?MODULE, translate_builtin_local}, default => {?MODULE, translate_builtin},
importance => ?IMPORTANCE_HIDDEN importance => ?IMPORTANCE_HIDDEN
} }
)}
| common_builtin_fields()
];
fields(builtin_raft) ->
%% Schema for the builtin_raft backend:
[
{backend,
sc(
builtin_raft,
#{
'readOnly' => true,
default => builtin_raft,
importance => ?IMPORTANCE_MEDIUM,
desc => ?DESC(backend_type)
}
)}, )},
{'_config_handler', {data_dir,
sc( sc(
{module(), atom()}, string(),
#{ #{
'readOnly' => true, mapping => "emqx_durable_storage.db_data_dir",
default => {?MODULE, translate_builtin_raft}, required => false,
importance => ?IMPORTANCE_HIDDEN importance => ?IMPORTANCE_MEDIUM,
desc => ?DESC(builtin_data_dir)
}
)},
{n_shards,
sc(
pos_integer(),
#{
default => 12,
importance => ?IMPORTANCE_MEDIUM,
desc => ?DESC(builtin_n_shards)
} }
)}, )},
%% TODO: Deprecate once cluster management and rebalancing is implemented. %% TODO: Deprecate once cluster management and rebalancing is implemented.
@ -171,10 +157,29 @@ fields(builtin_raft) ->
default => #{}, default => #{},
importance => ?IMPORTANCE_HIDDEN importance => ?IMPORTANCE_HIDDEN
} }
)},
{local_write_buffer,
sc(
ref(builtin_local_write_buffer),
#{
importance => ?IMPORTANCE_HIDDEN,
desc => ?DESC(builtin_local_write_buffer)
}
)},
{layout,
sc(
hoconsc:union(builtin_layouts()),
#{
desc => ?DESC(builtin_layout),
importance => ?IMPORTANCE_MEDIUM,
default =>
#{
<<"type">> => wildcard_optimized
}
}
)} )}
| common_builtin_fields()
]; ];
fields(builtin_write_buffer) -> fields(builtin_local_write_buffer) ->
[ [
{max_items, {max_items,
sc( sc(
@ -183,7 +188,7 @@ fields(builtin_write_buffer) ->
default => 1000, default => 1000,
mapping => "emqx_durable_storage.egress_batch_size", mapping => "emqx_durable_storage.egress_batch_size",
importance => ?IMPORTANCE_HIDDEN, importance => ?IMPORTANCE_HIDDEN,
desc => ?DESC(builtin_write_buffer_max_items) desc => ?DESC(builtin_local_write_buffer_max_items)
} }
)}, )},
{flush_interval, {flush_interval,
@ -193,7 +198,7 @@ fields(builtin_write_buffer) ->
default => 100, default => 100,
mapping => "emqx_durable_storage.egress_flush_interval", mapping => "emqx_durable_storage.egress_flush_interval",
importance => ?IMPORTANCE_HIDDEN, importance => ?IMPORTANCE_HIDDEN,
desc => ?DESC(builtin_write_buffer_flush_interval) desc => ?DESC(builtin_local_write_buffer_flush_interval)
} }
)} )}
]; ];
@ -234,42 +239,6 @@ fields(layout_builtin_wildcard_optimized) ->
} }
)} )}
]; ];
fields(layout_builtin_wildcard_optimized_v2) ->
[
{type,
sc(
wildcard_optimized_v2,
#{
'readOnly' => true,
default => wildcard_optimized_v2,
desc => ?DESC(layout_builtin_wildcard_optimized_type)
}
)},
{bytes_per_topic_level,
sc(
range(1, 16),
#{
default => 8,
importance => ?IMPORTANCE_HIDDEN
}
)},
{topic_index_bytes,
sc(
pos_integer(),
#{
default => 8,
importance => ?IMPORTANCE_HIDDEN
}
)},
{serialization_schema,
sc(
emqx_ds_msg_serializer:schema(),
#{
default => v1,
importance => ?IMPORTANCE_HIDDEN
}
)}
];
fields(layout_builtin_reference) -> fields(layout_builtin_reference) ->
[ [
{type, {type,
@ -278,65 +247,17 @@ fields(layout_builtin_reference) ->
#{ #{
'readOnly' => true, 'readOnly' => true,
importance => ?IMPORTANCE_LOW, importance => ?IMPORTANCE_LOW,
default => reference,
desc => ?DESC(layout_builtin_reference_type) desc => ?DESC(layout_builtin_reference_type)
} }
)} )}
]. ].
common_builtin_fields() -> desc(builtin) ->
[ ?DESC(builtin);
{data_dir, desc(builtin_local_write_buffer) ->
sc( ?DESC(builtin_local_write_buffer);
string(),
#{
mapping => "emqx_durable_storage.db_data_dir",
required => false,
importance => ?IMPORTANCE_MEDIUM,
desc => ?DESC(builtin_data_dir)
}
)},
{n_shards,
sc(
pos_integer(),
#{
default => 16,
importance => ?IMPORTANCE_MEDIUM,
desc => ?DESC(builtin_n_shards)
}
)},
{local_write_buffer,
sc(
ref(builtin_write_buffer),
#{
importance => ?IMPORTANCE_HIDDEN,
desc => ?DESC(builtin_write_buffer)
}
)},
{layout,
sc(
hoconsc:union(builtin_layouts()),
#{
desc => ?DESC(builtin_layout),
importance => ?IMPORTANCE_MEDIUM,
default =>
#{
<<"type">> => wildcard_optimized_v2
}
}
)}
].
desc(builtin_raft) ->
?DESC(builtin_raft);
desc(builtin_local) ->
?DESC(builtin_local);
desc(builtin_write_buffer) ->
?DESC(builtin_write_buffer);
desc(layout_builtin_wildcard_optimized) -> desc(layout_builtin_wildcard_optimized) ->
?DESC(layout_builtin_wildcard_optimized); ?DESC(layout_builtin_wildcard_optimized);
desc(layout_builtin_wildcard_optimized_v2) ->
?DESC(layout_builtin_wildcard_optimized);
desc(layout_builtin_reference) -> desc(layout_builtin_reference) ->
?DESC(layout_builtin_reference); ?DESC(layout_builtin_reference);
desc(_) -> desc(_) ->
@ -346,40 +267,12 @@ desc(_) ->
%% Internal functions %% Internal functions
%%================================================================================ %%================================================================================
translate_layout(
#{
type := wildcard_optimized_v2,
bytes_per_topic_level := BytesPerTopicLevel,
topic_index_bytes := TopicIndexBytes,
serialization_schema := SSchema
}
) ->
{emqx_ds_storage_skipstream_lts, #{
wildcard_hash_bytes => BytesPerTopicLevel,
topic_index_bytes => TopicIndexBytes,
serialization_schema => SSchema
}};
translate_layout(
#{
type := wildcard_optimized,
bits_per_topic_level := BitsPerTopicLevel,
epoch_bits := EpochBits,
topic_index_bytes := TIBytes
}
) ->
{emqx_ds_storage_bitfield_lts, #{
bits_per_topic_level => BitsPerTopicLevel,
topic_index_bytes => TIBytes,
epoch_bits => EpochBits
}};
translate_layout(#{type := reference}) ->
{emqx_ds_storage_reference, #{}}.
ds_schema(Options) -> ds_schema(Options) ->
sc( sc(
hoconsc:union( hoconsc:union([
?BUILTIN_BACKENDS ++ emqx_schema_hooks:injection_point('durable_storage.backends', []) ref(builtin)
), | emqx_schema_hooks:injection_point('durable_storage.backends', [])
]),
Options Options
). ).
@ -388,11 +281,7 @@ builtin_layouts() ->
%% suitable for production use. However, it's very simple and %% suitable for production use. However, it's very simple and
%% produces a very predictabale replay order, which can be useful %% produces a very predictabale replay order, which can be useful
%% for testing and debugging: %% for testing and debugging:
[ [ref(layout_builtin_wildcard_optimized), ref(layout_builtin_reference)].
ref(layout_builtin_wildcard_optimized_v2),
ref(layout_builtin_wildcard_optimized),
ref(layout_builtin_reference)
].
sc(Type, Meta) -> hoconsc:mk(Type, Meta). sc(Type, Meta) -> hoconsc:mk(Type, Meta).

View File

@ -1,148 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_external_broker).
-callback forward(emqx_types:delivery()) ->
emqx_types:publish_result().
-callback add_route(emqx_types:topic()) -> ok.
-callback delete_route(emqx_types:topic()) -> ok.
-callback add_shared_route(emqx_types:topic(), emqx_types:group()) -> ok.
-callback delete_shared_route(emqx_types:topic(), emqx_types:group()) -> ok.
-callback add_persistent_route(emqx_types:topic(), emqx_persistent_session_ds:id()) -> ok.
-callback delete_persistent_route(emqx_types:topic(), emqx_persistent_session_ds:id()) -> ok.
-type dest() :: term().
-export([
%% Registration
provider/0,
register_provider/1,
unregister_provider/1,
%% Forwarding
forward/1,
%% Routing updates
add_route/1,
delete_route/1,
add_shared_route/2,
delete_shared_route/2,
add_persistent_route/2,
delete_persistent_route/2,
add_persistent_shared_route/3,
delete_persistent_shared_route/3
]).
-export_type([dest/0]).
-include("logger.hrl").
-define(PROVIDER, {?MODULE, external_broker}).
-define(safe_with_provider(IfRegistered, IfNotRegistered),
case persistent_term:get(?PROVIDER, undefined) of
undefined ->
IfNotRegistered;
Provider ->
try
Provider:IfRegistered
catch
Err:Reason:St ->
?SLOG_THROTTLE(error, #{
msg => external_broker_crashed,
provider => Provider,
callback => ?FUNCTION_NAME,
stacktrace => St,
error => Err,
reason => Reason
}),
{error, Reason}
end
end
).
%% TODO: provider API copied from emqx_external_traces,
%% but it can be moved to a common module.
%%--------------------------------------------------------------------
%% Provider API
%%--------------------------------------------------------------------
-spec register_provider(module()) -> ok | {error, term()}.
register_provider(Module) when is_atom(Module) ->
case is_valid_provider(Module) of
true ->
persistent_term:put(?PROVIDER, Module);
false ->
{error, invalid_provider}
end.
-spec unregister_provider(module()) -> ok | {error, term()}.
unregister_provider(Module) ->
case persistent_term:get(?PROVIDER, undefined) of
Module ->
persistent_term:erase(?PROVIDER),
ok;
_ ->
{error, not_registered}
end.
-spec provider() -> module() | undefined.
provider() ->
persistent_term:get(?PROVIDER, undefined).
%%--------------------------------------------------------------------
%% Broker API
%%--------------------------------------------------------------------
forward(Delivery) ->
?safe_with_provider(?FUNCTION_NAME(Delivery), []).
add_route(Topic) ->
?safe_with_provider(?FUNCTION_NAME(Topic), ok).
delete_route(Topic) ->
?safe_with_provider(?FUNCTION_NAME(Topic), ok).
add_shared_route(Topic, Group) ->
?safe_with_provider(?FUNCTION_NAME(Topic, Group), ok).
delete_shared_route(Topic, Group) ->
?safe_with_provider(?FUNCTION_NAME(Topic, Group), ok).
add_persistent_route(Topic, ID) ->
?safe_with_provider(?FUNCTION_NAME(Topic, ID), ok).
delete_persistent_route(Topic, ID) ->
?safe_with_provider(?FUNCTION_NAME(Topic, ID), ok).
add_persistent_shared_route(Topic, Group, ID) ->
?safe_with_provider(?FUNCTION_NAME(Topic, Group, ID), ok).
delete_persistent_shared_route(Topic, Group, ID) ->
?safe_with_provider(?FUNCTION_NAME(Topic, Group, ID), ok).
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
is_valid_provider(Module) ->
lists:all(
fun({F, A}) -> erlang:function_exported(Module, F, A) end,
?MODULE:behaviour_info(callbacks)
).

View File

@ -29,12 +29,11 @@
parse/2, parse/2,
serialize_fun/0, serialize_fun/0,
serialize_fun/1, serialize_fun/1,
initial_serialize_opts/1, serialize_opts/0,
serialize_opts/1, serialize_opts/1,
serialize_pkt/2, serialize_pkt/2,
serialize/1, serialize/1,
serialize/2, serialize/2
serialize/3
]). ]).
-export([describe_state/1]). -export([describe_state/1]).
@ -85,7 +84,7 @@
-define(MULTIPLIER_MAX, 16#200000). -define(MULTIPLIER_MAX, 16#200000).
-dialyzer({no_match, [serialize_utf8_string/3]}). -dialyzer({no_match, [serialize_utf8_string/2]}).
%% @doc Describe state for logging. %% @doc Describe state for logging.
describe_state(?NONE(_Opts)) -> describe_state(?NONE(_Opts)) ->
@ -267,50 +266,28 @@ packet(Header, Variable) ->
packet(Header, Variable, Payload) -> packet(Header, Variable, Payload) ->
#mqtt_packet{header = Header, variable = Variable, payload = Payload}. #mqtt_packet{header = Header, variable = Variable, payload = Payload}.
parse_connect(FrameBin, Options = #{strict_mode := StrictMode}) -> parse_connect(FrameBin, StrictMode) ->
{ProtoName, Rest0} = parse_utf8_string_with_cause(FrameBin, StrictMode, invalid_proto_name), {ProtoName, Rest} = parse_utf8_string_with_cause(FrameBin, StrictMode, invalid_proto_name),
%% No need to parse and check proto_ver if proto_name is invalid, check it first case ProtoName of
%% And the matching check of `proto_name` and `proto_ver` fields will be done in `emqx_packet:check_proto_ver/2` <<"MQTT">> ->
_ = validate_proto_name(ProtoName), ok;
{IsBridge, ProtoVer, Rest2} = parse_connect_proto_ver(Rest0), <<"MQIsdp">> ->
NOptions = Options#{version => ProtoVer}, ok;
try _ ->
do_parse_connect(ProtoName, IsBridge, ProtoVer, Rest2, StrictMode) %% from spec: the server MAY send disconnect with reason code 0x84
catch %% we chose to close socket because the client is likely not talking MQTT anyway
throw:{?FRAME_PARSE_ERROR, ReasonM} when is_map(ReasonM) -> ?PARSE_ERR(#{
?PARSE_ERR( cause => invalid_proto_name,
ReasonM#{ expected => <<"'MQTT' or 'MQIsdp'">>,
proto_ver => ProtoVer, received => ProtoName
proto_name => ProtoName, })
parse_state => ?NONE(NOptions) end,
} parse_connect2(ProtoName, Rest, StrictMode).
);
throw:{?FRAME_PARSE_ERROR, Reason} ->
?PARSE_ERR(
#{
cause => Reason,
proto_ver => ProtoVer,
proto_name => ProtoName,
parse_state => ?NONE(NOptions)
}
)
end.
do_parse_connect( parse_connect2(
ProtoName, ProtoName,
IsBridge, <<BridgeTag:4, ProtoVer:4, UsernameFlagB:1, PasswordFlagB:1, WillRetainB:1, WillQoS:2,
ProtoVer, WillFlagB:1, CleanStart:1, Reserved:1, KeepAlive:16/big, Rest2/binary>>,
<<
UsernameFlagB:1,
PasswordFlagB:1,
WillRetainB:1,
WillQoS:2,
WillFlagB:1,
CleanStart:1,
Reserved:1,
KeepAlive:16/big,
Rest/binary
>>,
StrictMode StrictMode
) -> ) ->
_ = validate_connect_reserved(Reserved), _ = validate_connect_reserved(Reserved),
@ -325,14 +302,14 @@ do_parse_connect(
UsernameFlag = bool(UsernameFlagB), UsernameFlag = bool(UsernameFlagB),
PasswordFlag = bool(PasswordFlagB) PasswordFlag = bool(PasswordFlagB)
), ),
{Properties, Rest3} = parse_properties(Rest, ProtoVer, StrictMode), {Properties, Rest3} = parse_properties(Rest2, ProtoVer, StrictMode),
{ClientId, Rest4} = parse_utf8_string_with_cause(Rest3, StrictMode, invalid_clientid), {ClientId, Rest4} = parse_utf8_string_with_cause(Rest3, StrictMode, invalid_clientid),
ConnPacket = #mqtt_packet_connect{ ConnPacket = #mqtt_packet_connect{
proto_name = ProtoName, proto_name = ProtoName,
proto_ver = ProtoVer, proto_ver = ProtoVer,
%% For bridge mode, non-standard implementation %% For bridge mode, non-standard implementation
%% Invented by mosquitto, named 'try_private': https://mosquitto.org/man/mosquitto-conf-5.html %% Invented by mosquitto, named 'try_private': https://mosquitto.org/man/mosquitto-conf-5.html
is_bridge = IsBridge, is_bridge = (BridgeTag =:= 8),
clean_start = bool(CleanStart), clean_start = bool(CleanStart),
will_flag = WillFlag, will_flag = WillFlag,
will_qos = WillQoS, will_qos = WillQoS,
@ -365,16 +342,16 @@ do_parse_connect(
unexpected_trailing_bytes => size(Rest7) unexpected_trailing_bytes => size(Rest7)
}) })
end; end;
do_parse_connect(_ProtoName, _IsBridge, _ProtoVer, Bin, _StrictMode) -> parse_connect2(_ProtoName, Bin, _StrictMode) ->
%% sent less than 24 bytes %% sent less than 32 bytes
?PARSE_ERR(#{cause => malformed_connect, header_bytes => Bin}). ?PARSE_ERR(#{cause => malformed_connect, header_bytes => Bin}).
parse_packet( parse_packet(
#mqtt_packet_header{type = ?CONNECT}, #mqtt_packet_header{type = ?CONNECT},
FrameBin, FrameBin,
Options #{strict_mode := StrictMode}
) -> ) ->
parse_connect(FrameBin, Options); parse_connect(FrameBin, StrictMode);
parse_packet( parse_packet(
#mqtt_packet_header{type = ?CONNACK}, #mqtt_packet_header{type = ?CONNACK},
<<AckFlags:8, ReasonCode:8, Rest/binary>>, <<AckFlags:8, ReasonCode:8, Rest/binary>>,
@ -538,12 +515,6 @@ parse_packet_id(<<PacketId:16/big, Rest/binary>>) ->
parse_packet_id(_) -> parse_packet_id(_) ->
?PARSE_ERR(invalid_packet_id). ?PARSE_ERR(invalid_packet_id).
parse_connect_proto_ver(<<BridgeTag:4, ProtoVer:4, Rest/binary>>) ->
{_IsBridge = (BridgeTag =:= 8), ProtoVer, Rest};
parse_connect_proto_ver(Bin) ->
%% sent less than 1 bytes or empty
?PARSE_ERR(#{cause => malformed_connect, header_bytes => Bin}).
parse_properties(Bin, Ver, _StrictMode) when Ver =/= ?MQTT_PROTO_V5 -> parse_properties(Bin, Ver, _StrictMode) when Ver =/= ?MQTT_PROTO_V5 ->
{#{}, Bin}; {#{}, Bin};
%% TODO: version mess? %% TODO: version mess?
@ -754,53 +725,43 @@ serialize_fun() -> serialize_fun(?DEFAULT_OPTIONS).
serialize_fun(#mqtt_packet_connect{proto_ver = ProtoVer, properties = ConnProps}) -> serialize_fun(#mqtt_packet_connect{proto_ver = ProtoVer, properties = ConnProps}) ->
MaxSize = get_property('Maximum-Packet-Size', ConnProps, ?MAX_PACKET_SIZE), MaxSize = get_property('Maximum-Packet-Size', ConnProps, ?MAX_PACKET_SIZE),
serialize_fun(#{version => ProtoVer, max_size => MaxSize, strict_mode => false}); serialize_fun(#{version => ProtoVer, max_size => MaxSize});
serialize_fun(#{version := Ver, max_size := MaxSize, strict_mode := StrictMode}) -> serialize_fun(#{version := Ver, max_size := MaxSize}) ->
fun(Packet) -> fun(Packet) ->
IoData = serialize(Packet, Ver, StrictMode), IoData = serialize(Packet, Ver),
case is_too_large(IoData, MaxSize) of case is_too_large(IoData, MaxSize) of
true -> <<>>; true -> <<>>;
false -> IoData false -> IoData
end end
end. end.
initial_serialize_opts(Opts) -> serialize_opts() ->
maps:merge(?DEFAULT_OPTIONS, Opts). ?DEFAULT_OPTIONS.
serialize_opts(?NONE(Options)) ->
maps:merge(?DEFAULT_OPTIONS, Options);
serialize_opts(#mqtt_packet_connect{proto_ver = ProtoVer, properties = ConnProps}) -> serialize_opts(#mqtt_packet_connect{proto_ver = ProtoVer, properties = ConnProps}) ->
MaxSize = get_property('Maximum-Packet-Size', ConnProps, ?MAX_PACKET_SIZE), MaxSize = get_property('Maximum-Packet-Size', ConnProps, ?MAX_PACKET_SIZE),
#{version => ProtoVer, max_size => MaxSize, strict_mode => false}. #{version => ProtoVer, max_size => MaxSize}.
serialize_pkt(Packet, #{version := Ver, max_size := MaxSize, strict_mode := StrictMode}) -> serialize_pkt(Packet, #{version := Ver, max_size := MaxSize}) ->
IoData = serialize(Packet, Ver, StrictMode), IoData = serialize(Packet, Ver),
case is_too_large(IoData, MaxSize) of case is_too_large(IoData, MaxSize) of
true -> <<>>; true -> <<>>;
false -> IoData false -> IoData
end. end.
-spec serialize(emqx_types:packet()) -> iodata(). -spec serialize(emqx_types:packet()) -> iodata().
serialize(Packet) -> serialize(Packet, ?MQTT_PROTO_V4, false). serialize(Packet) -> serialize(Packet, ?MQTT_PROTO_V4).
serialize(Packet, Ver) -> serialize(Packet, Ver, false). -spec serialize(emqx_types:packet(), emqx_types:proto_ver()) -> iodata().
-spec serialize(emqx_types:packet(), emqx_types:proto_ver(), boolean()) -> iodata().
serialize( serialize(
#mqtt_packet{ #mqtt_packet{
header = Header, header = Header,
variable = Variable, variable = Variable,
payload = Payload payload = Payload
}, },
Ver, Ver
StrictMode
) -> ) ->
serialize( serialize(Header, serialize_variable(Variable, Ver), serialize_payload(Payload)).
Header,
serialize_variable(Variable, Ver, StrictMode),
serialize_payload(Payload),
StrictMode
).
serialize( serialize(
#mqtt_packet_header{ #mqtt_packet_header{
@ -810,8 +771,7 @@ serialize(
retain = Retain retain = Retain
}, },
VariableBin, VariableBin,
PayloadBin, PayloadBin
_StrictMode
) when ) when
?CONNECT =< Type andalso Type =< ?AUTH ?CONNECT =< Type andalso Type =< ?AUTH
-> ->
@ -843,8 +803,7 @@ serialize_variable(
username = Username, username = Username,
password = Password password = Password
}, },
_Ver, _Ver
StrictMode
) -> ) ->
[ [
serialize_binary_data(ProtoName), serialize_binary_data(ProtoName),
@ -862,20 +821,20 @@ serialize_variable(
0:1, 0:1,
KeepAlive:16/big-unsigned-integer KeepAlive:16/big-unsigned-integer
>>, >>,
serialize_properties(Properties, ProtoVer, StrictMode), serialize_properties(Properties, ProtoVer),
serialize_utf8_string(ClientId, StrictMode), serialize_utf8_string(ClientId),
case WillFlag of case WillFlag of
true -> true ->
[ [
serialize_properties(WillProps, ProtoVer, StrictMode), serialize_properties(WillProps, ProtoVer),
serialize_utf8_string(WillTopic, StrictMode), serialize_utf8_string(WillTopic),
serialize_binary_data(WillPayload) serialize_binary_data(WillPayload)
]; ];
false -> false ->
<<>> <<>>
end, end,
serialize_utf8_string(Username, true, StrictMode), serialize_utf8_string(Username, true),
serialize_utf8_string(Password, true, StrictMode) serialize_utf8_string(Password, true)
]; ];
serialize_variable( serialize_variable(
#mqtt_packet_connack{ #mqtt_packet_connack{
@ -883,28 +842,26 @@ serialize_variable(
reason_code = ReasonCode, reason_code = ReasonCode,
properties = Properties properties = Properties
}, },
Ver, Ver
StrictMode
) -> ) ->
[AckFlags, ReasonCode, serialize_properties(Properties, Ver, StrictMode)]; [AckFlags, ReasonCode, serialize_properties(Properties, Ver)];
serialize_variable( serialize_variable(
#mqtt_packet_publish{ #mqtt_packet_publish{
topic_name = TopicName, topic_name = TopicName,
packet_id = PacketId, packet_id = PacketId,
properties = Properties properties = Properties
}, },
Ver, Ver
StrictMode
) -> ) ->
[ [
serialize_utf8_string(TopicName, StrictMode), serialize_utf8_string(TopicName),
case PacketId of case PacketId of
undefined -> <<>>; undefined -> <<>>;
_ -> <<PacketId:16/big-unsigned-integer>> _ -> <<PacketId:16/big-unsigned-integer>>
end, end,
serialize_properties(Properties, Ver, StrictMode) serialize_properties(Properties, Ver)
]; ];
serialize_variable(#mqtt_packet_puback{packet_id = PacketId}, Ver, _StrictMode) when serialize_variable(#mqtt_packet_puback{packet_id = PacketId}, Ver) when
Ver == ?MQTT_PROTO_V3; Ver == ?MQTT_PROTO_V4 Ver == ?MQTT_PROTO_V3; Ver == ?MQTT_PROTO_V4
-> ->
<<PacketId:16/big-unsigned-integer>>; <<PacketId:16/big-unsigned-integer>>;
@ -914,13 +871,12 @@ serialize_variable(
reason_code = ReasonCode, reason_code = ReasonCode,
properties = Properties properties = Properties
}, },
Ver = ?MQTT_PROTO_V5, Ver = ?MQTT_PROTO_V5
StrictMode
) -> ) ->
[ [
<<PacketId:16/big-unsigned-integer>>, <<PacketId:16/big-unsigned-integer>>,
ReasonCode, ReasonCode,
serialize_properties(Properties, Ver, StrictMode) serialize_properties(Properties, Ver)
]; ];
serialize_variable( serialize_variable(
#mqtt_packet_subscribe{ #mqtt_packet_subscribe{
@ -928,13 +884,12 @@ serialize_variable(
properties = Properties, properties = Properties,
topic_filters = TopicFilters topic_filters = TopicFilters
}, },
Ver, Ver
StrictMode
) -> ) ->
[ [
<<PacketId:16/big-unsigned-integer>>, <<PacketId:16/big-unsigned-integer>>,
serialize_properties(Properties, Ver, StrictMode), serialize_properties(Properties, Ver),
serialize_topic_filters(subscribe, TopicFilters, Ver, StrictMode) serialize_topic_filters(subscribe, TopicFilters, Ver)
]; ];
serialize_variable( serialize_variable(
#mqtt_packet_suback{ #mqtt_packet_suback{
@ -942,12 +897,11 @@ serialize_variable(
properties = Properties, properties = Properties,
reason_codes = ReasonCodes reason_codes = ReasonCodes
}, },
Ver, Ver
StrictMode
) -> ) ->
[ [
<<PacketId:16/big-unsigned-integer>>, <<PacketId:16/big-unsigned-integer>>,
serialize_properties(Properties, Ver, StrictMode), serialize_properties(Properties, Ver),
serialize_reason_codes(ReasonCodes) serialize_reason_codes(ReasonCodes)
]; ];
serialize_variable( serialize_variable(
@ -956,13 +910,12 @@ serialize_variable(
properties = Properties, properties = Properties,
topic_filters = TopicFilters topic_filters = TopicFilters
}, },
Ver, Ver
StrictMode
) -> ) ->
[ [
<<PacketId:16/big-unsigned-integer>>, <<PacketId:16/big-unsigned-integer>>,
serialize_properties(Properties, Ver, StrictMode), serialize_properties(Properties, Ver),
serialize_topic_filters(unsubscribe, TopicFilters, Ver, StrictMode) serialize_topic_filters(unsubscribe, TopicFilters, Ver)
]; ];
serialize_variable( serialize_variable(
#mqtt_packet_unsuback{ #mqtt_packet_unsuback{
@ -970,15 +923,14 @@ serialize_variable(
properties = Properties, properties = Properties,
reason_codes = ReasonCodes reason_codes = ReasonCodes
}, },
Ver, Ver
StrictMode
) -> ) ->
[ [
<<PacketId:16/big-unsigned-integer>>, <<PacketId:16/big-unsigned-integer>>,
serialize_properties(Properties, Ver, StrictMode), serialize_properties(Properties, Ver),
serialize_reason_codes(ReasonCodes) serialize_reason_codes(ReasonCodes)
]; ];
serialize_variable(#mqtt_packet_disconnect{}, Ver, _StrictMode) when serialize_variable(#mqtt_packet_disconnect{}, Ver) when
Ver == ?MQTT_PROTO_V3; Ver == ?MQTT_PROTO_V4 Ver == ?MQTT_PROTO_V3; Ver == ?MQTT_PROTO_V4
-> ->
<<>>; <<>>;
@ -987,115 +939,110 @@ serialize_variable(
reason_code = ReasonCode, reason_code = ReasonCode,
properties = Properties properties = Properties
}, },
Ver = ?MQTT_PROTO_V5, Ver = ?MQTT_PROTO_V5
StrictMode
) -> ) ->
[ReasonCode, serialize_properties(Properties, Ver, StrictMode)]; [ReasonCode, serialize_properties(Properties, Ver)];
serialize_variable(#mqtt_packet_disconnect{}, _Ver, _StrictMode) -> serialize_variable(#mqtt_packet_disconnect{}, _Ver) ->
<<>>; <<>>;
serialize_variable( serialize_variable(
#mqtt_packet_auth{ #mqtt_packet_auth{
reason_code = ReasonCode, reason_code = ReasonCode,
properties = Properties properties = Properties
}, },
Ver = ?MQTT_PROTO_V5, Ver = ?MQTT_PROTO_V5
StrictMode
) -> ) ->
[ReasonCode, serialize_properties(Properties, Ver, StrictMode)]; [ReasonCode, serialize_properties(Properties, Ver)];
serialize_variable(PacketId, ?MQTT_PROTO_V3, _StrictMode) when is_integer(PacketId) -> serialize_variable(PacketId, ?MQTT_PROTO_V3) when is_integer(PacketId) ->
<<PacketId:16/big-unsigned-integer>>; <<PacketId:16/big-unsigned-integer>>;
serialize_variable(PacketId, ?MQTT_PROTO_V4, _StrictMode) when is_integer(PacketId) -> serialize_variable(PacketId, ?MQTT_PROTO_V4) when is_integer(PacketId) ->
<<PacketId:16/big-unsigned-integer>>; <<PacketId:16/big-unsigned-integer>>;
serialize_variable(undefined, _Ver, _StrictMode) -> serialize_variable(undefined, _Ver) ->
<<>>. <<>>.
serialize_payload(undefined) -> <<>>; serialize_payload(undefined) -> <<>>;
serialize_payload(Bin) -> Bin. serialize_payload(Bin) -> Bin.
serialize_properties(_Props, Ver, _StrictMode) when Ver =/= ?MQTT_PROTO_V5 -> serialize_properties(_Props, Ver) when Ver =/= ?MQTT_PROTO_V5 ->
<<>>; <<>>;
serialize_properties(Props, ?MQTT_PROTO_V5, StrictMode) -> serialize_properties(Props, ?MQTT_PROTO_V5) ->
serialize_properties(Props, StrictMode). serialize_properties(Props).
serialize_properties(undefined, _StrictMode) -> serialize_properties(undefined) ->
<<0>>; <<0>>;
serialize_properties(Props, _StrictMode) when map_size(Props) == 0 -> serialize_properties(Props) when map_size(Props) == 0 ->
<<0>>; <<0>>;
serialize_properties(Props, StrictMode) when is_map(Props) -> serialize_properties(Props) when is_map(Props) ->
Bin = << Bin = <<<<(serialize_property(Prop, Val))/binary>> || {Prop, Val} <- maps:to_list(Props)>>,
<<(serialize_property(Prop, Val, StrictMode))/binary>>
|| {Prop, Val} <- maps:to_list(Props)
>>,
[serialize_variable_byte_integer(byte_size(Bin)), Bin]. [serialize_variable_byte_integer(byte_size(Bin)), Bin].
serialize_property(_, Disabled, _StrictMode) when Disabled =:= disabled; Disabled =:= undefined -> serialize_property(_, Disabled) when Disabled =:= disabled; Disabled =:= undefined ->
<<>>; <<>>;
serialize_property(internal_extra, _, _StrictMode) -> serialize_property(internal_extra, _) ->
<<>>; <<>>;
serialize_property('Payload-Format-Indicator', Val, _StrictMode) -> serialize_property('Payload-Format-Indicator', Val) ->
<<16#01, Val>>; <<16#01, Val>>;
serialize_property('Message-Expiry-Interval', Val, _StrictMode) -> serialize_property('Message-Expiry-Interval', Val) ->
<<16#02, Val:32/big>>; <<16#02, Val:32/big>>;
serialize_property('Content-Type', Val, StrictMode) -> serialize_property('Content-Type', Val) ->
<<16#03, (serialize_utf8_string(Val, StrictMode))/binary>>; <<16#03, (serialize_utf8_string(Val))/binary>>;
serialize_property('Response-Topic', Val, StrictMode) -> serialize_property('Response-Topic', Val) ->
<<16#08, (serialize_utf8_string(Val, StrictMode))/binary>>; <<16#08, (serialize_utf8_string(Val))/binary>>;
serialize_property('Correlation-Data', Val, _StrictMode) -> serialize_property('Correlation-Data', Val) ->
<<16#09, (byte_size(Val)):16, Val/binary>>; <<16#09, (byte_size(Val)):16, Val/binary>>;
serialize_property('Subscription-Identifier', Val, _StrictMode) -> serialize_property('Subscription-Identifier', Val) ->
<<16#0B, (serialize_variable_byte_integer(Val))/binary>>; <<16#0B, (serialize_variable_byte_integer(Val))/binary>>;
serialize_property('Session-Expiry-Interval', Val, _StrictMode) -> serialize_property('Session-Expiry-Interval', Val) ->
<<16#11, Val:32/big>>; <<16#11, Val:32/big>>;
serialize_property('Assigned-Client-Identifier', Val, StrictMode) -> serialize_property('Assigned-Client-Identifier', Val) ->
<<16#12, (serialize_utf8_string(Val, StrictMode))/binary>>; <<16#12, (serialize_utf8_string(Val))/binary>>;
serialize_property('Server-Keep-Alive', Val, _StrictMode) -> serialize_property('Server-Keep-Alive', Val) ->
<<16#13, Val:16/big>>; <<16#13, Val:16/big>>;
serialize_property('Authentication-Method', Val, StrictMode) -> serialize_property('Authentication-Method', Val) ->
<<16#15, (serialize_utf8_string(Val, StrictMode))/binary>>; <<16#15, (serialize_utf8_string(Val))/binary>>;
serialize_property('Authentication-Data', Val, _StrictMode) -> serialize_property('Authentication-Data', Val) ->
<<16#16, (iolist_size(Val)):16, Val/binary>>; <<16#16, (iolist_size(Val)):16, Val/binary>>;
serialize_property('Request-Problem-Information', Val, _StrictMode) -> serialize_property('Request-Problem-Information', Val) ->
<<16#17, Val>>; <<16#17, Val>>;
serialize_property('Will-Delay-Interval', Val, _StrictMode) -> serialize_property('Will-Delay-Interval', Val) ->
<<16#18, Val:32/big>>; <<16#18, Val:32/big>>;
serialize_property('Request-Response-Information', Val, _StrictMode) -> serialize_property('Request-Response-Information', Val) ->
<<16#19, Val>>; <<16#19, Val>>;
serialize_property('Response-Information', Val, StrictMode) -> serialize_property('Response-Information', Val) ->
<<16#1A, (serialize_utf8_string(Val, StrictMode))/binary>>; <<16#1A, (serialize_utf8_string(Val))/binary>>;
serialize_property('Server-Reference', Val, StrictMode) -> serialize_property('Server-Reference', Val) ->
<<16#1C, (serialize_utf8_string(Val, StrictMode))/binary>>; <<16#1C, (serialize_utf8_string(Val))/binary>>;
serialize_property('Reason-String', Val, StrictMode) -> serialize_property('Reason-String', Val) ->
<<16#1F, (serialize_utf8_string(Val, StrictMode))/binary>>; <<16#1F, (serialize_utf8_string(Val))/binary>>;
serialize_property('Receive-Maximum', Val, _StrictMode) -> serialize_property('Receive-Maximum', Val) ->
<<16#21, Val:16/big>>; <<16#21, Val:16/big>>;
serialize_property('Topic-Alias-Maximum', Val, _StrictMode) -> serialize_property('Topic-Alias-Maximum', Val) ->
<<16#22, Val:16/big>>; <<16#22, Val:16/big>>;
serialize_property('Topic-Alias', Val, _StrictMode) -> serialize_property('Topic-Alias', Val) ->
<<16#23, Val:16/big>>; <<16#23, Val:16/big>>;
serialize_property('Maximum-QoS', Val, _StrictMode) -> serialize_property('Maximum-QoS', Val) ->
<<16#24, Val>>; <<16#24, Val>>;
serialize_property('Retain-Available', Val, _StrictMode) -> serialize_property('Retain-Available', Val) ->
<<16#25, Val>>; <<16#25, Val>>;
serialize_property('User-Property', {Key, Val}, StrictMode) -> serialize_property('User-Property', {Key, Val}) ->
<<16#26, (serialize_utf8_pair(Key, Val, StrictMode))/binary>>; <<16#26, (serialize_utf8_pair({Key, Val}))/binary>>;
serialize_property('User-Property', Props, StrictMode) when is_list(Props) -> serialize_property('User-Property', Props) when is_list(Props) ->
<< <<
<<(serialize_property('User-Property', {Key, Val}, StrictMode))/binary>> <<(serialize_property('User-Property', {Key, Val}))/binary>>
|| {Key, Val} <- Props || {Key, Val} <- Props
>>; >>;
serialize_property('Maximum-Packet-Size', Val, _StrictMode) -> serialize_property('Maximum-Packet-Size', Val) ->
<<16#27, Val:32/big>>; <<16#27, Val:32/big>>;
serialize_property('Wildcard-Subscription-Available', Val, _StrictMode) -> serialize_property('Wildcard-Subscription-Available', Val) ->
<<16#28, Val>>; <<16#28, Val>>;
serialize_property('Subscription-Identifier-Available', Val, _StrictMode) -> serialize_property('Subscription-Identifier-Available', Val) ->
<<16#29, Val>>; <<16#29, Val>>;
serialize_property('Shared-Subscription-Available', Val, _StrictMode) -> serialize_property('Shared-Subscription-Available', Val) ->
<<16#2A, Val>>. <<16#2A, Val>>.
serialize_topic_filters(subscribe, TopicFilters, ?MQTT_PROTO_V5, StrictMode) -> serialize_topic_filters(subscribe, TopicFilters, ?MQTT_PROTO_V5) ->
<< <<
<< <<
(serialize_utf8_string(Topic, StrictMode))/binary, (serialize_utf8_string(Topic))/binary,
?RESERVED:2, ?RESERVED:2,
Rh:2, Rh:2,
(flag(Rap)):1, (flag(Rap)):1,
@ -1104,42 +1051,37 @@ serialize_topic_filters(subscribe, TopicFilters, ?MQTT_PROTO_V5, StrictMode) ->
>> >>
|| {Topic, #{rh := Rh, rap := Rap, nl := Nl, qos := QoS}} <- TopicFilters || {Topic, #{rh := Rh, rap := Rap, nl := Nl, qos := QoS}} <- TopicFilters
>>; >>;
serialize_topic_filters(subscribe, TopicFilters, _Ver, StrictMode) -> serialize_topic_filters(subscribe, TopicFilters, _Ver) ->
<< <<
<<(serialize_utf8_string(Topic, StrictMode))/binary, ?RESERVED:6, QoS:2>> <<(serialize_utf8_string(Topic))/binary, ?RESERVED:6, QoS:2>>
|| {Topic, #{qos := QoS}} <- TopicFilters || {Topic, #{qos := QoS}} <- TopicFilters
>>; >>;
serialize_topic_filters(unsubscribe, TopicFilters, _Ver, StrictMode) -> serialize_topic_filters(unsubscribe, TopicFilters, _Ver) ->
<<<<(serialize_utf8_string(Topic, StrictMode))/binary>> || Topic <- TopicFilters>>. <<<<(serialize_utf8_string(Topic))/binary>> || Topic <- TopicFilters>>.
serialize_reason_codes(undefined) -> serialize_reason_codes(undefined) ->
<<>>; <<>>;
serialize_reason_codes(ReasonCodes) when is_list(ReasonCodes) -> serialize_reason_codes(ReasonCodes) when is_list(ReasonCodes) ->
<<<<Code>> || Code <- ReasonCodes>>. <<<<Code>> || Code <- ReasonCodes>>.
serialize_utf8_pair(Name, Value, StrictMode) -> serialize_utf8_pair({Name, Value}) ->
<< <<(serialize_utf8_string(Name))/binary, (serialize_utf8_string(Value))/binary>>.
(serialize_utf8_string(Name, StrictMode))/binary,
(serialize_utf8_string(Value, StrictMode))/binary
>>.
serialize_binary_data(Bin) -> serialize_binary_data(Bin) ->
[<<(byte_size(Bin)):16/big-unsigned-integer>>, Bin]. [<<(byte_size(Bin)):16/big-unsigned-integer>>, Bin].
serialize_utf8_string(undefined, false, _StrictMode) -> serialize_utf8_string(undefined, false) ->
?SERIALIZE_ERR(utf8_string_undefined); ?SERIALIZE_ERR(utf8_string_undefined);
serialize_utf8_string(undefined, true, _StrictMode) -> serialize_utf8_string(undefined, true) ->
<<>>; <<>>;
serialize_utf8_string(String, _AllowNull, StrictMode) -> serialize_utf8_string(String, _AllowNull) ->
serialize_utf8_string(String, StrictMode). serialize_utf8_string(String).
serialize_utf8_string(String, true) -> serialize_utf8_string(String) ->
StringBin = unicode:characters_to_binary(String), StringBin = unicode:characters_to_binary(String),
serialize_utf8_string(StringBin, false); Len = byte_size(StringBin),
serialize_utf8_string(String, false) ->
Len = byte_size(String),
true = (Len =< 16#ffff), true = (Len =< 16#ffff),
<<Len:16/big, String/binary>>. <<Len:16/big, StringBin/binary>>.
serialize_remaining_len(I) -> serialize_remaining_len(I) ->
serialize_variable_byte_integer(I). serialize_variable_byte_integer(I).
@ -1187,34 +1129,18 @@ validate_subqos([3 | _]) -> ?PARSE_ERR(bad_subqos);
validate_subqos([_ | T]) -> validate_subqos(T); validate_subqos([_ | T]) -> validate_subqos(T);
validate_subqos([]) -> ok. validate_subqos([]) -> ok.
%% from spec: the server MAY send disconnect with reason code 0x84
%% we chose to close socket because the client is likely not talking MQTT anyway
validate_proto_name(<<"MQTT">>) ->
ok;
validate_proto_name(<<"MQIsdp">>) ->
ok;
validate_proto_name(ProtoName) ->
?PARSE_ERR(#{
cause => invalid_proto_name,
expected => <<"'MQTT' or 'MQIsdp'">>,
received => ProtoName
}).
%% MQTT-v3.1.1-[MQTT-3.1.2-3], MQTT-v5.0-[MQTT-3.1.2-3] %% MQTT-v3.1.1-[MQTT-3.1.2-3], MQTT-v5.0-[MQTT-3.1.2-3]
-compile({inline, [validate_connect_reserved/1]}).
validate_connect_reserved(0) -> ok; validate_connect_reserved(0) -> ok;
validate_connect_reserved(1) -> ?PARSE_ERR(reserved_connect_flag). validate_connect_reserved(1) -> ?PARSE_ERR(reserved_connect_flag).
-compile({inline, [validate_connect_will/3]}).
%% MQTT-v3.1.1-[MQTT-3.1.2-13], MQTT-v5.0-[MQTT-3.1.2-11] %% MQTT-v3.1.1-[MQTT-3.1.2-13], MQTT-v5.0-[MQTT-3.1.2-11]
validate_connect_will(false, _, WillQoS) when WillQoS > 0 -> ?PARSE_ERR(invalid_will_qos); validate_connect_will(false, _, WillQos) when WillQos > 0 -> ?PARSE_ERR(invalid_will_qos);
%% MQTT-v3.1.1-[MQTT-3.1.2-14], MQTT-v5.0-[MQTT-3.1.2-12] %% MQTT-v3.1.1-[MQTT-3.1.2-14], MQTT-v5.0-[MQTT-3.1.2-12]
validate_connect_will(true, _, WillQoS) when WillQoS > 2 -> ?PARSE_ERR(invalid_will_qos); validate_connect_will(true, _, WillQoS) when WillQoS > 2 -> ?PARSE_ERR(invalid_will_qos);
%% MQTT-v3.1.1-[MQTT-3.1.2-15], MQTT-v5.0-[MQTT-3.1.2-13] %% MQTT-v3.1.1-[MQTT-3.1.2-15], MQTT-v5.0-[MQTT-3.1.2-13]
validate_connect_will(false, WillRetain, _) when WillRetain -> ?PARSE_ERR(invalid_will_retain); validate_connect_will(false, WillRetain, _) when WillRetain -> ?PARSE_ERR(invalid_will_retain);
validate_connect_will(_, _, _) -> ok. validate_connect_will(_, _, _) -> ok.
-compile({inline, [validate_connect_password_flag/4]}).
%% MQTT-v3.1 %% MQTT-v3.1
%% Username flag and password flag are not strongly related %% Username flag and password flag are not strongly related
%% https://public.dhe.ibm.com/software/dw/webservices/ws-mqtt/mqtt-v3r1.html#connect %% https://public.dhe.ibm.com/software/dw/webservices/ws-mqtt/mqtt-v3r1.html#connect
@ -1229,7 +1155,6 @@ validate_connect_password_flag(true, ?MQTT_PROTO_V5, _, _) ->
validate_connect_password_flag(_, _, _, _) -> validate_connect_password_flag(_, _, _, _) ->
ok. ok.
-compile({inline, [bool/1]}).
bool(0) -> false; bool(0) -> false;
bool(1) -> true. bool(1) -> true.

View File

@ -30,6 +30,7 @@
-export([ -export([
init/1, init/1,
run/2,
run/3, run/3,
info/1, info/1,
reset/1 reset/1
@ -61,7 +62,12 @@ init(#{count := Count, bytes := Bytes}) ->
Oct = [{oct, {Bytes, Bytes}} || ?ENABLED(Bytes)], Oct = [{oct, {Bytes, Bytes}} || ?ENABLED(Bytes)],
?GCS(maps:from_list(Cnt ++ Oct)). ?GCS(maps:from_list(Cnt ++ Oct)).
%% @doc Try to run GC based on reductions of count or bytes. %% @doc Try to run GC based on reduntions of count or bytes.
-spec run(#{cnt := pos_integer(), oct := pos_integer()}, gc_state()) ->
{boolean(), gc_state()}.
run(#{cnt := Cnt, oct := Oct}, GcSt) ->
run(Cnt, Oct, GcSt).
-spec run(pos_integer(), pos_integer(), gc_state()) -> -spec run(pos_integer(), pos_integer(), gc_state()) ->
{boolean(), gc_state()}. {boolean(), gc_state()}.
run(Cnt, Oct, ?GCS(St)) -> run(Cnt, Oct, ?GCS(St)) ->

View File

@ -64,17 +64,6 @@
-export_type([listener_id/0]). -export_type([listener_id/0]).
-dialyzer(
{no_unknown, [
is_running/3,
current_conns/3,
do_stop_listener/3,
do_start_listener/4,
do_update_listener/4,
quic_listener_conf_rollback/3
]}
).
-type listener_id() :: atom() | binary(). -type listener_id() :: atom() | binary().
-type listener_type() :: tcp | ssl | ws | wss | quic | dtls. -type listener_type() :: tcp | ssl | ws | wss | quic | dtls.
@ -432,7 +421,7 @@ do_start_listener(Type, Name, Id, #{bind := ListenOn} = Opts) when ?ESOCKD_LISTE
esockd:open( esockd:open(
Id, Id,
ListenOn, ListenOn,
merge_default(esockd_opts(Id, Type, Name, Opts, _OldOpts = undefined)) merge_default(esockd_opts(Id, Type, Name, Opts))
); );
%% Start MQTT/WS listener %% Start MQTT/WS listener
do_start_listener(Type, Name, Id, Opts) when ?COWBOY_LISTENER(Type) -> do_start_listener(Type, Name, Id, Opts) when ?COWBOY_LISTENER(Type) ->
@ -476,7 +465,7 @@ do_update_listener(Type, Name, OldConf, NewConf = #{bind := ListenOn}) when
Id = listener_id(Type, Name), Id = listener_id(Type, Name),
case maps:get(bind, OldConf) of case maps:get(bind, OldConf) of
ListenOn -> ListenOn ->
esockd:set_options({Id, ListenOn}, esockd_opts(Id, Type, Name, NewConf, OldConf)); esockd:set_options({Id, ListenOn}, esockd_opts(Id, Type, Name, NewConf));
_Different -> _Different ->
%% TODO %% TODO
%% Again, we're not strictly required to drop live connections in this case. %% Again, we're not strictly required to drop live connections in this case.
@ -588,7 +577,7 @@ perform_listener_change(update, {{Type, Name, ConfOld}, {_, _, ConfNew}}) ->
perform_listener_change(stop, {Type, Name, Conf}) -> perform_listener_change(stop, {Type, Name, Conf}) ->
stop_listener(Type, Name, Conf). stop_listener(Type, Name, Conf).
esockd_opts(ListenerId, Type, Name, Opts0, OldOpts) -> esockd_opts(ListenerId, Type, Name, Opts0) ->
Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0), Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0),
Limiter = limiter(Opts0), Limiter = limiter(Opts0),
Opts2 = Opts2 =
@ -620,7 +609,7 @@ esockd_opts(ListenerId, Type, Name, Opts0, OldOpts) ->
tcp -> tcp ->
Opts3#{tcp_options => tcp_opts(Opts0)}; Opts3#{tcp_options => tcp_opts(Opts0)};
ssl -> ssl ->
OptsWithCRL = inject_crl_config(Opts0, OldOpts), OptsWithCRL = inject_crl_config(Opts0),
OptsWithSNI = inject_sni_fun(ListenerId, OptsWithCRL), OptsWithSNI = inject_sni_fun(ListenerId, OptsWithCRL),
OptsWithRootFun = inject_root_fun(OptsWithSNI), OptsWithRootFun = inject_root_fun(OptsWithSNI),
OptsWithVerifyFun = inject_verify_fun(OptsWithRootFun), OptsWithVerifyFun = inject_verify_fun(OptsWithRootFun),
@ -996,7 +985,7 @@ inject_sni_fun(_ListenerId, Conf) ->
Conf. Conf.
inject_crl_config( inject_crl_config(
Conf = #{ssl_options := #{enable_crl_check := true} = SSLOpts}, _OldOpts Conf = #{ssl_options := #{enable_crl_check := true} = SSLOpts}
) -> ) ->
HTTPTimeout = emqx_config:get([crl_cache, http_timeout], timer:seconds(15)), HTTPTimeout = emqx_config:get([crl_cache, http_timeout], timer:seconds(15)),
Conf#{ Conf#{
@ -1006,16 +995,7 @@ inject_crl_config(
crl_cache => {emqx_ssl_crl_cache, {internal, [{http, HTTPTimeout}]}} crl_cache => {emqx_ssl_crl_cache, {internal, [{http, HTTPTimeout}]}}
} }
}; };
inject_crl_config(#{ssl_options := SSLOpts0} = Conf0, #{} = OldOpts) -> inject_crl_config(Conf) ->
%% Note: we must set crl options to `undefined' to unset them. Otherwise,
%% `esockd' will retain such options when `esockd:merge_opts/2' is called and the SSL
%% options were previously enabled.
WasEnabled = emqx_utils_maps:deep_get([ssl_options, enable_crl_check], OldOpts, false),
Undefine = fun(Acc, K) -> emqx_utils_maps:put_if(Acc, K, undefined, WasEnabled) end,
SSLOpts1 = Undefine(SSLOpts0, crl_check),
SSLOpts = Undefine(SSLOpts1, crl_cache),
Conf0#{ssl_options := SSLOpts};
inject_crl_config(Conf, undefined = _OldOpts) ->
Conf. Conf.
maybe_unregister_ocsp_stapling_refresh( maybe_unregister_ocsp_stapling_refresh(
@ -1038,6 +1018,7 @@ ensure_max_conns(<<"infinity">>) -> <<"infinity">>;
ensure_max_conns(MaxConn) when is_binary(MaxConn) -> binary_to_integer(MaxConn); ensure_max_conns(MaxConn) when is_binary(MaxConn) -> binary_to_integer(MaxConn);
ensure_max_conns(MaxConn) -> MaxConn. ensure_max_conns(MaxConn) -> MaxConn.
-spec quic_listen_on(X :: any()) -> quicer:listen_on().
quic_listen_on(Bind) -> quic_listen_on(Bind) ->
case Bind of case Bind of
{Addr, Port} when tuple_size(Addr) == 4 -> {Addr, Port} when tuple_size(Addr) == 4 ->

View File

@ -105,7 +105,7 @@ format(Msg, Meta, Config) ->
maybe_format_msg(undefined, _Meta, _Config) -> maybe_format_msg(undefined, _Meta, _Config) ->
#{}; #{};
maybe_format_msg({report, Report0} = Msg, #{report_cb := Cb} = Meta, Config) -> maybe_format_msg({report, Report0} = Msg, #{report_cb := Cb} = Meta, Config) ->
Report = emqx_logger_textfmt:try_encode_meta(Report0, Config), Report = emqx_logger_textfmt:try_encode_payload(Report0, Config),
case is_map(Report) andalso Cb =:= ?DEFAULT_FORMATTER of case is_map(Report) andalso Cb =:= ?DEFAULT_FORMATTER of
true -> true ->
%% reporting a map without a customised format function %% reporting a map without a customised format function
@ -294,7 +294,6 @@ json_obj_root(Data0, Config) ->
_ -> _ ->
json(Msg1, Config) json(Msg1, Config)
end, end,
MFA = emqx_utils:format_mfal(Data0, Config),
Data = Data =
maps:fold( maps:fold(
fun(K, V, D) -> fun(K, V, D) ->
@ -303,12 +302,12 @@ json_obj_root(Data0, Config) ->
end, end,
[], [],
maps:without( maps:without(
[time, gl, file, report_cb, msg, '$kind', level, mfa, is_trace], Data0 [time, gl, file, report_cb, msg, '$kind', level, is_trace], Data0
) )
), ),
lists:filter( lists:filter(
fun({_, V}) -> V =/= undefined end, fun({_, V}) -> V =/= undefined end,
[{time, format_ts(Time, Config)}, {level, Level}, {msg, Msg}, {mfa, MFA}] [{time, format_ts(Time, Config)}, {level, Level}, {msg, Msg}]
) ++ Data. ) ++ Data.
format_ts(Ts, #{timestamp_format := rfc3339, time_offset := Offset}) when is_integer(Ts) -> format_ts(Ts, #{timestamp_format := rfc3339, time_offset := Offset}) when is_integer(Ts) ->

View File

@ -20,12 +20,12 @@
-export([format/2]). -export([format/2]).
-export([check_config/1]). -export([check_config/1]).
-export([try_format_unicode/1, try_encode_meta/2]). -export([try_format_unicode/1, try_encode_payload/2]).
%% Used in the other log formatters %% Used in the other log formatters
-export([evaluate_lazy_values_if_dbg_level/1, evaluate_lazy_values/1]). -export([evaluate_lazy_values_if_dbg_level/1, evaluate_lazy_values/1]).
check_config(X) -> check_config(X) ->
logger_formatter:check_config(maps:without([timestamp_format, with_mfa, payload_encode], X)). logger_formatter:check_config(maps:without([timestamp_format, payload_encode], X)).
%% Principle here is to delegate the formatting to logger_formatter:format/2 %% Principle here is to delegate the formatting to logger_formatter:format/2
%% as much as possible, and only enrich the report with clientid, peername, topic, username %% as much as possible, and only enrich the report with clientid, peername, topic, username
@ -41,24 +41,18 @@ format(#{msg := {report, ReportMap0}, meta := _Meta} = Event0, Config) when is_m
false -> false ->
maps:from_list(ReportList) maps:from_list(ReportList)
end, end,
fmt(Event#{msg := {report, Report}}, maps:remove(with_mfa, Config)); fmt(Event#{msg := {report, Report}}, Config);
format(#{msg := {string, String}} = Event, Config) -> format(#{msg := {string, String}} = Event, Config) ->
%% copied from logger_formatter:format/2 %% copied from logger_formatter:format/2
%% unsure how this case is triggered %% unsure how this case is triggered
format(Event#{msg => {"~ts ", [String]}}, maps:remove(with_mfa, Config)); format(Event#{msg => {"~ts ", [String]}}, Config);
format(#{msg := _Msg, meta := _Meta} = Event0, Config) -> format(#{msg := _Msg, meta := _Meta} = Event0, Config) ->
#{msg := Msg0, meta := Meta} = Event1 = evaluate_lazy_values_if_dbg_level(Event0), #{msg := Msg0, meta := Meta} = Event1 = evaluate_lazy_values_if_dbg_level(Event0),
%% For format strings like logger:log(Level, "~p", [Var]) %% For format strings like logger:log(Level, "~p", [Var])
%% and logger:log(Level, "message", #{key => value}) %% and logger:log(Level, "message", #{key => value})
Msg1 = enrich_client_info(Msg0, Meta), Msg1 = enrich_client_info(Msg0, Meta),
Msg2 = enrich_mfa(Msg1, Meta, Config), Msg2 = enrich_topic(Msg1, Meta),
Msg3 = enrich_topic(Msg2, Meta), fmt(Event1#{msg := Msg2}, Config).
fmt(Event1#{msg := Msg3}, maps:remove(with_mfa, Config)).
enrich_mfa({Fmt, Args}, Data, #{with_mfa := true} = Config) when is_list(Fmt) ->
{Fmt ++ " mfa: ~ts", Args ++ [emqx_utils:format_mfal(Data, Config)]};
enrich_mfa(Msg, _, _) ->
Msg.
%% Most log entries with lazy values are trace events with level debug. So to %% Most log entries with lazy values are trace events with level debug. So to
%% be more efficient we only search for lazy values to evaluate in the entries %% be more efficient we only search for lazy values to evaluate in the entries
@ -111,7 +105,7 @@ is_list_report_acceptable(_) ->
enrich_report(ReportRaw0, Meta, Config) -> enrich_report(ReportRaw0, Meta, Config) ->
%% clientid and peername always in emqx_conn's process metadata. %% clientid and peername always in emqx_conn's process metadata.
%% topic and username can be put in meta using ?SLOG/3, or put in msg's report by ?SLOG/2 %% topic and username can be put in meta using ?SLOG/3, or put in msg's report by ?SLOG/2
ReportRaw = try_encode_meta(ReportRaw0, Config), ReportRaw = try_encode_payload(ReportRaw0, Config),
Topic = Topic =
case maps:get(topic, Meta, undefined) of case maps:get(topic, Meta, undefined) of
undefined -> maps:get(topic, ReportRaw, undefined); undefined -> maps:get(topic, ReportRaw, undefined);
@ -125,7 +119,6 @@ enrich_report(ReportRaw0, Meta, Config) ->
ClientId = maps:get(clientid, Meta, undefined), ClientId = maps:get(clientid, Meta, undefined),
Peer = maps:get(peername, Meta, undefined), Peer = maps:get(peername, Meta, undefined),
Msg = maps:get(msg, ReportRaw, undefined), Msg = maps:get(msg, ReportRaw, undefined),
MFA = emqx_utils:format_mfal(Meta, Config),
%% TODO: move all tags to Meta so we can filter traces %% TODO: move all tags to Meta so we can filter traces
%% based on tags (currently not supported) %% based on tags (currently not supported)
Tag = maps:get(tag, ReportRaw, maps:get(tag, Meta, undefined)), Tag = maps:get(tag, ReportRaw, maps:get(tag, Meta, undefined)),
@ -140,7 +133,6 @@ enrich_report(ReportRaw0, Meta, Config) ->
{topic, try_format_unicode(Topic)}, {topic, try_format_unicode(Topic)},
{username, try_format_unicode(Username)}, {username, try_format_unicode(Username)},
{peername, Peer}, {peername, Peer},
{mfa, try_format_unicode(MFA)},
{msg, Msg}, {msg, Msg},
{clientid, try_format_unicode(ClientId)}, {clientid, try_format_unicode(ClientId)},
{tag, Tag} {tag, Tag}
@ -180,22 +172,9 @@ enrich_topic({Fmt, Args}, #{topic := Topic}) when is_list(Fmt) ->
enrich_topic(Msg, _) -> enrich_topic(Msg, _) ->
Msg. Msg.
try_encode_meta(Report, Config) -> try_encode_payload(#{payload := Payload} = Report, #{payload_encode := Encode}) ->
lists:foldl(
fun(Meta, Acc) ->
try_encode_meta(Meta, Acc, Config)
end,
Report,
[payload, packet]
).
try_encode_meta(payload, #{payload := Payload} = Report, #{payload_encode := Encode}) ->
Report#{payload := encode_payload(Payload, Encode)}; Report#{payload := encode_payload(Payload, Encode)};
try_encode_meta(packet, #{packet := Packet} = Report, #{payload_encode := Encode}) when try_encode_payload(Report, _Config) ->
is_tuple(Packet)
->
Report#{packet := emqx_packet:format(Packet, Encode)};
try_encode_meta(_, Report, _Config) ->
Report. Report.
encode_payload(Payload, text) -> encode_payload(Payload, text) ->
@ -203,5 +182,4 @@ encode_payload(Payload, text) ->
encode_payload(_Payload, hidden) -> encode_payload(_Payload, hidden) ->
"******"; "******";
encode_payload(Payload, hex) -> encode_payload(Payload, hex) ->
Bin = emqx_utils_conv:bin(Payload), binary:encode_hex(Payload).
binary:encode_hex(Bin).

View File

@ -51,6 +51,7 @@
]). ]).
-export([ -export([
format/1,
format/2 format/2
]). ]).
@ -480,6 +481,10 @@ will_msg(#mqtt_packet_connect{
headers = #{username => Username, properties => Props} headers = #{username => Username, properties => Props}
}. }.
%% @doc Format packet
-spec format(emqx_types:packet()) -> iolist().
format(Packet) -> format(Packet, emqx_trace_handler:payload_encode()).
%% @doc Format packet %% @doc Format packet
-spec format(emqx_types:packet(), hex | text | hidden) -> iolist(). -spec format(emqx_types:packet(), hex | text | hidden) -> iolist().
format(#mqtt_packet{header = Header, variable = Variable, payload = Payload}, PayloadEncode) -> format(#mqtt_packet{header = Header, variable = Variable, payload = Payload}, PayloadEncode) ->

View File

@ -25,7 +25,6 @@
-include("emqx_mqtt.hrl"). -include("emqx_mqtt.hrl").
-include("emqx_session.hrl").
-include("emqx_persistent_session_ds/session_internals.hrl"). -include("emqx_persistent_session_ds/session_internals.hrl").
-ifdef(TEST). -ifdef(TEST).
@ -64,7 +63,6 @@
deliver/3, deliver/3,
replay/3, replay/3,
handle_timeout/3, handle_timeout/3,
handle_info/2,
disconnect/2, disconnect/2,
terminate/2 terminate/2
]). ]).
@ -108,7 +106,6 @@
seqno/0, seqno/0,
timestamp/0, timestamp/0,
topic_filter/0, topic_filter/0,
share_topic_filter/0,
subscription_id/0, subscription_id/0,
subscription/0, subscription/0,
session/0, session/0,
@ -120,8 +117,7 @@
%% Currently, this is the clientid. We avoid `emqx_types:clientid()' because that can be %% Currently, this is the clientid. We avoid `emqx_types:clientid()' because that can be
%% an atom, in theory (?). %% an atom, in theory (?).
-type id() :: binary(). -type id() :: binary().
-type share_topic_filter() :: #share{}. -type topic_filter() :: emqx_types:topic() | #share{}.
-type topic_filter() :: emqx_types:topic() | share_topic_filter().
%% Subscription and subscription states: %% Subscription and subscription states:
%% %%
@ -159,8 +155,6 @@
subopts := map() subopts := map()
}. }.
-type shared_sub_state() :: term().
-define(TIMER_PULL, timer_pull). -define(TIMER_PULL, timer_pull).
-define(TIMER_GET_STREAMS, timer_get_streams). -define(TIMER_GET_STREAMS, timer_get_streams).
-define(TIMER_BUMP_LAST_ALIVE_AT, timer_bump_last_alive_at). -define(TIMER_BUMP_LAST_ALIVE_AT, timer_bump_last_alive_at).
@ -178,13 +172,8 @@
props := map(), props := map(),
%% Persistent state: %% Persistent state:
s := emqx_persistent_session_ds_state:t(), s := emqx_persistent_session_ds_state:t(),
%% Shared subscription state:
shared_sub_s := shared_sub_state(),
%% Buffer: %% Buffer:
inflight := emqx_persistent_session_ds_inflight:t(), inflight := emqx_persistent_session_ds_inflight:t(),
%% Last fetched stream:
%% Used as a continuation point for fair stream scheduling.
last_fetched_stream => emqx_persistent_session_ds_state:stream_key(),
%% In-progress replay: %% In-progress replay:
%% List of stream replay states to be added to the inflight buffer. %% List of stream replay states to be added to the inflight buffer.
replay => [{_StreamKey, stream_state()}, ...], replay => [{_StreamKey, stream_state()}, ...],
@ -288,11 +277,8 @@ info(created_at, #{s := S}) ->
emqx_persistent_session_ds_state:get_created_at(S); emqx_persistent_session_ds_state:get_created_at(S);
info(is_persistent, #{}) -> info(is_persistent, #{}) ->
true; true;
info(subscriptions, #{s := S, shared_sub_s := SharedSubS}) -> info(subscriptions, #{s := S}) ->
maps:merge( emqx_persistent_session_ds_subs:to_map(S);
emqx_persistent_session_ds_subs:to_map(S),
emqx_persistent_session_ds_shared_subs:to_map(S, SharedSubS)
);
info(subscriptions_cnt, #{s := S}) -> info(subscriptions_cnt, #{s := S}) ->
emqx_persistent_session_ds_state:n_subscriptions(S); emqx_persistent_session_ds_state:n_subscriptions(S);
info(subscriptions_max, #{props := Conf}) -> info(subscriptions_max, #{props := Conf}) ->
@ -370,23 +356,15 @@ print_session(ClientId) ->
%% Client -> Broker: SUBSCRIBE / UNSUBSCRIBE %% Client -> Broker: SUBSCRIBE / UNSUBSCRIBE
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Suppress warnings about clauses handling unimplemented results
%% of `emqx_persistent_session_ds_shared_subs:on_subscribe/3`
-dialyzer({nowarn_function, subscribe/3}).
-spec subscribe(topic_filter(), emqx_types:subopts(), session()) -> -spec subscribe(topic_filter(), emqx_types:subopts(), session()) ->
{ok, session()} | {error, emqx_types:reason_code()}. {ok, session()} | {error, emqx_types:reason_code()}.
subscribe( subscribe(
#share{} = TopicFilter, #share{},
SubOpts, _SubOpts,
Session _Session
) -> ) ->
case emqx_persistent_session_ds_shared_subs:on_subscribe(TopicFilter, SubOpts, Session) of %% TODO: Shared subscriptions are not supported yet:
{ok, S0, SharedSubS} -> {error, ?RC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED};
S = emqx_persistent_session_ds_state:commit(S0),
{ok, Session#{s => S, shared_sub_s => SharedSubS}};
Error = {error, _} ->
Error
end;
subscribe( subscribe(
TopicFilter, TopicFilter,
SubOpts, SubOpts,
@ -400,27 +378,8 @@ subscribe(
Error Error
end. end.
%% Suppress warnings about clauses handling unimplemented results
%% of `emqx_persistent_session_ds_shared_subs:on_unsubscribe/4`
-dialyzer({nowarn_function, unsubscribe/2}).
-spec unsubscribe(topic_filter(), session()) -> -spec unsubscribe(topic_filter(), session()) ->
{ok, session(), emqx_types:subopts()} | {error, emqx_types:reason_code()}. {ok, session(), emqx_types:subopts()} | {error, emqx_types:reason_code()}.
unsubscribe(
#share{} = TopicFilter,
Session = #{id := SessionId, s := S0, shared_sub_s := SharedSubS0}
) ->
case
emqx_persistent_session_ds_shared_subs:on_unsubscribe(
SessionId, TopicFilter, S0, SharedSubS0
)
of
{ok, S1, SharedSubS1, #{id := SubId, subopts := SubOpts}} ->
S2 = emqx_persistent_session_ds_stream_scheduler:on_unsubscribe(SubId, S1),
S = emqx_persistent_session_ds_state:commit(S2),
{ok, Session#{s => S, shared_sub_s => SharedSubS1}, SubOpts};
Error = {error, _} ->
Error
end;
unsubscribe( unsubscribe(
TopicFilter, TopicFilter,
Session = #{id := SessionId, s := S0} Session = #{id := SessionId, s := S0}
@ -581,8 +540,6 @@ pubcomp(_ClientInfo, PacketId, Session0) ->
end. end.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Delivers
%%--------------------------------------------------------------------
-spec deliver(clientinfo(), [emqx_types:deliver()], session()) -> -spec deliver(clientinfo(), [emqx_types:deliver()], session()) ->
{ok, replies(), session()}. {ok, replies(), session()}.
@ -594,10 +551,6 @@ deliver(ClientInfo, Delivers, Session0) ->
), ),
{ok, [], pull_now(Session)}. {ok, [], pull_now(Session)}.
%%--------------------------------------------------------------------
%% Timeouts
%%--------------------------------------------------------------------
-spec handle_timeout(clientinfo(), _Timeout, session()) -> -spec handle_timeout(clientinfo(), _Timeout, session()) ->
{ok, replies(), session()} | {ok, replies(), timeout(), session()}. {ok, replies(), session()} | {ok, replies(), timeout(), session()}.
handle_timeout(ClientInfo, ?TIMER_PULL, Session0) -> handle_timeout(ClientInfo, ?TIMER_PULL, Session0) ->
@ -620,19 +573,14 @@ handle_timeout(ClientInfo, ?TIMER_PULL, Session0) ->
handle_timeout(ClientInfo, ?TIMER_RETRY_REPLAY, Session0) -> handle_timeout(ClientInfo, ?TIMER_RETRY_REPLAY, Session0) ->
Session = replay_streams(Session0, ClientInfo), Session = replay_streams(Session0, ClientInfo),
{ok, [], Session}; {ok, [], Session};
handle_timeout(ClientInfo, ?TIMER_GET_STREAMS, Session0 = #{s := S0, shared_sub_s := SharedSubS0}) -> handle_timeout(ClientInfo, ?TIMER_GET_STREAMS, Session0 = #{s := S0}) ->
%% `gc` and `renew_streams` methods may drop unsubscribed streams. S1 = emqx_persistent_session_ds_subs:gc(S0),
%% Shared subscription handler must have a chance to see unsubscribed streams S = emqx_persistent_session_ds_stream_scheduler:renew_streams(S1),
%% in the fully replayed state.
{S1, SharedSubS1} = emqx_persistent_session_ds_shared_subs:pre_renew_streams(S0, SharedSubS0),
S2 = emqx_persistent_session_ds_subs:gc(S1),
S3 = emqx_persistent_session_ds_stream_scheduler:renew_streams(S2),
{S, SharedSubS} = emqx_persistent_session_ds_shared_subs:renew_streams(S3, SharedSubS1),
Interval = get_config(ClientInfo, [renew_streams_interval]), Interval = get_config(ClientInfo, [renew_streams_interval]),
Session = emqx_session:ensure_timer( Session = emqx_session:ensure_timer(
?TIMER_GET_STREAMS, ?TIMER_GET_STREAMS,
Interval, Interval,
Session0#{s => S, shared_sub_s => SharedSubS} Session0#{s => S}
), ),
{ok, [], Session}; {ok, [], Session};
handle_timeout(_ClientInfo, ?TIMER_BUMP_LAST_ALIVE_AT, Session0 = #{s := S0}) -> handle_timeout(_ClientInfo, ?TIMER_BUMP_LAST_ALIVE_AT, Session0 = #{s := S0}) ->
@ -653,22 +601,6 @@ handle_timeout(_ClientInfo, Timeout, Session) ->
?SLOG(warning, #{msg => "unknown_ds_timeout", timeout => Timeout}), ?SLOG(warning, #{msg => "unknown_ds_timeout", timeout => Timeout}),
{ok, [], Session}. {ok, [], Session}.
%%--------------------------------------------------------------------
%% Generic messages
%%--------------------------------------------------------------------
-spec handle_info(term(), session()) -> session().
handle_info(?shared_sub_message(Msg), Session = #{s := S0, shared_sub_s := SharedSubS0}) ->
{S, SharedSubS} = emqx_persistent_session_ds_shared_subs:on_info(S0, SharedSubS0, Msg),
Session#{s => S, shared_sub_s => SharedSubS}.
%%--------------------------------------------------------------------
%% Shared subscription outgoing messages
%%--------------------------------------------------------------------
shared_sub_opts(SessionId) ->
#{session_id => SessionId}.
bump_last_alive(S0) -> bump_last_alive(S0) ->
%% Note: we take a pessimistic approach here and assume that the client will be alive %% Note: we take a pessimistic approach here and assume that the client will be alive
%% until the next bump timeout. With this, we avoid garbage collecting this session %% until the next bump timeout. With this, we avoid garbage collecting this session
@ -761,7 +693,7 @@ skip_batch(StreamKey, SRS0, Session = #{s := S0}, ClientInfo, Reason) ->
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-spec disconnect(session(), emqx_types:conninfo()) -> {shutdown, session()}. -spec disconnect(session(), emqx_types:conninfo()) -> {shutdown, session()}.
disconnect(Session = #{id := Id, s := S0, shared_sub_s := SharedSubS0}, ConnInfo) -> disconnect(Session = #{id := Id, s := S0}, ConnInfo) ->
S1 = maybe_set_offline_info(S0, Id), S1 = maybe_set_offline_info(S0, Id),
S2 = emqx_persistent_session_ds_state:set_last_alive_at(now_ms(), S1), S2 = emqx_persistent_session_ds_state:set_last_alive_at(now_ms(), S1),
S3 = S3 =
@ -771,9 +703,8 @@ disconnect(Session = #{id := Id, s := S0, shared_sub_s := SharedSubS0}, ConnInfo
_ -> _ ->
S2 S2
end, end,
{S4, SharedSubS} = emqx_persistent_session_ds_shared_subs:on_disconnect(S3, SharedSubS0), S = emqx_persistent_session_ds_state:commit(S3),
S = emqx_persistent_session_ds_state:commit(S4), {shutdown, Session#{s => S}}.
{shutdown, Session#{s => S, shared_sub_s => SharedSubS}}.
-spec terminate(Reason :: term(), session()) -> ok. -spec terminate(Reason :: term(), session()) -> ok.
terminate(_Reason, Session = #{id := Id, s := S}) -> terminate(_Reason, Session = #{id := Id, s := S}) ->
@ -821,12 +752,10 @@ list_client_subscriptions(ClientId) ->
{error, not_found} {error, not_found}
end. end.
-spec get_client_subscription(emqx_types:clientid(), topic_filter() | share_topic_filter()) -> -spec get_client_subscription(emqx_types:clientid(), emqx_types:topic()) ->
subscription() | undefined. subscription() | undefined.
get_client_subscription(ClientId, #share{} = ShareTopicFilter) -> get_client_subscription(ClientId, Topic) ->
emqx_persistent_session_ds_shared_subs:cold_get_subscription(ClientId, ShareTopicFilter); emqx_persistent_session_ds_subs:cold_get_subscription(ClientId, Topic).
get_client_subscription(ClientId, TopicFilter) ->
emqx_persistent_session_ds_subs:cold_get_subscription(ClientId, TopicFilter).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Session tables operations %% Session tables operations
@ -885,17 +814,13 @@ session_open(
S4 = emqx_persistent_session_ds_state:set_will_message(MaybeWillMsg, S3), S4 = emqx_persistent_session_ds_state:set_will_message(MaybeWillMsg, S3),
S5 = set_clientinfo(ClientInfo, S4), S5 = set_clientinfo(ClientInfo, S4),
S6 = emqx_persistent_session_ds_state:set_protocol({ProtoName, ProtoVer}, S5), S6 = emqx_persistent_session_ds_state:set_protocol({ProtoName, ProtoVer}, S5),
{ok, S7, SharedSubS} = emqx_persistent_session_ds_shared_subs:open( S = emqx_persistent_session_ds_state:commit(S6),
S6, shared_sub_opts(SessionId)
),
S = emqx_persistent_session_ds_state:commit(S7),
Inflight = emqx_persistent_session_ds_inflight:new( Inflight = emqx_persistent_session_ds_inflight:new(
receive_maximum(NewConnInfo) receive_maximum(NewConnInfo)
), ),
#{ #{
id => SessionId, id => SessionId,
s => S, s => S,
shared_sub_s => SharedSubS,
inflight => Inflight, inflight => Inflight,
props => #{} props => #{}
} }
@ -944,7 +869,6 @@ session_ensure_new(
id => Id, id => Id,
props => Conf, props => Conf,
s => S, s => S,
shared_sub_s => emqx_persistent_session_ds_shared_subs:new(shared_sub_opts(Id)),
inflight => emqx_persistent_session_ds_inflight:new(receive_maximum(ConnInfo)) inflight => emqx_persistent_session_ds_inflight:new(receive_maximum(ConnInfo))
}. }.
@ -955,8 +879,8 @@ session_drop(SessionId, Reason) ->
case emqx_persistent_session_ds_state:open(SessionId) of case emqx_persistent_session_ds_state:open(SessionId) of
{ok, S0} -> {ok, S0} ->
?tp(debug, drop_persistent_session, #{client_id => SessionId, reason => Reason}), ?tp(debug, drop_persistent_session, #{client_id => SessionId, reason => Reason}),
ok = emqx_persistent_session_ds_subs:on_session_drop(SessionId, S0), emqx_persistent_session_ds_subs:on_session_drop(SessionId, S0),
ok = emqx_persistent_session_ds_state:delete(SessionId); emqx_persistent_session_ds_state:delete(SessionId);
undefined -> undefined ->
ok ok
end. end.
@ -993,33 +917,24 @@ do_ensure_all_iterators_closed(_DSSessionID) ->
%% Normal replay: %% Normal replay:
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
fetch_new_messages(Session0 = #{s := S0, shared_sub_s := SharedSubS0}, ClientInfo) -> fetch_new_messages(Session = #{s := S}, ClientInfo) ->
{S1, SharedSubS1} = emqx_persistent_session_ds_shared_subs:on_streams_replay(S0, SharedSubS0), Streams = emqx_persistent_session_ds_stream_scheduler:find_new_streams(S),
Session1 = Session0#{s => S1, shared_sub_s => SharedSubS1}, fetch_new_messages(Streams, Session, ClientInfo).
LFS = maps:get(last_fetched_stream, Session1, beginning),
ItStream = emqx_persistent_session_ds_stream_scheduler:iter_next_streams(LFS, S1),
BatchSize = get_config(ClientInfo, [batch_size]),
Session2 = fetch_new_messages(ItStream, BatchSize, Session1, ClientInfo),
Session2#{shared_sub_s => SharedSubS1}.
fetch_new_messages(ItStream0, BatchSize, Session0, ClientInfo) -> fetch_new_messages([], Session, _ClientInfo) ->
#{inflight := Inflight} = Session0, Session;
fetch_new_messages([I | Streams], Session0 = #{inflight := Inflight}, ClientInfo) ->
BatchSize = get_config(ClientInfo, [batch_size]),
case emqx_persistent_session_ds_inflight:n_buffered(all, Inflight) >= BatchSize of case emqx_persistent_session_ds_inflight:n_buffered(all, Inflight) >= BatchSize of
true -> true ->
%% Buffer is full: %% Buffer is full:
Session0; Session0;
false -> false ->
case emqx_persistent_session_ds_stream_scheduler:next_stream(ItStream0) of Session = new_batch(I, BatchSize, Session0, ClientInfo),
{StreamKey, Srs, ItStream} -> fetch_new_messages(Streams, Session, ClientInfo)
Session1 = new_batch(StreamKey, Srs, BatchSize, Session0, ClientInfo),
Session = Session1#{last_fetched_stream => StreamKey},
fetch_new_messages(ItStream, BatchSize, Session, ClientInfo);
none ->
Session0
end
end. end.
new_batch(StreamKey, Srs0, BatchSize, Session0 = #{s := S0}, ClientInfo) -> new_batch({StreamKey, Srs0}, BatchSize, Session0 = #{s := S0}, ClientInfo) ->
SN1 = emqx_persistent_session_ds_state:get_seqno(?next(?QOS_1), S0), SN1 = emqx_persistent_session_ds_state:get_seqno(?next(?QOS_1), S0),
SN2 = emqx_persistent_session_ds_state:get_seqno(?next(?QOS_2), S0), SN2 = emqx_persistent_session_ds_state:get_seqno(?next(?QOS_2), S0),
Srs1 = Srs0#srs{ Srs1 = Srs0#srs{

View File

@ -67,7 +67,7 @@
-type t() :: #inflight{}. -type t() :: #inflight{}.
%%================================================================================ %%================================================================================
%% API functions %% API funcions
%%================================================================================ %%================================================================================
-spec new(non_neg_integer()) -> t(). -spec new(non_neg_integer()) -> t().

View File

@ -17,7 +17,7 @@
-module(emqx_persistent_session_ds_router). -module(emqx_persistent_session_ds_router).
-include("emqx.hrl"). -include("emqx.hrl").
-include("emqx_ps_ds_int.hrl"). -include("emqx_persistent_session_ds/emqx_ps_ds_int.hrl").
-export([init_tables/0]). -export([init_tables/0]).
@ -46,10 +46,9 @@
-export([has_route/2]). -export([has_route/2]).
-endif. -endif.
-type route() :: #ps_route{}. -type dest() :: emqx_persistent_session_ds:id().
-type dest() :: emqx_persistent_session_ds:id() | #share_dest{}.
-export_type([dest/0, route/0]). -export_type([dest/0]).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Table Initialization %% Table Initialization
@ -124,19 +123,19 @@ has_any_route(Topic) ->
%% @doc Take a real topic (not filter) as input, return the matching topics and topic %% @doc Take a real topic (not filter) as input, return the matching topics and topic
%% filters associated with route destination. %% filters associated with route destination.
-spec match_routes(emqx_types:topic()) -> [route()]. -spec match_routes(emqx_types:topic()) -> [emqx_types:route()].
match_routes(Topic) when is_binary(Topic) -> match_routes(Topic) when is_binary(Topic) ->
lookup_route_tab(Topic) ++ lookup_route_tab(Topic) ++
[match_to_route(M) || M <- match_filters(Topic)]. [match_to_route(M) || M <- match_filters(Topic)].
%% @doc Take a topic or filter as input, and return the existing routes with exactly %% @doc Take a topic or filter as input, and return the existing routes with exactly
%% this topic or filter. %% this topic or filter.
-spec lookup_routes(emqx_types:topic()) -> [route()]. -spec lookup_routes(emqx_types:topic()) -> [emqx_types:route()].
lookup_routes(Topic) -> lookup_routes(Topic) ->
case emqx_topic:wildcard(Topic) of case emqx_topic:wildcard(Topic) of
true -> true ->
Pat = #ps_routeidx{entry = emqx_topic_index:make_key(Topic, '$1')}, Pat = #ps_routeidx{entry = emqx_topic_index:make_key(Topic, '$1')},
[#ps_route{topic = Topic, dest = Dest} || [Dest] <- ets:match(?PS_FILTERS_TAB, Pat)]; [Dest || [Dest] <- ets:match(?PS_FILTERS_TAB, Pat)];
false -> false ->
lookup_route_tab(Topic) lookup_route_tab(Topic)
end. end.
@ -161,7 +160,7 @@ topics() ->
print_routes(Topic) -> print_routes(Topic) ->
lists:foreach( lists:foreach(
fun(#ps_route{topic = To, dest = Dest}) -> fun(#ps_route{topic = To, dest = Dest}) ->
io:format("~ts -> ~tp~n", [To, Dest]) io:format("~ts -> ~ts~n", [To, Dest])
end, end,
match_routes(Topic) match_routes(Topic)
). ).
@ -195,11 +194,11 @@ cleanup_routes(DSSessionId) ->
?PS_ROUTER_TAB ?PS_ROUTER_TAB
). ).
-spec foldl_routes(fun((route(), Acc) -> Acc), Acc) -> Acc. -spec foldl_routes(fun((emqx_types:route(), Acc) -> Acc), Acc) -> Acc.
foldl_routes(FoldFun, AccIn) -> foldl_routes(FoldFun, AccIn) ->
fold_routes(foldl, FoldFun, AccIn). fold_routes(foldl, FoldFun, AccIn).
-spec foldr_routes(fun((route(), Acc) -> Acc), Acc) -> Acc. -spec foldr_routes(fun((emqx_types:route(), Acc) -> Acc), Acc) -> Acc.
foldr_routes(FoldFun, AccIn) -> foldr_routes(FoldFun, AccIn) ->
fold_routes(foldr, FoldFun, AccIn). fold_routes(foldr, FoldFun, AccIn).
@ -247,8 +246,6 @@ mk_filtertab_fold_fun(FoldFun) ->
match_filters(Topic) -> match_filters(Topic) ->
emqx_topic_index:matches(Topic, ?PS_FILTERS_TAB, []). emqx_topic_index:matches(Topic, ?PS_FILTERS_TAB, []).
get_dest_session_id(#share_dest{session_id = DSSessionId}) ->
DSSessionId;
get_dest_session_id({_, DSSessionId}) -> get_dest_session_id({_, DSSessionId}) ->
DSSessionId; DSSessionId;
get_dest_session_id(DSSessionId) -> get_dest_session_id(DSSessionId) ->

View File

@ -1,791 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
%% @doc This module
%% * handles creation and management of _shared_ subscriptions for the session;
%% * provides streams to the session;
%% * handles progress of stream replay.
%%
%% The logic is quite straightforward; most of the parts resemble the logic of the
%% `emqx_persistent_session_ds_subs` (subscribe/unsubscribe) and
%% `emqx_persistent_session_ds_scheduler` (providing new streams),
%% but some data is sent or received from the `emqx_persistent_session_ds_shared_subs_agent`
%% which communicates with remote shared subscription leaders.
%%
%% A tricky part is the concept of "scheduled actions". When we unsubscribe from a topic
%% we may have some streams that have unacked messages. So we do not have a reliable
%% progress for them. Sending the current progress to the leader and disconnecting
%% will lead to the duplication of messages. So after unsubscription, we need to wait
%% some time until all streams are acked, and only then we disconnect from the leader.
%%
%% For this purpose we have the `scheduled_actions` map in the state of the module.
%% We preserve there the streams that we need to wait for and collect their progress.
%% We also use `scheduled_actions` for resubscriptions. If a client quickly resubscribes
%% after unsubscription, we may still have the mentioned streams unacked. If we abandon
%% them, just connect to the leader, then it may lease us the same streams again, but with
%% the previous progress. So messages may duplicate.
-module(emqx_persistent_session_ds_shared_subs).
-include("emqx_mqtt.hrl").
-include("emqx.hrl").
-include("logger.hrl").
-include("session_internals.hrl").
-include_lib("snabbkaffe/include/trace.hrl").
-export([
new/1,
open/2,
on_subscribe/3,
on_unsubscribe/4,
on_disconnect/2,
on_streams_replay/2,
on_info/3,
pre_renew_streams/2,
renew_streams/2,
to_map/2
]).
%% Management API:
-export([
cold_get_subscription/2
]).
-export([
format_lease_events/1,
format_stream_progresses/1
]).
-define(schedule_subscribe, schedule_subscribe).
-define(schedule_unsubscribe, schedule_unsubscribe).
-type stream_key() :: {emqx_persistent_session_ds:id(), emqx_ds:stream()}.
-type scheduled_action_type() ::
{?schedule_subscribe, emqx_types:subopts()} | ?schedule_unsubscribe.
-type agent_stream_progress() :: #{
stream := emqx_ds:stream(),
progress := progress(),
use_finished := boolean()
}.
-type progress() ::
#{
iterator := emqx_ds:iterator()
}.
-type scheduled_action() :: #{
type := scheduled_action_type(),
stream_keys_to_wait := [stream_key()],
progresses := [agent_stream_progress()]
}.
-type t() :: #{
agent := emqx_persistent_session_ds_shared_subs_agent:t(),
scheduled_actions := #{
share_topic_filter() => scheduled_action()
}
}.
-type share_topic_filter() :: emqx_persistent_session_ds:share_topic_filter().
-type opts() :: #{
session_id := emqx_persistent_session_ds:id()
}.
-define(rank_x, rank_shared).
-define(rank_y, 0).
-export_type([
progress/0,
agent_stream_progress/0
]).
%%--------------------------------------------------------------------
%% API
%%--------------------------------------------------------------------
%%--------------------------------------------------------------------
%% new
%% @doc Create a fresh shared-subscriptions state for a new session.
%% Initializes the agent (the component talking to remote shared
%% subscription leaders) and an empty map of scheduled actions.
-spec new(opts()) -> t().
new(Opts) ->
    Agent = emqx_persistent_session_ds_shared_subs_agent:new(agent_opts(Opts)),
    #{agent => Agent, scheduled_actions => #{}}.
%%--------------------------------------------------------------------
%% open
%% @doc Initialize shared-subscriptions state when an existing session is
%% reopened. Collects the shared subscriptions persisted in the session
%% state, hands them to a freshly opened agent, and revokes all previously
%% leased shared streams (the new agent will lease streams afresh).
-spec open(emqx_persistent_session_ds_state:t(), opts()) ->
    {ok, emqx_persistent_session_ds_state:t(), t()}.
open(S0, Opts) ->
    %% Gather the persisted shared subscriptions, converted to the
    %% representation the agent expects.
    SharedSubscriptions = fold_shared_subs(
        fun(#share{} = ShareTopicFilter, Sub, Acc) ->
            [{ShareTopicFilter, to_agent_subscription(S0, Sub)} | Acc]
        end,
        [],
        S0
    ),
    Agent = emqx_persistent_session_ds_shared_subs_agent:open(
        SharedSubscriptions, agent_opts(Opts)
    ),
    SharedSubS = #{agent => Agent, scheduled_actions => #{}},
    %% Mark all previously leased shared streams as unsubscribed.
    S1 = revoke_all_streams(S0),
    {ok, S1, SharedSubS}.
%%--------------------------------------------------------------------
%% on_subscribe
%% @doc Handle a client SUBSCRIBE to a shared topic filter. Looks up the
%% subscription in the persisted session state and dispatches to creation
%% (not found) or update (found).
-spec on_subscribe(
    share_topic_filter(),
    emqx_types:subopts(),
    emqx_persistent_session_ds:session()
) -> {ok, emqx_persistent_session_ds_state:t(), t()} | {error, emqx_types:reason_code()}.
on_subscribe(#share{} = ShareTopicFilter, SubOpts, #{s := S} = Session) ->
    Subscription = emqx_persistent_session_ds_state:get_subscription(ShareTopicFilter, S),
    on_subscribe(Subscription, ShareTopicFilter, SubOpts, Session).
%%--------------------------------------------------------------------
%% on_subscribe internal functions
%% No existing subscription: create one, subject to the session's
%% subscription quota.
on_subscribe(undefined, ShareTopicFilter, SubOpts, #{props := Props, s := S} = Session) ->
    #{max_subscriptions := Max} = Props,
    NSubs = emqx_persistent_session_ds_state:n_subscriptions(S),
    if
        NSubs < Max ->
            create_new_subscription(ShareTopicFilter, SubOpts, Session);
        true ->
            {error, ?RC_QUOTA_EXCEEDED}
    end;
%% Subscription already exists: update its parameters in place.
on_subscribe(Existing, ShareTopicFilter, SubOpts, Session) ->
    update_subscription(Existing, ShareTopicFilter, SubOpts, Session).
%% Create a brand-new shared subscription: ask the agent whether the
%% subscription is possible, register the route (locally and in the
%% external broker), persist the subscription record, and schedule the
%% actual agent-side subscribe.
-dialyzer({nowarn_function, create_new_subscription/3}).
create_new_subscription(#share{topic = TopicFilter, group = Group} = ShareTopicFilter, SubOpts, #{
    id := SessionId,
    s := S0,
    shared_sub_s := #{agent := Agent} = SharedSubS0,
    props := Props
}) ->
    case
        emqx_persistent_session_ds_shared_subs_agent:can_subscribe(
            Agent, ShareTopicFilter, SubOpts
        )
    of
        ok ->
            %% Make this session discoverable as a destination for the
            %% shared topic filter.
            ok = emqx_persistent_session_ds_router:do_add_route(TopicFilter, #share_dest{
                session_id = SessionId, group = Group
            }),
            _ = emqx_external_broker:add_persistent_shared_route(TopicFilter, Group, SessionId),
            #{upgrade_qos := UpgradeQoS} = Props,
            %% Allocate ids for the subscription and for its (immutable)
            %% subscription-state record.
            {SubId, S1} = emqx_persistent_session_ds_state:new_id(S0),
            {SStateId, S2} = emqx_persistent_session_ds_state:new_id(S1),
            SState = #{
                parent_subscription => SubId, upgrade_qos => UpgradeQoS, subopts => SubOpts
            },
            S3 = emqx_persistent_session_ds_state:put_subscription_state(
                SStateId, SState, S2
            ),
            Subscription = #{
                id => SubId,
                current_state => SStateId,
                start_time => now_ms()
            },
            S = emqx_persistent_session_ds_state:put_subscription(
                ShareTopicFilter, Subscription, S3
            ),
            %% Defer the agent-side subscribe: it may have to wait for
            %% in-flight streams of a previous subscription to be acked.
            SharedSubS = schedule_subscribe(SharedSubS0, ShareTopicFilter, SubOpts),
            {ok, S, SharedSubS};
        {error, _} = Error ->
            Error
    end.
%% Update an existing shared subscription with (possibly) new options.
%% If the effective subscription state is unchanged this is a no-op;
%% otherwise a new subscription-state record is stored and the
%% subscription is pointed at it.
update_subscription(
    #{current_state := SStateId0, id := SubId} = Sub0, ShareTopicFilter, SubOpts, #{
        s := S0, shared_sub_s := SharedSubS, props := Props
    }
) ->
    #{upgrade_qos := UpgradeQoS} = Props,
    SState = #{parent_subscription => SubId, upgrade_qos => UpgradeQoS, subopts => SubOpts},
    case emqx_persistent_session_ds_state:get_subscription_state(SStateId0, S0) of
        SState ->
            %% Client resubscribed with the same parameters:
            {ok, S0, SharedSubS};
        _ ->
            %% Subscription parameters changed:
            {SStateId, S1} = emqx_persistent_session_ds_state:new_id(S0),
            S2 = emqx_persistent_session_ds_state:put_subscription_state(
                SStateId, SState, S1
            ),
            Sub = Sub0#{current_state => SStateId},
            S = emqx_persistent_session_ds_state:put_subscription(ShareTopicFilter, Sub, S2),
            {ok, S, SharedSubS}
    end.
%% Either subscribe via the agent right away (no pending action for this
%% topic filter), or override a pending scheduled action so that the
%% subscribe happens once the awaited streams are fully acked.
-dialyzer({nowarn_function, schedule_subscribe/3}).
schedule_subscribe(
    #{agent := Agent0, scheduled_actions := ScheduledActions0} = SharedSubS0,
    ShareTopicFilter,
    SubOpts
) ->
    case ScheduledActions0 of
        #{ShareTopicFilter := ScheduledAction} ->
            %% A pending action exists (e.g. an earlier unsubscribe still
            %% waiting for acks): retarget it to a subscribe, preserving
            %% the streams it is waiting for.
            ScheduledActions1 = ScheduledActions0#{
                ShareTopicFilter => ScheduledAction#{type => {?schedule_subscribe, SubOpts}}
            },
            ?tp(debug, shared_subs_schedule_subscribe_override, #{
                share_topic_filter => ShareTopicFilter,
                new_type => {?schedule_subscribe, SubOpts},
                old_action => format_schedule_action(ScheduledAction)
            }),
            SharedSubS0#{scheduled_actions := ScheduledActions1};
        _ ->
            %% Nothing pending: notify the agent immediately.
            ?tp(debug, shared_subs_schedule_subscribe_new, #{
                share_topic_filter => ShareTopicFilter, subopts => SubOpts
            }),
            Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_subscribe(
                Agent0, ShareTopicFilter, SubOpts
            ),
            SharedSubS0#{agent => Agent1}
    end.
%%--------------------------------------------------------------------
%% on_unsubscribe
%% @doc Handle a client UNSUBSCRIBE from a shared topic filter. Removes
%% the route and the persisted subscription, then schedules the agent-side
%% unsubscribe (deferred until all leased streams are fully acked — see
%% the module doc about scheduled actions).
-spec on_unsubscribe(
    emqx_persistent_session_ds:id(),
    share_topic_filter(),
    emqx_persistent_session_ds_state:t(),
    t()
) ->
    {ok, emqx_persistent_session_ds_state:t(), t(), emqx_persistent_session_ds:subscription()}
    | {error, emqx_types:reason_code()}.
on_unsubscribe(
    SessionId, #share{topic = TopicFilter, group = Group} = ShareTopicFilter, S0, SharedSubS0
) ->
    case lookup(ShareTopicFilter, S0) of
        undefined ->
            {error, ?RC_NO_SUBSCRIPTION_EXISTED};
        #{id := SubId} = Subscription ->
            ?tp(persistent_session_ds_subscription_delete, #{
                session_id => SessionId, share_topic_filter => ShareTopicFilter
            }),
            %% Stop routing this shared topic to the session.
            _ = emqx_external_broker:delete_persistent_shared_route(TopicFilter, Group, SessionId),
            ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, #share_dest{
                session_id = SessionId, group = Group
            }),
            S = emqx_persistent_session_ds_state:del_subscription(ShareTopicFilter, S0),
            SharedSubS = schedule_unsubscribe(S, SharedSubS0, SubId, ShareTopicFilter),
            {ok, S, SharedSubS, Subscription}
    end.
%%--------------------------------------------------------------------
%% on_unsubscribe internal functions
%% Schedule the agent-side unsubscribe for a topic filter. If an action is
%% already pending for this filter, retarget it to an unsubscribe (keeping
%% the streams it waits for); otherwise record a new action that waits for
%% all streams of the removed subscription to be fully acked.
schedule_unsubscribe(
    S, #{scheduled_actions := Actions0} = SharedSubS0, UnsubscribedSubId, ShareTopicFilter
) ->
    case maps:find(ShareTopicFilter, Actions0) of
        {ok, Pending} ->
            Overridden = Pending#{type => ?schedule_unsubscribe},
            ?tp(debug, shared_subs_schedule_unsubscribe_override, #{
                share_topic_filter => ShareTopicFilter,
                new_type => ?schedule_unsubscribe,
                old_action => format_schedule_action(Pending)
            }),
            SharedSubS0#{scheduled_actions := Actions0#{ShareTopicFilter => Overridden}};
        error ->
            StreamKeys = stream_keys_by_sub_id(S, UnsubscribedSubId),
            NewAction = #{
                type => ?schedule_unsubscribe,
                stream_keys_to_wait => StreamKeys,
                progresses => []
            },
            ?tp(debug, shared_subs_schedule_unsubscribe_new, #{
                share_topic_filter => ShareTopicFilter,
                stream_keys => format_stream_keys(StreamKeys)
            }),
            SharedSubS0#{scheduled_actions := Actions0#{ShareTopicFilter => NewAction}}
    end.
%%--------------------------------------------------------------------
%% pre_renew_streams
%% @doc Hook invoked before the session renews its streams. Delegates to
%% the replay handler so that stream progress is reported (and scheduled
%% actions are advanced) first.
-spec pre_renew_streams(emqx_persistent_session_ds_state:t(), t()) ->
    {emqx_persistent_session_ds_state:t(), t()}.
pre_renew_streams(S0, SharedSubS0) ->
    on_streams_replay(S0, SharedSubS0).
%%--------------------------------------------------------------------
%% renew_streams
%% @doc Poll the agent for stream lease/revoke events and apply them to
%% the session state: leased streams are added to the stream table (unless
%% a scheduled action makes them moot), revoked streams are marked
%% unsubscribed.
-spec renew_streams(emqx_persistent_session_ds_state:t(), t()) ->
    {emqx_persistent_session_ds_state:t(), t()}.
renew_streams(S0, #{agent := Agent0, scheduled_actions := ScheduledActions} = SharedSubS0) ->
    {StreamLeaseEvents, Agent1} = emqx_persistent_session_ds_shared_subs_agent:renew_streams(
        Agent0
    ),
    %% Trace only when there is something to report.
    StreamLeaseEvents =/= [] andalso
        ?tp(debug, shared_subs_new_stream_lease_events, #{
            stream_lease_events => format_lease_events(StreamLeaseEvents)
        }),
    S1 = lists:foldl(
        fun
            (#{type := lease} = Event, S) -> accept_stream(Event, S, ScheduledActions);
            (#{type := revoke} = Event, S) -> revoke_stream(Event, S)
        end,
        S0,
        StreamLeaseEvents
    ),
    SharedSubS1 = SharedSubS0#{agent => Agent1},
    {S1, SharedSubS1}.
%%--------------------------------------------------------------------
%% renew_streams internal functions
accept_stream(#{share_topic_filter := ShareTopicFilter} = Event, S, ScheduledActions) ->
    %% A pending (un)subscribe action for this topic filter means the
    %% lease would not be used anyway:
    %% * if a subscribe is pending, the agent is reset and a new lease
    %%   is obtained;
    %% * if an unsubscribe is pending, the connection to the leader is
    %%   dropped.
    %% So do not accept the stream and start replaying it in that case.
    case maps:is_key(ShareTopicFilter, ScheduledActions) of
        true -> S;
        false -> accept_stream(Event, S)
    end.
%% Record a leased stream in the session's stream table, starting its
%% replay from the iterator provided by the leader. A stream is (re)created
%% only if it is unknown or was previously marked unsubscribed.
accept_stream(
    #{
        share_topic_filter := ShareTopicFilter,
        stream := Stream,
        progress := #{iterator := Iterator} = _Progress
    } = _Event,
    S0
) ->
    case emqx_persistent_session_ds_state:get_subscription(ShareTopicFilter, S0) of
        undefined ->
            %% We unsubscribed
            S0;
        #{id := SubId, current_state := SStateId} ->
            Key = {SubId, Stream},
            NeedCreateStream =
                case emqx_persistent_session_ds_state:get_stream(Key, S0) of
                    undefined ->
                        true;
                    #srs{unsubscribed = true} ->
                        true;
                    _SRS ->
                        false
                end,
            case NeedCreateStream of
                true ->
                    %% Fresh replay state; it_begin = it_end = leader's
                    %% iterator, i.e. nothing consumed yet.
                    NewSRS =
                        #srs{
                            rank_x = ?rank_x,
                            rank_y = ?rank_y,
                            it_begin = Iterator,
                            it_end = Iterator,
                            sub_state_id = SStateId
                        },
                    S1 = emqx_persistent_session_ds_state:put_stream(Key, NewSRS, S0),
                    S1;
                false ->
                    S0
            end
    end.
%% Mark a leader-revoked stream as unsubscribed in the session state so it
%% stops being replayed; the record itself is kept until garbage-collected.
revoke_stream(
    #{share_topic_filter := ShareTopicFilter, stream := Stream}, S0
) ->
    case emqx_persistent_session_ds_state:get_subscription(ShareTopicFilter, S0) of
        undefined ->
            %% This should not happen.
            %% Agent should have received unsubscribe callback
            %% and should not have revoked this stream
            S0;
        #{id := SubId} ->
            Key = {SubId, Stream},
            case emqx_persistent_session_ds_state:get_stream(Key, S0) of
                undefined ->
                    S0;
                SRS0 ->
                    SRS1 = SRS0#srs{unsubscribed = true},
                    S1 = emqx_persistent_session_ds_state:put_stream(Key, SRS1, S0),
                    S1
            end
    end.
%%--------------------------------------------------------------------
%% on_streams_replay
%% @doc Periodic replay hook: renew stream leases, report the progress of
%% fully-acked streams to the agent, and advance any scheduled
%% (un)subscribe actions whose awaited streams have completed.
-spec on_streams_replay(
    emqx_persistent_session_ds_state:t(),
    t()
) -> {emqx_persistent_session_ds_state:t(), t()}.
on_streams_replay(S0, SharedSubS0) ->
    {S1, #{agent := Agent0, scheduled_actions := ScheduledActions0} = SharedSubS1} =
        renew_streams(S0, SharedSubS0),
    %% Only fully-acked stream progress is reported here (unacked progress
    %% is sent on disconnect).
    Progresses = all_stream_progresses(S1, Agent0),
    Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_stream_progress(
        Agent0, Progresses
    ),
    {Agent2, ScheduledActions1} = run_scheduled_actions(S1, Agent1, ScheduledActions0),
    SharedSubS2 = SharedSubS1#{
        agent => Agent2,
        scheduled_actions => ScheduledActions1
    },
    {S1, SharedSubS2}.
%%--------------------------------------------------------------------
%% on_streams_replay internal functions
%% Collect progress of fully-acked streams only (NeedUnacked = false).
all_stream_progresses(S, Agent) ->
    NeedUnacked = false,
    all_stream_progresses(S, Agent, NeedUnacked).
%% Fold over all shared-subscription stream states and build a map of
%% ShareTopicFilter => [agent_stream_progress()]. Only started streams are
%% included; unless NeedUnacked is true, streams with unacked messages are
%% skipped (their progress is not reliable yet). The Agent argument is
%% currently unused.
all_stream_progresses(S, _Agent, NeedUnacked) ->
    CommQos1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S),
    CommQos2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S),
    fold_shared_stream_states(
        fun(ShareTopicFilter, Stream, SRS, ProgressesAcc0) ->
            case
                is_stream_started(CommQos1, CommQos2, SRS) and
                    (NeedUnacked or is_stream_fully_acked(CommQos1, CommQos2, SRS))
            of
                true ->
                    StreamProgress = stream_progress(CommQos1, CommQos2, Stream, SRS),
                    maps:update_with(
                        ShareTopicFilter,
                        fun(Progresses) -> [StreamProgress | Progresses] end,
                        [StreamProgress],
                        ProgressesAcc0
                    );
                false ->
                    ProgressesAcc0
            end
        end,
        #{},
        S
    ).
%% Advance every scheduled action: completed ones ({ok, _}) are removed
%% from the map, unfinished ones ({continue, _}) are stored back with
%% updated wait lists/progress. Returns the updated agent and the
%% remaining scheduled actions.
run_scheduled_actions(S, Agent, ScheduledActions) ->
    maps:fold(
        fun(ShareTopicFilter, Action0, {AgentAcc0, ScheduledActionsAcc}) ->
            case run_scheduled_action(S, AgentAcc0, ShareTopicFilter, Action0) of
                {ok, AgentAcc1} ->
                    {AgentAcc1, maps:remove(ShareTopicFilter, ScheduledActionsAcc)};
                {continue, Action1} ->
                    {AgentAcc0, ScheduledActionsAcc#{ShareTopicFilter => Action1}}
            end
        end,
        {Agent, ScheduledActions},
        ScheduledActions
    ).
%% Try to complete a single scheduled action. Streams that became fully
%% acked since the last attempt are moved from the wait list into the
%% accumulated progress. Once nothing is left to wait for, the collected
%% progress is pushed to the agent and the deferred (un)subscribe is
%% executed; otherwise the updated action is returned for a later retry.
run_scheduled_action(
    S,
    Agent0,
    ShareTopicFilter,
    #{type := Type, stream_keys_to_wait := StreamKeysToWait0, progresses := Progresses0} = Action
) ->
    StreamKeysToWait1 = filter_unfinished_streams(S, StreamKeysToWait0),
    %% Streams no longer in the wait list are now fully acked: record
    %% their progress.
    Progresses1 = stream_progresses(S, StreamKeysToWait0 -- StreamKeysToWait1) ++ Progresses0,
    case StreamKeysToWait1 of
        [] ->
            ?tp(debug, shared_subs_schedule_action_complete, #{
                share_topic_filter => ShareTopicFilter,
                progresses => format_stream_progresses(Progresses1),
                type => Type
            }),
            %% Regular progress won't see unsubscribed streams, so we need to
            %% send the progress explicitly.
            Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_stream_progress(
                Agent0, #{ShareTopicFilter => Progresses1}
            ),
            case Type of
                {?schedule_subscribe, SubOpts} ->
                    {ok,
                        emqx_persistent_session_ds_shared_subs_agent:on_subscribe(
                            Agent1, ShareTopicFilter, SubOpts
                        )};
                ?schedule_unsubscribe ->
                    {ok,
                        emqx_persistent_session_ds_shared_subs_agent:on_unsubscribe(
                            Agent1, ShareTopicFilter, Progresses1
                        )}
            end;
        _ ->
            Action1 = Action#{stream_keys_to_wait => StreamKeysToWait1, progresses => Progresses1},
            ?tp(debug, shared_subs_schedule_action_continue, #{
                share_topic_filter => ShareTopicFilter,
                new_action => format_schedule_action(Action1)
            }),
            {continue, Action1}
    end.
%% Keep only the stream keys whose streams still have messages that
%% are not fully acknowledged by the client.
filter_unfinished_streams(S, StreamKeysToWait) ->
    Comm1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S),
    Comm2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S),
    IsUnfinished =
        fun(Key) ->
            case emqx_persistent_session_ds_state:get_stream(Key, S) of
                undefined ->
                    %% This should not happen: we should see any stream
                    %% in completed state before deletion.
                    true;
                SRS ->
                    not is_stream_fully_acked(Comm1, Comm2, SRS)
            end
        end,
    lists:filter(IsUnfinished, StreamKeysToWait).
%% Build progress records for the given stream keys.
stream_progresses(S, StreamKeys) ->
    Comm1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S),
    Comm2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S),
    [
        stream_progress(Comm1, Comm2, Stream, emqx_persistent_session_ds_state:get_stream(Key, S))
     || {_SubId, Stream} = Key <- StreamKeys
    ].
%%--------------------------------------------------------------------
%% on_disconnect
on_disconnect(S0, #{agent := Agent0} = SharedSubS0) ->
    %% Revoke every shared stream and hand the final progress
    %% (including unacked streams) over to the agent; pending
    %% scheduled actions are dropped on disconnect.
    S = revoke_all_streams(S0),
    Progresses = all_stream_progresses(S, Agent0, true),
    Agent = emqx_persistent_session_ds_shared_subs_agent:on_disconnect(Agent0, Progresses),
    {S, SharedSubS0#{agent => Agent, scheduled_actions => #{}}}.
%%--------------------------------------------------------------------
%% on_disconnect helpers
%% Mark every shared-subscription stream as revoked in the session state.
revoke_all_streams(S0) ->
    RevokeF =
        fun(ShareTopicFilter, Stream, _SRS, SAcc) ->
            revoke_stream(#{share_topic_filter => ShareTopicFilter, stream => Stream}, SAcc)
        end,
    fold_shared_stream_states(RevokeF, S0, S0).
%%--------------------------------------------------------------------
%% on_info
-spec on_info(emqx_persistent_session_ds_state:t(), t(), term()) ->
    {emqx_persistent_session_ds_state:t(), t()}.
%% Forward an opaque info message to the shared subs agent.
on_info(S, #{agent := Agent} = SharedSubS, Info) ->
    NewAgent = emqx_persistent_session_ds_shared_subs_agent:on_info(Agent, Info),
    {S, SharedSubS#{agent => NewAgent}}.
%%--------------------------------------------------------------------
%% to_map
-spec to_map(emqx_persistent_session_ds_state:t(), t()) -> map().
%% Render all shared subscriptions as a map for introspection.
to_map(S, _SharedSubS) ->
    Collect =
        fun(ShareTopicFilter, _Sub, Acc) ->
            Acc#{ShareTopicFilter => lookup(ShareTopicFilter, S)}
        end,
    fold_shared_subs(Collect, #{}, S).
%%--------------------------------------------------------------------
%% cold_get_subscription
-spec cold_get_subscription(emqx_persistent_session_ds:id(), share_topic_filter()) ->
    emqx_persistent_session_ds:subscription() | undefined.
%% Read a shared subscription directly from storage (no open session),
%% merging in the subopts of its current subscription state.
cold_get_subscription(SessionId, ShareTopicFilter) ->
    case emqx_persistent_session_ds_state:cold_get_subscription(SessionId, ShareTopicFilter) of
        [Sub = #{current_state := SStateId}] ->
            cold_merge_subopts(SessionId, SStateId, Sub);
        _ ->
            undefined
    end.

%% Merge the stored subscription options into the subscription record;
%% `undefined' when the referenced subscription state is missing.
cold_merge_subopts(SessionId, SStateId, Sub) ->
    case emqx_persistent_session_ds_state:cold_get_subscription_state(SessionId, SStateId) of
        [#{subopts := SubOpts}] ->
            Sub#{subopts => SubOpts};
        _ ->
            undefined
    end.
%%--------------------------------------------------------------------
%% Generic helpers
%%--------------------------------------------------------------------
%% Look up a shared subscription in the session state, merging in the
%% subopts from its current subscription state. `undefined' when the
%% subscription or its state record is missing.
lookup(ShareTopicFilter, S) ->
    Sub = emqx_persistent_session_ds_state:get_subscription(ShareTopicFilter, S),
    case Sub of
        #{current_state := SStateId} ->
            SState = emqx_persistent_session_ds_state:get_subscription_state(SStateId, S),
            case SState of
                #{subopts := SubOpts} -> Sub#{subopts => SubOpts};
                undefined -> undefined
            end;
        undefined ->
            undefined
    end.
%% List the keys of all streams belonging to the given subscription id.
stream_keys_by_sub_id(S, MatchSubId) ->
    emqx_persistent_session_ds_state:fold_streams(
        fun
            ({SubId, _Stream} = StreamKey, _SRS, Acc) when SubId =:= MatchSubId ->
                [StreamKey | Acc];
            (_StreamKey, _SRS, Acc) ->
                Acc
        end,
        [],
        S
    ).
%% Describe the replay position of a single stream. For a fully acked
%% stream the iterator points past the replayed range (it_end);
%% otherwise replay must restart from the beginning (it_begin).
stream_progress(Comm1, Comm2, Stream, SRS) ->
    #srs{it_begin = ItBegin, it_end = ItEnd} = SRS,
    Iterator =
        case is_stream_fully_acked(Comm1, Comm2, SRS) of
            true -> ItEnd;
            false -> ItBegin
        end,
    #{
        stream => Stream,
        progress => #{iterator => Iterator},
        use_finished => is_use_finished(SRS)
    }.
%% Fold over the shared (#share{}) subscriptions only, skipping
%% ordinary topic subscriptions.
fold_shared_subs(Fun, Acc, S) ->
    FilterF =
        fun
            (#share{} = ShareTopicFilter, Sub, AccIn) -> Fun(ShareTopicFilter, Sub, AccIn);
            (_TopicFilter, _Sub, AccIn) -> AccIn
        end,
    emqx_persistent_session_ds_state:fold_subscriptions(FilterF, Acc, S).
%% Fold over the stream states of all shared subscriptions; streams
%% belonging to non-shared subscriptions are skipped.
fold_shared_stream_states(Fun, Acc, S) ->
    %% TODO
    %% Optimize or cache
    TopicFiltersBySubId = shared_topic_filters_by_sub_id(S),
    emqx_persistent_session_ds_state:fold_streams(
        fun({SubId, Stream}, SRS, AccIn) ->
            case TopicFiltersBySubId of
                #{SubId := ShareTopicFilter} ->
                    Fun(ShareTopicFilter, Stream, SRS, AccIn);
                _ ->
                    AccIn
            end
        end,
        Acc,
        S
    ).

%% Index subscription id -> share topic filter for all shared subs.
shared_topic_filters_by_sub_id(S) ->
    fold_shared_subs(
        fun
            (#share{} = ShareTopicFilter, #{id := Id}, AccIn) ->
                AccIn#{Id => ShareTopicFilter};
            (_, _, AccIn) ->
                AccIn
        end,
        #{},
        S
    ).
%% Project the session-level subscription onto the fields the agent
%% needs (currently only start_time).
to_agent_subscription(_S, Subscription) ->
    maps:filter(fun(Key, _Val) -> Key =:= start_time end, Subscription).
%% Options passed down to the shared subs agent implementation.
agent_opts(#{session_id := Id}) ->
    #{session_id => Id}.
-dialyzer({nowarn_function, now_ms/0}).
%% Current system time in milliseconds.
now_ms() ->
    erlang:system_time(millisecond).
%% A stream's "use" is finished once the client has unsubscribed from it.
is_use_finished(#srs{unsubscribed = IsUnsubscribed}) ->
    IsUnsubscribed.
%% A stream is considered started once the committed seqno of either
%% QoS track has reached the first seqno of the stream for that track.
%%
%% NOTE: previously the second comparison bound `last_seqno_qos1' and
%% compared it against the QoS2 committed seqno — wrong QoS track and
%% wrong end of the range; fixed to `first_seqno_qos2'.
is_stream_started(CommQos1, CommQos2, #srs{first_seqno_qos1 = Q1, first_seqno_qos2 = Q2}) ->
    (CommQos1 >= Q1) or (CommQos2 >= Q2).
%% Returns true when every QoS1/QoS2 message of the stream's replay
%% range has been acknowledged by the client.
%%
%% First clause: the repeated bindings (first seqno equals last seqno
%% for both QoS tracks) match streams whose last chunk carried no
%% QoS1/QoS2 messages; such streams are trivially fully acked.
is_stream_fully_acked(_, _, #srs{
    first_seqno_qos1 = Q1, last_seqno_qos1 = Q1, first_seqno_qos2 = Q2, last_seqno_qos2 = Q2
}) ->
    %% Streams where the last chunk doesn't contain any QoS1 and 2
    %% messages are considered fully acked:
    true;
is_stream_fully_acked(Comm1, Comm2, #srs{last_seqno_qos1 = S1, last_seqno_qos2 = S2}) ->
    (Comm1 >= S1) andalso (Comm2 >= S2).
%%--------------------------------------------------------------------
%% Formatters
%%--------------------------------------------------------------------
%% Human-readable rendering of a scheduled action for tracepoints.
format_schedule_action(#{type := Type, progresses := Ps, stream_keys_to_wait := Keys}) ->
    #{
        type => Type,
        progresses => format_stream_progresses(Ps),
        stream_keys_to_wait => format_stream_keys(Keys)
    }.
%% Render a list of stream progresses for logging.
format_stream_progresses(Streams) ->
    [format_stream_progress(Progress) || Progress <- Streams].
%% Render a single stream progress, replacing opaque terms by hashes.
format_stream_progress(#{stream := Stream, progress := Progress} = Info) ->
    Info#{stream := format_opaque(Stream), progress := format_progress(Progress)}.
%% Render a progress map, replacing the opaque iterator by a hash.
format_progress(#{iterator := It} = Progress) ->
    Progress#{iterator := format_opaque(It)}.
%% Render a stream key; the atom `beginning' is passed through as-is.
format_stream_key(StreamKey) ->
    case StreamKey of
        beginning -> beginning;
        {SubId, Stream} -> {SubId, format_opaque(Stream)}
    end.
%% Render a list of stream keys for logging.
format_stream_keys(StreamKeys) ->
    [format_stream_key(Key) || Key <- StreamKeys].
%% Render a list of lease events for logging.
format_lease_events(Events) ->
    [format_lease_event(Event) || Event <- Events].
%% Render a single lease event; `progress' is optional (revoke events
%% carry only the stream).
format_lease_event(#{stream := Stream} = Event0) ->
    Event1 = Event0#{stream := format_opaque(Stream)},
    case Event1 of
        #{progress := Progress} ->
            Event1#{progress := format_progress(Progress)};
        _ ->
            Event1
    end.
%% Short stable fingerprint of an opaque term, for logging only.
format_opaque(Term) ->
    erlang:phash2(Term).

View File

@ -1,138 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_persistent_session_ds_shared_subs_agent).
-include("shared_subs_agent.hrl").
-include("emqx_session.hrl").
-include("session_internals.hrl").
-type session_id() :: emqx_persistent_session_ds:id().
-type subscription() :: #{
start_time := emqx_ds:time()
}.
-type t() :: term().
-type share_topic_filter() :: emqx_persistent_session_ds:share_topic_filter().
-type opts() :: #{
session_id := session_id()
}.
%% TODO
%% This records go through network, we better shrink them
%% * use integer keys
%% * somehow avoid passing stream and topic_filter they both are part of the iterator
-type stream_lease() :: #{
type => lease,
%% Used as "external" subscription_id
share_topic_filter := share_topic_filter(),
stream := emqx_ds:stream(),
iterator := emqx_ds:iterator()
}.
-type stream_revoke() :: #{
type => revoke,
share_topic_filter := share_topic_filter(),
stream := emqx_ds:stream()
}.
-type stream_lease_event() :: stream_lease() | stream_revoke().
-type stream_progress() :: #{
share_topic_filter := share_topic_filter(),
stream := emqx_ds:stream(),
iterator := emqx_ds:iterator(),
use_finished := boolean()
}.
-export_type([
t/0,
subscription/0,
session_id/0,
stream_lease_event/0,
opts/0
]).
-export([
new/1,
open/2,
can_subscribe/3,
on_subscribe/3,
on_unsubscribe/3,
on_stream_progress/2,
on_info/2,
on_disconnect/2,
renew_streams/1
]).
-export([
send/2,
send_after/3
]).
%%--------------------------------------------------------------------
%% Behaviour
%%--------------------------------------------------------------------
-callback new(opts()) -> t().
-callback open([{share_topic_filter(), subscription()}], opts()) -> t().
-callback can_subscribe(t(), share_topic_filter(), emqx_types:subopts()) -> ok | {error, term()}.
-callback on_subscribe(t(), share_topic_filter(), emqx_types:subopts()) -> t().
-callback on_unsubscribe(t(), share_topic_filter(), [stream_progress()]) -> t().
%% NOTE: the progress argument is a map keyed by share topic filter,
%% matching the -spec of on_disconnect/2 in this module and the value
%% the session actually passes (built by all_stream_progresses/3);
%% the previous `[stream_progress()]' type contradicted both.
-callback on_disconnect(t(), #{share_topic_filter() => [stream_progress()]}) -> t().
-callback renew_streams(t()) -> {[stream_lease_event()], t()}.
-callback on_stream_progress(t(), #{share_topic_filter() => [stream_progress()]}) -> t().
-callback on_info(t(), term()) -> t().
%%--------------------------------------------------------------------
%% API
%%--------------------------------------------------------------------
-spec new(opts()) -> t().
%% Create a fresh agent state, delegating to the configured
%% ?shared_subs_agent implementation module.
new(Opts) ->
    ?shared_subs_agent:new(Opts).
-spec open([{share_topic_filter(), subscription()}], opts()) -> t().
%% Re-open an agent for an existing session with its persisted
%% shared subscriptions; delegates to the implementation module.
open(Topics, Opts) ->
    ?shared_subs_agent:open(Topics, Opts).
-spec can_subscribe(t(), share_topic_filter(), emqx_types:subopts()) -> ok | {error, term()}.
%% Ask the implementation whether this shared subscription is allowed.
can_subscribe(Agent, ShareTopicFilter, SubOpts) ->
    ?shared_subs_agent:can_subscribe(Agent, ShareTopicFilter, SubOpts).
-spec on_subscribe(t(), share_topic_filter(), emqx_types:subopts()) -> t().
%% Notify the implementation of a new shared subscription.
on_subscribe(Agent, ShareTopicFilter, SubOpts) ->
    ?shared_subs_agent:on_subscribe(Agent, ShareTopicFilter, SubOpts).
-spec on_unsubscribe(t(), share_topic_filter(), [stream_progress()]) -> t().
%% Notify the implementation of an unsubscribe, handing over the
%% final progress of the released streams.
on_unsubscribe(Agent, ShareTopicFilter, StreamProgresses) ->
    ?shared_subs_agent:on_unsubscribe(Agent, ShareTopicFilter, StreamProgresses).
-spec on_disconnect(t(), #{share_topic_filter() => [stream_progress()]}) -> t().
%% Notify the implementation that the session is disconnecting,
%% handing over the progress of all streams keyed by topic filter.
on_disconnect(Agent, StreamProgresses) ->
    ?shared_subs_agent:on_disconnect(Agent, StreamProgresses).
-spec renew_streams(t()) -> {[stream_lease_event()], t()}.
%% Poll the implementation for new stream lease/revoke events.
renew_streams(Agent) ->
    ?shared_subs_agent:renew_streams(Agent).
-spec on_stream_progress(t(), #{share_topic_filter() => [stream_progress()]}) -> t().
%% Report per-topic-filter replay progress to the implementation.
on_stream_progress(Agent, StreamProgress) ->
    ?shared_subs_agent:on_stream_progress(Agent, StreamProgress).
-spec on_info(t(), term()) -> t().
%% Forward an opaque info message to the implementation.
on_info(Agent, Info) ->
    ?shared_subs_agent:on_info(Agent, Info).
-spec send(pid(), term()) -> term().
%% Deliver a message to a session process, wrapped so the session
%% recognizes it as a shared-subscription agent message.
send(Dest, Msg) ->
    erlang:send(Dest, ?session_message(?shared_sub_message(Msg))).
-spec send_after(non_neg_integer(), pid(), term()) -> reference().
%% Like send/2, but delivered after Time milliseconds; returns the
%% timer reference.
send_after(Time, Dest, Msg) ->
    erlang:send_after(Time, Dest, ?session_message(?shared_sub_message(Msg))).

View File

@ -1,54 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_persistent_session_ds_shared_subs_null_agent).
-include("emqx_mqtt.hrl").
-export([
new/1,
open/2,
can_subscribe/3,
on_subscribe/3,
on_unsubscribe/3,
on_stream_progress/2,
on_info/2,
on_disconnect/2,
renew_streams/1
]).
-behaviour(emqx_persistent_session_ds_shared_subs_agent).
%%--------------------------------------------------------------------
%% API
%%--------------------------------------------------------------------
%% No-op agent: shared subscriptions are unsupported, state is empty.
new(_Opts) ->
    undefined.
%% No-op agent: nothing to restore on session open.
open(_Topics, _Opts) ->
    undefined.
%% Shared subscriptions are disabled in this build: always reject.
can_subscribe(_Agent, _TopicFilter, _SubOpts) ->
    {error, ?RC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED}.
%% No-op: keep the agent state unchanged.
on_subscribe(AgentState, _TopicFilter, _SubOpts) ->
    AgentState.
%% No-op: keep the agent state unchanged.
on_unsubscribe(AgentState, _TopicFilter, _Progresses) ->
    AgentState.
%% No-op: keep the agent state unchanged.
on_disconnect(AgentState, _Progresses) ->
    AgentState.
%% No-op: never produces lease events.
renew_streams(AgentState) ->
    {[], AgentState}.
%% No-op: progress reports are ignored.
on_stream_progress(AgentState, _StreamProgress) ->
    AgentState.
%% No-op: info messages are ignored.
on_info(AgentState, _Info) ->
    AgentState.

View File

@ -39,7 +39,7 @@
-export([get_peername/1, set_peername/2]). -export([get_peername/1, set_peername/2]).
-export([get_protocol/1, set_protocol/2]). -export([get_protocol/1, set_protocol/2]).
-export([new_id/1]). -export([new_id/1]).
-export([get_stream/2, put_stream/3, del_stream/2, fold_streams/3, iter_streams/2, n_streams/1]). -export([get_stream/2, put_stream/3, del_stream/2, fold_streams/3, n_streams/1]).
-export([get_seqno/2, put_seqno/3]). -export([get_seqno/2, put_seqno/3]).
-export([get_rank/2, put_rank/3, del_rank/2, fold_ranks/3]). -export([get_rank/2, put_rank/3, del_rank/2, fold_ranks/3]).
-export([ -export([
@ -66,14 +66,11 @@
n_awaiting_rel/1 n_awaiting_rel/1
]). ]).
-export([iter_next/1]).
-export([make_session_iterator/0, session_iterator_next/2]). -export([make_session_iterator/0, session_iterator_next/2]).
-export_type([ -export_type([
t/0, t/0,
metadata/0, metadata/0,
iter/2,
seqno_type/0, seqno_type/0,
stream_key/0, stream_key/0,
rank_key/0, rank_key/0,
@ -92,8 +89,6 @@
-type message() :: emqx_types:message(). -type message() :: emqx_types:message().
-opaque iter(K, V) :: gb_trees:iter(K, V).
-opaque session_iterator() :: emqx_persistent_session_ds:id() | '$end_of_table'. -opaque session_iterator() :: emqx_persistent_session_ds:id() | '$end_of_table'.
%% Generic key-value wrapper that is used for exporting arbitrary %% Generic key-value wrapper that is used for exporting arbitrary
@ -118,7 +113,7 @@
-type pmap(K, V) :: -type pmap(K, V) ::
#pmap{ #pmap{
table :: atom(), table :: atom(),
cache :: #{K => V} | gb_trees:tree(K, V), cache :: #{K => V},
dirty :: #{K => dirty | del} dirty :: #{K => dirty | del}
}. }.
@ -197,7 +192,7 @@
-endif. -endif.
%%================================================================================ %%================================================================================
%% API functions %% API funcions
%%================================================================================ %%================================================================================
-spec create_tables() -> ok. -spec create_tables() -> ok.
@ -399,9 +394,7 @@ new_id(Rec) ->
get_subscription(TopicFilter, Rec) -> get_subscription(TopicFilter, Rec) ->
gen_get(?subscriptions, TopicFilter, Rec). gen_get(?subscriptions, TopicFilter, Rec).
-spec cold_get_subscription( -spec cold_get_subscription(emqx_persistent_session_ds:id(), emqx_types:topic()) ->
emqx_persistent_session_ds:id(), emqx_types:topic() | emqx_types:share()
) ->
[emqx_persistent_session_ds_subs:subscription()]. [emqx_persistent_session_ds_subs:subscription()].
cold_get_subscription(SessionId, Topic) -> cold_get_subscription(SessionId, Topic) ->
kv_pmap_read(?subscription_tab, SessionId, Topic). kv_pmap_read(?subscription_tab, SessionId, Topic).
@ -483,14 +476,6 @@ del_stream(Key, Rec) ->
fold_streams(Fun, Acc, Rec) -> fold_streams(Fun, Acc, Rec) ->
gen_fold(?streams, Fun, Acc, Rec). gen_fold(?streams, Fun, Acc, Rec).
-spec iter_streams(_StartAfter :: stream_key() | beginning, t()) ->
iter(stream_key(), emqx_persistent_session_ds:stream_state()).
iter_streams(After, Rec) ->
%% NOTE
%% No special handling for `beginning', as it always compares less
%% than any `stream_key()'.
gen_iter_after(?streams, After, Rec).
-spec n_streams(t()) -> non_neg_integer(). -spec n_streams(t()) -> non_neg_integer().
n_streams(Rec) -> n_streams(Rec) ->
gen_size(?streams, Rec). gen_size(?streams, Rec).
@ -549,12 +534,6 @@ n_awaiting_rel(Rec) ->
%% %%
-spec iter_next(iter(K, V)) -> {K, V, iter(K, V)} | none.
iter_next(It0) ->
gen_iter_next(It0).
%%
-spec make_session_iterator() -> session_iterator(). -spec make_session_iterator() -> session_iterator().
make_session_iterator() -> make_session_iterator() ->
mnesia:dirty_first(?session_tab). mnesia:dirty_first(?session_tab).
@ -622,14 +601,6 @@ gen_size(Field, Rec) ->
check_sequence(Rec), check_sequence(Rec),
pmap_size(maps:get(Field, Rec)). pmap_size(maps:get(Field, Rec)).
gen_iter_after(Field, After, Rec) ->
check_sequence(Rec),
pmap_iter_after(After, maps:get(Field, Rec)).
gen_iter_next(It) ->
%% NOTE: Currently, gbt iterators is the only type of iterators.
gbt_iter_next(It).
-spec update_pmaps(fun((pmap(_K, _V) | undefined, atom()) -> term()), map()) -> map(). -spec update_pmaps(fun((pmap(_K, _V) | undefined, atom()) -> term()), map()) -> map().
update_pmaps(Fun, Map) -> update_pmaps(Fun, Map) ->
lists:foldl( lists:foldl(
@ -648,7 +619,7 @@ update_pmaps(Fun, Map) ->
%% This functtion should be ran in a transaction. %% This functtion should be ran in a transaction.
-spec pmap_open(atom(), emqx_persistent_session_ds:id()) -> pmap(_K, _V). -spec pmap_open(atom(), emqx_persistent_session_ds:id()) -> pmap(_K, _V).
pmap_open(Table, SessionId) -> pmap_open(Table, SessionId) ->
Clean = cache_from_list(Table, kv_pmap_restore(Table, SessionId)), Clean = maps:from_list(kv_pmap_restore(Table, SessionId)),
#pmap{ #pmap{
table = Table, table = Table,
cache = Clean, cache = Clean,
@ -656,29 +627,29 @@ pmap_open(Table, SessionId) ->
}. }.
-spec pmap_get(K, pmap(K, V)) -> V | undefined. -spec pmap_get(K, pmap(K, V)) -> V | undefined.
pmap_get(K, #pmap{table = Table, cache = Cache}) -> pmap_get(K, #pmap{cache = Cache}) ->
cache_get(Table, K, Cache). maps:get(K, Cache, undefined).
-spec pmap_put(K, V, pmap(K, V)) -> pmap(K, V). -spec pmap_put(K, V, pmap(K, V)) -> pmap(K, V).
pmap_put(K, V, Pmap = #pmap{table = Table, dirty = Dirty, cache = Cache}) -> pmap_put(K, V, Pmap = #pmap{dirty = Dirty, cache = Cache}) ->
Pmap#pmap{ Pmap#pmap{
cache = cache_put(Table, K, V, Cache), cache = maps:put(K, V, Cache),
dirty = Dirty#{K => dirty} dirty = Dirty#{K => dirty}
}. }.
-spec pmap_del(K, pmap(K, V)) -> pmap(K, V). -spec pmap_del(K, pmap(K, V)) -> pmap(K, V).
pmap_del( pmap_del(
Key, Key,
Pmap = #pmap{table = Table, dirty = Dirty, cache = Cache} Pmap = #pmap{dirty = Dirty, cache = Cache}
) -> ) ->
Pmap#pmap{ Pmap#pmap{
cache = cache_remove(Table, Key, Cache), cache = maps:remove(Key, Cache),
dirty = Dirty#{Key => del} dirty = Dirty#{Key => del}
}. }.
-spec pmap_fold(fun((K, V, A) -> A), A, pmap(K, V)) -> A. -spec pmap_fold(fun((K, V, A) -> A), A, pmap(K, V)) -> A.
pmap_fold(Fun, Acc, #pmap{table = Table, cache = Cache}) -> pmap_fold(Fun, Acc, #pmap{cache = Cache}) ->
cache_fold(Table, Fun, Acc, Cache). maps:fold(Fun, Acc, Cache).
-spec pmap_commit(emqx_persistent_session_ds:id(), pmap(K, V)) -> pmap(K, V). -spec pmap_commit(emqx_persistent_session_ds:id(), pmap(K, V)) -> pmap(K, V).
pmap_commit( pmap_commit(
@ -689,7 +660,7 @@ pmap_commit(
(K, del) -> (K, del) ->
kv_pmap_delete(Tab, SessionId, K); kv_pmap_delete(Tab, SessionId, K);
(K, dirty) -> (K, dirty) ->
V = cache_get(Tab, K, Cache), V = maps:get(K, Cache),
kv_pmap_persist(Tab, SessionId, K, V) kv_pmap_persist(Tab, SessionId, K, V)
end, end,
Dirty Dirty
@ -699,110 +670,13 @@ pmap_commit(
}. }.
-spec pmap_format(pmap(_K, _V)) -> map(). -spec pmap_format(pmap(_K, _V)) -> map().
pmap_format(#pmap{table = Table, cache = Cache}) -> pmap_format(#pmap{cache = Cache}) ->
cache_format(Table, Cache).
-spec pmap_size(pmap(_K, _V)) -> non_neg_integer().
pmap_size(#pmap{table = Table, cache = Cache}) ->
cache_size(Table, Cache).
pmap_iter_after(After, #pmap{table = Table, cache = Cache}) ->
%% NOTE: Only valid for gbt-backed PMAPs.
gbt = cache_data_type(Table),
gbt_iter_after(After, Cache).
%%
cache_data_type(?stream_tab) -> gbt;
cache_data_type(_Table) -> map.
cache_from_list(?stream_tab, L) ->
gbt_from_list(L);
cache_from_list(_Table, L) ->
maps:from_list(L).
cache_get(?stream_tab, K, Cache) ->
gbt_get(K, Cache, undefined);
cache_get(_Table, K, Cache) ->
maps:get(K, Cache, undefined).
cache_put(?stream_tab, K, V, Cache) ->
gbt_put(K, V, Cache);
cache_put(_Table, K, V, Cache) ->
maps:put(K, V, Cache).
cache_remove(?stream_tab, K, Cache) ->
gbt_remove(K, Cache);
cache_remove(_Table, K, Cache) ->
maps:remove(K, Cache).
cache_fold(?stream_tab, Fun, Acc, Cache) ->
gbt_fold(Fun, Acc, Cache);
cache_fold(_Table, Fun, Acc, Cache) ->
maps:fold(Fun, Acc, Cache).
cache_format(?stream_tab, Cache) ->
gbt_format(Cache);
cache_format(_Table, Cache) ->
Cache. Cache.
cache_size(?stream_tab, Cache) -> -spec pmap_size(pmap(_K, _V)) -> non_neg_integer().
gbt_size(Cache); pmap_size(#pmap{cache = Cache}) ->
cache_size(_Table, Cache) ->
maps:size(Cache). maps:size(Cache).
%% PMAP Cache implementation backed by `gb_trees'.
%% Supports iteration starting from specific key.
gbt_from_list(L) ->
lists:foldl(
fun({K, V}, Acc) -> gb_trees:insert(K, V, Acc) end,
gb_trees:empty(),
L
).
gbt_get(K, Cache, undefined) ->
case gb_trees:lookup(K, Cache) of
none -> undefined;
{_, V} -> V
end.
gbt_put(K, V, Cache) ->
gb_trees:enter(K, V, Cache).
gbt_remove(K, Cache) ->
gb_trees:delete_any(K, Cache).
gbt_format(Cache) ->
gb_trees:to_list(Cache).
gbt_fold(Fun, Acc, Cache) ->
It = gb_trees:iterator(Cache),
gbt_fold_iter(Fun, Acc, It).
gbt_fold_iter(Fun, Acc, It0) ->
case gb_trees:next(It0) of
{K, V, It} ->
gbt_fold_iter(Fun, Fun(K, V, Acc), It);
_ ->
Acc
end.
gbt_size(Cache) ->
gb_trees:size(Cache).
gbt_iter_after(After, Cache) ->
It0 = gb_trees:iterator_from(After, Cache),
case gb_trees:next(It0) of
{After, _, It} ->
It;
_ ->
It0
end.
gbt_iter_next(It) ->
gb_trees:next(It).
%% Functions dealing with set tables: %% Functions dealing with set tables:
kv_persist(Tab, SessionId, Val0) -> kv_persist(Tab, SessionId, Val0) ->

View File

@ -16,8 +16,7 @@
-module(emqx_persistent_session_ds_stream_scheduler). -module(emqx_persistent_session_ds_stream_scheduler).
%% API: %% API:
-export([iter_next_streams/2, next_stream/1]). -export([find_new_streams/1, find_replay_streams/1, is_fully_acked/2]).
-export([find_replay_streams/1, is_fully_acked/2]).
-export([renew_streams/1, on_unsubscribe/2]). -export([renew_streams/1, on_unsubscribe/2]).
%% behavior callbacks: %% behavior callbacks:
@ -36,29 +35,6 @@
%% Type declarations %% Type declarations
%%================================================================================ %%================================================================================
-type stream_key() :: emqx_persistent_session_ds_state:stream_key().
-type stream_state() :: emqx_persistent_session_ds:stream_state().
%% Restartable iterator with a filter and an iteration limit.
-record(iter, {
limit :: non_neg_integer(),
filter,
it,
it_cont
}).
-type iter(K, V, IterInner) :: #iter{
filter :: fun((K, V) -> boolean()),
it :: IterInner,
it_cont :: IterInner
}.
-type iter_stream() :: iter(
stream_key(),
stream_state(),
emqx_persistent_session_ds_state:iter(stream_key(), stream_state())
).
%%================================================================================ %%================================================================================
%% API functions %% API functions
%%================================================================================ %%================================================================================
@ -94,9 +70,9 @@ find_replay_streams(S) ->
%% %%
%% This function is non-detereministic: it randomizes the order of %% This function is non-detereministic: it randomizes the order of
%% streams to ensure fair replay of different topics. %% streams to ensure fair replay of different topics.
-spec iter_next_streams(_LastVisited :: stream_key(), emqx_persistent_session_ds_state:t()) -> -spec find_new_streams(emqx_persistent_session_ds_state:t()) ->
iter_stream(). [{emqx_persistent_session_ds_state:stream_key(), emqx_persistent_session_ds:stream_state()}].
iter_next_streams(LastVisited, S) -> find_new_streams(S) ->
%% FIXME: this function is currently very sensitive to the %% FIXME: this function is currently very sensitive to the
%% consistency of the packet IDs on both broker and client side. %% consistency of the packet IDs on both broker and client side.
%% %%
@ -111,44 +87,23 @@ iter_next_streams(LastVisited, S) ->
%% after timeout?) %% after timeout?)
Comm1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S), Comm1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S),
Comm2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S), Comm2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S),
Filter = fun(_Key, Stream) -> is_fetchable(Comm1, Comm2, Stream) end, shuffle(
#iter{ emqx_persistent_session_ds_state:fold_streams(
%% Limit the iteration to one round over all streams: fun
limit = emqx_persistent_session_ds_state:n_streams(S), (_Key, #srs{it_end = end_of_stream}, Acc) ->
%% Filter out the streams not eligible for fetching: Acc;
filter = Filter, (Key, Stream, Acc) ->
%% Start the iteration right after the last visited stream: case is_fully_acked(Comm1, Comm2, Stream) andalso not Stream#srs.unsubscribed of
it = emqx_persistent_session_ds_state:iter_streams(LastVisited, S), true ->
%% Restart the iteration from the beginning: [{Key, Stream} | Acc];
it_cont = emqx_persistent_session_ds_state:iter_streams(beginning, S) false ->
}. Acc
end
-spec next_stream(iter_stream()) -> {stream_key(), stream_state(), iter_stream()} | none. end,
next_stream(#iter{limit = 0}) -> [],
none; S
next_stream(ItStream0 = #iter{limit = N, filter = Filter, it = It0, it_cont = ItCont}) -> )
case emqx_persistent_session_ds_state:iter_next(It0) of ).
{Key, Stream, It} ->
ItStream = ItStream0#iter{it = It, limit = N - 1},
case Filter(Key, Stream) of
true ->
{Key, Stream, ItStream};
false ->
next_stream(ItStream)
end;
none when It0 =/= ItCont ->
%% Restart the iteration from the beginning:
ItStream = ItStream0#iter{it = ItCont},
next_stream(ItStream);
none ->
%% No point in restarting the iteration, `ItCont` is empty:
none
end.
is_fetchable(_Comm1, _Comm2, #srs{it_end = end_of_stream}) ->
false;
is_fetchable(Comm1, Comm2, #srs{unsubscribed = Unsubscribed} = Stream) ->
is_fully_acked(Comm1, Comm2, Stream) andalso not Unsubscribed.
%% @doc This function makes the session aware of the new streams. %% @doc This function makes the session aware of the new streams.
%% %%
@ -172,12 +127,7 @@ renew_streams(S0) ->
S1 = remove_unsubscribed_streams(S0), S1 = remove_unsubscribed_streams(S0),
S2 = remove_fully_replayed_streams(S1), S2 = remove_fully_replayed_streams(S1),
S3 = update_stream_subscription_state_ids(S2), S3 = update_stream_subscription_state_ids(S2),
%% For shared subscriptions, the streams are populated by emqx_persistent_session_ds_subs:fold(
%% `emqx_persistent_session_ds_shared_subs`.
%% TODO
%% Move discovery of proper streams
%% out of the scheduler for complete symmetry?
fold_proper_subscriptions(
fun fun
(Key, #{start_time := StartTime, id := SubId, current_state := SStateId}, Acc) -> (Key, #{start_time := StartTime, id := SubId, current_state := SStateId}, Acc) ->
TopicFilter = emqx_topic:words(Key), TopicFilter = emqx_topic:words(Key),
@ -256,6 +206,9 @@ ensure_iterator(TopicFilter, StartTime, SubId, SStateId, {{RankX, RankY}, Stream
Key = {SubId, Stream}, Key = {SubId, Stream},
case emqx_persistent_session_ds_state:get_stream(Key, S) of case emqx_persistent_session_ds_state:get_stream(Key, S) of
undefined -> undefined ->
?SLOG(debug, #{
msg => new_stream, key => Key, stream => Stream
}),
case emqx_ds:make_iterator(?PERSISTENT_MESSAGE_DB, Stream, TopicFilter, StartTime) of case emqx_ds:make_iterator(?PERSISTENT_MESSAGE_DB, Stream, TopicFilter, StartTime) of
{ok, Iterator} -> {ok, Iterator} ->
NewStreamState = #srs{ NewStreamState = #srs{
@ -455,12 +408,15 @@ is_fully_acked(_, _, #srs{
is_fully_acked(Comm1, Comm2, #srs{last_seqno_qos1 = S1, last_seqno_qos2 = S2}) -> is_fully_acked(Comm1, Comm2, #srs{last_seqno_qos1 = S1, last_seqno_qos2 = S2}) ->
(Comm1 >= S1) andalso (Comm2 >= S2). (Comm1 >= S1) andalso (Comm2 >= S2).
fold_proper_subscriptions(Fun, Acc, S) -> -spec shuffle([A]) -> [A].
emqx_persistent_session_ds_state:fold_subscriptions( shuffle(L0) ->
fun L1 = lists:map(
(#share{}, _Sub, Acc0) -> Acc0; fun(A) ->
(TopicFilter, Sub, Acc0) -> Fun(TopicFilter, Sub, Acc0) %% maybe topic/stream prioritization could be introduced here?
{rand:uniform(), A}
end, end,
Acc, L0
S ),
). L2 = lists:sort(L1),
{_, L} = lists:unzip(L2),
L.

View File

@ -30,7 +30,8 @@
on_session_drop/2, on_session_drop/2,
gc/1, gc/1,
lookup/2, lookup/2,
to_map/1 to_map/1,
fold/3
]). ]).
%% Management API: %% Management API:
@ -92,7 +93,6 @@ on_subscribe(TopicFilter, SubOpts, #{id := SessionId, s := S0, props := Props})
case emqx_persistent_session_ds_state:n_subscriptions(S0) < MaxSubscriptions of case emqx_persistent_session_ds_state:n_subscriptions(S0) < MaxSubscriptions of
true -> true ->
ok = emqx_persistent_session_ds_router:do_add_route(TopicFilter, SessionId), ok = emqx_persistent_session_ds_router:do_add_route(TopicFilter, SessionId),
_ = emqx_external_broker:add_persistent_route(TopicFilter, SessionId),
{SubId, S1} = emqx_persistent_session_ds_state:new_id(S0), {SubId, S1} = emqx_persistent_session_ds_state:new_id(S0),
{SStateId, S2} = emqx_persistent_session_ds_state:new_id(S1), {SStateId, S2} = emqx_persistent_session_ds_state:new_id(S1),
SState = #{ SState = #{
@ -155,13 +155,12 @@ on_unsubscribe(SessionId, TopicFilter, S0) ->
#{session_id => SessionId, topic_filter => TopicFilter}, #{session_id => SessionId, topic_filter => TopicFilter},
ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, SessionId) ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, SessionId)
), ),
_ = emqx_external_broker:delete_persistent_route(TopicFilter, SessionId),
{ok, emqx_persistent_session_ds_state:del_subscription(TopicFilter, S0), Subscription} {ok, emqx_persistent_session_ds_state:del_subscription(TopicFilter, S0), Subscription}
end. end.
-spec on_session_drop(emqx_persistent_session_ds:id(), emqx_persistent_session_ds_state:t()) -> ok. -spec on_session_drop(emqx_persistent_session_ds:id(), emqx_persistent_session_ds_state:t()) -> ok.
on_session_drop(SessionId, S0) -> on_session_drop(SessionId, S0) ->
_ = fold_proper_subscriptions( fold(
fun(TopicFilter, _Subscription, S) -> fun(TopicFilter, _Subscription, S) ->
case on_unsubscribe(SessionId, TopicFilter, S) of case on_unsubscribe(SessionId, TopicFilter, S) of
{ok, S1, _} -> S1; {ok, S1, _} -> S1;
@ -170,14 +169,10 @@ on_session_drop(SessionId, S0) ->
end, end,
S0, S0,
S0 S0
), ).
ok.
%% @doc Remove subscription states that don't have a parent, and that %% @doc Remove subscription states that don't have a parent, and that
%% don't have any unacked messages. %% don't have any unacked messages:
%% TODO
%% This function collects shared subs as well
%% Move to a separate module to keep symmetry?
-spec gc(emqx_persistent_session_ds_state:t()) -> emqx_persistent_session_ds_state:t(). -spec gc(emqx_persistent_session_ds_state:t()) -> emqx_persistent_session_ds_state:t().
gc(S0) -> gc(S0) ->
%% Create a set of subscription states IDs referenced either by a %% Create a set of subscription states IDs referenced either by a
@ -215,7 +210,7 @@ gc(S0) ->
S0 S0
). ).
%% @doc Lookup a subscription and merge it with its current state: %% @doc Fold over active subscriptions:
-spec lookup(emqx_persistent_session_ds:topic_filter(), emqx_persistent_session_ds_state:t()) -> -spec lookup(emqx_persistent_session_ds:topic_filter(), emqx_persistent_session_ds_state:t()) ->
emqx_persistent_session_ds:subscription() | undefined. emqx_persistent_session_ds:subscription() | undefined.
lookup(TopicFilter, S) -> lookup(TopicFilter, S) ->
@ -235,12 +230,22 @@ lookup(TopicFilter, S) ->
%% purpose: %% purpose:
-spec to_map(emqx_persistent_session_ds_state:t()) -> map(). -spec to_map(emqx_persistent_session_ds_state:t()) -> map().
to_map(S) -> to_map(S) ->
fold_proper_subscriptions( fold(
fun(TopicFilter, _, Acc) -> Acc#{TopicFilter => lookup(TopicFilter, S)} end, fun(TopicFilter, _, Acc) -> Acc#{TopicFilter => lookup(TopicFilter, S)} end,
#{}, #{},
S S
). ).
%% @doc Fold over active subscriptions:
-spec fold(
fun((emqx_types:topic(), emqx_persistent_session_ds:subscription(), Acc) -> Acc),
Acc,
emqx_persistent_session_ds_state:t()
) ->
Acc.
fold(Fun, Acc, S) ->
emqx_persistent_session_ds_state:fold_subscriptions(Fun, Acc, S).
-spec cold_get_subscription(emqx_persistent_session_ds:id(), emqx_types:topic()) -> -spec cold_get_subscription(emqx_persistent_session_ds:id(), emqx_types:topic()) ->
emqx_persistent_session_ds:subscription() | undefined. emqx_persistent_session_ds:subscription() | undefined.
cold_get_subscription(SessionId, Topic) -> cold_get_subscription(SessionId, Topic) ->
@ -262,15 +267,5 @@ cold_get_subscription(SessionId, Topic) ->
%% Internal functions %% Internal functions
%%================================================================================ %%================================================================================
fold_proper_subscriptions(Fun, Acc, S) ->
emqx_persistent_session_ds_state:fold_subscriptions(
fun
(#share{}, _Sub, Acc0) -> Acc0;
(TopicFilter, Sub, Acc0) -> Fun(TopicFilter, Sub, Acc0)
end,
Acc,
S
).
now_ms() -> now_ms() ->
erlang:system_time(millisecond). erlang:system_time(millisecond).

View File

@ -21,7 +21,7 @@
-record(ps_route, { -record(ps_route, {
topic :: binary(), topic :: binary(),
dest :: emqx_persistent_session_ds_router:dest() | '_' dest :: emqx_persistent_session_ds:id() | '_'
}). }).
-record(ps_routeidx, { -record(ps_routeidx, {

View File

@ -71,11 +71,4 @@
sub_state_id :: emqx_persistent_session_ds_subs:subscription_state_id() sub_state_id :: emqx_persistent_session_ds_subs:subscription_state_id()
}). }).
%% (Erlang) messages that session should forward to the
%% shared subscription handler.
-record(shared_sub_message, {
message :: term()
}).
-define(shared_sub_message(MSG), #shared_sub_message{message = MSG}).
-endif. -endif.

View File

@ -1,45 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-ifndef(SHARED_SUBS_AGENT_HRL).
-define(SHARED_SUBS_AGENT_HRL, true).
-ifdef(EMQX_RELEASE_EDITION).
-if(?EMQX_RELEASE_EDITION == ee).
%% agent from BSL app
-ifdef(TEST).
-define(shared_subs_agent, emqx_ds_shared_sub_agent).
%% clause of -ifdef(TEST).
-else.
%% Till full implementation we need to dispach to the null agent.
%% It will report "not implemented" error for attempts to use shared subscriptions.
-define(shared_subs_agent, emqx_persistent_session_ds_shared_subs_null_agent).
% -define(shared_subs_agent, emqx_ds_shared_sub_agent).
%% end of -ifdef(TEST).
-endif.
%% clause of -if(?EMQX_RELEASE_EDITION == ee).
-else.
-define(shared_subs_agent, emqx_persistent_session_ds_shared_subs_null_agent).
%% end of -if(?EMQX_RELEASE_EDITION == ee).
-endif.
%% clause of -ifdef(EMQX_RELEASE_EDITION).
-else.
-define(shared_subs_agent, emqx_persistent_session_ds_shared_subs_null_agent).
%% end of -ifdef(EMQX_RELEASE_EDITION).
-endif.
-endif.

View File

@ -1,40 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2017-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_post_upgrade).
%% Example of a hot upgrade callback function.
%% PR#12765
% -export([
% pr12765_update_stats_timer/1,
% pr20000_ensure_sup_started/3
% ]).
%% Please ensure that every callback function is reentrant.
%% This way, users can attempt upgrade multiple times if an issue arises.
%%
% pr12765_update_stats_timer(_FromVsn) ->
% emqx_stats:update_interval(broker_stats, fun emqx_broker_helper:stats_fun/0).
%
% pr20000_ensure_sup_started(_FromVsn, "5.6.1" ++ _, ChildSpec) ->
% ChildId = maps:get(id, ChildSpec),
% case supervisor:terminate_child(emqx_sup, ChildId) of
% ok -> supervisor:delete_child(emqx_sup, ChildId);
% Error -> Error
% end,
% supervisor:start_child(emqx_sup, ChildSpec);
% pr20000_ensure_sup_started(_FromVsn, _TargetVsn, _) ->
% ok.

View File

@ -62,7 +62,7 @@
streams := [{pid(), quicer:stream_handle()}], streams := [{pid(), quicer:stream_handle()}],
%% New stream opts %% New stream opts
stream_opts := map(), stream_opts := map(),
%% If connection is resumed from session ticket %% If conneciton is resumed from session ticket
is_resumed => boolean(), is_resumed => boolean(),
%% mqtt message serializer config %% mqtt message serializer config
serialize => undefined, serialize => undefined,
@ -70,8 +70,8 @@
}. }.
-type cb_ret() :: quicer_lib:cb_ret(). -type cb_ret() :: quicer_lib:cb_ret().
%% @doc Data streams initializations are started in parallel with control streams, data streams are blocked %% @doc Data streams initializions are started in parallel with control streams, data streams are blocked
%% for the activation from control stream after it is accepted as a legit connection. %% for the activation from control stream after it is accepted as a legit conneciton.
%% For security, the initial number of allowed data streams from client should be limited by %% For security, the initial number of allowed data streams from client should be limited by
%% 'peer_bidi_stream_count` & 'peer_unidi_stream_count` %% 'peer_bidi_stream_count` & 'peer_unidi_stream_count`
-spec activate_data_streams(pid(), { -spec activate_data_streams(pid(), {
@ -80,7 +80,7 @@
activate_data_streams(ConnOwner, {PS, Serialize, Channel}) -> activate_data_streams(ConnOwner, {PS, Serialize, Channel}) ->
gen_server:call(ConnOwner, {activate_data_streams, {PS, Serialize, Channel}}, infinity). gen_server:call(ConnOwner, {activate_data_streams, {PS, Serialize, Channel}}, infinity).
%% @doc connection owner init callback %% @doc conneciton owner init callback
-spec init(map()) -> {ok, cb_state()}. -spec init(map()) -> {ok, cb_state()}.
init(#{stream_opts := SOpts} = S) when is_list(SOpts) -> init(#{stream_opts := SOpts} = S) when is_list(SOpts) ->
init(S#{stream_opts := maps:from_list(SOpts)}); init(S#{stream_opts := maps:from_list(SOpts)});

View File

@ -0,0 +1,42 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2017-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_relup).
%% NOTE: DO NOT remove this `-include`.
%% We use this to force this module to be upgraded every release.
-include("emqx_release.hrl").
-export([
post_release_upgrade/2,
post_release_downgrade/2
]).
-define(INFO(FORMAT), io:format("[emqx_relup] " ++ FORMAT ++ "~n")).
-define(INFO(FORMAT, ARGS), io:format("[emqx_relup] " ++ FORMAT ++ "~n", ARGS)).
%% What to do after upgraded from an old release vsn.
post_release_upgrade(FromRelVsn, _) ->
?INFO("emqx has been upgraded from ~s to ~s!", [FromRelVsn, emqx_release:version()]),
reload_components().
%% What to do after downgraded to an old release vsn.
post_release_downgrade(ToRelVsn, _) ->
?INFO("emqx has been downgraded from ~s to ~s!", [emqx_release:version(), ToRelVsn]),
reload_components().
reload_components() ->
ok.

View File

@ -107,14 +107,7 @@
unused = [] :: nil() unused = [] :: nil()
}). }).
-define(dest_patterns(NodeOrExtDest), -define(node_patterns(Node), [Node, {'_', Node}]).
case is_atom(NodeOrExtDest) of
%% node
true -> [NodeOrExtDest, {'_', NodeOrExtDest}];
%% external destination
false -> [NodeOrExtDest]
end
).
-define(UNSUPPORTED, unsupported). -define(UNSUPPORTED, unsupported).
@ -314,13 +307,13 @@ print_routes(Topic) ->
). ).
-spec cleanup_routes(node()) -> ok. -spec cleanup_routes(node()) -> ok.
cleanup_routes(NodeOrExtDest) -> cleanup_routes(Node) ->
cleanup_routes(get_schema_vsn(), NodeOrExtDest). cleanup_routes(get_schema_vsn(), Node).
cleanup_routes(v2, NodeOrExtDest) -> cleanup_routes(v2, Node) ->
cleanup_routes_v2(NodeOrExtDest); cleanup_routes_v2(Node);
cleanup_routes(v1, NodeOrExtDest) -> cleanup_routes(v1, Node) ->
cleanup_routes_v1(NodeOrExtDest). cleanup_routes_v1(Node).
-spec foldl_routes(fun((emqx_types:route(), Acc) -> Acc), Acc) -> Acc. -spec foldl_routes(fun((emqx_types:route(), Acc) -> Acc), Acc) -> Acc.
foldl_routes(FoldFun, AccIn) -> foldl_routes(FoldFun, AccIn) ->
@ -437,19 +430,19 @@ has_route_v1(Topic, Dest) ->
has_route_tab_entry(Topic, Dest) -> has_route_tab_entry(Topic, Dest) ->
[] =/= ets:match(?ROUTE_TAB, #route{topic = Topic, dest = Dest}). [] =/= ets:match(?ROUTE_TAB, #route{topic = Topic, dest = Dest}).
cleanup_routes_v1(NodeOrExtDest) -> cleanup_routes_v1(Node) ->
?with_fallback( ?with_fallback(
lists:foreach( lists:foreach(
fun(Pattern) -> fun(Pattern) ->
throw_unsupported(mria:match_delete(?ROUTE_TAB, make_route_rec_pat(Pattern))) throw_unsupported(mria:match_delete(?ROUTE_TAB, make_route_rec_pat(Pattern)))
end, end,
?dest_patterns(NodeOrExtDest) ?node_patterns(Node)
), ),
cleanup_routes_v1_fallback(NodeOrExtDest) cleanup_routes_v1_fallback(Node)
). ).
cleanup_routes_v1_fallback(NodeOrExtDest) -> cleanup_routes_v1_fallback(Node) ->
Patterns = [make_route_rec_pat(P) || P <- ?dest_patterns(NodeOrExtDest)], Patterns = [make_route_rec_pat(P) || P <- ?node_patterns(Node)],
mria:transaction(?ROUTE_SHARD, fun() -> mria:transaction(?ROUTE_SHARD, fun() ->
[ [
mnesia:delete_object(?ROUTE_TAB, Route, write) mnesia:delete_object(?ROUTE_TAB, Route, write)
@ -519,7 +512,7 @@ lookup_routes_v2(Topic) ->
case emqx_topic:wildcard(Topic) of case emqx_topic:wildcard(Topic) of
true -> true ->
Pat = #routeidx{entry = emqx_topic_index:make_key(Topic, '$1')}, Pat = #routeidx{entry = emqx_topic_index:make_key(Topic, '$1')},
[#route{topic = Topic, dest = Dest} || [Dest] <- ets:match(?ROUTE_TAB_FILTERS, Pat)]; [Dest || [Dest] <- ets:match(?ROUTE_TAB_FILTERS, Pat)];
false -> false ->
lookup_route_tab(Topic) lookup_route_tab(Topic)
end. end.
@ -532,7 +525,7 @@ has_route_v2(Topic, Dest) ->
has_route_tab_entry(Topic, Dest) has_route_tab_entry(Topic, Dest)
end. end.
cleanup_routes_v2(NodeOrExtDest) -> cleanup_routes_v2(Node) ->
?with_fallback( ?with_fallback(
lists:foreach( lists:foreach(
fun(Pattern) -> fun(Pattern) ->
@ -544,18 +537,18 @@ cleanup_routes_v2(NodeOrExtDest) ->
), ),
throw_unsupported(mria:match_delete(?ROUTE_TAB, make_route_rec_pat(Pattern))) throw_unsupported(mria:match_delete(?ROUTE_TAB, make_route_rec_pat(Pattern)))
end, end,
?dest_patterns(NodeOrExtDest) ?node_patterns(Node)
), ),
cleanup_routes_v2_fallback(NodeOrExtDest) cleanup_routes_v2_fallback(Node)
). ).
cleanup_routes_v2_fallback(NodeOrExtDest) -> cleanup_routes_v2_fallback(Node) ->
%% NOTE %% NOTE
%% No point in transaction here because all the operations on filters table are dirty. %% No point in transaction here because all the operations on filters table are dirty.
ok = ets:foldl( ok = ets:foldl(
fun(#routeidx{entry = K}, ok) -> fun(#routeidx{entry = K}, ok) ->
case get_dest_node(emqx_topic_index:get_id(K)) of case get_dest_node(emqx_topic_index:get_id(K)) of
NodeOrExtDest -> Node ->
mria:dirty_delete(?ROUTE_TAB_FILTERS, K); mria:dirty_delete(?ROUTE_TAB_FILTERS, K);
_ -> _ ->
ok ok
@ -567,7 +560,7 @@ cleanup_routes_v2_fallback(NodeOrExtDest) ->
ok = ets:foldl( ok = ets:foldl(
fun(#route{dest = Dest} = Route, ok) -> fun(#route{dest = Dest} = Route, ok) ->
case get_dest_node(Dest) of case get_dest_node(Dest) of
NodeOrExtDest -> Node ->
mria:dirty_delete_object(?ROUTE_TAB, Route); mria:dirty_delete_object(?ROUTE_TAB, Route);
_ -> _ ->
ok ok
@ -577,8 +570,6 @@ cleanup_routes_v2_fallback(NodeOrExtDest) ->
?ROUTE_TAB ?ROUTE_TAB
). ).
get_dest_node({external, _} = ExtDest) ->
ExtDest;
get_dest_node({_, Node}) -> get_dest_node({_, Node}) ->
Node; Node;
get_dest_node(Node) -> get_dest_node(Node) ->

View File

@ -21,17 +21,11 @@
-behaviour(gen_server). -behaviour(gen_server).
-export([start_link/1]).
-export([start_link/2]). -export([start_link/2]).
-export([start_link_pooled/2]).
-export([push/4]). -export([push/4]).
-export([push/5]).
-export([wait/1]). -export([wait/1]).
-export([suspend/1]).
-export([activate/1]).
-export([stats/0]). -export([stats/0]).
-export([ -export([
@ -44,15 +38,6 @@
-type action() :: add | delete. -type action() :: add | delete.
-type options() :: #{
max_batch_size => pos_integer(),
min_sync_interval => non_neg_integer(),
error_delay => non_neg_integer(),
error_retry_interval => non_neg_integer(),
initial_state => activated | suspended,
batch_handler => {module(), _Function :: atom(), _Args :: list()}
}.
-define(POOL, router_syncer_pool). -define(POOL, router_syncer_pool).
-define(MAX_BATCH_SIZE, 1000). -define(MAX_BATCH_SIZE, 1000).
@ -92,23 +77,13 @@
%% %%
-spec start_link(options()) -> -spec start_link(atom(), pos_integer()) ->
{ok, pid()} | {error, _Reason}.
start_link(Options) ->
gen_server:start_link(?MODULE, mk_state(Options), []).
-spec start_link(_Name, options()) ->
{ok, pid()} | {error, _Reason}.
start_link(Name, Options) ->
gen_server:start_link(Name, ?MODULE, mk_state(Options), []).
-spec start_link_pooled(atom(), pos_integer()) ->
{ok, pid()}. {ok, pid()}.
start_link_pooled(Pool, Id) -> start_link(Pool, Id) ->
gen_server:start_link( gen_server:start_link(
{local, emqx_utils:proc_name(?MODULE, Id)}, {local, emqx_utils:proc_name(?MODULE, Id)},
?MODULE, ?MODULE,
{Pool, Id, mk_state(#{})}, [Pool, Id],
[] []
). ).
@ -118,16 +93,9 @@ when
Opts :: #{reply => pid()}. Opts :: #{reply => pid()}.
push(Action, Topic, Dest, Opts) -> push(Action, Topic, Dest, Opts) ->
Worker = gproc_pool:pick_worker(?POOL, Topic), Worker = gproc_pool:pick_worker(?POOL, Topic),
push(Worker, Action, Topic, Dest, Opts).
-spec push(_Ref, action(), emqx_types:topic(), emqx_router:dest(), Opts) ->
ok | _WaitRef :: reference()
when
Opts :: #{reply => pid()}.
push(Ref, Action, Topic, Dest, Opts) ->
Prio = designate_prio(Action, Opts), Prio = designate_prio(Action, Opts),
Context = mk_push_context(Opts), Context = mk_push_context(Opts),
_ = gproc:send(Ref, ?PUSH(Prio, {Action, Topic, Dest, Context})), _ = erlang:send(Worker, ?PUSH(Prio, {Action, Topic, Dest, Context})),
case Context of case Context of
[{MRef, _}] -> [{MRef, _}] ->
MRef; MRef;
@ -166,16 +134,6 @@ mk_push_context(_) ->
%% %%
%% Suspended syncer receives and accumulates route ops but doesn't apply them
%% until it is activated.
suspend(Ref) ->
gen_server:call(Ref, suspend, infinity).
activate(Ref) ->
gen_server:call(Ref, activate, infinity).
%%
-type stats() :: #{ -type stats() :: #{
size := non_neg_integer(), size := non_neg_integer(),
n_add := non_neg_integer(), n_add := non_neg_integer(),
@ -191,34 +149,10 @@ stats() ->
%% %%
mk_state(Options) -> init([Pool, Id]) ->
#{
state => maps:get(initial_state, Options, active),
stash => stash_new(),
retry_timer => undefined,
max_batch_size => maps:get(max_batch_size, Options, ?MAX_BATCH_SIZE),
min_sync_interval => maps:get(min_sync_interval, Options, ?MIN_SYNC_INTERVAL),
error_delay => maps:get(error_delay, Options, ?ERROR_DELAY),
error_retry_interval => maps:get(error_retry_interval, Options, ?ERROR_RETRY_INTERVAL),
batch_handler => maps:get(batch_handler, Options, default)
}.
%%
init({Pool, Id, State}) ->
true = gproc_pool:connect_worker(Pool, {Pool, Id}), true = gproc_pool:connect_worker(Pool, {Pool, Id}),
{ok, State}; {ok, #{stash => stash_new()}}.
init(State) ->
{ok, State}.
handle_call(suspend, _From, State) ->
NState = State#{state := suspended},
{reply, ok, NState};
handle_call(activate, _From, State = #{state := suspended}) ->
NState = run_batch_loop([], State#{state := active}),
{reply, ok, NState};
handle_call(activate, _From, State) ->
{reply, ok, State};
handle_call(stats, _From, State = #{stash := Stash}) -> handle_call(stats, _From, State = #{stash := Stash}) ->
{reply, stash_stats(Stash), State}; {reply, stash_stats(Stash), State};
handle_call(_Call, _From, State) -> handle_call(_Call, _From, State) ->
@ -228,11 +162,11 @@ handle_cast(_Msg, State) ->
{noreply, State}. {noreply, State}.
handle_info({timeout, _TRef, retry}, State) -> handle_info({timeout, _TRef, retry}, State) ->
NState = run_batch_loop([], State#{retry_timer := undefined}), NState = run_batch_loop([], maps:remove(retry_timer, State)),
{noreply, NState}; {noreply, NState};
handle_info(Push = ?PUSH(_, _), State = #{min_sync_interval := MSI}) -> handle_info(Push = ?PUSH(_, _), State) ->
%% NOTE: Wait a bit to collect potentially overlapping operations. %% NOTE: Wait a bit to collect potentially overlapping operations.
ok = timer:sleep(MSI), ok = timer:sleep(?MIN_SYNC_INTERVAL),
NState = run_batch_loop([Push], State), NState = run_batch_loop([Push], State),
{noreply, NState}. {noreply, NState}.
@ -241,16 +175,12 @@ terminate(_Reason, _State) ->
%% %%
run_batch_loop(Incoming, State = #{stash := Stash0, state := suspended}) -> run_batch_loop(Incoming, State = #{stash := Stash0}) ->
Stash1 = stash_add(Incoming, Stash0), Stash1 = stash_add(Incoming, Stash0),
Stash2 = stash_drain(Stash1), Stash2 = stash_drain(Stash1),
State#{stash := Stash2}; {Batch, Stash3} = mk_batch(Stash2),
run_batch_loop(Incoming, State = #{stash := Stash0, max_batch_size := MBS}) ->
Stash1 = stash_add(Incoming, Stash0),
Stash2 = stash_drain(Stash1),
{Batch, Stash3} = mk_batch(Stash2, MBS),
?tp_ignore_side_effects_in_prod(router_syncer_new_batch, batch_stats(Batch, Stash3)), ?tp_ignore_side_effects_in_prod(router_syncer_new_batch, batch_stats(Batch, Stash3)),
case run_batch(Batch, State) of case run_batch(Batch) of
Status = #{} -> Status = #{} ->
ok = send_replies(Status, Batch), ok = send_replies(Status, Batch),
NState = cancel_retry_timer(State#{stash := Stash3}), NState = cancel_retry_timer(State#{stash := Stash3}),
@ -273,37 +203,37 @@ run_batch_loop(Incoming, State = #{stash := Stash0, max_batch_size := MBS}) ->
batch => batch_stats(Batch, Stash3) batch => batch_stats(Batch, Stash3)
}), }),
NState = State#{stash := Stash2}, NState = State#{stash := Stash2},
ok = error_cooldown(NState), ok = timer:sleep(?ERROR_DELAY),
ensure_retry_timer(NState) ensure_retry_timer(NState)
end. end.
error_cooldown(#{error_delay := ED}) ->
timer:sleep(ED).
ensure_retry_timer(State = #{retry_timer := undefined, error_retry_interval := ERI}) ->
TRef = emqx_utils:start_timer(ERI, retry),
State#{retry_timer := TRef};
ensure_retry_timer(State = #{retry_timer := _TRef}) -> ensure_retry_timer(State = #{retry_timer := _TRef}) ->
State. State;
ensure_retry_timer(State) ->
TRef = emqx_utils:start_timer(?ERROR_RETRY_INTERVAL, retry),
State#{retry_timer => TRef}.
cancel_retry_timer(State = #{retry_timer := TRef}) -> cancel_retry_timer(State = #{retry_timer := TRef}) ->
ok = emqx_utils:cancel_timer(TRef), ok = emqx_utils:cancel_timer(TRef),
State#{retry_timer := undefined}; maps:remove(retry_timer, State);
cancel_retry_timer(State) -> cancel_retry_timer(State) ->
State. State.
%% %%
mk_batch(Stash, BatchSize) when map_size(Stash) =< BatchSize -> mk_batch(Stash) when map_size(Stash) =< ?MAX_BATCH_SIZE ->
%% This is perfect situation, we just use stash as batch w/o extra reallocations. %% This is perfect situation, we just use stash as batch w/o extra reallocations.
{Stash, stash_new()}; {Stash, stash_new()};
mk_batch(Stash, BatchSize) -> mk_batch(Stash) ->
%% Take a subset of stashed operations to form a batch. %% Take a subset of stashed operations to form a batch.
%% Note that stash is an unordered map, it's not a queue. The order of operations is %% Note that stash is an unordered map, it's not a queue. The order of operations is
%% not preserved strictly, only loosely, because of how we start from high priority %% not preserved strictly, only loosely, because of how we start from high priority
%% operations and go down to low priority ones. This might cause some operations to %% operations and go down to low priority ones. This might cause some operations to
%% stay in stash for unfairly long time, when there are many high priority operations. %% stay in stash for unfairly long time, when there are many high priority operations.
%% However, it's unclear how likely this is to happen in practice. %% However, it's unclear how likely this is to happen in practice.
mk_batch(Stash, ?MAX_BATCH_SIZE).
mk_batch(Stash, BatchSize) ->
mk_batch(?PRIO_HI, #{}, BatchSize, Stash). mk_batch(?PRIO_HI, #{}, BatchSize, Stash).
mk_batch(Prio, Batch, SizeLeft, Stash) -> mk_batch(Prio, Batch, SizeLeft, Stash) ->
@ -348,12 +278,10 @@ replyctx_send(Result, RefsPids) ->
%% %%
run_batch(Empty, _State) when Empty =:= #{} -> run_batch(Batch) when map_size(Batch) > 0 ->
#{};
run_batch(Batch, #{batch_handler := default}) ->
catch emqx_router:do_batch(Batch); catch emqx_router:do_batch(Batch);
run_batch(Batch, #{batch_handler := {Module, Function, Args}}) -> run_batch(_Empty) ->
erlang:apply(Module, Function, [Batch | Args]). #{}.
%% %%

View File

@ -137,7 +137,7 @@ maybe_badrpc(Delivery) ->
Delivery. Delivery.
max_client_num() -> max_client_num() ->
emqx:get_config([rpc, client_num], ?DefaultClientNum). emqx:get_config([rpc, tcp_client_num], ?DefaultClientNum).
-spec unwrap_erpc(emqx_rpc:erpc(A) | [emqx_rpc:erpc(A)]) -> A | {error, _Err} | list(). -spec unwrap_erpc(emqx_rpc:erpc(A) | [emqx_rpc:erpc(A)]) -> A | {error, _Err} | list().
unwrap_erpc(Res) when is_list(Res) -> unwrap_erpc(Res) when is_list(Res) ->

View File

@ -351,7 +351,6 @@ fields("authz_cache") ->
#{ #{
default => true, default => true,
required => true, required => true,
importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(fields_cache_enable) desc => ?DESC(fields_cache_enable)
} }
)}, )},
@ -388,7 +387,6 @@ fields("flapping_detect") ->
boolean(), boolean(),
#{ #{
default => false, default => false,
%% importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(flapping_detect_enable) desc => ?DESC(flapping_detect_enable)
} }
)}, )},
@ -425,7 +423,6 @@ fields("force_shutdown") ->
boolean(), boolean(),
#{ #{
default => true, default => true,
importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(force_shutdown_enable) desc => ?DESC(force_shutdown_enable)
} }
)}, )},
@ -455,7 +452,6 @@ fields("overload_protection") ->
boolean(), boolean(),
#{ #{
desc => ?DESC(overload_protection_enable), desc => ?DESC(overload_protection_enable),
%% importance => ?IMPORTANCE_NO_DOC,
default => false default => false
} }
)}, )},
@ -516,11 +512,7 @@ fields("force_gc") ->
{"enable", {"enable",
sc( sc(
boolean(), boolean(),
#{ #{default => true, desc => ?DESC(force_gc_enable)}
default => true,
importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(force_gc_enable)
}
)}, )},
{"count", {"count",
sc( sc(
@ -1673,7 +1665,6 @@ fields("durable_sessions") ->
sc( sc(
boolean(), #{ boolean(), #{
desc => ?DESC(durable_sessions_enable), desc => ?DESC(durable_sessions_enable),
%% importance => ?IMPORTANCE_NO_DOC,
default => false default => false
} }
)}, )},
@ -1897,7 +1888,6 @@ base_listener(Bind) ->
#{ #{
default => true, default => true,
aliases => [enabled], aliases => [enabled],
importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(fields_listener_enabled) desc => ?DESC(fields_listener_enabled)
} }
)}, )},
@ -1980,6 +1970,10 @@ zones_field_schema() ->
} }
). ).
desc("persistent_session_store") ->
"Settings for message persistence.";
desc("persistent_session_builtin") ->
"Settings for the built-in storage engine of persistent messages.";
desc("persistent_table_mria_opts") -> desc("persistent_table_mria_opts") ->
"Tuning options for the mria table."; "Tuning options for the mria table.";
desc("stats") -> desc("stats") ->
@ -2426,7 +2420,6 @@ client_ssl_opts_schema(Defaults) ->
boolean(), boolean(),
#{ #{
default => false, default => false,
%% importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(client_ssl_opts_schema_enable) desc => ?DESC(client_ssl_opts_schema_enable)
} }
)}, )},
@ -3650,9 +3643,9 @@ mqtt_general() ->
)}, )},
{"retry_interval", {"retry_interval",
sc( sc(
hoconsc:union([infinity, timeout_duration()]), timeout_duration(),
#{ #{
default => infinity, default => <<"30s">>,
desc => ?DESC(mqtt_retry_interval) desc => ?DESC(mqtt_retry_interval)
} }
)}, )},

View File

@ -30,7 +30,7 @@
-define(LOADER, emqx_secret_loader). -define(LOADER, emqx_secret_loader).
%%================================================================================ %%================================================================================
%% API functions %% API funcions
%%================================================================================ %%================================================================================
%% @doc Wrap a term in a secret closure. %% @doc Wrap a term in a secret closure.

View File

@ -83,7 +83,6 @@
-export([ -export([
deliver/3, deliver/3,
handle_info/2,
handle_timeout/3, handle_timeout/3,
disconnect/3, disconnect/3,
terminate/3 terminate/3
@ -189,10 +188,6 @@
-callback destroy(t() | clientinfo()) -> ok. -callback destroy(t() | clientinfo()) -> ok.
-callback clear_will_message(t()) -> t(). -callback clear_will_message(t()) -> t().
-callback publish_will_message_now(t(), message()) -> t(). -callback publish_will_message_now(t(), message()) -> t().
-callback handle_timeout(clientinfo(), common_timer_name() | custom_timer_name(), t()) ->
{ok, replies(), t()}
| {ok, replies(), timeout(), t()}.
-callback handle_info(term(), t()) -> t().
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Create a Session %% Create a Session
@ -489,14 +484,6 @@ enrich_subopts(_Opt, _V, Msg, _) ->
handle_timeout(ClientInfo, Timer, Session) -> handle_timeout(ClientInfo, Timer, Session) ->
?IMPL(Session):handle_timeout(ClientInfo, Timer, Session). ?IMPL(Session):handle_timeout(ClientInfo, Timer, Session).
%%--------------------------------------------------------------------
%% Generic Messages
%%--------------------------------------------------------------------
-spec handle_info(term(), t()) -> t().
handle_info(Info, Session) ->
?IMPL(Session):handle_info(Info, Session).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-spec ensure_timer(custom_timer_name(), timeout(), map()) -> -spec ensure_timer(custom_timer_name(), timeout(), map()) ->
@ -614,7 +601,7 @@ should_keep(MsgDeliver) ->
not is_banned_msg(MsgDeliver). not is_banned_msg(MsgDeliver).
is_banned_msg(#message{from = ClientId}) -> is_banned_msg(#message{from = ClientId}) ->
emqx_banned:check_clientid(ClientId). [] =/= emqx_banned:look_up({clientid, ClientId}).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------

View File

@ -87,7 +87,6 @@
deliver/3, deliver/3,
replay/3, replay/3,
handle_timeout/3, handle_timeout/3,
handle_info/2,
disconnect/2, disconnect/2,
terminate/2 terminate/2
]). ]).
@ -598,23 +597,14 @@ handle_timeout(ClientInfo, retry_delivery, Session) ->
handle_timeout(ClientInfo, expire_awaiting_rel, Session) -> handle_timeout(ClientInfo, expire_awaiting_rel, Session) ->
expire(ClientInfo, Session). expire(ClientInfo, Session).
%%--------------------------------------------------------------------
%% Geneic messages
%%--------------------------------------------------------------------
-spec handle_info(term(), session()) -> session().
handle_info(Msg, Session) ->
?SLOG(warning, #{msg => emqx_session_mem_unknown_message, message => Msg}),
Session.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Retry Delivery %% Retry Delivery
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-spec retry(clientinfo(), session()) -> -spec retry(clientinfo(), session()) ->
{ok, replies(), session()} | {ok, replies(), timeout(), session()}. {ok, replies(), session()}.
retry(ClientInfo, Session = #session{inflight = Inflight, retry_interval = Interval}) -> retry(ClientInfo, Session = #session{inflight = Inflight}) ->
case emqx_inflight:is_empty(Inflight) orelse Interval =:= infinity of case emqx_inflight:is_empty(Inflight) of
true -> true ->
{ok, [], Session}; {ok, [], Session};
false -> false ->

View File

@ -421,12 +421,8 @@ init_monitors() ->
handle_call({subscribe, Group, Topic, SubPid}, _From, State = #state{pmon = PMon}) -> handle_call({subscribe, Group, Topic, SubPid}, _From, State = #state{pmon = PMon}) ->
mria:dirty_write(?SHARED_SUBSCRIPTION, record(Group, Topic, SubPid)), mria:dirty_write(?SHARED_SUBSCRIPTION, record(Group, Topic, SubPid)),
case ets:member(?SHARED_SUBSCRIBER, {Group, Topic}) of case ets:member(?SHARED_SUBSCRIBER, {Group, Topic}) of
true -> true -> ok;
ok; false -> ok = emqx_router:do_add_route(Topic, {Group, node()})
false ->
ok = emqx_router:do_add_route(Topic, {Group, node()}),
_ = emqx_external_broker:add_shared_route(Topic, Group),
ok
end, end,
ok = maybe_insert_alive_tab(SubPid), ok = maybe_insert_alive_tab(SubPid),
ok = maybe_insert_round_robin_count({Group, Topic}), ok = maybe_insert_round_robin_count({Group, Topic}),
@ -549,9 +545,7 @@ is_alive_sub(Pid) ->
delete_route_if_needed({Group, Topic} = GroupTopic) -> delete_route_if_needed({Group, Topic} = GroupTopic) ->
if_no_more_subscribers(GroupTopic, fun() -> if_no_more_subscribers(GroupTopic, fun() ->
ok = emqx_router:do_delete_route(Topic, {Group, node()}), ok = emqx_router:do_delete_route(Topic, {Group, node()})
_ = emqx_external_broker:delete_shared_route(Topic, Group),
ok
end). end).
get_default_shared_subscription_strategy() -> get_default_shared_subscription_strategy() ->

View File

@ -589,14 +589,6 @@ ensure_valid_options(Options, Versions) ->
ensure_valid_options([], _, Acc) -> ensure_valid_options([], _, Acc) ->
lists:reverse(Acc); lists:reverse(Acc);
ensure_valid_options([{K, undefined} | T], Versions, Acc) when
K =:= crl_check;
K =:= crl_cache
->
%% Note: we must set crl options to `undefined' to unset them. Otherwise,
%% `esockd' will retain such options when `esockd:merge_opts/2' is called and the SSL
%% options were previously enabled.
ensure_valid_options(T, Versions, [{K, undefined} | Acc]);
ensure_valid_options([{_, undefined} | T], Versions, Acc) -> ensure_valid_options([{_, undefined} | T], Versions, Acc) ->
ensure_valid_options(T, Versions, Acc); ensure_valid_options(T, Versions, Acc);
ensure_valid_options([{_, ""} | T], Versions, Acc) -> ensure_valid_options([{_, ""} | T], Versions, Acc) ->

View File

@ -33,8 +33,7 @@
feed_var/3, feed_var/3,
systop/1, systop/1,
parse/1, parse/1,
parse/2, parse/2
intersection/2
]). ]).
-export([ -export([
@ -53,8 +52,6 @@
((C =:= '#' orelse C =:= <<"#">>) andalso REST =/= []) ((C =:= '#' orelse C =:= <<"#">>) andalso REST =/= [])
). ).
-define(IS_WILDCARD(W), W =:= '+' orelse W =:= '#').
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% APIs %% APIs
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -101,55 +98,6 @@ match(_, ['#']) ->
match(_, _) -> match(_, _) ->
false. false.
%% @doc Finds an intersection between two topics, two filters or a topic and a filter.
%% The function is commutative: reversing parameters doesn't affect the returned value.
%% Two topics intersect only when they are equal.
%% The intersection of a topic and a filter is always either the topic itself or false (no intersection).
%% The intersection of two filters is either false or a new topic filter that would match only those topics,
%% that can be matched by both input filters.
%% For example, the intersection of "t/global/#" and "t/+/1/+" is "t/global/1/+".
-spec intersection(TopicOrFilter, TopicOrFilter) -> TopicOrFilter | false when
TopicOrFilter :: emqx_types:topic().
intersection(Topic1, Topic2) when is_binary(Topic1), is_binary(Topic2) ->
case intersect_start(words(Topic1), words(Topic2)) of
false -> false;
Intersection -> join(Intersection)
end.
intersect_start([<<"$", _/bytes>> | _], [W | _]) when ?IS_WILDCARD(W) ->
false;
intersect_start([W | _], [<<"$", _/bytes>> | _]) when ?IS_WILDCARD(W) ->
false;
intersect_start(Words1, Words2) ->
intersect(Words1, Words2).
intersect(Words1, ['#']) ->
Words1;
intersect(['#'], Words2) ->
Words2;
intersect([W1], ['+']) ->
[W1];
intersect(['+'], [W2]) ->
[W2];
intersect([W1 | T1], [W2 | T2]) when ?IS_WILDCARD(W1), ?IS_WILDCARD(W2) ->
intersect_join(wildcard_intersection(W1, W2), intersect(T1, T2));
intersect([W | T1], [W | T2]) ->
intersect_join(W, intersect(T1, T2));
intersect([W1 | T1], [W2 | T2]) when ?IS_WILDCARD(W1) ->
intersect_join(W2, intersect(T1, T2));
intersect([W1 | T1], [W2 | T2]) when ?IS_WILDCARD(W2) ->
intersect_join(W1, intersect(T1, T2));
intersect([], []) ->
[];
intersect(_, _) ->
false.
intersect_join(_, false) -> false;
intersect_join(W, Words) -> [W | Words].
wildcard_intersection(W, W) -> W;
wildcard_intersection(_, _) -> '+'.
-spec match_share(Name, Filter) -> boolean() when -spec match_share(Name, Filter) -> boolean() when
Name :: share(), Name :: share(),
Filter :: topic() | share(). Filter :: topic() | share().

View File

@ -23,7 +23,6 @@
-export([delete/3]). -export([delete/3]).
-export([match/2]). -export([match/2]).
-export([matches/3]). -export([matches/3]).
-export([matches_filter/3]).
-export([make_key/2]). -export([make_key/2]).
@ -73,12 +72,6 @@ match(Topic, Tab) ->
matches(Topic, Tab, Opts) -> matches(Topic, Tab, Opts) ->
emqx_trie_search:matches(Topic, make_nextf(Tab), Opts). emqx_trie_search:matches(Topic, make_nextf(Tab), Opts).
%% @doc Match given topic filter against the index and return _all_ matches.
%% If `unique` option is given, return only unique matches by record ID.
-spec matches_filter(emqx_types:topic(), ets:table(), emqx_trie_search:opts()) -> [match(_ID)].
matches_filter(TopicFilter, Tab, Opts) ->
emqx_trie_search:matches_filter(TopicFilter, make_nextf(Tab), Opts).
%% @doc Extract record ID from the match. %% @doc Extract record ID from the match.
-spec get_id(match(ID)) -> ID. -spec get_id(match(ID)) -> ID.
get_id(Key) -> get_id(Key) ->

View File

@ -17,6 +17,7 @@
-include("emqx_mqtt.hrl"). -include("emqx_mqtt.hrl").
-export([format/2]). -export([format/2]).
-export([format_meta_map/1]).
%% logger_formatter:config/0 is not exported. %% logger_formatter:config/0 is not exported.
-type config() :: map(). -type config() :: map().
@ -42,6 +43,10 @@ format(
format(Event, Config) -> format(Event, Config) ->
emqx_logger_textfmt:format(Event, Config). emqx_logger_textfmt:format(Event, Config).
format_meta_map(Meta) ->
Encode = emqx_trace_handler:payload_encode(),
format_meta_map(Meta, Encode).
format_meta_map(Meta, Encode) -> format_meta_map(Meta, Encode) ->
format_meta_map(Meta, Encode, [ format_meta_map(Meta, Encode, [
{packet, fun format_packet/2}, {packet, fun format_packet/2},

View File

@ -99,7 +99,7 @@
-module(emqx_trie_search). -module(emqx_trie_search).
-export([make_key/2, make_pat/2, filter/1]). -export([make_key/2, make_pat/2, filter/1]).
-export([match/2, matches/3, get_id/1, get_topic/1, matches_filter/3]). -export([match/2, matches/3, get_id/1, get_topic/1]).
-export_type([key/1, word/0, words/0, nextf/0, opts/0]). -export_type([key/1, word/0, words/0, nextf/0, opts/0]).
-define(END, '$end_of_table'). -define(END, '$end_of_table').
@ -183,20 +183,9 @@ match(Topic, NextF) ->
matches(Topic, NextF, Opts) -> matches(Topic, NextF, Opts) ->
search(Topic, NextF, Opts). search(Topic, NextF, Opts).
%% @doc Match given topic filter against the index and return _all_ matches.
-spec matches_filter(emqx_types:topic(), nextf(), opts()) -> [key(_)].
matches_filter(TopicFilter, NextF, Opts) ->
search(TopicFilter, NextF, [topic_filter | Opts]).
%% @doc Entrypoint of the search for a given topic. %% @doc Entrypoint of the search for a given topic.
search(Topic, NextF, Opts) -> search(Topic, NextF, Opts) ->
%% A private opt Words = topic_words(Topic),
IsFilter = proplists:get_bool(topic_filter, Opts),
Words =
case IsFilter of
true -> filter_words(Topic);
false -> topic_words(Topic)
end,
Base = base_init(Words), Base = base_init(Words),
ORetFirst = proplists:get_bool(return_first, Opts), ORetFirst = proplists:get_bool(return_first, Opts),
OUnique = proplists:get_bool(unique, Opts), OUnique = proplists:get_bool(unique, Opts),
@ -211,10 +200,8 @@ search(Topic, NextF, Opts) ->
end, end,
Matches = Matches =
case search_new(Words, Base, NextF, Acc0) of case search_new(Words, Base, NextF, Acc0) of
{Cursor, Acc} when not IsFilter -> {Cursor, Acc} ->
match_topics(Topic, Cursor, NextF, Acc); match_topics(Topic, Cursor, NextF, Acc);
{_Cursor, Acc} ->
Acc;
Acc -> Acc ->
Acc Acc
end, end,
@ -288,17 +275,6 @@ compare(['#'], _Words, _) ->
% Closest possible next entries that we must not miss: % Closest possible next entries that we must not miss:
% * a/+/+/d/# (same topic but a different ID) % * a/+/+/d/# (same topic but a different ID)
match_full; match_full;
%% Filter search %%
compare(_Filter, ['#'], _) ->
match_full;
compare([_ | TF], ['+' | TW], Pos) ->
case compare(TF, TW, Pos + 1) of
lower ->
lower;
Other ->
Other
end;
%% Filter search end %%
compare(['+' | TF], [HW | TW], Pos) -> compare(['+' | TF], [HW | TW], Pos) ->
case compare(TF, TW, Pos + 1) of case compare(TF, TW, Pos + 1) of
lower -> lower ->

View File

@ -267,7 +267,6 @@
[ [
{node(), topic(), deliver_result()} {node(), topic(), deliver_result()}
| {share, topic(), deliver_result()} | {share, topic(), deliver_result()}
| {emqx_external_broker:dest(), topic(), deliver_result()}
| persisted | persisted
] ]
| disconnect. | disconnect.

View File

@ -399,6 +399,7 @@ compat_windows(Fun) when is_function(Fun, 0) ->
0.0 0.0
end; end;
compat_windows(Fun) -> compat_windows(Fun) ->
?SLOG(warning, "Invalid function: ~p", [Fun]),
error({badarg, Fun}). error({badarg, Fun}).
load(Avg) -> load(Avg) ->

View File

@ -303,7 +303,7 @@ websocket_init([Req, Opts]) ->
max_size => emqx_config:get_zone_conf(Zone, [mqtt, max_packet_size]) max_size => emqx_config:get_zone_conf(Zone, [mqtt, max_packet_size])
}, },
ParseState = emqx_frame:initial_parse_state(FrameOpts), ParseState = emqx_frame:initial_parse_state(FrameOpts),
Serialize = emqx_frame:initial_serialize_opts(FrameOpts), Serialize = emqx_frame:serialize_opts(),
Channel = emqx_channel:init(ConnInfo, Opts), Channel = emqx_channel:init(ConnInfo, Opts),
GcState = get_force_gc(Zone), GcState = get_force_gc(Zone),
StatsTimer = get_stats_enable(Zone), StatsTimer = get_stats_enable(Zone),
@ -436,7 +436,6 @@ websocket_handle({Frame, _}, State) ->
%% TODO: should not close the ws connection %% TODO: should not close the ws connection
?LOG(error, #{msg => "unexpected_frame", frame => Frame}), ?LOG(error, #{msg => "unexpected_frame", frame => Frame}),
shutdown(unexpected_ws_frame, State). shutdown(unexpected_ws_frame, State).
websocket_info({call, From, Req}, State) -> websocket_info({call, From, Req}, State) ->
handle_call(From, Req, State); handle_call(From, Req, State);
websocket_info({cast, rate_limit}, State) -> websocket_info({cast, rate_limit}, State) ->
@ -456,8 +455,8 @@ websocket_info({incoming, Packet}, State) ->
handle_incoming(Packet, State); handle_incoming(Packet, State);
websocket_info({outgoing, Packets}, State) -> websocket_info({outgoing, Packets}, State) ->
return(enqueue(Packets, State)); return(enqueue(Packets, State));
websocket_info({check_gc, Cnt, Oct}, State) -> websocket_info({check_gc, Stats}, State) ->
return(check_oom(run_gc(Cnt, Oct, State))); return(check_oom(run_gc(Stats, State)));
websocket_info( websocket_info(
Deliver = {deliver, _Topic, _Msg}, Deliver = {deliver, _Topic, _Msg},
State = #state{listener = {Type, Listener}} State = #state{listener = {Type, Listener}}
@ -604,23 +603,17 @@ check_limiter(
Data, Data,
WhenOk, WhenOk,
Msgs, Msgs,
#state{channel = Channel, limiter_timer = undefined, limiter = Limiter} = State #state{limiter_timer = undefined, limiter = Limiter} = State
) -> ) ->
case emqx_limiter_container:check_list(Needs, Limiter) of case emqx_limiter_container:check_list(Needs, Limiter) of
{ok, Limiter2} -> {ok, Limiter2} ->
WhenOk(Data, Msgs, State#state{limiter = Limiter2}); WhenOk(Data, Msgs, State#state{limiter = Limiter2});
{pause, Time, Limiter2} -> {pause, Time, Limiter2} ->
?SLOG_THROTTLE( ?SLOG(debug, #{
warning, msg => "pause_time_due_to_rate_limit",
#{ needs => Needs,
msg => socket_receive_paused_by_rate_limit, time_in_ms => Time
paused_ms => Time }),
},
#{
tag => "RATE",
clientid => emqx_channel:info(clientid, Channel)
}
),
Retry = #retry{ Retry = #retry{
types = [Type || {_, Type} <- Needs], types = [Type || {_, Type} <- Needs],
@ -654,7 +647,7 @@ check_limiter(
State#state{limiter_buffer = queue:in(New, Buffer)}. State#state{limiter_buffer = queue:in(New, Buffer)}.
-spec retry_limiter(state()) -> state(). -spec retry_limiter(state()) -> state().
retry_limiter(#state{channel = Channel, limiter = Limiter} = State) -> retry_limiter(#state{limiter = Limiter} = State) ->
#retry{types = Types, data = Data, next = Next} = emqx_limiter_container:get_retry_context( #retry{types = Types, data = Data, next = Next} = emqx_limiter_container:get_retry_context(
Limiter Limiter
), ),
@ -669,17 +662,11 @@ retry_limiter(#state{channel = Channel, limiter = Limiter} = State) ->
} }
); );
{pause, Time, Limiter2} -> {pause, Time, Limiter2} ->
?SLOG_THROTTLE( ?SLOG(debug, #{
warning, msg => "pause_time_due_to_rate_limit",
#{ types => Types,
msg => socket_receive_paused_by_rate_limit, time_in_ms => Time
paused_ms => Time }),
},
#{
tag => "RATE",
clientid => emqx_channel:info(clientid, Channel)
}
),
TRef = start_timer(Time, limit_timeout), TRef = start_timer(Time, limit_timeout),
@ -695,8 +682,8 @@ when_msg_in(Packets, Msgs, State) ->
%% Run GC, Check OOM %% Run GC, Check OOM
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
run_gc(Cnt, Oct, State = #state{gc_state = GcSt}) -> run_gc(Stats, State = #state{gc_state = GcSt}) ->
case ?ENABLED(GcSt) andalso emqx_gc:run(Cnt, Oct, GcSt) of case ?ENABLED(GcSt) andalso emqx_gc:run(Stats, GcSt) of
false -> State; false -> State;
{_IsGC, GcSt1} -> State#state{gc_state = GcSt1} {_IsGC, GcSt1} -> State#state{gc_state = GcSt1}
end. end.
@ -738,8 +725,7 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) ->
input_bytes => Data input_bytes => Data
}), }),
FrameError = {frame_error, Reason}, FrameError = {frame_error, Reason},
NState = enrich_state(Reason, State), {[{incoming, FrameError} | Packets], State};
{[{incoming, FrameError} | Packets], NState};
error:Reason:Stacktrace -> error:Reason:Stacktrace ->
?LOG(error, #{ ?LOG(error, #{
at_state => emqx_frame:describe_state(ParseState), at_state => emqx_frame:describe_state(ParseState),
@ -810,9 +796,11 @@ handle_outgoing(
get_active_n(Type, Listener) get_active_n(Type, Listener)
of of
true -> true ->
Cnt = emqx_pd:reset_counter(outgoing_pubs), Stats = #{
Oct = emqx_pd:reset_counter(outgoing_bytes), cnt => emqx_pd:reset_counter(outgoing_pubs),
postpone({check_gc, Cnt, Oct}, State); oct => emqx_pd:reset_counter(outgoing_bytes)
},
postpone({check_gc, Stats}, State);
false -> false ->
State State
end, end,
@ -832,7 +820,7 @@ serialize_and_inc_stats_fun(#state{serialize = Serialize}) ->
?LOG(warning, #{ ?LOG(warning, #{
msg => "packet_discarded", msg => "packet_discarded",
reason => "frame_too_large", reason => "frame_too_large",
packet => Packet packet => emqx_packet:format(Packet)
}), }),
ok = emqx_metrics:inc('delivery.dropped.too_large'), ok = emqx_metrics:inc('delivery.dropped.too_large'),
ok = emqx_metrics:inc('delivery.dropped'), ok = emqx_metrics:inc('delivery.dropped'),
@ -1071,13 +1059,6 @@ check_max_connection(Type, Listener) ->
{denny, Reason} {denny, Reason}
end end
end. end.
enrich_state(#{parse_state := NParseState}, State) ->
Serialize = emqx_frame:serialize_opts(NParseState),
State#state{parse_state = NParseState, serialize = Serialize};
enrich_state(_, State) ->
State.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% For CT tests %% For CT tests
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------

View File

@ -33,7 +33,7 @@ introduced_in() ->
"5.0.8". "5.0.8".
%%================================================================================ %%================================================================================
%% API functions %% API funcions
%%================================================================================ %%================================================================================
-spec send(node(), pid(), emqx_types:topic(), term()) -> true. -spec send(node(), pid(), emqx_types:topic(), term()) -> true.

View File

@ -112,10 +112,6 @@ t_check(_) ->
?assertNot(emqx_banned:check(ClientInfoValidFull)), ?assertNot(emqx_banned:check(ClientInfoValidFull)),
?assertNot(emqx_banned:check(ClientInfoValidEmpty)), ?assertNot(emqx_banned:check(ClientInfoValidEmpty)),
?assertNot(emqx_banned:check(ClientInfoValidOnlyClientId)), ?assertNot(emqx_banned:check(ClientInfoValidOnlyClientId)),
?assert(emqx_banned:check_clientid(<<"BannedClient">>)),
?assert(emqx_banned:check_clientid(<<"BannedClientRE">>)),
ok = emqx_banned:delete(emqx_banned:who(clientid, <<"BannedClient">>)), ok = emqx_banned:delete(emqx_banned:who(clientid, <<"BannedClient">>)),
ok = emqx_banned:delete(emqx_banned:who(username, <<"BannedUser">>)), ok = emqx_banned:delete(emqx_banned:who(username, <<"BannedUser">>)),
ok = emqx_banned:delete(emqx_banned:who(peerhost, {192, 168, 0, 1})), ok = emqx_banned:delete(emqx_banned:who(peerhost, {192, 168, 0, 1})),
@ -131,10 +127,6 @@ t_check(_) ->
?assertNot(emqx_banned:check(ClientInfoBannedUsernameRE)), ?assertNot(emqx_banned:check(ClientInfoBannedUsernameRE)),
?assertNot(emqx_banned:check(ClientInfoBannedAddrNet)), ?assertNot(emqx_banned:check(ClientInfoBannedAddrNet)),
?assertNot(emqx_banned:check(ClientInfoValidFull)), ?assertNot(emqx_banned:check(ClientInfoValidFull)),
?assertNot(emqx_banned:check_clientid(<<"BannedClient">>)),
?assertNot(emqx_banned:check_clientid(<<"BannedClientRE">>)),
?assertEqual(0, emqx_banned:info(size)). ?assertEqual(0, emqx_banned:info(size)).
t_unused(_) -> t_unused(_) ->

View File

@ -414,32 +414,24 @@ t_handle_in_auth(_) ->
emqx_channel:handle_in(?AUTH_PACKET(), Channel). emqx_channel:handle_in(?AUTH_PACKET(), Channel).
t_handle_in_frame_error(_) -> t_handle_in_frame_error(_) ->
IdleChannelV5 = channel(#{conn_state => idle}), IdleChannel = channel(#{conn_state => idle}),
%% no CONNACK packet for v4 {shutdown, #{shutdown_count := frame_too_large, cause := frame_too_large}, _Chan} =
?assertMatch( emqx_channel:handle_in({frame_error, #{cause => frame_too_large}}, IdleChannel),
{shutdown, #{shutdown_count := frame_too_large, cause := frame_too_large}, _Chan},
emqx_channel:handle_in(
{frame_error, #{cause => frame_too_large}}, v4(IdleChannelV5)
)
),
ConnectingChan = channel(#{conn_state => connecting}), ConnectingChan = channel(#{conn_state => connecting}),
ConnackPacket = ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE), ConnackPacket = ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE),
?assertMatch( {shutdown,
{shutdown, #{
#{ shutdown_count := frame_too_large,
shutdown_count := frame_too_large, cause := frame_too_large,
cause := frame_too_large, limit := 100,
limit := 100, received := 101
received := 101 },
}, ConnackPacket,
ConnackPacket, _}, _} =
emqx_channel:handle_in( emqx_channel:handle_in(
{frame_error, #{cause => frame_too_large, received => 101, limit => 100}}, {frame_error, #{cause => frame_too_large, received => 101, limit => 100}},
ConnectingChan ConnectingChan
) ),
),
DisconnectPacket = ?DISCONNECT_PACKET(?RC_PACKET_TOO_LARGE), DisconnectPacket = ?DISCONNECT_PACKET(?RC_PACKET_TOO_LARGE),
ConnectedChan = channel(#{conn_state => connected}), ConnectedChan = channel(#{conn_state => connected}),
?assertMatch( ?assertMatch(

View File

@ -78,7 +78,6 @@
start_epmd/0, start_epmd/0,
start_peer/2, start_peer/2,
stop_peer/1, stop_peer/1,
ebin_path/0,
listener_port/2 listener_port/2
]). ]).

View File

@ -445,7 +445,7 @@ zone_global_defaults() ->
peer_cert_as_username => disabled, peer_cert_as_username => disabled,
response_information => [], response_information => [],
retain_available => true, retain_available => true,
retry_interval => infinity, retry_interval => 30000,
message_expiry_interval => infinity, message_expiry_interval => infinity,
server_keepalive => disabled, server_keepalive => disabled,
session_expiry_interval => 7200000, session_expiry_interval => 7200000,

View File

@ -333,17 +333,6 @@ t_handle_incoming(_) ->
), ),
?assertMatch({ok, _Out, _NState}, emqx_connection:handle_incoming(frame_error, st())). ?assertMatch({ok, _Out, _NState}, emqx_connection:handle_incoming(frame_error, st())).
t_handle_outing_non_utf8_topic(_) ->
Topic = <<"测试"/utf16>>,
Publish = ?PUBLISH_PACKET(0, Topic, 1),
StrictOff = #{version => 5, max_size => 16#FFFF, strict_mode => false},
StOff = st(#{serialize => StrictOff}),
OffResult = emqx_connection:handle_outgoing(Publish, StOff),
?assertMatch({ok, _}, OffResult),
StrictOn = #{version => 5, max_size => 16#FFFF, strict_mode => true},
StOn = st(#{serialize => StrictOn}),
?assertError(frame_serialize_error, emqx_connection:handle_outgoing(Publish, StOn)).
t_with_channel(_) -> t_with_channel(_) ->
State = st(), State = st(),
ok = meck:expect(emqx_channel, handle_in, fun(_, _) -> ok end), ok = meck:expect(emqx_channel, handle_in, fun(_, _) -> ok end),
@ -526,7 +515,7 @@ t_oom_shutdown(_) ->
with_conn( with_conn(
fun(Pid) -> fun(Pid) ->
Pid ! {tcp_passive, foo}, Pid ! {tcp_passive, foo},
{ok, _} = ?block_until(#{?snk_kind := check_oom_shutdown}, 1000), {ok, _} = ?block_until(#{?snk_kind := check_oom}, 1000),
{ok, _} = ?block_until(#{?snk_kind := terminate}, 100), {ok, _} = ?block_until(#{?snk_kind := terminate}, 100),
Trace = snabbkaffe:collect_trace(), Trace = snabbkaffe:collect_trace(),
?assertEqual(1, length(?of_kind(terminate, Trace))), ?assertEqual(1, length(?of_kind(terminate, Trace))),

View File

@ -138,14 +138,13 @@ init_per_testcase(t_refresh_config = TestCase, Config) ->
]; ];
init_per_testcase(TestCase, Config) when init_per_testcase(TestCase, Config) when
TestCase =:= t_update_listener; TestCase =:= t_update_listener;
TestCase =:= t_update_listener_enable_disable;
TestCase =:= t_validations TestCase =:= t_validations
-> ->
ct:timetrap({seconds, 30}), ct:timetrap({seconds, 30}),
ok = snabbkaffe:start_trace(), ok = snabbkaffe:start_trace(),
%% when running emqx standalone tests, we can't use those %% when running emqx standalone tests, we can't use those
%% features. %% features.
case does_module_exist(emqx_mgmt) of case does_module_exist(emqx_management) of
true -> true ->
DataDir = ?config(data_dir, Config), DataDir = ?config(data_dir, Config),
CRLFile = filename:join([DataDir, "intermediate-revoked.crl.pem"]), CRLFile = filename:join([DataDir, "intermediate-revoked.crl.pem"]),
@ -166,7 +165,7 @@ init_per_testcase(TestCase, Config) when
{emqx_conf, #{config => #{listeners => #{ssl => #{default => ListenerConf}}}}}, {emqx_conf, #{config => #{listeners => #{ssl => #{default => ListenerConf}}}}},
emqx, emqx,
emqx_management, emqx_management,
emqx_mgmt_api_test_util:emqx_dashboard() {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
], ],
#{work_dir => emqx_cth_suite:work_dir(TestCase, Config)} #{work_dir => emqx_cth_suite:work_dir(TestCase, Config)}
), ),
@ -207,7 +206,6 @@ read_crl(Filename) ->
end_per_testcase(TestCase, Config) when end_per_testcase(TestCase, Config) when
TestCase =:= t_update_listener; TestCase =:= t_update_listener;
TestCase =:= t_update_listener_enable_disable;
TestCase =:= t_validations TestCase =:= t_validations
-> ->
Skip = proplists:get_bool(skip_does_not_apply, Config), Skip = proplists:get_bool(skip_does_not_apply, Config),
@ -1059,104 +1057,3 @@ do_t_validations(_Config) ->
), ),
ok. ok.
%% Checks that if CRL is ever enabled and then disabled, clients can connect, even if they
%% would otherwise not have their corresponding CRLs cached and fail with `{bad_crls,
%% no_relevant_crls}`.
t_update_listener_enable_disable(Config) ->
case proplists:get_bool(skip_does_not_apply, Config) of
true ->
ct:pal("skipping as this test does not apply in this profile"),
ok;
false ->
do_t_update_listener_enable_disable(Config)
end.
do_t_update_listener_enable_disable(Config) ->
DataDir = ?config(data_dir, Config),
Keyfile = filename:join([DataDir, "server.key.pem"]),
Certfile = filename:join([DataDir, "server.cert.pem"]),
Cacertfile = filename:join([DataDir, "ca-chain.cert.pem"]),
ClientCert = filename:join(DataDir, "client.cert.pem"),
ClientKey = filename:join(DataDir, "client.key.pem"),
ListenerId = "ssl:default",
%% Enable CRL
{ok, {{_, 200, _}, _, ListenerData0}} = get_listener_via_api(ListenerId),
CRLConfig0 =
#{
<<"ssl_options">> =>
#{
<<"keyfile">> => Keyfile,
<<"certfile">> => Certfile,
<<"cacertfile">> => Cacertfile,
<<"enable_crl_check">> => true,
<<"fail_if_no_peer_cert">> => true
}
},
ListenerData1 = emqx_utils_maps:deep_merge(ListenerData0, CRLConfig0),
{ok, {_, _, ListenerData2}} = update_listener_via_api(ListenerId, ListenerData1),
?assertMatch(
#{
<<"ssl_options">> :=
#{
<<"enable_crl_check">> := true,
<<"verify">> := <<"verify_peer">>,
<<"fail_if_no_peer_cert">> := true
}
},
ListenerData2
),
%% Disable CRL
CRLConfig1 =
#{
<<"ssl_options">> =>
#{
<<"keyfile">> => Keyfile,
<<"certfile">> => Certfile,
<<"cacertfile">> => Cacertfile,
<<"enable_crl_check">> => false,
<<"fail_if_no_peer_cert">> => true
}
},
ListenerData3 = emqx_utils_maps:deep_merge(ListenerData2, CRLConfig1),
redbug:start(
[
"esockd_server:get_listener_prop -> return",
"esockd_server:set_listener_prop -> return",
"esockd:merge_opts -> return",
"esockd_listener_sup:set_options -> return",
"emqx_listeners:inject_crl_config -> return"
],
[{msgs, 100}]
),
{ok, {_, _, ListenerData4}} = update_listener_via_api(ListenerId, ListenerData3),
?assertMatch(
#{
<<"ssl_options">> :=
#{
<<"enable_crl_check">> := false,
<<"verify">> := <<"verify_peer">>,
<<"fail_if_no_peer_cert">> := true
}
},
ListenerData4
),
%% Now the client that would be blocked tries to connect and should now be allowed.
{ok, C} = emqtt:start_link([
{ssl, true},
{ssl_opts, [
{certfile, ClientCert},
{keyfile, ClientKey},
{verify, verify_none}
]},
{port, 8883}
]),
?assertMatch({ok, _}, emqtt:connect(C)),
emqtt:stop(C),
?assertNotReceive({http_get, _}),
ok.

View File

@ -38,7 +38,7 @@
%% in `end_per_suite/1` or `end_per_group/2`) with the result from step 2. %% in `end_per_suite/1` or `end_per_group/2`) with the result from step 2.
-module(emqx_cth_cluster). -module(emqx_cth_cluster).
-export([start/1, start/2, restart/1]). -export([start/1, start/2, restart/1, restart/2]).
-export([stop/1, stop_node/1]). -export([stop/1, stop_node/1]).
-export([start_bare_nodes/1, start_bare_nodes/2]). -export([start_bare_nodes/1, start_bare_nodes/2]).
@ -158,18 +158,18 @@ wait_clustered([Node | Nodes] = All, Check, Deadline) ->
nodes_not_running => NodesNotRunnging nodes_not_running => NodesNotRunnging
}} }}
); );
{false, _Nodes} -> {false, Nodes} ->
timer:sleep(100), timer:sleep(100),
wait_clustered(All, Check, Deadline) wait_clustered(All, Check, Deadline)
end. end.
restart(NodeSpecs = [_ | _]) -> restart(NodeSpec) ->
Nodes = [maps:get(name, Spec) || Spec <- NodeSpecs], restart(maps:get(name, NodeSpec), NodeSpec).
ct:pal("Stopping peer nodes: ~p", [Nodes]),
ok = stop(Nodes), restart(Node, Spec) ->
start([Spec#{boot_type => restart} || Spec <- NodeSpecs]); ct:pal("Stopping peer node ~p", [Node]),
restart(NodeSpec = #{}) -> ok = emqx_cth_peer:stop(Node),
restart([NodeSpec]). start([Spec#{boot_type => restart}]).
mk_nodespecs(Nodes, ClusterOpts) -> mk_nodespecs(Nodes, ClusterOpts) ->
NodeSpecs = lists:zipwith( NodeSpecs = lists:zipwith(

Some files were not shown because too many files have changed in this diff Show More