Compare commits

..

1 Commits

Author SHA1 Message Date
Ivan Dyachkov 69fdf5b816 ci: freeze base docker image to stable-20240612-slim 2024-06-26 15:09:09 +02:00
893 changed files with 5870 additions and 32017 deletions

View File

@ -1,30 +0,0 @@
version: '3.9'
services:
couchbase:
container_name: couchbase
hostname: couchbase
image: ghcr.io/emqx/couchbase:1.0.0
restart: always
expose:
- 8091-8093
# ports:
# - "8091-8093:8091-8093"
networks:
- emqx_bridge
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8093/admin/ping"]
interval: 30s
timeout: 5s
retries: 4
environment:
- CLUSTER=localhost
- USER=admin
- PASS=public
- PORT=8091
- RAMSIZEMB=2048
- RAMSIZEINDEXMB=512
- RAMSIZEFTSMB=512
- BUCKETS=mqtt
- BUCKETSIZES=100
- AUTOREBALANCE=true

View File

@ -18,7 +18,7 @@ services:
- /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
kdc:
hostname: kdc.emqx.net
image: ghcr.io/emqx/emqx-builder/5.3-9:1.15.7-26.2.5-3-ubuntu22.04
image: ghcr.io/emqx/emqx-builder/5.3-8:1.15.7-26.2.5-2-ubuntu22.04
container_name: kdc.emqx.net
expose:
- 88 # kdc

View File

@ -10,7 +10,7 @@ services:
nofile: 1024
image: openldap
#ports:
# - "389:389"
# - 389:389
volumes:
- ./certs/ca.crt:/etc/certs/ca.crt
restart: always

View File

@ -3,7 +3,7 @@ version: '3.9'
services:
erlang:
container_name: erlang
image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.3-9:1.15.7-26.2.5-3-ubuntu22.04}
image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.3-8:1.15.7-26.2.5-2-ubuntu22.04}
env_file:
- credentials.env
- conf.env

View File

@ -1,61 +0,0 @@
# LDAP authentication
To run manual tests with the default docker-compose files.
Expose openldap container port by uncommenting the `ports` config in `docker-compose-ldap.yaml `
To start openldap:
```
docker-compose -f ./.ci/docker-compose-file/docker-compose.yaml -f ./.ci/docker-compose-file/docker-compose-ldap.yaml up -docker
```
## LDAP database
LDAP database is populated from below files:
```
apps/emqx_ldap/test/data/emqx.io.ldif /usr/local/etc/openldap/schema/emqx.io.ldif
apps/emqx_ldap/test/data/emqx.schema /usr/local/etc/openldap/schema/emqx.schema
```
## Minimal EMQX config
```
authentication = [
{
backend = ldap
base_dn = "uid=${username},ou=testdevice,dc=emqx,dc=io"
filter = "(& (objectClass=mqttUser) (uid=${username}))"
mechanism = password_based
method {
is_superuser_attribute = isSuperuser
password_attribute = userPassword
type = hash
}
password = public
pool_size = 8
query_timeout = "5s"
request_timeout = "10s"
server = "localhost:1389"
username = "cn=root,dc=emqx,dc=io"
}
]
```
## Example ldapsearch command
```
ldapsearch -x -H ldap://localhost:389 -D "cn=root,dc=emqx,dc=io" -W -b "uid=mqttuser0007,ou=testdevice,dc=emqx,dc=io" "(&(objectClass=mqttUser)(uid=mqttuser0007))"
```
## Example mqttx command
The client password hashes are generated from their username.
```
# disabled user
mqttx pub -t 't/1' -h localhost -p 1883 -m x -u mqttuser0006 -P mqttuser0006
# enabled super-user
mqttx pub -t 't/1' -h localhost -p 1883 -m x -u mqttuser0007 -P mqttuser0007
```

View File

@ -221,11 +221,5 @@
"listen": "0.0.0.0:10000",
"upstream": "azurite:10000",
"enabled": true
},
{
"name": "couchbase",
"listen": "0.0.0.0:8093",
"upstream": "couchbase:8093",
"enabled": true
}
]

View File

@ -51,7 +51,7 @@ runs:
echo "SELF_HOSTED=false" >> $GITHUB_OUTPUT
;;
esac
- uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
- uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1
id: cache
if: steps.prepare.outputs.SELF_HOSTED != 'true'
with:

View File

@ -1,8 +1,24 @@
name: 'Prepare jmeter'
inputs:
version-emqx:
required: true
type: string
runs:
using: composite
steps:
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
with:
name: emqx-docker
path: /tmp
- name: load docker image
shell: bash
env:
PKG_VSN: ${{ inputs.version-emqx }}
run: |
EMQX_DOCKER_IMAGE_TAG=$(docker load < /tmp/emqx-docker-${PKG_VSN}.tar.gz | sed 's/Loaded image: //g')
echo "_EMQX_DOCKER_IMAGE_TAG=$EMQX_DOCKER_IMAGE_TAG" >> $GITHUB_ENV
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with:
repository: emqx/emqx-fvt

View File

@ -1 +0,0 @@
*/.github/*

View File

@ -11,48 +11,29 @@ on:
ref:
required: false
defaults:
run:
shell: bash
env:
IS_CI: "yes"
jobs:
init:
runs-on: ubuntu-22.04
outputs:
BUILDER_VSN: ${{ steps.env.outputs.BUILDER_VSN }}
OTP_VSN: ${{ steps.env.outputs.OTP_VSN }}
ELIXIR_VSN: ${{ steps.env.outputs.ELIXIR_VSN }}
BUILDER: ${{ steps.env.outputs.BUILDER }}
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ github.event.inputs.ref }}
- name: Set up environment
id: env
run: |
source ./env.sh
echo "BUILDER_VSN=$EMQX_BUILDER_VSN" | tee -a "$GITHUB_OUTPUT"
echo "OTP_VSN=$OTP_VSN" | tee -a "$GITHUB_OUTPUT"
echo "ELIXIR_VSN=$ELIXIR_VSN" | tee -a "$GITHUB_OUTPUT"
echo "BUILDER=$EMQX_BUILDER" | tee -a "$GITHUB_OUTPUT"
sanity-checks:
runs-on: ubuntu-22.04
needs: init
container: ${{ needs.init.outputs.BUILDER }}
container: "ghcr.io/emqx/emqx-builder/5.3-8:1.15.7-26.2.5-2-ubuntu22.04"
outputs:
ct-matrix: ${{ steps.matrix.outputs.ct-matrix }}
ct-host: ${{ steps.matrix.outputs.ct-host }}
ct-docker: ${{ steps.matrix.outputs.ct-docker }}
version-emqx: ${{ steps.matrix.outputs.version-emqx }}
version-emqx-enterprise: ${{ steps.matrix.outputs.version-emqx-enterprise }}
builder: "ghcr.io/emqx/emqx-builder/5.3-8:1.15.7-26.2.5-2-ubuntu22.04"
builder_vsn: "5.3-8"
otp_vsn: "26.2.5-2"
elixir_vsn: "1.15.7"
permissions:
contents: read
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ github.event.inputs.ref }}
fetch-depth: 0
@ -111,20 +92,35 @@ jobs:
- name: Generate CT Matrix
id: matrix
run: |
MATRIX="$(./scripts/find-apps.sh --ci)"
APPS="$(./scripts/find-apps.sh --ci)"
MATRIX="$(echo "${APPS}" | jq -c '
[
(.[] | select(.profile == "emqx") | . + {
builder: "5.3-8",
otp: "26.2.5-2",
elixir: "1.15.7"
}),
(.[] | select(.profile == "emqx-enterprise") | . + {
builder: "5.3-8",
otp: ["26.2.5-2"][],
elixir: "1.15.7"
})
]
')"
echo "${MATRIX}" | jq
CT_MATRIX="$(echo "${MATRIX}" | jq -c 'map({profile}) | unique')"
CT_MATRIX="$(echo "${MATRIX}" | jq -c 'map({profile, builder, otp, elixir}) | unique')"
CT_HOST="$(echo "${MATRIX}" | jq -c 'map(select(.runner == "host"))')"
CT_DOCKER="$(echo "${MATRIX}" | jq -c 'map(select(.runner == "docker"))')"
echo "ct-matrix=${CT_MATRIX}" | tee -a $GITHUB_OUTPUT
echo "ct-host=${CT_HOST}" | tee -a $GITHUB_OUTPUT
echo "ct-docker=${CT_DOCKER}" | tee -a $GITHUB_OUTPUT
echo "version-emqx=$(./pkg-vsn.sh emqx)" | tee -a $GITHUB_OUTPUT
echo "version-emqx-enterprise=$(./pkg-vsn.sh emqx-enterprise)" | tee -a $GITHUB_OUTPUT
compile:
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral-xl","linux","x64"]') }}
container: ${{ needs.init.outputs.BUILDER }}
container: ${{ needs.sanity-checks.outputs.builder }}
needs:
- init
- sanity-checks
strategy:
matrix:
@ -136,7 +132,7 @@ jobs:
contents: read
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
fetch-depth: 0
- name: Work around https://github.com/actions/checkout/issues/766
@ -152,7 +148,7 @@ jobs:
echo "PROFILE=${PROFILE}" | tee -a .env
echo "PKG_VSN=$(./pkg-vsn.sh ${PROFILE})" | tee -a .env
zip -ryq -x@.github/workflows/.zipignore $PROFILE.zip .
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: ${{ matrix.profile }}
path: ${{ matrix.profile }}.zip
@ -160,47 +156,53 @@ jobs:
run_emqx_app_tests:
needs:
- init
- sanity-checks
- compile
uses: ./.github/workflows/run_emqx_app_tests.yaml
with:
builder: ${{ needs.init.outputs.BUILDER }}
builder: ${{ needs.sanity-checks.outputs.builder }}
before_ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.base.sha || github.event.before }}
after_ref: ${{ github.sha }}
run_test_cases:
needs:
- init
- sanity-checks
- compile
uses: ./.github/workflows/run_test_cases.yaml
with:
builder: ${{ needs.init.outputs.BUILDER }}
builder: ${{ needs.sanity-checks.outputs.builder }}
ct-matrix: ${{ needs.sanity-checks.outputs.ct-matrix }}
ct-host: ${{ needs.sanity-checks.outputs.ct-host }}
ct-docker: ${{ needs.sanity-checks.outputs.ct-docker }}
static_checks:
needs:
- init
- sanity-checks
- compile
uses: ./.github/workflows/static_checks.yaml
with:
builder: ${{ needs.init.outputs.BUILDER }}
builder: ${{ needs.sanity-checks.outputs.builder }}
ct-matrix: ${{ needs.sanity-checks.outputs.ct-matrix }}
build_slim_packages:
needs:
- sanity-checks
uses: ./.github/workflows/build_slim_packages.yaml
with:
builder: ${{ needs.sanity-checks.outputs.builder }}
builder_vsn: ${{ needs.sanity-checks.outputs.builder_vsn }}
otp_vsn: ${{ needs.sanity-checks.outputs.otp_vsn }}
elixir_vsn: ${{ needs.sanity-checks.outputs.elixir_vsn }}
build_docker_for_test:
needs:
- init
- sanity-checks
uses: ./.github/workflows/build_docker_for_test.yaml
with:
otp_vsn: ${{ needs.sanity-checks.outputs.otp_vsn }}
elixir_vsn: ${{ needs.sanity-checks.outputs.elixir_vsn }}
version-emqx: ${{ needs.sanity-checks.outputs.version-emqx }}
version-emqx-enterprise: ${{ needs.sanity-checks.outputs.version-emqx-enterprise }}
spellcheck:
needs:
@ -210,35 +212,41 @@ jobs:
run_conf_tests:
needs:
- init
- sanity-checks
- compile
uses: ./.github/workflows/run_conf_tests.yaml
with:
builder: ${{ needs.init.outputs.BUILDER }}
builder: ${{ needs.sanity-checks.outputs.builder }}
check_deps_integrity:
needs:
- init
- sanity-checks
uses: ./.github/workflows/check_deps_integrity.yaml
with:
builder: ${{ needs.init.outputs.BUILDER }}
builder: ${{ needs.sanity-checks.outputs.builder }}
run_jmeter_tests:
needs:
- sanity-checks
- build_docker_for_test
uses: ./.github/workflows/run_jmeter_tests.yaml
with:
version-emqx: ${{ needs.sanity-checks.outputs.version-emqx }}
run_docker_tests:
needs:
- sanity-checks
- build_docker_for_test
uses: ./.github/workflows/run_docker_tests.yaml
with:
version-emqx: ${{ needs.sanity-checks.outputs.version-emqx }}
version-emqx-enterprise: ${{ needs.sanity-checks.outputs.version-emqx-enterprise }}
run_helm_tests:
needs:
- sanity-checks
- build_docker_for_test
uses: ./.github/workflows/run_helm_tests.yaml
with:
version-emqx: ${{ needs.sanity-checks.outputs.version-emqx }}
version-emqx-enterprise: ${{ needs.sanity-checks.outputs.version-emqx-enterprise }}

View File

@ -8,6 +8,7 @@ on:
push:
tags:
- 'v*'
- 'e*'
branches:
- 'master'
- 'release-5[0-9]'
@ -17,42 +18,13 @@ on:
ref:
required: false
defaults:
run:
shell: bash
env:
IS_CI: 'yes'
jobs:
init:
runs-on: ubuntu-22.04
outputs:
BUILDER_VSN: ${{ steps.env.outputs.BUILDER_VSN }}
OTP_VSN: ${{ steps.env.outputs.OTP_VSN }}
ELIXIR_VSN: ${{ steps.env.outputs.ELIXIR_VSN }}
BUILDER: ${{ steps.env.outputs.BUILDER }}
BUILD_FROM: ${{ steps.env.outputs.BUILD_FROM }}
RUN_FROM: ${{ steps.env.outputs.BUILD_FROM }}
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ github.event.inputs.ref }}
- name: Set up environment
id: env
run: |
source env.sh
echo "BUILDER_VSN=$EMQX_BUILDER_VSN" >> "$GITHUB_OUTPUT"
echo "OTP_VSN=$OTP_VSN" >> "$GITHUB_OUTPUT"
echo "ELIXIR_VSN=$ELIXIR_VSN" >> "$GITHUB_OUTPUT"
echo "BUILDER=$EMQX_BUILDER" >> "$GITHUB_OUTPUT"
echo "BUILD_FROM=$EMQX_DOCKER_BUILD_FROM" >> "$GITHUB_OUTPUT"
echo "RUN_FROM=$EMQX_DOCKER_RUN_FROM" >> "$GITHUB_OUTPUT"
prepare:
runs-on: ubuntu-22.04
needs: init
container: ${{ needs.init.outputs.BUILDER }}
container: 'ghcr.io/emqx/emqx-builder/5.3-8:1.15.7-26.2.5-2-ubuntu22.04'
outputs:
profile: ${{ steps.parse-git-ref.outputs.profile }}
release: ${{ steps.parse-git-ref.outputs.release }}
@ -60,12 +32,16 @@ jobs:
ct-matrix: ${{ steps.matrix.outputs.ct-matrix }}
ct-host: ${{ steps.matrix.outputs.ct-host }}
ct-docker: ${{ steps.matrix.outputs.ct-docker }}
builder: 'ghcr.io/emqx/emqx-builder/5.3-8:1.15.7-26.2.5-2-ubuntu22.04'
builder_vsn: '5.3-8'
otp_vsn: '26.2.5-2'
elixir_vsn: '1.15.7'
permissions:
contents: read
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ github.event.inputs.ref }}
fetch-depth: 0
@ -86,9 +62,23 @@ jobs:
- name: Build matrix
id: matrix
run: |
MATRIX="$(./scripts/find-apps.sh --ci)"
APPS="$(./scripts/find-apps.sh --ci)"
MATRIX="$(echo "${APPS}" | jq -c '
[
(.[] | select(.profile == "emqx") | . + {
builder: "5.3-8",
otp: "26.2.5-2",
elixir: "1.15.7"
}),
(.[] | select(.profile == "emqx-enterprise") | . + {
builder: "5.3-8",
otp: ["26.2.5-2"][],
elixir: "1.15.7"
})
]
')"
echo "${MATRIX}" | jq
CT_MATRIX="$(echo "${MATRIX}" | jq -c 'map({profile}) | unique')"
CT_MATRIX="$(echo "${MATRIX}" | jq -c 'map({profile, builder, otp, elixir}) | unique')"
CT_HOST="$(echo "${MATRIX}" | jq -c 'map(select(.runner == "host"))')"
CT_DOCKER="$(echo "${MATRIX}" | jq -c 'map(select(.runner == "docker"))')"
echo "ct-matrix=${CT_MATRIX}" | tee -a $GITHUB_OUTPUT
@ -98,44 +88,46 @@ jobs:
build_packages:
if: needs.prepare.outputs.release == 'true'
needs:
- init
- prepare
uses: ./.github/workflows/build_packages.yaml
with:
profile: ${{ needs.prepare.outputs.profile }}
publish: true
otp_vsn: ${{ needs.init.outputs.OTP_VSN }}
elixir_vsn: ${{ needs.init.outputs.ELIXIR_VSN }}
builder_vsn: ${{ needs.init.outputs.BUILDER_VSN }}
otp_vsn: ${{ needs.prepare.outputs.otp_vsn }}
elixir_vsn: ${{ needs.prepare.outputs.elixir_vsn }}
builder_vsn: ${{ needs.prepare.outputs.builder_vsn }}
secrets: inherit
build_and_push_docker_images:
if: needs.prepare.outputs.release == 'true'
needs:
- init
- prepare
uses: ./.github/workflows/build_and_push_docker_images.yaml
with:
profile: ${{ needs.prepare.outputs.profile }}
publish: true
latest: ${{ needs.prepare.outputs.latest }}
build_from: ${{ needs.init.outputs.BUILD_FROM }}
run_from: ${{ needs.init.outputs.RUN_FROM }}
otp_vsn: ${{ needs.prepare.outputs.otp_vsn }}
elixir_vsn: ${{ needs.prepare.outputs.elixir_vsn }}
builder_vsn: ${{ needs.prepare.outputs.builder_vsn }}
secrets: inherit
build_slim_packages:
if: needs.prepare.outputs.release != 'true'
needs:
- init
- prepare
uses: ./.github/workflows/build_slim_packages.yaml
with:
builder: ${{ needs.prepare.outputs.builder }}
builder_vsn: ${{ needs.prepare.outputs.builder_vsn }}
otp_vsn: ${{ needs.prepare.outputs.otp_vsn }}
elixir_vsn: ${{ needs.prepare.outputs.elixir_vsn }}
compile:
if: needs.prepare.outputs.release != 'true'
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
container: ${{ needs.init.outputs.BUILDER }}
container: ${{ needs.prepare.outputs.builder }}
needs:
- init
- prepare
strategy:
matrix:
@ -147,7 +139,7 @@ jobs:
contents: read
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ github.event.inputs.ref }}
fetch-depth: 0
@ -163,7 +155,7 @@ jobs:
echo "PROFILE=${PROFILE}" | tee -a .env
echo "PKG_VSN=$(./pkg-vsn.sh ${PROFILE})" | tee -a .env
zip -ryq -x@.github/workflows/.zipignore $PROFILE.zip .
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: ${{ matrix.profile }}
path: ${{ matrix.profile }}.zip
@ -171,23 +163,22 @@ jobs:
run_emqx_app_tests:
needs:
- init
- prepare
- compile
uses: ./.github/workflows/run_emqx_app_tests.yaml
with:
builder: ${{ needs.init.outputs.BUILDER }}
builder: ${{ needs.prepare.outputs.builder }}
before_ref: ${{ github.event.before }}
after_ref: ${{ github.sha }}
run_test_cases:
if: needs.prepare.outputs.release != 'true'
needs:
- init
- prepare
- compile
uses: ./.github/workflows/run_test_cases.yaml
with:
builder: ${{ needs.init.outputs.BUILDER }}
builder: ${{ needs.prepare.outputs.builder }}
ct-matrix: ${{ needs.prepare.outputs.ct-matrix }}
ct-host: ${{ needs.prepare.outputs.ct-host }}
ct-docker: ${{ needs.prepare.outputs.ct-docker }}
@ -195,20 +186,18 @@ jobs:
run_conf_tests:
if: needs.prepare.outputs.release != 'true'
needs:
- init
- prepare
- compile
uses: ./.github/workflows/run_conf_tests.yaml
with:
builder: ${{ needs.init.outputs.BUILDER }}
builder: ${{ needs.prepare.outputs.builder }}
static_checks:
if: needs.prepare.outputs.release != 'true'
needs:
- init
- prepare
- compile
uses: ./.github/workflows/static_checks.yaml
with:
builder: ${{ needs.init.outputs.BUILDER }}
builder: ${{ needs.prepare.outputs.builder }}
ct-matrix: ${{ needs.prepare.outputs.ct-matrix }}

View File

@ -16,10 +16,13 @@ on:
publish:
required: true
type: boolean
build_from:
otp_vsn:
required: true
type: string
run_from:
elixir_vsn:
required: true
type: string
builder_vsn:
required: true
type: string
secrets:
@ -47,12 +50,18 @@ on:
required: false
type: boolean
default: false
build_from:
otp_vsn:
required: false
type: string
default: ghcr.io/emqx/emqx-builder/5.3-9:1.15.7-26.2.5-3-debian12
run_from:
default: public.ecr.aws/debian/debian:stable-20240612-slim
default: '26.2.5-2'
elixir_vsn:
required: false
type: string
default: '1.15.7'
builder_vsn:
required: false
type: string
default: '5.3-8'
permissions:
contents: read
@ -60,7 +69,7 @@ permissions:
jobs:
build:
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON(format('["self-hosted","ephemeral","linux","{0}"]', matrix.arch)) || 'ubuntu-22.04' }}
container: ${{ inputs.build_from }}
container: "ghcr.io/emqx/emqx-builder/${{ inputs.builder_vsn }}:${{ inputs.elixir_vsn }}-${{ inputs.otp_vsn }}-debian12"
outputs:
PKG_VSN: ${{ steps.build.outputs.PKG_VSN }}
@ -75,7 +84,7 @@ jobs:
- arm64
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ github.event.inputs.ref }}
- run: git config --global --add safe.directory "$PWD"
@ -83,7 +92,7 @@ jobs:
id: build
run: |
make ${{ matrix.profile }}-tgz
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: "${{ matrix.profile }}-${{ matrix.arch }}.tar.gz"
path: "_packages/emqx*/emqx-*.tar.gz"
@ -107,10 +116,10 @@ jobs:
- ["${{ inputs.profile }}-elixir", "${{ inputs.profile == 'emqx' && 'docker.io,public.ecr.aws' || 'docker.io' }}"]
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ github.event.inputs.ref }}
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
pattern: "${{ matrix.profile[0] }}-*.tar.gz"
path: _packages
@ -122,25 +131,24 @@ jobs:
run: |
ls -lR _packages/$PROFILE
mv _packages/$PROFILE/*.tar.gz ./
- name: Enable containerd image store on Docker Engine
run: |
echo "$(sudo cat /etc/docker/daemon.json | jq '. += {"features": {"containerd-snapshotter": true}}')" > daemon.json
echo "$(jq '. += {"features": {"containerd-snapshotter": true}}' /etc/docker/daemon.json)" > daemon.json
sudo mv daemon.json /etc/docker/daemon.json
sudo systemctl restart docker
- uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0
- uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
- uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
- uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0
- name: Login to hub.docker.com
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
if: inputs.publish && contains(matrix.profile[1], 'docker.io')
with:
username: ${{ secrets.DOCKER_HUB_USER }}
password: ${{ secrets.DOCKER_HUB_TOKEN }}
- name: Login to AWS ECR
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
if: inputs.publish && contains(matrix.profile[1], 'public.ecr.aws')
with:
registry: public.ecr.aws
@ -156,9 +164,14 @@ jobs:
DOCKER_LATEST: ${{ inputs.latest }}
DOCKER_PUSH: false
DOCKER_BUILD_NOCACHE: true
BUILD_FROM: ${{ inputs.build_from }}
RUN_FROM: ${{ inputs.run_from }}
DOCKER_PLATFORMS: linux/amd64
DOCKER_LOAD: true
EMQX_RUNNER: 'public.ecr.aws/debian/debian:stable-20240612-slim'
EMQX_DOCKERFILE: 'deploy/docker/Dockerfile'
PKG_VSN: ${{ needs.build.outputs.PKG_VSN }}
EMQX_BUILDER_VERSION: ${{ inputs.builder_vsn }}
OTP_VSN: ${{ inputs.otp_vsn }}
ELIXIR_VSN: ${{ inputs.elixir_vsn }}
EMQX_SOURCE_TYPE: tgz
run: |
./build ${PROFILE} docker
@ -202,9 +215,12 @@ jobs:
DOCKER_BUILD_NOCACHE: false
DOCKER_PLATFORMS: linux/amd64,linux/arm64
DOCKER_LOAD: false
BUILD_FROM: ${{ inputs.build_from }}
RUN_FROM: ${{ inputs.run_from }}
EMQX_RUNNER: 'public.ecr.aws/debian/debian:stable-20240612-slim'
EMQX_DOCKERFILE: 'deploy/docker/Dockerfile'
PKG_VSN: ${{ needs.build.outputs.PKG_VSN }}
EMQX_BUILDER_VERSION: ${{ inputs.builder_vsn }}
OTP_VSN: ${{ inputs.otp_vsn }}
ELIXIR_VSN: ${{ inputs.elixir_vsn }}
EMQX_SOURCE_TYPE: tgz
run: |
./build ${PROFILE} docker

View File

@ -6,6 +6,19 @@ concurrency:
on:
workflow_call:
inputs:
otp_vsn:
required: true
type: string
elixir_vsn:
required: true
type: string
version-emqx:
required: true
type: string
version-emqx-enterprise:
required: true
type: string
permissions:
contents: read
@ -15,6 +28,9 @@ jobs:
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
env:
EMQX_NAME: ${{ matrix.profile }}
PKG_VSN: ${{ matrix.profile == 'emqx-enterprise' && inputs.version-emqx-enterprise || inputs.version-emqx }}
OTP_VSN: ${{ inputs.otp_vsn }}
ELIXIR_VSN: ${{ inputs.elixir_vsn }}
strategy:
fail-fast: false
@ -26,13 +42,7 @@ jobs:
- emqx-enterprise-elixir
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh "$EMQX_NAME")
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: build and export to Docker
id: build
run: |
@ -42,16 +52,12 @@ jobs:
run: |
CID=$(docker run -d --rm -P $_EMQX_DOCKER_IMAGE_TAG)
HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' $CID)
./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT || {
docker logs $CID
exit 1
}
./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT
docker stop $CID
- name: export docker image
if: always()
run: |
docker save $_EMQX_DOCKER_IMAGE_TAG | gzip > $EMQX_NAME-docker-$PKG_VSN.tar.gz
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: "${{ env.EMQX_NAME }}-docker"
path: "${{ env.EMQX_NAME }}-docker-${{ env.PKG_VSN }}.tar.gz"

View File

@ -55,7 +55,7 @@ on:
otp_vsn:
required: false
type: string
default: '26.2.5-3'
default: '26.2.5-2'
elixir_vsn:
required: false
type: string
@ -63,7 +63,7 @@ on:
builder_vsn:
required: false
type: string
default: '5.3-9'
default: '5.3-8'
permissions:
contents: read
@ -82,7 +82,7 @@ jobs:
- ${{ inputs.otp_vsn }}
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ github.event.inputs.ref }}
fetch-depth: 0
@ -95,7 +95,7 @@ jobs:
apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: success()
with:
name: ${{ matrix.profile }}-${{ matrix.os }}-${{ matrix.otp }}
@ -145,7 +145,7 @@ jobs:
shell: bash
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ github.event.inputs.ref }}
fetch-depth: 0
@ -180,7 +180,7 @@ jobs:
--builder $BUILDER \
--elixir $IS_ELIXIR \
--pkgtype pkg
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: ${{ matrix.profile }}-${{ matrix.os }}-${{ matrix.arch }}${{ matrix.with_elixir == 'yes' && '-elixir' || '' }}-${{ matrix.builder }}-${{ matrix.otp }}-${{ matrix.elixir }}
path: _packages/${{ matrix.profile }}/
@ -198,7 +198,7 @@ jobs:
profile:
- ${{ inputs.profile }}
steps:
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
pattern: "${{ matrix.profile }}-*"
path: packages/${{ matrix.profile }}

View File

@ -16,45 +16,56 @@ jobs:
linux:
if: github.repository_owner == 'emqx'
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
container:
image: "ghcr.io/emqx/emqx-builder/${{ matrix.profile[2] }}-${{ matrix.os }}"
strategy:
fail-fast: false
matrix:
profile:
- ['emqx', 'master']
- ['emqx', 'release-57']
- ['emqx', 'release-58']
- ['emqx', 'master', '5.3-8:1.15.7-26.2.5-2']
- ['emqx', 'release-57', '5.3-8:1.15.7-26.2.5-2']
os:
- ubuntu22.04
- amzn2023
env:
PROFILE: ${{ matrix.profile[0] }}
OS: ${{ matrix.os }}
BUILDER_SYSTEM: force_docker
defaults:
run:
shell: bash
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ matrix.profile[1] }}
fetch-depth: 0
- name: Set up environment
id: env
- name: fix workdir
run: |
source env.sh
BUILDER="ghcr.io/emqx/emqx-builder/${EMQX_BUILDER_VSN}:${ELIXIR_VSN}-${OTP_VSN}-${OS}"
echo "BUILDER=$BUILDER" >> "$GITHUB_ENV"
- name: build tgz
set -eu
git config --global --add safe.directory "$GITHUB_WORKSPACE"
# Align path for CMake caches
if [ ! "$PWD" = "/emqx" ]; then
ln -s $PWD /emqx
cd /emqx
fi
echo "pwd is $PWD"
- name: build emqx packages
env:
PROFILE: ${{ matrix.profile[0] }}
ACLOCAL_PATH: "/usr/share/aclocal:/usr/local/share/aclocal"
run: |
./scripts/buildx.sh --profile "$PROFILE" --pkgtype tgz --builder "$BUILDER"
- name: build pkg
set -eu
make "${PROFILE}-tgz"
make "${PROFILE}-pkg"
- name: test emqx packages
env:
PROFILE: ${{ matrix.profile[0] }}
run: |
./scripts/buildx.sh --profile "$PROFILE" --pkgtype pkg --builder "$BUILDER"
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
set -eu
./scripts/pkg-tests.sh "${PROFILE}-tgz"
./scripts/pkg-tests.sh "${PROFILE}-pkg"
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: success()
with:
name: ${{ matrix.profile[0] }}-${{ matrix.profile[1] }}-${{ matrix.os }}
@ -80,29 +91,26 @@ jobs:
- emqx
branch:
- master
otp:
- 26.2.5-2
os:
- macos-14-arm64
- macos-12-arm64
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ matrix.branch }}
fetch-depth: 0
- name: Set up environment
id: env
run: |
source env.sh
echo "OTP_VSN=$OTP_VSN" >> "$GITHUB_OUTPUT"
- uses: ./.github/actions/package-macos
with:
profile: ${{ matrix.profile }}
otp: ${{ steps.env.outputs.OTP_VSN }}
otp: ${{ matrix.otp }}
os: ${{ matrix.os }}
apple_id_password: ${{ secrets.APPLE_ID_PASSWORD }}
apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: success()
with:
name: ${{ matrix.profile }}-${{ matrix.os }}

View File

@ -6,50 +6,97 @@ concurrency:
on:
workflow_call:
inputs:
builder:
required: true
type: string
builder_vsn:
required: true
type: string
otp_vsn:
required: true
type: string
elixir_vsn:
required: true
type: string
workflow_dispatch:
inputs:
ref:
required: false
builder:
required: false
type: string
default: 'ghcr.io/emqx/emqx-builder/5.3-8:1.15.7-26.2.5-2-ubuntu22.04'
builder_vsn:
required: false
type: string
default: '5.3-8'
otp_vsn:
required: false
type: string
default: '26.2.5-2'
elixir_vsn:
required: false
type: string
default: '1.15.7'
permissions:
contents: read
jobs:
linux:
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON(format('["self-hosted","ephemeral","linux","{0}"]', matrix.profile[2])) || 'ubuntu-22.04' }}
runs-on: ${{ github.repository_owner == 'emqx' && fromJSON(format('["self-hosted","ephemeral","linux","{0}"]', matrix.profile[4])) || 'ubuntu-22.04' }}
env:
PROFILE: ${{ matrix.profile[0] }}
ELIXIR: ${{ matrix.profile[1] == 'elixir' && 'yes' || 'no' }}
ARCH: ${{ matrix.profile[2] == 'x64' && 'amd64' || 'arm64' }}
BUILDER_SYSTEM: force_docker
EMQX_NAME: ${{ matrix.profile[0] }}
strategy:
fail-fast: false
matrix:
profile:
- ["emqx", "elixir", "x64"]
- ["emqx", "elixir", "arm64"]
- ["emqx-enterprise", "erlang", "x64"]
- ["emqx", "26.2.5-2", "ubuntu22.04", "elixir", "x64"]
- ["emqx", "26.2.5-2", "ubuntu22.04", "elixir", "arm64"]
- ["emqx-enterprise", "26.2.5-2", "ubuntu22.04", "erlang", "x64"]
container: "ghcr.io/emqx/emqx-builder/${{ inputs.builder_vsn }}:${{ inputs.elixir_vsn }}-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}"
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
fetch-depth: 0
- name: build tgz
- name: Work around https://github.com/actions/checkout/issues/766
run: |
./scripts/buildx.sh --profile $PROFILE --pkgtype tgz --elixir $ELIXIR --arch $ARCH
- name: build pkg
git config --global --add safe.directory "$GITHUB_WORKSPACE"
echo "CODE_PATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV
- name: build and test tgz package
if: matrix.profile[3] == 'erlang'
run: |
./scripts/buildx.sh --profile $PROFILE --pkgtype pkg --elixir $ELIXIR --arch $ARCH
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
make ${EMQX_NAME}-tgz
./scripts/pkg-tests.sh ${EMQX_NAME}-tgz
- name: build and test deb/rpm packages
if: matrix.profile[3] == 'erlang'
run: |
make ${EMQX_NAME}-pkg
./scripts/pkg-tests.sh ${EMQX_NAME}-pkg
- name: build and test tgz package (Elixir)
if: matrix.profile[3] == 'elixir'
run: |
make ${EMQX_NAME}-elixir-tgz
./scripts/pkg-tests.sh ${EMQX_NAME}-elixir-tgz
- name: build and test deb/rpm packages (Elixir)
if: matrix.profile[3] == 'elixir'
run: |
make ${EMQX_NAME}-elixir-pkg
./scripts/pkg-tests.sh ${EMQX_NAME}-elixir-pkg
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: "${{ matrix.profile[0] }}-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}"
name: "${{ matrix.profile[0] }}-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}-${{ matrix.profile[3] }}-${{ matrix.profile[4] }}"
path: _packages/${{ matrix.profile[0] }}/*
retention-days: 7
compression-level: 0
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: "${{ matrix.profile[0] }}-schema-dump-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}"
name: "${{ matrix.profile[0] }}-schema-dump-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}-${{ matrix.profile[3] }}-${{ matrix.profile[4] }}"
path: |
scripts/spellcheck
_build/docgen/${{ matrix.profile[0] }}/schema-en.json
@ -61,30 +108,27 @@ jobs:
matrix:
profile:
- emqx
otp:
- ${{ inputs.otp_vsn }}
os:
- macos-14-arm64
- macos-14
runs-on: ${{ matrix.os }}
env:
EMQX_NAME: ${{ matrix.profile }}
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
source env.sh
echo "OTP_VSN=$OTP_VSN" >> "$GITHUB_OUTPUT"
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: ./.github/actions/package-macos
with:
profile: ${{ matrix.profile }}
otp: ${{ steps.env.outputs.OTP_VSN }}
otp: ${{ matrix.otp }}
os: ${{ matrix.os }}
apple_id_password: ${{ secrets.APPLE_ID_PASSWORD }}
apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: ${{ matrix.os }}
path: _packages/**/*

View File

@ -22,7 +22,7 @@ jobs:
profile:
- emqx-enterprise
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
- run: make ensure-rebar3
- run: ./scripts/check-deps-integrity.escript
@ -37,7 +37,7 @@ jobs:
- run: ./scripts/check-elixir-deps-discrepancies.exs
- run: ./scripts/check-elixir-applications.exs
- name: Upload produced lock files
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: failure()
with:
name: ${{ matrix.profile }}_produced_lock_files

View File

@ -17,6 +17,8 @@ jobs:
permissions:
actions: read
security-events: write
container:
image: ghcr.io/emqx/emqx-builder/5.3-8:1.15.7-26.2.5-2-ubuntu22.04
strategy:
fail-fast: false
@ -24,17 +26,21 @@ jobs:
branch:
- master
- release-57
- release-58
language:
- cpp
- python
steps:
- name: Checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ matrix.branch }}
- name: Ensure git safe dir
run: |
git config --global --add safe.directory "$GITHUB_WORKSPACE"
make ensure-rebar3
- name: Initialize CodeQL
uses: github/codeql-action/init@7e187e1c529d80bac7b87a16e7a792427f65cf02 # v2.15.5
with:
@ -45,7 +51,14 @@ jobs:
env:
PROFILE: emqx-enterprise
run: |
./scripts/buildx.sh --profile emqx-enterprise --pkgtype rel
make emqx-enterprise-compile
- name: Fetch deps
if: matrix.language == 'python'
env:
PROFILE: emqx-enterprise
run: |
make deps-emqx-enterprise
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@7e187e1c529d80bac7b87a16e7a792427f65cf02 # v2.15.5

View File

@ -24,9 +24,8 @@ jobs:
ref:
- master
- release-57
- release-58
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ matrix.ref }}

View File

@ -26,13 +26,13 @@ jobs:
prepare:
runs-on: ubuntu-latest
if: github.repository_owner == 'emqx'
container: ghcr.io/emqx/emqx-builder/5.3-9:1.15.7-26.2.5-3-ubuntu20.04
container: ghcr.io/emqx/emqx-builder/5.3-8:1.15.7-26.2.5-2-ubuntu20.04
outputs:
BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }}
PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }}
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
fetch-depth: 0
ref: ${{ github.event.inputs.ref }}
@ -52,7 +52,7 @@ jobs:
id: package_file
run: |
echo "PACKAGE_FILE=$(find _packages/emqx -name 'emqx-*.deb' | head -n 1 | xargs basename)" >> $GITHUB_OUTPUT
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: emqx-ubuntu20.04
path: _packages/emqx/${{ steps.package_file.outputs.PACKAGE_FILE }}
@ -72,12 +72,12 @@ jobs:
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-west-1
- name: Checkout tf-emqx-performance-test
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test
ref: v0.2.3
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
name: emqx-ubuntu20.04
path: tf-emqx-performance-test/
@ -113,13 +113,13 @@ jobs:
working-directory: ./tf-emqx-performance-test
run: |
terraform destroy -auto-approve
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: success()
with:
name: metrics
path: |
"./tf-emqx-performance-test/*.tar.gz"
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: failure()
with:
name: terraform
@ -143,12 +143,12 @@ jobs:
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-west-1
- name: Checkout tf-emqx-performance-test
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test
ref: v0.2.3
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
name: emqx-ubuntu20.04
path: tf-emqx-performance-test/
@ -184,13 +184,13 @@ jobs:
working-directory: ./tf-emqx-performance-test
run: |
terraform destroy -auto-approve
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: success()
with:
name: metrics
path: |
"./tf-emqx-performance-test/*.tar.gz"
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: failure()
with:
name: terraform
@ -215,12 +215,12 @@ jobs:
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-west-1
- name: Checkout tf-emqx-performance-test
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test
ref: v0.2.3
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
name: emqx-ubuntu20.04
path: tf-emqx-performance-test/
@ -257,13 +257,13 @@ jobs:
working-directory: ./tf-emqx-performance-test
run: |
terraform destroy -auto-approve
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: success()
with:
name: metrics
path: |
"./tf-emqx-performance-test/*.tar.gz"
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: failure()
with:
name: terraform
@ -289,12 +289,12 @@ jobs:
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-west-1
- name: Checkout tf-emqx-performance-test
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test
ref: v0.2.3
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
name: emqx-ubuntu20.04
path: tf-emqx-performance-test/
@ -330,13 +330,13 @@ jobs:
working-directory: ./tf-emqx-performance-test
run: |
terraform destroy -auto-approve
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: success()
with:
name: metrics
path: |
"./tf-emqx-performance-test/*.tar.gz"
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: failure()
with:
name: terraform

View File

@ -36,7 +36,7 @@ jobs:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ github.event.inputs.tag }}
- name: Detect profile
@ -131,7 +131,7 @@ jobs:
checks: write
actions: write
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: trigger re-run of app versions check on open PRs
shell: bash
env:

View File

@ -25,7 +25,7 @@ jobs:
- emqx
- emqx-enterprise
steps:
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
name: ${{ matrix.profile }}
- name: extract artifact
@ -40,7 +40,7 @@ jobs:
if: failure()
run: |
cat _build/${{ matrix.profile }}/rel/emqx/log/erlang.log.*
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: failure()
with:
name: conftest-logs-${{ matrix.profile }}

View File

@ -6,6 +6,13 @@ concurrency:
on:
workflow_call:
inputs:
version-emqx:
required: true
type: string
version-emqx-enterprise:
required: true
type: string
permissions:
contents: read
@ -25,17 +32,12 @@ jobs:
env:
EMQX_NAME: ${{ matrix.profile[0] }}
PKG_VSN: ${{ matrix.profile[0] == 'emqx-enterprise' && inputs.version-emqx-enterprise || inputs.version-emqx }}
EMQX_IMAGE_OLD_VERSION_TAG: ${{ matrix.profile[1] }}
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh "$EMQX_NAME")
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
name: ${{ env.EMQX_NAME }}-docker
path: /tmp
@ -50,11 +52,9 @@ jobs:
docker compose up --abort-on-container-exit --exit-code-from selenium
- name: test two nodes cluster with proto_dist=inet_tls in docker
run: |
## -d 1 means only put node 1 (latest version) behind haproxy
./scripts/test/start-two-nodes-in-docker.sh -d 1 -P $_EMQX_DOCKER_IMAGE_TAG $EMQX_IMAGE_OLD_VERSION_TAG
./scripts/test/start-two-nodes-in-docker.sh -P $_EMQX_DOCKER_IMAGE_TAG $EMQX_IMAGE_OLD_VERSION_TAG
HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' haproxy)
./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT
## -c menas 'cleanup'
./scripts/test/start-two-nodes-in-docker.sh -c
- name: cleanup
if: always()
@ -69,6 +69,8 @@ jobs:
shell: bash
env:
EMQX_NAME: ${{ matrix.profile }}
PKG_VSN: ${{ matrix.profile == 'emqx-enterprise' && inputs.version-emqx-enterprise || inputs.version-emqx }}
_EMQX_TEST_DB_BACKEND: ${{ matrix.cluster_db_backend }}
strategy:
fail-fast: false
@ -77,20 +79,12 @@ jobs:
- emqx
- emqx-enterprise
- emqx-elixir
cluster_db_backend:
- mnesia
- rlog
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
source env.sh
if [ "$EMQX_NAME" = "emqx-enterprise" ]; then
_EMQX_TEST_DB_BACKEND='rlog'
else
_EMQX_TEST_DB_BACKEND='mnesia'
fi
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh "$EMQX_NAME")
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
name: ${{ env.EMQX_NAME }}-docker
path: /tmp

View File

@ -37,7 +37,7 @@ jobs:
matrix: ${{ steps.matrix.outputs.matrix }}
skip: ${{ steps.matrix.outputs.skip }}
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
fetch-depth: 0
- name: prepare test matrix
@ -72,7 +72,7 @@ jobs:
run:
shell: bash
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
fetch-depth: 0
- name: run
@ -95,7 +95,7 @@ jobs:
echo "Suites: $SUITES"
./rebar3 as standalone_test ct --name 'test@127.0.0.1' -v --readable=true --suite="$SUITES"
fi
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: failure()
with:
name: logs-emqx-app-tests-${{ matrix.type }}

View File

@ -6,6 +6,13 @@ concurrency:
on:
workflow_call:
inputs:
version-emqx:
required: true
type: string
version-emqx-enterprise:
required: true
type: string
permissions:
contents: read
@ -18,6 +25,7 @@ jobs:
shell: bash
env:
EMQX_NAME: ${{ matrix.profile }}
EMQX_TAG: ${{ matrix.profile == 'emqx-enterprise' && inputs.version-emqx-enterprise || inputs.version-emqx }}
REPOSITORY: "emqx/${{ matrix.profile }}"
strategy:
@ -34,17 +42,10 @@ jobs:
- ssl1.3
- ssl1.2
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
path: source
- name: Set up environment
id: env
run: |
cd source
source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh "$EMQX_NAME")
echo "EMQX_TAG=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
name: "${{ env.EMQX_NAME }}-docker"
path: /tmp
@ -164,7 +165,7 @@ jobs:
fi
sleep 1;
done
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
repository: emqx/paho.mqtt.testing
ref: develop-5.0

View File

@ -2,6 +2,10 @@ name: JMeter integration tests
on:
workflow_call:
inputs:
version-emqx:
required: true
type: string
permissions:
contents: read
@ -31,7 +35,7 @@ jobs:
else
wget --no-verbose --no-check-certificate -O /tmp/apache-jmeter.tgz $ARCHIVE_URL
fi
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: apache-jmeter.tgz
path: /tmp/apache-jmeter.tgz
@ -51,23 +55,10 @@ jobs:
needs: jmeter_artifact
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: emqx-docker
path: /tmp
- name: load docker image
shell: bash
run: |
EMQX_DOCKER_IMAGE_TAG=$(docker load < /tmp/emqx-docker-${PKG_VSN}.tar.gz | sed 's/Loaded image: //g')
echo "_EMQX_DOCKER_IMAGE_TAG=$EMQX_DOCKER_IMAGE_TAG" >> $GITHUB_ENV
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: ./.github/actions/prepare-jmeter
with:
version-emqx: ${{ inputs.version-emqx }}
- name: docker compose up
timeout-minutes: 5
run: |
@ -95,7 +86,7 @@ jobs:
echo "check logs failed"
exit 1
fi
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: always()
with:
name: jmeter_logs-advanced_feat-${{ matrix.scripts_type }}
@ -120,23 +111,10 @@ jobs:
needs: jmeter_artifact
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: emqx-docker
path: /tmp
- name: load docker image
shell: bash
run: |
EMQX_DOCKER_IMAGE_TAG=$(docker load < /tmp/emqx-docker-${PKG_VSN}.tar.gz | sed 's/Loaded image: //g')
echo "_EMQX_DOCKER_IMAGE_TAG=$EMQX_DOCKER_IMAGE_TAG" >> $GITHUB_ENV
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: ./.github/actions/prepare-jmeter
with:
version-emqx: ${{ inputs.version-emqx }}
- name: docker compose up
timeout-minutes: 5
env:
@ -175,7 +153,7 @@ jobs:
if: failure()
run: |
docker compose -f .ci/docker-compose-file/docker-compose-emqx-cluster.yaml logs --no-color > ./jmeter_logs/emqx.log
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: always()
with:
name: jmeter_logs-pgsql_authn_authz-${{ matrix.scripts_type }}_${{ matrix.pgsql_tag }}
@ -197,23 +175,10 @@ jobs:
needs: jmeter_artifact
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: emqx-docker
path: /tmp
- name: load docker image
shell: bash
run: |
EMQX_DOCKER_IMAGE_TAG=$(docker load < /tmp/emqx-docker-${PKG_VSN}.tar.gz | sed 's/Loaded image: //g')
echo "_EMQX_DOCKER_IMAGE_TAG=$EMQX_DOCKER_IMAGE_TAG" >> $GITHUB_ENV
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: ./.github/actions/prepare-jmeter
with:
version-emqx: ${{ inputs.version-emqx }}
- name: docker compose up
timeout-minutes: 5
env:
@ -248,7 +213,7 @@ jobs:
echo "check logs failed"
exit 1
fi
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: always()
with:
name: jmeter_logs-mysql_authn_authz-${{ matrix.scripts_type }}_${{ matrix.mysql_tag }}
@ -266,23 +231,10 @@ jobs:
needs: jmeter_artifact
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: emqx-docker
path: /tmp
- name: load docker image
shell: bash
run: |
EMQX_DOCKER_IMAGE_TAG=$(docker load < /tmp/emqx-docker-${PKG_VSN}.tar.gz | sed 's/Loaded image: //g')
echo "_EMQX_DOCKER_IMAGE_TAG=$EMQX_DOCKER_IMAGE_TAG" >> $GITHUB_ENV
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: ./.github/actions/prepare-jmeter
with:
version-emqx: ${{ inputs.version-emqx }}
- name: docker compose up
timeout-minutes: 5
run: |
@ -313,7 +265,7 @@ jobs:
echo "check logs failed"
exit 1
fi
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: always()
with:
name: jmeter_logs-JWT_authn-${{ matrix.scripts_type }}
@ -332,23 +284,10 @@ jobs:
needs: jmeter_artifact
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: emqx-docker
path: /tmp
- name: load docker image
shell: bash
run: |
EMQX_DOCKER_IMAGE_TAG=$(docker load < /tmp/emqx-docker-${PKG_VSN}.tar.gz | sed 's/Loaded image: //g')
echo "_EMQX_DOCKER_IMAGE_TAG=$EMQX_DOCKER_IMAGE_TAG" >> $GITHUB_ENV
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: ./.github/actions/prepare-jmeter
with:
version-emqx: ${{ inputs.version-emqx }}
- name: docker compose up
timeout-minutes: 5
run: |
@ -370,7 +309,7 @@ jobs:
echo "check logs failed"
exit 1
fi
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: always()
with:
name: jmeter_logs-built_in_database_authn_authz-${{ matrix.scripts_type }}

View File

@ -25,7 +25,7 @@ jobs:
run:
shell: bash
steps:
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
name: emqx-enterprise
- name: extract artifact
@ -45,7 +45,7 @@ jobs:
run: |
export PROFILE='emqx-enterprise'
make emqx-enterprise-tgz
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
name: Upload built emqx and test scenario
with:
name: relup_tests_emqx_built
@ -72,10 +72,10 @@ jobs:
run:
shell: bash
steps:
- uses: erlef/setup-beam@b9c58b0450cd832ccdb3c17cc156a47065d2114f # v1.18.1
- uses: erlef/setup-beam@0a541161e47ec43ccbd9510053c5f336ca76c2a2 # v1.17.6
with:
otp-version: 26.2.5
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
repository: hawk/lux
ref: lux-2.8.1
@ -88,7 +88,7 @@ jobs:
./configure
make
echo "$(pwd)/bin" >> $GITHUB_PATH
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
name: Download built emqx and test scenario
with:
name: relup_tests_emqx_built
@ -111,7 +111,7 @@ jobs:
docker logs node2.emqx.io | tee lux_logs/emqx2.log
exit 1
fi
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
name: Save debug data
if: failure()
with:

View File

@ -35,18 +35,18 @@ jobs:
defaults:
run:
shell: bash
container: ${{ inputs.builder }}
container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04"
env:
PROFILE: ${{ matrix.profile }}
ENABLE_COVER_COMPILE: 1
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
permissions:
contents: read
steps:
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
name: ${{ matrix.profile }}
@ -90,7 +90,7 @@ jobs:
contents: read
steps:
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
name: ${{ matrix.profile }}
- name: extract artifact
@ -100,7 +100,7 @@ jobs:
# produces $PROFILE-<app-name>-<otp-vsn>-sg<suitegroup>.coverdata
- name: run common tests
env:
DOCKER_CT_RUNNER_IMAGE: ${{ inputs.builder }}
DOCKER_CT_RUNNER_IMAGE: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04"
MONGO_TAG: "5"
MYSQL_TAG: "8"
PGSQL_TAG: "13"
@ -111,7 +111,7 @@ jobs:
MINIO_TAG: "RELEASE.2023-03-20T20-16-18Z"
SUITEGROUP: ${{ matrix.suitegroup }}
ENABLE_COVER_COMPILE: 1
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-sg${{ matrix.suitegroup }}
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: ./scripts/ct/run.sh --ci --app ${{ matrix.app }} --keep-up
@ -133,10 +133,10 @@ jobs:
if: failure()
run: tar -czf logs.tar.gz _build/test/logs
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: failure()
with:
name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-sg${{ matrix.suitegroup }}
name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }}
path: logs.tar.gz
compression-level: 0
retention-days: 7
@ -149,7 +149,7 @@ jobs:
matrix:
include: ${{ fromJson(inputs.ct-host) }}
container: ${{ inputs.builder }}
container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04"
defaults:
run:
shell: bash
@ -161,10 +161,10 @@ jobs:
PROFILE: ${{ matrix.profile }}
SUITEGROUP: ${{ matrix.suitegroup }}
ENABLE_COVER_COMPILE: 1
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-sg${{ matrix.suitegroup }}
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }}
steps:
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
name: ${{ matrix.profile }}
- name: extract artifact
@ -193,10 +193,10 @@ jobs:
if: failure()
run: tar -czf logs.tar.gz _build/test/logs
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: failure()
with:
name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-sg${{ matrix.suitegroup }}
name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }}
path: logs.tar.gz
compression-level: 0
retention-days: 7

View File

@ -25,12 +25,12 @@ jobs:
steps:
- name: "Checkout code"
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0
uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # v2.3.3
with:
results_file: results.sarif
results_format: sarif
@ -40,7 +40,7 @@ jobs:
publish_results: true
- name: "Upload artifact"
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: SARIF file
path: results.sarif

View File

@ -19,7 +19,7 @@ jobs:
- emqx-enterprise
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
steps:
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
pattern: "${{ matrix.profile }}-schema-dump-*-x64"
merge-multiple: true

View File

@ -28,9 +28,9 @@ jobs:
fail-fast: false
matrix:
include: ${{ fromJson(inputs.ct-matrix) }}
container: "${{ inputs.builder }}"
container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04"
steps:
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
name: ${{ matrix.profile }}
- name: extract artifact
@ -39,10 +39,10 @@ jobs:
git config --global --add safe.directory "$GITHUB_WORKSPACE"
- uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: "emqx_dialyzer_${{ matrix.profile }}_plt"
key: rebar3-dialyzer-plt-${{ matrix.profile }}-${{ hashFiles('rebar.*', 'apps/*/rebar.*') }}
path: "emqx_dialyzer_${{ matrix.otp }}_plt"
key: rebar3-dialyzer-plt-${{ matrix.profile }}-${{ matrix.otp }}-${{ hashFiles('rebar.*', 'apps/*/rebar.*') }}
restore-keys: |
rebar3-dialyzer-plt-${{ matrix.profile }}-
rebar3-dialyzer-plt-${{ matrix.profile }}-${{ matrix.otp }}-
- run: cat .env | tee -a $GITHUB_ENV
- name: run static checks
run: make static_checks

View File

@ -1,88 +0,0 @@
name: Sync release branch
concurrency:
group: sync-release-branch-${{ github.event_name }}-${{ github.ref }}
cancel-in-progress: true
on:
schedule:
- cron: '0 2 * * *'
workflow_dispatch:
permissions:
contents: read
jobs:
create-pr:
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
strategy:
fail-fast: false
matrix:
branch:
- release-57
env:
SYNC_BRANCH: ${{ matrix.branch }}
defaults:
run:
shell: bash
permissions:
contents: write
pull-requests: write
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
fetch-depth: 0
- name: create new branch
run: |
set -euxo pipefail
NEW_BRANCH_NAME=sync-${SYNC_BRANCH}-$(date +"%Y%m%d-%H%M%S")
echo "NEW_BRANCH_NAME=${NEW_BRANCH_NAME}" >> $GITHUB_ENV
git config --global user.name "${GITHUB_ACTOR}"
git config --global user.email "${GITHUB_ACTOR}@users.noreply.github.com"
git checkout -b ${NEW_BRANCH_NAME}
git merge origin/${SYNC_BRANCH} 2>&1 | tee merge.log
git push origin ${NEW_BRANCH_NAME}:${NEW_BRANCH_NAME}
- name: create pull request
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
set -euxo pipefail
for pr in $(gh pr list --state open --base master --label sync-release-branch --search "Sync ${SYNC_BRANCH} in:title" --repo ${{ github.repository }} --json number --jq '.[] | .number'); do
gh pr close $pr --repo ${{ github.repository }} --delete-branch || true
done
gh pr create --title "Sync ${SYNC_BRANCH}" --body "Sync ${SYNC_BRANCH}" --base master --head ${NEW_BRANCH_NAME} --label sync-release-branch --repo ${{ github.repository }}
- name: Send notification to Slack
if: failure()
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
run: |
awk '{printf "%s\\n", $0}' merge.log > merge.log.1
cat <<EOF > payload.json
{
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "Automatic sync of ${SYNC_BRANCH} branch failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
}
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "\`\`\`$(cat merge.log.1)\`\`\`"
}
}
]
}
EOF
curl -X POST -H 'Content-type: application/json' --data @payload.json "$SLACK_WEBHOOK_URL"

View File

@ -23,7 +23,7 @@ jobs:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ github.event.inputs.tag }}
- name: Detect profile

View File

@ -1,2 +1,2 @@
erlang 26.2.5-3
erlang 26.2.5-2
elixir 1.15.7-otp-26

View File

@ -6,15 +6,22 @@ endif
REBAR = $(CURDIR)/rebar3
BUILD = $(CURDIR)/build
SCRIPTS = $(CURDIR)/scripts
include env.sh
export EMQX_RELUP ?= true
export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.3-8:1.15.7-26.2.5-2-debian12
export EMQX_DEFAULT_RUNNER = public.ecr.aws/debian/debian:12-slim
export EMQX_REL_FORM ?= tgz
export QUICER_DOWNLOAD_FROM_RELEASE = 1
ifeq ($(OS),Windows_NT)
export REBAR_COLOR=none
FIND=/usr/bin/find
else
FIND=find
endif
# Dashboard version
# from https://github.com/emqx/emqx-dashboard5
export EMQX_DASHBOARD_VERSION ?= v1.10.0-beta.1
export EMQX_EE_DASHBOARD_VERSION ?= e1.8.0-beta.1
export EMQX_RELUP ?= true
export EMQX_REL_FORM ?= tgz
export EMQX_DASHBOARD_VERSION ?= v1.9.1-beta.1
export EMQX_EE_DASHBOARD_VERSION ?= e1.7.1-beta.1
-include default-profile.mk
PROFILE ?= emqx
@ -28,8 +35,6 @@ CT_COVER_EXPORT_PREFIX ?= $(PROFILE)
export REBAR_GIT_CLONE_OPTIONS += --depth=1
ELIXIR_COMMON_DEPS := ensure-hex ensure-mix-rebar3 ensure-mix-rebar
.PHONY: default
default: $(REBAR) $(PROFILE)
@ -60,12 +65,8 @@ ensure-mix-rebar3: $(REBAR)
ensure-mix-rebar: $(REBAR)
@mix local.rebar --if-missing --force
.PHONY: elixir-common-deps
elixir-common-deps: $(ELIXIR_COMMON_DEPS)
.PHONY: mix-deps-get
mix-deps-get: elixir-common-deps
mix-deps-get: $(ELIXIR_COMMON_DEPS)
@mix deps.get
.PHONY: eunit
@ -195,8 +196,8 @@ $(PROFILES:%=clean-%):
@if [ -d _build/$(@:clean-%=%) ]; then \
rm -f rebar.lock; \
rm -rf _build/$(@:clean-%=%)/rel; \
find _build/$(@:clean-%=%) -name '*.beam' -o -name '*.so' -o -name '*.app' -o -name '*.appup' -o -name '*.o' -o -name '*.d' -type f | xargs rm -f; \
find _build/$(@:clean-%=%) -type l -delete; \
$(FIND) _build/$(@:clean-%=%) -name '*.beam' -o -name '*.so' -o -name '*.app' -o -name '*.appup' -o -name '*.o' -o -name '*.d' -type f | xargs rm -f; \
$(FIND) _build/$(@:clean-%=%) -type l -delete; \
fi
.PHONY: clean-all
@ -244,7 +245,7 @@ $(foreach zt,$(ALL_ZIPS),$(eval $(call download-relup-packages,$(zt))))
## relup target is to create relup instructions
.PHONY: $(REL_PROFILES:%=%-relup)
define gen-relup-target
$1-relup: $(COMMON_DEPS)
$1-relup: $1-relup-downloads $(COMMON_DEPS)
@$(BUILD) $1 relup
endef
ALL_TGZS = $(REL_PROFILES)
@ -253,7 +254,7 @@ $(foreach zt,$(ALL_TGZS),$(eval $(call gen-relup-target,$(zt))))
## tgz target is to create a release package .tar.gz with relup
.PHONY: $(REL_PROFILES:%=%-tgz)
define gen-tgz-target
$1-tgz: $(COMMON_DEPS)
$1-tgz: $1-relup
@$(BUILD) $1 tgz
endef
ALL_TGZS = $(REL_PROFILES)
@ -316,12 +317,12 @@ $(foreach tt,$(ALL_ELIXIR_TGZS),$(eval $(call gen-elixir-tgz-target,$(tt))))
.PHONY: fmt
fmt: $(REBAR)
@find . \( -name '*.app.src' -o \
-name '*.erl' -o \
-name '*.hrl' -o \
-name 'rebar.config' -o \
-name '*.eterm' -o \
-name '*.escript' \) \
@$(FIND) . \( -name '*.app.src' -o \
-name '*.erl' -o \
-name '*.hrl' -o \
-name 'rebar.config' -o \
-name '*.eterm' -o \
-name '*.escript' \) \
-not -path '*/_build/*' \
-not -path '*/deps/*' \
-not -path '*/_checkouts/*' \

View File

@ -65,20 +65,9 @@
%% Route
%%--------------------------------------------------------------------
-record(share_dest, {
session_id :: emqx_session:session_id(),
group :: emqx_types:group()
}).
-record(route, {
topic :: binary(),
dest ::
node()
| {binary(), node()}
| emqx_session:session_id()
%% One session can also have multiple subscriptions to the same topic through different groups
| #share_dest{}
| emqx_external_broker:dest()
dest :: node() | {binary(), node()} | emqx_session:session_id() | emqx_external_broker:dest()
}).
%%--------------------------------------------------------------------
@ -106,10 +95,4 @@
until :: integer()
}).
%%--------------------------------------------------------------------
%% Configurations
%%--------------------------------------------------------------------
-define(KIND_REPLICATE, replicate).
-define(KIND_INITIATE, initiate).
-endif.

View File

@ -683,7 +683,6 @@ end).
-define(FRAME_PARSE_ERROR, frame_parse_error).
-define(FRAME_SERIALIZE_ERROR, frame_serialize_error).
-define(THROW_FRAME_ERROR(Reason), erlang:throw({?FRAME_PARSE_ERROR, Reason})).
-define(THROW_SERIALIZE_ERROR(Reason), erlang:throw({?FRAME_SERIALIZE_ERROR, Reason})).

View File

@ -32,7 +32,7 @@
%% `apps/emqx/src/bpapi/README.md'
%% Opensource edition
-define(EMQX_RELEASE_CE, "5.8.0-alpha.1").
-define(EMQX_RELEASE_CE, "5.7.1-alpha.1").
%% Enterprise edition
-define(EMQX_RELEASE_EE, "5.8.0-alpha.1").
-define(EMQX_RELEASE_EE, "5.7.1-alpha.1").

View File

@ -41,20 +41,16 @@
).
%% NOTE: do not forget to use atom for msg and add every used msg to
%% the default value of `log.throttling.msgs` list.
%% the default value of `log.thorttling.msgs` list.
-define(SLOG_THROTTLE(Level, Data),
?SLOG_THROTTLE(Level, Data, #{})
).
-define(SLOG_THROTTLE(Level, Data, Meta),
?SLOG_THROTTLE(Level, undefined, Data, Meta)
).
-define(SLOG_THROTTLE(Level, UniqueKey, Data, Meta),
case logger:allow(Level, ?MODULE) of
true ->
(fun(#{msg := __Msg} = __Data) ->
case emqx_log_throttler:allow(__Msg, UniqueKey) of
case emqx_log_throttler:allow(__Msg) of
true ->
logger:log(Level, __Data, Meta);
false ->
@ -91,7 +87,7 @@
?_DO_TRACE(Tag, Msg, Meta),
?SLOG(
Level,
(Meta)#{msg => Msg, tag => Tag},
(emqx_trace_formatter:format_meta_map(Meta))#{msg => Msg, tag => Tag},
#{is_trace => false}
)
end).

View File

@ -163,7 +163,7 @@ mk_clientid(Prefix, ID) ->
restart_node(Node, NodeSpec) ->
?tp(will_restart_node, #{}),
emqx_cth_cluster:restart(NodeSpec),
emqx_cth_cluster:restart(Node, NodeSpec),
wait_nodeup(Node),
?tp(restarted_node, #{}),
ok.

View File

@ -1,74 +0,0 @@
defmodule EMQX.MixProject do
use Mix.Project
alias EMQXUmbrella.MixProject, as: UMP
def project do
[
app: :emqx,
version: "0.1.0",
build_path: "../../_build",
erlc_paths: erlc_paths(),
erlc_options: [
{:i, "src"}
| UMP.erlc_options()
],
compilers: Mix.compilers() ++ [:copy_srcs],
# used by our `Mix.Tasks.Compile.CopySrcs` compiler
extra_dirs: extra_dirs(),
deps_path: "../../deps",
lockfile: "../../mix.lock",
elixir: "~> 1.14",
start_permanent: Mix.env() == :prod,
deps: deps()
]
end
# Run "mix help compile.app" to learn about applications
def application do
[
## FIXME!!! go though emqx.app.src and add missing stuff...
extra_applications: [:public_key, :ssl, :os_mon, :logger, :mnesia, :sasl] ++ UMP.extra_applications(),
mod: {:emqx_app, []}
]
end
def deps() do
## FIXME!!! go though emqx.app.src and add missing stuff...
[
{:emqx_mix_utils, in_umbrella: true, runtime: false},
{:emqx_utils, in_umbrella: true},
{:emqx_ds_backends, in_umbrella: true},
UMP.common_dep(:gproc),
UMP.common_dep(:gen_rpc),
UMP.common_dep(:ekka),
UMP.common_dep(:esockd),
UMP.common_dep(:cowboy),
UMP.common_dep(:lc),
UMP.common_dep(:hocon),
UMP.common_dep(:ranch),
UMP.common_dep(:bcrypt),
UMP.common_dep(:pbkdf2),
UMP.common_dep(:emqx_http_lib),
] ++ UMP.quicer_dep()
end
defp erlc_paths() do
paths = UMP.erlc_paths()
if UMP.test_env?() do
["integration_test" | paths]
else
paths
end
end
defp extra_dirs() do
dirs = ["src", "etc"]
if UMP.test_env?() do
["test", "integration_test" | dirs]
else
dirs
end
end
end

View File

@ -10,14 +10,12 @@
{emqx_bridge,5}.
{emqx_bridge,6}.
{emqx_broker,1}.
{emqx_cluster_link,1}.
{emqx_cm,1}.
{emqx_cm,2}.
{emqx_cm,3}.
{emqx_conf,1}.
{emqx_conf,2}.
{emqx_conf,3}.
{emqx_conf,4}.
{emqx_connector,1}.
{emqx_dashboard,1}.
{emqx_delayed,1}.
@ -27,7 +25,6 @@
{emqx_ds,2}.
{emqx_ds,3}.
{emqx_ds,4}.
{emqx_ds_shared_sub,1}.
{emqx_eviction_agent,1}.
{emqx_eviction_agent,2}.
{emqx_eviction_agent,3}.
@ -50,7 +47,6 @@
{emqx_mgmt_api_plugins,1}.
{emqx_mgmt_api_plugins,2}.
{emqx_mgmt_api_plugins,3}.
{emqx_mgmt_api_relup,1}.
{emqx_mgmt_cluster,1}.
{emqx_mgmt_cluster,2}.
{emqx_mgmt_cluster,3}.
@ -63,6 +59,7 @@
{emqx_node_rebalance_api,1}.
{emqx_node_rebalance_api,2}.
{emqx_node_rebalance_evacuation,1}.
{emqx_node_rebalance_purge,1}.
{emqx_node_rebalance_status,1}.
{emqx_node_rebalance_status,2}.
{emqx_persistent_session_ds,1}.

View File

@ -28,14 +28,15 @@
{lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}},
{gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}},
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.12.0"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.19.5"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.11.2"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.19.4"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.43.2"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.42.2"}}},
{emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}},
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
{snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.10"}}}
{snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.10"}}},
{ra, "2.7.3"}
]}.
{plugins, [{rebar3_proper, "0.12.1"}, rebar3_path_deps]}.

View File

@ -239,9 +239,8 @@ log_formatter(HandlerName, Conf) ->
end,
TsFormat = timestamp_format(Conf),
WithMfa = conf_get("with_mfa", Conf),
PayloadEncode = conf_get("payload_encode", Conf, text),
do_formatter(
Format, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, WithMfa, PayloadEncode
Format, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, WithMfa
).
%% auto | epoch | rfc3339
@ -249,17 +248,16 @@ timestamp_format(Conf) ->
conf_get("timestamp_format", Conf).
%% helpers
do_formatter(json, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, WithMfa, PayloadEncode) ->
do_formatter(json, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, WithMfa) ->
{emqx_logger_jsonfmt, #{
chars_limit => CharsLimit,
single_line => SingleLine,
time_offset => TimeOffSet,
depth => Depth,
timestamp_format => TsFormat,
with_mfa => WithMfa,
payload_encode => PayloadEncode
with_mfa => WithMfa
}};
do_formatter(text, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, WithMfa, PayloadEncode) ->
do_formatter(text, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, WithMfa) ->
{emqx_logger_textfmt, #{
template => ["[", level, "] ", msg, "\n"],
chars_limit => CharsLimit,
@ -267,8 +265,7 @@ do_formatter(text, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, WithMfa,
time_offset => TimeOffSet,
depth => Depth,
timestamp_format => TsFormat,
with_mfa => WithMfa,
payload_encode => PayloadEncode
with_mfa => WithMfa
}}.
%% Don't record all logger message

View File

@ -2,7 +2,7 @@
{application, emqx, [
{id, "emqx"},
{description, "EMQX Core"},
{vsn, "5.3.4"},
{vsn, "5.3.1"},
{modules, []},
{registered, []},
{applications, [

View File

@ -61,12 +61,9 @@
get_raw_config/2,
update_config/2,
update_config/3,
update_config/4,
remove_config/1,
remove_config/2,
remove_config/3,
reset_config/2,
reset_config/3,
data_dir/0,
etc_file/1,
cert_file/1,
@ -198,7 +195,7 @@ get_raw_config(KeyPath, Default) ->
-spec update_config(emqx_utils_maps:config_key_path(), emqx_config:update_request()) ->
{ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
update_config(KeyPath, UpdateReq) ->
update_config(KeyPath, UpdateReq, #{}, #{}).
update_config(KeyPath, UpdateReq, #{}).
-spec update_config(
emqx_utils_maps:config_key_path(),
@ -206,56 +203,30 @@ update_config(KeyPath, UpdateReq) ->
emqx_config:update_opts()
) ->
{ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
update_config(KeyPath, UpdateReq, Opts) ->
update_config(KeyPath, UpdateReq, Opts, #{}).
-spec update_config(
emqx_utils_maps:config_key_path(),
emqx_config:update_request(),
emqx_config:update_opts(),
emqx_config:cluster_rpc_opts()
) ->
{ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
update_config([RootName | _] = KeyPath, UpdateReq, Opts, ClusterRpcOpts) ->
update_config([RootName | _] = KeyPath, UpdateReq, Opts) ->
emqx_config_handler:update_config(
emqx_config:get_schema_mod(RootName),
KeyPath,
{{update, UpdateReq}, Opts},
ClusterRpcOpts
{{update, UpdateReq}, Opts}
).
-spec remove_config(emqx_utils_maps:config_key_path()) ->
{ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
remove_config(KeyPath) ->
remove_config(KeyPath, #{}, #{}).
remove_config(KeyPath, #{}).
-spec remove_config(emqx_utils_maps:config_key_path(), emqx_config:update_opts()) ->
{ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
remove_config([_RootName | _] = KeyPath, Opts) ->
remove_config(KeyPath, Opts, #{}).
-spec remove_config(
emqx_utils_maps:config_key_path(), emqx_config:update_opts(), emqx_config:cluster_rpc_opts()
) ->
{ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
remove_config([RootName | _] = KeyPath, Opts, ClusterRpcOpts) ->
remove_config([RootName | _] = KeyPath, Opts) ->
emqx_config_handler:update_config(
emqx_config:get_schema_mod(RootName),
KeyPath,
{remove, Opts},
ClusterRpcOpts
{remove, Opts}
).
-spec reset_config(emqx_utils_maps:config_key_path(), emqx_config:update_opts()) ->
{ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
reset_config([RootName | SubKeys] = KeyPath, Opts) ->
reset_config([RootName | SubKeys] = KeyPath, Opts, #{}).
-spec reset_config(
emqx_utils_maps:config_key_path(), emqx_config:update_opts(), emqx_config:cluster_rpc_opts()
) ->
{ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
reset_config([RootName | SubKeys] = KeyPath, Opts, ClusterRpcOpts) ->
case emqx_config:get_default_value(KeyPath) of
{ok, Default} ->
Mod = emqx_config:get_schema_mod(RootName),
@ -264,8 +235,7 @@ reset_config([RootName | SubKeys] = KeyPath, Opts, ClusterRpcOpts) ->
emqx_config_handler:update_config(
Mod,
KeyPath,
{{update, Default}, Opts},
ClusterRpcOpts
{{update, Default}, Opts}
);
false ->
NewConf =
@ -277,8 +247,7 @@ reset_config([RootName | SubKeys] = KeyPath, Opts, ClusterRpcOpts) ->
emqx_config_handler:update_config(
Mod,
[RootName],
{{update, NewConf}, Opts},
ClusterRpcOpts
{{update, NewConf}, Opts}
)
end;
{error, _} = Error ->

View File

@ -16,8 +16,6 @@
-module(emqx_banned).
-feature(maybe_expr, enable).
-behaviour(gen_server).
-behaviour(emqx_db_backup).
@ -51,7 +49,6 @@
handle_call/3,
handle_cast/2,
handle_info/2,
handle_continue/2,
terminate/2,
code_change/3
]).
@ -140,7 +137,7 @@ format(#banned{
until => to_rfc3339(Until)
}.
-spec parse(map()) -> {ok, emqx_types:banned()} | {error, term()}.
-spec parse(map()) -> emqx_types:banned() | {error, term()}.
parse(Params) ->
case parse_who(Params) of
{error, Reason} ->
@ -152,13 +149,13 @@ parse(Params) ->
Until = maps:get(<<"until">>, Params, At + ?EXPIRATION_TIME),
case Until > erlang:system_time(second) of
true ->
{ok, #banned{
#banned{
who = Who,
by = By,
reason = Reason,
at = At,
until = Until
}};
};
false ->
ErrorReason =
io_lib:format("Cannot create expired banned, ~p to ~p", [At, Until]),
@ -242,139 +239,12 @@ who(peerhost_net, CIDR) when is_tuple(CIDR) -> {peerhost_net, CIDR};
who(peerhost_net, CIDR) when is_binary(CIDR) ->
{peerhost_net, esockd_cidr:parse(binary_to_list(CIDR), true)}.
%%--------------------------------------------------------------------
%% Import From CSV
%%--------------------------------------------------------------------
init_from_csv(undefined) ->
ok;
init_from_csv(File) ->
maybe
core ?= mria_rlog:role(),
'$end_of_table' ?= mnesia:dirty_first(?BANNED_RULE_TAB),
'$end_of_table' ?= mnesia:dirty_first(?BANNED_INDIVIDUAL_TAB),
{ok, Bin} ?= file:read_file(File),
Stream = emqx_utils_stream:csv(Bin, #{nullable => true, filter_null => true}),
{ok, List} ?= parse_stream(Stream),
import_from_stream(List),
?SLOG(info, #{
msg => "load_banned_bootstrap_file_succeeded",
file => File
})
else
replicant ->
ok;
{Name, _} when
Name == peerhost;
Name == peerhost_net;
Name == clientid_re;
Name == username_re;
Name == clientid;
Name == username
->
ok;
{error, Reason} = Error ->
?SLOG(error, #{
msg => "load_banned_bootstrap_file_failed",
reason => Reason,
file => File
}),
Error
end.
import_from_stream(Stream) ->
Groups = maps:groups_from_list(
fun(#banned{who = Who}) -> table(Who) end, Stream
),
maps:foreach(
fun(Tab, Items) ->
Trans = fun() ->
lists:foreach(
fun(Item) ->
mnesia:write(Tab, Item, write)
end,
Items
)
end,
case trans(Trans) of
{ok, _} ->
?SLOG(info, #{
msg => "import_banned_from_stream_succeeded",
items => Items
});
{error, Reason} ->
?SLOG(error, #{
msg => "import_banned_from_stream_failed",
reason => Reason,
items => Items
})
end
end,
Groups
).
parse_stream(Stream) ->
try
List = emqx_utils_stream:consume(Stream),
parse_stream(List, [], [])
catch
error:Reason ->
{error, Reason}
end.
parse_stream([Item | List], Ok, Error) ->
maybe
{ok, Item1} ?= normalize_parse_item(Item),
{ok, Banned} ?= parse(Item1),
parse_stream(List, [Banned | Ok], Error)
else
{error, _} ->
parse_stream(List, Ok, [Item | Error])
end;
parse_stream([], Ok, []) ->
{ok, Ok};
parse_stream([], Ok, Error) ->
?SLOG(warning, #{
msg => "invalid_banned_items",
items => Error
}),
{ok, Ok}.
normalize_parse_item(#{<<"as">> := As} = Item) ->
ParseTime = fun(Name, Input) ->
maybe
#{Name := Time} ?= Input,
{ok, Epoch} ?= emqx_utils_calendar:to_epoch_second(emqx_utils_conv:str(Time)),
{ok, Input#{Name := Epoch}}
else
{error, _} = Error ->
Error;
NoTime when is_map(NoTime) ->
{ok, NoTime}
end
end,
maybe
{ok, Type} ?= emqx_utils:safe_to_existing_atom(As),
{ok, Item1} ?= ParseTime(<<"at">>, Item#{<<"as">> := Type}),
ParseTime(<<"until">>, Item1)
end;
normalize_parse_item(_Item) ->
{error, invalid_item}.
%%--------------------------------------------------------------------
%% gen_server callbacks
%%--------------------------------------------------------------------
init([]) ->
{ok, ensure_expiry_timer(#{expiry_timer => undefined}), {continue, init_from_csv}}.
handle_continue(init_from_csv, State) ->
File = emqx_schema:naive_env_interpolation(
emqx:get_config([banned, bootstrap_file], undefined)
),
_ = init_from_csv(File),
{noreply, State}.
{ok, ensure_expiry_timer(#{expiry_timer => undefined})}.
handle_call(Req, _From, State) ->
?SLOG(error, #{msg => "unexpected_call", call => Req}),
@ -385,7 +255,7 @@ handle_cast(Msg, State) ->
{noreply, State}.
handle_info({timeout, TRef, expire}, State = #{expiry_timer := TRef}) ->
_ = trans(fun ?MODULE:expire_banned_items/1, [
_ = mria:transaction(?COMMON_SHARD, fun ?MODULE:expire_banned_items/1, [
erlang:system_time(second)
]),
{noreply, ensure_expiry_timer(State), hibernate};
@ -526,15 +396,3 @@ on_banned(_) ->
all_rules() ->
ets:tab2list(?BANNED_RULE_TAB).
trans(Fun) ->
case mria:transaction(?COMMON_SHARD, Fun) of
{atomic, Res} -> {ok, Res};
{aborted, Reason} -> {error, Reason}
end.
trans(Fun, Args) ->
case mria:transaction(?COMMON_SHARD, Fun, Args) of
{atomic, Res} -> {ok, Res};
{aborted, Reason} -> {error, Reason}
end.

View File

@ -146,9 +146,7 @@
-type replies() :: emqx_types:packet() | reply() | [reply()].
-define(IS_MQTT_V5, #channel{conninfo = #{proto_ver := ?MQTT_PROTO_V5}}).
-define(IS_CONNECTED_OR_REAUTHENTICATING(ConnState),
((ConnState == connected) orelse (ConnState == reauthenticating))
).
-define(IS_COMMON_SESSION_TIMER(N),
((N == retry_delivery) orelse (N == expire_awaiting_rel))
).
@ -237,7 +235,7 @@ caps(#channel{clientinfo = #{zone := Zone}}) ->
-spec init(emqx_types:conninfo(), opts()) -> channel().
init(
ConnInfo = #{
peername := {PeerHost, PeerPort} = PeerName,
peername := {PeerHost, PeerPort},
sockname := {_Host, SockPort}
},
#{
@ -261,9 +259,6 @@ init(
listener => ListenerId,
protocol => Protocol,
peerhost => PeerHost,
%% We copy peername to clientinfo because some event contexts only have access
%% to client info (e.g.: authn/authz).
peername => PeerName,
peerport => PeerPort,
sockport => SockPort,
clientid => undefined,
@ -275,7 +270,7 @@ init(
},
Zone
),
{NClientInfo, NConnInfo} = take_conn_info_fields([ws_cookie, peersni], ClientInfo, ConnInfo),
{NClientInfo, NConnInfo} = take_ws_cookie(ClientInfo, ConnInfo),
#channel{
conninfo = NConnInfo,
clientinfo = NClientInfo,
@ -315,19 +310,13 @@ set_peercert_infos(Peercert, ClientInfo, Zone) ->
ClientId = PeercetAs(peer_cert_as_clientid),
ClientInfo#{username => Username, clientid => ClientId, dn => DN, cn => CN}.
take_conn_info_fields(Fields, ClientInfo, ConnInfo) ->
lists:foldl(
fun(Field, {ClientInfo0, ConnInfo0}) ->
case maps:take(Field, ConnInfo0) of
{Value, NConnInfo} ->
{ClientInfo0#{Field => Value}, NConnInfo};
_ ->
{ClientInfo0, ConnInfo0}
end
end,
{ClientInfo, ConnInfo},
Fields
).
take_ws_cookie(ClientInfo, ConnInfo) ->
case maps:take(ws_cookie, ConnInfo) of
{WsCookie, NConnInfo} ->
{ClientInfo#{ws_cookie => WsCookie}, NConnInfo};
_ ->
{ClientInfo, ConnInfo}
end.
%%--------------------------------------------------------------------
%% Handle incoming packet
@ -339,7 +328,7 @@ take_conn_info_fields(Fields, ClientInfo, ConnInfo) ->
| {shutdown, Reason :: term(), channel()}
| {shutdown, Reason :: term(), replies(), channel()}.
handle_in(?CONNECT_PACKET(), Channel = #channel{conn_state = ConnState}) when
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
ConnState =:= connected orelse ConnState =:= reauthenticating
->
handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel);
handle_in(?CONNECT_PACKET(), Channel = #channel{conn_state = connecting}) ->
@ -556,10 +545,8 @@ handle_in(
{error, ReasonCode} ->
handle_out(disconnect, ReasonCode, Channel)
end;
handle_in(?PACKET(?PINGREQ), Channel = #channel{keepalive = Keepalive}) ->
{ok, NKeepalive} = emqx_keepalive:check(Keepalive),
NChannel = Channel#channel{keepalive = NKeepalive},
{ok, ?PACKET(?PINGRESP), reset_timer(keepalive, NChannel)};
handle_in(?PACKET(?PINGREQ), Channel) ->
{ok, ?PACKET(?PINGRESP), Channel};
handle_in(
?DISCONNECT_PACKET(ReasonCode, Properties),
Channel = #channel{conninfo = ConnInfo}
@ -569,8 +556,29 @@ handle_in(
process_disconnect(ReasonCode, Properties, NChannel);
handle_in(?AUTH_PACKET(), Channel) ->
handle_out(disconnect, ?RC_IMPLEMENTATION_SPECIFIC_ERROR, Channel);
handle_in({frame_error, Reason}, Channel) ->
handle_frame_error(Reason, Channel);
handle_in({frame_error, Reason}, Channel = #channel{conn_state = idle}) ->
shutdown(shutdown_count(frame_error, Reason), Channel);
handle_in(
{frame_error, #{cause := frame_too_large} = R}, Channel = #channel{conn_state = connecting}
) ->
shutdown(
shutdown_count(frame_error, R), ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE), Channel
);
handle_in({frame_error, Reason}, Channel = #channel{conn_state = connecting}) ->
shutdown(shutdown_count(frame_error, Reason), ?CONNACK_PACKET(?RC_MALFORMED_PACKET), Channel);
handle_in(
{frame_error, #{cause := frame_too_large}}, Channel = #channel{conn_state = ConnState}
) when
ConnState =:= connected orelse ConnState =:= reauthenticating
->
handle_out(disconnect, {?RC_PACKET_TOO_LARGE, frame_too_large}, Channel);
handle_in({frame_error, Reason}, Channel = #channel{conn_state = ConnState}) when
ConnState =:= connected orelse ConnState =:= reauthenticating
->
handle_out(disconnect, {?RC_MALFORMED_PACKET, Reason}, Channel);
handle_in({frame_error, Reason}, Channel = #channel{conn_state = disconnected}) ->
?SLOG(error, #{msg => "malformed_mqtt_message", reason => Reason}),
{ok, Channel};
handle_in(Packet, Channel) ->
?SLOG(error, #{msg => "disconnecting_due_to_unexpected_message", packet => Packet}),
handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel).
@ -1002,68 +1010,6 @@ not_nacked({deliver, _Topic, Msg}) ->
true
end.
%%--------------------------------------------------------------------
%% Handle Frame Error
%%--------------------------------------------------------------------
handle_frame_error(
Reason = #{cause := frame_too_large},
Channel = #channel{conn_state = ConnState, conninfo = ConnInfo}
) when
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
->
ShutdownCount = shutdown_count(frame_error, Reason),
case proto_ver(Reason, ConnInfo) of
?MQTT_PROTO_V5 ->
handle_out(disconnect, {?RC_PACKET_TOO_LARGE, frame_too_large}, Channel);
_ ->
shutdown(ShutdownCount, Channel)
end;
%% Only send CONNACK with reason code `frame_too_large` for MQTT-v5.0 when connecting,
%% otherwise DONOT send any CONNACK or DISCONNECT packet.
handle_frame_error(
Reason,
Channel = #channel{conn_state = ConnState, conninfo = ConnInfo}
) when
is_map(Reason) andalso
(ConnState == idle orelse ConnState == connecting)
->
ShutdownCount = shutdown_count(frame_error, Reason),
ProtoVer = proto_ver(Reason, ConnInfo),
NChannel = Channel#channel{conninfo = ConnInfo#{proto_ver => ProtoVer}},
case ProtoVer of
?MQTT_PROTO_V5 ->
shutdown(ShutdownCount, ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE), NChannel);
_ ->
shutdown(ShutdownCount, NChannel)
end;
handle_frame_error(
Reason,
Channel = #channel{conn_state = connecting}
) ->
shutdown(
shutdown_count(frame_error, Reason),
?CONNACK_PACKET(?RC_MALFORMED_PACKET),
Channel
);
handle_frame_error(
Reason,
Channel = #channel{conn_state = ConnState}
) when
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
->
handle_out(
disconnect,
{?RC_MALFORMED_PACKET, Reason},
Channel
);
handle_frame_error(
Reason,
Channel = #channel{conn_state = disconnected}
) ->
?SLOG(error, #{msg => "malformed_mqtt_message", reason => Reason}),
{ok, Channel}.
%%--------------------------------------------------------------------
%% Handle outgoing packet
%%--------------------------------------------------------------------
@ -1284,12 +1230,11 @@ handle_call(
{keepalive, Interval},
Channel = #channel{
keepalive = KeepAlive,
conninfo = ConnInfo,
clientinfo = #{zone := Zone}
conninfo = ConnInfo
}
) ->
ClientId = info(clientid, Channel),
NKeepalive = emqx_keepalive:update(Zone, Interval, KeepAlive),
NKeepalive = emqx_keepalive:update(timer:seconds(Interval), KeepAlive),
NConnInfo = maps:put(keepalive, Interval, ConnInfo),
NChannel = Channel#channel{keepalive = NKeepalive, conninfo = NConnInfo},
SockInfo = maps:get(sockinfo, emqx_cm:get_chan_info(ClientId), #{}),
@ -1332,7 +1277,7 @@ handle_info(
session = Session
}
) when
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
ConnState =:= connected orelse ConnState =:= reauthenticating
->
{Intent, Session1} = session_disconnect(ClientInfo, ConnInfo, Session),
Channel1 = ensure_disconnected(Reason, maybe_publish_will_msg(sock_closed, Channel)),
@ -1392,22 +1337,22 @@ die_if_test_compiled() ->
| {shutdown, Reason :: term(), channel()}.
handle_timeout(
_TRef,
keepalive,
{keepalive, _StatVal},
Channel = #channel{keepalive = undefined}
) ->
{ok, Channel};
handle_timeout(
_TRef,
keepalive,
{keepalive, _StatVal},
Channel = #channel{conn_state = disconnected}
) ->
{ok, Channel};
handle_timeout(
_TRef,
keepalive,
{keepalive, StatVal},
Channel = #channel{keepalive = Keepalive}
) ->
case emqx_keepalive:check(Keepalive) of
case emqx_keepalive:check(StatVal, Keepalive) of
{ok, NKeepalive} ->
NChannel = Channel#channel{keepalive = NKeepalive},
{ok, reset_timer(keepalive, NChannel)};
@ -1518,16 +1463,10 @@ reset_timer(Name, Time, Channel) ->
ensure_timer(Name, Time, clean_timer(Name, Channel)).
clean_timer(Name, Channel = #channel{timers = Timers}) ->
case maps:take(Name, Timers) of
error ->
Channel;
{TRef, NTimers} ->
ok = emqx_utils:cancel_timer(TRef),
Channel#channel{timers = NTimers}
end.
Channel#channel{timers = maps:remove(Name, Timers)}.
interval(keepalive, #channel{keepalive = KeepAlive}) ->
emqx_keepalive:info(check_interval, KeepAlive);
emqx_keepalive:info(interval, KeepAlive);
interval(retry_delivery, #channel{session = Session}) ->
emqx_session:info(retry_interval, Session);
interval(expire_awaiting_rel, #channel{session = Session}) ->
@ -2385,7 +2324,9 @@ ensure_keepalive_timer(0, Channel) ->
ensure_keepalive_timer(disabled, Channel) ->
Channel;
ensure_keepalive_timer(Interval, Channel = #channel{clientinfo = #{zone := Zone}}) ->
Keepalive = emqx_keepalive:init(Zone, Interval),
Multiplier = get_mqtt_conf(Zone, keepalive_multiplier),
RecvCnt = emqx_pd:get_counter(recv_pkt),
Keepalive = emqx_keepalive:init(RecvCnt, round(timer:seconds(Interval) * Multiplier)),
ensure_timer(keepalive, Channel#channel{keepalive = Keepalive}).
clear_keepalive(Channel = #channel{timers = Timers}) ->
@ -2679,7 +2620,8 @@ save_alias(outbound, AliasId, Topic, TopicAliases = #{outbound := Aliases}) ->
NAliases = maps:put(Topic, AliasId, Aliases),
TopicAliases#{outbound => NAliases}.
-compile({inline, [reply/2, shutdown/2, shutdown/3]}).
-compile({inline, [reply/2, shutdown/2, shutdown/3, sp/1, flag/1]}).
reply(Reply, Channel) ->
{reply, Reply, Channel}.
@ -2715,13 +2657,13 @@ disconnect_and_shutdown(
?IS_MQTT_V5 =
#channel{conn_state = ConnState}
) when
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
ConnState =:= connected orelse ConnState =:= reauthenticating
->
NChannel = ensure_disconnected(Reason, Channel),
shutdown(Reason, Reply, ?DISCONNECT_PACKET(reason_code(Reason)), NChannel);
%% mqtt v3/v4 connected sessions
disconnect_and_shutdown(Reason, Reply, Channel = #channel{conn_state = ConnState}) when
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
ConnState =:= connected orelse ConnState =:= reauthenticating
->
NChannel = ensure_disconnected(Reason, Channel),
shutdown(Reason, Reply, NChannel);
@ -2764,13 +2706,6 @@ is_durable_session(#channel{session = Session}) ->
false
end.
proto_ver(#{proto_ver := ProtoVer}, _ConnInfo) ->
ProtoVer;
proto_ver(_Reason, #{proto_ver := ProtoVer}) ->
ProtoVer;
proto_ver(_, _) ->
?MQTT_PROTO_V4.
%%--------------------------------------------------------------------
%% For CT tests
%%--------------------------------------------------------------------

View File

@ -118,7 +118,6 @@
config/0,
app_envs/0,
update_opts/0,
cluster_rpc_opts/0,
update_cmd/0,
update_args/0,
update_error/0,
@ -148,7 +147,6 @@
raw_config => emqx_config:raw_config(),
post_config_update => #{module() => any()}
}.
-type cluster_rpc_opts() :: #{kind => ?KIND_INITIATE | ?KIND_REPLICATE}.
%% raw_config() is the config that is NOT parsed and translated by hocon schema
-type raw_config() :: #{binary() => term()} | list() | undefined.
@ -499,14 +497,15 @@ fill_defaults(RawConf, Opts) ->
).
-spec fill_defaults(module(), raw_config(), hocon_tconf:opts()) -> map().
fill_defaults(SchemaMod, RawConf = #{<<"durable_storage">> := Ds}, Opts) ->
fill_defaults(_SchemaMod, RawConf = #{<<"durable_storage">> := _}, _) ->
%% FIXME: kludge to prevent `emqx_config' module from filling in
%% the default values for backends and layouts. These records are
%% inside unions, and adding default values there will add
%% incompatible fields.
RawConf1 = maps:remove(<<"durable_storage">>, RawConf),
Conf = fill_defaults(SchemaMod, RawConf1, Opts),
Conf#{<<"durable_storage">> => Ds};
%%
%% Note: this function is called for each individual conf root, so
%% this clause only affects this particular subtree.
RawConf;
fill_defaults(SchemaMod, RawConf, Opts0) ->
Opts = maps:merge(#{required => false, make_serializable => true}, Opts0),
hocon_tconf:check_plain(

View File

@ -18,7 +18,6 @@
-module(emqx_config_handler).
-include("logger.hrl").
-include("emqx.hrl").
-include("emqx_schema.hrl").
-include_lib("hocon/include/hocon_types.hrl").
@ -31,7 +30,6 @@
add_handler/2,
remove_handler/1,
update_config/3,
update_config/4,
get_raw_cluster_override_conf/0,
info/0
]).
@ -55,13 +53,9 @@
-optional_callbacks([
pre_config_update/3,
pre_config_update/4,
propagated_pre_config_update/3,
propagated_pre_config_update/4,
post_config_update/5,
post_config_update/6,
propagated_post_config_update/5,
propagated_post_config_update/6
propagated_post_config_update/5
]).
-callback pre_config_update([atom()], emqx_config:update_request(), emqx_config:raw_config()) ->
@ -89,38 +83,6 @@
) ->
ok | {ok, Result :: any()} | {error, Reason :: term()}.
-callback pre_config_update(
[atom()], emqx_config:update_request(), emqx_config:raw_config(), emqx_config:cluster_rpc_opts()
) ->
ok | {ok, emqx_config:update_request()} | {error, term()}.
-callback propagated_pre_config_update(
[binary()],
emqx_config:update_request(),
emqx_config:raw_config(),
emqx_config:cluster_rpc_opts()
) ->
ok | {ok, emqx_config:update_request()} | {error, term()}.
-callback post_config_update(
[atom()],
emqx_config:update_request(),
emqx_config:config(),
emqx_config:config(),
emqx_config:app_envs(),
emqx_config:cluster_rpc_opts()
) ->
ok | {ok, Result :: any()} | {error, Reason :: term()}.
-callback propagated_post_config_update(
[atom()],
emqx_config:update_request(),
emqx_config:config(),
emqx_config:config(),
emqx_config:app_envs(),
emqx_config:cluster_rpc_opts()
) ->
ok | {ok, Result :: any()} | {error, Reason :: term()}.
-type state() :: #{handlers := any()}.
-type config_key_path() :: emqx_utils_maps:config_key_path().
@ -130,17 +92,12 @@ start_link() ->
stop() ->
gen_server:stop(?MODULE).
update_config(SchemaModule, ConfKeyPath, UpdateArgs) ->
update_config(SchemaModule, ConfKeyPath, UpdateArgs, #{}).
-spec update_config(module(), config_key_path(), emqx_config:update_args(), map()) ->
-spec update_config(module(), config_key_path(), emqx_config:update_args()) ->
{ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
update_config(SchemaModule, ConfKeyPath, UpdateArgs, ClusterOpts) ->
update_config(SchemaModule, ConfKeyPath, UpdateArgs) ->
%% force convert the path to a list of atoms, as there maybe some wildcard names/ids in the path
AtomKeyPath = [atom(Key) || Key <- ConfKeyPath],
gen_server:call(
?MODULE, {change_config, SchemaModule, AtomKeyPath, UpdateArgs, ClusterOpts}, infinity
).
gen_server:call(?MODULE, {change_config, SchemaModule, AtomKeyPath, UpdateArgs}, infinity).
-spec add_handler(config_key_path(), handler_name()) ->
ok | {error, {conflict, list()}}.
@ -173,11 +130,11 @@ handle_call({add_handler, ConfKeyPath, HandlerName}, _From, State = #{handlers :
{error, _Reason} = Error -> {reply, Error, State}
end;
handle_call(
{change_config, SchemaModule, ConfKeyPath, UpdateArgs, ClusterRpcOpts},
{change_config, SchemaModule, ConfKeyPath, UpdateArgs},
_From,
#{handlers := Handlers} = State
) ->
Result = handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs, ClusterRpcOpts),
Result = handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs),
{reply, Result, State};
handle_call(get_raw_cluster_override_conf, _From, State) ->
Reply = emqx_config:read_override_conf(#{override_to => cluster}),
@ -246,9 +203,9 @@ filter_top_level_handlers(Handlers) ->
Handlers
).
handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs, ClusterRpcOpts) ->
handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs) ->
try
do_handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs, ClusterRpcOpts)
do_handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs)
catch
throw:Reason ->
{error, Reason};
@ -260,14 +217,13 @@ handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs, ClusterRp
update_req => UpdateArgs,
module => SchemaModule,
key_path => ConfKeyPath,
cluster_rpc_opts => ClusterRpcOpts,
stacktrace => ST
}),
{error, {config_update_crashed, Reason}}
end.
do_handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs, ClusterOpts) ->
case process_update_request(ConfKeyPath, Handlers, UpdateArgs, ClusterOpts) of
do_handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs) ->
case process_update_request(ConfKeyPath, Handlers, UpdateArgs) of
{ok, NewRawConf, OverrideConf, Opts} ->
check_and_save_configs(
SchemaModule,
@ -276,24 +232,23 @@ do_handle_update_request(SchemaModule, ConfKeyPath, Handlers, UpdateArgs, Cluste
NewRawConf,
OverrideConf,
UpdateArgs,
Opts,
ClusterOpts
Opts
);
{error, Result} ->
{error, Result}
end.
process_update_request([_], _Handlers, {remove, _Opts}, _ClusterRpcOpts) ->
process_update_request([_], _Handlers, {remove, _Opts}) ->
{error, "remove_root_is_forbidden"};
process_update_request(ConfKeyPath, _Handlers, {remove, Opts}, _ClusterRpcOpts) ->
process_update_request(ConfKeyPath, _Handlers, {remove, Opts}) ->
OldRawConf = emqx_config:get_root_raw(ConfKeyPath),
BinKeyPath = bin_path(ConfKeyPath),
NewRawConf = emqx_utils_maps:deep_remove(BinKeyPath, OldRawConf),
OverrideConf = remove_from_override_config(BinKeyPath, Opts),
{ok, NewRawConf, OverrideConf, Opts};
process_update_request(ConfKeyPath, Handlers, {{update, UpdateReq}, Opts}, ClusterRpcOpts) ->
process_update_request(ConfKeyPath, Handlers, {{update, UpdateReq}, Opts}) ->
OldRawConf = emqx_config:get_root_raw(ConfKeyPath),
case do_update_config(ConfKeyPath, Handlers, OldRawConf, UpdateReq, ClusterRpcOpts) of
case do_update_config(ConfKeyPath, Handlers, OldRawConf, UpdateReq) of
{ok, NewRawConf} ->
OverrideConf = merge_to_override_config(NewRawConf, Opts),
{ok, NewRawConf, OverrideConf, Opts};
@ -301,16 +256,15 @@ process_update_request(ConfKeyPath, Handlers, {{update, UpdateReq}, Opts}, Clust
Error
end.
do_update_config(ConfKeyPath, Handlers, OldRawConf, UpdateReq, ClusterRpcOpts) ->
do_update_config(ConfKeyPath, Handlers, OldRawConf, UpdateReq, ClusterRpcOpts, []).
do_update_config(ConfKeyPath, Handlers, OldRawConf, UpdateReq) ->
do_update_config(ConfKeyPath, Handlers, OldRawConf, UpdateReq, []).
do_update_config([], Handlers, OldRawConf, UpdateReq, ClusterRpcOpts, ConfKeyPath) ->
do_update_config([], Handlers, OldRawConf, UpdateReq, ConfKeyPath) ->
call_pre_config_update(#{
handlers => Handlers,
old_raw_conf => OldRawConf,
update_req => UpdateReq,
conf_key_path => ConfKeyPath,
cluster_rpc_opts => ClusterRpcOpts,
callback => pre_config_update,
is_propagated => false
});
@ -319,18 +273,13 @@ do_update_config(
Handlers,
OldRawConf,
UpdateReq,
ClusterRpcOpts,
ConfKeyPath0
) ->
ConfKeyPath = ConfKeyPath0 ++ [ConfKey],
ConfKeyBin = bin(ConfKey),
SubOldRawConf = get_sub_config(ConfKeyBin, OldRawConf),
SubHandlers = get_sub_handlers(ConfKey, Handlers),
case
do_update_config(
SubConfKeyPath, SubHandlers, SubOldRawConf, UpdateReq, ClusterRpcOpts, ConfKeyPath
)
of
case do_update_config(SubConfKeyPath, SubHandlers, SubOldRawConf, UpdateReq, ConfKeyPath) of
{ok, NewUpdateReq} ->
merge_to_old_config(#{ConfKeyBin => NewUpdateReq}, OldRawConf);
Error ->
@ -344,18 +293,12 @@ check_and_save_configs(
NewRawConf,
OverrideConf,
UpdateArgs,
Opts,
ClusterOpts
Opts
) ->
Schema = schema(SchemaModule, ConfKeyPath),
Kind = maps:get(kind, ClusterOpts, ?KIND_INITIATE),
{AppEnvs, NewConf} = emqx_config:check_config(Schema, NewRawConf),
OldConf = emqx_config:get_root(ConfKeyPath),
case
do_post_config_update(
ConfKeyPath, Handlers, OldConf, NewConf, AppEnvs, UpdateArgs, ClusterOpts, #{}
)
of
case do_post_config_update(ConfKeyPath, Handlers, OldConf, NewConf, AppEnvs, UpdateArgs, #{}) of
{ok, Result0} ->
post_update_ok(
AppEnvs,
@ -367,24 +310,21 @@ check_and_save_configs(
UpdateArgs,
Result0
);
{error, {post_config_update, HandlerName, Reason}} when Kind =/= ?KIND_INITIATE ->
?SLOG(critical, #{
msg => "post_config_update_failed_but_save_the_config_anyway",
handler => HandlerName,
reason => Reason
}),
post_update_ok(
AppEnvs,
NewConf,
NewRawConf,
OverrideConf,
Opts,
ConfKeyPath,
UpdateArgs,
#{}
);
{error, _} = Error ->
Error
{error, {post_config_update, HandlerName, Reason}} ->
HandlePostFailureFun =
fun() ->
post_update_ok(
AppEnvs,
NewConf,
NewRawConf,
OverrideConf,
Opts,
ConfKeyPath,
UpdateArgs,
#{}
)
end,
{error, {post_config_update, HandlerName, {Reason, HandlePostFailureFun}}}
end.
post_update_ok(AppEnvs, NewConf, NewRawConf, OverrideConf, Opts, ConfKeyPath, UpdateArgs, Result0) ->
@ -392,9 +332,7 @@ post_update_ok(AppEnvs, NewConf, NewRawConf, OverrideConf, Opts, ConfKeyPath, Up
Result1 = return_change_result(ConfKeyPath, UpdateArgs),
{ok, Result1#{post_config_update => Result0}}.
do_post_config_update(
ConfKeyPath, Handlers, OldConf, NewConf, AppEnvs, UpdateArgs, ClusterOpts, Result
) ->
do_post_config_update(ConfKeyPath, Handlers, OldConf, NewConf, AppEnvs, UpdateArgs, Result) ->
do_post_config_update(
ConfKeyPath,
Handlers,
@ -402,7 +340,6 @@ do_post_config_update(
NewConf,
AppEnvs,
UpdateArgs,
ClusterOpts,
Result,
[]
).
@ -415,7 +352,6 @@ do_post_config_update(
AppEnvs,
UpdateArgs,
Result,
ClusterOpts,
ConfKeyPath
) ->
call_post_config_update(#{
@ -426,7 +362,6 @@ do_post_config_update(
update_req => up_req(UpdateArgs),
result => Result,
conf_key_path => ConfKeyPath,
cluster_rpc_opts => ClusterOpts,
callback => post_config_update
});
do_post_config_update(
@ -436,7 +371,6 @@ do_post_config_update(
NewConf,
AppEnvs,
UpdateArgs,
ClusterOpts,
Result,
ConfKeyPath0
) ->
@ -451,7 +385,6 @@ do_post_config_update(
SubNewConf,
AppEnvs,
UpdateArgs,
ClusterOpts,
Result,
ConfKeyPath
).
@ -495,41 +428,37 @@ call_proper_pre_config_update(
#{
handlers := #{?MOD := Module},
callback := Callback,
update_req := UpdateReq
update_req := UpdateReq,
old_raw_conf := OldRawConf
} = Ctx
) ->
Arity = get_function_arity(Module, Callback, [3, 4]),
case apply_pre_config_update(Module, Callback, Arity, Ctx) of
{ok, NewUpdateReq} ->
{ok, NewUpdateReq};
ok ->
{ok, UpdateReq};
{error, Reason} ->
{error, {pre_config_update, Module, Reason}}
case erlang:function_exported(Module, Callback, 3) of
true ->
case apply_pre_config_update(Module, Ctx) of
{ok, NewUpdateReq} ->
{ok, NewUpdateReq};
ok ->
{ok, UpdateReq};
{error, Reason} ->
{error, {pre_config_update, Module, Reason}}
end;
false ->
merge_to_old_config(UpdateReq, OldRawConf)
end;
call_proper_pre_config_update(
#{update_req := UpdateReq}
) ->
{ok, UpdateReq}.
apply_pre_config_update(Module, Callback, 3, #{
conf_key_path := ConfKeyPath,
update_req := UpdateReq,
old_raw_conf := OldRawConf
}) ->
Module:Callback(ConfKeyPath, UpdateReq, OldRawConf);
apply_pre_config_update(Module, Callback, 4, #{
apply_pre_config_update(Module, #{
conf_key_path := ConfKeyPath,
update_req := UpdateReq,
old_raw_conf := OldRawConf,
cluster_rpc_opts := ClusterRpcOpts
callback := Callback
}) ->
Module:Callback(ConfKeyPath, UpdateReq, OldRawConf, ClusterRpcOpts);
apply_pre_config_update(_Module, _Callback, false, #{
update_req := UpdateReq,
old_raw_conf := OldRawConf
}) ->
merge_to_old_config(UpdateReq, OldRawConf).
Module:Callback(
ConfKeyPath, UpdateReq, OldRawConf
).
propagate_pre_config_updates_to_subconf(
#{handlers := #{?WKEY := _}} = Ctx
@ -631,23 +560,28 @@ call_proper_post_config_update(
result := Result
} = Ctx
) ->
Arity = get_function_arity(Module, Callback, [5, 6]),
case apply_post_config_update(Module, Callback, Arity, Ctx) of
ok -> {ok, Result};
{ok, Result1} -> {ok, Result#{Module => Result1}};
{error, Reason} -> {error, {post_config_update, Module, Reason}}
case erlang:function_exported(Module, Callback, 5) of
true ->
case apply_post_config_update(Module, Ctx) of
ok -> {ok, Result};
{ok, Result1} -> {ok, Result#{Module => Result1}};
{error, Reason} -> {error, {post_config_update, Module, Reason}}
end;
false ->
{ok, Result}
end;
call_proper_post_config_update(
#{result := Result} = _Ctx
) ->
{ok, Result}.
apply_post_config_update(Module, Callback, 5, #{
apply_post_config_update(Module, #{
conf_key_path := ConfKeyPath,
update_req := UpdateReq,
new_conf := NewConf,
old_conf := OldConf,
app_envs := AppEnvs
app_envs := AppEnvs,
callback := Callback
}) ->
Module:Callback(
ConfKeyPath,
@ -655,25 +589,7 @@ apply_post_config_update(Module, Callback, 5, #{
NewConf,
OldConf,
AppEnvs
);
apply_post_config_update(Module, Callback, 6, #{
conf_key_path := ConfKeyPath,
update_req := UpdateReq,
cluster_rpc_opts := ClusterRpcOpts,
new_conf := NewConf,
old_conf := OldConf,
app_envs := AppEnvs
}) ->
Module:Callback(
ConfKeyPath,
UpdateReq,
NewConf,
OldConf,
AppEnvs,
ClusterRpcOpts
);
apply_post_config_update(_Module, _Callback, false, _Ctx) ->
ok.
).
propagate_post_config_updates_to_subconf(
#{handlers := #{?WKEY := _}} = Ctx
@ -852,9 +768,7 @@ assert_callback_function(Mod) ->
_ = apply(Mod, module_info, []),
case
erlang:function_exported(Mod, pre_config_update, 3) orelse
erlang:function_exported(Mod, post_config_update, 5) orelse
erlang:function_exported(Mod, pre_config_update, 4) orelse
erlang:function_exported(Mod, post_config_update, 6)
erlang:function_exported(Mod, post_config_update, 5)
of
true -> ok;
false -> error(#{msg => "bad_emqx_config_handler_callback", module => Mod})
@ -897,13 +811,3 @@ load_prev_handlers() ->
save_handlers(Handlers) ->
application:set_env(emqx, ?MODULE, Handlers).
get_function_arity(_Module, _Callback, []) ->
false;
get_function_arity(Module, Callback, [Arity | Opts]) ->
%% ensure module is loaded
Module = Module:module_info(module),
case erlang:function_exported(Module, Callback, Arity) of
true -> Arity;
false -> get_function_arity(Module, Callback, Opts)
end.

View File

@ -173,9 +173,7 @@
system_code_change/4
]}
).
-dialyzer({no_missing_calls, [handle_msg/2]}).
-ifndef(BUILD_WITHOUT_QUIC).
-spec start_link
(esockd:transport(), esockd:socket(), emqx_channel:opts()) ->
{ok, pid()};
@ -185,9 +183,6 @@
emqx_quic_connection:cb_state()
) ->
{ok, pid()}.
-else.
-spec start_link(esockd:transport(), esockd:socket(), emqx_channel:opts()) -> {ok, pid()}.
-endif.
start_link(Transport, Socket, Options) ->
Args = [self(), Transport, Socket, Options],
@ -310,13 +305,11 @@ init_state(
{ok, Peername} = Transport:ensure_ok_or_exit(peername, [Socket]),
{ok, Sockname} = Transport:ensure_ok_or_exit(sockname, [Socket]),
Peercert = Transport:ensure_ok_or_exit(peercert, [Socket]),
PeerSNI = Transport:ensure_ok_or_exit(peersni, [Socket]),
ConnInfo = #{
socktype => Transport:type(Socket),
peername => Peername,
sockname => Sockname,
peercert => Peercert,
peersni => PeerSNI,
conn_mod => ?MODULE
},
@ -473,17 +466,19 @@ cancel_stats_timer(State) ->
process_msg([], State) ->
{ok, State};
process_msg([Msg | More], State) ->
try handle_msg(Msg, State) of
ok ->
process_msg(More, State);
{ok, NState} ->
process_msg(More, NState);
{ok, Msgs, NState} ->
process_msg(append_msg(More, Msgs), NState);
{stop, Reason, NState} ->
{stop, Reason, NState};
{stop, Reason} ->
{stop, Reason, State}
try
case handle_msg(Msg, State) of
ok ->
process_msg(More, State);
{ok, NState} ->
process_msg(More, NState);
{ok, Msgs, NState} ->
process_msg(append_msg(More, Msgs), NState);
{stop, Reason, NState} ->
{stop, Reason, NState};
{stop, Reason} ->
{stop, Reason, State}
end
catch
exit:normal ->
{stop, normal, State};
@ -569,10 +564,12 @@ handle_msg({Closed, _Sock}, State) when
handle_msg({Passive, _Sock}, State) when
Passive == tcp_passive; Passive == ssl_passive; Passive =:= quic_passive
->
%% In Stats
Pubs = emqx_pd:reset_counter(incoming_pubs),
Bytes = emqx_pd:reset_counter(incoming_bytes),
InStats = #{cnt => Pubs, oct => Bytes},
%% Run GC and Check OOM
NState1 = check_oom(Pubs, Bytes, run_gc(Pubs, Bytes, State)),
NState1 = check_oom(run_gc(InStats, State)),
handle_info(activate_socket, NState1);
handle_msg(
Deliver = {deliver, _Topic, _Msg},
@ -732,7 +729,9 @@ handle_timeout(
disconnected ->
{ok, State};
_ ->
with_channel(handle_timeout, [TRef, keepalive], State)
%% recv_pkt: valid MQTT message
RecvCnt = emqx_pd:get_counter(recv_pkt),
handle_timeout(TRef, {keepalive, RecvCnt}, State)
end;
handle_timeout(TRef, Msg, State) ->
with_channel(handle_timeout, [TRef, Msg], State).
@ -783,8 +782,7 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) ->
input_bytes => Data,
parsed_packets => Packets
}),
NState = enrich_state(Reason, State),
{[{frame_error, Reason} | Packets], NState};
{[{frame_error, Reason} | Packets], State};
error:Reason:Stacktrace ->
?LOG(error, #{
at_state => emqx_frame:describe_state(ParseState),
@ -901,7 +899,8 @@ sent(#state{listener = {Type, Listener}} = State) ->
true ->
Pubs = emqx_pd:reset_counter(outgoing_pubs),
Bytes = emqx_pd:reset_counter(outgoing_bytes),
{ok, check_oom(Pubs, Bytes, run_gc(Pubs, Bytes, State))};
OutStats = #{cnt => Pubs, oct => Bytes},
{ok, check_oom(run_gc(OutStats, State))};
false ->
{ok, State}
end.
@ -1081,36 +1080,25 @@ retry_limiter(#state{channel = Channel, limiter = Limiter} = State) ->
%%--------------------------------------------------------------------
%% Run GC and Check OOM
run_gc(Pubs, Bytes, State = #state{gc_state = GcSt, zone = Zone}) ->
run_gc(Stats, State = #state{gc_state = GcSt, zone = Zone}) ->
case
?ENABLED(GcSt) andalso not emqx_olp:backoff_gc(Zone) andalso
emqx_gc:run(Pubs, Bytes, GcSt)
emqx_gc:run(Stats, GcSt)
of
false -> State;
{_IsGC, GcSt1} -> State#state{gc_state = GcSt1}
end.
check_oom(Pubs, Bytes, State = #state{channel = Channel}) ->
check_oom(State = #state{channel = Channel}) ->
ShutdownPolicy = emqx_config:get_zone_conf(
emqx_channel:info(zone, Channel), [force_shutdown]
),
?tp(debug, check_oom, #{policy => ShutdownPolicy}),
case emqx_utils:check_oom(ShutdownPolicy) of
{shutdown, Reason} ->
%% triggers terminate/2 callback immediately
?tp(warning, check_oom_shutdown, #{
policy => ShutdownPolicy,
incoming_pubs => Pubs,
incoming_bytes => Bytes,
shutdown => Reason
}),
erlang:exit({shutdown, Reason});
Result ->
?tp(debug, check_oom_ok, #{
policy => ShutdownPolicy,
incoming_pubs => Pubs,
incoming_bytes => Bytes,
result => Result
}),
_ ->
ok
end,
State.
@ -1228,12 +1216,6 @@ inc_counter(Key, Inc) ->
_ = emqx_pd:inc_counter(Key, Inc),
ok.
enrich_state(#{parse_state := NParseState}, State) ->
Serialize = emqx_frame:serialize_opts(NParseState),
State#state{parse_state = NParseState, serialize = Serialize};
enrich_state(_, State) ->
State.
set_tcp_keepalive({quic, _Listener}) ->
ok;
set_tcp_keepalive({Type, Id}) ->

View File

@ -234,42 +234,6 @@ fields(layout_builtin_wildcard_optimized) ->
}
)}
];
fields(layout_builtin_wildcard_optimized_v2) ->
[
{type,
sc(
wildcard_optimized_v2,
#{
'readOnly' => true,
default => wildcard_optimized_v2,
desc => ?DESC(layout_builtin_wildcard_optimized_type)
}
)},
{bytes_per_topic_level,
sc(
range(1, 16),
#{
default => 8,
importance => ?IMPORTANCE_HIDDEN
}
)},
{topic_index_bytes,
sc(
pos_integer(),
#{
default => 8,
importance => ?IMPORTANCE_HIDDEN
}
)},
{serialization_schema,
sc(
emqx_ds_msg_serializer:schema(),
#{
default => v1,
importance => ?IMPORTANCE_HIDDEN
}
)}
];
fields(layout_builtin_reference) ->
[
{type,
@ -278,7 +242,6 @@ fields(layout_builtin_reference) ->
#{
'readOnly' => true,
importance => ?IMPORTANCE_LOW,
default => reference,
desc => ?DESC(layout_builtin_reference_type)
}
)}
@ -321,7 +284,7 @@ common_builtin_fields() ->
importance => ?IMPORTANCE_MEDIUM,
default =>
#{
<<"type">> => wildcard_optimized_v2
<<"type">> => wildcard_optimized
}
}
)}
@ -335,8 +298,6 @@ desc(builtin_write_buffer) ->
?DESC(builtin_write_buffer);
desc(layout_builtin_wildcard_optimized) ->
?DESC(layout_builtin_wildcard_optimized);
desc(layout_builtin_wildcard_optimized_v2) ->
?DESC(layout_builtin_wildcard_optimized);
desc(layout_builtin_reference) ->
?DESC(layout_builtin_reference);
desc(_) ->
@ -346,19 +307,6 @@ desc(_) ->
%% Internal functions
%%================================================================================
translate_layout(
#{
type := wildcard_optimized_v2,
bytes_per_topic_level := BytesPerTopicLevel,
topic_index_bytes := TopicIndexBytes,
serialization_schema := SSchema
}
) ->
{emqx_ds_storage_skipstream_lts, #{
wildcard_hash_bytes => BytesPerTopicLevel,
topic_index_bytes => TopicIndexBytes,
serialization_schema => SSchema
}};
translate_layout(
#{
type := wildcard_optimized,
@ -388,11 +336,7 @@ builtin_layouts() ->
%% suitable for production use. However, it's very simple and
%% produces a very predictabale replay order, which can be useful
%% for testing and debugging:
[
ref(layout_builtin_wildcard_optimized_v2),
ref(layout_builtin_wildcard_optimized),
ref(layout_builtin_reference)
].
[ref(layout_builtin_wildcard_optimized), ref(layout_builtin_reference)].
sc(Type, Meta) -> hoconsc:mk(Type, Meta).

View File

@ -117,13 +117,6 @@ try_subscribe(ClientId, Topic) ->
write
),
allow;
[#exclusive_subscription{clientid = ClientId, topic = Topic}] ->
%% Fixed the issue-13476
%% In this feature, the user must manually call `unsubscribe` to release the lock,
%% but sometimes the node may go down for some reason,
%% then the client will reconnect to this node and resubscribe.
%% We need to allow resubscription, otherwise the lock will never be released.
allow;
[_] ->
deny
end.

View File

@ -43,9 +43,7 @@
add_shared_route/2,
delete_shared_route/2,
add_persistent_route/2,
delete_persistent_route/2,
add_persistent_shared_route/3,
delete_persistent_shared_route/3
delete_persistent_route/2
]).
-export_type([dest/0]).
@ -131,12 +129,6 @@ add_persistent_route(Topic, ID) ->
delete_persistent_route(Topic, ID) ->
?safe_with_provider(?FUNCTION_NAME(Topic, ID), ok).
add_persistent_shared_route(Topic, Group, ID) ->
?safe_with_provider(?FUNCTION_NAME(Topic, Group, ID), ok).
delete_persistent_shared_route(Topic, Group, ID) ->
?safe_with_provider(?FUNCTION_NAME(Topic, Group, ID), ok).
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------

View File

@ -267,76 +267,47 @@ packet(Header, Variable) ->
packet(Header, Variable, Payload) ->
#mqtt_packet{header = Header, variable = Variable, payload = Payload}.
parse_connect(FrameBin, Options = #{strict_mode := StrictMode}) ->
{ProtoName, Rest0} = parse_utf8_string_with_cause(FrameBin, StrictMode, invalid_proto_name),
%% No need to parse and check proto_ver if proto_name is invalid, check it first
%% And the matching check of `proto_name` and `proto_ver` fields will be done in `emqx_packet:check_proto_ver/2`
_ = validate_proto_name(ProtoName),
{IsBridge, ProtoVer, Rest2} = parse_connect_proto_ver(Rest0),
NOptions = Options#{version => ProtoVer},
try
do_parse_connect(ProtoName, IsBridge, ProtoVer, Rest2, StrictMode)
catch
throw:{?FRAME_PARSE_ERROR, ReasonM} when is_map(ReasonM) ->
?PARSE_ERR(
ReasonM#{
proto_ver => ProtoVer,
proto_name => ProtoName,
parse_state => ?NONE(NOptions)
}
);
throw:{?FRAME_PARSE_ERROR, Reason} ->
?PARSE_ERR(
#{
cause => Reason,
proto_ver => ProtoVer,
proto_name => ProtoName,
parse_state => ?NONE(NOptions)
}
)
end.
parse_connect(FrameBin, StrictMode) ->
{ProtoName, Rest} = parse_utf8_string_with_cause(FrameBin, StrictMode, invalid_proto_name),
case ProtoName of
<<"MQTT">> ->
ok;
<<"MQIsdp">> ->
ok;
_ ->
%% from spec: the server MAY send disconnect with reason code 0x84
%% we chose to close socket because the client is likely not talking MQTT anyway
?PARSE_ERR(#{
cause => invalid_proto_name,
expected => <<"'MQTT' or 'MQIsdp'">>,
received => ProtoName
})
end,
parse_connect2(ProtoName, Rest, StrictMode).
do_parse_connect(
% Note: return malformed if reserved flag is not 0.
parse_connect2(
ProtoName,
IsBridge,
ProtoVer,
<<
UsernameFlagB:1,
PasswordFlagB:1,
WillRetainB:1,
WillQoS:2,
WillFlagB:1,
CleanStart:1,
Reserved:1,
KeepAlive:16/big,
Rest/binary
>>,
<<BridgeTag:4, ProtoVer:4, UsernameFlag:1, PasswordFlag:1, WillRetain:1, WillQoS:2, WillFlag:1,
CleanStart:1, Reserved:1, KeepAlive:16/big, Rest2/binary>>,
StrictMode
) ->
_ = validate_connect_reserved(Reserved),
_ = validate_connect_will(
WillFlag = bool(WillFlagB),
WillRetain = bool(WillRetainB),
WillQoS
),
_ = validate_connect_password_flag(
StrictMode,
ProtoVer,
UsernameFlag = bool(UsernameFlagB),
PasswordFlag = bool(PasswordFlagB)
),
{Properties, Rest3} = parse_properties(Rest, ProtoVer, StrictMode),
case Reserved of
0 -> ok;
1 -> ?PARSE_ERR(reserved_connect_flag)
end,
{Properties, Rest3} = parse_properties(Rest2, ProtoVer, StrictMode),
{ClientId, Rest4} = parse_utf8_string_with_cause(Rest3, StrictMode, invalid_clientid),
ConnPacket = #mqtt_packet_connect{
proto_name = ProtoName,
proto_ver = ProtoVer,
%% For bridge mode, non-standard implementation
%% Invented by mosquitto, named 'try_private': https://mosquitto.org/man/mosquitto-conf-5.html
is_bridge = IsBridge,
is_bridge = (BridgeTag =:= 8),
clean_start = bool(CleanStart),
will_flag = WillFlag,
will_flag = bool(WillFlag),
will_qos = WillQoS,
will_retain = WillRetain,
will_retain = bool(WillRetain),
keepalive = KeepAlive,
properties = Properties,
clientid = ClientId
@ -347,14 +318,14 @@ do_parse_connect(
fun(Bin) ->
parse_utf8_string_with_cause(Bin, StrictMode, invalid_username)
end,
UsernameFlag
bool(UsernameFlag)
),
{Password, Rest7} = parse_optional(
Rest6,
fun(Bin) ->
parse_utf8_string_with_cause(Bin, StrictMode, invalid_password)
end,
PasswordFlag
bool(PasswordFlag)
),
case Rest7 of
<<>> ->
@ -365,16 +336,16 @@ do_parse_connect(
unexpected_trailing_bytes => size(Rest7)
})
end;
do_parse_connect(_ProtoName, _IsBridge, _ProtoVer, Bin, _StrictMode) ->
%% sent less than 24 bytes
parse_connect2(_ProtoName, Bin, _StrictMode) ->
%% sent less than 32 bytes
?PARSE_ERR(#{cause => malformed_connect, header_bytes => Bin}).
parse_packet(
#mqtt_packet_header{type = ?CONNECT},
FrameBin,
Options
#{strict_mode := StrictMode}
) ->
parse_connect(FrameBin, Options);
parse_connect(FrameBin, StrictMode);
parse_packet(
#mqtt_packet_header{type = ?CONNACK},
<<AckFlags:8, ReasonCode:8, Rest/binary>>,
@ -538,12 +509,6 @@ parse_packet_id(<<PacketId:16/big, Rest/binary>>) ->
parse_packet_id(_) ->
?PARSE_ERR(invalid_packet_id).
parse_connect_proto_ver(<<BridgeTag:4, ProtoVer:4, Rest/binary>>) ->
{_IsBridge = (BridgeTag =:= 8), ProtoVer, Rest};
parse_connect_proto_ver(Bin) ->
%% sent less than 1 bytes or empty
?PARSE_ERR(#{cause => malformed_connect, header_bytes => Bin}).
parse_properties(Bin, Ver, _StrictMode) when Ver =/= ?MQTT_PROTO_V5 ->
{#{}, Bin};
%% TODO: version mess?
@ -767,8 +732,6 @@ serialize_fun(#{version := Ver, max_size := MaxSize, strict_mode := StrictMode})
initial_serialize_opts(Opts) ->
maps:merge(?DEFAULT_OPTIONS, Opts).
serialize_opts(?NONE(Options)) ->
maps:merge(?DEFAULT_OPTIONS, Options);
serialize_opts(#mqtt_packet_connect{proto_ver = ProtoVer, properties = ConnProps}) ->
MaxSize = get_property('Maximum-Packet-Size', ConnProps, ?MAX_PACKET_SIZE),
#{version => ProtoVer, max_size => MaxSize, strict_mode => false}.
@ -1187,49 +1150,6 @@ validate_subqos([3 | _]) -> ?PARSE_ERR(bad_subqos);
validate_subqos([_ | T]) -> validate_subqos(T);
validate_subqos([]) -> ok.
%% from spec: the server MAY send disconnect with reason code 0x84
%% we chose to close socket because the client is likely not talking MQTT anyway
validate_proto_name(<<"MQTT">>) ->
ok;
validate_proto_name(<<"MQIsdp">>) ->
ok;
validate_proto_name(ProtoName) ->
?PARSE_ERR(#{
cause => invalid_proto_name,
expected => <<"'MQTT' or 'MQIsdp'">>,
received => ProtoName
}).
%% MQTT-v3.1.1-[MQTT-3.1.2-3], MQTT-v5.0-[MQTT-3.1.2-3]
-compile({inline, [validate_connect_reserved/1]}).
validate_connect_reserved(0) -> ok;
validate_connect_reserved(1) -> ?PARSE_ERR(reserved_connect_flag).
-compile({inline, [validate_connect_will/3]}).
%% MQTT-v3.1.1-[MQTT-3.1.2-13], MQTT-v5.0-[MQTT-3.1.2-11]
validate_connect_will(false, _, WillQoS) when WillQoS > 0 -> ?PARSE_ERR(invalid_will_qos);
%% MQTT-v3.1.1-[MQTT-3.1.2-14], MQTT-v5.0-[MQTT-3.1.2-12]
validate_connect_will(true, _, WillQoS) when WillQoS > 2 -> ?PARSE_ERR(invalid_will_qos);
%% MQTT-v3.1.1-[MQTT-3.1.2-15], MQTT-v5.0-[MQTT-3.1.2-13]
validate_connect_will(false, WillRetain, _) when WillRetain -> ?PARSE_ERR(invalid_will_retain);
validate_connect_will(_, _, _) -> ok.
-compile({inline, [validate_connect_password_flag/4]}).
%% MQTT-v3.1
%% Username flag and password flag are not strongly related
%% https://public.dhe.ibm.com/software/dw/webservices/ws-mqtt/mqtt-v3r1.html#connect
validate_connect_password_flag(true, ?MQTT_PROTO_V3, _, _) ->
ok;
%% MQTT-v3.1.1-[MQTT-3.1.2-22]
validate_connect_password_flag(true, ?MQTT_PROTO_V4, UsernameFlag, PasswordFlag) ->
%% BUG-FOR-BUG compatible, only check when `strict-mode`
UsernameFlag orelse PasswordFlag andalso ?PARSE_ERR(invalid_password_flag);
validate_connect_password_flag(true, ?MQTT_PROTO_V5, _, _) ->
ok;
validate_connect_password_flag(_, _, _, _) ->
ok.
-compile({inline, [bool/1]}).
bool(0) -> false;
bool(1) -> true.

View File

@ -30,6 +30,7 @@
-export([
init/1,
run/2,
run/3,
info/1,
reset/1
@ -61,7 +62,12 @@ init(#{count := Count, bytes := Bytes}) ->
Oct = [{oct, {Bytes, Bytes}} || ?ENABLED(Bytes)],
?GCS(maps:from_list(Cnt ++ Oct)).
%% @doc Try to run GC based on reductions of count or bytes.
%% @doc Try to run GC based on reduntions of count or bytes.
-spec run(#{cnt := pos_integer(), oct := pos_integer()}, gc_state()) ->
{boolean(), gc_state()}.
run(#{cnt := Cnt, oct := Oct}, GcSt) ->
run(Cnt, Oct, GcSt).
-spec run(pos_integer(), pos_integer(), gc_state()) ->
{boolean(), gc_state()}.
run(Cnt, Oct, ?GCS(St)) ->

View File

@ -19,12 +19,10 @@
-export([
init/1,
init/2,
init/3,
info/1,
info/2,
check/1,
check/2,
update/3
update/2
]).
-elvis([{elvis_style, no_if_expression, disable}]).
@ -32,12 +30,8 @@
-export_type([keepalive/0]).
-record(keepalive, {
check_interval :: pos_integer(),
%% the received packets since last keepalive check
statval :: non_neg_integer(),
%% The number of idle intervals allowed before disconnecting the client.
idle_milliseconds = 0 :: non_neg_integer(),
max_idle_millisecond :: pos_integer()
interval :: pos_integer(),
statval :: non_neg_integer()
}).
-opaque keepalive() :: #keepalive{}.
@ -45,11 +39,7 @@
%% @doc Init keepalive.
-spec init(Interval :: non_neg_integer()) -> keepalive().
init(Interval) -> init(default, 0, Interval).
init(Zone, Interval) ->
RecvCnt = emqx_pd:get_counter(recv_pkt),
init(Zone, RecvCnt, Interval).
init(Interval) -> init(0, Interval).
%% from mqtt-v3.1.1 specific
%% A Keep Alive value of zero (0) has the effect of turning off the keep alive mechanism.
@ -63,88 +53,42 @@ init(Zone, Interval) ->
%% typically this is a few minutes.
%% The maximum value is (65535s) 18 hours 12 minutes and 15 seconds.
%% @doc Init keepalive.
-spec init(
Zone :: atom(),
StatVal :: non_neg_integer(),
Second :: non_neg_integer()
) -> keepalive() | undefined.
init(Zone, StatVal, Second) when Second > 0 andalso Second =< ?MAX_INTERVAL ->
#{keepalive_multiplier := Mul, keepalive_check_interval := CheckInterval} =
emqx_config:get_zone_conf(Zone, [mqtt]),
MilliSeconds = timer:seconds(Second),
Interval = emqx_utils:clamp(CheckInterval, 1000, max(MilliSeconds div 2, 1000)),
MaxIdleMs = ceil(MilliSeconds * Mul),
#keepalive{
check_interval = Interval,
statval = StatVal,
idle_milliseconds = 0,
max_idle_millisecond = MaxIdleMs
};
init(_Zone, _, 0) ->
-spec init(StatVal :: non_neg_integer(), Interval :: non_neg_integer()) -> keepalive() | undefined.
init(StatVal, Interval) when Interval > 0 andalso Interval =< ?MAX_INTERVAL ->
#keepalive{interval = Interval, statval = StatVal};
init(_, 0) ->
undefined;
init(Zone, StatVal, Interval) when Interval > ?MAX_INTERVAL -> init(Zone, StatVal, ?MAX_INTERVAL).
init(StatVal, Interval) when Interval > ?MAX_INTERVAL -> init(StatVal, ?MAX_INTERVAL).
%% @doc Get Info of the keepalive.
-spec info(keepalive()) -> emqx_types:infos().
info(#keepalive{
check_interval = Interval,
statval = StatVal,
idle_milliseconds = IdleIntervals,
max_idle_millisecond = MaxMs
interval = Interval,
statval = StatVal
}) ->
#{
check_interval => Interval,
statval => StatVal,
idle_milliseconds => IdleIntervals,
max_idle_millisecond => MaxMs
interval => Interval,
statval => StatVal
}.
-spec info(check_interval | statval | idle_milliseconds, keepalive()) ->
-spec info(interval | statval, keepalive()) ->
non_neg_integer().
info(check_interval, #keepalive{check_interval = Interval}) ->
info(interval, #keepalive{interval = Interval}) ->
Interval;
info(statval, #keepalive{statval = StatVal}) ->
StatVal;
info(idle_milliseconds, #keepalive{idle_milliseconds = Val}) ->
Val;
info(check_interval, undefined) ->
info(interval, undefined) ->
0.
check(Keepalive = #keepalive{}) ->
RecvCnt = emqx_pd:get_counter(recv_pkt),
check(RecvCnt, Keepalive);
check(Keepalive) ->
{ok, Keepalive}.
%% @doc Check keepalive.
-spec check(non_neg_integer(), keepalive()) ->
{ok, keepalive()} | {error, timeout}.
check(
NewVal,
#keepalive{
statval = NewVal,
idle_milliseconds = IdleAcc,
check_interval = Interval,
max_idle_millisecond = Max
}
) when IdleAcc + Interval >= Max ->
{error, timeout};
check(
NewVal,
#keepalive{
statval = NewVal,
idle_milliseconds = IdleAcc,
check_interval = Interval
} = KeepAlive
) ->
{ok, KeepAlive#keepalive{statval = NewVal, idle_milliseconds = IdleAcc + Interval}};
check(NewVal, #keepalive{} = KeepAlive) ->
{ok, KeepAlive#keepalive{statval = NewVal, idle_milliseconds = 0}}.
check(Val, #keepalive{statval = Val}) -> {error, timeout};
check(Val, KeepAlive) -> {ok, KeepAlive#keepalive{statval = Val}}.
%% @doc Update keepalive.
%% The statval of the previous keepalive will be used,
%% and normal checks will begin from the next cycle.
-spec update(atom(), non_neg_integer(), keepalive() | undefined) -> keepalive() | undefined.
update(Zone, Interval, undefined) -> init(Zone, 0, Interval);
update(Zone, Interval, #keepalive{statval = StatVal}) -> init(Zone, StatVal, Interval).
-spec update(non_neg_integer(), keepalive() | undefined) -> keepalive() | undefined.
update(Interval, undefined) -> init(0, Interval);
update(Interval, #keepalive{statval = StatVal}) -> init(StatVal, Interval).

View File

@ -212,29 +212,16 @@ short_paths_fields() ->
short_paths_fields(Importance) ->
[
{Name,
?HOCON(
rate_type(),
maps:merge(
#{
desc => ?DESC(Name),
required => false,
importance => Importance,
example => Example
},
short_paths_fields_extra(Name)
)
)}
?HOCON(rate_type(), #{
desc => ?DESC(Name),
required => false,
importance => Importance,
example => Example
})}
|| {Name, Example} <-
lists:zip(short_paths(), [<<"1000/s">>, <<"1000/s">>, <<"100MB/s">>])
].
short_paths_fields_extra(max_conn_rate) ->
#{
default => infinity
};
short_paths_fields_extra(_Name) ->
#{}.
desc(limiter) ->
"Settings for the rate limiter.";
desc(node_opts) ->

View File

@ -64,17 +64,6 @@
-export_type([listener_id/0]).
-dialyzer(
{no_unknown, [
is_running/3,
current_conns/3,
do_stop_listener/3,
do_start_listener/4,
do_update_listener/4,
quic_listener_conf_rollback/3
]}
).
-type listener_id() :: atom() | binary().
-type listener_type() :: tcp | ssl | ws | wss | quic | dtls.
@ -432,7 +421,7 @@ do_start_listener(Type, Name, Id, #{bind := ListenOn} = Opts) when ?ESOCKD_LISTE
esockd:open(
Id,
ListenOn,
merge_default(esockd_opts(Id, Type, Name, Opts, _OldOpts = undefined))
merge_default(esockd_opts(Id, Type, Name, Opts))
);
%% Start MQTT/WS listener
do_start_listener(Type, Name, Id, Opts) when ?COWBOY_LISTENER(Type) ->
@ -476,7 +465,7 @@ do_update_listener(Type, Name, OldConf, NewConf = #{bind := ListenOn}) when
Id = listener_id(Type, Name),
case maps:get(bind, OldConf) of
ListenOn ->
esockd:set_options({Id, ListenOn}, esockd_opts(Id, Type, Name, NewConf, OldConf));
esockd:set_options({Id, ListenOn}, esockd_opts(Id, Type, Name, NewConf));
_Different ->
%% TODO
%% Again, we're not strictly required to drop live connections in this case.
@ -588,7 +577,7 @@ perform_listener_change(update, {{Type, Name, ConfOld}, {_, _, ConfNew}}) ->
perform_listener_change(stop, {Type, Name, Conf}) ->
stop_listener(Type, Name, Conf).
esockd_opts(ListenerId, Type, Name, Opts0, OldOpts) ->
esockd_opts(ListenerId, Type, Name, Opts0) ->
Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0),
Limiter = limiter(Opts0),
Opts2 =
@ -620,7 +609,7 @@ esockd_opts(ListenerId, Type, Name, Opts0, OldOpts) ->
tcp ->
Opts3#{tcp_options => tcp_opts(Opts0)};
ssl ->
OptsWithCRL = inject_crl_config(Opts0, OldOpts),
OptsWithCRL = inject_crl_config(Opts0),
OptsWithSNI = inject_sni_fun(ListenerId, OptsWithCRL),
OptsWithRootFun = inject_root_fun(OptsWithSNI),
OptsWithVerifyFun = inject_verify_fun(OptsWithRootFun),
@ -996,7 +985,7 @@ inject_sni_fun(_ListenerId, Conf) ->
Conf.
inject_crl_config(
Conf = #{ssl_options := #{enable_crl_check := true} = SSLOpts}, _OldOpts
Conf = #{ssl_options := #{enable_crl_check := true} = SSLOpts}
) ->
HTTPTimeout = emqx_config:get([crl_cache, http_timeout], timer:seconds(15)),
Conf#{
@ -1006,16 +995,7 @@ inject_crl_config(
crl_cache => {emqx_ssl_crl_cache, {internal, [{http, HTTPTimeout}]}}
}
};
inject_crl_config(#{ssl_options := SSLOpts0} = Conf0, #{} = OldOpts) ->
%% Note: we must set crl options to `undefined' to unset them. Otherwise,
%% `esockd' will retain such options when `esockd:merge_opts/2' is called and the SSL
%% options were previously enabled.
WasEnabled = emqx_utils_maps:deep_get([ssl_options, enable_crl_check], OldOpts, false),
Undefine = fun(Acc, K) -> emqx_utils_maps:put_if(Acc, K, undefined, WasEnabled) end,
SSLOpts1 = Undefine(SSLOpts0, crl_check),
SSLOpts = Undefine(SSLOpts1, crl_cache),
Conf0#{ssl_options := SSLOpts};
inject_crl_config(Conf, undefined = _OldOpts) ->
inject_crl_config(Conf) ->
Conf.
maybe_unregister_ocsp_stapling_refresh(
@ -1038,6 +1018,7 @@ ensure_max_conns(<<"infinity">>) -> <<"infinity">>;
ensure_max_conns(MaxConn) when is_binary(MaxConn) -> binary_to_integer(MaxConn);
ensure_max_conns(MaxConn) -> MaxConn.
-spec quic_listen_on(X :: any()) -> quicer:listen_on().
quic_listen_on(Bind) ->
case Bind of
{Addr, Port} when tuple_size(Addr) == 4 ->

View File

@ -25,7 +25,7 @@
-export([start_link/0]).
%% throttler API
-export([allow/2]).
-export([allow/1]).
%% gen_server callbacks
-export([
@ -40,29 +40,23 @@
-define(SEQ_ID(Msg), {?MODULE, Msg}).
-define(NEW_SEQ, atomics:new(1, [{signed, false}])).
-define(GET_SEQ(Msg), persistent_term:get(?SEQ_ID(Msg), undefined)).
-define(ERASE_SEQ(Msg), persistent_term:erase(?SEQ_ID(Msg))).
-define(RESET_SEQ(SeqRef), atomics:put(SeqRef, 1, 0)).
-define(INC_SEQ(SeqRef), atomics:add(SeqRef, 1, 1)).
-define(GET_DROPPED(SeqRef), atomics:get(SeqRef, 1) - 1).
-define(IS_ALLOWED(SeqRef), atomics:add_get(SeqRef, 1, 1) =:= 1).
-define(NEW_THROTTLE(Msg, SeqRef), persistent_term:put(?SEQ_ID(Msg), SeqRef)).
-define(MSGS_LIST, emqx:get_config([log, throttling, msgs], [])).
-define(TIME_WINDOW_MS, timer:seconds(emqx:get_config([log, throttling, time_window], 60))).
%% @doc Check if a throttled log message is allowed to pass down to the logger this time.
%% The Msg has to be an atom, and the second argument `UniqueKey' should be `undefined'
%% for predefined message IDs.
%% For relatively static resources created from configurations such as data integration
%% resource IDs `UniqueKey' should be of `binary()' type.
-spec allow(atom(), undefined | binary()) -> boolean().
allow(Msg, UniqueKey) when
is_atom(Msg) andalso (is_binary(UniqueKey) orelse UniqueKey =:= undefined)
->
-spec allow(atom()) -> boolean().
allow(Msg) when is_atom(Msg) ->
case emqx_logger:get_primary_log_level() of
debug ->
true;
_ ->
do_allow(Msg, UniqueKey)
do_allow(Msg)
end.
-spec start_link() -> startlink_ret().
@ -74,8 +68,7 @@ start_link() ->
%%--------------------------------------------------------------------
init([]) ->
process_flag(trap_exit, true),
ok = lists:foreach(fun new_throttler/1, ?MSGS_LIST),
ok = lists:foreach(fun(Msg) -> ?NEW_THROTTLE(Msg, ?NEW_SEQ) end, ?MSGS_LIST),
CurrentPeriodMs = ?TIME_WINDOW_MS,
TimerRef = schedule_refresh(CurrentPeriodMs),
{ok, #{timer_ref => TimerRef, current_period_ms => CurrentPeriodMs}}.
@ -93,22 +86,16 @@ handle_info(refresh, #{current_period_ms := PeriodMs} = State) ->
DroppedStats = lists:foldl(
fun(Msg, Acc) ->
case ?GET_SEQ(Msg) of
%% Should not happen, unless the static ids list is updated at run-time.
undefined ->
%% Should not happen, unless the static ids list is updated at run-time.
new_throttler(Msg),
?NEW_THROTTLE(Msg, ?NEW_SEQ),
?tp(log_throttler_new_msg, #{throttled_msg => Msg}),
Acc;
SeqMap when is_map(SeqMap) ->
maps:fold(
fun(Key, Ref, Acc0) ->
ID = iolist_to_binary([atom_to_binary(Msg), $:, Key]),
drop_stats(Ref, ID, Acc0)
end,
Acc,
SeqMap
);
SeqRef ->
drop_stats(SeqRef, Msg, Acc)
Dropped = ?GET_DROPPED(SeqRef),
ok = ?RESET_SEQ(SeqRef),
?tp(log_throttler_dropped, #{dropped_count => Dropped, throttled_msg => Msg}),
maybe_add_dropped(Msg, Dropped, Acc)
end
end,
#{},
@ -125,16 +112,7 @@ handle_info(Info, State) ->
?SLOG(error, #{msg => "unxpected_info", info => Info}),
{noreply, State}.
drop_stats(SeqRef, Msg, Acc) ->
Dropped = ?GET_DROPPED(SeqRef),
ok = ?RESET_SEQ(SeqRef),
?tp(log_throttler_dropped, #{dropped_count => Dropped, throttled_msg => Msg}),
maybe_add_dropped(Msg, Dropped, Acc).
terminate(_Reason, _State) ->
%% atomics do not have delete/remove/release/deallocate API
%% after the reference is garbage-collected the resource is released
lists:foreach(fun(Msg) -> ?ERASE_SEQ(Msg) end, ?MSGS_LIST),
ok.
code_change(_OldVsn, State, _Extra) ->
@ -144,27 +122,17 @@ code_change(_OldVsn, State, _Extra) ->
%% internal functions
%%--------------------------------------------------------------------
do_allow(Msg, UniqueKey) ->
do_allow(Msg) ->
case persistent_term:get(?SEQ_ID(Msg), undefined) of
undefined ->
%% This is either a race condition (emqx_log_throttler is not started yet)
%% or a developer mistake (msg used in ?SLOG_THROTTLE/2,3 macro is
%% not added to the default value of `log.throttling.msgs`.
?SLOG(debug, #{
msg => "log_throttle_disabled",
?SLOG(info, #{
msg => "missing_log_throttle_sequence",
throttled_msg => Msg
}),
true;
%% e.g: unrecoverable msg throttle according resource_id
SeqMap when is_map(SeqMap) ->
case maps:find(UniqueKey, SeqMap) of
{ok, SeqRef} ->
?IS_ALLOWED(SeqRef);
error ->
SeqRef = ?NEW_SEQ,
new_throttler(Msg, SeqMap#{UniqueKey => SeqRef}),
true
end;
SeqRef ->
?IS_ALLOWED(SeqRef)
end.
@ -186,11 +154,3 @@ maybe_log_dropped(_DroppedStats, _PeriodMs) ->
schedule_refresh(PeriodMs) ->
?tp(log_throttler_sched_refresh, #{new_period_ms => PeriodMs}),
erlang:send_after(PeriodMs, ?MODULE, refresh).
new_throttler(unrecoverable_resource_error = Msg) ->
new_throttler(Msg, #{});
new_throttler(Msg) ->
new_throttler(Msg, ?NEW_SEQ).
new_throttler(Msg, AtomicOrEmptyMap) ->
persistent_term:put(?SEQ_ID(Msg), AtomicOrEmptyMap).

View File

@ -55,8 +55,7 @@
depth => pos_integer() | unlimited,
report_cb => logger:report_cb(),
single_line => boolean(),
chars_limit => unlimited | pos_integer(),
payload_encode => text | hidden | hex
chars_limit => unlimited | pos_integer()
}.
-define(IS_STRING(String), (is_list(String) orelse is_binary(String))).
@ -104,8 +103,7 @@ format(Msg, Meta, Config) ->
maybe_format_msg(undefined, _Meta, _Config) ->
#{};
maybe_format_msg({report, Report0} = Msg, #{report_cb := Cb} = Meta, Config) ->
Report = emqx_logger_textfmt:try_encode_meta(Report0, Config),
maybe_format_msg({report, Report} = Msg, #{report_cb := Cb} = Meta, Config) ->
case is_map(Report) andalso Cb =:= ?DEFAULT_FORMATTER of
true ->
%% reporting a map without a customised format function

View File

@ -20,12 +20,11 @@
-export([format/2]).
-export([check_config/1]).
-export([try_format_unicode/1, try_encode_meta/2]).
-export([try_format_unicode/1]).
%% Used in the other log formatters
-export([evaluate_lazy_values_if_dbg_level/1, evaluate_lazy_values/1]).
check_config(X) ->
logger_formatter:check_config(maps:without([timestamp_format, with_mfa, payload_encode], X)).
check_config(X) -> logger_formatter:check_config(maps:without([timestamp_format, with_mfa], X)).
%% Principle here is to delegate the formatting to logger_formatter:format/2
%% as much as possible, and only enrich the report with clientid, peername, topic, username
@ -108,10 +107,9 @@ is_list_report_acceptable(#{report_cb := Cb}) ->
is_list_report_acceptable(_) ->
false.
enrich_report(ReportRaw0, Meta, Config) ->
enrich_report(ReportRaw, Meta, Config) ->
%% clientid and peername always in emqx_conn's process metadata.
%% topic and username can be put in meta using ?SLOG/3, or put in msg's report by ?SLOG/2
ReportRaw = try_encode_meta(ReportRaw0, Config),
Topic =
case maps:get(topic, Meta, undefined) of
undefined -> maps:get(topic, ReportRaw, undefined);
@ -179,29 +177,3 @@ enrich_topic({Fmt, Args}, #{topic := Topic}) when is_list(Fmt) ->
{" topic: ~ts" ++ Fmt, [Topic | Args]};
enrich_topic(Msg, _) ->
Msg.
try_encode_meta(Report, Config) ->
lists:foldl(
fun(Meta, Acc) ->
try_encode_meta(Meta, Acc, Config)
end,
Report,
[payload, packet]
).
try_encode_meta(payload, #{payload := Payload} = Report, #{payload_encode := Encode}) ->
Report#{payload := encode_payload(Payload, Encode)};
try_encode_meta(packet, #{packet := Packet} = Report, #{payload_encode := Encode}) when
is_tuple(Packet)
->
Report#{packet := emqx_packet:format(Packet, Encode)};
try_encode_meta(_, Report, _Config) ->
Report.
encode_payload(Payload, text) ->
Payload;
encode_payload(_Payload, hidden) ->
"******";
encode_payload(Payload, hex) ->
Bin = emqx_utils_conv:bin(Payload),
binary:encode_hex(Bin).

View File

@ -51,6 +51,7 @@
]).
-export([
format/1,
format/2
]).
@ -480,6 +481,10 @@ will_msg(#mqtt_packet_connect{
headers = #{username => Username, properties => Props}
}.
%% @doc Format packet
-spec format(emqx_types:packet()) -> iolist().
format(Packet) -> format(Packet, emqx_trace_handler:payload_encode()).
%% @doc Format packet
-spec format(emqx_types:packet(), hex | text | hidden) -> iolist().
format(#mqtt_packet{header = Header, variable = Variable, payload = Payload}, PayloadEncode) ->

View File

@ -102,11 +102,7 @@ hash({SimpleHash, _Salt, disable}, Password) when is_binary(Password) ->
hash({SimpleHash, Salt, prefix}, Password) when is_binary(Password), is_binary(Salt) ->
hash_data(SimpleHash, <<Salt/binary, Password/binary>>);
hash({SimpleHash, Salt, suffix}, Password) when is_binary(Password), is_binary(Salt) ->
hash_data(SimpleHash, <<Password/binary, Salt/binary>>);
hash({_SimpleHash, Salt, _SaltPos}, _Password) when not is_binary(Salt) ->
error({salt_not_string, Salt});
hash({_SimpleHash, _Salt, _SaltPos}, Password) when not is_binary(Password) ->
error({password_not_string, Password}).
hash_data(SimpleHash, <<Password/binary, Salt/binary>>).
-spec hash_data(hash_type(), binary()) -> binary().
hash_data(plain, Data) when is_binary(Data) ->

View File

@ -182,9 +182,6 @@
shared_sub_s := shared_sub_state(),
%% Buffer:
inflight := emqx_persistent_session_ds_inflight:t(),
%% Last fetched stream:
%% Used as a continuation point for fair stream scheduling.
last_fetched_stream => emqx_persistent_session_ds_state:stream_key(),
%% In-progress replay:
%% List of stream replay states to be added to the inflight buffer.
replay => [{_StreamKey, stream_state()}, ...],
@ -621,13 +618,9 @@ handle_timeout(ClientInfo, ?TIMER_RETRY_REPLAY, Session0) ->
Session = replay_streams(Session0, ClientInfo),
{ok, [], Session};
handle_timeout(ClientInfo, ?TIMER_GET_STREAMS, Session0 = #{s := S0, shared_sub_s := SharedSubS0}) ->
%% `gc` and `renew_streams` methods may drop unsubscribed streams.
%% Shared subscription handler must have a chance to see unsubscribed streams
%% in the fully replayed state.
{S1, SharedSubS1} = emqx_persistent_session_ds_shared_subs:pre_renew_streams(S0, SharedSubS0),
S2 = emqx_persistent_session_ds_subs:gc(S1),
S3 = emqx_persistent_session_ds_stream_scheduler:renew_streams(S2),
{S, SharedSubS} = emqx_persistent_session_ds_shared_subs:renew_streams(S3, SharedSubS1),
S1 = emqx_persistent_session_ds_subs:gc(S0),
S2 = emqx_persistent_session_ds_stream_scheduler:renew_streams(S1),
{S, SharedSubS} = emqx_persistent_session_ds_shared_subs:renew_streams(S2, SharedSubS0),
Interval = get_config(ClientInfo, [renew_streams_interval]),
Session = emqx_session:ensure_timer(
?TIMER_GET_STREAMS,
@ -761,7 +754,7 @@ skip_batch(StreamKey, SRS0, Session = #{s := S0}, ClientInfo, Reason) ->
%%--------------------------------------------------------------------
-spec disconnect(session(), emqx_types:conninfo()) -> {shutdown, session()}.
disconnect(Session = #{id := Id, s := S0, shared_sub_s := SharedSubS0}, ConnInfo) ->
disconnect(Session = #{id := Id, s := S0}, ConnInfo) ->
S1 = maybe_set_offline_info(S0, Id),
S2 = emqx_persistent_session_ds_state:set_last_alive_at(now_ms(), S1),
S3 =
@ -771,9 +764,8 @@ disconnect(Session = #{id := Id, s := S0, shared_sub_s := SharedSubS0}, ConnInfo
_ ->
S2
end,
{S4, SharedSubS} = emqx_persistent_session_ds_shared_subs:on_disconnect(S3, SharedSubS0),
S = emqx_persistent_session_ds_state:commit(S4),
{shutdown, Session#{s => S, shared_sub_s => SharedSubS}}.
S = emqx_persistent_session_ds_state:commit(S3),
{shutdown, Session#{s => S}}.
-spec terminate(Reason :: term(), session()) -> ok.
terminate(_Reason, Session = #{id := Id, s := S}) ->
@ -821,12 +813,10 @@ list_client_subscriptions(ClientId) ->
{error, not_found}
end.
-spec get_client_subscription(emqx_types:clientid(), topic_filter() | share_topic_filter()) ->
-spec get_client_subscription(emqx_types:clientid(), emqx_types:topic()) ->
subscription() | undefined.
get_client_subscription(ClientId, #share{} = ShareTopicFilter) ->
emqx_persistent_session_ds_shared_subs:cold_get_subscription(ClientId, ShareTopicFilter);
get_client_subscription(ClientId, TopicFilter) ->
emqx_persistent_session_ds_subs:cold_get_subscription(ClientId, TopicFilter).
get_client_subscription(ClientId, Topic) ->
emqx_persistent_session_ds_subs:cold_get_subscription(ClientId, Topic).
%%--------------------------------------------------------------------
%% Session tables operations
@ -993,33 +983,27 @@ do_ensure_all_iterators_closed(_DSSessionID) ->
%% Normal replay:
%%--------------------------------------------------------------------
fetch_new_messages(Session0 = #{s := S0, shared_sub_s := SharedSubS0}, ClientInfo) ->
{S1, SharedSubS1} = emqx_persistent_session_ds_shared_subs:on_streams_replay(S0, SharedSubS0),
Session1 = Session0#{s => S1, shared_sub_s => SharedSubS1},
LFS = maps:get(last_fetched_stream, Session1, beginning),
ItStream = emqx_persistent_session_ds_stream_scheduler:iter_next_streams(LFS, S1),
BatchSize = get_config(ClientInfo, [batch_size]),
Session2 = fetch_new_messages(ItStream, BatchSize, Session1, ClientInfo),
Session2#{shared_sub_s => SharedSubS1}.
fetch_new_messages(Session0 = #{s := S0}, ClientInfo) ->
Streams = emqx_persistent_session_ds_stream_scheduler:find_new_streams(S0),
Session1 = fetch_new_messages(Streams, Session0, ClientInfo),
#{s := S1, shared_sub_s := SharedSubS0} = Session1,
{S2, SharedSubS1} = emqx_persistent_session_ds_shared_subs:on_streams_replayed(S1, SharedSubS0),
Session1#{s => S2, shared_sub_s => SharedSubS1}.
fetch_new_messages(ItStream0, BatchSize, Session0, ClientInfo) ->
#{inflight := Inflight} = Session0,
fetch_new_messages([], Session, _ClientInfo) ->
Session;
fetch_new_messages([I | Streams], Session0 = #{inflight := Inflight}, ClientInfo) ->
BatchSize = get_config(ClientInfo, [batch_size]),
case emqx_persistent_session_ds_inflight:n_buffered(all, Inflight) >= BatchSize of
true ->
%% Buffer is full:
Session0;
false ->
case emqx_persistent_session_ds_stream_scheduler:next_stream(ItStream0) of
{StreamKey, Srs, ItStream} ->
Session1 = new_batch(StreamKey, Srs, BatchSize, Session0, ClientInfo),
Session = Session1#{last_fetched_stream => StreamKey},
fetch_new_messages(ItStream, BatchSize, Session, ClientInfo);
none ->
Session0
end
Session = new_batch(I, BatchSize, Session0, ClientInfo),
fetch_new_messages(Streams, Session, ClientInfo)
end.
new_batch(StreamKey, Srs0, BatchSize, Session0 = #{s := S0}, ClientInfo) ->
new_batch({StreamKey, Srs0}, BatchSize, Session0 = #{s := S0}, ClientInfo) ->
SN1 = emqx_persistent_session_ds_state:get_seqno(?next(?QOS_1), S0),
SN2 = emqx_persistent_session_ds_state:get_seqno(?next(?QOS_2), S0),
Srs1 = Srs0#srs{

View File

@ -17,7 +17,7 @@
-module(emqx_persistent_session_ds_router).
-include("emqx.hrl").
-include("emqx_ps_ds_int.hrl").
-include("emqx_persistent_session_ds/emqx_ps_ds_int.hrl").
-export([init_tables/0]).
@ -47,7 +47,7 @@
-endif.
-type route() :: #ps_route{}.
-type dest() :: emqx_persistent_session_ds:id() | #share_dest{}.
-type dest() :: emqx_persistent_session_ds:id().
-export_type([dest/0, route/0]).
@ -161,7 +161,7 @@ topics() ->
print_routes(Topic) ->
lists:foreach(
fun(#ps_route{topic = To, dest = Dest}) ->
io:format("~ts -> ~tp~n", [To, Dest])
io:format("~ts -> ~ts~n", [To, Dest])
end,
match_routes(Topic)
).
@ -247,8 +247,6 @@ mk_filtertab_fold_fun(FoldFun) ->
match_filters(Topic) ->
emqx_topic_index:matches(Topic, ?PS_FILTERS_TAB, []).
get_dest_session_id(#share_dest{session_id = DSSessionId}) ->
DSSessionId;
get_dest_session_id({_, DSSessionId}) ->
DSSessionId;
get_dest_session_id(DSSessionId) ->

View File

@ -2,37 +2,11 @@
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
%% @doc This module
%% * handles creation and management of _shared_ subscriptions for the session;
%% * provides streams to the session;
%% * handles progress of stream replay.
%%
%% The logic is quite straightforward; most of the parts resemble the logic of the
%% `emqx_persistent_session_ds_subs` (subscribe/unsubscribe) and
%% `emqx_persistent_session_ds_scheduler` (providing new streams),
%% but some data is sent or received from the `emqx_persistent_session_ds_shared_subs_agent`
%% which communicates with remote shared subscription leaders.
%%
%% A tricky part is the concept of "scheduled actions". When we unsubscribe from a topic
%% we may have some streams that have unacked messages. So we do not have a reliable
%% progress for them. Sending the current progress to the leader and disconnecting
%% will lead to the duplication of messages. So after unsubscription, we need to wait
%% some time until all streams are acked, and only then we disconnect from the leader.
%%
%% For this purpose we have the `scheduled_actions` map in the state of the module.
%% We preserve there the streams that we need to wait for and collect their progress.
%% We also use `scheduled_actions` for resubscriptions. If a client quickly resubscribes
%% after unsubscription, we may still have the mentioned streams unacked. If we abandon
%% them, just connect to the leader, then it may lease us the same streams again, but with
%% the previous progress. So messages may duplicate.
-module(emqx_persistent_session_ds_shared_subs).
-include("emqx_mqtt.hrl").
-include("emqx.hrl").
-include("logger.hrl").
-include("session_internals.hrl").
-include_lib("snabbkaffe/include/trace.hrl").
-export([
@ -41,56 +15,16 @@
on_subscribe/3,
on_unsubscribe/4,
on_disconnect/2,
on_streams_replay/2,
on_streams_replayed/2,
on_info/3,
pre_renew_streams/2,
renew_streams/2,
to_map/2
]).
%% Management API:
-export([
cold_get_subscription/2
]).
-export([
format_lease_events/1,
format_stream_progresses/1
]).
-define(schedule_subscribe, schedule_subscribe).
-define(schedule_unsubscribe, schedule_unsubscribe).
-type stream_key() :: {emqx_persistent_session_ds:id(), emqx_ds:stream()}.
-type scheduled_action_type() ::
{?schedule_subscribe, emqx_types:subopts()} | ?schedule_unsubscribe.
-type agent_stream_progress() :: #{
stream := emqx_ds:stream(),
progress := progress(),
use_finished := boolean()
}.
-type progress() ::
#{
iterator := emqx_ds:iterator()
}.
-type scheduled_action() :: #{
type := scheduled_action_type(),
stream_keys_to_wait := [stream_key()],
progresses := [agent_stream_progress()]
}.
-type t() :: #{
agent := emqx_persistent_session_ds_shared_subs_agent:t(),
scheduled_actions := #{
share_topic_filter() => scheduled_action()
}
agent := emqx_persistent_session_ds_shared_subs_agent:t()
}.
-type share_topic_filter() :: emqx_persistent_session_ds:share_topic_filter().
-type opts() :: #{
@ -100,90 +34,184 @@
-define(rank_x, rank_shared).
-define(rank_y, 0).
-export_type([
progress/0,
agent_stream_progress/0
]).
%%--------------------------------------------------------------------
%% API
%%--------------------------------------------------------------------
%%--------------------------------------------------------------------
%% new
-spec new(opts()) -> t().
new(Opts) ->
#{
agent => emqx_persistent_session_ds_shared_subs_agent:new(
agent_opts(Opts)
),
scheduled_actions => #{}
)
}.
%%--------------------------------------------------------------------
%% open
-spec open(emqx_persistent_session_ds_state:t(), opts()) ->
{ok, emqx_persistent_session_ds_state:t(), t()}.
open(S0, Opts) ->
open(S, Opts) ->
SharedSubscriptions = fold_shared_subs(
fun(#share{} = ShareTopicFilter, Sub, Acc) ->
[{ShareTopicFilter, to_agent_subscription(S0, Sub)} | Acc]
fun(#share{} = TopicFilter, Sub, Acc) ->
[{TopicFilter, to_agent_subscription(S, Sub)} | Acc]
end,
[],
S0
S
),
Agent = emqx_persistent_session_ds_shared_subs_agent:open(
SharedSubscriptions, agent_opts(Opts)
),
SharedSubS = #{agent => Agent, scheduled_actions => #{}},
S1 = revoke_all_streams(S0),
{ok, S1, SharedSubS}.
%%--------------------------------------------------------------------
%% on_subscribe
SharedSubS = #{agent => Agent},
{ok, S, SharedSubS}.
-spec on_subscribe(
share_topic_filter(),
emqx_types:subopts(),
emqx_persistent_session_ds:session()
) -> {ok, emqx_persistent_session_ds_state:t(), t()} | {error, emqx_types:reason_code()}.
on_subscribe(#share{} = ShareTopicFilter, SubOpts, #{s := S} = Session) ->
Subscription = emqx_persistent_session_ds_state:get_subscription(ShareTopicFilter, S),
on_subscribe(Subscription, ShareTopicFilter, SubOpts, Session).
on_subscribe(TopicFilter, SubOpts, #{s := S} = Session) ->
Subscription = emqx_persistent_session_ds_state:get_subscription(TopicFilter, S),
on_subscribe(Subscription, TopicFilter, SubOpts, Session).
-spec on_unsubscribe(
emqx_persistent_session_ds:id(),
emqx_persistent_session_ds:topic_filter(),
emqx_persistent_session_ds_state:t(),
t()
) ->
{ok, emqx_persistent_session_ds_state:t(), t(), emqx_persistent_session_ds:subscription()}
| {error, emqx_types:reason_code()}.
on_unsubscribe(SessionId, TopicFilter, S0, #{agent := Agent0} = SharedSubS0) ->
case lookup(TopicFilter, S0) of
undefined ->
{error, ?RC_NO_SUBSCRIPTION_EXISTED};
Subscription ->
?tp(persistent_session_ds_subscription_delete, #{
session_id => SessionId, topic_filter => TopicFilter
}),
Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_unsubscribe(
Agent0, TopicFilter
),
SharedSubS = SharedSubS0#{agent => Agent1},
S = emqx_persistent_session_ds_state:del_subscription(TopicFilter, S0),
{ok, S, SharedSubS, Subscription}
end.
-spec renew_streams(emqx_persistent_session_ds_state:t(), t()) ->
{emqx_persistent_session_ds_state:t(), t()}.
renew_streams(S0, #{agent := Agent0} = SharedSubS0) ->
{StreamLeaseEvents, Agent1} = emqx_persistent_session_ds_shared_subs_agent:renew_streams(
Agent0
),
?tp(info, shared_subs_new_stream_lease_events, #{stream_lease_events => StreamLeaseEvents}),
S1 = lists:foldl(
fun
(#{type := lease} = Event, S) -> accept_stream(Event, S);
(#{type := revoke} = Event, S) -> revoke_stream(Event, S)
end,
S0,
StreamLeaseEvents
),
SharedSubS1 = SharedSubS0#{agent => Agent1},
{S1, SharedSubS1}.
-spec on_streams_replayed(
emqx_persistent_session_ds_state:t(),
t()
) -> {emqx_persistent_session_ds_state:t(), t()}.
on_streams_replayed(S, #{agent := Agent0} = SharedSubS0) ->
%% TODO
%% Is it sufficient for a report?
Progress = fold_shared_stream_states(
fun(TopicFilter, Stream, SRS, Acc) ->
#srs{it_begin = BeginIt} = SRS,
StreamProgress = #{
topic_filter => TopicFilter,
stream => Stream,
iterator => BeginIt
},
[StreamProgress | Acc]
end,
[],
S
),
Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_stream_progress(
Agent0, Progress
),
SharedSubS1 = SharedSubS0#{agent => Agent1},
{S, SharedSubS1}.
-spec on_info(emqx_persistent_session_ds_state:t(), t(), term()) ->
{emqx_persistent_session_ds_state:t(), t()}.
on_info(S, #{agent := Agent0} = SharedSubS0, Info) ->
Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_info(Agent0, Info),
SharedSubS1 = SharedSubS0#{agent => Agent1},
{S, SharedSubS1}.
-spec to_map(emqx_persistent_session_ds_state:t(), t()) -> map().
to_map(_S, _SharedSubS) ->
%% TODO
#{}.
%%--------------------------------------------------------------------
%% on_subscribe internal functions
%% Internal functions
%%--------------------------------------------------------------------
on_subscribe(undefined, ShareTopicFilter, SubOpts, #{props := Props, s := S} = Session) ->
fold_shared_subs(Fun, Acc, S) ->
emqx_persistent_session_ds_state:fold_subscriptions(
fun
(#share{} = TopicFilter, Sub, Acc0) -> Fun(TopicFilter, Sub, Acc0);
(_, _Sub, Acc0) -> Acc0
end,
Acc,
S
).
fold_shared_stream_states(Fun, Acc, S) ->
%% TODO
%% Optimize or cache
TopicFilters = fold_shared_subs(
fun
(#share{} = TopicFilter, #{id := Id} = _Sub, Acc0) ->
Acc0#{Id => TopicFilter};
(_, _, Acc0) ->
Acc0
end,
#{},
S
),
emqx_persistent_session_ds_state:fold_streams(
fun({SubId, Stream}, SRS, Acc0) ->
case TopicFilters of
#{SubId := TopicFilter} ->
Fun(TopicFilter, Stream, SRS, Acc0);
_ ->
Acc0
end
end,
Acc,
S
).
on_subscribe(undefined, TopicFilter, SubOpts, #{props := Props, s := S} = Session) ->
#{max_subscriptions := MaxSubscriptions} = Props,
case emqx_persistent_session_ds_state:n_subscriptions(S) < MaxSubscriptions of
true ->
create_new_subscription(ShareTopicFilter, SubOpts, Session);
create_new_subscription(TopicFilter, SubOpts, Session);
false ->
{error, ?RC_QUOTA_EXCEEDED}
end;
on_subscribe(Subscription, ShareTopicFilter, SubOpts, Session) ->
update_subscription(Subscription, ShareTopicFilter, SubOpts, Session).
on_subscribe(Subscription, TopicFilter, SubOpts, Session) ->
update_subscription(Subscription, TopicFilter, SubOpts, Session).
-dialyzer({nowarn_function, create_new_subscription/3}).
create_new_subscription(#share{topic = TopicFilter, group = Group} = ShareTopicFilter, SubOpts, #{
id := SessionId,
s := S0,
shared_sub_s := #{agent := Agent} = SharedSubS0,
props := Props
create_new_subscription(TopicFilter, SubOpts, #{
id := SessionId, s := S0, shared_sub_s := #{agent := Agent0} = SharedSubS0, props := Props
}) ->
case
emqx_persistent_session_ds_shared_subs_agent:can_subscribe(
Agent, ShareTopicFilter, SubOpts
emqx_persistent_session_ds_shared_subs_agent:on_subscribe(
Agent0, TopicFilter, SubOpts
)
of
ok ->
ok = emqx_persistent_session_ds_router:do_add_route(TopicFilter, #share_dest{
session_id = SessionId, group = Group
}),
_ = emqx_external_broker:add_persistent_shared_route(TopicFilter, Group, SessionId),
{ok, Agent1} ->
#{upgrade_qos := UpgradeQoS} = Props,
{SubId, S1} = emqx_persistent_session_ds_state:new_id(S0),
{SStateId, S2} = emqx_persistent_session_ds_state:new_id(S1),
@ -199,20 +227,20 @@ create_new_subscription(#share{topic = TopicFilter, group = Group} = ShareTopicF
start_time => now_ms()
},
S = emqx_persistent_session_ds_state:put_subscription(
ShareTopicFilter, Subscription, S3
TopicFilter, Subscription, S3
),
SharedSubS = schedule_subscribe(SharedSubS0, ShareTopicFilter, SubOpts),
SharedSubS = SharedSubS0#{agent => Agent1},
?tp(persistent_session_ds_shared_subscription_added, #{
topic_filter => TopicFilter, session => SessionId
}),
{ok, S, SharedSubS};
{error, _} = Error ->
Error
end.
update_subscription(
#{current_state := SStateId0, id := SubId} = Sub0, ShareTopicFilter, SubOpts, #{
s := S0, shared_sub_s := SharedSubS, props := Props
}
) ->
update_subscription(#{current_state := SStateId0, id := SubId} = Sub0, TopicFilter, SubOpts, #{
s := S0, shared_sub_s := SharedSubS, props := Props
}) ->
#{upgrade_qos := UpgradeQoS} = Props,
SState = #{parent_subscription => SubId, upgrade_qos => UpgradeQoS, subopts => SubOpts},
case emqx_persistent_session_ds_state:get_subscription_state(SStateId0, S0) of
@ -226,173 +254,36 @@ update_subscription(
SStateId, SState, S1
),
Sub = Sub0#{current_state => SStateId},
S = emqx_persistent_session_ds_state:put_subscription(ShareTopicFilter, Sub, S2),
S = emqx_persistent_session_ds_state:put_subscription(TopicFilter, Sub, S2),
{ok, S, SharedSubS}
end.
-dialyzer({nowarn_function, schedule_subscribe/3}).
schedule_subscribe(
#{agent := Agent0, scheduled_actions := ScheduledActions0} = SharedSubS0,
ShareTopicFilter,
SubOpts
) ->
case ScheduledActions0 of
#{ShareTopicFilter := ScheduledAction} ->
ScheduledActions1 = ScheduledActions0#{
ShareTopicFilter => ScheduledAction#{type => {?schedule_subscribe, SubOpts}}
},
?tp(debug, shared_subs_schedule_subscribe_override, #{
share_topic_filter => ShareTopicFilter,
new_type => {?schedule_subscribe, SubOpts},
old_action => format_schedule_action(ScheduledAction)
}),
SharedSubS0#{scheduled_actions := ScheduledActions1};
_ ->
?tp(debug, shared_subs_schedule_subscribe_new, #{
share_topic_filter => ShareTopicFilter, subopts => SubOpts
}),
Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_subscribe(
Agent0, ShareTopicFilter, SubOpts
),
SharedSubS0#{agent => Agent1}
end.
%%--------------------------------------------------------------------
%% on_unsubscribe
-spec on_unsubscribe(
emqx_persistent_session_ds:id(),
share_topic_filter(),
emqx_persistent_session_ds_state:t(),
t()
) ->
{ok, emqx_persistent_session_ds_state:t(), t(), emqx_persistent_session_ds:subscription()}
| {error, emqx_types:reason_code()}.
on_unsubscribe(
SessionId, #share{topic = TopicFilter, group = Group} = ShareTopicFilter, S0, SharedSubS0
) ->
case lookup(ShareTopicFilter, S0) of
lookup(TopicFilter, S) ->
case emqx_persistent_session_ds_state:get_subscription(TopicFilter, S) of
Sub = #{current_state := SStateId} ->
case emqx_persistent_session_ds_state:get_subscription_state(SStateId, S) of
#{subopts := SubOpts} ->
Sub#{subopts => SubOpts};
undefined ->
undefined
end;
undefined ->
{error, ?RC_NO_SUBSCRIPTION_EXISTED};
#{id := SubId} = Subscription ->
?tp(persistent_session_ds_subscription_delete, #{
session_id => SessionId, share_topic_filter => ShareTopicFilter
}),
_ = emqx_external_broker:delete_persistent_shared_route(TopicFilter, Group, SessionId),
ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, #share_dest{
session_id = SessionId, group = Group
}),
S = emqx_persistent_session_ds_state:del_subscription(ShareTopicFilter, S0),
SharedSubS = schedule_unsubscribe(S, SharedSubS0, SubId, ShareTopicFilter),
{ok, S, SharedSubS, Subscription}
end.
%%--------------------------------------------------------------------
%% on_unsubscribe internal functions
%% @doc Record (or upgrade) a scheduled action for `ShareTopicFilter'.
%% If an action is already pending for this topic filter, it is overridden
%% in place to an unsubscribe (its wait-list is preserved); otherwise a new
%% unsubscribe action is created that waits for all stream keys of the
%% unsubscribed subscription id.
schedule_unsubscribe(
    S, #{scheduled_actions := Scheduled0} = SharedSubS0, UnsubscribedSubId, ShareTopicFilter
) ->
    NewAction =
        case maps:find(ShareTopicFilter, Scheduled0) of
            {ok, OldAction} ->
                Overridden = OldAction#{type => ?schedule_unsubscribe},
                ?tp(debug, shared_subs_schedule_unsubscribe_override, #{
                    share_topic_filter => ShareTopicFilter,
                    new_type => ?schedule_unsubscribe,
                    old_action => format_schedule_action(OldAction)
                }),
                Overridden;
            error ->
                StreamKeys = stream_keys_by_sub_id(S, UnsubscribedSubId),
                ?tp(debug, shared_subs_schedule_unsubscribe_new, #{
                    share_topic_filter => ShareTopicFilter,
                    stream_keys => format_stream_keys(StreamKeys)
                }),
                #{
                    type => ?schedule_unsubscribe,
                    stream_keys_to_wait => StreamKeys,
                    progresses => []
                }
        end,
    SharedSubS0#{scheduled_actions := Scheduled0#{ShareTopicFilter => NewAction}}.
%%--------------------------------------------------------------------
%% pre_renew_streams
%% @doc Hook invoked by the parent session right before stream renewal.
%% For shared subscriptions it simply drives the replay/progress machinery,
%% which calls renew_streams/2 internally.
-spec pre_renew_streams(emqx_persistent_session_ds_state:t(), t()) ->
    {emqx_persistent_session_ds_state:t(), t()}.
pre_renew_streams(S, SharedSubS) ->
    on_streams_replay(S, SharedSubS).
%%--------------------------------------------------------------------
%% renew_streams
%% @doc Fetch stream lease/revoke events from the shared-subs agent and fold
%% them into the session state: leased streams are accepted (unless a
%% scheduled action for their topic filter is pending), revoked streams are
%% marked as such. Returns the updated session state and shared-subs state.
-spec renew_streams(emqx_persistent_session_ds_state:t(), t()) ->
    {emqx_persistent_session_ds_state:t(), t()}.
renew_streams(S0, #{agent := Agent0, scheduled_actions := ScheduledActions} = SharedSubS0) ->
    {StreamLeaseEvents, Agent1} = emqx_persistent_session_ds_shared_subs_agent:renew_streams(
        Agent0
    ),
    %% Emit a trace point only when there is something to report.
    StreamLeaseEvents =/= [] andalso
        ?tp(debug, shared_subs_new_stream_lease_events, #{
            stream_lease_events => format_lease_events(StreamLeaseEvents)
        }),
    S1 = lists:foldl(
        fun
            (#{type := lease} = Event, S) -> accept_stream(Event, S, ScheduledActions);
            (#{type := revoke} = Event, S) -> revoke_stream(Event, S)
        end,
        S0,
        StreamLeaseEvents
    ),
    SharedSubS1 = SharedSubS0#{agent => Agent1},
    {S1, SharedSubS1}.
%%--------------------------------------------------------------------
%% renew_streams internal functions
%% @doc Accept a leased stream unless a subscribe/unsubscribe action is still
%% pending for its share topic filter.
%%
%% If we have a pending action (subscribe or unsubscribe) for this topic filter,
%% we should not accept a stream and start replaying it. We won't use it anyway:
%% * if subscribe is pending, we will reset the agent and obtain a new lease;
%% * if unsubscribe is pending, we will drop the connection.
%%
%% Fix: removed a stray `undefined' atom after the recursive call (a merge
%% artifact) that made the second case clause a syntax error.
accept_stream(#{share_topic_filter := ShareTopicFilter} = Event, S, ScheduledActions) ->
    case ScheduledActions of
        #{ShareTopicFilter := _Action} ->
            S;
        _ ->
            accept_stream(Event, S)
    end.
accept_stream(
#{
share_topic_filter := ShareTopicFilter,
stream := Stream,
progress := #{iterator := Iterator} = _Progress
} = _Event,
S0
#{topic_filter := TopicFilter, stream := Stream, iterator := Iterator}, S0
) ->
case emqx_persistent_session_ds_state:get_subscription(ShareTopicFilter, S0) of
case emqx_persistent_session_ds_state:get_subscription(TopicFilter, S0) of
undefined ->
%% We unsubscribed
S0;
%% This should not happen.
%% Agent should have received unsubscribe callback
%% and should not have passed this stream as a new one
error(new_stream_without_sub);
#{id := SubId, current_state := SStateId} ->
Key = {SubId, Stream},
NeedCreateStream =
case emqx_persistent_session_ds_state:get_stream(Key, S0) of
undefined ->
true;
#srs{unsubscribed = true} ->
true;
_SRS ->
false
end,
case NeedCreateStream of
true ->
case emqx_persistent_session_ds_state:get_stream(Key, S0) of
undefined ->
NewSRS =
#srs{
rank_x = ?rank_x,
@ -403,15 +294,15 @@ accept_stream(
},
S1 = emqx_persistent_session_ds_state:put_stream(Key, NewSRS, S0),
S1;
false ->
_SRS ->
S0
end
end.
revoke_stream(
#{share_topic_filter := ShareTopicFilter, stream := Stream}, S0
#{topic_filter := TopicFilter, stream := Stream}, S0
) ->
case emqx_persistent_session_ds_state:get_subscription(ShareTopicFilter, S0) of
case emqx_persistent_session_ds_state:get_subscription(TopicFilter, S0) of
undefined ->
%% This should not happen.
%% Agent should have received unsubscribe callback
@ -429,363 +320,19 @@ revoke_stream(
end
end.
%%--------------------------------------------------------------------
%% on_streams_replay
%% @doc Main progress-propagation step: renew streams, report fully-acked
%% stream progress to the agent, then try to complete any scheduled
%% (un)subscribe actions that were waiting for in-flight messages to be acked.
-spec on_streams_replay(
    emqx_persistent_session_ds_state:t(),
    t()
) -> {emqx_persistent_session_ds_state:t(), t()}.
on_streams_replay(S0, SharedSubS0) ->
    {S1, #{agent := Agent0, scheduled_actions := ScheduledActions0} = SharedSubS1} =
        renew_streams(S0, SharedSubS0),
    Progresses = all_stream_progresses(S1, Agent0),
    Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_stream_progress(
        Agent0, Progresses
    ),
    {Agent2, ScheduledActions1} = run_scheduled_actions(S1, Agent1, ScheduledActions0),
    SharedSubS2 = SharedSubS1#{
        agent => Agent2,
        scheduled_actions => ScheduledActions1
    },
    {S1, SharedSubS2}.
%%--------------------------------------------------------------------
%% on_streams_replay internal functions
%% Progress for all started shared streams, excluding streams that still
%% have unacked messages.
all_stream_progresses(S, Agent) ->
    all_stream_progresses(S, Agent, _NeedUnacked = false).
%% @doc Collect per-share-topic-filter progress for all started shared
%% streams, as #{ShareTopicFilter => [StreamProgress]}. When NeedUnacked is
%% false, only fully-acked streams are included.
all_stream_progresses(S, _Agent, NeedUnacked) ->
    CommQos1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S),
    CommQos2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S),
    fold_shared_stream_states(
        fun(ShareTopicFilter, Stream, SRS, ProgressesAcc0) ->
            case
                is_stream_started(CommQos1, CommQos2, SRS) and
                    (NeedUnacked or is_stream_fully_acked(CommQos1, CommQos2, SRS))
            of
                true ->
                    StreamProgress = stream_progress(CommQos1, CommQos2, Stream, SRS),
                    %% Prepend to the list for this topic filter, creating it
                    %% on first use.
                    maps:update_with(
                        ShareTopicFilter,
                        fun(Progresses) -> [StreamProgress | Progresses] end,
                        [StreamProgress],
                        ProgressesAcc0
                    );
                false ->
                    ProgressesAcc0
            end
        end,
        #{},
        S
    ).
%% @doc Try to complete every scheduled (un)subscribe action. Completed
%% actions are removed from the map; actions still waiting for stream acks
%% are kept, possibly with an updated wait-list. Returns the updated agent
%% and the remaining scheduled actions.
run_scheduled_actions(S, Agent, ScheduledActions) ->
    maps:fold(
        fun(ShareTopicFilter, Action0, {AgentAcc0, ScheduledActionsAcc}) ->
            case run_scheduled_action(S, AgentAcc0, ShareTopicFilter, Action0) of
                {ok, AgentAcc1} ->
                    {AgentAcc1, maps:remove(ShareTopicFilter, ScheduledActionsAcc)};
                {continue, Action1} ->
                    {AgentAcc0, ScheduledActionsAcc#{ShareTopicFilter => Action1}}
            end
        end,
        {Agent, ScheduledActions},
        ScheduledActions
    ).
%% @doc Advance a single scheduled action. Streams that became fully acked
%% since the last check move from the wait-list into the accumulated
%% progresses. When the wait-list drains, the action completes: progress is
%% flushed to the agent and the matching agent callback (subscribe or
%% unsubscribe) is invoked. Returns {ok, Agent} when done, or
%% {continue, Action} to keep waiting.
%%
%% Fix: removed an interleaved `-spec to_agent_subscription(...)' fragment
%% (merge artifact) that had replaced the closing `) ->' of the function
%% head, breaking the syntax; fixed "won't se" typo.
run_scheduled_action(
    S,
    Agent0,
    ShareTopicFilter,
    #{type := Type, stream_keys_to_wait := StreamKeysToWait0, progresses := Progresses0} = Action
) ->
    StreamKeysToWait1 = filter_unfinished_streams(S, StreamKeysToWait0),
    Progresses1 = stream_progresses(S, StreamKeysToWait0 -- StreamKeysToWait1) ++ Progresses0,
    case StreamKeysToWait1 of
        [] ->
            ?tp(debug, shared_subs_schedule_action_complete, #{
                share_topic_filter => ShareTopicFilter,
                progresses => format_stream_progresses(Progresses1),
                type => Type
            }),
            %% Regular progress won't see unsubscribed streams, so we need to
            %% send the progress explicitly.
            Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_stream_progress(
                Agent0, #{ShareTopicFilter => Progresses1}
            ),
            case Type of
                {?schedule_subscribe, SubOpts} ->
                    {ok,
                        emqx_persistent_session_ds_shared_subs_agent:on_subscribe(
                            Agent1, ShareTopicFilter, SubOpts
                        )};
                ?schedule_unsubscribe ->
                    {ok,
                        emqx_persistent_session_ds_shared_subs_agent:on_unsubscribe(
                            Agent1, ShareTopicFilter, Progresses1
                        )}
            end;
        _ ->
            Action1 = Action#{stream_keys_to_wait => StreamKeysToWait1, progresses => Progresses1},
            ?tp(debug, shared_subs_schedule_action_continue, #{
                share_topic_filter => ShareTopicFilter,
                new_action => format_schedule_action(Action1)
            }),
            {continue, Action1}
    end.
%% @doc Keep only the stream keys whose streams are not yet fully acked by
%% the client (judged by the committed QoS1/QoS2 sequence numbers).
%% A missing stream record is conservatively treated as unfinished.
filter_unfinished_streams(S, StreamKeysToWait) ->
    CommQos1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S),
    CommQos2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S),
    lists:filter(
        fun(Key) ->
            case emqx_persistent_session_ds_state:get_stream(Key, S) of
                undefined ->
                    %% This should not happen: we should see any stream
                    %% in completed state before deletion
                    true;
                SRS ->
                    not is_stream_fully_acked(CommQos1, CommQos2, SRS)
            end
        end,
        StreamKeysToWait
    ).
%% @doc Build a progress report for each given stream key.
%% NOTE(review): `get_stream/2' is not checked for `undefined' here — this
%% assumes callers only pass keys of existing streams; confirm.
stream_progresses(S, StreamKeys) ->
    CommQos1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S),
    CommQos2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S),
    lists:map(
        fun({_SubId, Stream} = Key) ->
            SRS = emqx_persistent_session_ds_state:get_stream(Key, S),
            stream_progress(CommQos1, CommQos2, Stream, SRS)
        end,
        StreamKeys
    ).
%%--------------------------------------------------------------------
%% on_disconnect
%% @doc Session is disconnecting: revoke all shared streams, report their
%% progress (including streams with unacked messages, hence NeedUnacked =
%% true) to the agent, and drop all scheduled actions.
on_disconnect(S0, #{agent := Agent0} = SharedSubS0) ->
    S1 = revoke_all_streams(S0),
    Progresses = all_stream_progresses(S1, Agent0, _NeedUnacked = true),
    Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_disconnect(Agent0, Progresses),
    SharedSubS1 = SharedSubS0#{agent => Agent1, scheduled_actions => #{}},
    {S1, SharedSubS1}.
%%--------------------------------------------------------------------
%% on_disconnect helpers
%% @doc Revoke every shared-subscription stream in the session by folding
%% the shared stream states through revoke_stream/2.
revoke_all_streams(S0) ->
    fold_shared_stream_states(
        fun(ShareTopicFilter, Stream, _SRS, S) ->
            revoke_stream(#{share_topic_filter => ShareTopicFilter, stream => Stream}, S)
        end,
        S0,
        S0
    ).
%%--------------------------------------------------------------------
%% on_info
%% @doc Forward an opaque info message to the shared-subs agent. The session
%% state itself is returned unchanged.
-spec on_info(emqx_persistent_session_ds_state:t(), t(), term()) ->
    {emqx_persistent_session_ds_state:t(), t()}.
on_info(S, #{agent := Agent0} = SharedSubS0, Info) ->
    Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_info(Agent0, Info),
    SharedSubS1 = SharedSubS0#{agent => Agent1},
    {S, SharedSubS1}.
%%--------------------------------------------------------------------
%% to_map
%% @doc Render all shared subscriptions of the session as a map of
%% share topic filter => subscription (subopts resolved via lookup/2).
-spec to_map(emqx_persistent_session_ds_state:t(), t()) -> map().
to_map(S, _SharedSubS) ->
    fold_shared_subs(
        fun(ShareTopicFilter, _, Acc) -> Acc#{ShareTopicFilter => lookup(ShareTopicFilter, S)} end,
        #{},
        S
    ).
%%--------------------------------------------------------------------
%% cold_get_subscription
%% @doc Read a shared subscription (with its subopts merged in) directly
%% from storage, without an open session state. Returns `undefined' if
%% either the subscription or its current subscription state is missing.
-spec cold_get_subscription(emqx_persistent_session_ds:id(), share_topic_filter()) ->
    emqx_persistent_session_ds:subscription() | undefined.
cold_get_subscription(SessionId, ShareTopicFilter) ->
    case emqx_persistent_session_ds_state:cold_get_subscription(SessionId, ShareTopicFilter) of
        [Sub = #{current_state := SStateId}] ->
            case
                emqx_persistent_session_ds_state:cold_get_subscription_state(SessionId, SStateId)
            of
                [#{subopts := Subopts}] ->
                    Sub#{subopts => Subopts};
                _ ->
                    undefined
            end;
        _ ->
            undefined
    end.
%%--------------------------------------------------------------------
%% Generic helpers
%%--------------------------------------------------------------------
%% @doc Fetch a subscription from open session state and merge in the
%% subopts of its current subscription state. Returns `undefined' when
%% either lookup misses.
lookup(ShareTopicFilter, S) ->
    case emqx_persistent_session_ds_state:get_subscription(ShareTopicFilter, S) of
        Sub = #{current_state := SStateId} ->
            case emqx_persistent_session_ds_state:get_subscription_state(SStateId, S) of
                #{subopts := SubOpts} ->
                    Sub#{subopts => SubOpts};
                undefined ->
                    undefined
            end;
        undefined ->
            undefined
    end.
%% @doc Collect the keys of all streams that belong to the given
%% subscription id.
stream_keys_by_sub_id(S, MatchSubId) ->
    emqx_persistent_session_ds_state:fold_streams(
        fun
            ({SubId, _Stream} = StreamKey, _SRS, Acc) when SubId =:= MatchSubId ->
                [StreamKey | Acc];
            (_StreamKey, _SRS, Acc) ->
                Acc
        end,
        [],
        S
    ).
%% @doc Build one stream progress report. The reported iterator is the end
%% iterator when the stream is fully acked, otherwise the begin iterator
%% (so replay restarts from the last unacked batch).
stream_progress(
    CommQos1,
    CommQos2,
    Stream,
    #srs{
        it_end = EndIt,
        it_begin = BeginIt
    } = SRS
) ->
    Iterator =
        case is_stream_fully_acked(CommQos1, CommQos2, SRS) of
            true -> EndIt;
            false -> BeginIt
        end,
    #{
        stream => Stream,
        progress => #{
            iterator => Iterator
        },
        use_finished => is_use_finished(SRS)
    }.
%% @doc Fold over only the shared (#share{}) subscriptions in session state,
%% skipping regular subscriptions.
fold_shared_subs(Fun, Acc, S) ->
    emqx_persistent_session_ds_state:fold_subscriptions(
        fun
            (#share{} = ShareTopicFilter, Sub, Acc0) -> Fun(ShareTopicFilter, Sub, Acc0);
            (_, _Sub, Acc0) -> Acc0
        end,
        Acc,
        S
    ).
%% @doc Fold over stream replay states that belong to shared subscriptions.
%% First builds a subscription-id => share-topic-filter index, then folds
%% all streams, invoking Fun only for streams whose sub id is in the index.
fold_shared_stream_states(Fun, Acc, S) ->
    %% TODO
    %% Optimize or cache
    ShareTopicFilters = fold_shared_subs(
        fun
            (#share{} = ShareTopicFilter, #{id := Id} = _Sub, Acc0) ->
                Acc0#{Id => ShareTopicFilter};
            (_, _, Acc0) ->
                Acc0
        end,
        #{},
        S
    ),
    emqx_persistent_session_ds_state:fold_streams(
        fun({SubId, Stream}, SRS, Acc0) ->
            case ShareTopicFilters of
                #{SubId := ShareTopicFilter} ->
                    Fun(ShareTopicFilter, Stream, SRS, Acc0);
                _ ->
                    %% Stream of a non-shared subscription: skip.
                    Acc0
            end
        end,
        Acc,
        S
    ).
emqx_persistent_session_ds_shared_subs_agent:subscription().
%% @doc Project a session subscription down to the fields the shared-subs
%% agent needs; currently only `start_time' is forwarded.
%% TODO: decide whether anything from the subscription state is required.
to_agent_subscription(_S, Subscription) ->
    case Subscription of
        #{start_time := StartTime} -> #{start_time => StartTime};
        #{} -> #{}
    end.
%% @doc Build the option map passed to the shared-subs agent; only the
%% session id is carried over.
-spec agent_opts(opts()) -> emqx_persistent_session_ds_shared_subs_agent:opts().
agent_opts(#{session_id := Id}) ->
    #{session_id => Id}.
%% Current system time in milliseconds.
%% NOTE(review): not called from any code visible here — the nowarn
%% presumably silences an "unused function" dialyzer warning; confirm.
-dialyzer({nowarn_function, now_ms/0}).
now_ms() ->
    erlang:system_time(millisecond).
%% A stream's "use" is finished once its subscription has been removed.
is_use_finished(#srs{unsubscribed = Unsubscribed}) ->
    Unsubscribed.
%% True when the committed seqnos have reached the stream batch, i.e.
%% replay of this stream has begun.
%% NOTE(review): Q2 is bound to `last_seqno_qos1' yet compared against the
%% QoS2 committed seqno — looks like it may have been meant to read a QoS2
%% field; confirm whether this is intentional.
is_stream_started(CommQos1, CommQos2, #srs{first_seqno_qos1 = Q1, last_seqno_qos1 = Q2}) ->
    (CommQos1 >= Q1) or (CommQos2 >= Q2).
%% First clause: first == last for both QoS levels means the batch carried
%% no QoS1/QoS2 messages at all.
is_stream_fully_acked(_, _, #srs{
    first_seqno_qos1 = Q1, last_seqno_qos1 = Q1, first_seqno_qos2 = Q2, last_seqno_qos2 = Q2
}) ->
    %% Streams where the last chunk doesn't contain any QoS1 and 2
    %% messages are considered fully acked:
    true;
%% Otherwise: fully acked iff both committed seqnos have caught up with the
%% last seqnos of the batch.
is_stream_fully_acked(Comm1, Comm2, #srs{last_seqno_qos1 = S1, last_seqno_qos2 = S2}) ->
    (Comm1 >= S1) andalso (Comm2 >= S2).
%%--------------------------------------------------------------------
%% Formatters
%%--------------------------------------------------------------------
%% Human-readable rendering of a scheduled action (tracing only).
format_schedule_action(#{
    type := Type, progresses := Progresses, stream_keys_to_wait := StreamKeysToWait
}) ->
    #{
        type => Type,
        progresses => format_stream_progresses(Progresses),
        stream_keys_to_wait => format_stream_keys(StreamKeysToWait)
    }.
%% Render a list of stream progresses for tracing.
format_stream_progresses(Streams) ->
    [format_stream_progress(Progress) || Progress <- Streams].
%% Replace opaque stream/progress terms with short hashes for tracing.
format_stream_progress(#{stream := Stream, progress := Progress} = Value) ->
    Value#{stream => format_opaque(Stream), progress => format_progress(Progress)}.
%% Replace the opaque iterator with a short hash for tracing.
format_progress(#{iterator := Iterator} = Progress) ->
    Progress#{iterator => format_opaque(Iterator)}.
%% `beginning' is a sentinel key and is shown as-is; real keys get their
%% opaque stream component hashed.
format_stream_key(beginning) -> beginning;
format_stream_key({SubId, Stream}) -> {SubId, format_opaque(Stream)}.
%% Render a list of stream keys for tracing.
format_stream_keys(StreamKeys) ->
    [format_stream_key(Key) || Key <- StreamKeys].
%% Render a list of lease/revoke events for tracing.
format_lease_events(Events) ->
    lists:map(
        fun format_lease_event/1,
        Events
    ).
%% Lease events carry a progress map; revoke events only a stream.
format_lease_event(#{stream := Stream, progress := Progress} = Event) ->
    Event#{stream => format_opaque(Stream), progress => format_progress(Progress)};
format_lease_event(#{stream := Stream} = Event) ->
    Event#{stream => format_opaque(Stream)}.
%% Collapse an opaque term into a small integer hash so traces stay short.
format_opaque(Term) ->
    erlang:phash2(Term).

View File

@ -15,7 +15,7 @@
}.
-type t() :: term().
-type share_topic_filter() :: emqx_persistent_session_ds:share_topic_filter().
-type topic_filter() :: emqx_persistent_session_ds:share_topic_filter().
-type opts() :: #{
session_id := session_id()
@ -28,44 +28,41 @@
-type stream_lease() :: #{
type => lease,
%% Used as "external" subscription_id
share_topic_filter := share_topic_filter(),
topic_filter := topic_filter(),
stream := emqx_ds:stream(),
iterator := emqx_ds:iterator()
}.
-type stream_revoke() :: #{
type => revoke,
share_topic_filter := share_topic_filter(),
topic_filter := topic_filter(),
stream := emqx_ds:stream()
}.
-type stream_lease_event() :: stream_lease() | stream_revoke().
-type stream_progress() :: #{
share_topic_filter := share_topic_filter(),
topic_filter := topic_filter(),
stream := emqx_ds:stream(),
iterator := emqx_ds:iterator(),
use_finished := boolean()
iterator := emqx_ds:iterator()
}.
-export_type([
t/0,
subscription/0,
session_id/0,
stream_lease_event/0,
stream_lease/0,
opts/0
]).
-export([
new/1,
open/2,
can_subscribe/3,
on_subscribe/3,
on_unsubscribe/3,
on_unsubscribe/2,
on_stream_progress/2,
on_info/2,
on_disconnect/2,
renew_streams/1
]).
@ -80,13 +77,12 @@
%%--------------------------------------------------------------------
-callback new(opts()) -> t().
-callback open([{share_topic_filter(), subscription()}], opts()) -> t().
-callback can_subscribe(t(), share_topic_filter(), emqx_types:subopts()) -> ok | {error, term()}.
-callback on_subscribe(t(), share_topic_filter(), emqx_types:subopts()) -> t().
-callback on_unsubscribe(t(), share_topic_filter(), [stream_progress()]) -> t().
-callback on_disconnect(t(), [stream_progress()]) -> t().
-callback open([{topic_filter(), subscription()}], opts()) -> t().
-callback on_subscribe(t(), topic_filter(), emqx_types:subopts()) ->
{ok, t()} | {error, term()}.
-callback on_unsubscribe(t(), topic_filter()) -> t().
-callback renew_streams(t()) -> {[stream_lease_event()], t()}.
-callback on_stream_progress(t(), #{share_topic_filter() => [stream_progress()]}) -> t().
-callback on_stream_progress(t(), [stream_progress()]) -> t().
-callback on_info(t(), term()) -> t().
%%--------------------------------------------------------------------
@ -97,31 +93,24 @@
new(Opts) ->
?shared_subs_agent:new(Opts).
-spec open([{share_topic_filter(), subscription()}], opts()) -> t().
-spec open([{topic_filter(), subscription()}], opts()) -> t().
open(Topics, Opts) ->
?shared_subs_agent:open(Topics, Opts).
-spec can_subscribe(t(), share_topic_filter(), emqx_types:subopts()) -> ok | {error, term()}.
can_subscribe(Agent, ShareTopicFilter, SubOpts) ->
?shared_subs_agent:can_subscribe(Agent, ShareTopicFilter, SubOpts).
-spec on_subscribe(t(), topic_filter(), emqx_types:subopts()) ->
{ok, t()} | {error, emqx_types:reason_code()}.
on_subscribe(Agent, TopicFilter, SubOpts) ->
?shared_subs_agent:on_subscribe(Agent, TopicFilter, SubOpts).
-spec on_subscribe(t(), share_topic_filter(), emqx_types:subopts()) -> t().
on_subscribe(Agent, ShareTopicFilter, SubOpts) ->
?shared_subs_agent:on_subscribe(Agent, ShareTopicFilter, SubOpts).
-spec on_unsubscribe(t(), share_topic_filter(), [stream_progress()]) -> t().
on_unsubscribe(Agent, ShareTopicFilter, StreamProgresses) ->
?shared_subs_agent:on_unsubscribe(Agent, ShareTopicFilter, StreamProgresses).
-spec on_disconnect(t(), #{share_topic_filter() => [stream_progress()]}) -> t().
on_disconnect(Agent, StreamProgresses) ->
?shared_subs_agent:on_disconnect(Agent, StreamProgresses).
-spec on_unsubscribe(t(), topic_filter()) -> t().
on_unsubscribe(Agent, TopicFilter) ->
?shared_subs_agent:on_unsubscribe(Agent, TopicFilter).
-spec renew_streams(t()) -> {[stream_lease_event()], t()}.
renew_streams(Agent) ->
?shared_subs_agent:renew_streams(Agent).
-spec on_stream_progress(t(), #{share_topic_filter() => [stream_progress()]}) -> t().
-spec on_stream_progress(t(), [stream_progress()]) -> t().
on_stream_progress(Agent, StreamProgress) ->
?shared_subs_agent:on_stream_progress(Agent, StreamProgress).

View File

@ -9,13 +9,11 @@
-export([
new/1,
open/2,
can_subscribe/3,
on_subscribe/3,
on_unsubscribe/3,
on_unsubscribe/2,
on_stream_progress/2,
on_info/2,
on_disconnect/2,
renew_streams/1
]).
@ -32,16 +30,10 @@ new(_Opts) ->
open(_Topics, _Opts) ->
undefined.
can_subscribe(_Agent, _TopicFilter, _SubOpts) ->
on_subscribe(_Agent, _TopicFilter, _SubOpts) ->
{error, ?RC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED}.
on_subscribe(Agent, _TopicFilter, _SubOpts) ->
Agent.
on_unsubscribe(Agent, _TopicFilter, _Progresses) ->
Agent.
on_disconnect(Agent, _) ->
on_unsubscribe(Agent, _TopicFilter) ->
Agent.
renew_streams(Agent) ->

View File

@ -39,7 +39,7 @@
-export([get_peername/1, set_peername/2]).
-export([get_protocol/1, set_protocol/2]).
-export([new_id/1]).
-export([get_stream/2, put_stream/3, del_stream/2, fold_streams/3, iter_streams/2, n_streams/1]).
-export([get_stream/2, put_stream/3, del_stream/2, fold_streams/3, n_streams/1]).
-export([get_seqno/2, put_seqno/3]).
-export([get_rank/2, put_rank/3, del_rank/2, fold_ranks/3]).
-export([
@ -66,14 +66,11 @@
n_awaiting_rel/1
]).
-export([iter_next/1]).
-export([make_session_iterator/0, session_iterator_next/2]).
-export_type([
t/0,
metadata/0,
iter/2,
seqno_type/0,
stream_key/0,
rank_key/0,
@ -92,8 +89,6 @@
-type message() :: emqx_types:message().
-opaque iter(K, V) :: gb_trees:iter(K, V).
-opaque session_iterator() :: emqx_persistent_session_ds:id() | '$end_of_table'.
%% Generic key-value wrapper that is used for exporting arbitrary
@ -118,7 +113,7 @@
-type pmap(K, V) ::
#pmap{
table :: atom(),
cache :: #{K => V} | gb_trees:tree(K, V),
cache :: #{K => V},
dirty :: #{K => dirty | del}
}.
@ -399,9 +394,7 @@ new_id(Rec) ->
get_subscription(TopicFilter, Rec) ->
gen_get(?subscriptions, TopicFilter, Rec).
-spec cold_get_subscription(
emqx_persistent_session_ds:id(), emqx_types:topic() | emqx_types:share()
) ->
-spec cold_get_subscription(emqx_persistent_session_ds:id(), emqx_types:topic()) ->
[emqx_persistent_session_ds_subs:subscription()].
cold_get_subscription(SessionId, Topic) ->
kv_pmap_read(?subscription_tab, SessionId, Topic).
@ -483,14 +476,6 @@ del_stream(Key, Rec) ->
fold_streams(Fun, Acc, Rec) ->
gen_fold(?streams, Fun, Acc, Rec).
-spec iter_streams(_StartAfter :: stream_key() | beginning, t()) ->
iter(stream_key(), emqx_persistent_session_ds:stream_state()).
iter_streams(After, Rec) ->
%% NOTE
%% No special handling for `beginning', as it always compares less
%% than any `stream_key()'.
gen_iter_after(?streams, After, Rec).
-spec n_streams(t()) -> non_neg_integer().
n_streams(Rec) ->
gen_size(?streams, Rec).
@ -549,12 +534,6 @@ n_awaiting_rel(Rec) ->
%%
-spec iter_next(iter(K, V)) -> {K, V, iter(K, V)} | none.
iter_next(It0) ->
gen_iter_next(It0).
%%
-spec make_session_iterator() -> session_iterator().
make_session_iterator() ->
mnesia:dirty_first(?session_tab).
@ -622,14 +601,6 @@ gen_size(Field, Rec) ->
check_sequence(Rec),
pmap_size(maps:get(Field, Rec)).
gen_iter_after(Field, After, Rec) ->
check_sequence(Rec),
pmap_iter_after(After, maps:get(Field, Rec)).
gen_iter_next(It) ->
%% NOTE: Currently, gbt iterators is the only type of iterators.
gbt_iter_next(It).
-spec update_pmaps(fun((pmap(_K, _V) | undefined, atom()) -> term()), map()) -> map().
update_pmaps(Fun, Map) ->
lists:foldl(
@ -648,7 +619,7 @@ update_pmaps(Fun, Map) ->
%% This functtion should be ran in a transaction.
-spec pmap_open(atom(), emqx_persistent_session_ds:id()) -> pmap(_K, _V).
pmap_open(Table, SessionId) ->
Clean = cache_from_list(Table, kv_pmap_restore(Table, SessionId)),
Clean = maps:from_list(kv_pmap_restore(Table, SessionId)),
#pmap{
table = Table,
cache = Clean,
@ -656,29 +627,29 @@ pmap_open(Table, SessionId) ->
}.
-spec pmap_get(K, pmap(K, V)) -> V | undefined.
pmap_get(K, #pmap{table = Table, cache = Cache}) ->
cache_get(Table, K, Cache).
pmap_get(K, #pmap{cache = Cache}) ->
maps:get(K, Cache, undefined).
-spec pmap_put(K, V, pmap(K, V)) -> pmap(K, V).
pmap_put(K, V, Pmap = #pmap{table = Table, dirty = Dirty, cache = Cache}) ->
pmap_put(K, V, Pmap = #pmap{dirty = Dirty, cache = Cache}) ->
Pmap#pmap{
cache = cache_put(Table, K, V, Cache),
cache = maps:put(K, V, Cache),
dirty = Dirty#{K => dirty}
}.
-spec pmap_del(K, pmap(K, V)) -> pmap(K, V).
pmap_del(
Key,
Pmap = #pmap{table = Table, dirty = Dirty, cache = Cache}
Pmap = #pmap{dirty = Dirty, cache = Cache}
) ->
Pmap#pmap{
cache = cache_remove(Table, Key, Cache),
cache = maps:remove(Key, Cache),
dirty = Dirty#{Key => del}
}.
-spec pmap_fold(fun((K, V, A) -> A), A, pmap(K, V)) -> A.
pmap_fold(Fun, Acc, #pmap{table = Table, cache = Cache}) ->
cache_fold(Table, Fun, Acc, Cache).
pmap_fold(Fun, Acc, #pmap{cache = Cache}) ->
maps:fold(Fun, Acc, Cache).
-spec pmap_commit(emqx_persistent_session_ds:id(), pmap(K, V)) -> pmap(K, V).
pmap_commit(
@ -689,7 +660,7 @@ pmap_commit(
(K, del) ->
kv_pmap_delete(Tab, SessionId, K);
(K, dirty) ->
V = cache_get(Tab, K, Cache),
V = maps:get(K, Cache),
kv_pmap_persist(Tab, SessionId, K, V)
end,
Dirty
@ -699,110 +670,13 @@ pmap_commit(
}.
-spec pmap_format(pmap(_K, _V)) -> map().
pmap_format(#pmap{table = Table, cache = Cache}) ->
cache_format(Table, Cache).
-spec pmap_size(pmap(_K, _V)) -> non_neg_integer().
pmap_size(#pmap{table = Table, cache = Cache}) ->
cache_size(Table, Cache).
pmap_iter_after(After, #pmap{table = Table, cache = Cache}) ->
%% NOTE: Only valid for gbt-backed PMAPs.
gbt = cache_data_type(Table),
gbt_iter_after(After, Cache).
%%
cache_data_type(?stream_tab) -> gbt;
cache_data_type(_Table) -> map.
cache_from_list(?stream_tab, L) ->
gbt_from_list(L);
cache_from_list(_Table, L) ->
maps:from_list(L).
cache_get(?stream_tab, K, Cache) ->
gbt_get(K, Cache, undefined);
cache_get(_Table, K, Cache) ->
maps:get(K, Cache, undefined).
cache_put(?stream_tab, K, V, Cache) ->
gbt_put(K, V, Cache);
cache_put(_Table, K, V, Cache) ->
maps:put(K, V, Cache).
cache_remove(?stream_tab, K, Cache) ->
gbt_remove(K, Cache);
cache_remove(_Table, K, Cache) ->
maps:remove(K, Cache).
cache_fold(?stream_tab, Fun, Acc, Cache) ->
gbt_fold(Fun, Acc, Cache);
cache_fold(_Table, Fun, Acc, Cache) ->
maps:fold(Fun, Acc, Cache).
cache_format(?stream_tab, Cache) ->
gbt_format(Cache);
cache_format(_Table, Cache) ->
pmap_format(#pmap{cache = Cache}) ->
Cache.
cache_size(?stream_tab, Cache) ->
gbt_size(Cache);
cache_size(_Table, Cache) ->
-spec pmap_size(pmap(_K, _V)) -> non_neg_integer().
pmap_size(#pmap{cache = Cache}) ->
maps:size(Cache).
%% PMAP Cache implementation backed by `gb_trees'.
%% Supports iteration starting from specific key.
%% Build a gb_tree from a key/value list. Uses gb_trees:insert/3, so a
%% duplicate key in the input crashes — same contract as before.
gbt_from_list(KVs) ->
    gbt_from_list(KVs, gb_trees:empty()).

gbt_from_list([], Tree) ->
    Tree;
gbt_from_list([{Key, Val} | Rest], Tree) ->
    gbt_from_list(Rest, gb_trees:insert(Key, Val, Tree)).
%% Look up Key in a gb_tree, returning `undefined' on a miss.
%% (The third argument is always the literal default `undefined'.)
gbt_get(Key, Tree, undefined) ->
    case gb_trees:lookup(Key, Tree) of
        {value, Val} -> Val;
        none -> undefined
    end.
%% Insert-or-update Key in the tree (gb_trees:enter/3 semantics).
gbt_put(Key, Val, Tree) ->
    gb_trees:enter(Key, Val, Tree).
%% Delete Key if present; a missing key is a no-op (delete_any semantics).
gbt_remove(Key, Tree) ->
    gb_trees:delete_any(Key, Tree).
%% Render the cache as an ordered key/value list (for introspection).
gbt_format(Tree) ->
    gb_trees:to_list(Tree).
%% In-order fold over the tree, mirroring maps:fold/3's callback shape.
gbt_fold(Fun, Acc, Tree) ->
    gbt_fold_iter(Fun, Acc, gb_trees:iterator(Tree)).

%% Worker: consume the iterator until exhausted, threading the accumulator.
gbt_fold_iter(Fun, Acc0, It0) ->
    case gb_trees:next(It0) of
        {Key, Val, It} ->
            gbt_fold_iter(Fun, Fun(Key, Val, Acc0), It);
        none ->
            Acc0
    end.
%% Number of entries in the cache.
gbt_size(Tree) ->
    gb_trees:size(Tree).
%% Iterator positioned strictly after key `After': start from the first key
%% >= After and skip the head element when its key is exactly After.
gbt_iter_after(After, Tree) ->
    It0 = gb_trees:iterator_from(After, Tree),
    skip_if_exact(After, It0, gb_trees:next(It0)).

%% Drop the head only when its key equals the requested lower bound.
skip_if_exact(Key, _It0, {Key, _Val, It}) ->
    It;
skip_if_exact(_Key, It0, _Other) ->
    It0.
%% Advance the iterator: {Key, Val, NextIter} or `none' when exhausted.
gbt_iter_next(Iter) ->
    gb_trees:next(Iter).
%% Functions dealing with set tables:
kv_persist(Tab, SessionId, Val0) ->

View File

@ -16,8 +16,7 @@
-module(emqx_persistent_session_ds_stream_scheduler).
%% API:
-export([iter_next_streams/2, next_stream/1]).
-export([find_replay_streams/1, is_fully_acked/2]).
-export([find_new_streams/1, find_replay_streams/1, is_fully_acked/2]).
-export([renew_streams/1, on_unsubscribe/2]).
%% behavior callbacks:
@ -36,29 +35,6 @@
%% Type declarations
%%================================================================================
-type stream_key() :: emqx_persistent_session_ds_state:stream_key().
-type stream_state() :: emqx_persistent_session_ds:stream_state().
%% Restartable iterator with a filter and an iteration limit.
-record(iter, {
limit :: non_neg_integer(),
filter,
it,
it_cont
}).
-type iter(K, V, IterInner) :: #iter{
filter :: fun((K, V) -> boolean()),
it :: IterInner,
it_cont :: IterInner
}.
-type iter_stream() :: iter(
stream_key(),
stream_state(),
emqx_persistent_session_ds_state:iter(stream_key(), stream_state())
).
%%================================================================================
%% API functions
%%================================================================================
@ -94,9 +70,9 @@ find_replay_streams(S) ->
%%
%% This function is non-detereministic: it randomizes the order of
%% streams to ensure fair replay of different topics.
-spec iter_next_streams(_LastVisited :: stream_key(), emqx_persistent_session_ds_state:t()) ->
iter_stream().
iter_next_streams(LastVisited, S) ->
-spec find_new_streams(emqx_persistent_session_ds_state:t()) ->
[{emqx_persistent_session_ds_state:stream_key(), emqx_persistent_session_ds:stream_state()}].
find_new_streams(S) ->
%% FIXME: this function is currently very sensitive to the
%% consistency of the packet IDs on both broker and client side.
%%
@ -111,44 +87,23 @@ iter_next_streams(LastVisited, S) ->
%% after timeout?)
Comm1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S),
Comm2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S),
Filter = fun(_Key, Stream) -> is_fetchable(Comm1, Comm2, Stream) end,
#iter{
%% Limit the iteration to one round over all streams:
limit = emqx_persistent_session_ds_state:n_streams(S),
%% Filter out the streams not eligible for fetching:
filter = Filter,
%% Start the iteration right after the last visited stream:
it = emqx_persistent_session_ds_state:iter_streams(LastVisited, S),
%% Restart the iteration from the beginning:
it_cont = emqx_persistent_session_ds_state:iter_streams(beginning, S)
}.
-spec next_stream(iter_stream()) -> {stream_key(), stream_state(), iter_stream()} | none.
next_stream(#iter{limit = 0}) ->
none;
next_stream(ItStream0 = #iter{limit = N, filter = Filter, it = It0, it_cont = ItCont}) ->
case emqx_persistent_session_ds_state:iter_next(It0) of
{Key, Stream, It} ->
ItStream = ItStream0#iter{it = It, limit = N - 1},
case Filter(Key, Stream) of
true ->
{Key, Stream, ItStream};
false ->
next_stream(ItStream)
end;
none when It0 =/= ItCont ->
%% Restart the iteration from the beginning:
ItStream = ItStream0#iter{it = ItCont},
next_stream(ItStream);
none ->
%% No point in restarting the iteration, `ItCont` is empty:
none
end.
%% A stream is not fetchable once it has reached end_of_stream; otherwise it
%% must be fully acked and still subscribed.
is_fetchable(_Comm1, _Comm2, #srs{it_end = end_of_stream}) ->
    false;
is_fetchable(Comm1, Comm2, #srs{unsubscribed = Unsubscribed} = Stream) ->
    is_fully_acked(Comm1, Comm2, Stream) andalso not Unsubscribed.
shuffle(
emqx_persistent_session_ds_state:fold_streams(
fun
(_Key, #srs{it_end = end_of_stream}, Acc) ->
Acc;
(Key, Stream, Acc) ->
case is_fully_acked(Comm1, Comm2, Stream) andalso not Stream#srs.unsubscribed of
true ->
[{Key, Stream} | Acc];
false ->
Acc
end
end,
[],
S
)
).
%% @doc This function makes the session aware of the new streams.
%%
@ -455,6 +410,19 @@ is_fully_acked(_, _, #srs{
is_fully_acked(Comm1, Comm2, #srs{last_seqno_qos1 = S1, last_seqno_qos2 = S2}) ->
(Comm1 >= S1) andalso (Comm2 >= S2).
%% @doc Return the elements of the list in random order: tag each element
%% with a random float, sort on the tag, then strip the tags.
%% (maybe topic/stream prioritization could be introduced here?)
-spec shuffle([A]) -> [A].
shuffle(Items) ->
    Tagged = [{rand:uniform(), Item} || Item <- Items],
    [Item || {_Tag, Item} <- lists:sort(Tagged)].
fold_proper_subscriptions(Fun, Acc, S) ->
emqx_persistent_session_ds_state:fold_subscriptions(
fun

View File

@ -21,7 +21,7 @@
-record(ps_route, {
topic :: binary(),
dest :: emqx_persistent_session_ds_router:dest() | '_'
dest :: emqx_persistent_session_ds:id() | '_'
}).
-record(ps_routeidx, {

View File

@ -21,7 +21,6 @@
%% Till full implementation we need to dispach to the null agent.
%% It will report "not implemented" error for attempts to use shared subscriptions.
-define(shared_subs_agent, emqx_persistent_session_ds_shared_subs_null_agent).
% -define(shared_subs_agent, emqx_ds_shared_sub_agent).
%% end of -ifdef(TEST).
-endif.

View File

@ -1,40 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2017-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_post_upgrade).
%% Example of a hot upgrade callback function.
%% PR#12765
% -export([
% pr12765_update_stats_timer/1,
% pr20000_ensure_sup_started/3
% ]).
%% Please ensure that every callback function is reentrant.
%% This way, users can attempt upgrade multiple times if an issue arises.
%%
% pr12765_update_stats_timer(_FromVsn) ->
% emqx_stats:update_interval(broker_stats, fun emqx_broker_helper:stats_fun/0).
%
% pr20000_ensure_sup_started(_FromVsn, "5.6.1" ++ _, ChildSpec) ->
% ChildId = maps:get(id, ChildSpec),
% case supervisor:terminate_child(emqx_sup, ChildId) of
% ok -> supervisor:delete_child(emqx_sup, ChildId);
% Error -> Error
% end,
% supervisor:start_child(emqx_sup, ChildSpec);
% pr20000_ensure_sup_started(_FromVsn, _TargetVsn, _) ->
% ok.

View File

@ -62,7 +62,7 @@
streams := [{pid(), quicer:stream_handle()}],
%% New stream opts
stream_opts := map(),
%% If connection is resumed from session ticket
%% If connection is resumed from session ticket
is_resumed => boolean(),
%% mqtt message serializer config
serialize => undefined,
@ -70,8 +70,8 @@
}.
-type cb_ret() :: quicer_lib:cb_ret().
%% @doc Data streams initializations are started in parallel with control streams, data streams are blocked
%% for the activation from control stream after it is accepted as a legit connection.
%% @doc Data stream initializations are started in parallel with control streams; data streams are blocked
%% for the activation from control stream after it is accepted as a legit connection.
%% For security, the initial number of allowed data streams from client should be limited by
%% 'peer_bidi_stream_count` & 'peer_unidi_stream_count`
-spec activate_data_streams(pid(), {
@ -80,7 +80,7 @@
activate_data_streams(ConnOwner, {PS, Serialize, Channel}) ->
gen_server:call(ConnOwner, {activate_data_streams, {PS, Serialize, Channel}}, infinity).
%% @doc connection owner init callback
%% @doc conneciton owner init callback
-spec init(map()) -> {ok, cb_state()}.
init(#{stream_opts := SOpts} = S) when is_list(SOpts) ->
init(S#{stream_opts := maps:from_list(SOpts)});

View File

@ -39,8 +39,7 @@
getopts/2,
peername/1,
sockname/1,
peercert/1,
peersni/1
peercert/1
]).
-include_lib("quicer/include/quicer.hrl").
-include_lib("emqx/include/emqx_quic.hrl").
@ -107,10 +106,6 @@ peercert(_S) ->
%% @todo but unsupported by msquic
nossl.
peersni(_S) ->
%% @todo
undefined.
getstat({quic, Conn, _Stream, _Info}, Stats) ->
case quicer:getstat(Conn, Stats) of
{error, _} -> {error, closed};

View File

@ -0,0 +1,42 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2017-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_relup).
%% NOTE: DO NOT remove this `-include`.
%% We use this to force this module to be upgraded every release.
-include("emqx_release.hrl").
-export([
post_release_upgrade/2,
post_release_downgrade/2
]).
-define(INFO(FORMAT), io:format("[emqx_relup] " ++ FORMAT ++ "~n")).
-define(INFO(FORMAT, ARGS), io:format("[emqx_relup] " ++ FORMAT ++ "~n", ARGS)).
%% What to do after upgraded from an old release vsn.
post_release_upgrade(FromRelVsn, _) ->
?INFO("emqx has been upgraded from ~s to ~s!", [FromRelVsn, emqx_release:version()]),
reload_components().
%% What to do after downgraded to an old release vsn.
post_release_downgrade(ToRelVsn, _) ->
?INFO("emqx has been downgraded from ~s to ~s!", [emqx_release:version(), ToRelVsn]),
reload_components().
reload_components() ->
ok.

View File

@ -137,7 +137,7 @@ maybe_badrpc(Delivery) ->
Delivery.
max_client_num() ->
emqx:get_config([rpc, client_num], ?DefaultClientNum).
emqx:get_config([rpc, tcp_client_num], ?DefaultClientNum).
-spec unwrap_erpc(emqx_rpc:erpc(A) | [emqx_rpc:erpc(A)]) -> A | {error, _Err} | list().
unwrap_erpc(Res) when is_list(Res) ->

View File

@ -63,7 +63,6 @@
-type json_binary() :: binary().
-type template() :: binary().
-type template_str() :: string().
-type binary_kv() :: #{binary() => binary()}.
-typerefl_from_string({duration/0, emqx_schema, to_duration}).
-typerefl_from_string({duration_s/0, emqx_schema, to_duration_s}).
@ -168,8 +167,7 @@
json_binary/0,
port_number/0,
template/0,
template_str/0,
binary_kv/0
template_str/0
]).
-export([namespace/0, roots/0, roots/1, fields/1, desc/1, tags/0]).
@ -321,11 +319,6 @@ roots(low) ->
sc(
ref("crl_cache"),
#{importance => ?IMPORTANCE_HIDDEN}
)},
{banned,
sc(
ref("banned"),
#{importance => ?IMPORTANCE_HIDDEN}
)}
].
@ -351,7 +344,6 @@ fields("authz_cache") ->
#{
default => true,
required => true,
importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(fields_cache_enable)
}
)},
@ -388,7 +380,6 @@ fields("flapping_detect") ->
boolean(),
#{
default => false,
%% importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(flapping_detect_enable)
}
)},
@ -425,7 +416,6 @@ fields("force_shutdown") ->
boolean(),
#{
default => true,
importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(force_shutdown_enable)
}
)},
@ -455,7 +445,6 @@ fields("overload_protection") ->
boolean(),
#{
desc => ?DESC(overload_protection_enable),
%% importance => ?IMPORTANCE_NO_DOC,
default => false
}
)},
@ -516,11 +505,7 @@ fields("force_gc") ->
{"enable",
sc(
boolean(),
#{
default => true,
importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(force_gc_enable)
}
#{default => true, desc => ?DESC(force_gc_enable)}
)},
{"count",
sc(
@ -1673,7 +1658,6 @@ fields("durable_sessions") ->
sc(
boolean(), #{
desc => ?DESC(durable_sessions_enable),
%% importance => ?IMPORTANCE_NO_DOC,
default => false
}
)},
@ -1778,17 +1762,6 @@ fields("client_attrs_init") ->
desc => ?DESC("client_attrs_init_set_as_attr"),
validator => fun restricted_string/1
})}
];
fields("banned") ->
[
{bootstrap_file,
sc(
binary(),
#{
desc => ?DESC("banned_bootstrap_file"),
require => false
}
)}
].
compile_variform(undefined, _Opts) ->
@ -1897,7 +1870,6 @@ base_listener(Bind) ->
#{
default => true,
aliases => [enabled],
importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(fields_listener_enabled)
}
)},
@ -2129,8 +2101,6 @@ desc(durable_storage) ->
?DESC(durable_storage);
desc("client_attrs_init") ->
?DESC(client_attrs_init);
desc("banned") ->
"Banned .";
desc(_) ->
undefined.
@ -2426,7 +2396,6 @@ client_ssl_opts_schema(Defaults) ->
boolean(),
#{
default => false,
%% importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(client_ssl_opts_schema_enable)
}
)},
@ -3518,7 +3487,6 @@ mqtt_general() ->
)},
{"max_clientid_len",
sc(
%% MQTT-v3.1.1-[MQTT-3.1.3-5], MQTT-v5.0-[MQTT-3.1.3-5]
range(23, 65535),
#{
default => 65535,
@ -3640,17 +3608,9 @@ mqtt_general() ->
desc => ?DESC(mqtt_keepalive_multiplier)
}
)},
{"keepalive_check_interval",
sc(
timeout_duration(),
#{
default => <<"30s">>,
desc => ?DESC(mqtt_keepalive_check_interval)
}
)},
{"retry_interval",
sc(
hoconsc:union([infinity, timeout_duration()]),
hoconsc:union([infinity, duration()]),
#{
default => infinity,
desc => ?DESC(mqtt_retry_interval)

View File

@ -589,14 +589,6 @@ ensure_valid_options(Options, Versions) ->
ensure_valid_options([], _, Acc) ->
lists:reverse(Acc);
ensure_valid_options([{K, undefined} | T], Versions, Acc) when
K =:= crl_check;
K =:= crl_cache
->
%% Note: we must set crl options to `undefined' to unset them. Otherwise,
%% `esockd' will retain such options when `esockd:merge_opts/2' is called and the SSL
%% options were previously enabled.
ensure_valid_options(T, Versions, [{K, undefined} | Acc]);
ensure_valid_options([{_, undefined} | T], Versions, Acc) ->
ensure_valid_options(T, Versions, Acc);
ensure_valid_options([{_, ""} | T], Versions, Acc) ->

View File

@ -17,6 +17,7 @@
-include("emqx_mqtt.hrl").
-export([format/2]).
-export([format_meta_map/1]).
%% logger_formatter:config/0 is not exported.
-type config() :: map().
@ -42,6 +43,10 @@ format(
format(Event, Config) ->
emqx_logger_textfmt:format(Event, Config).
format_meta_map(Meta) ->
Encode = emqx_trace_handler:payload_encode(),
format_meta_map(Meta, Encode).
format_meta_map(Meta, Encode) ->
format_meta_map(Meta, Encode, [
{packet, fun format_packet/2},

View File

@ -280,7 +280,7 @@ websocket_init([Req, Opts]) ->
#{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener} = ListenerCfg} = Opts,
case check_max_connection(Type, Listener) of
allow ->
{Peername, PeerCert, PeerSNI} = get_peer_info(Type, Listener, Req, Opts),
{Peername, PeerCert} = get_peer_info(Type, Listener, Req, Opts),
Sockname = cowboy_req:sock(Req),
WsCookie = get_ws_cookie(Req),
ConnInfo = #{
@ -288,7 +288,6 @@ websocket_init([Req, Opts]) ->
peername => Peername,
sockname => Sockname,
peercert => PeerCert,
peersni => PeerSNI,
ws_cookie => WsCookie,
conn_mod => ?MODULE
},
@ -377,12 +376,11 @@ get_ws_cookie(Req) ->
end.
get_peer_info(Type, Listener, Req, Opts) ->
Host = maps:get(host, Req, undefined),
case
emqx_config:get_listener_conf(Type, Listener, [proxy_protocol]) andalso
maps:get(proxy_header, Req)
of
#{src_address := SrcAddr, src_port := SrcPort, ssl := SSL} = ProxyInfo ->
#{src_address := SrcAddr, src_port := SrcPort, ssl := SSL} ->
SourceName = {SrcAddr, SrcPort},
%% Notice: CN is only available in Proxy Protocol V2 additional info.
%% `CN` is unsupported in Proxy Protocol V1
@ -394,14 +392,12 @@ get_peer_info(Type, Listener, Req, Opts) ->
undefined -> undefined;
CN -> [{pp2_ssl_cn, CN}]
end,
PeerSNI = maps:get(authority, ProxyInfo, Host),
{SourceName, SourceSSL, PeerSNI};
#{src_address := SrcAddr, src_port := SrcPort} = ProxyInfo ->
PeerSNI = maps:get(authority, ProxyInfo, Host),
{SourceName, SourceSSL};
#{src_address := SrcAddr, src_port := SrcPort} ->
SourceName = {SrcAddr, SrcPort},
{SourceName, nossl, PeerSNI};
{SourceName, nossl};
_ ->
{get_peer(Req, Opts), cowboy_req:cert(Req), Host}
{get_peer(Req, Opts), cowboy_req:cert(Req)}
end.
websocket_handle({binary, Data}, State) when is_list(Data) ->
@ -436,7 +432,6 @@ websocket_handle({Frame, _}, State) ->
%% TODO: should not close the ws connection
?LOG(error, #{msg => "unexpected_frame", frame => Frame}),
shutdown(unexpected_ws_frame, State).
websocket_info({call, From, Req}, State) ->
handle_call(From, Req, State);
websocket_info({cast, rate_limit}, State) ->
@ -456,8 +451,8 @@ websocket_info({incoming, Packet}, State) ->
handle_incoming(Packet, State);
websocket_info({outgoing, Packets}, State) ->
return(enqueue(Packets, State));
websocket_info({check_gc, Cnt, Oct}, State) ->
return(check_oom(run_gc(Cnt, Oct, State)));
websocket_info({check_gc, Stats}, State) ->
return(check_oom(run_gc(Stats, State)));
websocket_info(
Deliver = {deliver, _Topic, _Msg},
State = #state{listener = {Type, Listener}}
@ -560,7 +555,8 @@ handle_info(Info, State) ->
handle_timeout(TRef, idle_timeout, State = #state{idle_timer = TRef}) ->
shutdown(idle_timeout, State);
handle_timeout(TRef, keepalive, State) when is_reference(TRef) ->
with_channel(handle_timeout, [TRef, keepalive], State);
RecvOct = emqx_pd:get_counter(recv_oct),
handle_timeout(TRef, {keepalive, RecvOct}, State);
handle_timeout(
TRef,
emit_stats,
@ -695,8 +691,8 @@ when_msg_in(Packets, Msgs, State) ->
%% Run GC, Check OOM
%%--------------------------------------------------------------------
run_gc(Cnt, Oct, State = #state{gc_state = GcSt}) ->
case ?ENABLED(GcSt) andalso emqx_gc:run(Cnt, Oct, GcSt) of
run_gc(Stats, State = #state{gc_state = GcSt}) ->
case ?ENABLED(GcSt) andalso emqx_gc:run(Stats, GcSt) of
false -> State;
{_IsGC, GcSt1} -> State#state{gc_state = GcSt1}
end.
@ -738,8 +734,7 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) ->
input_bytes => Data
}),
FrameError = {frame_error, Reason},
NState = enrich_state(Reason, State),
{[{incoming, FrameError} | Packets], NState};
{[{incoming, FrameError} | Packets], State};
error:Reason:Stacktrace ->
?LOG(error, #{
at_state => emqx_frame:describe_state(ParseState),
@ -810,9 +805,11 @@ handle_outgoing(
get_active_n(Type, Listener)
of
true ->
Cnt = emqx_pd:reset_counter(outgoing_pubs),
Oct = emqx_pd:reset_counter(outgoing_bytes),
postpone({check_gc, Cnt, Oct}, State);
Stats = #{
cnt => emqx_pd:reset_counter(outgoing_pubs),
oct => emqx_pd:reset_counter(outgoing_bytes)
},
postpone({check_gc, Stats}, State);
false ->
State
end,
@ -832,7 +829,7 @@ serialize_and_inc_stats_fun(#state{serialize = Serialize}) ->
?LOG(warning, #{
msg => "packet_discarded",
reason => "frame_too_large",
packet => Packet
packet => emqx_packet:format(Packet)
}),
ok = emqx_metrics:inc('delivery.dropped.too_large'),
ok = emqx_metrics:inc('delivery.dropped'),
@ -1071,13 +1068,6 @@ check_max_connection(Type, Listener) ->
{denny, Reason}
end
end.
enrich_state(#{parse_state := NParseState}, State) ->
Serialize = emqx_frame:serialize_opts(NParseState),
State#state{parse_state = NParseState, serialize = Serialize};
enrich_state(_, State) ->
State.
%%--------------------------------------------------------------------
%% For CT tests
%%--------------------------------------------------------------------

View File

@ -1,4 +0,0 @@
as,who,reason,at,until,by
clientid,c1,right,2021-10-25T21:53:47+08:00,2025-10-25T21:53:47+08:00,boot
username,u1,reason 1,abc,2025-10-25T21:53:47+08:00,boot
usernamx,u2,reason 2,2021-10-25T21:53:47+08:00,2025-10-25T21:53:47+08:00,boot
1 as who reason at until by
2 clientid c1 right 2021-10-25T21:53:47+08:00 2025-10-25T21:53:47+08:00 boot
3 username u1 reason 1 abc 2025-10-25T21:53:47+08:00 boot
4 usernamx u2 reason 2 2021-10-25T21:53:47+08:00 2025-10-25T21:53:47+08:00 boot

View File

@ -1,3 +0,0 @@
as,who,reason,at,until,by
clientid,c1,reason 1,2021-10-25T21:53:47+08:00,2025-10-25T21:53:47+08:00,boot
username,u1,reason 2,2021-10-25T21:53:47+08:00,2025-10-25T21:53:47+08:00,boot
1 as who reason at until by
2 clientid c1 reason 1 2021-10-25T21:53:47+08:00 2025-10-25T21:53:47+08:00 boot
3 username u1 reason 2 2021-10-25T21:53:47+08:00 2025-10-25T21:53:47+08:00 boot

View File

@ -1,3 +0,0 @@
as,who,reason,at,until,by
clientid,c2,reason 1,2021-10-25T21:53:47+08:00,2025-10-25T21:53:47+08:00,boot
username,u2,reason 2,2021-10-25T21:53:47+08:00,2025-10-25T21:53:47+08:00,boot
1 as who reason at until by
2 clientid c2 reason 1 2021-10-25T21:53:47+08:00 2025-10-25T21:53:47+08:00 boot
3 username u2 reason 2 2021-10-25T21:53:47+08:00 2025-10-25T21:53:47+08:00 boot

View File

@ -1,3 +0,0 @@
as,who,reason,at,until,by
clientid,c1,,2021-10-25T21:53:47+08:00,2025-10-25T21:53:47+08:00,
username,u1,,2021-10-25T21:53:47+08:00,2025-10-25T21:53:47+08:00,
1 as who reason at until by
2 clientid c1 2021-10-25T21:53:47+08:00 2025-10-25T21:53:47+08:00
3 username u1 2021-10-25T21:53:47+08:00 2025-10-25T21:53:47+08:00

View File

@ -1,3 +0,0 @@
as,who
clientid,c1
username,u1
1 as who
2 clientid c1
3 username u1

View File

@ -37,7 +37,7 @@ end_per_suite(Config) ->
%%--------------------------------------------------------------------
t_cache_exclude(_) ->
ClientId = atom_to_binary(?FUNCTION_NAME),
ClientId = <<"test-id1">>,
{ok, Client} = emqtt:start_link([{clientid, ClientId}]),
{ok, _} = emqtt:connect(Client),
{ok, _, _} = emqtt:subscribe(Client, <<"nocache/+/#">>, 0),
@ -47,12 +47,11 @@ t_cache_exclude(_) ->
emqtt:stop(Client).
t_clean_authz_cache(_) ->
ClientId = atom_to_binary(?FUNCTION_NAME),
{ok, Client} = emqtt:start_link([{clientid, ClientId}]),
{ok, Client} = emqtt:start_link([{clientid, <<"emqx_c">>}]),
{ok, _} = emqtt:connect(Client),
{ok, _, _} = emqtt:subscribe(Client, <<"t2">>, 0),
emqtt:publish(Client, <<"t1">>, <<"{\"x\":1}">>, 0),
ClientPid = find_client_pid(ClientId),
ClientPid = find_client_pid(<<"emqx_c">>),
Caches = list_cache(ClientPid),
ct:log("authz caches: ~p", [Caches]),
?assert(length(Caches) > 0),
@ -61,12 +60,11 @@ t_clean_authz_cache(_) ->
emqtt:stop(Client).
t_drain_authz_cache(_) ->
ClientId = atom_to_binary(?FUNCTION_NAME),
{ok, Client} = emqtt:start_link([{clientid, ClientId}]),
{ok, Client} = emqtt:start_link([{clientid, <<"emqx_c">>}]),
{ok, _} = emqtt:connect(Client),
{ok, _, _} = emqtt:subscribe(Client, <<"t2">>, 0),
emqtt:publish(Client, <<"t1">>, <<"{\"x\":1}">>, 0),
ClientPid = find_client_pid(ClientId),
ClientPid = find_client_pid(<<"emqx_c">>),
Caches = list_cache(ClientPid),
ct:log("authz caches: ~p", [Caches]),
?assert(length(Caches) > 0),

View File

@ -254,45 +254,6 @@ t_session_taken(_) ->
{ok, #{}, [0]} = emqtt:unsubscribe(C3, Topic),
ok = emqtt:disconnect(C3).
t_full_bootstrap_file(_) ->
emqx_banned:clear(),
?assertEqual(ok, emqx_banned:init_from_csv(mk_bootstrap_file(<<"full.csv">>))),
FullDatas = lists:sort([
{banned, {username, <<"u1">>}, <<"boot">>, <<"reason 2">>, 1635170027, 1761400427},
{banned, {clientid, <<"c1">>}, <<"boot">>, <<"reason 1">>, 1635170027, 1761400427}
]),
?assertMatch(FullDatas, lists:sort(get_banned_list())),
?assertEqual(ok, emqx_banned:init_from_csv(mk_bootstrap_file(<<"full2.csv">>))),
?assertMatch(FullDatas, lists:sort(get_banned_list())),
ok.
t_optional_bootstrap_file(_) ->
emqx_banned:clear(),
?assertEqual(ok, emqx_banned:init_from_csv(mk_bootstrap_file(<<"optional.csv">>))),
Keys = lists:sort([{username, <<"u1">>}, {clientid, <<"c1">>}]),
?assertMatch(Keys, lists:sort([element(2, Data) || Data <- get_banned_list()])),
ok.
t_omitted_bootstrap_file(_) ->
emqx_banned:clear(),
?assertEqual(ok, emqx_banned:init_from_csv(mk_bootstrap_file(<<"omitted.csv">>))),
Keys = lists:sort([{username, <<"u1">>}, {clientid, <<"c1">>}]),
?assertMatch(Keys, lists:sort([element(2, Data) || Data <- get_banned_list()])),
ok.
t_error_bootstrap_file(_) ->
emqx_banned:clear(),
?assertEqual(
{error, enoent}, emqx_banned:init_from_csv(mk_bootstrap_file(<<"not_exists.csv">>))
),
?assertEqual(
ok, emqx_banned:init_from_csv(mk_bootstrap_file(<<"error.csv">>))
),
Keys = [{clientid, <<"c1">>}],
?assertMatch(Keys, [element(2, Data) || Data <- get_banned_list()]),
ok.
receive_messages(Count) ->
receive_messages(Count, []).
receive_messages(0, Msgs) ->
@ -308,17 +269,3 @@ receive_messages(Count, Msgs) ->
after 1200 ->
Msgs
end.
mk_bootstrap_file(File) ->
Dir = code:lib_dir(emqx, test),
filename:join([Dir, <<"data/banned">>, File]).
get_banned_list() ->
Tabs = emqx_banned:tables(),
lists:foldl(
fun(Tab, Acc) ->
Acc ++ ets:tab2list(Tab)
end,
[],
Tabs
).

View File

@ -21,7 +21,7 @@
-include_lib("common_test/include/ct.hrl").
-include_lib("stdlib/include/assert.hrl").
-include("../src/bpapi/emqx_bpapi.hrl").
-include_lib("emqx/src/bpapi/emqx_bpapi.hrl").
all() -> emqx_common_test_helpers:all(?MODULE).

View File

@ -60,8 +60,7 @@
{emqx_statsd, 1},
{emqx_plugin_libs, 1},
{emqx_persistent_session, 1},
{emqx_ds, 3},
{emqx_node_rebalance_purge, 1}
{emqx_ds, 3}
]).
%% List of known RPC backend modules:
-define(RPC_MODULES, "gen_rpc, erpc, rpc, emqx_rpc").

View File

@ -414,32 +414,24 @@ t_handle_in_auth(_) ->
emqx_channel:handle_in(?AUTH_PACKET(), Channel).
t_handle_in_frame_error(_) ->
IdleChannelV5 = channel(#{conn_state => idle}),
%% no CONNACK packet for v4
?assertMatch(
{shutdown, #{shutdown_count := frame_too_large, cause := frame_too_large}, _Chan},
emqx_channel:handle_in(
{frame_error, #{cause => frame_too_large}}, v4(IdleChannelV5)
)
),
IdleChannel = channel(#{conn_state => idle}),
{shutdown, #{shutdown_count := frame_too_large, cause := frame_too_large}, _Chan} =
emqx_channel:handle_in({frame_error, #{cause => frame_too_large}}, IdleChannel),
ConnectingChan = channel(#{conn_state => connecting}),
ConnackPacket = ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE),
?assertMatch(
{shutdown,
#{
shutdown_count := frame_too_large,
cause := frame_too_large,
limit := 100,
received := 101
},
ConnackPacket, _},
{shutdown,
#{
shutdown_count := frame_too_large,
cause := frame_too_large,
limit := 100,
received := 101
},
ConnackPacket,
_} =
emqx_channel:handle_in(
{frame_error, #{cause => frame_too_large, received => 101, limit => 100}},
ConnectingChan
)
),
),
DisconnectPacket = ?DISCONNECT_PACKET(?RC_PACKET_TOO_LARGE),
ConnectedChan = channel(#{conn_state => connected}),
?assertMatch(

View File

@ -78,7 +78,6 @@
start_epmd/0,
start_peer/2,
stop_peer/1,
ebin_path/0,
listener_port/2
]).
@ -249,7 +248,6 @@ render_and_load_app_config(App, Opts) ->
%% turn throw into error
error({Conf, E, St})
end.
do_render_app_config(App, Schema, ConfigFile, Opts) ->
%% copy acl_conf must run before read_schema_configs
copy_acl_conf(),

View File

@ -428,7 +428,6 @@ zone_global_defaults() ->
ignore_loop_deliver => false,
keepalive_backoff => 0.75,
keepalive_multiplier => 1.5,
keepalive_check_interval => 30000,
max_awaiting_rel => 100,
max_clientid_len => 65535,
max_inflight => 32,

View File

@ -415,7 +415,14 @@ assert_update_result(FailedPath, Update, Expect) ->
assert_update_result(Paths, UpdatePath, Update, Expect) ->
with_update_result(Paths, UpdatePath, Update, fun(Old, Result) ->
?assertEqual(Expect, Result),
case Expect of
{error, {post_config_update, ?MODULE, post_config_update_error}} ->
?assertMatch(
{error, {post_config_update, ?MODULE, {post_config_update_error, _}}}, Result
);
_ ->
?assertEqual(Expect, Result)
end,
New = emqx:get_raw_config(UpdatePath, undefined),
?assertEqual(Old, New)
end).

View File

@ -84,8 +84,7 @@ init_per_testcase(TestCase, Config) when
fun
(peername, [sock]) -> {ok, {{127, 0, 0, 1}, 3456}};
(sockname, [sock]) -> {ok, {{127, 0, 0, 1}, 1883}};
(peercert, [sock]) -> undefined;
(peersni, [sock]) -> undefined
(peercert, [sock]) -> undefined
end
),
ok = meck:expect(emqx_transport, setopts, fun(_Sock, _Opts) -> ok end),
@ -526,7 +525,7 @@ t_oom_shutdown(_) ->
with_conn(
fun(Pid) ->
Pid ! {tcp_passive, foo},
{ok, _} = ?block_until(#{?snk_kind := check_oom_shutdown}, 1000),
{ok, _} = ?block_until(#{?snk_kind := check_oom}, 1000),
{ok, _} = ?block_until(#{?snk_kind := terminate}, 100),
Trace = snabbkaffe:collect_trace(),
?assertEqual(1, length(?of_kind(terminate, Trace))),

View File

@ -138,14 +138,13 @@ init_per_testcase(t_refresh_config = TestCase, Config) ->
];
init_per_testcase(TestCase, Config) when
TestCase =:= t_update_listener;
TestCase =:= t_update_listener_enable_disable;
TestCase =:= t_validations
->
ct:timetrap({seconds, 30}),
ok = snabbkaffe:start_trace(),
%% when running emqx standalone tests, we can't use those
%% features.
case does_module_exist(emqx_mgmt) of
case does_module_exist(emqx_management) of
true ->
DataDir = ?config(data_dir, Config),
CRLFile = filename:join([DataDir, "intermediate-revoked.crl.pem"]),
@ -166,7 +165,7 @@ init_per_testcase(TestCase, Config) when
{emqx_conf, #{config => #{listeners => #{ssl => #{default => ListenerConf}}}}},
emqx,
emqx_management,
emqx_mgmt_api_test_util:emqx_dashboard()
{emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
],
#{work_dir => emqx_cth_suite:work_dir(TestCase, Config)}
),
@ -207,7 +206,6 @@ read_crl(Filename) ->
end_per_testcase(TestCase, Config) when
TestCase =:= t_update_listener;
TestCase =:= t_update_listener_enable_disable;
TestCase =:= t_validations
->
Skip = proplists:get_bool(skip_does_not_apply, Config),
@ -1059,104 +1057,3 @@ do_t_validations(_Config) ->
),
ok.
%% Checks that if CRL is ever enabled and then disabled, clients can connect, even if they
%% would otherwise not have their corresponding CRLs cached and fail with `{bad_crls,
%% no_relevant_crls}`.
t_update_listener_enable_disable(Config) ->
case proplists:get_bool(skip_does_not_apply, Config) of
true ->
ct:pal("skipping as this test does not apply in this profile"),
ok;
false ->
do_t_update_listener_enable_disable(Config)
end.
do_t_update_listener_enable_disable(Config) ->
DataDir = ?config(data_dir, Config),
Keyfile = filename:join([DataDir, "server.key.pem"]),
Certfile = filename:join([DataDir, "server.cert.pem"]),
Cacertfile = filename:join([DataDir, "ca-chain.cert.pem"]),
ClientCert = filename:join(DataDir, "client.cert.pem"),
ClientKey = filename:join(DataDir, "client.key.pem"),
ListenerId = "ssl:default",
%% Enable CRL
{ok, {{_, 200, _}, _, ListenerData0}} = get_listener_via_api(ListenerId),
CRLConfig0 =
#{
<<"ssl_options">> =>
#{
<<"keyfile">> => Keyfile,
<<"certfile">> => Certfile,
<<"cacertfile">> => Cacertfile,
<<"enable_crl_check">> => true,
<<"fail_if_no_peer_cert">> => true
}
},
ListenerData1 = emqx_utils_maps:deep_merge(ListenerData0, CRLConfig0),
{ok, {_, _, ListenerData2}} = update_listener_via_api(ListenerId, ListenerData1),
?assertMatch(
#{
<<"ssl_options">> :=
#{
<<"enable_crl_check">> := true,
<<"verify">> := <<"verify_peer">>,
<<"fail_if_no_peer_cert">> := true
}
},
ListenerData2
),
%% Disable CRL
CRLConfig1 =
#{
<<"ssl_options">> =>
#{
<<"keyfile">> => Keyfile,
<<"certfile">> => Certfile,
<<"cacertfile">> => Cacertfile,
<<"enable_crl_check">> => false,
<<"fail_if_no_peer_cert">> => true
}
},
ListenerData3 = emqx_utils_maps:deep_merge(ListenerData2, CRLConfig1),
redbug:start(
[
"esockd_server:get_listener_prop -> return",
"esockd_server:set_listener_prop -> return",
"esockd:merge_opts -> return",
"esockd_listener_sup:set_options -> return",
"emqx_listeners:inject_crl_config -> return"
],
[{msgs, 100}]
),
{ok, {_, _, ListenerData4}} = update_listener_via_api(ListenerId, ListenerData3),
?assertMatch(
#{
<<"ssl_options">> :=
#{
<<"enable_crl_check">> := false,
<<"verify">> := <<"verify_peer">>,
<<"fail_if_no_peer_cert">> := true
}
},
ListenerData4
),
%% Now the client that would be blocked tries to connect and should now be allowed.
{ok, C} = emqtt:start_link([
{ssl, true},
{ssl_opts, [
{certfile, ClientCert},
{keyfile, ClientKey},
{verify, verify_none}
]},
{port, 8883}
]),
?assertMatch({ok, _}, emqtt:connect(C)),
emqtt:stop(C),
?assertNotReceive({http_get, _}),
ok.

View File

@ -38,7 +38,7 @@
%% in `end_per_suite/1` or `end_per_group/2`) with the result from step 2.
-module(emqx_cth_cluster).
-export([start/1, start/2, restart/1]).
-export([start/1, start/2, restart/1, restart/2]).
-export([stop/1, stop_node/1]).
-export([start_bare_nodes/1, start_bare_nodes/2]).
@ -163,13 +163,13 @@ wait_clustered([Node | Nodes] = All, Check, Deadline) ->
wait_clustered(All, Check, Deadline)
end.
restart(NodeSpecs = [_ | _]) ->
Nodes = [maps:get(name, Spec) || Spec <- NodeSpecs],
ct:pal("Stopping peer nodes: ~p", [Nodes]),
ok = stop(Nodes),
start([Spec#{boot_type => restart} || Spec <- NodeSpecs]);
restart(NodeSpec = #{}) ->
restart([NodeSpec]).
restart(NodeSpec) ->
restart(maps:get(name, NodeSpec), NodeSpec).
restart(Node, Spec) ->
ct:pal("Stopping peer node ~p", [Node]),
ok = emqx_cth_peer:stop(Node),
start([Spec#{boot_type => restart}]).
mk_nodespecs(Nodes, ClusterOpts) ->
NodeSpecs = lists:zipwith(
@ -391,14 +391,7 @@ node_init(#{name := Node, work_dir := WorkDir}) ->
_ = share_load_module(Node, cthr),
%% Enable snabbkaffe trace forwarding
ok = snabbkaffe:forward_trace(Node),
when_cover_enabled(fun() ->
case cover:start([Node]) of
{ok, _} ->
ok;
{error, {already_started, _}} ->
ok
end
end),
when_cover_enabled(fun() -> {ok, _} = cover:start([Node]) end),
ok.
%% Returns 'true' if this node should appear in running nodes list.
@ -463,7 +456,7 @@ stop(Nodes) ->
stop_node(Name) ->
Node = node_name(Name),
when_cover_enabled(fun() -> ok = cover:flush([Node]) end),
when_cover_enabled(fun() -> cover:flush([Node]) end),
ok = emqx_cth_peer:stop(Node).
%% Ports

View File

@ -1,145 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_cth_listener).
-include_lib("esockd/include/esockd.hrl").
-export([
reload_listener_with_ppv2/1,
reload_listener_with_ppv2/2,
reload_listener_without_ppv2/1
]).
-export([meck_recv_ppv2/1, clear_meck_recv_ppv2/1]).
-define(DEFAULT_OPTS, #{
host => "127.0.0.1",
proto_ver => v5,
connect_timeout => 5,
ssl => false
}).
%%--------------------------------------------------------------------
%% APIs
%%--------------------------------------------------------------------
reload_listener_with_ppv2(Path = [listeners, _Type, _Name]) ->
reload_listener_with_ppv2(Path, <<>>).
reload_listener_with_ppv2(Path = [listeners, Type, Name], DefaultSni) when
Type == tcp; Type == ws
->
Cfg = emqx_config:get(Path),
ok = emqx_config:put(Path, Cfg#{proxy_protocol => true}),
ok = emqx_listeners:restart_listener(
emqx_listeners:listener_id(Type, Name)
),
ok = meck_recv_ppv2(Type),
client_conn_fn(Type, maps:get(bind, Cfg), DefaultSni).
client_conn_fn(tcp, Bind, Sni) ->
client_conn_fn_gen(connect, ?DEFAULT_OPTS#{port => bind2port(Bind), sni => Sni});
client_conn_fn(ws, Bind, Sni) ->
client_conn_fn_gen(ws_connect, ?DEFAULT_OPTS#{port => bind2port(Bind), sni => Sni}).
bind2port({_, Port}) -> Port;
bind2port(Port) when is_integer(Port) -> Port.
client_conn_fn_gen(Connect, Opts0) ->
fun(ClientId, Opts1) ->
Opts2 = maps:merge(Opts0, Opts1#{clientid => ClientId}),
Sni = maps:get(sni, Opts2, undefined),
NOpts = prepare_sni_for_meck(Sni, Opts2),
{ok, C} = emqtt:start_link(NOpts),
case emqtt:Connect(C) of
{ok, _} -> {ok, C};
{error, _} = Err -> Err
end
end.
prepare_sni_for_meck(ClientSni, Opts) when is_binary(ClientSni) ->
ServerSni =
case ClientSni of
disable -> undefined;
_ -> ClientSni
end,
persistent_term:put(current_client_sni, ServerSni),
case maps:get(ssl, Opts, false) of
false ->
Opts;
true ->
SslOpts = maps:get(ssl_opts, Opts, #{}),
Opts#{ssl_opts => [{server_name_indication, ClientSni} | SslOpts]}
end.
reload_listener_without_ppv2(Path = [listeners, Type, Name]) when
Type == tcp; Type == ws
->
Cfg = emqx_config:get(Path),
ok = emqx_config:put(Path, Cfg#{proxy_protocol => false}),
ok = emqx_listeners:restart_listener(
emqx_listeners:listener_id(Type, Name)
),
ok = clear_meck_recv_ppv2(Type).
meck_recv_ppv2(tcp) ->
ok = meck:new(esockd_proxy_protocol, [passthrough, no_history, no_link]),
ok = meck:expect(
esockd_proxy_protocol,
recv,
fun(_Transport, Socket, _Timeout) ->
SNI = persistent_term:get(current_client_sni, undefined),
{ok, {SrcAddr, SrcPort}} = esockd_transport:peername(Socket),
{ok, {DstAddr, DstPort}} = esockd_transport:sockname(Socket),
{ok, #proxy_socket{
inet = inet4,
socket = Socket,
src_addr = SrcAddr,
dst_addr = DstAddr,
src_port = SrcPort,
dst_port = DstPort,
pp2_additional_info = [{pp2_authority, SNI}]
}}
end
);
meck_recv_ppv2(ws) ->
ok = meck:new(ranch_tcp, [passthrough, no_history, no_link]),
ok = meck:expect(
ranch_tcp,
recv_proxy_header,
fun(Socket, _Timeout) ->
SNI = persistent_term:get(current_client_sni, undefined),
{ok, {SrcAddr, SrcPort}} = esockd_transport:peername(Socket),
{ok, {DstAddr, DstPort}} = esockd_transport:sockname(Socket),
{ok, #{
authority => SNI,
command => proxy,
dest_address => DstAddr,
dest_port => DstPort,
src_address => SrcAddr,
src_port => SrcPort,
transport_family => ipv4,
transport_protocol => stream,
version => 2
}}
end
).
clear_meck_recv_ppv2(tcp) ->
ok = meck:unload(esockd_proxy_protocol);
clear_meck_recv_ppv2(ws) ->
ok = meck:unload(ranch_tcp).

View File

@ -22,7 +22,6 @@
-export([start/2, start/3, start/4]).
-export([start_link/2, start_link/3, start_link/4]).
-export([stop/1]).
-export([kill/1]).
start(Name, Args) ->
start(Name, Args, []).
@ -67,32 +66,6 @@ stop(Node) when is_atom(Node) ->
ok
end.
%% @doc Kill a node abruptly, through mechanisms provided by OS.
%% Relies on POSIX `kill`.
kill(Node) ->
try erpc:call(Node, os, getpid, []) of
OSPid ->
Pid = whereis(Node),
_ = is_pid(Pid) andalso unlink(Pid),
Result = kill_os_process(OSPid),
%% Either ensure control process stops, or try to stop if not killed.
_ = is_pid(Pid) andalso catch peer:stop(Pid),
Result
catch
error:{erpc, _} = Reason ->
{error, Reason}
end.
kill_os_process(OSPid) ->
Cmd = "kill -SIGKILL " ++ OSPid,
Port = erlang:open_port({spawn, Cmd}, [binary, exit_status, hide]),
receive
{Port, {exit_status, 0}} ->
ok;
{Port, {exit_status, EC}} ->
{error, EC}
end.
parse_node_name(NodeName) ->
case string:tokens(atom_to_list(NodeName), "@") of
[Name, Host] ->

Some files were not shown because too many files have changed in this diff Show More