chore: merge 'upstream/master' into v5.1.2

This commit is contained in:
Ivan Dyachkov 2023-07-21 13:25:46 +02:00
commit 243b8f5b67
90 changed files with 2502 additions and 407 deletions

View File

@ -9,6 +9,7 @@ DYNAMO_TAG=1.21.0
CASSANDRA_TAG=3.11.6 CASSANDRA_TAG=3.11.6
MINIO_TAG=RELEASE.2023-03-20T20-16-18Z MINIO_TAG=RELEASE.2023-03-20T20-16-18Z
OPENTS_TAG=9aa7f88 OPENTS_TAG=9aa7f88
KINESIS_TAG=2.1
MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server
SQLSERVER_TAG=2019-CU19-ubuntu-20.04 SQLSERVER_TAG=2019-CU19-ubuntu-20.04

View File

@ -18,7 +18,7 @@ services:
- /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret - /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
kdc: kdc:
hostname: kdc.emqx.net hostname: kdc.emqx.net
image: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu20.04 image: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu20.04
container_name: kdc.emqx.net container_name: kdc.emqx.net
expose: expose:
- 88 # kdc - 88 # kdc

View File

@ -0,0 +1,12 @@
version: '3.9'
services:
kinesis:
container_name: kinesis
image: localstack/localstack:2.1
environment:
- KINESIS_ERROR_PROBABILITY=0.0
- KINESIS_LATENCY=0
restart: always
networks:
- emqx_bridge

View File

@ -49,6 +49,8 @@ services:
- 38080:38080 - 38080:38080
# HStreamDB # HStreamDB
- 15670:5670 - 15670:5670
# Kinesis
- 4566:4566
command: command:
- "-host=0.0.0.0" - "-host=0.0.0.0"
- "-config=/config/toxiproxy.json" - "-config=/config/toxiproxy.json"

View File

@ -3,7 +3,7 @@ version: '3.9'
services: services:
erlang: erlang:
container_name: erlang container_name: erlang
image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu20.04} image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu20.04}
env_file: env_file:
- conf.env - conf.env
environment: environment:

View File

@ -161,5 +161,11 @@
"listen": "0.0.0.0:6570", "listen": "0.0.0.0:6570",
"upstream": "hstreamdb:6570", "upstream": "hstreamdb:6570",
"enabled": true "enabled": true
},
{
"name": "kinesis",
"listen": "0.0.0.0:4566",
"upstream": "kinesis:4566",
"enabled": true
} }
] ]

2
.github/CODEOWNERS vendored
View File

@ -18,7 +18,7 @@
/apps/emqx_rule_engine/ @emqx/emqx-review-board @kjellwinblad /apps/emqx_rule_engine/ @emqx/emqx-review-board @kjellwinblad
/apps/emqx_slow_subs/ @emqx/emqx-review-board @lafirest /apps/emqx_slow_subs/ @emqx/emqx-review-board @lafirest
/apps/emqx_statsd/ @emqx/emqx-review-board @JimMoen /apps/emqx_statsd/ @emqx/emqx-review-board @JimMoen
/apps/emqx_durable_storage/ @ieQu1 /apps/emqx_durable_storage/ @emqx/emqx-review-board @ieQu1 @keynslug
## CI ## CI
/deploy/ @emqx/emqx-review-board @Rory-Z /deploy/ @emqx/emqx-review-board @Rory-Z

View File

@ -25,7 +25,7 @@ jobs:
prepare: prepare:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
# prepare source with any OTP version, no need for a matrix # prepare source with any OTP version, no need for a matrix
container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04" container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04"
outputs: outputs:
PROFILE: ${{ steps.get_profile.outputs.PROFILE }} PROFILE: ${{ steps.get_profile.outputs.PROFILE }}
@ -120,7 +120,7 @@ jobs:
# NOTE: 'otp' and 'elixir' are to configure emqx-builder image # NOTE: 'otp' and 'elixir' are to configure emqx-builder image
# only support latest otp and elixir, not a matrix # only support latest otp and elixir, not a matrix
builder: builder:
- 5.1-0 # update to latest - 5.1-3 # update to latest
otp: otp:
- 25.3.2-1 - 25.3.2-1
elixir: elixir:

View File

@ -21,7 +21,7 @@ on:
jobs: jobs:
prepare: prepare:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04 container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04
outputs: outputs:
BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }} BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }}
IS_EXACT_TAG: ${{ steps.get_profile.outputs.IS_EXACT_TAG }} IS_EXACT_TAG: ${{ steps.get_profile.outputs.IS_EXACT_TAG }}
@ -181,24 +181,26 @@ jobs:
- ubuntu22.04 - ubuntu22.04
- ubuntu20.04 - ubuntu20.04
- ubuntu18.04 - ubuntu18.04
- debian12
- debian11 - debian11
- debian10 - debian10
- el9 - el9
- el8 - el8
- el7 - el7
- amzn2 - amzn2
- amzn2023
build_machine: build_machine:
- aws-arm64 - aws-arm64
- ubuntu-22.04 - aws-amd64
builder: builder:
- 5.1-0 - 5.1-3
elixir: elixir:
- 1.14.5 - 1.14.5
with_elixir: with_elixir:
- 'no' - 'no'
exclude: exclude:
- arch: arm64 - arch: arm64
build_machine: ubuntu-22.04 build_machine: aws-amd64
- arch: amd64 - arch: amd64
build_machine: aws-arm64 build_machine: aws-arm64
include: include:
@ -206,16 +208,8 @@ jobs:
otp: 25.3.2-1 otp: 25.3.2-1
arch: amd64 arch: amd64
os: ubuntu22.04 os: ubuntu22.04
build_machine: ubuntu-22.04 build_machine: aws-amd64
builder: 5.1-0 builder: 5.1-3
elixir: 1.14.5
with_elixir: 'yes'
- profile: emqx
otp: 25.3.2-1
arch: amd64
os: amzn2
build_machine: ubuntu-22.04
builder: 5.1-0
elixir: 1.14.5 elixir: 1.14.5
with_elixir: 'yes' with_elixir: 'yes'
@ -225,18 +219,13 @@ jobs:
steps: steps:
- uses: AutoModality/action-clean@v1 - uses: AutoModality/action-clean@v1
if: matrix.build_machine == 'aws-arm64'
- uses: actions/checkout@v3 - uses: actions/checkout@v3
with: with:
ref: ${{ github.event.inputs.branch_or_tag }} ref: ${{ github.event.inputs.branch_or_tag }}
fetch-depth: 0 fetch-depth: 0
- name: build emqx packages - name: fix workdir
env:
ELIXIR: ${{ matrix.elixir }}
PROFILE: ${{ matrix.profile }}
ARCH: ${{ matrix.arch }}
run: | run: |
set -eu set -eu
git config --global --add safe.directory "$GITHUB_WORKSPACE" git config --global --add safe.directory "$GITHUB_WORKSPACE"
@ -246,22 +235,33 @@ jobs:
cd /emqx cd /emqx
fi fi
echo "pwd is $PWD" echo "pwd is $PWD"
PKGTYPES="tgz pkg"
IS_ELIXIR=${{ matrix.with_elixir }} - name: build emqx packages
env:
PROFILE: ${{ matrix.profile }}
IS_ELIXIR: ${{ matrix.with_elixir }}
ACLOCAL_PATH: "/usr/share/aclocal:/usr/local/share/aclocal"
run: |
set -eu
if [ "${IS_ELIXIR:-}" == 'yes' ]; then if [ "${IS_ELIXIR:-}" == 'yes' ]; then
PKGTYPES="tgz" make "${PROFILE}-elixir-tgz"
else
make "${PROFILE}-tgz"
make "${PROFILE}-pkg"
fi
- name: test emqx packages
env:
PROFILE: ${{ matrix.profile }}
IS_ELIXIR: ${{ matrix.with_elixir }}
run: |
set -eu
if [ "${IS_ELIXIR:-}" == 'yes' ]; then
./scripts/pkg-tests.sh "${PROFILE}-elixir-tgz"
else
./scripts/pkg-tests.sh "${PROFILE}-tgz"
./scripts/pkg-tests.sh "${PROFILE}-pkg"
fi fi
for PKGTYPE in ${PKGTYPES};
do
./scripts/buildx.sh \
--profile "${PROFILE}" \
--pkgtype "${PKGTYPE}" \
--arch "${ARCH}" \
--elixir "${IS_ELIXIR}" \
--builder "force_host"
done
- uses: actions/upload-artifact@v3 - uses: actions/upload-artifact@v3
if: success()
with: with:
name: ${{ matrix.profile }} name: ${{ matrix.profile }}
path: _packages/${{ matrix.profile }}/ path: _packages/${{ matrix.profile }}/

View File

@ -30,9 +30,9 @@ jobs:
- amd64 - amd64
os: os:
- debian10 - debian10
- amzn2 - amzn2023
builder: builder:
- 5.1-0 - 5.1-3
elixir: elixir:
- 1.14.5 - 1.14.5

View File

@ -32,10 +32,10 @@ jobs:
profile: profile:
- ["emqx", "25.3.2-1", "el7", "erlang"] - ["emqx", "25.3.2-1", "el7", "erlang"]
- ["emqx", "25.3.2-1", "ubuntu22.04", "elixir"] - ["emqx", "25.3.2-1", "ubuntu22.04", "elixir"]
- ["emqx-enterprise", "25.3.2-1", "amzn2", "erlang"] - ["emqx-enterprise", "25.3.2-1", "amzn2023", "erlang"]
- ["emqx-enterprise", "25.3.2-1", "ubuntu20.04", "erlang"] - ["emqx-enterprise", "25.3.2-1", "ubuntu20.04", "erlang"]
builder: builder:
- 5.1-0 - 5.1-3
elixir: elixir:
- '1.14.5' - '1.14.5'

View File

@ -6,7 +6,7 @@ on:
jobs: jobs:
check_deps_integrity: check_deps_integrity:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04 container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v3

View File

@ -5,7 +5,7 @@ on: [pull_request]
jobs: jobs:
code_style_check: code_style_check:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04" container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04"
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
with: with:

61
.github/workflows/codeql.yaml vendored Normal file
View File

@ -0,0 +1,61 @@
name: "CodeQL"
on:
schedule:
- cron: '33 14 * * 4'
workflow_dispatch:
inputs:
ref:
required: false
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
timeout-minutes: 360
permissions:
actions: read
contents: read
security-events: write
container:
image: ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04
strategy:
fail-fast: false
matrix:
language: [ 'cpp', 'python' ]
steps:
- name: Checkout repository
uses: actions/checkout@v3
with:
ref: ${{ github.event.inputs.ref }}
- name: Ensure git safe dir
run: |
git config --global --add safe.directory "$GITHUB_WORKSPACE"
make ensure-rebar3
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
- name: Build
if: matrix.language == 'cpp'
env:
PROFILE: emqx-enterprise
run: |
make emqx-enterprise-compile
- name: Fetch deps
if: matrix.language == 'python'
env:
PROFILE: emqx-enterprise
run: |
make deps-emqx-enterprise
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
with:
category: "/language:${{matrix.language}}"

View File

@ -9,7 +9,7 @@ jobs:
elixir_apps_check: elixir_apps_check:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
# just use the latest builder # just use the latest builder
container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04" container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04"
strategy: strategy:
fail-fast: false fail-fast: false

View File

@ -8,7 +8,7 @@ on:
jobs: jobs:
elixir_deps_check: elixir_deps_check:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04 container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04
steps: steps:
- name: Checkout - name: Checkout

View File

@ -17,7 +17,7 @@ jobs:
profile: profile:
- emqx - emqx
- emqx-enterprise - emqx-enterprise
container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04 container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v3

View File

@ -23,7 +23,7 @@ jobs:
prepare: prepare:
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: github.repository_owner == 'emqx' if: github.repository_owner == 'emqx'
container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu20.04 container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu20.04
outputs: outputs:
BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }} BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }}
PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }} PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }}

View File

@ -87,20 +87,24 @@ jobs:
push "debian/buster" "packages/$PROFILE-$VERSION-debian10-arm64.deb" push "debian/buster" "packages/$PROFILE-$VERSION-debian10-arm64.deb"
push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-amd64.deb" push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-amd64.deb"
push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-arm64.deb" push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-arm64.deb"
push "debian/bookworm" "packages/$PROFILE-$VERSION-debian12-amd64.deb"
push "debian/bookworm" "packages/$PROFILE-$VERSION-debian12-arm64.deb"
push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-amd64.deb" push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-amd64.deb"
push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-arm64.deb" push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-arm64.deb"
push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-amd64.deb" push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-amd64.deb"
push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-arm64.deb" push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-arm64.deb"
push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-amd64.deb" push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-amd64.deb"
push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-arm64.deb" push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-arm64.deb"
push "el/6" "packages/$PROFILE-$VERSION-amzn2-amd64.rpm"
push "el/6" "packages/$PROFILE-$VERSION-amzn2-arm64.rpm"
push "el/7" "packages/$PROFILE-$VERSION-el7-amd64.rpm" push "el/7" "packages/$PROFILE-$VERSION-el7-amd64.rpm"
push "el/7" "packages/$PROFILE-$VERSION-el7-arm64.rpm" push "el/7" "packages/$PROFILE-$VERSION-el7-arm64.rpm"
push "el/8" "packages/$PROFILE-$VERSION-el8-amd64.rpm" push "el/8" "packages/$PROFILE-$VERSION-el8-amd64.rpm"
push "el/8" "packages/$PROFILE-$VERSION-el8-arm64.rpm" push "el/8" "packages/$PROFILE-$VERSION-el8-arm64.rpm"
push "el/9" "packages/$PROFILE-$VERSION-el9-amd64.rpm" push "el/9" "packages/$PROFILE-$VERSION-el9-amd64.rpm"
push "el/9" "packages/$PROFILE-$VERSION-el9-arm64.rpm" push "el/9" "packages/$PROFILE-$VERSION-el9-arm64.rpm"
push "amazon/2" "packages/$PROFILE-$VERSION-amzn2-amd64.rpm"
push "amazon/2" "packages/$PROFILE-$VERSION-amzn2-arm64.rpm"
push "amazon/2023" "packages/$PROFILE-$VERSION-amzn2023-amd64.rpm"
push "amazon/2023" "packages/$PROFILE-$VERSION-amzn2023-arm64.rpm"
rerun-apps-version-check: rerun-apps-version-check:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04

View File

@ -26,7 +26,7 @@ jobs:
profile: profile:
- emqx - emqx
- emqx-enterprise - emqx-enterprise
container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04" container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04"
steps: steps:
- uses: AutoModality/action-clean@v1 - uses: AutoModality/action-clean@v1
- uses: actions/checkout@v3 - uses: actions/checkout@v3

View File

@ -12,7 +12,7 @@ jobs:
strategy: strategy:
matrix: matrix:
builder: builder:
- 5.1-0 - 5.1-3
otp: otp:
- 25.3.2-1 - 25.3.2-1
# no need to use more than 1 version of Elixir, since tests # no need to use more than 1 version of Elixir, since tests

View File

@ -17,7 +17,7 @@ jobs:
prepare: prepare:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
# prepare source with any OTP version, no need for a matrix # prepare source with any OTP version, no need for a matrix
container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-debian11 container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-debian11
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
@ -50,7 +50,7 @@ jobs:
os: os:
- ["debian11", "debian:11-slim"] - ["debian11", "debian:11-slim"]
builder: builder:
- 5.1-0 - 5.1-3
otp: otp:
- 25.3.2-1 - 25.3.2-1
elixir: elixir:
@ -123,7 +123,7 @@ jobs:
os: os:
- ["debian11", "debian:11-slim"] - ["debian11", "debian:11-slim"]
builder: builder:
- 5.1-0 - 5.1-3
otp: otp:
- 25.3.2-1 - 25.3.2-1
elixir: elixir:

View File

@ -15,7 +15,7 @@ concurrency:
jobs: jobs:
relup_test_plan: relup_test_plan:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04" container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04"
outputs: outputs:
CUR_EE_VSN: ${{ steps.find-versions.outputs.CUR_EE_VSN }} CUR_EE_VSN: ${{ steps.find-versions.outputs.CUR_EE_VSN }}
OLD_VERSIONS: ${{ steps.find-versions.outputs.OLD_VERSIONS }} OLD_VERSIONS: ${{ steps.find-versions.outputs.OLD_VERSIONS }}

View File

@ -34,12 +34,12 @@ jobs:
MATRIX="$(echo "${APPS}" | jq -c ' MATRIX="$(echo "${APPS}" | jq -c '
[ [
(.[] | select(.profile == "emqx") | . + { (.[] | select(.profile == "emqx") | . + {
builder: "5.1-0", builder: "5.1-3",
otp: "25.3.2-1", otp: "25.3.2-1",
elixir: "1.14.5" elixir: "1.14.5"
}), }),
(.[] | select(.profile == "emqx-enterprise") | . + { (.[] | select(.profile == "emqx-enterprise") | . + {
builder: "5.1-0", builder: "5.1-3",
otp: ["25.3.2-1"][], otp: ["25.3.2-1"][],
elixir: "1.14.5" elixir: "1.14.5"
}) })
@ -286,7 +286,7 @@ jobs:
- ct - ct
- ct_docker - ct_docker
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04" container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04"
steps: steps:
- uses: AutoModality/action-clean@v1 - uses: AutoModality/action-clean@v1
- uses: actions/download-artifact@v3 - uses: actions/download-artifact@v3

View File

@ -2,7 +2,7 @@ REBAR = $(CURDIR)/rebar3
BUILD = $(CURDIR)/build BUILD = $(CURDIR)/build
SCRIPTS = $(CURDIR)/scripts SCRIPTS = $(CURDIR)/scripts
export EMQX_RELUP ?= true export EMQX_RELUP ?= true
export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-debian11 export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-debian11
export EMQX_DEFAULT_RUNNER = debian:11-slim export EMQX_DEFAULT_RUNNER = debian:11-slim
export EMQX_REL_FORM ?= tgz export EMQX_REL_FORM ?= tgz
export QUICER_DOWNLOAD_FROM_RELEASE = 1 export QUICER_DOWNLOAD_FROM_RELEASE = 1
@ -15,7 +15,7 @@ endif
# Dashboard version # Dashboard version
# from https://github.com/emqx/emqx-dashboard5 # from https://github.com/emqx/emqx-dashboard5
export EMQX_DASHBOARD_VERSION ?= v1.3.1 export EMQX_DASHBOARD_VERSION ?= v1.3.2
export EMQX_EE_DASHBOARD_VERSION ?= e1.1.1-beta.4 export EMQX_EE_DASHBOARD_VERSION ?= e1.1.1-beta.4
# `:=` should be used here, otherwise the `$(shell ...)` will be executed every time when the variable is used # `:=` should be used here, otherwise the `$(shell ...)` will be executed every time when the variable is used
@ -99,6 +99,7 @@ static_checks:
@$(REBAR) as check do xref, dialyzer @$(REBAR) as check do xref, dialyzer
@if [ "$${PROFILE}" = 'emqx-enterprise' ]; then $(REBAR) ct --suite apps/emqx/test/emqx_static_checks --readable $(CT_READABLE); fi @if [ "$${PROFILE}" = 'emqx-enterprise' ]; then $(REBAR) ct --suite apps/emqx/test/emqx_static_checks --readable $(CT_READABLE); fi
./scripts/check-i18n-style.sh ./scripts/check-i18n-style.sh
./scripts/check_missing_reboot_apps.exs
APPS=$(shell $(SCRIPTS)/find-apps.sh) APPS=$(shell $(SCRIPTS)/find-apps.sh)

40
SECURITY.md Normal file
View File

@ -0,0 +1,40 @@
# Security Policy
## Supported Versions
| Version | Supported |
| ------- | ------------------ |
| 5.1.x | :white_check_mark: |
| 5.0.x | :white_check_mark: |
| 4.4.x | :white_check_mark: |
| < 4.4 | :x: |
## Qualifying Vulnerabilities
Any design or implementation issue that substantially affects the confidentiality or integrity of user data is likely to be in scope for the program. Common examples including:
* Cross-site scripting
* Cross-site request forgery
* Mixed-content scripts
* Authentication or authorization flaws
* Server-side code execution bugs
Out of concern for the availability of our services to all users, please do not attempt to carry out DoS attacks, leverage black hat SEO techniques, spam people, brute force authentication, or do other similarly questionable things. We also discourage the use of any vulnerability testing tools that automatically generate very significant volumes of traffic.
## Non-qualifying Vulnerabilities
Depending on their impacts, some of the reported issues may not qualify.
Although we review them on a case-by-case basis, here are some of the issues that typically do not earn a monetary reward:
* Bugs requiring exceedingly unlikely user interaction Brute forcing
* User enumeration
* Non security related bugs
* Abuse
## Reporting a Vulnerability
1. When investigating a vulnerability, please, only ever target your own accounts. Never attempt to access anyone else's data and do not engage in any activity that would be disruptive or damaging to other users.
2. In the case the same vulnerability is present on multiple products, please combine and send one report.
3. If you have found a vulnerability, please contact us at security@emqx.io.
4. Note that we are only able to answer technical vulnerability reports. Duplicate reports will not be rewarded, first report on the specific vulnerability will be rewarded.
5. The report should include steps in plain text how to reproduce the vulnerability (not only video or images).

View File

@ -23,6 +23,7 @@
%% `git_subdir` dependency in other projects. %% `git_subdir` dependency in other projects.
{deps, [ {deps, [
{emqx_utils, {path, "../emqx_utils"}}, {emqx_utils, {path, "../emqx_utils"}},
{emqx_durable_storage, {path, "../emqx_durable_storage"}},
{lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}}, {lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}},
{gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}}, {gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}},
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},

View File

@ -16,7 +16,8 @@
sasl, sasl,
os_mon, os_mon,
lc, lc,
hocon hocon,
emqx_durable_storage
]}, ]},
{mod, {emqx_app, []}}, {mod, {emqx_app, []}},
{env, []}, {env, []},

View File

@ -39,6 +39,7 @@
start(_Type, _Args) -> start(_Type, _Args) ->
ok = maybe_load_config(), ok = maybe_load_config(),
ok = emqx_persistent_session:init_db_backend(), ok = emqx_persistent_session:init_db_backend(),
_ = emqx_persistent_session_ds:init(),
ok = maybe_start_quicer(), ok = maybe_start_quicer(),
ok = emqx_bpapi:start(), ok = emqx_bpapi:start(),
ok = emqx_alarm_handler:load(), ok = emqx_alarm_handler:load(),

View File

@ -224,7 +224,7 @@ publish(Msg) when is_record(Msg, message) ->
}), }),
[]; [];
Msg1 = #message{topic = Topic} -> Msg1 = #message{topic = Topic} ->
emqx_persistent_session:persist_message(Msg1), _ = emqx_persistent_session_ds:persist_message(Msg1),
route(aggre(emqx_router:match_routes(Topic)), delivery(Msg1)) route(aggre(emqx_router:match_routes(Topic)), delivery(Msg1))
end. end.

View File

@ -153,6 +153,7 @@ code_change(_OldVsn, State, _Extra) ->
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
clean_down(SubPid) -> clean_down(SubPid) ->
try
case ets:lookup(?SUBMON, SubPid) of case ets:lookup(?SUBMON, SubPid) of
[{_, SubId}] -> [{_, SubId}] ->
true = ets:delete(?SUBMON, SubPid), true = ets:delete(?SUBMON, SubPid),
@ -162,4 +163,7 @@ clean_down(SubPid) ->
emqx_broker:subscriber_down(SubPid); emqx_broker:subscriber_down(SubPid);
[] -> [] ->
ok ok
end
catch
error:badarg -> ok
end. end.

View File

@ -61,8 +61,7 @@
%% Export for emqx_channel implementations %% Export for emqx_channel implementations
-export([ -export([
maybe_nack/1, maybe_nack/1
maybe_mark_as_delivered/2
]). ]).
%% Exports for CT %% Exports for CT
@ -199,11 +198,6 @@ info(timers, #channel{timers = Timers}) ->
set_conn_state(ConnState, Channel) -> set_conn_state(ConnState, Channel) ->
Channel#channel{conn_state = ConnState}. Channel#channel{conn_state = ConnState}.
set_session(Session, Channel = #channel{conninfo = ConnInfo, clientinfo = ClientInfo}) ->
%% Assume that this is also an updated session. Allow side effect.
Session1 = emqx_persistent_session:persist(ClientInfo, ConnInfo, Session),
Channel#channel{session = Session1}.
-spec stats(channel()) -> emqx_types:stats(). -spec stats(channel()) -> emqx_types:stats().
stats(#channel{session = undefined}) -> stats(#channel{session = undefined}) ->
emqx_pd:get_counters(?CHANNEL_METRICS); emqx_pd:get_counters(?CHANNEL_METRICS);
@ -417,10 +411,10 @@ handle_in(
case emqx_session:puback(ClientInfo, PacketId, Session) of case emqx_session:puback(ClientInfo, PacketId, Session) of
{ok, Msg, NSession} -> {ok, Msg, NSession} ->
ok = after_message_acked(ClientInfo, Msg, Properties), ok = after_message_acked(ClientInfo, Msg, Properties),
{ok, set_session(NSession, Channel)}; {ok, Channel#channel{session = NSession}};
{ok, Msg, Publishes, NSession} -> {ok, Msg, Publishes, NSession} ->
ok = after_message_acked(ClientInfo, Msg, Properties), ok = after_message_acked(ClientInfo, Msg, Properties),
handle_out(publish, Publishes, set_session(NSession, Channel)); handle_out(publish, Publishes, Channel#channel{session = NSession});
{error, ?RC_PACKET_IDENTIFIER_IN_USE} -> {error, ?RC_PACKET_IDENTIFIER_IN_USE} ->
?SLOG(warning, #{msg => "puback_packetId_inuse", packetId => PacketId}), ?SLOG(warning, #{msg => "puback_packetId_inuse", packetId => PacketId}),
ok = emqx_metrics:inc('packets.puback.inuse'), ok = emqx_metrics:inc('packets.puback.inuse'),
@ -438,7 +432,7 @@ handle_in(
case emqx_session:pubrec(ClientInfo, PacketId, Session) of case emqx_session:pubrec(ClientInfo, PacketId, Session) of
{ok, Msg, NSession} -> {ok, Msg, NSession} ->
ok = after_message_acked(ClientInfo, Msg, Properties), ok = after_message_acked(ClientInfo, Msg, Properties),
NChannel = set_session(NSession, Channel), NChannel = Channel#channel{session = NSession},
handle_out(pubrel, {PacketId, ?RC_SUCCESS}, NChannel); handle_out(pubrel, {PacketId, ?RC_SUCCESS}, NChannel);
{error, RC = ?RC_PACKET_IDENTIFIER_IN_USE} -> {error, RC = ?RC_PACKET_IDENTIFIER_IN_USE} ->
?SLOG(warning, #{msg => "pubrec_packetId_inuse", packetId => PacketId}), ?SLOG(warning, #{msg => "pubrec_packetId_inuse", packetId => PacketId}),
@ -458,7 +452,7 @@ handle_in(
) -> ) ->
case emqx_session:pubrel(ClientInfo, PacketId, Session) of case emqx_session:pubrel(ClientInfo, PacketId, Session) of
{ok, NSession} -> {ok, NSession} ->
NChannel = set_session(NSession, Channel), NChannel = Channel#channel{session = NSession},
handle_out(pubcomp, {PacketId, ?RC_SUCCESS}, NChannel); handle_out(pubcomp, {PacketId, ?RC_SUCCESS}, NChannel);
{error, RC = ?RC_PACKET_IDENTIFIER_NOT_FOUND} -> {error, RC = ?RC_PACKET_IDENTIFIER_NOT_FOUND} ->
?SLOG(warning, #{msg => "pubrel_packetId_not_found", packetId => PacketId}), ?SLOG(warning, #{msg => "pubrel_packetId_not_found", packetId => PacketId}),
@ -473,9 +467,9 @@ handle_in(
) -> ) ->
case emqx_session:pubcomp(ClientInfo, PacketId, Session) of case emqx_session:pubcomp(ClientInfo, PacketId, Session) of
{ok, NSession} -> {ok, NSession} ->
{ok, set_session(NSession, Channel)}; {ok, Channel#channel{session = NSession}};
{ok, Publishes, NSession} -> {ok, Publishes, NSession} ->
handle_out(publish, Publishes, set_session(NSession, Channel)); handle_out(publish, Publishes, Channel#channel{session = NSession});
{error, ?RC_PACKET_IDENTIFIER_IN_USE} -> {error, ?RC_PACKET_IDENTIFIER_IN_USE} ->
ok = emqx_metrics:inc('packets.pubcomp.inuse'), ok = emqx_metrics:inc('packets.pubcomp.inuse'),
{ok, Channel}; {ok, Channel};
@ -734,7 +728,7 @@ do_publish(
case emqx_session:publish(ClientInfo, PacketId, Msg, Session) of case emqx_session:publish(ClientInfo, PacketId, Msg, Session) of
{ok, PubRes, NSession} -> {ok, PubRes, NSession} ->
RC = pubrec_reason_code(PubRes), RC = pubrec_reason_code(PubRes),
NChannel0 = set_session(NSession, Channel), NChannel0 = Channel#channel{session = NSession},
NChannel1 = ensure_timer(await_timer, NChannel0), NChannel1 = ensure_timer(await_timer, NChannel0),
NChannel2 = ensure_quota(PubRes, NChannel1), NChannel2 = ensure_quota(PubRes, NChannel1),
handle_out(pubrec, {PacketId, RC}, NChannel2); handle_out(pubrec, {PacketId, RC}, NChannel2);
@ -830,7 +824,7 @@ do_subscribe(
NSubOpts = enrich_subopts(maps:merge(?DEFAULT_SUBOPTS, SubOpts), Channel), NSubOpts = enrich_subopts(maps:merge(?DEFAULT_SUBOPTS, SubOpts), Channel),
case emqx_session:subscribe(ClientInfo, NTopicFilter, NSubOpts, Session) of case emqx_session:subscribe(ClientInfo, NTopicFilter, NSubOpts, Session) of
{ok, NSession} -> {ok, NSession} ->
{QoS, set_session(NSession, Channel)}; {QoS, Channel#channel{session = NSession}};
{error, RC} -> {error, RC} ->
?SLOG( ?SLOG(
warning, warning,
@ -869,7 +863,7 @@ do_unsubscribe(
TopicFilter1 = emqx_mountpoint:mount(MountPoint, TopicFilter), TopicFilter1 = emqx_mountpoint:mount(MountPoint, TopicFilter),
case emqx_session:unsubscribe(ClientInfo, TopicFilter1, SubOpts, Session) of case emqx_session:unsubscribe(ClientInfo, TopicFilter1, SubOpts, Session) of
{ok, NSession} -> {ok, NSession} ->
{?RC_SUCCESS, set_session(NSession, Channel)}; {?RC_SUCCESS, Channel#channel{session = NSession}};
{error, RC} -> {error, RC} ->
{RC, Channel} {RC, Channel}
end. end.
@ -898,7 +892,7 @@ process_disconnect(ReasonCode, Properties, Channel) ->
maybe_update_expiry_interval( maybe_update_expiry_interval(
#{'Session-Expiry-Interval' := Interval}, #{'Session-Expiry-Interval' := Interval},
Channel = #channel{conninfo = ConnInfo, clientinfo = ClientInfo} Channel = #channel{conninfo = ConnInfo}
) -> ) ->
EI = timer:seconds(Interval), EI = timer:seconds(Interval),
OldEI = maps:get(expiry_interval, ConnInfo, 0), OldEI = maps:get(expiry_interval, ConnInfo, 0),
@ -907,12 +901,11 @@ maybe_update_expiry_interval(
Channel; Channel;
false -> false ->
NChannel = Channel#channel{conninfo = ConnInfo#{expiry_interval => EI}}, NChannel = Channel#channel{conninfo = ConnInfo#{expiry_interval => EI}},
ClientID = maps:get(clientid, ClientInfo, undefined),
%% Check if the client turns off persistence (turning it on is disallowed) %% Check if the client turns off persistence (turning it on is disallowed)
case EI =:= 0 andalso OldEI > 0 of case EI =:= 0 andalso OldEI > 0 of
true -> true ->
S = emqx_persistent_session:discard(ClientID, NChannel#channel.session), NSession = emqx_session:unpersist(NChannel#channel.session),
set_session(S, NChannel); NChannel#channel{session = NSession};
false -> false ->
NChannel NChannel
end end
@ -931,18 +924,13 @@ handle_deliver(
Delivers, Delivers,
Channel = #channel{ Channel = #channel{
takeover = true, takeover = true,
pendings = Pendings, pendings = Pendings
session = Session,
clientinfo = #{clientid := ClientId} = ClientInfo
} }
) -> ) ->
%% NOTE: Order is important here. While the takeover is in %% NOTE: Order is important here. While the takeover is in
%% progress, the session cannot enqueue messages, since it already %% progress, the session cannot enqueue messages, since it already
%% passed on the queue to the new connection in the session state. %% passed on the queue to the new connection in the session state.
NPendings = lists:append( NPendings = lists:append(Pendings, maybe_nack(Delivers)),
Pendings,
emqx_session:ignore_local(ClientInfo, maybe_nack(Delivers), ClientId, Session)
),
{ok, Channel#channel{pendings = NPendings}}; {ok, Channel#channel{pendings = NPendings}};
handle_deliver( handle_deliver(
Delivers, Delivers,
@ -950,37 +938,27 @@ handle_deliver(
conn_state = disconnected, conn_state = disconnected,
takeover = false, takeover = false,
session = Session, session = Session,
clientinfo = #{clientid := ClientId} = ClientInfo clientinfo = ClientInfo
} }
) -> ) ->
Delivers1 = maybe_nack(Delivers), Delivers1 = maybe_nack(Delivers),
Delivers2 = emqx_session:ignore_local(ClientInfo, Delivers1, ClientId, Session), NSession = emqx_session:enqueue(ClientInfo, Delivers1, Session),
NSession = emqx_session:enqueue(ClientInfo, Delivers2, Session), NChannel = Channel#channel{session = NSession},
NChannel = set_session(NSession, Channel),
%% We consider queued/dropped messages as delivered since they are now in the session state.
maybe_mark_as_delivered(Session, Delivers),
{ok, NChannel}; {ok, NChannel};
handle_deliver( handle_deliver(
Delivers, Delivers,
Channel = #channel{ Channel = #channel{
session = Session, session = Session,
takeover = false, takeover = false,
clientinfo = #{clientid := ClientId} = ClientInfo clientinfo = ClientInfo
} }
) -> ) ->
case case emqx_session:deliver(ClientInfo, Delivers, Session) of
emqx_session:deliver(
ClientInfo,
emqx_session:ignore_local(ClientInfo, Delivers, ClientId, Session),
Session
)
of
{ok, Publishes, NSession} -> {ok, Publishes, NSession} ->
NChannel = set_session(NSession, Channel), NChannel = Channel#channel{session = NSession},
maybe_mark_as_delivered(NSession, Delivers),
handle_out(publish, Publishes, ensure_timer(retry_timer, NChannel)); handle_out(publish, Publishes, ensure_timer(retry_timer, NChannel));
{ok, NSession} -> {ok, NSession} ->
{ok, set_session(NSession, Channel)} {ok, Channel#channel{session = NSession}}
end. end.
%% Nack delivers from shared subscription %% Nack delivers from shared subscription
@ -996,15 +974,6 @@ not_nacked({deliver, _Topic, Msg}) ->
true true
end. end.
maybe_mark_as_delivered(Session, Delivers) ->
case emqx_session:info(is_persistent, Session) of
false ->
skip;
true ->
SessionID = emqx_session:info(id, Session),
emqx_persistent_session:mark_as_delivered(SessionID, Delivers)
end.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Handle outgoing packet %% Handle outgoing packet
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -1096,11 +1065,11 @@ return_connack(AckPacket, Channel) ->
ignore -> ignore ->
{ok, Replies, Channel}; {ok, Replies, Channel};
{ok, Publishes, NSession} -> {ok, Publishes, NSession} ->
NChannel0 = Channel#channel{ NChannel1 = Channel#channel{
resuming = false, resuming = false,
pendings = [] pendings = [],
session = NSession
}, },
NChannel1 = set_session(NSession, NChannel0),
{Packets, NChannel2} = do_deliver(Publishes, NChannel1), {Packets, NChannel2} = do_deliver(Publishes, NChannel1),
Outgoing = [{outgoing, Packets} || length(Packets) > 0], Outgoing = [{outgoing, Packets} || length(Packets) > 0],
{ok, Replies ++ Outgoing, NChannel2} {ok, Replies ++ Outgoing, NChannel2}
@ -1345,9 +1314,10 @@ handle_timeout(
) -> ) ->
case emqx_session:retry(ClientInfo, Session) of case emqx_session:retry(ClientInfo, Session) of
{ok, NSession} -> {ok, NSession} ->
{ok, clean_timer(retry_timer, set_session(NSession, Channel))}; NChannel = Channel#channel{session = NSession},
{ok, clean_timer(retry_timer, NChannel)};
{ok, Publishes, Timeout, NSession} -> {ok, Publishes, Timeout, NSession} ->
NChannel = set_session(NSession, Channel), NChannel = Channel#channel{session = NSession},
handle_out(publish, Publishes, reset_timer(retry_timer, Timeout, NChannel)) handle_out(publish, Publishes, reset_timer(retry_timer, Timeout, NChannel))
end; end;
handle_timeout( handle_timeout(
@ -1363,9 +1333,11 @@ handle_timeout(
) -> ) ->
case emqx_session:expire(ClientInfo, awaiting_rel, Session) of case emqx_session:expire(ClientInfo, awaiting_rel, Session) of
{ok, NSession} -> {ok, NSession} ->
{ok, clean_timer(await_timer, set_session(NSession, Channel))}; NChannel = Channel#channel{session = NSession},
{ok, clean_timer(await_timer, NChannel)};
{ok, Timeout, NSession} -> {ok, Timeout, NSession} ->
{ok, reset_timer(await_timer, Timeout, set_session(NSession, Channel))} NChannel = Channel#channel{session = NSession},
{ok, reset_timer(await_timer, Timeout, NChannel)}
end; end;
handle_timeout(_TRef, expire_session, Channel) -> handle_timeout(_TRef, expire_session, Channel) ->
shutdown(expired, Channel); shutdown(expired, Channel);
@ -1453,25 +1425,11 @@ terminate(Reason, Channel = #channel{clientinfo = ClientInfo, will_msg = WillMsg
%% if will_msg still exists when the session is terminated, it %% if will_msg still exists when the session is terminated, it
%% must be published immediately. %% must be published immediately.
WillMsg =/= undefined andalso publish_will_msg(ClientInfo, WillMsg), WillMsg =/= undefined andalso publish_will_msg(ClientInfo, WillMsg),
(Reason =:= expired) andalso persist_if_session(Channel),
run_terminate_hook(Reason, Channel). run_terminate_hook(Reason, Channel).
persist_if_session(#channel{session = Session} = Channel) -> run_terminate_hook(_Reason, #channel{session = undefined}) ->
case emqx_session:is_session(Session) of
true ->
_ = emqx_persistent_session:persist(
Channel#channel.clientinfo,
Channel#channel.conninfo,
Channel#channel.session
),
ok; ok;
false -> run_terminate_hook(Reason, #channel{clientinfo = ClientInfo, session = Session}) ->
ok
end.
run_terminate_hook(_Reason, #channel{session = undefined} = _Channel) ->
ok;
run_terminate_hook(Reason, #channel{clientinfo = ClientInfo, session = Session} = _Channel) ->
emqx_session:terminate(ClientInfo, Reason, Session). emqx_session:terminate(ClientInfo, Reason, Session).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -2096,11 +2054,9 @@ maybe_resume_session(#channel{
session = Session, session = Session,
resuming = true, resuming = true,
pendings = Pendings, pendings = Pendings,
clientinfo = #{clientid := ClientId} = ClientInfo clientinfo = ClientInfo
}) -> }) ->
{ok, Publishes, Session1} = emqx_session:replay(ClientInfo, Session), {ok, Publishes, Session1} = emqx_session:replay(ClientInfo, Session),
%% We consider queued/dropped messages as delivered since they are now in the session state.
emqx_persistent_session:mark_as_delivered(ClientId, Pendings),
case emqx_session:deliver(ClientInfo, Pendings, Session1) of case emqx_session:deliver(ClientInfo, Pendings, Session1) of
{ok, Session2} -> {ok, Session2} ->
{ok, Publishes, Session2}; {ok, Publishes, Session2};

View File

@ -277,65 +277,24 @@ open_session(true, ClientInfo = #{clientid := ClientId}, ConnInfo) ->
Self = self(), Self = self(),
CleanStart = fun(_) -> CleanStart = fun(_) ->
ok = discard_session(ClientId), ok = discard_session(ClientId),
ok = emqx_persistent_session:discard_if_present(ClientId), ok = emqx_session:destroy(ClientId),
Session = create_session(ClientInfo, ConnInfo), create_register_session(ClientInfo, ConnInfo, Self)
Session1 = emqx_persistent_session:persist(ClientInfo, ConnInfo, Session),
register_channel(ClientId, Self, ConnInfo),
{ok, #{session => Session1, present => false}}
end, end,
emqx_cm_locker:trans(ClientId, CleanStart); emqx_cm_locker:trans(ClientId, CleanStart);
open_session(false, ClientInfo = #{clientid := ClientId}, ConnInfo) -> open_session(false, ClientInfo = #{clientid := ClientId}, ConnInfo) ->
Self = self(), Self = self(),
ResumeStart = fun(_) -> ResumeStart = fun(_) ->
CreateSess =
fun() ->
Session = create_session(ClientInfo, ConnInfo),
Session1 = emqx_persistent_session:persist(
ClientInfo, ConnInfo, Session
),
register_channel(ClientId, Self, ConnInfo),
{ok, #{session => Session1, present => false}}
end,
case takeover_session(ClientId) of case takeover_session(ClientId) of
{persistent, Session} ->
%% This is a persistent session without a managing process.
{Session1, Pendings} =
emqx_persistent_session:resume(ClientInfo, ConnInfo, Session),
register_channel(ClientId, Self, ConnInfo),
{ok, #{
session => clean_session(Session1),
present => true,
pendings => clean_pendings(Pendings)
}};
{living, ConnMod, ChanPid, Session} -> {living, ConnMod, ChanPid, Session} ->
ok = emqx_session:resume(ClientInfo, Session), ok = emqx_session:resume(ClientInfo, Session),
case wrap_rpc(emqx_cm_proto_v2:takeover_finish(ConnMod, ChanPid)) of case wrap_rpc(emqx_cm_proto_v2:takeover_finish(ConnMod, ChanPid)) of
{ok, Pendings} -> {ok, Pendings} ->
Session1 = emqx_persistent_session:persist( clean_register_session(Session, Pendings, ClientInfo, ConnInfo, Self);
ClientInfo, ConnInfo, Session
),
register_channel(ClientId, Self, ConnInfo),
{ok, #{
session => clean_session(Session1),
present => true,
pendings => clean_pendings(Pendings)
}};
{error, _} -> {error, _} ->
CreateSess() create_register_session(ClientInfo, ConnInfo, Self)
end; end;
{expired, OldSession} ->
_ = emqx_persistent_session:discard(ClientId, OldSession),
Session = create_session(ClientInfo, ConnInfo),
Session1 = emqx_persistent_session:persist(
ClientInfo,
ConnInfo,
Session
),
register_channel(ClientId, Self, ConnInfo),
{ok, #{session => Session1, present => false}};
none -> none ->
CreateSess() create_register_session(ClientInfo, ConnInfo, Self)
end end
end, end,
emqx_cm_locker:trans(ClientId, ResumeStart). emqx_cm_locker:trans(ClientId, ResumeStart).
@ -347,6 +306,19 @@ create_session(ClientInfo, ConnInfo) ->
ok = emqx_hooks:run('session.created', [ClientInfo, emqx_session:info(Session)]), ok = emqx_hooks:run('session.created', [ClientInfo, emqx_session:info(Session)]),
Session. Session.
create_register_session(ClientInfo = #{clientid := ClientId}, ConnInfo, ChanPid) ->
Session = create_session(ClientInfo, ConnInfo),
ok = register_channel(ClientId, ChanPid, ConnInfo),
{ok, #{session => Session, present => false}}.
clean_register_session(Session, Pendings, #{clientid := ClientId}, ConnInfo, ChanPid) ->
ok = register_channel(ClientId, ChanPid, ConnInfo),
{ok, #{
session => clean_session(Session),
present => true,
pendings => clean_pendings(Pendings)
}}.
get_session_confs(#{zone := Zone, clientid := ClientId}, #{ get_session_confs(#{zone := Zone, clientid := ClientId}, #{
receive_maximum := MaxInflight, expiry_interval := EI receive_maximum := MaxInflight, expiry_interval := EI
}) -> }) ->
@ -385,7 +357,7 @@ get_mqtt_conf(Zone, Key) ->
takeover_session(ClientId) -> takeover_session(ClientId) ->
case lookup_channels(ClientId) of case lookup_channels(ClientId) of
[] -> [] ->
emqx_persistent_session:lookup(ClientId); emqx_session:lookup(ClientId);
[ChanPid] -> [ChanPid] ->
takeover_session(ClientId, ChanPid); takeover_session(ClientId, ChanPid);
ChanPids -> ChanPids ->
@ -417,16 +389,16 @@ takeover_session(ClientId, Pid) ->
%% request_stepdown/3 %% request_stepdown/3
R == unexpected_exception R == unexpected_exception
-> ->
emqx_persistent_session:lookup(ClientId); emqx_session:lookup(ClientId);
% rpc_call/3 % rpc_call/3
_:{'EXIT', {noproc, _}} -> _:{'EXIT', {noproc, _}} ->
emqx_persistent_session:lookup(ClientId) emqx_session:lookup(ClientId)
end. end.
do_takeover_session(ClientId, ChanPid) when node(ChanPid) == node() -> do_takeover_session(ClientId, ChanPid) when node(ChanPid) == node() ->
case get_chann_conn_mod(ClientId, ChanPid) of case get_chann_conn_mod(ClientId, ChanPid) of
undefined -> undefined ->
emqx_persistent_session:lookup(ClientId); emqx_session:lookup(ClientId);
ConnMod when is_atom(ConnMod) -> ConnMod when is_atom(ConnMod) ->
case request_stepdown({takeover, 'begin'}, ConnMod, ChanPid) of case request_stepdown({takeover, 'begin'}, ConnMod, ChanPid) of
{ok, Session} -> {ok, Session} ->
@ -734,7 +706,11 @@ code_change(_OldVsn, State, _Extra) ->
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
clean_down({ChanPid, ClientId}) -> clean_down({ChanPid, ClientId}) ->
do_unregister_channel({ClientId, ChanPid}), try
do_unregister_channel({ClientId, ChanPid})
catch
error:badarg -> ok
end,
ok = ?tp(debug, emqx_cm_clean_down, #{client_id => ClientId}). ok = ?tp(debug, emqx_cm_clean_down, #{client_id => ClientId}).
stats_fun() -> stats_fun() ->

View File

@ -641,7 +641,7 @@ backup_and_write(Path, Content) ->
?SLOG(error, #{ ?SLOG(error, #{
msg => "failed_to_save_conf_file", msg => "failed_to_save_conf_file",
hint => hint =>
"The updated cluster config is note saved on this node, please check the file system.", "The updated cluster config is not saved on this node, please check the file system.",
filename => TmpFile, filename => TmpFile,
reason => Reason reason => Reason
}), }),

View File

@ -0,0 +1,86 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2021-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_persistent_session_ds).
-export([init/0]).
-export([persist_message/1]).
-export([
serialize_message/1,
deserialize_message/1
]).
%% FIXME
-define(DS_SHARD, <<"local">>).
-define(WHEN_ENABLED(DO),
case is_store_enabled() of
true -> DO;
false -> {skipped, disabled}
end
).
%%
init() ->
?WHEN_ENABLED(
ok = emqx_ds:ensure_shard(?DS_SHARD, #{
dir => filename:join([emqx:data_dir(), ds, messages, ?DS_SHARD])
})
).
%%
-spec persist_message(emqx_types:message()) ->
ok | {skipped, _Reason} | {error, _TODO}.
persist_message(Msg) ->
?WHEN_ENABLED(
case needs_persistence(Msg) andalso find_subscribers(Msg) of
[_ | _] ->
store_message(Msg);
% [] ->
% {skipped, no_subscribers};
false ->
{skipped, needs_no_persistence}
end
).
needs_persistence(Msg) ->
not (emqx_message:get_flag(dup, Msg) orelse emqx_message:is_sys(Msg)).
store_message(Msg) ->
ID = emqx_message:id(Msg),
Timestamp = emqx_guid:timestamp(ID),
Topic = emqx_topic:words(emqx_message:topic(Msg)),
emqx_ds_storage_layer:store(?DS_SHARD, ID, Timestamp, Topic, serialize_message(Msg)).
find_subscribers(_Msg) ->
[node()].
%%
serialize_message(Msg) ->
term_to_binary(emqx_message:to_map(Msg)).
deserialize_message(Bin) ->
emqx_message:from_map(binary_to_term(Bin)).
%%
is_store_enabled() ->
emqx_config:get([persistent_session_store, ds]).

View File

@ -319,6 +319,14 @@ fields("persistent_session_store") ->
desc => ?DESC(persistent_session_store_enabled) desc => ?DESC(persistent_session_store_enabled)
} }
)}, )},
{"ds",
sc(
boolean(),
#{
default => false,
importance => ?IMPORTANCE_HIDDEN
}
)},
{"on_disc", {"on_disc",
sc( sc(
boolean(), boolean(),

View File

@ -47,19 +47,23 @@
-include("emqx_mqtt.hrl"). -include("emqx_mqtt.hrl").
-include("logger.hrl"). -include("logger.hrl").
-include("types.hrl"). -include("types.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-ifdef(TEST). -ifdef(TEST).
-compile(export_all). -compile(export_all).
-compile(nowarn_export_all). -compile(nowarn_export_all).
-endif. -endif.
-export([
lookup/1,
destroy/1,
unpersist/1
]).
-export([init/1]). -export([init/1]).
-export([ -export([
info/1, info/1,
info/2, info/2,
is_session/1,
stats/1, stats/1,
obtain_next_pkt_id/1, obtain_next_pkt_id/1,
get_mqueue/1 get_mqueue/1
@ -83,7 +87,6 @@
enqueue/3, enqueue/3,
dequeue/2, dequeue/2,
filter_queue/2, filter_queue/2,
ignore_local/4,
retry/2, retry/2,
terminate/3 terminate/3
]). ]).
@ -226,13 +229,27 @@ init(Opts) ->
created_at = erlang:system_time(millisecond) created_at = erlang:system_time(millisecond)
}. }.
-spec lookup(emqx_types:clientid()) -> none.
lookup(_ClientId) ->
% NOTE
% This is a stub. This session impl has no backing store, thus always `none`.
none.
-spec destroy(emqx_types:clientid()) -> ok.
destroy(_ClientId) ->
% NOTE
% This is a stub. This session impl has no backing store, thus always `ok`.
ok.
-spec unpersist(session()) -> session().
unpersist(Session) ->
ok = destroy(Session#session.clientid),
Session#session{is_persistent = false}.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Info, Stats %% Info, Stats
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
is_session(#session{}) -> true;
is_session(_) -> false.
%% @doc Get infos of the session. %% @doc Get infos of the session.
-spec info(session()) -> emqx_types:infos(). -spec info(session()) -> emqx_types:infos().
info(Session) -> info(Session) ->
@ -242,6 +259,8 @@ info(Keys, Session) when is_list(Keys) ->
[{Key, info(Key, Session)} || Key <- Keys]; [{Key, info(Key, Session)} || Key <- Keys];
info(id, #session{id = Id}) -> info(id, #session{id = Id}) ->
Id; Id;
info(clientid, #session{clientid = ClientId}) ->
ClientId;
info(is_persistent, #session{is_persistent = Bool}) -> info(is_persistent, #session{is_persistent = Bool}) ->
Bool; Bool;
info(subscriptions, #session{subscriptions = Subs}) -> info(subscriptions, #session{subscriptions = Subs}) ->
@ -285,27 +304,6 @@ info(created_at, #session{created_at = CreatedAt}) ->
-spec stats(session()) -> emqx_types:stats(). -spec stats(session()) -> emqx_types:stats().
stats(Session) -> info(?STATS_KEYS, Session). stats(Session) -> info(?STATS_KEYS, Session).
%%--------------------------------------------------------------------
%% Ignore local messages
%%--------------------------------------------------------------------
ignore_local(ClientInfo, Delivers, Subscriber, Session) ->
Subs = info(subscriptions, Session),
lists:filter(
fun({deliver, Topic, #message{from = Publisher} = Msg}) ->
case maps:find(Topic, Subs) of
{ok, #{nl := 1}} when Subscriber =:= Publisher ->
ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, no_local]),
ok = emqx_metrics:inc('delivery.dropped'),
ok = emqx_metrics:inc('delivery.dropped.no_local'),
false;
_ ->
true
end
end,
Delivers
).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Client -> Broker: SUBSCRIBE %% Client -> Broker: SUBSCRIBE
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -321,13 +319,12 @@ subscribe(
ClientInfo = #{clientid := ClientId}, ClientInfo = #{clientid := ClientId},
TopicFilter, TopicFilter,
SubOpts, SubOpts,
Session = #session{id = SessionID, is_persistent = IsPS, subscriptions = Subs} Session = #session{subscriptions = Subs}
) -> ) ->
IsNew = not maps:is_key(TopicFilter, Subs), IsNew = not maps:is_key(TopicFilter, Subs),
case IsNew andalso is_subscriptions_full(Session) of case IsNew andalso is_subscriptions_full(Session) of
false -> false ->
ok = emqx_broker:subscribe(TopicFilter, ClientId, SubOpts), ok = emqx_broker:subscribe(TopicFilter, ClientId, SubOpts),
ok = emqx_persistent_session:add_subscription(TopicFilter, SessionID, IsPS),
ok = emqx_hooks:run( ok = emqx_hooks:run(
'session.subscribed', 'session.subscribed',
[ClientInfo, TopicFilter, SubOpts#{is_new => IsNew}] [ClientInfo, TopicFilter, SubOpts#{is_new => IsNew}]
@ -355,12 +352,11 @@ unsubscribe(
ClientInfo, ClientInfo,
TopicFilter, TopicFilter,
UnSubOpts, UnSubOpts,
Session = #session{id = SessionID, subscriptions = Subs, is_persistent = IsPS} Session = #session{subscriptions = Subs}
) -> ) ->
case maps:find(TopicFilter, Subs) of case maps:find(TopicFilter, Subs) of
{ok, SubOpts} -> {ok, SubOpts} ->
ok = emqx_broker:unsubscribe(TopicFilter), ok = emqx_broker:unsubscribe(TopicFilter),
ok = emqx_persistent_session:remove_subscription(TopicFilter, SessionID, IsPS),
ok = emqx_hooks:run( ok = emqx_hooks:run(
'session.unsubscribed', 'session.unsubscribed',
[ClientInfo, TopicFilter, maps:merge(SubOpts, UnSubOpts)] [ClientInfo, TopicFilter, maps:merge(SubOpts, UnSubOpts)]
@ -588,7 +584,10 @@ deliver_msg(
MarkedMsg = mark_begin_deliver(Msg), MarkedMsg = mark_begin_deliver(Msg),
Inflight1 = emqx_inflight:insert(PacketId, with_ts(MarkedMsg), Inflight), Inflight1 = emqx_inflight:insert(PacketId, with_ts(MarkedMsg), Inflight),
{ok, [Publish], next_pkt_id(Session#session{inflight = Inflight1})} {ok, [Publish], next_pkt_id(Session#session{inflight = Inflight1})}
end. end;
deliver_msg(ClientInfo, {drop, Msg, Reason}, Session) ->
handle_dropped(ClientInfo, Msg, Reason, Session),
{ok, Session}.
-spec enqueue( -spec enqueue(
emqx_types:clientinfo(), emqx_types:clientinfo(),
@ -607,7 +606,10 @@ enqueue(ClientInfo, Delivers, Session) when is_list(Delivers) ->
enqueue(ClientInfo, #message{} = Msg, Session = #session{mqueue = Q}) -> enqueue(ClientInfo, #message{} = Msg, Session = #session{mqueue = Q}) ->
{Dropped, NewQ} = emqx_mqueue:in(Msg, Q), {Dropped, NewQ} = emqx_mqueue:in(Msg, Q),
(Dropped =/= undefined) andalso handle_dropped(ClientInfo, Dropped, Session), (Dropped =/= undefined) andalso handle_dropped(ClientInfo, Dropped, Session),
Session#session{mqueue = NewQ}. Session#session{mqueue = NewQ};
enqueue(ClientInfo, {drop, Msg, Reason}, Session) ->
handle_dropped(ClientInfo, Msg, Reason, Session),
Session.
handle_dropped(ClientInfo, Msg = #message{qos = QoS, topic = Topic}, #session{mqueue = Q}) -> handle_dropped(ClientInfo, Msg = #message{qos = QoS, topic = Topic}, #session{mqueue = Q}) ->
Payload = emqx_message:to_log_map(Msg), Payload = emqx_message:to_log_map(Msg),
@ -644,8 +646,18 @@ handle_dropped(ClientInfo, Msg = #message{qos = QoS, topic = Topic}, #session{mq
) )
end. end.
handle_dropped(ClientInfo, Msg, Reason, _Session) ->
ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, Reason]),
ok = emqx_metrics:inc('delivery.dropped'),
ok = emqx_metrics:inc('delivery.dropped.no_local').
enrich_deliver({deliver, Topic, Msg}, Session = #session{subscriptions = Subs}) -> enrich_deliver({deliver, Topic, Msg}, Session = #session{subscriptions = Subs}) ->
enrich_subopts(get_subopts(Topic, Subs), Msg, Session). enrich_deliver(Msg, maps:find(Topic, Subs), Session).
enrich_deliver(Msg = #message{from = ClientId}, {ok, #{nl := 1}}, #session{clientid = ClientId}) ->
{drop, Msg, no_local};
enrich_deliver(Msg, SubOpts, Session) ->
enrich_subopts(mk_subopts(SubOpts), Msg, Session).
maybe_ack(Msg) -> maybe_ack(Msg) ->
emqx_shared_sub:maybe_ack(Msg). emqx_shared_sub:maybe_ack(Msg).
@ -653,8 +665,8 @@ maybe_ack(Msg) ->
maybe_nack(Msg) -> maybe_nack(Msg) ->
emqx_shared_sub:maybe_nack_dropped(Msg). emqx_shared_sub:maybe_nack_dropped(Msg).
get_subopts(Topic, SubMap) -> mk_subopts(SubOpts) ->
case maps:find(Topic, SubMap) of case SubOpts of
{ok, #{nl := Nl, qos := QoS, rap := Rap, subid := SubId}} -> {ok, #{nl := Nl, qos := QoS, rap := Rap, subid := SubId}} ->
[{nl, Nl}, {qos, QoS}, {rap, Rap}, {subid, SubId}]; [{nl, Nl}, {qos, QoS}, {rap, Rap}, {subid, SubId}];
{ok, #{nl := Nl, qos := QoS, rap := Rap}} -> {ok, #{nl := Nl, qos := QoS, rap := Rap}} ->

View File

@ -272,7 +272,7 @@ remove_subscription(_TopicFilter, _SessionID, false = _IsPersistent) ->
%% Must be called inside a emqx_cm_locker transaction. %% Must be called inside a emqx_cm_locker transaction.
-spec resume(emqx_types:clientinfo(), emqx_types:conninfo(), emqx_session:session()) -> -spec resume(emqx_types:clientinfo(), emqx_types:conninfo(), emqx_session:session()) ->
{emqx_session:session(), [emqx_types:deliver()]}. {emqx_session:session(), [emqx_types:deliver()]}.
resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) -> resume(ClientInfo, ConnInfo, Session) ->
SessionID = emqx_session:info(id, Session), SessionID = emqx_session:info(id, Session),
?tp(ps_resuming, #{from => db, sid => SessionID}), ?tp(ps_resuming, #{from => db, sid => SessionID}),
@ -281,7 +281,6 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) ->
%% 1. Get pending messages from DB. %% 1. Get pending messages from DB.
?tp(ps_initial_pendings, #{sid => SessionID}), ?tp(ps_initial_pendings, #{sid => SessionID}),
Pendings1 = pending(SessionID), Pendings1 = pending(SessionID),
Pendings2 = emqx_session:ignore_local(ClientInfo, Pendings1, ClientID, Session),
?tp(ps_got_initial_pendings, #{ ?tp(ps_got_initial_pendings, #{
sid => SessionID, sid => SessionID,
msgs => Pendings1 msgs => Pendings1
@ -290,11 +289,11 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) ->
%% 2. Enqueue messages to mimic that the process was alive %% 2. Enqueue messages to mimic that the process was alive
%% when the messages were delivered. %% when the messages were delivered.
?tp(ps_persist_pendings, #{sid => SessionID}), ?tp(ps_persist_pendings, #{sid => SessionID}),
Session1 = emqx_session:enqueue(ClientInfo, Pendings2, Session), Session1 = emqx_session:enqueue(ClientInfo, Pendings1, Session),
Session2 = persist(ClientInfo, ConnInfo, Session1), Session2 = persist(ClientInfo, ConnInfo, Session1),
mark_as_delivered(SessionID, Pendings2), mark_as_delivered(SessionID, Pendings1),
?tp(ps_persist_pendings_msgs, #{ ?tp(ps_persist_pendings_msgs, #{
msgs => Pendings2, msgs => Pendings1,
sid => SessionID sid => SessionID
}), }),
@ -312,11 +311,10 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) ->
%% 5. Get pending messages from DB until we find all markers. %% 5. Get pending messages from DB until we find all markers.
?tp(ps_marker_pendings, #{sid => SessionID}), ?tp(ps_marker_pendings, #{sid => SessionID}),
MarkerIDs = [Marker || {_, Marker} <- NodeMarkers], MarkerIDs = [Marker || {_, Marker} <- NodeMarkers],
Pendings3 = pending(SessionID, MarkerIDs), Pendings2 = pending(SessionID, MarkerIDs),
Pendings4 = emqx_session:ignore_local(ClientInfo, Pendings3, ClientID, Session),
?tp(ps_marker_pendings_msgs, #{ ?tp(ps_marker_pendings_msgs, #{
sid => SessionID, sid => SessionID,
msgs => Pendings4 msgs => Pendings2
}), }),
%% 6. Get pending messages from writers. %% 6. Get pending messages from writers.
@ -329,7 +327,7 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) ->
%% 7. Drain the inbox and usort the messages %% 7. Drain the inbox and usort the messages
%% with the pending messages. (Should be done by caller.) %% with the pending messages. (Should be done by caller.)
{Session2, Pendings4 ++ WriterPendings}. {Session2, Pendings2 ++ WriterPendings}.
resume_begin(Nodes, SessionID) -> resume_begin(Nodes, SessionID) ->
Res = emqx_persistent_session_proto_v1:resume_begin(Nodes, self(), SessionID), Res = emqx_persistent_session_proto_v1:resume_begin(Nodes, self(), SessionID),

View File

@ -584,7 +584,7 @@ t_handle_deliver(_) ->
t_handle_deliver_nl(_) -> t_handle_deliver_nl(_) ->
ClientInfo = clientinfo(#{clientid => <<"clientid">>}), ClientInfo = clientinfo(#{clientid => <<"clientid">>}),
Session = session(#{subscriptions => #{<<"t1">> => #{nl => 1}}}), Session = session(ClientInfo, #{subscriptions => #{<<"t1">> => #{nl => 1}}}),
Channel = channel(#{clientinfo => ClientInfo, session => Session}), Channel = channel(#{clientinfo => ClientInfo, session => Session}),
Msg = emqx_message:make(<<"clientid">>, ?QOS_1, <<"t1">>, <<"qos1">>), Msg = emqx_message:make(<<"clientid">>, ?QOS_1, <<"t1">>, <<"qos1">>),
NMsg = emqx_message:set_flag(nl, Msg), NMsg = emqx_message:set_flag(nl, Msg),
@ -1071,11 +1071,14 @@ connpkt(Props) ->
password = <<"passwd">> password = <<"passwd">>
}. }.
session() -> session(#{}). session() -> session(#{zone => default, clientid => <<"fake-test">>}, #{}).
session(InitFields) when is_map(InitFields) -> session(InitFields) -> session(#{zone => default, clientid => <<"fake-test">>}, InitFields).
session(ClientInfo, InitFields) when is_map(InitFields) ->
Conf = emqx_cm:get_session_confs( Conf = emqx_cm:get_session_confs(
#{zone => default, clientid => <<"fake-test">>}, #{ ClientInfo,
receive_maximum => 0, expiry_interval => 0 #{
receive_maximum => 0,
expiry_interval => 0
} }
), ),
Session = emqx_session:init(Conf), Session = emqx_session:init(Conf),

View File

@ -0,0 +1,116 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_persistent_messages_SUITE).
-include_lib("stdlib/include/assert.hrl").
-compile(export_all).
-compile(nowarn_export_all).
-define(NOW,
(calendar:system_time_to_rfc3339(erlang:system_time(millisecond), [{unit, millisecond}]))
).
all() ->
emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
{ok, _} = application:ensure_all_started(emqx_durable_storage),
ok = emqx_common_test_helpers:start_apps([], fun
(emqx) ->
emqx_common_test_helpers:boot_modules(all),
emqx_config:init_load(emqx_schema, <<"persistent_session_store.ds = true">>),
emqx_app:set_config_loader(?MODULE);
(_) ->
ok
end),
Config.
end_per_suite(_Config) ->
emqx_common_test_helpers:stop_apps([]),
application:stop(emqx_durable_storage),
ok.
t_messages_persisted(_Config) ->
C1 = connect(<<?MODULE_STRING "1">>, true, 30),
C2 = connect(<<?MODULE_STRING "2">>, false, 60),
C3 = connect(<<?MODULE_STRING "3">>, false, undefined),
C4 = connect(<<?MODULE_STRING "4">>, false, 0),
CP = connect(<<?MODULE_STRING "-pub">>, true, undefined),
{ok, _, [1]} = emqtt:subscribe(C1, <<"client/+/topic">>, qos1),
{ok, _, [0]} = emqtt:subscribe(C2, <<"client/+/topic">>, qos0),
{ok, _, [1]} = emqtt:subscribe(C2, <<"random/+">>, qos1),
{ok, _, [2]} = emqtt:subscribe(C3, <<"client/#">>, qos2),
{ok, _, [0]} = emqtt:subscribe(C4, <<"random/#">>, qos0),
Messages = [
M1 = {<<"client/1/topic">>, <<"1">>},
M2 = {<<"client/2/topic">>, <<"2">>},
M3 = {<<"client/3/topic/sub">>, <<"3">>},
M4 = {<<"client/4">>, <<"4">>},
M5 = {<<"random/5">>, <<"5">>},
M6 = {<<"random/6/topic">>, <<"6">>},
M7 = {<<"client/7/topic">>, <<"7">>},
M8 = {<<"client/8/topic/sub">>, <<"8">>},
M9 = {<<"random/9">>, <<"9">>},
M10 = {<<"random/10">>, <<"10">>}
],
Results = [emqtt:publish(CP, Topic, Payload, 1) || {Topic, Payload} <- Messages],
ct:pal("Results = ~p", [Results]),
Persisted = consume(<<"local">>, {['#'], 0}),
ct:pal("Persisted = ~p", [Persisted]),
?assertEqual(
% [M1, M2, M5, M7, M9, M10],
[M1, M2, M3, M4, M5, M6, M7, M8, M9, M10],
[{emqx_message:topic(M), emqx_message:payload(M)} || M <- Persisted]
),
ok.
%%
connect(ClientId, CleanStart, EI) ->
{ok, Client} = emqtt:start_link([
{clientid, ClientId},
{proto_ver, v5},
{clean_start, CleanStart},
{properties,
maps:from_list(
[{'Session-Expiry-Interval', EI} || is_integer(EI)]
)}
]),
{ok, _} = emqtt:connect(Client),
Client.
consume(Shard, Replay) ->
{ok, It} = emqx_ds_storage_layer:make_iterator(Shard, Replay),
consume(It).
consume(It) ->
case emqx_ds_storage_layer:next(It) of
{value, Msg, NIt} ->
[emqx_persistent_session_ds:deserialize_message(Msg) | consume(NIt)];
none ->
[]
end.

View File

@ -31,7 +31,10 @@
all() -> all() ->
[ [
{group, persistent_store_enabled}, % NOTE
% Tests are disabled while existing session persistence impl is being
% phased out.
% {group, persistent_store_enabled},
{group, persistent_store_disabled} {group, persistent_store_disabled}
]. ].

View File

@ -88,7 +88,8 @@
T == sqlserver; T == sqlserver;
T == pulsar_producer; T == pulsar_producer;
T == oracle; T == oracle;
T == iotdb T == iotdb;
T == kinesis_producer
). ).
-define(ROOT_KEY, bridges). -define(ROOT_KEY, bridges).

View File

@ -546,7 +546,12 @@ schema("/bridges_probe") ->
?NO_CONTENT; ?NO_CONTENT;
{error, #{kind := validation_error} = Reason} -> {error, #{kind := validation_error} = Reason} ->
?BAD_REQUEST('TEST_FAILED', map_to_json(Reason)); ?BAD_REQUEST('TEST_FAILED', map_to_json(Reason));
{error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' -> {error, Reason0} when not is_tuple(Reason0); element(1, Reason0) =/= 'exit' ->
Reason =
case Reason0 of
{unhealthy_target, Message} -> Message;
_ -> Reason0
end,
?BAD_REQUEST('TEST_FAILED', Reason) ?BAD_REQUEST('TEST_FAILED', Reason)
end; end;
BadRequest -> BadRequest ->

View File

@ -374,6 +374,8 @@ parse_confs(<<"kafka">> = _Type, Name, Conf) ->
Conf#{bridge_name => Name}; Conf#{bridge_name => Name};
parse_confs(<<"pulsar_producer">> = _Type, Name, Conf) -> parse_confs(<<"pulsar_producer">> = _Type, Name, Conf) ->
Conf#{bridge_name => Name}; Conf#{bridge_name => Name};
parse_confs(<<"kinesis_producer">> = _Type, Name, Conf) ->
Conf#{bridge_name => Name};
parse_confs(_Type, _Name, Conf) -> parse_confs(_Type, _Name, Conf) ->
Conf. Conf.

View File

@ -20,8 +20,7 @@ api_schemas(Method) ->
%% We need to map the `type' field of a request (binary) to a %% We need to map the `type' field of a request (binary) to a
%% bridge schema module. %% bridge schema module.
api_ref(emqx_bridge_gcp_pubsub, <<"gcp_pubsub">>, Method ++ "_producer"), api_ref(emqx_bridge_gcp_pubsub, <<"gcp_pubsub">>, Method ++ "_producer"),
%% TODO: un-hide for e5.2.0... api_ref(emqx_bridge_gcp_pubsub, <<"gcp_pubsub_consumer">>, Method ++ "_consumer"),
%% api_ref(emqx_bridge_gcp_pubsub, <<"gcp_pubsub_consumer">>, Method ++ "_consumer"),
api_ref(emqx_bridge_kafka, <<"kafka_consumer">>, Method ++ "_consumer"), api_ref(emqx_bridge_kafka, <<"kafka_consumer">>, Method ++ "_consumer"),
%% TODO: rename this to `kafka_producer' after alias support is added %% TODO: rename this to `kafka_producer' after alias support is added
%% to hocon; keeping this as just `kafka' for backwards compatibility. %% to hocon; keeping this as just `kafka' for backwards compatibility.
@ -49,7 +48,8 @@ api_schemas(Method) ->
api_ref(emqx_bridge_pulsar, <<"pulsar_producer">>, Method ++ "_producer"), api_ref(emqx_bridge_pulsar, <<"pulsar_producer">>, Method ++ "_producer"),
api_ref(emqx_bridge_oracle, <<"oracle">>, Method), api_ref(emqx_bridge_oracle, <<"oracle">>, Method),
api_ref(emqx_bridge_iotdb, <<"iotdb">>, Method), api_ref(emqx_bridge_iotdb, <<"iotdb">>, Method),
api_ref(emqx_bridge_rabbitmq, <<"rabbitmq">>, Method) api_ref(emqx_bridge_rabbitmq, <<"rabbitmq">>, Method),
api_ref(emqx_bridge_kinesis, <<"kinesis_producer">>, Method ++ "_producer")
]. ].
schema_modules() -> schema_modules() ->
@ -74,7 +74,8 @@ schema_modules() ->
emqx_bridge_pulsar, emqx_bridge_pulsar,
emqx_bridge_oracle, emqx_bridge_oracle,
emqx_bridge_iotdb, emqx_bridge_iotdb,
emqx_bridge_rabbitmq emqx_bridge_rabbitmq,
emqx_bridge_kinesis
]. ].
examples(Method) -> examples(Method) ->
@ -119,7 +120,8 @@ resource_type(opents) -> emqx_bridge_opents_connector;
resource_type(pulsar_producer) -> emqx_bridge_pulsar_impl_producer; resource_type(pulsar_producer) -> emqx_bridge_pulsar_impl_producer;
resource_type(oracle) -> emqx_oracle; resource_type(oracle) -> emqx_oracle;
resource_type(iotdb) -> emqx_bridge_iotdb_impl; resource_type(iotdb) -> emqx_bridge_iotdb_impl;
resource_type(rabbitmq) -> emqx_bridge_rabbitmq_connector. resource_type(rabbitmq) -> emqx_bridge_rabbitmq_connector;
resource_type(kinesis_producer) -> emqx_bridge_kinesis_impl_producer.
fields(bridges) -> fields(bridges) ->
[ [
@ -199,7 +201,8 @@ fields(bridges) ->
] ++ kafka_structs() ++ pulsar_structs() ++ gcp_pubsub_structs() ++ mongodb_structs() ++ ] ++ kafka_structs() ++ pulsar_structs() ++ gcp_pubsub_structs() ++ mongodb_structs() ++
influxdb_structs() ++ influxdb_structs() ++
redis_structs() ++ redis_structs() ++
pgsql_structs() ++ clickhouse_structs() ++ sqlserver_structs() ++ rabbitmq_structs(). pgsql_structs() ++ clickhouse_structs() ++ sqlserver_structs() ++ rabbitmq_structs() ++
kinesis_structs().
mongodb_structs() -> mongodb_structs() ->
[ [
@ -263,7 +266,6 @@ gcp_pubsub_structs() ->
hoconsc:map(name, ref(emqx_bridge_gcp_pubsub, "config_consumer")), hoconsc:map(name, ref(emqx_bridge_gcp_pubsub, "config_consumer")),
#{ #{
desc => <<"EMQX Enterprise Config">>, desc => <<"EMQX Enterprise Config">>,
importance => ?IMPORTANCE_HIDDEN,
required => false required => false
} }
)} )}
@ -365,6 +367,18 @@ rabbitmq_structs() ->
)} )}
]. ].
kinesis_structs() ->
[
{kinesis_producer,
mk(
hoconsc:map(name, ref(emqx_bridge_kinesis, "config_producer")),
#{
desc => <<"Amazon Kinesis Producer Bridge Config">>,
required => false
}
)}
].
api_ref(Module, Type, Method) -> api_ref(Module, Type, Method) ->
{Type, ref(Module, Method)}. {Type, ref(Module, Method)}.

View File

@ -1,6 +1,6 @@
%% -*- mode: erlang; -*- %% -*- mode: erlang; -*-
{erl_opts, [debug_info]}. {erl_opts, [debug_info]}.
{deps, [ {erlcloud, {git, "https://github.com/emqx/erlcloud", {tag, "3.7.0-emqx-1"}}} {deps, [ {erlcloud, {git, "https://github.com/emqx/erlcloud", {tag, "3.7.0-emqx-2"}}}
, {emqx_connector, {path, "../../apps/emqx_connector"}} , {emqx_connector, {path, "../../apps/emqx_connector"}}
, {emqx_resource, {path, "../../apps/emqx_resource"}} , {emqx_resource, {path, "../../apps/emqx_resource"}}
, {emqx_bridge, {path, "../../apps/emqx_bridge"}} , {emqx_bridge, {path, "../../apps/emqx_bridge"}}

View File

@ -249,6 +249,8 @@ check_workers(InstanceId, Client) ->
#{return_values => true} #{return_values => true}
) )
of of
{ok, []} ->
connecting;
{ok, Values} -> {ok, Values} ->
AllOk = lists:all(fun(S) -> S =:= subscription_ok end, Values), AllOk = lists:all(fun(S) -> S =:= subscription_ok end, Values),
case AllOk of case AllOk of

View File

@ -908,16 +908,15 @@ t_consume_ok(Config) ->
?assertEqual(3, emqx_resource_metrics:received_get(ResourceId)) ?assertEqual(3, emqx_resource_metrics:received_get(ResourceId))
), ),
%% FIXME: uncomment after API spec is un-hidden... %% Check that the bridge probe API doesn't leak atoms.
%% %% Check that the bridge probe API doesn't leak atoms. ProbeRes0 = probe_bridge_api(Config),
%% ProbeRes0 = probe_bridge_api(Config), ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
%% ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), AtomsBefore = erlang:system_info(atom_count),
%% AtomsBefore = erlang:system_info(atom_count), %% Probe again; shouldn't have created more atoms.
%% %% Probe again; shouldn't have created more atoms. ProbeRes1 = probe_bridge_api(Config),
%% ProbeRes1 = probe_bridge_api(Config), ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
%% ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), AtomsAfter = erlang:system_info(atom_count),
%% AtomsAfter = erlang:system_info(atom_count), ?assertEqual(AtomsBefore, AtomsAfter),
%% ?assertEqual(AtomsBefore, AtomsAfter),
assert_non_received_metrics(BridgeName), assert_non_received_metrics(BridgeName),
?block_until( ?block_until(
@ -1010,11 +1009,31 @@ t_bridge_rule_action_source(Config) ->
ok. ok.
t_on_get_status(Config) -> t_on_get_status(Config) ->
ResourceId = resource_id(Config),
emqx_bridge_testlib:t_on_get_status(Config, #{failure_status => connecting}), emqx_bridge_testlib:t_on_get_status(Config, #{failure_status => connecting}),
%% no workers alive
?retry(
_Interval0 = 200,
_NAttempts0 = 20,
?assertMatch({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
WorkerPids = get_pull_worker_pids(Config),
emqx_utils:pmap(
fun(Pid) ->
Ref = monitor(process, Pid),
exit(Pid, kill),
receive
{'DOWN', Ref, process, Pid, killed} ->
ok
end
end,
WorkerPids
),
?assertMatch({ok, connecting}, emqx_resource_manager:health_check(ResourceId)),
ok. ok.
t_create_via_http_api(_Config) -> t_create_update_via_http_api(Config) ->
ct:comment("FIXME: implement after API specs are un-hidden in e5.2.0..."), emqx_bridge_testlib:t_create_via_http(Config),
ok. ok.
t_multiple_topic_mappings(Config) -> t_multiple_topic_mappings(Config) ->
@ -1197,7 +1216,7 @@ t_nonexistent_topic(Config) ->
emqx_resource_manager:health_check(ResourceId) emqx_resource_manager:health_check(ResourceId)
), ),
?assertMatch( ?assertMatch(
{ok, _Group, #{error := "GCP PubSub topics are invalid" ++ _}}, {ok, _Group, #{error := {unhealthy_target, "GCP PubSub topics are invalid" ++ _}}},
emqx_resource_manager:lookup_cached(ResourceId) emqx_resource_manager:lookup_cached(ResourceId)
), ),
%% now create the topic and restart the bridge %% now create the topic and restart the bridge

View File

@ -208,6 +208,7 @@ bridge_async_config(#{port := Port} = Config) ->
ConnectTimeout = maps:get(connect_timeout, Config, "1s"), ConnectTimeout = maps:get(connect_timeout, Config, "1s"),
RequestTimeout = maps:get(request_timeout, Config, "10s"), RequestTimeout = maps:get(request_timeout, Config, "10s"),
ResumeInterval = maps:get(resume_interval, Config, "1s"), ResumeInterval = maps:get(resume_interval, Config, "1s"),
HealthCheckInterval = maps:get(health_check_interval, Config, "200ms"),
ResourceRequestTTL = maps:get(resource_request_ttl, Config, "infinity"), ResourceRequestTTL = maps:get(resource_request_ttl, Config, "infinity"),
LocalTopic = LocalTopic =
case maps:find(local_topic, Config) of case maps:find(local_topic, Config) of
@ -232,7 +233,7 @@ bridge_async_config(#{port := Port} = Config) ->
" body = \"${id}\"\n" " body = \"${id}\"\n"
" resource_opts {\n" " resource_opts {\n"
" inflight_window = 100\n" " inflight_window = 100\n"
" health_check_interval = \"200ms\"\n" " health_check_interval = \"~s\"\n"
" max_buffer_bytes = \"1GB\"\n" " max_buffer_bytes = \"1GB\"\n"
" query_mode = \"~s\"\n" " query_mode = \"~s\"\n"
" request_ttl = \"~p\"\n" " request_ttl = \"~p\"\n"
@ -254,6 +255,7 @@ bridge_async_config(#{port := Port} = Config) ->
LocalTopic, LocalTopic,
PoolSize, PoolSize,
RequestTimeout, RequestTimeout,
HealthCheckInterval,
QueryMode, QueryMode,
ResourceRequestTTL, ResourceRequestTTL,
ResumeInterval ResumeInterval
@ -350,19 +352,27 @@ t_send_async_connection_timeout(Config) ->
port => Port, port => Port,
pool_size => 1, pool_size => 1,
query_mode => "async", query_mode => "async",
connect_timeout => integer_to_list(ResponseDelayMS * 2) ++ "s", connect_timeout => integer_to_list(ResponseDelayMS * 2) ++ "ms",
request_timeout => "10s", request_timeout => "10s",
resume_interval => "200ms",
health_check_interval => "200ms",
resource_request_ttl => "infinity" resource_request_ttl => "infinity"
}), }),
ResourceId = emqx_bridge_resource:resource_id(BridgeID),
?retry(
_Interval0 = 200,
_NAttempts0 = 20,
?assertMatch({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
NumberOfMessagesToSend = 10, NumberOfMessagesToSend = 10,
[ [
emqx_bridge:send_message(BridgeID, #{<<"id">> => Id}) emqx_bridge:send_message(BridgeID, #{<<"id">> => Id})
|| Id <- lists:seq(1, NumberOfMessagesToSend) || Id <- lists:seq(1, NumberOfMessagesToSend)
], ],
%% Make sure server recive all messages %% Make sure server receives all messages
ct:pal("Sent messages\n"), ct:pal("Sent messages\n"),
MessageIDs = maps:from_keys(lists:seq(1, NumberOfMessagesToSend), void), MessageIDs = maps:from_keys(lists:seq(1, NumberOfMessagesToSend), void),
receive_request_notifications(MessageIDs, ResponseDelayMS), receive_request_notifications(MessageIDs, ResponseDelayMS, []),
ok. ok.
t_async_free_retries(Config) -> t_async_free_retries(Config) ->
@ -569,15 +579,16 @@ do_t_async_retries(TestContext, Error, Fn) ->
), ),
ok. ok.
receive_request_notifications(MessageIDs, _ResponseDelay) when map_size(MessageIDs) =:= 0 -> receive_request_notifications(MessageIDs, _ResponseDelay, _Acc) when map_size(MessageIDs) =:= 0 ->
ok; ok;
receive_request_notifications(MessageIDs, ResponseDelay) -> receive_request_notifications(MessageIDs, ResponseDelay, Acc) ->
receive receive
{http_server, received, Req} -> {http_server, received, Req} ->
RemainingMessageIDs = remove_message_id(MessageIDs, Req), RemainingMessageIDs = remove_message_id(MessageIDs, Req),
receive_request_notifications(RemainingMessageIDs, ResponseDelay) receive_request_notifications(RemainingMessageIDs, ResponseDelay, [Req | Acc])
after (30 * 1000) -> after (30 * 1000) ->
ct:pal("Waited to long time but did not get any message\n"), ct:pal("Waited a long time but did not get any message"),
ct:pal("Messages received so far:\n ~p", [Acc]),
ct:fail("All requests did not reach server at least once") ct:fail("All requests did not reach server at least once")
end. end.

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this Licenses text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this Licenses text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,22 @@
# Amazon Kinesis Data Integration Bridge
This application houses the Amazon Kinesis Producer data
integration bridge for EMQX Enterprise Edition. It provides the means to
connect to Amazon Kinesis Data Streams and publish messages to it.
# Documentation links
For more information about Amazon Kinesis Data Streams, please see its
[official site](https://aws.amazon.com/kinesis/data-streams/).
# Configurations
Please see [Ingest Data into Kinesis](https://docs.emqx.com/en/enterprise/v5.1/data-integration/data-bridge-kinesis.html) for more detailed info.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,2 @@
toxiproxy
kinesis

View File

@ -0,0 +1,11 @@
%% -*- mode: erlang; -*-
{erl_opts, [debug_info]}.
{deps, [ {erlcloud, {git, "https://github.com/emqx/erlcloud", {tag, "3.7.0-emqx-2"}}}
, {emqx_connector, {path, "../../apps/emqx_connector"}}
, {emqx_resource, {path, "../../apps/emqx_resource"}}
, {emqx_bridge, {path, "../../apps/emqx_bridge"}}
]}.
{shell, [
{apps, [emqx_bridge_kinesis]}
]}.

View File

@ -0,0 +1,13 @@
{application, emqx_bridge_kinesis, [
{description, "EMQX Enterprise Amazon Kinesis Bridge"},
{vsn, "0.1.0"},
{registered, []},
{applications, [
kernel,
stdlib,
erlcloud
]},
{env, []},
{modules, []},
{links, []}
]}.

View File

@ -0,0 +1,167 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_kinesis).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
%% hocon_schema API
-export([
namespace/0,
roots/0,
fields/1,
desc/1
]).
-export([
conn_bridge_examples/1
]).
%%-------------------------------------------------------------------------------------------------
%% `hocon_schema' API
%%-------------------------------------------------------------------------------------------------
namespace() ->
"bridge_kinesis".
roots() ->
[].
fields("config_producer") ->
emqx_bridge_schema:common_bridge_fields() ++
emqx_resource_schema:fields("resource_opts") ++
fields(connector_config) ++ fields(producer);
fields(connector_config) ->
[
{aws_access_key_id,
mk(
binary(),
#{
required => true,
desc => ?DESC("aws_access_key_id")
}
)},
{aws_secret_access_key,
mk(
binary(),
#{
required => true,
desc => ?DESC("aws_secret_access_key"),
sensitive => true
}
)},
{endpoint,
mk(
binary(),
#{
default => <<"https://kinesis.us-east-1.amazonaws.com">>,
desc => ?DESC("endpoint")
}
)},
{max_retries,
mk(
non_neg_integer(),
#{
required => false,
default => 2,
desc => ?DESC("max_retries")
}
)},
{pool_size,
sc(
pos_integer(),
#{
default => 8,
desc => ?DESC("pool_size")
}
)}
];
fields(producer) ->
[
{payload_template,
sc(
binary(),
#{
default => <<>>,
desc => ?DESC("payload_template")
}
)},
{local_topic,
sc(
binary(),
#{
desc => ?DESC("local_topic")
}
)},
{stream_name,
sc(
binary(),
#{
required => true,
desc => ?DESC("stream_name")
}
)},
{partition_key,
sc(
binary(),
#{
required => true,
desc => ?DESC("partition_key")
}
)}
];
fields("get_producer") ->
emqx_bridge_schema:status_fields() ++ fields("post_producer");
fields("post_producer") ->
[type_field_producer(), name_field() | fields("config_producer")];
fields("put_producer") ->
fields("config_producer").
desc("config_producer") ->
?DESC("desc_config");
desc(_) ->
undefined.
conn_bridge_examples(Method) ->
[
#{
<<"kinesis_producer">> => #{
summary => <<"Amazon Kinesis Producer Bridge">>,
value => values(producer, Method)
}
}
].
values(producer, _Method) ->
#{
aws_access_key_id => <<"aws_access_key_id">>,
aws_secret_access_key => <<"******">>,
endpoint => <<"https://kinesis.us-east-1.amazonaws.com">>,
max_retries => 3,
stream_name => <<"stream_name">>,
partition_key => <<"key">>,
resource_opts => #{
worker_pool_size => 1,
health_check_interval => 15000,
query_mode => async,
inflight_window => 100,
max_buffer_bytes => 100 * 1024 * 1024
}
}.
%%-------------------------------------------------------------------------------------------------
%% Helper fns
%%-------------------------------------------------------------------------------------------------
sc(Type, Meta) -> hoconsc:mk(Type, Meta).
mk(Type, Meta) -> hoconsc:mk(Type, Meta).
enum(OfSymbols) -> hoconsc:enum(OfSymbols).
type_field_producer() ->
{type, mk(enum([kinesis_producer]), #{required => true, desc => ?DESC("desc_type")})}.
name_field() ->
{name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.

View File

@ -0,0 +1,178 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_kinesis_connector_client).
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").
-include_lib("erlcloud/include/erlcloud_aws.hrl").
-behaviour(gen_server).
-type state() :: #{
instance_id := resource_id(),
partition_key := binary(),
stream_name := binary()
}.
-type record() :: {Data :: binary(), PartitionKey :: binary()}.
-define(DEFAULT_PORT, 443).
%% API
-export([
start_link/1,
connection_status/1,
query/2
]).
%% gen_server callbacks
-export([
init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
terminate/2,
code_change/3
]).
-ifdef(TEST).
-export([execute/2]).
-endif.
%% The default timeout for Kinesis API calls is 10 seconds,
%% but this value for `gen_server:call` is 5s,
%% so we should adjust timeout for `gen_server:call`
-define(HEALTH_CHECK_TIMEOUT, 15000).
%%%===================================================================
%%% API
%%%===================================================================
connection_status(Pid) ->
try
gen_server:call(Pid, connection_status, ?HEALTH_CHECK_TIMEOUT)
catch
_:_ ->
{error, timeout}
end.
query(Pid, Records) ->
gen_server:call(Pid, {query, Records}, infinity).
%%--------------------------------------------------------------------
%% @doc
%% Starts Bridge which communicates to Amazon Kinesis Data Streams
%% @end
%%--------------------------------------------------------------------
start_link(Options) ->
gen_server:start_link(?MODULE, Options, []).
%%%===================================================================
%%% gen_server callbacks
%%%===================================================================
%% Initialize kinesis connector
-spec init(emqx_bridge_kinesis_impl_producer:config()) -> {ok, state()}.
init(#{
aws_access_key_id := AwsAccessKey,
aws_secret_access_key := AwsSecretAccessKey,
endpoint := Endpoint,
partition_key := PartitionKey,
stream_name := StreamName,
max_retries := MaxRetries,
instance_id := InstanceId
}) ->
process_flag(trap_exit, true),
#{scheme := Scheme, hostname := Host, port := Port} =
emqx_schema:parse_server(
Endpoint,
#{
default_port => ?DEFAULT_PORT,
supported_schemes => ["http", "https"]
}
),
State = #{
instance_id => InstanceId,
partition_key => PartitionKey,
stream_name => StreamName
},
New =
fun(AccessKeyID, SecretAccessKey, HostAddr, HostPort, ConnectionScheme) ->
Config0 = erlcloud_kinesis:new(
AccessKeyID,
SecretAccessKey,
HostAddr,
HostPort,
ConnectionScheme ++ "://"
),
Config0#aws_config{retry_num = MaxRetries}
end,
erlcloud_config:configure(
to_str(AwsAccessKey), to_str(AwsSecretAccessKey), Host, Port, Scheme, New
),
{ok, State}.
handle_call(connection_status, _From, #{stream_name := StreamName} = State) ->
Status =
case erlcloud_kinesis:describe_stream(StreamName) of
{ok, _} ->
{ok, connected};
{error, {<<"ResourceNotFoundException">>, _}} ->
{error, unhealthy_target};
Error ->
{error, Error}
end,
{reply, Status, State};
handle_call({query, Records}, _From, #{stream_name := StreamName} = State) ->
Result = do_query(StreamName, Records),
{reply, Result, State};
handle_call(_Request, _From, State) ->
{reply, {error, unknown_call}, State}.
handle_cast(_Request, State) ->
{noreply, State}.
handle_info(_Info, State) ->
{noreply, State}.
terminate(Reason, #{instance_id := InstanceId} = _State) ->
?tp(kinesis_stop, #{instance_id => InstanceId, reason => Reason}),
ok.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%%%===================================================================
%%% Internal functions
%%%===================================================================
-spec do_query(binary(), [record()]) ->
{ok, jsx:json_term() | binary()}
| {error, {unrecoverable_error, term()}}
| {error, term()}.
do_query(StreamName, Records) ->
try
execute(put_record, {StreamName, Records})
catch
_Type:Reason ->
{error, {unrecoverable_error, {invalid_request, Reason}}}
end.
-spec execute(put_record, {binary(), [record()]}) ->
{ok, jsx:json_term() | binary()}
| {error, term()}.
execute(put_record, {StreamName, [{Data, PartitionKey}] = Record}) ->
Result = erlcloud_kinesis:put_record(StreamName, PartitionKey, Data),
?tp(kinesis_put_record, #{records => Record, result => Result}),
Result;
execute(put_record, {StreamName, Items}) when is_list(Items) ->
Result = erlcloud_kinesis:put_records(StreamName, Items),
?tp(kinesis_put_record, #{records => Items, result => Result}),
Result.
-spec to_str(list() | binary()) -> list().
to_str(List) when is_list(List) ->
List;
to_str(Bin) when is_binary(Bin) ->
erlang:binary_to_list(Bin).

View File

@ -0,0 +1,247 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_kinesis_impl_producer).
-include_lib("emqx/include/logger.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-define(HEALTH_CHECK_TIMEOUT, 15000).
-define(TOPIC_MESSAGE,
"Kinesis stream is invalid. Please check if the stream exist in Kinesis account."
).
-type config() :: #{
aws_access_key_id := binary(),
aws_secret_access_key := binary(),
endpoint := binary(),
stream_name := binary(),
partition_key := binary(),
payload_template := binary(),
max_retries := non_neg_integer(),
pool_size := non_neg_integer(),
instance_id => resource_id(),
any() => term()
}.
-type templates() :: #{
partition_key := list(),
send_message := list()
}.
-type state() :: #{
pool_name := resource_id(),
templates := templates()
}.
-export_type([config/0]).
%% `emqx_resource' API
-export([
callback_mode/0,
on_start/2,
on_stop/2,
on_query/3,
on_batch_query/3,
on_get_status/2
]).
-export([
connect/1
]).
%%-------------------------------------------------------------------------------------------------
%% `emqx_resource' API
%%-------------------------------------------------------------------------------------------------
callback_mode() -> always_sync.
-spec on_start(resource_id(), config()) -> {ok, state()} | {error, term()}.
on_start(
InstanceId,
#{
pool_size := PoolSize
} = Config0
) ->
?SLOG(info, #{
msg => "starting_kinesis_bridge",
connector => InstanceId,
config => redact(Config0)
}),
Config = Config0#{instance_id => InstanceId},
Options = [
{config, Config},
{pool_size, PoolSize}
],
Templates = parse_template(Config),
State = #{
pool_name => InstanceId,
templates => Templates
},
case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of
ok ->
?tp(emqx_bridge_kinesis_impl_producer_start_ok, #{config => Config}),
{ok, State};
Error ->
?tp(emqx_bridge_kinesis_impl_producer_start_failed, #{config => Config}),
Error
end.
-spec on_stop(resource_id(), state()) -> ok | {error, term()}.
on_stop(InstanceId, _State) ->
emqx_resource_pool:stop(InstanceId).
-spec on_get_status(resource_id(), state()) ->
connected | disconnected | {disconnected, state(), {unhealthy_target, string()}}.
on_get_status(_InstanceId, #{pool_name := Pool} = State) ->
case
emqx_resource_pool:health_check_workers(
Pool,
{emqx_bridge_kinesis_connector_client, connection_status, []},
?HEALTH_CHECK_TIMEOUT,
#{return_values => true}
)
of
{ok, Values} ->
AllOk = lists:all(fun(S) -> S =:= {ok, connected} end, Values),
case AllOk of
true ->
connected;
false ->
Unhealthy = lists:any(fun(S) -> S =:= {error, unhealthy_target} end, Values),
case Unhealthy of
true -> {disconnected, State, {unhealthy_target, ?TOPIC_MESSAGE}};
false -> disconnected
end
end;
{error, _} ->
disconnected
end.
-spec on_query(
resource_id(),
{send_message, map()},
state()
) ->
{ok, map()}
| {error, {recoverable_error, term()}}
| {error, term()}.
on_query(ResourceId, {send_message, Message}, State) ->
Requests = [{send_message, Message}],
?tp(emqx_bridge_kinesis_impl_producer_sync_query, #{message => Message}),
do_send_requests_sync(ResourceId, Requests, State).
-spec on_batch_query(
resource_id(),
[{send_message, map()}],
state()
) ->
{ok, map()}
| {error, {recoverable_error, term()}}
| {error, term()}.
%% we only support batch insert
on_batch_query(ResourceId, [{send_message, _} | _] = Requests, State) ->
?tp(emqx_bridge_kinesis_impl_producer_sync_batch_query, #{requests => Requests}),
do_send_requests_sync(ResourceId, Requests, State).
connect(Opts) ->
Options = proplists:get_value(config, Opts),
emqx_bridge_kinesis_connector_client:start_link(Options).
%%-------------------------------------------------------------------------------------------------
%% Helper fns
%%-------------------------------------------------------------------------------------------------
-spec do_send_requests_sync(
resource_id(),
[{send_message, map()}],
state()
) ->
{ok, jsx:json_term() | binary()}
| {error, {recoverable_error, term()}}
| {error, {unrecoverable_error, {invalid_request, term()}}}
| {error, {unrecoverable_error, {unhealthy_target, string()}}}
| {error, {unrecoverable_error, term()}}
| {error, term()}.
do_send_requests_sync(
InstanceId,
Requests,
#{pool_name := PoolName, templates := Templates}
) ->
Records = render_records(Requests, Templates),
Result = ecpool:pick_and_do(
PoolName,
{emqx_bridge_kinesis_connector_client, query, [Records]},
no_handover
),
handle_result(Result, Requests, InstanceId).
handle_result({ok, _} = Result, _Requests, _InstanceId) ->
Result;
handle_result({error, {<<"ResourceNotFoundException">>, _} = Reason}, Requests, InstanceId) ->
?SLOG(error, #{
msg => "kinesis_error_response",
request => Requests,
connector => InstanceId,
reason => Reason
}),
{error, {unrecoverable_error, {unhealthy_target, ?TOPIC_MESSAGE}}};
handle_result(
{error, {<<"ProvisionedThroughputExceededException">>, _} = Reason}, Requests, InstanceId
) ->
?SLOG(error, #{
msg => "kinesis_error_response",
request => Requests,
connector => InstanceId,
reason => Reason
}),
{error, {recoverable_error, Reason}};
handle_result({error, {<<"InvalidArgumentException">>, _} = Reason}, Requests, InstanceId) ->
?SLOG(error, #{
msg => "kinesis_error_response",
request => Requests,
connector => InstanceId,
reason => Reason
}),
{error, {unrecoverable_error, Reason}};
handle_result({error, {econnrefused = Reason, _}}, Requests, InstanceId) ->
?SLOG(error, #{
msg => "kinesis_error_response",
request => Requests,
connector => InstanceId,
reason => Reason
}),
{error, {recoverable_error, Reason}};
handle_result({error, Reason} = Error, Requests, InstanceId) ->
?SLOG(error, #{
msg => "kinesis_error_response",
request => Requests,
connector => InstanceId,
reason => Reason
}),
Error.
parse_template(Config) ->
#{payload_template := PayloadTemplate, partition_key := PartitionKeyTemplate} = Config,
Templates = #{send_message => PayloadTemplate, partition_key => PartitionKeyTemplate},
maps:map(fun(_K, V) -> emqx_placeholder:preproc_tmpl(V) end, Templates).
render_records(Items, Templates) ->
PartitionKeyTemplate = maps:get(partition_key, Templates),
MsgTemplate = maps:get(send_message, Templates),
render_messages(Items, {MsgTemplate, PartitionKeyTemplate}, []).
render_messages([], _Templates, RenderedMsgs) ->
RenderedMsgs;
render_messages(
[{send_message, Msg} | Others],
{MsgTemplate, PartitionKeyTemplate} = Templates,
RenderedMsgs
) ->
Data = emqx_placeholder:proc_tmpl(MsgTemplate, Msg),
PartitionKey = emqx_placeholder:proc_tmpl(PartitionKeyTemplate, Msg),
RenderedMsg = {Data, PartitionKey},
render_messages(Others, Templates, [RenderedMsg | RenderedMsgs]).
redact(Config) ->
emqx_utils:redact(Config, fun(Any) -> Any =:= aws_secret_access_key end).

View File

@ -0,0 +1,817 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_kinesis_impl_producer_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-define(PRODUCER, emqx_bridge_kinesis_impl_producer).
-define(BRIDGE_TYPE, kinesis_producer).
-define(BRIDGE_TYPE_BIN, <<"kinesis_producer">>).
-define(KINESIS_PORT, 4566).
-define(TOPIC, <<"t/topic">>).
%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------
%% Run the whole suite once per group (batched and unbatched bridge).
all() ->
    [
        {group, with_batch},
        {group, without_batch}
    ].

%% Both groups contain every test case of this module; they differ only in
%% the batch_size injected by init_per_group/2.
groups() ->
    TCs = emqx_common_test_helpers:all(?MODULE),
    [
        {with_batch, TCs},
        {without_batch, TCs}
    ].
%% Start the EMQX applications needed by the bridge under test and record
%% the toxiproxy endpoint (used to inject network failures) in Config.
%% PROXY_HOST/PROXY_PORT may be overridden via the environment for CI.
init_per_suite(Config) ->
    ProxyHost = os:getenv("PROXY_HOST", "toxiproxy.emqx.net"),
    ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
    ProxyName = "kinesis",
    ok = emqx_common_test_helpers:start_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_bridge, emqx_rule_engine]),
    {ok, _} = application:ensure_all_started(emqx_connector),
    emqx_mgmt_api_test_util:init_suite(),
    [
        {proxy_host, ProxyHost},
        {proxy_port, ProxyPort},
        {kinesis_port, ?KINESIS_PORT},
        {proxy_name, ProxyName}
        | Config
    ].
%% Tear down everything started in init_per_suite/1, in reverse order.
end_per_suite(_Config) ->
    emqx_mgmt_api_test_util:end_suite(),
    ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:stop_apps([emqx_bridge, emqx_resource, emqx_rule_engine]),
    %% Result intentionally ignored: emqx_connector may already be stopped.
    _ = application:stop(emqx_connector),
    ok.
%% Inject the batch_size that kinesis_config/1 later reads:
%% 100 exercises the batched code path, 1 the per-message path.
init_per_group(with_batch, Config) ->
    [{batch_size, 100} | Config];
init_per_group(without_batch, Config) ->
    [{batch_size, 1} | Config];
init_per_group(_Group, Config) ->
    Config.

end_per_group(_Group, _Config) ->
    ok.
%% Per-testcase setup: fresh snabbkaffe trace, healthy proxy, clean bridge
%% state, a telemetry capture table, and a freshly created Kinesis stream.
init_per_testcase(TestCase, Config0) ->
    ok = snabbkaffe:start_trace(),
    ProxyHost = ?config(proxy_host, Config0),
    ProxyPort = ?config(proxy_port, Config0),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    %% t_wrong_server waits on a long health-check interval, so it gets a
    %% larger timetrap than the rest of the suite.
    TimeTrap =
        case TestCase of
            t_wrong_server -> 60;
            _ -> 30
        end,
    ct:timetrap({seconds, TimeTrap}),
    delete_all_bridges(),
    Tid = install_telemetry_handler(TestCase),
    %% Stored in the process dictionary so assert_metrics/2 can find it.
    put(telemetry_table, Tid),
    Config = generate_config(Config0),
    create_stream(Config),
    [{telemetry_table, Tid} | Config].

%% Per-testcase cleanup: stop tracing, remove bridges and the stream, and
%% run any on_exit callbacks registered during the test.
end_per_testcase(_TestCase, Config) ->
    ok = snabbkaffe:stop(),
    delete_all_bridges(),
    delete_stream(Config),
    emqx_common_test_helpers:call_janitor(),
    ok.
%%------------------------------------------------------------------------------
%% Helper fns
%%------------------------------------------------------------------------------
%% Build the per-test bridge configuration plus derived handles (erlcloud
%% client config, resource and bridge ids) and prepend them to Config0.
generate_config(Config0) ->
    #{
        name := Name,
        config_string := ConfigString,
        kinesis_config := KinesisConfig
    } = kinesis_config(Config0),
    Endpoint = map_get(<<"endpoint">>, KinesisConfig),
    #{scheme := Scheme, hostname := Host, port := Port} =
        emqx_schema:parse_server(
            Endpoint,
            #{
                default_port => 443,
                supported_schemes => ["http", "https"]
            }
        ),
    %% erlcloud client pointed at the same (local) endpoint as the bridge;
    %% credentials are dummies accepted by localstack.
    ErlcloudConfig = erlcloud_kinesis:new("access_key", "secret", Host, Port, Scheme ++ "://"),
    ResourceId = emqx_bridge_resource:resource_id(?BRIDGE_TYPE_BIN, Name),
    BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, Name),
    [
        {kinesis_name, Name},
        {connection_scheme, Scheme},
        {kinesis_config, KinesisConfig},
        {kinesis_config_string, ConfigString},
        {resource_id, ResourceId},
        {bridge_id, BridgeId},
        {erlcloud_config, ErlcloudConfig}
        | Config0
    ].
%% Render a HOCON bridge config string for a uniquely named bridge, taking
%% overridable knobs from the proplist Config (group/test injected values
%% win over the defaults below). Returns the name, the raw config string,
%% and the parsed+checked config map.
kinesis_config(Config) ->
    QueryMode = proplists:get_value(query_mode, Config, async),
    Scheme = proplists:get_value(connection_scheme, Config, "http"),
    ProxyHost = proplists:get_value(proxy_host, Config),
    KinesisPort = proplists:get_value(kinesis_port, Config),
    BatchSize = proplists:get_value(batch_size, Config, 100),
    BatchTime = proplists:get_value(batch_time, Config, <<"500ms">>),
    PayloadTemplate = proplists:get_value(payload_template, Config, "${payload}"),
    StreamName = proplists:get_value(stream_name, Config, <<"mystream">>),
    PartitionKey = proplists:get_value(partition_key, Config, <<"key">>),
    MaxRetries = proplists:get_value(max_retries, Config, 3),
    %% Unique suffix so concurrent/repeated runs never collide on name.
    GUID = emqx_guid:to_hexstr(emqx_guid:gen()),
    Name = <<(atom_to_binary(?MODULE))/binary, (GUID)/binary>>,
    %% NOTE: the endpoint targets toxiproxy, not Kinesis directly, so tests
    %% can cut the connection. Format arguments are positional — keep the
    %% argument list below in sync with the ~s/~b placeholders.
    ConfigString =
        io_lib:format(
            "bridges.kinesis_producer.~s {\n"
            "  enable = true\n"
            "  aws_access_key_id = \"aws_access_key_id\"\n"
            "  aws_secret_access_key = \"aws_secret_access_key\"\n"
            "  endpoint = \"~s://~s:~b\"\n"
            "  stream_name = \"~s\"\n"
            "  partition_key = \"~s\"\n"
            "  payload_template = \"~s\"\n"
            "  max_retries = ~b\n"
            "  pool_size = 1\n"
            "  resource_opts = {\n"
            "    health_check_interval = \"3s\"\n"
            "    request_ttl = 30s\n"
            "    resume_interval = 1s\n"
            "    metrics_flush_interval = \"700ms\"\n"
            "    worker_pool_size = 1\n"
            "    query_mode = ~s\n"
            "    batch_size = ~b\n"
            "    batch_time = \"~s\"\n"
            "  }\n"
            "}\n",
            [
                Name,
                Scheme,
                ProxyHost,
                KinesisPort,
                StreamName,
                PartitionKey,
                PayloadTemplate,
                MaxRetries,
                QueryMode,
                BatchSize,
                BatchTime
            ]
        ),
    #{
        name => Name,
        config_string => ConfigString,
        kinesis_config => parse_and_check(ConfigString, Name)
    }.
%% Parse the HOCON string, validate it against the bridge schema (check is
%% for its side effect of raising on invalid config), and return the raw
%% config map for the named bridge.
parse_and_check(ConfigString, Name) ->
    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
    TypeBin = <<"kinesis_producer">>,
    hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
    #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf,
    Config.
%% Remove every configured bridge, regardless of type — used to guarantee a
%% clean slate between test cases.
delete_all_bridges() ->
    ct:pal("deleting all bridges"),
    lists:foreach(
        fun(#{name := Name, type := Type}) ->
            emqx_bridge:remove(Type, Name)
        end,
        emqx_bridge:list()
    ).

%% Remove only the bridge created for the current test case.
delete_bridge(Config) ->
    Type = ?BRIDGE_TYPE,
    Name = ?config(kinesis_name, Config),
    ct:pal("deleting bridge ~p", [{Type, Name}]),
    emqx_bridge:remove(Type, Name).
%% Create the bridge through the management HTTP API (as opposed to
%% create_bridge/1,2 which calls the internal API directly).
create_bridge_http(Config) ->
    create_bridge_http(Config, _KinesisConfigOverrides = #{}).

%% Same, with a deep-merged map of config overrides. Also dry-runs the
%% config via the bridges_probe endpoint and asserts that probe and
%% creation agree on success/failure.
create_bridge_http(Config, KinesisConfigOverrides) ->
    TypeBin = ?BRIDGE_TYPE_BIN,
    Name = ?config(kinesis_name, Config),
    KinesisConfig0 = ?config(kinesis_config, Config),
    KinesisConfig = emqx_utils_maps:deep_merge(KinesisConfig0, KinesisConfigOverrides),
    Params = KinesisConfig#{<<"type">> => TypeBin, <<"name">> => Name},
    Path = emqx_mgmt_api_test_util:api_path(["bridges"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    ProbePath = emqx_mgmt_api_test_util:api_path(["bridges_probe"]),
    ProbeResult = emqx_mgmt_api_test_util:request_api(post, ProbePath, "", AuthHeader, Params),
    ct:pal("creating bridge (via http): ~p", [Params]),
    ct:pal("probe result: ~p", [ProbeResult]),
    Res =
        case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of
            {ok, Res0} -> {ok, emqx_utils_json:decode(Res0, [return_maps])};
            Error -> Error
        end,
    ct:pal("bridge creation result: ~p", [Res]),
    %% Probe and create must both succeed or both fail (compare ok/error tags).
    ?assertEqual(element(1, ProbeResult), element(1, Res)),
    Res.
%% Create the bridge via the internal emqx_bridge API with no overrides.
create_bridge(Config) ->
    create_bridge(Config, _KinesisConfigOverrides = #{}).

%% Same, deep-merging the given overrides into the generated config first.
create_bridge(Config, KinesisConfigOverrides) ->
    TypeBin = ?BRIDGE_TYPE_BIN,
    Name = ?config(kinesis_name, Config),
    KinesisConfig0 = ?config(kinesis_config, Config),
    KinesisConfig = emqx_utils_maps:deep_merge(KinesisConfig0, KinesisConfigOverrides),
    ct:pal("creating bridge: ~p", [KinesisConfig]),
    Res = emqx_bridge:create(TypeBin, Name, KinesisConfig),
    ct:pal("bridge creation result: ~p", [Res]),
    Res.
%% Create a rule (via the HTTP API) that forwards every message published
%% to ?TOPIC into this test's bridge action.
create_rule_and_action_http(Config) ->
    BridgeId = ?config(bridge_id, Config),
    Params = #{
        enable => true,
        sql => <<"SELECT * FROM \"", ?TOPIC/binary, "\"">>,
        actions => [BridgeId]
    },
    Path = emqx_mgmt_api_test_util:api_path(["rules"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of
        {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])};
        Error -> Error
    end.
%% (Re)create the test stream with a single shard and block until Kinesis
%% reports it ACTIVE, so tests don't race stream provisioning.
create_stream(Config) ->
    KinesisConfig = ?config(kinesis_config, Config),
    ErlcloudConfig = ?config(erlcloud_config, Config),
    StreamName = map_get(<<"stream_name">>, KinesisConfig),
    {ok, _} = application:ensure_all_started(erlcloud),
    %% Drop any leftover stream from a previous run first.
    delete_stream(StreamName, ErlcloudConfig),
    {ok, _} = erlcloud_kinesis:create_stream(StreamName, 1, ErlcloudConfig),
    ?retry(
        _Sleep = 100,
        _Attempts = 10,
        begin
            {ok, [{<<"StreamDescription">>, StreamInfo}]} =
                erlcloud_kinesis:describe_stream(StreamName, ErlcloudConfig),
            ?assertEqual(
                <<"ACTIVE">>,
                proplists:get_value(<<"StreamStatus">>, StreamInfo)
            )
        end
    ),
    ok.
%% Delete the test stream named in the bridge config (test-config arity).
delete_stream(Config) ->
    KinesisConfig = ?config(kinesis_config, Config),
    ErlcloudConfig = ?config(erlcloud_config, Config),
    StreamName = map_get(<<"stream_name">>, KinesisConfig),
    {ok, _} = application:ensure_all_started(erlcloud),
    delete_stream(StreamName, ErlcloudConfig),
    ok.

%% Delete a stream by name and, if the delete was accepted, wait until
%% describe_stream confirms it is gone. A failed delete (e.g. the stream
%% never existed) is deliberately ignored — best-effort cleanup.
delete_stream(StreamName, ErlcloudConfig) ->
    case erlcloud_kinesis:delete_stream(StreamName, ErlcloudConfig) of
        {ok, _} ->
            ?retry(
                _Sleep = 100,
                _Attempts = 10,
                ?assertMatch(
                    {error, {<<"ResourceNotFoundException">>, _}},
                    erlcloud_kinesis:describe_stream(StreamName, ErlcloudConfig)
                )
            );
        _ ->
            ok
    end,
    ok.
%% Wait for exactly one record on the shard and return it.
wait_record(Config, ShardIt, Timeout, Attempts) ->
    [Record] = wait_records(Config, ShardIt, 1, Timeout, Attempts),
    Record.

%% Poll get_records until exactly Count records are visible, retrying up to
%% Attempts times with Timeout ms between polls.
wait_records(Config, ShardIt, Count, Timeout, Attempts) ->
    ErlcloudConfig = ?config(erlcloud_config, Config),
    ?retry(
        Timeout,
        Attempts,
        begin
            {ok, Ret} = erlcloud_kinesis:get_records(ShardIt, ErlcloudConfig),
            Records = proplists:get_value(<<"Records">>, Ret),
            %% Match (not assignment): fails the attempt unless exactly
            %% Count records arrived, which makes ?retry poll again.
            Count = length(Records),
            Records
        end
    ).
%% Shard iterator for the first (and, with 1-shard streams, only) shard.
get_shard_iterator(Config) ->
    get_shard_iterator(Config, 1).

%% LATEST iterator for the Index-th shard (1-based, shards sorted for a
%% stable order), i.e. only records written after this call are visible.
get_shard_iterator(Config, Index) ->
    KinesisConfig = ?config(kinesis_config, Config),
    ErlcloudConfig = ?config(erlcloud_config, Config),
    StreamName = map_get(<<"stream_name">>, KinesisConfig),
    {ok, [{<<"Shards">>, Shards}]} = erlcloud_kinesis:list_shards(StreamName, ErlcloudConfig),
    Shard = lists:nth(Index, lists:sort(Shards)),
    ShardId = proplists:get_value(<<"ShardId">>, Shard),
    {ok, [{<<"ShardIterator">>, ShardIt}]} =
        erlcloud_kinesis:get_shard_iterator(StreamName, ShardId, <<"LATEST">>, ErlcloudConfig),
    ShardIt.
%% Attach a telemetry handler for all resource-metric events. Each event is
%% both recorded in a fresh ETS table (keyed by monotonic time, for later
%% inspection) and sent to the test process as a {telemetry, Data} message
%% (consumed by wait_n_events/5 and receive_all_events/2).
%% Detach/cleanup is registered via on_exit. Returns the ETS table id.
install_telemetry_handler(TestCase) ->
    Tid = ets:new(TestCase, [ordered_set, public]),
    HandlerId = TestCase,
    TestPid = self(),
    _ = telemetry:attach_many(
        HandlerId,
        emqx_resource_metrics:events(),
        fun(EventName, Measurements, Metadata, _Config) ->
            Data = #{
                name => EventName,
                measurements => Measurements,
                metadata => Metadata
            },
            ets:insert(Tid, {erlang:monotonic_time(), Data}),
            TestPid ! {telemetry, Data},
            ok
        end,
        unused_config
    ),
    emqx_common_test_helpers:on_exit(fun() ->
        telemetry:detach(HandlerId),
        ets:delete(Tid)
    end),
    Tid.
%% Snapshot every known resource metric for ResourceId into a map of
%% metric name => current value.
current_metrics(ResourceId) ->
    maps:map(
        fun(_Metric, Getter) -> Getter(ResourceId) end,
        metrics_mapping()
    ).
%% Map of metric name => 1-arity getter fun from emqx_resource_metrics.
%% Single source of truth for which metrics the assertions below cover.
metrics_mapping() ->
    #{
        dropped => fun emqx_resource_metrics:dropped_get/1,
        dropped_expired => fun emqx_resource_metrics:dropped_expired_get/1,
        dropped_other => fun emqx_resource_metrics:dropped_other_get/1,
        dropped_queue_full => fun emqx_resource_metrics:dropped_queue_full_get/1,
        dropped_resource_not_found => fun emqx_resource_metrics:dropped_resource_not_found_get/1,
        dropped_resource_stopped => fun emqx_resource_metrics:dropped_resource_stopped_get/1,
        late_reply => fun emqx_resource_metrics:late_reply_get/1,
        failed => fun emqx_resource_metrics:failed_get/1,
        inflight => fun emqx_resource_metrics:inflight_get/1,
        matched => fun emqx_resource_metrics:matched_get/1,
        queuing => fun emqx_resource_metrics:queuing_get/1,
        retried => fun emqx_resource_metrics:retried_get/1,
        retried_failed => fun emqx_resource_metrics:retried_failed_get/1,
        retried_success => fun emqx_resource_metrics:retried_success_get/1,
        success => fun emqx_resource_metrics:success_get/1
    }.
%% Assert that, for every metric named in ExpectedMetrics, the live value
%% for ResourceId equals the expected one. On mismatch the failure message
%% carries a full metric snapshot plus all recorded telemetry events
%% (table id taken from the process dictionary) to ease debugging.
assert_metrics(ExpectedMetrics, ResourceId) ->
    Mapping = metrics_mapping(),
    Actual = maps:from_list([
        {Metric, (maps:get(Metric, Mapping))(ResourceId)}
     || Metric <- maps:keys(ExpectedMetrics)
    ]),
    Snapshot = current_metrics(ResourceId),
    Recorded = ets:tab2list(get(telemetry_table)),
    ?assertEqual(ExpectedMetrics, Actual, #{
        current_metrics => Snapshot, recorded_events => Recorded
    }),
    ok.
%% Assert that every known resource metric for ResourceId is zero — used
%% right after bridge creation to establish a clean baseline.
assert_empty_metrics(ResourceId) ->
    AllZero = maps:from_list([{Metric, 0} || Metric <- maps:keys(metrics_mapping())]),
    assert_metrics(AllZero, ResourceId).
%% Wait (default: 5s) for one telemetry event of the given name.
wait_telemetry_event(TelemetryTable, EventName, ResourceId) ->
    wait_telemetry_event(TelemetryTable, EventName, ResourceId, #{timeout => 5_000, n_events => 1}).

%% Wait for NEvents occurrences (by accumulated counter increments) of the
%% named telemetry event; Timeout applies per received message, see
%% wait_n_events/5.
wait_telemetry_event(
    TelemetryTable,
    EventName,
    ResourceId,
    _Opts = #{
        timeout := Timeout,
        n_events := NEvents
    }
) ->
    wait_n_events(TelemetryTable, ResourceId, NEvents, Timeout, EventName).
%% Consume {telemetry, ...} messages (sent by the handler installed in
%% install_telemetry_handler/1) for EventName until the counter increments
%% sum to at least NEvents. NOTE: Timeout is applied per receive, not as a
%% total deadline, so the overall wait can exceed Timeout when events
%% trickle in. On timeout, dumps recorded events and metrics, then fails.
wait_n_events(_TelemetryTable, _ResourceId, NEvents, _Timeout, _EventName) when NEvents =< 0 ->
    ok;
wait_n_events(TelemetryTable, ResourceId, NEvents, Timeout, EventName) ->
    receive
        {telemetry, #{name := [_, _, EventName], measurements := #{counter_inc := Inc}} = Event} ->
            ct:pal("telemetry event: ~p", [Event]),
            wait_n_events(TelemetryTable, ResourceId, NEvents - Inc, Timeout, EventName)
    after Timeout ->
        RecordedEvents = ets:tab2list(TelemetryTable),
        CurrentMetrics = current_metrics(ResourceId),
        ct:pal("recorded events: ~p", [RecordedEvents]),
        ct:pal("current metrics: ~p", [CurrentMetrics]),
        error({timeout_waiting_for_telemetry, EventName})
    end.
%% Drain gauge events for GaugeName for up to Timeout ms and check that the
%% LAST observed value equals ExpectedValue. NOTE(review): when no events
%% arrive at all this only logs and returns ok rather than failing —
%% presumably intentional (gauge may already be settled), but worth
%% confirming.
wait_until_gauge_is(GaugeName, ExpectedValue, Timeout) ->
    Events = receive_all_events(GaugeName, Timeout),
    case length(Events) > 0 andalso lists:last(Events) of
        #{measurements := #{gauge_set := ExpectedValue}} ->
            ok;
        #{measurements := #{gauge_set := Value}} ->
            ct:pal("events: ~p", [Events]),
            ct:fail(
                "gauge ~p didn't reach expected value ~p; last value: ~p",
                [GaugeName, ExpectedValue, Value]
            );
        false ->
            ct:pal("no ~p gauge events received!", [GaugeName])
    end.
%% Collect up to 10 pending {telemetry, ...} messages for EventName,
%% stopping early after Timeout ms of silence. Returned oldest-first.
receive_all_events(EventName, Timeout) ->
    receive_all_events(EventName, Timeout, _MaxEvents = 10, _Count = 0, _Acc = []).

receive_all_events(_EventName, _Timeout, MaxEvents, Count, Acc) when Count >= MaxEvents ->
    lists:reverse(Acc);
receive_all_events(EventName, Timeout, MaxEvents, Count, Acc) ->
    receive
        {telemetry, #{name := [_, _, EventName]} = Event} ->
            receive_all_events(EventName, Timeout, MaxEvents, Count + 1, [Event | Acc])
    after Timeout ->
        lists:reverse(Acc)
    end.
%% Coerce a charlist, binary, or integer into a charlist (string).
to_str(Value) ->
    if
        is_list(Value) -> Value;
        is_binary(Value) -> binary_to_list(Value);
        is_integer(Value) -> integer_to_list(Value)
    end.
%% Coerce a charlist into a binary.
to_bin(Chars) when is_list(Chars) ->
    list_to_binary(Chars).
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
%% Smoke test: the bridge can be created through the HTTP API.
t_create_via_http(Config) ->
    ?assertMatch({ok, _}, create_bridge_http(Config)),
    ok.

%% Create the bridge while the proxy is down (start fails), then verify it
%% recovers to connected once connectivity returns.
t_start_failed_then_fix(Config) ->
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    ProxyName = ?config(proxy_name, Config),
    ResourceId = ?config(resource_id, Config),
    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
        ct:sleep(1000),
        ?wait_async_action(
            create_bridge(Config),
            #{?snk_kind := emqx_bridge_kinesis_impl_producer_start_failed},
            20_000
        )
    end),
    ?retry(
        _Sleep1 = 1_000,
        _Attempts1 = 30,
        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    ok.
%% Stopping the bridge emits exactly one kinesis_stop trace event.
t_stop(Config) ->
    Name = ?config(kinesis_name, Config),
    {ok, _} = create_bridge(Config),
    ?check_trace(
        ?wait_async_action(
            emqx_bridge_resource:stop(?BRIDGE_TYPE, Name),
            #{?snk_kind := kinesis_stop},
            5_000
        ),
        fun(Trace) ->
            ?assertMatch([_], ?of_kind(kinesis_stop, Trace)),
            ok
        end
    ),
    ok.

%% With the stream present, the health check reports connected.
t_get_status_ok(Config) ->
    ResourceId = ?config(resource_id, Config),
    {ok, _} = create_bridge(Config),
    ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)),
    ok.

%% Creating the bridge when the target stream is absent yields a
%% disconnected resource with an unhealthy_target error cached.
t_create_unhealthy(Config) ->
    delete_stream(Config),
    ResourceId = ?config(resource_id, Config),
    {ok, _} = create_bridge(Config),
    ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)),
    ?assertMatch(
        {ok, _, #{error := {unhealthy_target, _}}},
        emqx_resource_manager:lookup_cached(ResourceId)
    ),
    ok.

%% NOTE(review): body is identical to t_create_unhealthy — presumably kept
%% as a separately named case for status-focused reporting; confirm.
t_get_status_unhealthy(Config) ->
    delete_stream(Config),
    ResourceId = ?config(resource_id, Config),
    {ok, _} = create_bridge(Config),
    ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)),
    ?assertMatch(
        {ok, _, #{error := {unhealthy_target, _}}},
        emqx_resource_manager:lookup_cached(ResourceId)
    ),
    ok.
%% Happy path: an MQTT publish routed through the rule lands in Kinesis
%% with the raw payload, and metrics count exactly one success.
t_publish_success(Config) ->
    ResourceId = ?config(resource_id, Config),
    TelemetryTable = ?config(telemetry_table, Config),
    ?assertMatch({ok, _}, create_bridge(Config)),
    {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
    emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
    assert_empty_metrics(ResourceId),
    %% LATEST iterator: taken before publishing, so only this test's
    %% records are observed.
    ShardIt = get_shard_iterator(Config),
    Payload = <<"payload">>,
    Message = emqx_message:make(?TOPIC, Payload),
    emqx:publish(Message),
    %% to avoid test flakiness
    wait_telemetry_event(TelemetryTable, success, ResourceId),
    wait_until_gauge_is(queuing, 0, 500),
    wait_until_gauge_is(inflight, 0, 500),
    assert_metrics(
        #{
            dropped => 0,
            failed => 0,
            inflight => 0,
            matched => 1,
            queuing => 0,
            retried => 0,
            success => 1
        },
        ResourceId
    ),
    Record = wait_record(Config, ShardIt, 100, 10),
    ?assertEqual(Payload, proplists:get_value(<<"Data">>, Record)),
    ok.
%% As t_publish_success, but with payload/partition-key templates that
%% extract fields from a JSON payload; the record Data must be the
%% extracted field, not the full payload.
t_publish_success_with_template(Config) ->
    ResourceId = ?config(resource_id, Config),
    TelemetryTable = ?config(telemetry_table, Config),
    Overrides =
        #{
            <<"payload_template">> => <<"${payload.data}">>,
            <<"partition_key">> => <<"${payload.key}">>
        },
    ?assertMatch({ok, _}, create_bridge(Config, Overrides)),
    {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
    emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
    assert_empty_metrics(ResourceId),
    ShardIt = get_shard_iterator(Config),
    Payload = <<"{\"key\":\"my_key\", \"data\":\"my_data\"}">>,
    Message = emqx_message:make(?TOPIC, Payload),
    emqx:publish(Message),
    %% to avoid test flakiness
    wait_telemetry_event(TelemetryTable, success, ResourceId),
    wait_until_gauge_is(queuing, 0, 500),
    wait_until_gauge_is(inflight, 0, 500),
    assert_metrics(
        #{
            dropped => 0,
            failed => 0,
            inflight => 0,
            matched => 1,
            queuing => 0,
            retried => 0,
            success => 1
        },
        ResourceId
    ),
    Record = wait_record(Config, ShardIt, 100, 10),
    ?assertEqual(<<"my_data">>, proplists:get_value(<<"Data">>, Record)),
    ok.
%% Publish 10 distinct messages and verify all 10 payloads arrive (order
%% not asserted — Kinesis delivery order is not assumed) and the metrics
%% count 10 matched / 10 success.
t_publish_multiple_msgs_success(Config) ->
    ResourceId = ?config(resource_id, Config),
    TelemetryTable = ?config(telemetry_table, Config),
    ?assertMatch({ok, _}, create_bridge(Config)),
    {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
    emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
    assert_empty_metrics(ResourceId),
    ShardIt = get_shard_iterator(Config),
    lists:foreach(
        fun(I) ->
            Payload = "payload_" ++ to_str(I),
            Message = emqx_message:make(?TOPIC, Payload),
            emqx:publish(Message)
        end,
        lists:seq(1, 10)
    ),
    Records = wait_records(Config, ShardIt, 10, 100, 10),
    ReceivedPayloads =
        lists:map(fun(Record) -> proplists:get_value(<<"Data">>, Record) end, Records),
    lists:foreach(
        fun(I) ->
            ExpectedPayload = to_bin("payload_" ++ to_str(I)),
            %% Pair each membership check with the payload so a failure
            %% message names the missing payload.
            ?assertEqual(
                {ExpectedPayload, true},
                {ExpectedPayload, lists:member(ExpectedPayload, ReceivedPayloads)}
            )
        end,
        lists:seq(1, 10)
    ),
    %% to avoid test flakiness
    wait_telemetry_event(TelemetryTable, success, ResourceId),
    wait_until_gauge_is(queuing, 0, 500),
    wait_until_gauge_is(inflight, 0, 500),
    assert_metrics(
        #{
            dropped => 0,
            failed => 0,
            inflight => 0,
            matched => 10,
            queuing => 0,
            retried => 0,
            success => 10
        },
        ResourceId
    ),
    ok.
%% Delete the stream after bridge creation, then publish: the send must be
%% counted as failed (not retried) and the resource must transition to
%% disconnected with an unhealthy_target error.
t_publish_unhealthy(Config) ->
    ResourceId = ?config(resource_id, Config),
    TelemetryTable = ?config(telemetry_table, Config),
    ?assertMatch({ok, _}, create_bridge(Config)),
    {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
    emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
    assert_empty_metrics(ResourceId),
    ShardIt = get_shard_iterator(Config),
    Payload = <<"payload">>,
    Message = emqx_message:make(?TOPIC, Payload),
    delete_stream(Config),
    emqx:publish(Message),
    %% Reading from the deleted stream must itself fail.
    ?assertError(
        {badmatch, {error, {<<"ResourceNotFoundException">>, _}}},
        wait_record(Config, ShardIt, 100, 10)
    ),
    %% to avoid test flakiness
    wait_telemetry_event(TelemetryTable, failed, ResourceId),
    wait_until_gauge_is(queuing, 0, 500),
    wait_until_gauge_is(inflight, 0, 500),
    assert_metrics(
        #{
            dropped => 0,
            failed => 1,
            inflight => 0,
            matched => 1,
            queuing => 0,
            retried => 0,
            success => 0
        },
        ResourceId
    ),
    ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)),
    ?assertMatch(
        {ok, _, #{error := {unhealthy_target, _}}},
        emqx_resource_manager:lookup_cached(ResourceId)
    ),
    ok.
%% An over-limit record must be rejected by Kinesis and counted as a
%% failure (no retry, no success).
t_publish_big_msg(Config) ->
    ResourceId = ?config(resource_id, Config),
    TelemetryTable = ?config(telemetry_table, Config),
    ?assertMatch({ok, _}, create_bridge(Config)),
    {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
    emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
    assert_empty_metrics(ResourceId),
    %% Maximum Kinesis record size is 1MB; build 1MB + 1 byte to exceed it.
    Payload = binary:copy(<<"a">>, 1 * 1024 * 1024 + 1),
    Message = emqx_message:make(?TOPIC, Payload),
    emqx:publish(Message),
    %% to avoid test flakiness
    wait_telemetry_event(TelemetryTable, failed, ResourceId),
    wait_until_gauge_is(queuing, 0, 500),
    wait_until_gauge_is(inflight, 0, 500),
    assert_metrics(
        #{
            dropped => 0,
            failed => 1,
            inflight => 0,
            matched => 1,
            queuing => 0,
            retried => 0,
            success => 0
        },
        ResourceId
    ),
    ok.
%% Publish while the connection is cut via toxiproxy, then restore it: the
%% buffered request must be retried and eventually delivered, reflected in
%% retried/retried_success metrics and the record arriving in Kinesis.
t_publish_connection_down(Config0) ->
    %% Regenerate config with a small retry budget for this scenario.
    Config = generate_config([{max_retries, 2} | Config0]),
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    ProxyName = ?config(proxy_name, Config),
    ResourceId = ?config(resource_id, Config),
    TelemetryTable = ?config(telemetry_table, Config),
    ?assertMatch({ok, _}, create_bridge(Config)),
    {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
    ?retry(
        _Sleep1 = 1_000,
        _Attempts1 = 30,
        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
    assert_empty_metrics(ResourceId),
    ShardIt = get_shard_iterator(Config),
    Payload = <<"payload">>,
    Message = emqx_message:make(?TOPIC, Payload),
    %% Pick the trace point matching the configured batch mode.
    Kind =
        case proplists:get_value(batch_size, Config) of
            1 -> emqx_bridge_kinesis_impl_producer_sync_query;
            _ -> emqx_bridge_kinesis_impl_producer_sync_batch_query
        end,
    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
        ct:sleep(1000),
        ?wait_async_action(
            emqx:publish(Message),
            #{?snk_kind := Kind},
            5_000
        ),
        ct:sleep(1000)
    end),
    %% Wait for reconnection.
    ?retry(
        _Sleep3 = 1_000,
        _Attempts3 = 20,
        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    Record = wait_record(Config, ShardIt, 2000, 10),
    %% to avoid test flakiness
    wait_telemetry_event(TelemetryTable, retried_success, ResourceId),
    wait_until_gauge_is(queuing, 0, 500),
    wait_until_gauge_is(inflight, 0, 500),
    assert_metrics(
        #{
            dropped => 0,
            failed => 0,
            inflight => 0,
            matched => 1,
            queuing => 0,
            retried => 1,
            success => 1,
            retried_success => 1
        },
        ResourceId
    ),
    Data = proplists:get_value(<<"Data">>, Record),
    ?assertEqual(Payload, Data),
    ok.
%% Point the bridge at an unresolvable endpoint with retries disabled: the
%% start callback still returns ok (connection is checked lazily), but the
%% health check must time out. The long health_check_interval keeps the
%% periodic check from interfering; see the larger timetrap for this case.
t_wrong_server(Config) ->
    Name = ?config(kinesis_name, Config),
    ResourceId = ?config(resource_id, Config),
    Overrides =
        #{
            <<"max_retries">> => 0,
            <<"endpoint">> => <<"https://wrong_server:12345">>,
            <<"resource_opts">> => #{
                <<"health_check_interval">> => <<"60s">>
            }
        },
    ?wait_async_action(
        create_bridge(Config, Overrides),
        #{?snk_kind := emqx_bridge_kinesis_impl_producer_start_ok},
        30_000
    ),
    ?assertEqual({error, timeout}, emqx_resource_manager:health_check(ResourceId)),
    emqx_bridge_resource:stop(?BRIDGE_TYPE, Name),
    emqx_bridge_resource:remove(?BRIDGE_TYPE, Name),
    ok.

View File

@ -16,6 +16,7 @@
-module(emqx_ds). -module(emqx_ds).
%% API: %% API:
-export([ensure_shard/2]).
%% Messages: %% Messages:
-export([message_store/2, message_store/1, message_stats/0]). -export([message_store/2, message_store/1, message_stats/0]).
%% Iterator: %% Iterator:
@ -79,6 +80,18 @@
%% API funcions %% API funcions
%%================================================================================ %%================================================================================
-spec ensure_shard(shard(), emqx_ds_storage_layer:options()) ->
ok | {error, _Reason}.
ensure_shard(Shard, Options) ->
case emqx_ds_storage_layer_sup:start_shard(Shard, Options) of
{ok, _Pid} ->
ok;
{error, {already_started, _Pid}} ->
ok;
{error, Reason} ->
{error, Reason}
end.
%%-------------------------------------------------------------------------------- %%--------------------------------------------------------------------------------
%% Message %% Message
%%-------------------------------------------------------------------------------- %%--------------------------------------------------------------------------------

View File

@ -175,7 +175,7 @@
cf :: rocksdb:cf_handle(), cf :: rocksdb:cf_handle(),
keymapper :: keymapper(), keymapper :: keymapper(),
write_options = [{sync, true}] :: emqx_ds_storage_layer:db_write_options(), write_options = [{sync, true}] :: emqx_ds_storage_layer:db_write_options(),
read_options = [] :: emqx_ds_storage_layer:db_write_options() read_options = [] :: emqx_ds_storage_layer:db_read_options()
}). }).
-record(it, { -record(it, {

View File

@ -6,7 +6,7 @@
-behaviour(gen_server). -behaviour(gen_server).
%% API: %% API:
-export([start_link/1]). -export([start_link/2]).
-export([create_generation/3]). -export([create_generation/3]).
-export([store/5]). -export([store/5]).
@ -18,7 +18,8 @@
%% behaviour callbacks: %% behaviour callbacks:
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).
-export_type([cf_refs/0, gen_id/0, db_write_options/0, state/0, iterator/0]). -export_type([cf_refs/0, gen_id/0, options/0, state/0, iterator/0]).
-export_type([db_options/0, db_write_options/0, db_read_options/0]).
-compile({inline, [meta_lookup/2]}). -compile({inline, [meta_lookup/2]}).
@ -26,10 +27,16 @@
%% Type declarations %% Type declarations
%%================================================================================ %%================================================================================
%% see rocksdb:db_options() -type options() :: #{
% -type options() :: proplists:proplist(). dir => file:filename()
}.
%% see rocksdb:db_options()
-type db_options() :: proplists:proplist().
%% see rocksdb:write_options()
-type db_write_options() :: proplists:proplist(). -type db_write_options() :: proplists:proplist().
%% see rocksdb:read_options()
-type db_read_options() :: proplists:proplist().
-type cf_refs() :: [{string(), rocksdb:cf_handle()}]. -type cf_refs() :: [{string(), rocksdb:cf_handle()}].
@ -110,18 +117,16 @@
%% API funcions %% API funcions
%%================================================================================ %%================================================================================
-spec start_link(emqx_ds:shard()) -> {ok, pid()}. -spec start_link(emqx_ds:shard(), emqx_ds_storage_layer:options()) -> {ok, pid()}.
start_link(Shard) -> start_link(Shard, Options) ->
gen_server:start_link(?REF(Shard), ?MODULE, [Shard], []). gen_server:start_link(?REF(Shard), ?MODULE, {Shard, Options}, []).
-spec create_generation(emqx_ds:shard(), emqx_ds:time(), emqx_ds_conf:backend_config()) -> -spec create_generation(emqx_ds:shard(), emqx_ds:time(), emqx_ds_conf:backend_config()) ->
{ok, gen_id()} | {error, nonmonotonic}. {ok, gen_id()} | {error, nonmonotonic}.
create_generation(Shard, Since, Config = {_Module, _Options}) -> create_generation(Shard, Since, Config = {_Module, _Options}) ->
gen_server:call(?REF(Shard), {create_generation, Since, Config}). gen_server:call(?REF(Shard), {create_generation, Since, Config}).
-spec store( -spec store(emqx_ds:shard(), emqx_guid:guid(), emqx_ds:time(), emqx_ds:topic(), binary()) ->
emqx_ds:shard(), emqx_guid:guid(), emqx_ds:time(), emqx_ds:topic(), binary()
) ->
ok | {error, _}. ok | {error, _}.
store(Shard, GUID, Time, Topic, Msg) -> store(Shard, GUID, Time, Topic, Msg) ->
{_GenId, #{module := Mod, data := Data}} = meta_lookup_gen(Shard, Time), {_GenId, #{module := Mod, data := Data}} = meta_lookup_gen(Shard, Time),
@ -181,9 +186,9 @@ discard_iterator(Shard, ReplayID) ->
%% behaviour callbacks %% behaviour callbacks
%%================================================================================ %%================================================================================
init([Shard]) -> init({Shard, Options}) ->
process_flag(trap_exit, true), process_flag(trap_exit, true),
{ok, S0} = open_db(Shard), {ok, S0} = open_db(Shard, Options),
S = ensure_current_generation(S0), S = ensure_current_generation(S0),
ok = populate_metadata(S), ok = populate_metadata(S),
{ok, S}. {ok, S}.
@ -265,16 +270,17 @@ create_gen(GenId, Since, {Module, Options}, S = #s{db = DBHandle, cf_generations
}, },
{ok, Gen, S#s{cf_generations = NewCFs ++ CFs}}. {ok, Gen, S#s{cf_generations = NewCFs ++ CFs}}.
-spec open_db(emqx_ds:shard()) -> {ok, state()} | {error, _TODO}. -spec open_db(emqx_ds:shard(), options()) -> {ok, state()} | {error, _TODO}.
open_db(Shard) -> open_db(Shard, Options) ->
Filename = binary_to_list(Shard), DBDir = unicode:characters_to_list(maps:get(dir, Options, Shard)),
DBOptions = [ DBOptions = [
{create_if_missing, true}, {create_if_missing, true},
{create_missing_column_families, true} {create_missing_column_families, true}
| emqx_ds_conf:db_options() | emqx_ds_conf:db_options()
], ],
_ = filelib:ensure_dir(DBDir),
ExistingCFs = ExistingCFs =
case rocksdb:list_column_families(Filename, DBOptions) of case rocksdb:list_column_families(DBDir, DBOptions) of
{ok, CFs} -> {ok, CFs} ->
[{Name, []} || Name <- CFs, Name /= ?DEFAULT_CF, Name /= ?ITERATOR_CF]; [{Name, []} || Name <- CFs, Name /= ?DEFAULT_CF, Name /= ?ITERATOR_CF];
% DB is not present. First start % DB is not present. First start
@ -286,7 +292,7 @@ open_db(Shard) ->
{?ITERATOR_CF, ?ITERATOR_CF_OPTS} {?ITERATOR_CF, ?ITERATOR_CF_OPTS}
| ExistingCFs | ExistingCFs
], ],
case rocksdb:open(Filename, DBOptions, ColumnFamilies) of case rocksdb:open(DBDir, DBOptions, ColumnFamilies) of
{ok, DBHandle, [_CFDefault, CFIterator | CFRefs]} -> {ok, DBHandle, [_CFDefault, CFIterator | CFRefs]} ->
{CFNames, _} = lists:unzip(ExistingCFs), {CFNames, _} = lists:unzip(ExistingCFs),
{ok, #s{ {ok, #s{

View File

@ -6,7 +6,7 @@
-behaviour(supervisor). -behaviour(supervisor).
%% API: %% API:
-export([start_link/0, start_shard/1, stop_shard/1]). -export([start_link/0, start_shard/2, stop_shard/1]).
%% behaviour callbacks: %% behaviour callbacks:
-export([init/1]). -export([init/1]).
@ -25,9 +25,10 @@
start_link() -> start_link() ->
supervisor:start_link({local, ?SUP}, ?MODULE, []). supervisor:start_link({local, ?SUP}, ?MODULE, []).
-spec start_shard(emqx_ds:shard()) -> supervisor:startchild_ret(). -spec start_shard(emqx_ds:shard(), emqx_ds_storage_layer:options()) ->
start_shard(Shard) -> supervisor:startchild_ret().
supervisor:start_child(?SUP, shard_child_spec(Shard)). start_shard(Shard, Options) ->
supervisor:start_child(?SUP, shard_child_spec(Shard, Options)).
-spec stop_shard(emqx_ds:shard()) -> ok | {error, _}. -spec stop_shard(emqx_ds:shard()) -> ok | {error, _}.
stop_shard(Shard) -> stop_shard(Shard) ->
@ -51,11 +52,12 @@ init([]) ->
%% Internal functions %% Internal functions
%%================================================================================ %%================================================================================
-spec shard_child_spec(emqx_ds:shard()) -> supervisor:child_spec(). -spec shard_child_spec(emqx_ds:shard(), emqx_ds_storage_layer:options()) ->
shard_child_spec(Shard) -> supervisor:child_spec().
shard_child_spec(Shard, Options) ->
#{ #{
id => Shard, id => Shard,
start => {emqx_ds_storage_layer, start_link, [Shard]}, start => {emqx_ds_storage_layer, start_link, [Shard, Options]},
shutdown => 5_000, shutdown => 5_000,
restart => permanent, restart => permanent,
type => worker type => worker

View File

@ -2,7 +2,7 @@
{application, emqx_durable_storage, [ {application, emqx_durable_storage, [
{description, "Message persistence and subscription replays for EMQX"}, {description, "Message persistence and subscription replays for EMQX"},
% strict semver, bump manually! % strict semver, bump manually!
{vsn, "0.1.0"}, {vsn, "0.1.1"},
{modules, []}, {modules, []},
{registered, []}, {registered, []},
{applications, [kernel, stdlib, rocksdb, gproc, mria]}, {applications, [kernel, stdlib, rocksdb, gproc, mria]},

View File

@ -33,7 +33,7 @@
%% Smoke test for opening and reopening the database %% Smoke test for opening and reopening the database
t_open(_Config) -> t_open(_Config) ->
ok = emqx_ds_storage_layer_sup:stop_shard(?SHARD), ok = emqx_ds_storage_layer_sup:stop_shard(?SHARD),
{ok, _} = emqx_ds_storage_layer_sup:start_shard(?SHARD). {ok, _} = emqx_ds_storage_layer_sup:start_shard(?SHARD, #{}).
%% Smoke test of store function %% Smoke test of store function
t_store(_Config) -> t_store(_Config) ->
@ -263,7 +263,7 @@ end_per_suite(_Config) ->
init_per_testcase(TC, Config) -> init_per_testcase(TC, Config) ->
ok = set_shard_config(shard(TC), ?DEFAULT_CONFIG), ok = set_shard_config(shard(TC), ?DEFAULT_CONFIG),
{ok, _} = emqx_ds_storage_layer_sup:start_shard(shard(TC)), {ok, _} = emqx_ds_storage_layer_sup:start_shard(shard(TC), #{}),
Config. Config.
end_per_testcase(TC, _Config) -> end_per_testcase(TC, _Config) ->

View File

@ -1,6 +1,6 @@
{application, emqx_eviction_agent, [ {application, emqx_eviction_agent, [
{description, "EMQX Eviction Agent"}, {description, "EMQX Eviction Agent"},
{vsn, "5.0.1"}, {vsn, "5.1.0"},
{registered, [ {registered, [
emqx_eviction_agent_sup, emqx_eviction_agent_sup,
emqx_eviction_agent, emqx_eviction_agent,

View File

@ -165,9 +165,8 @@ handle_cast(Msg, Channel) ->
?SLOG(error, #{msg => "unexpected_cast", cast => Msg}), ?SLOG(error, #{msg => "unexpected_cast", cast => Msg}),
{noreply, Channel}. {noreply, Channel}.
terminate(Reason, #{conninfo := ConnInfo, clientinfo := ClientInfo, session := Session} = Channel) -> terminate(Reason, #{clientinfo := ClientInfo, session := Session} = Channel) ->
ok = cancel_expiry_timer(Channel), ok = cancel_expiry_timer(Channel),
(Reason =:= expired) andalso emqx_persistent_session:persist(ClientInfo, ConnInfo, Session),
emqx_session:terminate(ClientInfo, Reason, Session). emqx_session:terminate(ClientInfo, Reason, Session).
code_change(_OldVsn, Channel, _Extra) -> code_change(_OldVsn, Channel, _Extra) ->
@ -181,34 +180,25 @@ handle_deliver(
Delivers, Delivers,
#{ #{
takeover := true, takeover := true,
pendings := Pendings, pendings := Pendings
session := Session,
clientinfo := #{clientid := ClientId} = ClientInfo
} = Channel } = Channel
) -> ) ->
%% NOTE: Order is important here. While the takeover is in %% NOTE: Order is important here. While the takeover is in
%% progress, the session cannot enqueue messages, since it already %% progress, the session cannot enqueue messages, since it already
%% passed on the queue to the new connection in the session state. %% passed on the queue to the new connection in the session state.
NPendings = lists:append( NPendings = lists:append(Pendings, emqx_channel:maybe_nack(Delivers)),
Pendings,
emqx_session:ignore_local(ClientInfo, emqx_channel:maybe_nack(Delivers), ClientId, Session)
),
Channel#{pendings => NPendings}; Channel#{pendings => NPendings};
handle_deliver( handle_deliver(
Delivers, Delivers,
#{ #{
takeover := false, takeover := false,
session := Session, session := Session,
clientinfo := #{clientid := ClientId} = ClientInfo clientinfo := ClientInfo
} = Channel } = Channel
) -> ) ->
Delivers1 = emqx_channel:maybe_nack(Delivers), Delivers1 = emqx_channel:maybe_nack(Delivers),
Delivers2 = emqx_session:ignore_local(ClientInfo, Delivers1, ClientId, Session), NSession = emqx_session:enqueue(ClientInfo, Delivers1, Session),
NSession = emqx_session:enqueue(ClientInfo, Delivers2, Session), Channel#{session := NSession}.
NChannel = persist(NSession, Channel),
%% We consider queued/dropped messages as delivered since they are now in the session state.
emqx_channel:maybe_mark_as_delivered(Session, Delivers),
NChannel.
cancel_expiry_timer(#{expiry_timer := TRef}) when is_reference(TRef) -> cancel_expiry_timer(#{expiry_timer := TRef}) when is_reference(TRef) ->
_ = erlang:cancel_timer(TRef), _ = erlang:cancel_timer(TRef),
@ -251,13 +241,8 @@ open_session(ConnInfo, #{clientid := ClientId} = ClientInfo) ->
), ),
Pendings1 = lists:usort(lists:append(Pendings0, emqx_utils:drain_deliver())), Pendings1 = lists:usort(lists:append(Pendings0, emqx_utils:drain_deliver())),
NSession = emqx_session:enqueue( NSession = emqx_session:enqueue(
ClientInfo,
emqx_session:ignore_local(
ClientInfo, ClientInfo,
emqx_channel:maybe_nack(Pendings1), emqx_channel:maybe_nack(Pendings1),
ClientId,
Session
),
Session Session
), ),
NChannel = Channel#{session => NSession}, NChannel = Channel#{session => NSession},
@ -334,10 +319,6 @@ channel(ConnInfo, ClientInfo) ->
pendings => [] pendings => []
}. }.
persist(Session, #{clientinfo := ClientInfo, conninfo := ConnInfo} = Channel) ->
Session1 = emqx_persistent_session:persist(ClientInfo, ConnInfo, Session),
Channel#{session => Session1}.
info(Channel) -> info(Channel) ->
#{ #{
conninfo => maps:get(conninfo, Channel, undefined), conninfo => maps:get(conninfo, Channel, undefined),

View File

@ -30,19 +30,12 @@ init_per_suite(Config) ->
end_per_suite(_Config) -> end_per_suite(_Config) ->
emqx_common_test_helpers:stop_apps([emqx_eviction_agent, emqx_conf]). emqx_common_test_helpers:stop_apps([emqx_eviction_agent, emqx_conf]).
init_per_testcase(t_persistence, Config) -> init_per_testcase(t_persistence, _Config) ->
emqx_config:put([persistent_session_store, enabled], true), {skip, "Existing session persistence implementation is being phased out"};
{ok, _} = emqx_persistent_session_sup:start_link(),
emqx_persistent_session:init_db_backend(),
?assert(emqx_persistent_session:is_store_enabled()),
Config;
init_per_testcase(_TestCase, Config) -> init_per_testcase(_TestCase, Config) ->
Config. Config.
end_per_testcase(t_persistence, Config) -> end_per_testcase(t_persistence, Config) ->
emqx_config:put([persistent_session_store, enabled], false),
emqx_persistent_session:init_db_backend(),
?assertNot(emqx_persistent_session:is_store_enabled()),
Config; Config;
end_per_testcase(_TestCase, _Config) -> end_per_testcase(_TestCase, _Config) ->
ok. ok.

View File

@ -823,7 +823,11 @@ code_change(_OldVsn, State, _Extra) ->
do_unregister_channel_task(Items, GwName, CmTabs) -> do_unregister_channel_task(Items, GwName, CmTabs) ->
lists:foreach( lists:foreach(
fun({ChanPid, ClientId}) -> fun({ChanPid, ClientId}) ->
try
do_unregister_channel(GwName, {ClientId, ChanPid}, CmTabs) do_unregister_channel(GwName, {ClientId, ChanPid}, CmTabs)
catch
error:badarg -> ok
end
end, end,
Items Items
). ).

View File

@ -642,7 +642,6 @@ status_to_error(_) ->
{error, undefined}. {error, undefined}.
%% Compatibility %% Compatibility
external_error({error, {unhealthy_target, Message}}) -> Message;
external_error({error, Reason}) -> Reason; external_error({error, Reason}) -> Reason;
external_error(Other) -> Other. external_error(Other) -> Other.

View File

@ -1,6 +1,6 @@
{deps, [ {deps, [
{emqx, {path, "../../apps/emqx"}}, {emqx, {path, "../../apps/emqx"}},
{erlcloud, {git, "https://github.com/emqx/erlcloud", {tag, "3.7.0-emqx-1"}}}, {erlcloud, {git, "https://github.com/emqx/erlcloud", {tag, "3.7.0-emqx-2"}}},
{emqx_bridge_http, {path, "../emqx_bridge_http"}} {emqx_bridge_http, {path, "../emqx_bridge_http"}}
]}. ]}.

View File

@ -811,8 +811,8 @@ is_down() {
PID="$1" PID="$1"
if ps -p "$PID" >/dev/null; then if ps -p "$PID" >/dev/null; then
# still around # still around
# shellcheck disable=SC2009 # this grep pattern is not a part of the progra names # shellcheck disable=SC2009 # this grep pattern is not a part of the program names
if ps -p "$PID" | $GREP -q 'defunct'; then if ps -efp "$PID" | $GREP -q 'defunct'; then
# zombie state, print parent pid # zombie state, print parent pid
parent="$(ps -o ppid= -p "$PID" | tr -d ' ')" parent="$(ps -o ppid= -p "$PID" | tr -d ' ')"
logwarn "$PID is marked <defunct>, parent: $(ps -p "$parent")" logwarn "$PID is marked <defunct>, parent: $(ps -p "$parent")"
@ -974,7 +974,7 @@ maybe_warn_default_cookie() {
## using Mnesia DB backend. ## using Mnesia DB backend.
if [[ "$IS_BOOT_COMMAND" == 'yes' && "$(get_boot_config 'node.db_backend')" == "rlog" ]]; then if [[ "$IS_BOOT_COMMAND" == 'yes' && "$(get_boot_config 'node.db_backend')" == "rlog" ]]; then
if ! (echo -e "$COMPATIBILITY_INFO" | $GREP -q 'MNESIA_OK'); then if ! (echo -e "$COMPATIBILITY_INFO" | $GREP -q 'MNESIA_OK'); then
logerr "DB Backend is RLOG, but an incompatible OTP version has been detected. Falling back to using Mnesia DB backend." logwarn "DB Backend is RLOG, but an incompatible OTP version has been detected. Falling back to using Mnesia DB backend."
export EMQX_NODE__DB_BACKEND=mnesia export EMQX_NODE__DB_BACKEND=mnesia
export EMQX_NODE__DB_ROLE=core export EMQX_NODE__DB_ROLE=core
fi fi

View File

@ -0,0 +1 @@
Release packages for Amazon Linux 2023

View File

@ -0,0 +1 @@
Release packages for Debian 12.

View File

@ -0,0 +1 @@
Updated `jq` dependency to version 0.3.10 which includes `oniguruma` library update to version 6.9.8 with few minor security fixes.

View File

@ -0,0 +1 @@
Updated RocksDB version to 1.8.0-emqx-1 via ekka update to 0.15.6.

View File

@ -0,0 +1 @@
Avoid logging irrelevant error messages during EMQX shutdown.

View File

@ -0,0 +1,4 @@
Import additional configurations from EMQX backup file (`emqx ctl import` command):
- rule_engine (previously not imported due to the bug)
- topic_metrics (previously not implemented)
- slow_subs (previously not implemented).

View File

@ -0,0 +1 @@
Implemented Amazon Kinesis Data Streams producer data integration bridge .

View File

@ -1,4 +1,4 @@
ARG BUILD_FROM=ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-debian11 ARG BUILD_FROM=ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-debian11
ARG RUN_FROM=debian:11-slim ARG RUN_FROM=debian:11-slim
FROM ${BUILD_FROM} AS builder FROM ${BUILD_FROM} AS builder

6
dev
View File

@ -373,15 +373,15 @@ boot() {
{:ok, _} = Application.ensure_all_started(:emqx_machine) {:ok, _} = Application.ensure_all_started(:emqx_machine)
' '
if [ -n "${EPMD_ARGS:-}" ]; then if [ -n "${EPMD_ARGS:-}" ]; then
EPMD_ARGS_ELIXIR="--erl $EPMD_ARGS" EPMD_ARGS_ELIXIR="$EPMD_ARGS"
else else
EPMD_ARGS_ELIXIR="" EPMD_ARGS_ELIXIR="-no_op true"
fi fi
# shellcheck disable=SC2086 # shellcheck disable=SC2086
env APPS="$APPS" iex \ env APPS="$APPS" iex \
--name "$EMQX_NODE_NAME" \ --name "$EMQX_NODE_NAME" \
$EPMD_ARGS_ELIXIR \ --erl "$EPMD_ARGS_ELIXIR" \
--erl '-user Elixir.IEx.CLI' \ --erl '-user Elixir.IEx.CLI' \
--erl '-proto_dist ekka' \ --erl '-proto_dist ekka' \
--vm-args "$ARGS_FILE" \ --vm-args "$ARGS_FILE" \

28
mix.exs
View File

@ -54,7 +54,7 @@ defmodule EMQXUmbrella.MixProject do
{:jiffy, github: "emqx/jiffy", tag: "1.0.5", override: true}, {:jiffy, github: "emqx/jiffy", tag: "1.0.5", override: true},
{:cowboy, github: "emqx/cowboy", tag: "2.9.2", override: true}, {:cowboy, github: "emqx/cowboy", tag: "2.9.2", override: true},
{:esockd, github: "emqx/esockd", tag: "5.9.6", override: true}, {:esockd, github: "emqx/esockd", tag: "5.9.6", override: true},
{:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.7.2-emqx-11", override: true}, {:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.8.0-emqx-1", override: true},
{:ekka, github: "emqx/ekka", tag: "0.15.7", override: true}, {:ekka, github: "emqx/ekka", tag: "0.15.7", override: true},
{:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true}, {:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true},
{:grpc, github: "emqx/grpc-erl", tag: "0.6.8", override: true}, {:grpc, github: "emqx/grpc-erl", tag: "0.6.8", override: true},
@ -191,7 +191,8 @@ defmodule EMQXUmbrella.MixProject do
:emqx_ft, :emqx_ft,
:emqx_s3, :emqx_s3,
:emqx_schema_registry, :emqx_schema_registry,
:emqx_enterprise :emqx_enterprise,
:emqx_bridge_kinesis
]) ])
end end
@ -392,13 +393,32 @@ defmodule EMQXUmbrella.MixProject do
def check_profile!() do def check_profile!() do
valid_envs = [ valid_envs = [
:dev,
:emqx, :emqx,
:"emqx-pkg", :"emqx-pkg",
:"emqx-enterprise", :"emqx-enterprise",
:"emqx-enterprise-pkg" :"emqx-enterprise-pkg"
] ]
if Mix.env() == :dev do
env_profile = System.get_env("PROFILE")
if env_profile do
# copy from PROFILE env var
System.get_env("PROFILE")
|> String.to_atom()
|> Mix.env()
else
IO.puts(
IO.ANSI.format([
:yellow,
"Warning: env var PROFILE is unset; defaulting to emqx"
])
)
Mix.env(:emqx)
end
end
if Mix.env() not in valid_envs do if Mix.env() not in valid_envs do
formatted_envs = formatted_envs =
valid_envs valid_envs
@ -769,7 +789,7 @@ defmodule EMQXUmbrella.MixProject do
defp jq_dep() do defp jq_dep() do
if enable_jq?(), if enable_jq?(),
do: [{:jq, github: "emqx/jq", tag: "v0.3.9", override: true}], do: [{:jq, github: "emqx/jq", tag: "v0.3.10", override: true}],
else: [] else: []
end end

View File

@ -61,7 +61,7 @@
, {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}} , {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}}
, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}} , {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}}
, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}} , {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}}
, {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.7.2-emqx-11"}}} , {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.8.0-emqx-1"}}}
, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.7"}}} , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.7"}}}
, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}} , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}
, {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.8"}}} , {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.8"}}}

View File

@ -42,7 +42,7 @@ quicer() ->
{quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.114"}}}. {quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.114"}}}.
jq() -> jq() ->
{jq, {git, "https://github.com/emqx/jq", {tag, "v0.3.9"}}}. {jq, {git, "https://github.com/emqx/jq", {tag, "v0.3.10"}}}.
deps(Config) -> deps(Config) ->
{deps, OldDeps} = lists:keyfind(deps, 1, Config), {deps, OldDeps} = lists:keyfind(deps, 1, Config),
@ -104,6 +104,7 @@ is_community_umbrella_app("apps/emqx_ft") -> false;
is_community_umbrella_app("apps/emqx_s3") -> false; is_community_umbrella_app("apps/emqx_s3") -> false;
is_community_umbrella_app("apps/emqx_schema_registry") -> false; is_community_umbrella_app("apps/emqx_schema_registry") -> false;
is_community_umbrella_app("apps/emqx_enterprise") -> false; is_community_umbrella_app("apps/emqx_enterprise") -> false;
is_community_umbrella_app("apps/emqx_bridge_kinesis") -> false;
is_community_umbrella_app(_) -> true. is_community_umbrella_app(_) -> true.
is_jq_supported() -> is_jq_supported() ->

View File

@ -0,0 +1,85 @@
emqx_bridge_kinesis {
config_enable.desc:
"""Enable or disable this bridge"""
config_enable.label:
"""Enable Or Disable Bridge"""
desc_config.desc:
"""Configuration for an Amazon Kinesis bridge."""
desc_config.label:
"""Amazon Kinesis Bridge Configuration"""
desc_name.desc:
"""Bridge name."""
desc_name.label:
"""Bridge Name"""
desc_type.desc:
"""The Bridge Type"""
desc_type.label:
"""Bridge Type"""
pool_size.desc:
"""The pool size."""
pool_size.label:
"""Pool Size"""
local_topic.desc:
"""The MQTT topic filter to be forwarded to Amazon Kinesis. All MQTT `PUBLISH` messages with the topic
matching the `local_topic` will be forwarded.</br>
NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also `local_topic` is
configured, then both the data got from the rule and the MQTT messages that match `local_topic`
will be forwarded."""
local_topic.label:
"""Local Topic"""
payload_template.desc:
"""The template for formatting the outgoing messages. If undefined, will send all the available context in JSON format."""
payload_template.label:
"""Payload template"""
aws_access_key_id.desc:
"""Access Key ID for connecting to Amazon Kinesis."""
aws_access_key_id.label:
"""AWS Access Key ID"""
aws_secret_access_key.desc:
"""AWS Secret Access Key for connecting to Amazon Kinesis."""
aws_secret_access_key.label:
"""AWS Secret Access Key"""
endpoint.desc:
"""The url of Amazon Kinesis endpoint."""
endpoint.label:
"""Amazon Kinesis Endpoint"""
stream_name.desc:
"""The Amazon Kinesis Stream to publish messages to."""
stream_name.label:
"""Amazon Kinesis Stream"""
partition_key.desc:
"""The Amazon Kinesis Partition Key associated to published message. Placeholders in format of ${var} are supported."""
partition_key.label:
"""Partition key"""
max_retries.desc:
"""Max retry times if an error occurs when sending a request."""
max_retries.label:
"""Max Retries"""
}

View File

@ -9,7 +9,7 @@
## example: ## example:
## ./scripts/buildx.sh --profile emqx --pkgtype tgz --arch arm64 \ ## ./scripts/buildx.sh --profile emqx --pkgtype tgz --arch arm64 \
## --builder ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-debian11 ## --builder ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-debian11
set -euo pipefail set -euo pipefail
@ -24,7 +24,7 @@ help() {
echo "--arch amd64|arm64: Target arch to build the EMQX package for" echo "--arch amd64|arm64: Target arch to build the EMQX package for"
echo "--src_dir <SRC_DIR>: EMQX source code in this dir, default to PWD" echo "--src_dir <SRC_DIR>: EMQX source code in this dir, default to PWD"
echo "--builder <BUILDER>: Builder image to pull" echo "--builder <BUILDER>: Builder image to pull"
echo " E.g. ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-debian11" echo " E.g. ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-debian11"
} }
die() { die() {

View File

@ -0,0 +1,78 @@
#!/usr/bin/env elixir
alias EMQXUmbrella.MixProject
{:ok, _} = Application.ensure_all_started(:mix)
# note: run from the project root
File.cwd!()
|> Path.join("mix.exs")
|> Code.compile_file()
inputs = MixProject.check_profile!()
profile = Mix.env()
# need to use this information because we might have compiled all
# applications in the test profile, and thus filter what's in the
# release lib directory.
rel_apps = MixProject.applications(inputs.edition_type)
apps =
rel_apps
|> Keyword.keys()
|> Enum.filter(&(to_string(&1) =~ "emqx"))
|> Enum.reject(&(&1 in [:emqx_mix]))
:xref.start(:xref)
:xref.set_default(:xref, warnings: false)
rel_dir = '_build/#{profile}/lib/'
:xref.add_release(:xref, rel_dir)
{:ok, calls} = :xref.q(:xref, '(App) (XC | [#{Enum.join(apps, ",")}] || mria:create_table/_)')
emqx_calls =
calls
|> Enum.map(&elem(&1, 0))
|> Enum.filter(&(to_string(&1) =~ "emqx_"))
|> MapSet.new()
Path.wildcard(rel_dir ++ "*/ebin")
|> Enum.each(fn dir ->
dir
|> to_charlist()
|> :code.add_pathz()
end)
Path.wildcard(rel_dir ++ "*")
|> Enum.map(fn dir ->
dir
|> Path.basename()
|> String.to_atom()
|> Application.load()
end)
reboot_apps = :emqx_machine_boot.sorted_reboot_apps() |> MapSet.new()
missing_reboot_apps = MapSet.difference(emqx_calls, reboot_apps)
if MapSet.size(missing_reboot_apps) != 0 do
IO.puts(
:stderr,
IO.ANSI.format([
:red,
"Some applications are missing from `emqx_machine_boot:sorted_reboot_apps/0`!\n",
"Missing applications:\n",
Enum.map(missing_reboot_apps, fn app ->
" * #{app}\n"
end),
"\n",
:green,
"Hint: maybe add them to `emqx_machine_boot:basic_reboot_apps_edition/1`\n",
"\n",
:yellow,
"Applications that call `mria:create_table` need to be added to that list;\n",
" otherwise, when a node joins a cluster, it might lose tables.\n"
])
)
System.halt(1)
end

View File

@ -219,6 +219,9 @@ for dep in ${CT_DEPS}; do
hstreamdb) hstreamdb)
FILES+=( '.ci/docker-compose-file/docker-compose-hstreamdb.yaml' ) FILES+=( '.ci/docker-compose-file/docker-compose-hstreamdb.yaml' )
;; ;;
kinesis)
FILES+=( '.ci/docker-compose-file/docker-compose-kinesis.yaml' )
;;
*) *)
echo "unknown_ct_dependency $dep" echo "unknown_ct_dependency $dep"
exit 1 exit 1

View File

@ -57,7 +57,7 @@ codesign -s "${APPLE_DEVELOPER_IDENTITY}" -f --verbose=4 --timestamp --options=r
codesign -s "${APPLE_DEVELOPER_IDENTITY}" -f --verbose=4 --timestamp --options=runtime \ codesign -s "${APPLE_DEVELOPER_IDENTITY}" -f --verbose=4 --timestamp --options=runtime \
"${REL_DIR}"/lib/os_mon-*/priv/bin/{cpu_sup,memsup} "${REL_DIR}"/lib/os_mon-*/priv/bin/{cpu_sup,memsup}
codesign -s "${APPLE_DEVELOPER_IDENTITY}" -f --verbose=4 --timestamp --options=runtime \ codesign -s "${APPLE_DEVELOPER_IDENTITY}" -f --verbose=4 --timestamp --options=runtime \
"${REL_DIR}"/lib/jq-*/priv/{jq_nif1.so,libjq.1.dylib,libonig.4.dylib,erlang_jq_port} "${REL_DIR}"/lib/jq-*/priv/{jq_nif1.so,libjq.1.dylib,libonig.5.dylib,erlang_jq_port}
# other files from runtime and dependencies # other files from runtime and dependencies
for f in \ for f in \
asn1rt_nif.so \ asn1rt_nif.so \

View File

@ -46,6 +46,7 @@ export SCRIPTS="${CODE_PATH}/scripts"
export EMQX_NAME export EMQX_NAME
export PACKAGE_PATH="${CODE_PATH}/_packages/${EMQX_NAME}" export PACKAGE_PATH="${CODE_PATH}/_packages/${EMQX_NAME}"
export RELUP_PACKAGE_PATH="${CODE_PATH}/_upgrade_base" export RELUP_PACKAGE_PATH="${CODE_PATH}/_upgrade_base"
export PAHO_MQTT_TESTING_PATH="${PAHO_MQTT_TESTING_PATH:-/paho-mqtt-testing}"
SYSTEM="$("$SCRIPTS"/get-distro.sh)" SYSTEM="$("$SCRIPTS"/get-distro.sh)"
@ -64,7 +65,7 @@ fi
PACKAGE_VERSION="$("$CODE_PATH"/pkg-vsn.sh "${EMQX_NAME}")" PACKAGE_VERSION="$("$CODE_PATH"/pkg-vsn.sh "${EMQX_NAME}")"
PACKAGE_VERSION_LONG="$("$CODE_PATH"/pkg-vsn.sh "${EMQX_NAME}" --long --elixir "${IS_ELIXIR}")" PACKAGE_VERSION_LONG="$("$CODE_PATH"/pkg-vsn.sh "${EMQX_NAME}" --long --elixir "${IS_ELIXIR}")"
PACKAGE_NAME="${EMQX_NAME}-${PACKAGE_VERSION_LONG}" PACKAGE_NAME="${EMQX_NAME}-${PACKAGE_VERSION_LONG}"
PACKAGE_FILE_NAME="${PACKAGE_NAME}.${PKG_SUFFIX}" PACKAGE_FILE_NAME="${PACKAGE_FILE_NAME:-${PACKAGE_NAME}.${PKG_SUFFIX}}"
PACKAGE_FILE="${PACKAGE_PATH}/${PACKAGE_FILE_NAME}" PACKAGE_FILE="${PACKAGE_PATH}/${PACKAGE_FILE_NAME}"
if ! [ -f "$PACKAGE_FILE" ]; then if ! [ -f "$PACKAGE_FILE" ]; then
@ -75,9 +76,21 @@ fi
emqx_prepare(){ emqx_prepare(){
mkdir -p "${PACKAGE_PATH}" mkdir -p "${PACKAGE_PATH}"
if [ ! -d "/paho-mqtt-testing" ]; then if [ ! -d "${PAHO_MQTT_TESTING_PATH}" ]; then
git clone -b develop-4.0 https://github.com/emqx/paho.mqtt.testing.git /paho-mqtt-testing git clone -b develop-4.0 https://github.com/emqx/paho.mqtt.testing.git "${PAHO_MQTT_TESTING_PATH}"
fi fi
# Debian 12 complains if we don't use venv
case "${SYSTEM:-}" in
debian12)
apt-get update -y && apt-get install -y virtualenv
virtualenv venv
# https://www.shellcheck.net/wiki/SC1091
# shellcheck source=/dev/null
source ./venv/bin/activate
;;
*)
;;
esac
pip3 install pytest pip3 install pytest
} }
@ -97,36 +110,22 @@ emqx_test(){
# fi # fi
# sed -i '/emqx_telemetry/d' "${PACKAGE_PATH}"/emqx/data/loaded_plugins # sed -i '/emqx_telemetry/d' "${PACKAGE_PATH}"/emqx/data/loaded_plugins
echo "running ${packagename} start" run_test "${PACKAGE_PATH}/emqx/bin" "${PACKAGE_PATH}/emqx/log" "${PACKAGE_PATH}/emqx/releases/emqx_vars"
if ! "${PACKAGE_PATH}"/emqx/bin/emqx start; then
cat "${PACKAGE_PATH}"/emqx/log/erlang.log.1 || true
cat "${PACKAGE_PATH}"/emqx/log/emqx.log.1 || true
exit 1
fi
"$SCRIPTS/test/emqx-smoke-test.sh" 127.0.0.1 18083
pytest -v /paho-mqtt-testing/interoperability/test_client/V5/test_connect.py::test_basic
if ! "${PACKAGE_PATH}"/emqx/bin/emqx stop; then
cat "${PACKAGE_PATH}"/emqx/log/erlang.log.1 || true
cat "${PACKAGE_PATH}"/emqx/log/emqx.log.1 || true
exit 1
fi
echo "running ${packagename} stop"
rm -rf "${PACKAGE_PATH}"/emqx rm -rf "${PACKAGE_PATH}"/emqx
;; ;;
"deb") "deb")
dpkg -i "${PACKAGE_PATH}/${packagename}" dpkg -i "${PACKAGE_PATH}/${packagename}"
if [ "$(dpkg -l |grep emqx |awk '{print $1}')" != "ii" ] if [ "$(dpkg -l | grep ${EMQX_NAME} | awk '{print $1}')" != "ii" ]
then then
echo "package install error" echo "package install error"
exit 1 exit 1
fi fi
echo "running ${packagename} start" run_test "/usr/bin" "/var/log/emqx" "$(dpkg -L ${EMQX_NAME} | grep emqx_vars)"
run_test
echo "running ${packagename} stop"
dpkg -r "${EMQX_NAME}" dpkg -r "${EMQX_NAME}"
if [ "$(dpkg -l |grep emqx |awk '{print $1}')" != "rc" ] if [ "$(dpkg -l | grep ${EMQX_NAME} | awk '{print $1}')" != "rc" ]
then then
echo "package remove error" echo "package remove error"
exit 1 exit 1
@ -146,6 +145,10 @@ emqx_test(){
# el8 is fine with python3 # el8 is fine with python3
true true
;; ;;
"el9")
# el9 is fine with python3
true
;;
*) *)
alternatives --list | grep python && alternatives --set python /usr/bin/python2 alternatives --list | grep python && alternatives --set python /usr/bin/python2
;; ;;
@ -161,12 +164,10 @@ emqx_test(){
exit 1 exit 1
fi fi
echo "running ${packagename} start" run_test "/usr/bin" "/var/log/emqx" "$(rpm -ql ${EMQX_NAME} | grep emqx_vars)"
run_test
echo "running ${packagename} stop"
rpm -e "${EMQX_NAME}" rpm -e "${EMQX_NAME}"
if [ "$(rpm -q emqx)" != "package emqx is not installed" ];then if [ "$(rpm -q ${EMQX_NAME})" != "package ${EMQX_NAME} is not installed" ];then
echo "package uninstall error" echo "package uninstall error"
exit 1 exit 1
fi fi
@ -175,8 +176,10 @@ emqx_test(){
} }
run_test(){ run_test(){
local bin_dir="$1"
local log_dir="$2"
local emqx_env_vars="$3"
# sed -i '/emqx_telemetry/d' /var/lib/emqx/loaded_plugins # sed -i '/emqx_telemetry/d' /var/lib/emqx/loaded_plugins
emqx_env_vars=$(dirname "$(readlink "$(command -v emqx)")")/../releases/emqx_vars
if [ -f "$emqx_env_vars" ]; if [ -f "$emqx_env_vars" ];
then then
@ -194,21 +197,21 @@ EOF
echo "Error: cannot locate emqx_vars" echo "Error: cannot locate emqx_vars"
exit 1 exit 1
fi fi
if ! emqx 'start'; then echo "running ${packagename} start"
cat /var/log/emqx/erlang.log.1 || true if ! "${bin_dir}/emqx" 'start'; then
cat /var/log/emqx/emqx.log.1 || true echo "ERROR: failed_to_start_emqx"
cat "${log_dir}/erlang.log.1" || true
cat "${log_dir}/emqx.log.1" || true
exit 1 exit 1
fi fi
"$SCRIPTS/test/emqx-smoke-test.sh" 127.0.0.1 18083 "$SCRIPTS/test/emqx-smoke-test.sh" 127.0.0.1 18083
pytest -v /paho-mqtt-testing/interoperability/test_client/V5/test_connect.py::test_basic pytest -v "${PAHO_MQTT_TESTING_PATH}"/interoperability/test_client/V5/test_connect.py::test_basic
# shellcheck disable=SC2009 # pgrep does not support Extended Regular Expressions "${bin_dir}/emqx" ping
ps -ef | grep -E '\-progname\s.+emqx\s' echo "running ${packagename} stop"
if ! emqx 'stop'; then if ! "${bin_dir}/emqx" 'stop'; then
# shellcheck disable=SC2009 # pgrep does not support Extended Regular Expressions
ps -ef | grep -E '\-progname\s.+emqx\s'
echo "ERROR: failed_to_stop_emqx_with_the_stop_command" echo "ERROR: failed_to_stop_emqx_with_the_stop_command"
cat /var/log/emqx/erlang.log.1 || true cat "${log_dir}/erlang.log.1" || true
cat /var/log/emqx/emqx.log.1 || true cat "${log_dir}/emqx.log.1" || true
exit 1 exit 1
fi fi
} }

View File

@ -22,7 +22,7 @@ WEBHOOK="webhook.$NET"
BENCH="bench.$NET" BENCH="bench.$NET"
COOKIE='this-is-a-secret' COOKIE='this-is-a-secret'
## Erlang image is needed to run webhook server and emqtt-bench ## Erlang image is needed to run webhook server and emqtt-bench
ERLANG_IMAGE="ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu20.04" ERLANG_IMAGE="ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu20.04"
# builder has emqtt-bench installed # builder has emqtt-bench installed
BENCH_IMAGE="$ERLANG_IMAGE" BENCH_IMAGE="$ERLANG_IMAGE"