chore: merge master into release-51

commit cbfca8c043
@@ -9,6 +9,7 @@ DYNAMO_TAG=1.21.0
 CASSANDRA_TAG=3.11.6
 MINIO_TAG=RELEASE.2023-03-20T20-16-18Z
 OPENTS_TAG=9aa7f88
+KINESIS_TAG=2.1

 MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server
 SQLSERVER_TAG=2019-CU19-ubuntu-20.04
@@ -0,0 +1,22 @@
+version: '3.9'
+
+services:
+  greptimedb:
+    container_name: greptimedb
+    hostname: greptimedb
+    image: greptime/greptimedb:0.3.2
+    expose:
+      - "4000"
+      - "4001"
+    # uncomment for local testing
+    # ports:
+    #   - "4000:4000"
+    #   - "4001:4001"
+    restart: always
+    networks:
+      - emqx_bridge
+    command:
+      standalone start
+      --user-provider=static_user_provider:cmd:greptime_user=greptime_pwd
+      --http-addr="0.0.0.0:4000"
+      --rpc-addr="0.0.0.0:4001"
@@ -18,7 +18,7 @@ services:
       - /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
   kdc:
     hostname: kdc.emqx.net
-    image: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu20.04
+    image: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu20.04
     container_name: kdc.emqx.net
     expose:
       - 88 # kdc
@@ -0,0 +1,12 @@
+version: '3.9'
+
+services:
+  kinesis:
+    container_name: kinesis
+    image: localstack/localstack:2.1
+    environment:
+      - KINESIS_ERROR_PROBABILITY=0.0
+      - KINESIS_LATENCY=0
+    restart: always
+    networks:
+      - emqx_bridge
@@ -49,6 +49,11 @@ services:
       - 38080:38080
       # HStreamDB
       - 15670:5670
+      # Kinesis
+      - 4566:4566
+      # GreptimeDB
+      - 4000:4000
+      - 4001:4001
     command:
       - "-host=0.0.0.0"
      - "-config=/config/toxiproxy.json"
@@ -3,7 +3,7 @@ version: '3.9'
 services:
   erlang:
     container_name: erlang
-    image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu20.04}
+    image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu20.04}
     env_file:
       - conf.env
     environment:
@@ -161,5 +161,23 @@
     "listen": "0.0.0.0:6570",
     "upstream": "hstreamdb:6570",
     "enabled": true
   },
+  {
+    "name": "greptimedb_http",
+    "listen": "0.0.0.0:4000",
+    "upstream": "greptimedb:4000",
+    "enabled": true
+  },
+  {
+    "name": "greptimedb_grpc",
+    "listen": "0.0.0.0:4001",
+    "upstream": "greptimedb:4001",
+    "enabled": true
+  },
+  {
+    "name": "kinesis",
+    "listen": "0.0.0.0:4566",
+    "upstream": "kinesis:4566",
+    "enabled": true
+  }
 ]
@@ -18,7 +18,7 @@
 /apps/emqx_rule_engine/ @emqx/emqx-review-board @kjellwinblad
 /apps/emqx_slow_subs/ @emqx/emqx-review-board @lafirest
 /apps/emqx_statsd/ @emqx/emqx-review-board @JimMoen
-/apps/emqx_durable_storage/ @ieQu1
+/apps/emqx_durable_storage/ @emqx/emqx-review-board @ieQu1 @keynslug

 ## CI
 /deploy/ @emqx/emqx-review-board @Rory-Z
@@ -25,7 +25,7 @@ jobs:
   prepare:
     runs-on: ubuntu-22.04
     # prepare source with any OTP version, no need for a matrix
-    container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04"
+    container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04"

     outputs:
       PROFILE: ${{ steps.get_profile.outputs.PROFILE }}
@@ -120,7 +120,7 @@ jobs:
         # NOTE: 'otp' and 'elixir' are to configure emqx-builder image
         # only support latest otp and elixir, not a matrix
         builder:
-          - 5.1-0 # update to latest
+          - 5.1-3 # update to latest
         otp:
           - 25.3.2-1
         elixir:
@@ -21,7 +21,7 @@ on:
 jobs:
   prepare:
     runs-on: ubuntu-22.04
-    container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04
+    container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04
     outputs:
       BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }}
       IS_EXACT_TAG: ${{ steps.get_profile.outputs.IS_EXACT_TAG }}
@@ -181,24 +181,26 @@ jobs:
           - ubuntu22.04
           - ubuntu20.04
           - ubuntu18.04
+          - debian12
           - debian11
           - debian10
           - el9
           - el8
           - el7
           - amzn2
+          - amzn2023
         build_machine:
           - aws-arm64
-          - ubuntu-22.04
+          - aws-amd64
         builder:
-          - 5.1-0
+          - 5.1-3
         elixir:
           - 1.14.5
         with_elixir:
           - 'no'
         exclude:
           - arch: arm64
-            build_machine: ubuntu-22.04
+            build_machine: aws-amd64
           - arch: amd64
             build_machine: aws-arm64
         include:
@@ -206,16 +208,8 @@ jobs:
-            otp: 25.3.2-1
-            arch: amd64
-            os: ubuntu22.04
-            build_machine: ubuntu-22.04
-            builder: 5.1-0
-            elixir: 1.14.5
-            with_elixir: 'yes'
           - profile: emqx
             otp: 25.3.2-1
             arch: amd64
             os: amzn2
-            build_machine: ubuntu-22.04
-            builder: 5.1-0
+            build_machine: aws-amd64
+            builder: 5.1-3
             elixir: 1.14.5
             with_elixir: 'yes'
@@ -225,18 +219,13 @@ jobs:

     steps:
       - uses: AutoModality/action-clean@v1
        if: matrix.build_machine == 'aws-arm64'

       - uses: actions/checkout@v3
         with:
           ref: ${{ github.event.inputs.branch_or_tag }}
           fetch-depth: 0

-      - name: build emqx packages
-        env:
-          ELIXIR: ${{ matrix.elixir }}
-          PROFILE: ${{ matrix.profile }}
-          ARCH: ${{ matrix.arch }}
+      - name: fix workdir
+        run: |
+          set -eu
+          git config --global --add safe.directory "$GITHUB_WORKSPACE"
@@ -246,22 +235,33 @@ jobs:
             cd /emqx
           fi
           echo "pwd is $PWD"
-          PKGTYPES="tgz pkg"
-          IS_ELIXIR=${{ matrix.with_elixir }}
+      - name: build emqx packages
+        env:
+          PROFILE: ${{ matrix.profile }}
+          IS_ELIXIR: ${{ matrix.with_elixir }}
+          ACLOCAL_PATH: "/usr/share/aclocal:/usr/local/share/aclocal"
+        run: |
+          set -eu
           if [ "${IS_ELIXIR:-}" == 'yes' ]; then
-            PKGTYPES="tgz"
+            make "${PROFILE}-elixir-tgz"
+          else
+            make "${PROFILE}-tgz"
+            make "${PROFILE}-pkg"
           fi
+      - name: test emqx packages
+        env:
+          PROFILE: ${{ matrix.profile }}
+          IS_ELIXIR: ${{ matrix.with_elixir }}
+        run: |
+          set -eu
+          if [ "${IS_ELIXIR:-}" == 'yes' ]; then
+            ./scripts/pkg-tests.sh "${PROFILE}-elixir-tgz"
+          else
+            ./scripts/pkg-tests.sh "${PROFILE}-tgz"
+            ./scripts/pkg-tests.sh "${PROFILE}-pkg"
+          fi
-          for PKGTYPE in ${PKGTYPES};
-          do
-            ./scripts/buildx.sh \
-              --profile "${PROFILE}" \
-              --pkgtype "${PKGTYPE}" \
-              --arch "${ARCH}" \
-              --elixir "${IS_ELIXIR}" \
-              --builder "force_host"
-          done
       - uses: actions/upload-artifact@v3
         if: success()
         with:
           name: ${{ matrix.profile }}
           path: _packages/${{ matrix.profile }}/
@@ -30,9 +30,9 @@ jobs:
           - amd64
         os:
           - debian10
-          - amzn2
+          - amzn2023
         builder:
-          - 5.1-0
+          - 5.1-3
         elixir:
           - 1.14.5
@@ -32,10 +32,10 @@ jobs:
         profile:
           - ["emqx", "25.3.2-1", "el7", "erlang"]
           - ["emqx", "25.3.2-1", "ubuntu22.04", "elixir"]
-          - ["emqx-enterprise", "25.3.2-1", "amzn2", "erlang"]
+          - ["emqx-enterprise", "25.3.2-1", "amzn2023", "erlang"]
           - ["emqx-enterprise", "25.3.2-1", "ubuntu20.04", "erlang"]
         builder:
-          - 5.1-0
+          - 5.1-3
         elixir:
           - '1.14.5'
@@ -6,7 +6,7 @@ on:
 jobs:
   check_deps_integrity:
     runs-on: ubuntu-22.04
-    container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04
+    container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04

     steps:
       - uses: actions/checkout@v3
@@ -5,7 +5,7 @@ on: [pull_request]
 jobs:
   code_style_check:
     runs-on: ubuntu-22.04
-    container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04"
+    container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04"
     steps:
       - uses: actions/checkout@v3
         with:
@@ -0,0 +1,61 @@
+name: "CodeQL"
+
+on:
+  schedule:
+    - cron: '33 14 * * 4'
+  workflow_dispatch:
+    inputs:
+      ref:
+        required: false
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+    timeout-minutes: 360
+    permissions:
+      actions: read
+      contents: read
+      security-events: write
+    container:
+      image: ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04
+
+    strategy:
+      fail-fast: false
+      matrix:
+        language: [ 'cpp', 'python' ]
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.inputs.ref }}
+
+      - name: Ensure git safe dir
+        run: |
+          git config --global --add safe.directory "$GITHUB_WORKSPACE"
+          make ensure-rebar3
+
+      - name: Initialize CodeQL
+        uses: github/codeql-action/init@v2
+        with:
+          languages: ${{ matrix.language }}
+
+      - name: Build
+        if: matrix.language == 'cpp'
+        env:
+          PROFILE: emqx-enterprise
+        run: |
+          make emqx-enterprise-compile
+
+      - name: Fetch deps
+        if: matrix.language == 'python'
+        env:
+          PROFILE: emqx-enterprise
+        run: |
+          make deps-emqx-enterprise
+
+      - name: Perform CodeQL Analysis
+        uses: github/codeql-action/analyze@v2
+        with:
+          category: "/language:${{matrix.language}}"
@@ -9,7 +9,7 @@ jobs:
   elixir_apps_check:
     runs-on: ubuntu-22.04
     # just use the latest builder
-    container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04"
+    container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04"

     strategy:
       fail-fast: false
@@ -8,7 +8,7 @@ on:
 jobs:
   elixir_deps_check:
     runs-on: ubuntu-22.04
-    container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04
+    container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04

     steps:
       - name: Checkout
@@ -17,7 +17,7 @@ jobs:
         profile:
           - emqx
           - emqx-enterprise
-    container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04
+    container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04
     steps:
       - name: Checkout
         uses: actions/checkout@v3
@@ -23,7 +23,7 @@ jobs:
   prepare:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'emqx'
-    container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu20.04
+    container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu20.04
     outputs:
       BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }}
       PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }}
|
@ -87,20 +87,24 @@ jobs:
|
|||
push "debian/buster" "packages/$PROFILE-$VERSION-debian10-arm64.deb"
|
||||
push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-amd64.deb"
|
||||
push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-arm64.deb"
|
||||
push "debian/bookworm" "packages/$PROFILE-$VERSION-debian12-amd64.deb"
|
||||
push "debian/bookworm" "packages/$PROFILE-$VERSION-debian12-arm64.deb"
|
||||
push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-amd64.deb"
|
||||
push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-arm64.deb"
|
||||
push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-amd64.deb"
|
||||
push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-arm64.deb"
|
||||
push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-amd64.deb"
|
||||
push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-arm64.deb"
|
||||
push "el/6" "packages/$PROFILE-$VERSION-amzn2-amd64.rpm"
|
||||
push "el/6" "packages/$PROFILE-$VERSION-amzn2-arm64.rpm"
|
||||
push "el/7" "packages/$PROFILE-$VERSION-el7-amd64.rpm"
|
||||
push "el/7" "packages/$PROFILE-$VERSION-el7-arm64.rpm"
|
||||
push "el/8" "packages/$PROFILE-$VERSION-el8-amd64.rpm"
|
||||
push "el/8" "packages/$PROFILE-$VERSION-el8-arm64.rpm"
|
||||
push "el/9" "packages/$PROFILE-$VERSION-el9-amd64.rpm"
|
||||
push "el/9" "packages/$PROFILE-$VERSION-el9-arm64.rpm"
|
||||
push "amazon/2" "packages/$PROFILE-$VERSION-amzn2-amd64.rpm"
|
||||
push "amazon/2" "packages/$PROFILE-$VERSION-amzn2-arm64.rpm"
|
||||
push "amazon/2023" "packages/$PROFILE-$VERSION-amzn2023-amd64.rpm"
|
||||
push "amazon/2023" "packages/$PROFILE-$VERSION-amzn2023-arm64.rpm"
|
||||
|
||||
rerun-apps-version-check:
|
||||
runs-on: ubuntu-22.04
|
||||
|
|
|
@@ -26,7 +26,7 @@ jobs:
         profile:
           - emqx
           - emqx-enterprise
-    container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04"
+    container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04"
     steps:
       - uses: AutoModality/action-clean@v1
       - uses: actions/checkout@v3
@@ -12,7 +12,7 @@ jobs:
     strategy:
       matrix:
         builder:
-          - 5.1-0
+          - 5.1-3
         otp:
           - 25.3.2-1
         # no need to use more than 1 version of Elixir, since tests
@@ -17,7 +17,7 @@ jobs:
   prepare:
     runs-on: ubuntu-22.04
     # prepare source with any OTP version, no need for a matrix
-    container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-debian11
+    container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-debian11

     steps:
       - uses: actions/checkout@v3
@@ -50,7 +50,7 @@ jobs:
         os:
           - ["debian11", "debian:11-slim"]
         builder:
-          - 5.1-0
+          - 5.1-3
         otp:
           - 25.3.2-1
         elixir:
@@ -123,7 +123,7 @@ jobs:
         os:
           - ["debian11", "debian:11-slim"]
         builder:
-          - 5.1-0
+          - 5.1-3
         otp:
           - 25.3.2-1
         elixir:
@@ -15,7 +15,7 @@ concurrency:
 jobs:
   relup_test_plan:
     runs-on: ubuntu-22.04
-    container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04"
+    container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04"
     outputs:
       CUR_EE_VSN: ${{ steps.find-versions.outputs.CUR_EE_VSN }}
       OLD_VERSIONS: ${{ steps.find-versions.outputs.OLD_VERSIONS }}
@@ -34,12 +34,12 @@ jobs:
           MATRIX="$(echo "${APPS}" | jq -c '
             [
               (.[] | select(.profile == "emqx") | . + {
-                builder: "5.1-0",
+                builder: "5.1-3",
                 otp: "25.3.2-1",
                 elixir: "1.14.5"
               }),
               (.[] | select(.profile == "emqx-enterprise") | . + {
-                builder: "5.1-0",
+                builder: "5.1-3",
                 otp: ["25.3.2-1"][],
                 elixir: "1.14.5"
               })
@@ -286,7 +286,7 @@ jobs:
       - ct
       - ct_docker
     runs-on: ubuntu-22.04
-    container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04"
+    container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04"
     steps:
       - uses: AutoModality/action-clean@v1
       - uses: actions/download-artifact@v3
Makefile
@@ -2,7 +2,7 @@ REBAR = $(CURDIR)/rebar3
 BUILD = $(CURDIR)/build
 SCRIPTS = $(CURDIR)/scripts
 export EMQX_RELUP ?= true
-export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-debian11
+export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-debian11
 export EMQX_DEFAULT_RUNNER = debian:11-slim
 export EMQX_REL_FORM ?= tgz
 export QUICER_DOWNLOAD_FROM_RELEASE = 1
@@ -15,7 +15,7 @@ endif

 # Dashboard version
 # from https://github.com/emqx/emqx-dashboard5
-export EMQX_DASHBOARD_VERSION ?= v1.3.1
+export EMQX_DASHBOARD_VERSION ?= v1.3.2
 export EMQX_EE_DASHBOARD_VERSION ?= e1.1.1

 # `:=` should be used here, otherwise the `$(shell ...)` will be executed every time when the variable is used
@@ -99,6 +99,7 @@ static_checks:
	@$(REBAR) as check do xref, dialyzer
	@if [ "$${PROFILE}" = 'emqx-enterprise' ]; then $(REBAR) ct --suite apps/emqx/test/emqx_static_checks --readable $(CT_READABLE); fi
	./scripts/check-i18n-style.sh
+	./scripts/check_missing_reboot_apps.exs

 APPS=$(shell $(SCRIPTS)/find-apps.sh)
@@ -0,0 +1,40 @@
+# Security Policy
+
+## Supported Versions
+
+| Version | Supported          |
+| ------- | ------------------ |
+| 5.1.x   | :white_check_mark: |
+| 5.0.x   | :white_check_mark: |
+| 4.4.x   | :white_check_mark: |
+| < 4.4   | :x:                |
+
+## Qualifying Vulnerabilities
+
+Any design or implementation issue that substantially affects the confidentiality or integrity of user data is likely to be in scope for the program. Common examples include:
+
+* Cross-site scripting
+* Cross-site request forgery
+* Mixed-content scripts
+* Authentication or authorization flaws
+* Server-side code execution bugs
+
+Out of concern for the availability of our services to all users, please do not attempt to carry out DoS attacks, leverage black hat SEO techniques, spam people, brute force authentication, or do other similarly questionable things. We also discourage the use of any vulnerability testing tools that automatically generate very significant volumes of traffic.
+
+## Non-qualifying Vulnerabilities
+
+Depending on their impact, some reported issues may not qualify.
+Although we review them on a case-by-case basis, here are some of the issues that typically do not earn a monetary reward:
+
+* Bugs requiring exceedingly unlikely user interaction
+* Brute forcing
+* User enumeration
+* Non-security-related bugs
+* Abuse
+
+## Reporting a Vulnerability
+
+1. When investigating a vulnerability, please only ever target your own accounts. Never attempt to access anyone else's data, and do not engage in any activity that would be disruptive or damaging to other users.
+2. If the same vulnerability is present in multiple products, please combine them into one report.
+3. If you have found a vulnerability, please contact us at security@emqx.io.
+4. Note that we are only able to answer technical vulnerability reports. Duplicate reports will not be rewarded; only the first report of a specific vulnerability will be rewarded.
+5. The report should include plain-text steps to reproduce the vulnerability (not only a video or images).
@@ -32,7 +32,7 @@
 %% `apps/emqx/src/bpapi/README.md'

 %% Opensource edition
--define(EMQX_RELEASE_CE, "5.1.1").
+-define(EMQX_RELEASE_CE, "5.1.2").

 %% Enterprise edition
 -define(EMQX_RELEASE_EE, "5.1.1").
|
@ -23,11 +23,12 @@
|
|||
%% `git_subdir` dependency in other projects.
|
||||
{deps, [
|
||||
{emqx_utils, {path, "../emqx_utils"}},
|
||||
{emqx_durable_storage, {path, "../emqx_durable_storage"}},
|
||||
{lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}},
|
||||
{gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}},
|
||||
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
|
||||
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}},
|
||||
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.7"}}},
|
||||
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.9"}}},
|
||||
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
|
||||
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.14"}}},
|
||||
{emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}},
|
||||
|
|
|
@@ -2,7 +2,7 @@
 {application, emqx, [
     {id, "emqx"},
     {description, "EMQX Core"},
-    {vsn, "5.1.2"},
+    {vsn, "5.1.3"},
     {modules, []},
     {registered, []},
     {applications, [
@@ -16,7 +16,8 @@
         sasl,
         os_mon,
         lc,
-        hocon
+        hocon,
+        emqx_durable_storage
     ]},
     {mod, {emqx_app, []}},
     {env, []},
|
@ -39,6 +39,7 @@
|
|||
start(_Type, _Args) ->
|
||||
ok = maybe_load_config(),
|
||||
ok = emqx_persistent_session:init_db_backend(),
|
||||
_ = emqx_persistent_session_ds:init(),
|
||||
ok = maybe_start_quicer(),
|
||||
ok = emqx_bpapi:start(),
|
||||
ok = emqx_alarm_handler:load(),
|
||||
|
|
|
@@ -224,7 +224,7 @@ publish(Msg) when is_record(Msg, message) ->
             }),
             [];
         Msg1 = #message{topic = Topic} ->
-            emqx_persistent_session:persist_message(Msg1),
+            _ = emqx_persistent_session_ds:persist_message(Msg1),
             route(aggre(emqx_router:match_routes(Topic)), delivery(Msg1))
     end.
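
Note: the broker deliberately ignores the result of the new persist call (`_ =`), so persistence failures never block routing. A hypothetical helper (not part of this diff) sketching how a caller could surface the result instead, based on the `ok | {skipped, _} | {error, _}` spec that emqx_persistent_session_ds declares later in this diff:

%% Hypothetical helper, sketch only: logs the outcome that
%% emqx_broker:publish/1 discards with `_ ='.
maybe_persist(Msg) ->
    case emqx_persistent_session_ds:persist_message(Msg) of
        ok ->
            ok;
        {skipped, Reason} ->
            %% e.g. {skipped, disabled} when persistent_session_store.ds = false
            logger:debug("message not persisted: ~p", [Reason]);
        {error, Reason} ->
            logger:warning("failed to persist message: ~p", [Reason])
    end.
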
@@ -153,6 +153,7 @@ code_change(_OldVsn, State, _Extra) ->
 %%--------------------------------------------------------------------

 clean_down(SubPid) ->
+    try
         case ets:lookup(?SUBMON, SubPid) of
             [{_, SubId}] ->
                 true = ets:delete(?SUBMON, SubPid),

@@ -162,4 +163,7 @@ clean_down(SubPid) ->
                 emqx_broker:subscriber_down(SubPid);
             [] ->
                 ok
-        end
+        end
+    catch
+        error:badarg -> ok
+    end.
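
Note: the new try ... catch error:badarg wrapper covers a shutdown race: ETS operations on a table whose owner process has already terminated raise error:badarg, which is expected and harmless during teardown. The idiom in isolation, as a minimal sketch with a hypothetical table and key:

%% Sketch of the idiom: deleting from an ETS table that no longer
%% exists raises error:badarg; during teardown this is safe to ignore.
safe_ets_delete(Tab, Key) ->
    try
        ets:delete(Tab, Key)
    catch
        error:badarg -> ok % table already gone
    end.
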
|
@ -61,8 +61,7 @@
|
|||
|
||||
%% Export for emqx_channel implementations
|
||||
-export([
|
||||
maybe_nack/1,
|
||||
maybe_mark_as_delivered/2
|
||||
maybe_nack/1
|
||||
]).
|
||||
|
||||
%% Exports for CT
|
||||
|
@@ -152,7 +151,7 @@
 info(Channel) ->
     maps:from_list(info(?INFO_KEYS, Channel)).

--spec info(list(atom()) | atom(), channel()) -> term().
+-spec info(list(atom()) | atom() | tuple(), channel()) -> term().
 info(Keys, Channel) when is_list(Keys) ->
     [{Key, info(Key, Channel)} || Key <- Keys];
 info(conninfo, #channel{conninfo = ConnInfo}) ->
@@ -181,6 +180,8 @@ info(username, #channel{clientinfo = ClientInfo}) ->
     maps:get(username, ClientInfo, undefined);
 info(session, #channel{session = Session}) ->
     maybe_apply(fun emqx_session:info/1, Session);
+info({session, Info}, #channel{session = Session}) ->
+    maybe_apply(fun(S) -> emqx_session:info(Info, S) end, Session);
 info(conn_state, #channel{conn_state = ConnState}) ->
     ConnState;
 info(keepalive, #channel{keepalive = Keepalive}) ->
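
Note: the new {session, Info} clause forwards tuple-shaped keys to emqx_session:info/2; this generic accessor is what replaces the removed get_mqueue/1 API further down in this diff. A sketch of a call site, assuming mqueue_len is among the session info keys:

%% Sketch only: fetch a single session statistic through the channel,
%% instead of a dedicated accessor like the removed get_mqueue/1.
MqueueLen = emqx_channel:info({session, mqueue_len}, Channel).
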
@@ -199,11 +200,6 @@ info(timers, #channel{timers = Timers}) ->
 set_conn_state(ConnState, Channel) ->
     Channel#channel{conn_state = ConnState}.

-set_session(Session, Channel = #channel{conninfo = ConnInfo, clientinfo = ClientInfo}) ->
-    %% Assume that this is also an updated session. Allow side effect.
-    Session1 = emqx_persistent_session:persist(ClientInfo, ConnInfo, Session),
-    Channel#channel{session = Session1}.
-
 -spec stats(channel()) -> emqx_types:stats().
 stats(#channel{session = undefined}) ->
     emqx_pd:get_counters(?CHANNEL_METRICS);
@@ -417,10 +413,10 @@ handle_in(
     case emqx_session:puback(ClientInfo, PacketId, Session) of
         {ok, Msg, NSession} ->
             ok = after_message_acked(ClientInfo, Msg, Properties),
-            {ok, set_session(NSession, Channel)};
+            {ok, Channel#channel{session = NSession}};
         {ok, Msg, Publishes, NSession} ->
             ok = after_message_acked(ClientInfo, Msg, Properties),
-            handle_out(publish, Publishes, set_session(NSession, Channel));
+            handle_out(publish, Publishes, Channel#channel{session = NSession});
         {error, ?RC_PACKET_IDENTIFIER_IN_USE} ->
             ?SLOG(warning, #{msg => "puback_packetId_inuse", packetId => PacketId}),
             ok = emqx_metrics:inc('packets.puback.inuse'),
@@ -438,7 +434,7 @@ handle_in(
     case emqx_session:pubrec(ClientInfo, PacketId, Session) of
         {ok, Msg, NSession} ->
             ok = after_message_acked(ClientInfo, Msg, Properties),
-            NChannel = set_session(NSession, Channel),
+            NChannel = Channel#channel{session = NSession},
             handle_out(pubrel, {PacketId, ?RC_SUCCESS}, NChannel);
         {error, RC = ?RC_PACKET_IDENTIFIER_IN_USE} ->
             ?SLOG(warning, #{msg => "pubrec_packetId_inuse", packetId => PacketId}),
@@ -458,7 +454,7 @@ handle_in(
 ) ->
     case emqx_session:pubrel(ClientInfo, PacketId, Session) of
         {ok, NSession} ->
-            NChannel = set_session(NSession, Channel),
+            NChannel = Channel#channel{session = NSession},
             handle_out(pubcomp, {PacketId, ?RC_SUCCESS}, NChannel);
         {error, RC = ?RC_PACKET_IDENTIFIER_NOT_FOUND} ->
             ?SLOG(warning, #{msg => "pubrel_packetId_not_found", packetId => PacketId}),
@@ -473,9 +469,9 @@ handle_in(
 ) ->
     case emqx_session:pubcomp(ClientInfo, PacketId, Session) of
         {ok, NSession} ->
-            {ok, set_session(NSession, Channel)};
+            {ok, Channel#channel{session = NSession}};
         {ok, Publishes, NSession} ->
-            handle_out(publish, Publishes, set_session(NSession, Channel));
+            handle_out(publish, Publishes, Channel#channel{session = NSession});
         {error, ?RC_PACKET_IDENTIFIER_IN_USE} ->
             ok = emqx_metrics:inc('packets.pubcomp.inuse'),
             {ok, Channel};
@@ -734,7 +730,7 @@ do_publish(
     case emqx_session:publish(ClientInfo, PacketId, Msg, Session) of
         {ok, PubRes, NSession} ->
             RC = pubrec_reason_code(PubRes),
-            NChannel0 = set_session(NSession, Channel),
+            NChannel0 = Channel#channel{session = NSession},
             NChannel1 = ensure_timer(await_timer, NChannel0),
             NChannel2 = ensure_quota(PubRes, NChannel1),
             handle_out(pubrec, {PacketId, RC}, NChannel2);
@@ -830,7 +826,7 @@ do_subscribe(
     NSubOpts = enrich_subopts(maps:merge(?DEFAULT_SUBOPTS, SubOpts), Channel),
     case emqx_session:subscribe(ClientInfo, NTopicFilter, NSubOpts, Session) of
         {ok, NSession} ->
-            {QoS, set_session(NSession, Channel)};
+            {QoS, Channel#channel{session = NSession}};
         {error, RC} ->
             ?SLOG(
                 warning,
@@ -869,7 +865,7 @@ do_unsubscribe(
     TopicFilter1 = emqx_mountpoint:mount(MountPoint, TopicFilter),
     case emqx_session:unsubscribe(ClientInfo, TopicFilter1, SubOpts, Session) of
         {ok, NSession} ->
-            {?RC_SUCCESS, set_session(NSession, Channel)};
+            {?RC_SUCCESS, Channel#channel{session = NSession}};
         {error, RC} ->
             {RC, Channel}
     end.
@@ -898,7 +894,7 @@ process_disconnect(ReasonCode, Properties, Channel) ->

 maybe_update_expiry_interval(
     #{'Session-Expiry-Interval' := Interval},
-    Channel = #channel{conninfo = ConnInfo, clientinfo = ClientInfo}
+    Channel = #channel{conninfo = ConnInfo}
 ) ->
     EI = timer:seconds(Interval),
     OldEI = maps:get(expiry_interval, ConnInfo, 0),
@@ -907,12 +903,11 @@ maybe_update_expiry_interval(
             Channel;
         false ->
             NChannel = Channel#channel{conninfo = ConnInfo#{expiry_interval => EI}},
-            ClientID = maps:get(clientid, ClientInfo, undefined),
             %% Check if the client turns off persistence (turning it on is disallowed)
             case EI =:= 0 andalso OldEI > 0 of
                 true ->
-                    S = emqx_persistent_session:discard(ClientID, NChannel#channel.session),
-                    set_session(S, NChannel);
+                    NSession = emqx_session:unpersist(NChannel#channel.session),
+                    NChannel#channel{session = NSession};
                 false ->
                     NChannel
             end
@@ -931,18 +926,13 @@ handle_deliver(
     Delivers,
     Channel = #channel{
         takeover = true,
-        pendings = Pendings,
-        session = Session,
-        clientinfo = #{clientid := ClientId} = ClientInfo
+        pendings = Pendings
     }
 ) ->
     %% NOTE: Order is important here. While the takeover is in
     %% progress, the session cannot enqueue messages, since it already
     %% passed on the queue to the new connection in the session state.
-    NPendings = lists:append(
-        Pendings,
-        emqx_session:ignore_local(ClientInfo, maybe_nack(Delivers), ClientId, Session)
-    ),
+    NPendings = lists:append(Pendings, maybe_nack(Delivers)),
     {ok, Channel#channel{pendings = NPendings}};
 handle_deliver(
     Delivers,
|
@ -950,37 +940,27 @@ handle_deliver(
|
|||
conn_state = disconnected,
|
||||
takeover = false,
|
||||
session = Session,
|
||||
clientinfo = #{clientid := ClientId} = ClientInfo
|
||||
clientinfo = ClientInfo
|
||||
}
|
||||
) ->
|
||||
Delivers1 = maybe_nack(Delivers),
|
||||
Delivers2 = emqx_session:ignore_local(ClientInfo, Delivers1, ClientId, Session),
|
||||
NSession = emqx_session:enqueue(ClientInfo, Delivers2, Session),
|
||||
NChannel = set_session(NSession, Channel),
|
||||
%% We consider queued/dropped messages as delivered since they are now in the session state.
|
||||
maybe_mark_as_delivered(Session, Delivers),
|
||||
NSession = emqx_session:enqueue(ClientInfo, Delivers1, Session),
|
||||
NChannel = Channel#channel{session = NSession},
|
||||
{ok, NChannel};
|
||||
handle_deliver(
|
||||
Delivers,
|
||||
Channel = #channel{
|
||||
session = Session,
|
||||
takeover = false,
|
||||
clientinfo = #{clientid := ClientId} = ClientInfo
|
||||
clientinfo = ClientInfo
|
||||
}
|
||||
) ->
|
||||
case
|
||||
emqx_session:deliver(
|
||||
ClientInfo,
|
||||
emqx_session:ignore_local(ClientInfo, Delivers, ClientId, Session),
|
||||
Session
|
||||
)
|
||||
of
|
||||
case emqx_session:deliver(ClientInfo, Delivers, Session) of
|
||||
{ok, Publishes, NSession} ->
|
||||
NChannel = set_session(NSession, Channel),
|
||||
maybe_mark_as_delivered(NSession, Delivers),
|
||||
NChannel = Channel#channel{session = NSession},
|
||||
handle_out(publish, Publishes, ensure_timer(retry_timer, NChannel));
|
||||
{ok, NSession} ->
|
||||
{ok, set_session(NSession, Channel)}
|
||||
{ok, Channel#channel{session = NSession}}
|
||||
end.
|
||||
|
||||
%% Nack delivers from shared subscription
|
||||
|
@@ -996,15 +976,6 @@ not_nacked({deliver, _Topic, Msg}) ->
             true
     end.

-maybe_mark_as_delivered(Session, Delivers) ->
-    case emqx_session:info(is_persistent, Session) of
-        false ->
-            skip;
-        true ->
-            SessionID = emqx_session:info(id, Session),
-            emqx_persistent_session:mark_as_delivered(SessionID, Delivers)
-    end.
-
 %%--------------------------------------------------------------------
 %% Handle outgoing packet
 %%--------------------------------------------------------------------
|
@ -1096,11 +1067,11 @@ return_connack(AckPacket, Channel) ->
|
|||
ignore ->
|
||||
{ok, Replies, Channel};
|
||||
{ok, Publishes, NSession} ->
|
||||
NChannel0 = Channel#channel{
|
||||
NChannel1 = Channel#channel{
|
||||
resuming = false,
|
||||
pendings = []
|
||||
pendings = [],
|
||||
session = NSession
|
||||
},
|
||||
NChannel1 = set_session(NSession, NChannel0),
|
||||
{Packets, NChannel2} = do_deliver(Publishes, NChannel1),
|
||||
Outgoing = [{outgoing, Packets} || length(Packets) > 0],
|
||||
{ok, Replies ++ Outgoing, NChannel2}
|
||||
|
@@ -1226,8 +1197,6 @@ handle_call(
     ChanInfo1 = info(NChannel),
     emqx_cm:set_chan_info(ClientId, ChanInfo1#{sockinfo => SockInfo}),
     reply(ok, reset_timer(alive_timer, NChannel));
-handle_call(get_mqueue, Channel) ->
-    reply({ok, get_mqueue(Channel)}, Channel);
 handle_call(Req, Channel) ->
     ?SLOG(error, #{msg => "unexpected_call", call => Req}),
     reply(ignored, Channel).
@@ -1345,9 +1314,10 @@ handle_timeout(
 ) ->
     case emqx_session:retry(ClientInfo, Session) of
         {ok, NSession} ->
-            {ok, clean_timer(retry_timer, set_session(NSession, Channel))};
+            NChannel = Channel#channel{session = NSession},
+            {ok, clean_timer(retry_timer, NChannel)};
         {ok, Publishes, Timeout, NSession} ->
-            NChannel = set_session(NSession, Channel),
+            NChannel = Channel#channel{session = NSession},
             handle_out(publish, Publishes, reset_timer(retry_timer, Timeout, NChannel))
     end;
 handle_timeout(
|
@ -1363,9 +1333,11 @@ handle_timeout(
|
|||
) ->
|
||||
case emqx_session:expire(ClientInfo, awaiting_rel, Session) of
|
||||
{ok, NSession} ->
|
||||
{ok, clean_timer(await_timer, set_session(NSession, Channel))};
|
||||
NChannel = Channel#channel{session = NSession},
|
||||
{ok, clean_timer(await_timer, NChannel)};
|
||||
{ok, Timeout, NSession} ->
|
||||
{ok, reset_timer(await_timer, Timeout, set_session(NSession, Channel))}
|
||||
NChannel = Channel#channel{session = NSession},
|
||||
{ok, reset_timer(await_timer, Timeout, NChannel)}
|
||||
end;
|
||||
handle_timeout(_TRef, expire_session, Channel) ->
|
||||
shutdown(expired, Channel);
|
||||
|
@ -1453,25 +1425,11 @@ terminate(Reason, Channel = #channel{clientinfo = ClientInfo, will_msg = WillMsg
|
|||
%% if will_msg still exists when the session is terminated, it
|
||||
%% must be published immediately.
|
||||
WillMsg =/= undefined andalso publish_will_msg(ClientInfo, WillMsg),
|
||||
(Reason =:= expired) andalso persist_if_session(Channel),
|
||||
run_terminate_hook(Reason, Channel).
|
||||
|
||||
persist_if_session(#channel{session = Session} = Channel) ->
|
||||
case emqx_session:is_session(Session) of
|
||||
true ->
|
||||
_ = emqx_persistent_session:persist(
|
||||
Channel#channel.clientinfo,
|
||||
Channel#channel.conninfo,
|
||||
Channel#channel.session
|
||||
),
|
||||
run_terminate_hook(_Reason, #channel{session = undefined}) ->
|
||||
ok;
|
||||
false ->
|
||||
ok
|
||||
end.
|
||||
|
||||
run_terminate_hook(_Reason, #channel{session = undefined} = _Channel) ->
|
||||
ok;
|
||||
run_terminate_hook(Reason, #channel{clientinfo = ClientInfo, session = Session} = _Channel) ->
|
||||
run_terminate_hook(Reason, #channel{clientinfo = ClientInfo, session = Session}) ->
|
||||
emqx_session:terminate(ClientInfo, Reason, Session).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
|
@ -2093,11 +2051,9 @@ maybe_resume_session(#channel{
|
|||
session = Session,
|
||||
resuming = true,
|
||||
pendings = Pendings,
|
||||
clientinfo = #{clientid := ClientId} = ClientInfo
|
||||
clientinfo = ClientInfo
|
||||
}) ->
|
||||
{ok, Publishes, Session1} = emqx_session:replay(ClientInfo, Session),
|
||||
%% We consider queued/dropped messages as delivered since they are now in the session state.
|
||||
emqx_persistent_session:mark_as_delivered(ClientId, Pendings),
|
||||
case emqx_session:deliver(ClientInfo, Pendings, Session1) of
|
||||
{ok, Session2} ->
|
||||
{ok, Publishes, Session2};
|
||||
|
@ -2281,6 +2237,3 @@ get_mqtt_conf(Zone, Key, Default) ->
|
|||
set_field(Name, Value, Channel) ->
|
||||
Pos = emqx_utils:index_of(Name, record_info(fields, channel)),
|
||||
setelement(Pos + 1, Channel, Value).
|
||||
|
||||
get_mqueue(#channel{session = Session}) ->
|
||||
emqx_session:get_mqueue(Session).
|
||||
|
|
|
@@ -277,65 +277,24 @@ open_session(true, ClientInfo = #{clientid := ClientId}, ConnInfo) ->
     Self = self(),
     CleanStart = fun(_) ->
         ok = discard_session(ClientId),
-        ok = emqx_persistent_session:discard_if_present(ClientId),
-        Session = create_session(ClientInfo, ConnInfo),
-        Session1 = emqx_persistent_session:persist(ClientInfo, ConnInfo, Session),
-        register_channel(ClientId, Self, ConnInfo),
-        {ok, #{session => Session1, present => false}}
+        ok = emqx_session:destroy(ClientId),
+        create_register_session(ClientInfo, ConnInfo, Self)
     end,
     emqx_cm_locker:trans(ClientId, CleanStart);
 open_session(false, ClientInfo = #{clientid := ClientId}, ConnInfo) ->
     Self = self(),
     ResumeStart = fun(_) ->
-        CreateSess =
-            fun() ->
-                Session = create_session(ClientInfo, ConnInfo),
-                Session1 = emqx_persistent_session:persist(
-                    ClientInfo, ConnInfo, Session
-                ),
-                register_channel(ClientId, Self, ConnInfo),
-                {ok, #{session => Session1, present => false}}
-            end,
         case takeover_session(ClientId) of
-            {persistent, Session} ->
-                %% This is a persistent session without a managing process.
-                {Session1, Pendings} =
-                    emqx_persistent_session:resume(ClientInfo, ConnInfo, Session),
-                register_channel(ClientId, Self, ConnInfo),
-
-                {ok, #{
-                    session => clean_session(Session1),
-                    present => true,
-                    pendings => clean_pendings(Pendings)
-                }};
             {living, ConnMod, ChanPid, Session} ->
                 ok = emqx_session:resume(ClientInfo, Session),
                 case wrap_rpc(emqx_cm_proto_v2:takeover_finish(ConnMod, ChanPid)) of
                     {ok, Pendings} ->
-                        Session1 = emqx_persistent_session:persist(
-                            ClientInfo, ConnInfo, Session
-                        ),
-                        register_channel(ClientId, Self, ConnInfo),
-                        {ok, #{
-                            session => clean_session(Session1),
-                            present => true,
-                            pendings => clean_pendings(Pendings)
-                        }};
+                        clean_register_session(Session, Pendings, ClientInfo, ConnInfo, Self);
                     {error, _} ->
-                        CreateSess()
+                        create_register_session(ClientInfo, ConnInfo, Self)
                 end;
-            {expired, OldSession} ->
-                _ = emqx_persistent_session:discard(ClientId, OldSession),
-                Session = create_session(ClientInfo, ConnInfo),
-                Session1 = emqx_persistent_session:persist(
-                    ClientInfo,
-                    ConnInfo,
-                    Session
-                ),
-                register_channel(ClientId, Self, ConnInfo),
-                {ok, #{session => Session1, present => false}};
             none ->
-                CreateSess()
+                create_register_session(ClientInfo, ConnInfo, Self)
         end
     end,
     emqx_cm_locker:trans(ClientId, ResumeStart).
@@ -347,6 +306,19 @@ create_session(ClientInfo, ConnInfo) ->
     ok = emqx_hooks:run('session.created', [ClientInfo, emqx_session:info(Session)]),
     Session.

+create_register_session(ClientInfo = #{clientid := ClientId}, ConnInfo, ChanPid) ->
+    Session = create_session(ClientInfo, ConnInfo),
+    ok = register_channel(ClientId, ChanPid, ConnInfo),
+    {ok, #{session => Session, present => false}}.
+
+clean_register_session(Session, Pendings, #{clientid := ClientId}, ConnInfo, ChanPid) ->
+    ok = register_channel(ClientId, ChanPid, ConnInfo),
+    {ok, #{
+        session => clean_session(Session),
+        present => true,
+        pendings => clean_pendings(Pendings)
+    }}.
+
 get_session_confs(#{zone := Zone, clientid := ClientId}, #{
     receive_maximum := MaxInflight, expiry_interval := EI
 }) ->
@@ -385,7 +357,7 @@ get_mqtt_conf(Zone, Key) ->
 takeover_session(ClientId) ->
     case lookup_channels(ClientId) of
         [] ->
-            emqx_persistent_session:lookup(ClientId);
+            emqx_session:lookup(ClientId);
         [ChanPid] ->
             takeover_session(ClientId, ChanPid);
         ChanPids ->
@@ -417,16 +389,16 @@ takeover_session(ClientId, Pid) ->
             %% request_stepdown/3
             R == unexpected_exception
         ->
-            emqx_persistent_session:lookup(ClientId);
+            emqx_session:lookup(ClientId);
         % rpc_call/3
         _:{'EXIT', {noproc, _}} ->
-            emqx_persistent_session:lookup(ClientId)
+            emqx_session:lookup(ClientId)
     end.

 do_takeover_session(ClientId, ChanPid) when node(ChanPid) == node() ->
     case get_chann_conn_mod(ClientId, ChanPid) of
         undefined ->
-            emqx_persistent_session:lookup(ClientId);
+            emqx_session:lookup(ClientId);
         ConnMod when is_atom(ConnMod) ->
             case request_stepdown({takeover, 'begin'}, ConnMod, ChanPid) of
                 {ok, Session} ->
@@ -734,7 +706,11 @@ code_change(_OldVsn, State, _Extra) ->
 %%--------------------------------------------------------------------

 clean_down({ChanPid, ClientId}) ->
-    do_unregister_channel({ClientId, ChanPid}),
+    try
+        do_unregister_channel({ClientId, ChanPid})
+    catch
+        error:badarg -> ok
+    end,
     ok = ?tp(debug, emqx_cm_clean_down, #{client_id => ClientId}).

 stats_fun() ->
|
@ -641,7 +641,7 @@ backup_and_write(Path, Content) ->
|
|||
?SLOG(error, #{
|
||||
msg => "failed_to_save_conf_file",
|
||||
hint =>
|
||||
"The updated cluster config is note saved on this node, please check the file system.",
|
||||
"The updated cluster config is not saved on this node, please check the file system.",
|
||||
filename => TmpFile,
|
||||
reason => Reason
|
||||
}),
|
||||
|
|
|
@ -44,6 +44,7 @@
|
|||
|
||||
-export([
|
||||
info/1,
|
||||
info/2,
|
||||
stats/1
|
||||
]).
|
||||
|
||||
|
@@ -221,11 +222,10 @@ info(CPid) when is_pid(CPid) ->
     call(CPid, info);
 info(State = #state{channel = Channel}) ->
     ChanInfo = emqx_channel:info(Channel),
-    SockInfo = maps:from_list(
-        info(?INFO_KEYS, State)
-    ),
+    SockInfo = maps:from_list(info(?INFO_KEYS, State)),
     ChanInfo#{sockinfo => SockInfo}.

 -spec info([atom()] | atom() | tuple(), pid() | state()) -> term().
 info(Keys, State) when is_list(Keys) ->
     [{Key, info(Key, State)} || Key <- Keys];
 info(socktype, #state{transport = Transport, socket = Socket}) ->
@@ -241,7 +241,9 @@ info(stats_timer, #state{stats_timer = StatsTimer}) ->
 info(limiter, #state{limiter = Limiter}) ->
     Limiter;
 info(limiter_timer, #state{limiter_timer = Timer}) ->
-    Timer.
+    Timer;
+info({channel, Info}, #state{channel = Channel}) ->
+    emqx_channel:info(Info, Channel).

 %% @doc Get stats of the connection/channel.
 -spec stats(pid() | state()) -> emqx_types:stats().
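
Note: together with the {session, Info} clause added to emqx_channel above, this new {channel, Info} clause lets a caller drill from connection state down to session details in one call. A sketch of the composition, with the nested key assumed:

%% Sketch only: connection -> channel -> session, all via info/2.
SessionInfo = emqx_connection:info({channel, {session, mqueue_len}}, State).
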
@@ -0,0 +1,86 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2021-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%     http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+
+-module(emqx_persistent_session_ds).
+
+-export([init/0]).
+
+-export([persist_message/1]).
+
+-export([
+    serialize_message/1,
+    deserialize_message/1
+]).
+
+%% FIXME
+-define(DS_SHARD, <<"local">>).
+
+-define(WHEN_ENABLED(DO),
+    case is_store_enabled() of
+        true -> DO;
+        false -> {skipped, disabled}
+    end
+).
+
+%%
+
+init() ->
+    ?WHEN_ENABLED(
+        ok = emqx_ds:ensure_shard(?DS_SHARD, #{
+            dir => filename:join([emqx:data_dir(), ds, messages, ?DS_SHARD])
+        })
+    ).
+
+%%
+
+-spec persist_message(emqx_types:message()) ->
+    ok | {skipped, _Reason} | {error, _TODO}.
+persist_message(Msg) ->
+    ?WHEN_ENABLED(
+        case needs_persistence(Msg) andalso find_subscribers(Msg) of
+            [_ | _] ->
+                store_message(Msg);
+            % [] ->
+            %     {skipped, no_subscribers};
+            false ->
+                {skipped, needs_no_persistence}
+        end
+    ).
+
+needs_persistence(Msg) ->
+    not (emqx_message:get_flag(dup, Msg) orelse emqx_message:is_sys(Msg)).
+
+store_message(Msg) ->
+    ID = emqx_message:id(Msg),
+    Timestamp = emqx_guid:timestamp(ID),
+    Topic = emqx_topic:words(emqx_message:topic(Msg)),
+    emqx_ds_storage_layer:store(?DS_SHARD, ID, Timestamp, Topic, serialize_message(Msg)).
+
+find_subscribers(_Msg) ->
+    [node()].
+
+%%
+
+serialize_message(Msg) ->
+    term_to_binary(emqx_message:to_map(Msg)).
+
+deserialize_message(Bin) ->
+    emqx_message:from_map(binary_to_term(Bin)).
+
+%%
+
+is_store_enabled() ->
+    emqx_config:get([persistent_session_store, ds]).
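
Note: a quick sketch of this module's contract as declared above. With the store disabled (the default, per the persistent_session_store.ds schema field below) every call short-circuits to {skipped, disabled}; when enabled, a non-dup, non-sys message is stored after passing through serialize_message/1. The round-trip below is only an illustration; emqx_message:make/4 usage mirrors the test suite at the end of this diff:

%% Sketch only: exercising the ?WHEN_ENABLED gate and the
%% serialize/deserialize pair.
demo() ->
    Msg = emqx_message:make(<<"clientid">>, 1, <<"t/1">>, <<"payload">>),
    case emqx_persistent_session_ds:persist_message(Msg) of
        {skipped, disabled} ->
            disabled; % persistent_session_store.ds = false
        ok ->
            Bin = emqx_persistent_session_ds:serialize_message(Msg),
            Msg2 = emqx_persistent_session_ds:deserialize_message(Bin),
            true = emqx_message:payload(Msg2) =:= emqx_message:payload(Msg),
            ok
    end.
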
@@ -319,6 +319,14 @@ fields("persistent_session_store") ->
                 desc => ?DESC(persistent_session_store_enabled)
             }
         )},
+        {"ds",
+            sc(
+                boolean(),
+                #{
+                    default => false,
+                    importance => ?IMPORTANCE_HIDDEN
+                }
+            )},
         {"on_disc",
             sc(
                 boolean(),
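
Note: this hidden ds flag is what is_store_enabled/0 in emqx_persistent_session_ds reads. A sketch of enabling it the way the new test suite at the end of this diff does (assumed context: test setup, before the emqx application starts):

%% Sketch only, mirroring emqx_persistent_messages_SUITE below.
enable_ds_store() ->
    _ = emqx_config:init_load(emqx_schema, <<"persistent_session_store.ds = true">>),
    true = emqx_config:get([persistent_session_store, ds]),
    ok.
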
@@ -47,22 +47,25 @@
 -include("emqx_mqtt.hrl").
 -include("logger.hrl").
 -include("types.hrl").
 -include_lib("snabbkaffe/include/snabbkaffe.hrl").

 -ifdef(TEST).
 -compile(export_all).
 -compile(nowarn_export_all).
 -endif.

+-export([
+    lookup/1,
+    destroy/1,
+    unpersist/1
+]).
+
 -export([init/1]).

 -export([
     info/1,
     info/2,
     is_session/1,
     stats/1,
-    obtain_next_pkt_id/1,
-    get_mqueue/1
+    obtain_next_pkt_id/1
 ]).

 -export([
@@ -83,7 +86,6 @@
     enqueue/3,
     dequeue/2,
     filter_queue/2,
-    ignore_local/4,
     retry/2,
     terminate/3
 ]).
@@ -226,13 +228,27 @@ init(Opts) ->
         created_at = erlang:system_time(millisecond)
     }.

+-spec lookup(emqx_types:clientid()) -> none.
+lookup(_ClientId) ->
+    % NOTE
+    % This is a stub. This session impl has no backing store, thus always `none`.
+    none.
+
+-spec destroy(emqx_types:clientid()) -> ok.
+destroy(_ClientId) ->
+    % NOTE
+    % This is a stub. This session impl has no backing store, thus always `ok`.
+    ok.
+
+-spec unpersist(session()) -> session().
+unpersist(Session) ->
+    ok = destroy(Session#session.clientid),
+    Session#session{is_persistent = false}.
+
 %%--------------------------------------------------------------------
 %% Info, Stats
 %%--------------------------------------------------------------------

 is_session(#session{}) -> true;
 is_session(_) -> false.

 %% @doc Get infos of the session.
 -spec info(session()) -> emqx_types:infos().
 info(Session) ->
@@ -242,6 +258,8 @@ info(Keys, Session) when is_list(Keys) ->
     [{Key, info(Key, Session)} || Key <- Keys];
 info(id, #session{id = Id}) ->
     Id;
+info(clientid, #session{clientid = ClientId}) ->
+    ClientId;
 info(is_persistent, #session{is_persistent = Bool}) ->
     Bool;
 info(subscriptions, #session{subscriptions = Subs}) ->
@@ -285,27 +303,6 @@ info(created_at, #session{created_at = CreatedAt}) ->
 -spec stats(session()) -> emqx_types:stats().
 stats(Session) -> info(?STATS_KEYS, Session).

-%%--------------------------------------------------------------------
-%% Ignore local messages
-%%--------------------------------------------------------------------
-
-ignore_local(ClientInfo, Delivers, Subscriber, Session) ->
-    Subs = info(subscriptions, Session),
-    lists:filter(
-        fun({deliver, Topic, #message{from = Publisher} = Msg}) ->
-            case maps:find(Topic, Subs) of
-                {ok, #{nl := 1}} when Subscriber =:= Publisher ->
-                    ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, no_local]),
-                    ok = emqx_metrics:inc('delivery.dropped'),
-                    ok = emqx_metrics:inc('delivery.dropped.no_local'),
-                    false;
-                _ ->
-                    true
-            end
-        end,
-        Delivers
-    ).
-
 %%--------------------------------------------------------------------
 %% Client -> Broker: SUBSCRIBE
 %%--------------------------------------------------------------------
@@ -321,13 +318,12 @@ subscribe(
     ClientInfo = #{clientid := ClientId},
     TopicFilter,
     SubOpts,
-    Session = #session{id = SessionID, is_persistent = IsPS, subscriptions = Subs}
+    Session = #session{subscriptions = Subs}
 ) ->
     IsNew = not maps:is_key(TopicFilter, Subs),
     case IsNew andalso is_subscriptions_full(Session) of
         false ->
             ok = emqx_broker:subscribe(TopicFilter, ClientId, SubOpts),
-            ok = emqx_persistent_session:add_subscription(TopicFilter, SessionID, IsPS),
             ok = emqx_hooks:run(
                 'session.subscribed',
                 [ClientInfo, TopicFilter, SubOpts#{is_new => IsNew}]
@@ -355,12 +351,11 @@ unsubscribe(
     ClientInfo,
     TopicFilter,
     UnSubOpts,
-    Session = #session{id = SessionID, subscriptions = Subs, is_persistent = IsPS}
+    Session = #session{subscriptions = Subs}
 ) ->
     case maps:find(TopicFilter, Subs) of
         {ok, SubOpts} ->
             ok = emqx_broker:unsubscribe(TopicFilter),
-            ok = emqx_persistent_session:remove_subscription(TopicFilter, SessionID, IsPS),
             ok = emqx_hooks:run(
                 'session.unsubscribed',
                 [ClientInfo, TopicFilter, maps:merge(SubOpts, UnSubOpts)]
@@ -588,7 +583,10 @@ deliver_msg(
             MarkedMsg = mark_begin_deliver(Msg),
             Inflight1 = emqx_inflight:insert(PacketId, with_ts(MarkedMsg), Inflight),
             {ok, [Publish], next_pkt_id(Session#session{inflight = Inflight1})}
-    end.
+    end;
+deliver_msg(ClientInfo, {drop, Msg, Reason}, Session) ->
+    handle_dropped(ClientInfo, Msg, Reason, Session),
+    {ok, Session}.

 -spec enqueue(
     emqx_types:clientinfo(),
@@ -607,7 +605,10 @@ enqueue(ClientInfo, Delivers, Session) when is_list(Delivers) ->
 enqueue(ClientInfo, #message{} = Msg, Session = #session{mqueue = Q}) ->
     {Dropped, NewQ} = emqx_mqueue:in(Msg, Q),
     (Dropped =/= undefined) andalso handle_dropped(ClientInfo, Dropped, Session),
-    Session#session{mqueue = NewQ}.
+    Session#session{mqueue = NewQ};
+enqueue(ClientInfo, {drop, Msg, Reason}, Session) ->
+    handle_dropped(ClientInfo, Msg, Reason, Session),
+    Session.

 handle_dropped(ClientInfo, Msg = #message{qos = QoS, topic = Topic}, #session{mqueue = Q}) ->
     Payload = emqx_message:to_log_map(Msg),
@@ -644,8 +645,18 @@ handle_dropped(ClientInfo, Msg = #message{qos = QoS, topic = Topic}, #session{mq
         )
     end.

+handle_dropped(ClientInfo, Msg, Reason, _Session) ->
+    ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, Reason]),
+    ok = emqx_metrics:inc('delivery.dropped'),
+    ok = emqx_metrics:inc('delivery.dropped.no_local').
+
 enrich_deliver({deliver, Topic, Msg}, Session = #session{subscriptions = Subs}) ->
-    enrich_subopts(get_subopts(Topic, Subs), Msg, Session).
+    enrich_deliver(Msg, maps:find(Topic, Subs), Session).
+
+enrich_deliver(Msg = #message{from = ClientId}, {ok, #{nl := 1}}, #session{clientid = ClientId}) ->
+    {drop, Msg, no_local};
+enrich_deliver(Msg, SubOpts, Session) ->
+    enrich_subopts(mk_subopts(SubOpts), Msg, Session).

 maybe_ack(Msg) ->
     emqx_shared_sub:maybe_ack(Msg).
|
@ -653,8 +664,8 @@ maybe_ack(Msg) ->
|
|||
maybe_nack(Msg) ->
|
||||
emqx_shared_sub:maybe_nack_dropped(Msg).
|
||||
|
||||
get_subopts(Topic, SubMap) ->
|
||||
case maps:find(Topic, SubMap) of
|
||||
mk_subopts(SubOpts) ->
|
||||
case SubOpts of
|
||||
{ok, #{nl := Nl, qos := QoS, rap := Rap, subid := SubId}} ->
|
||||
[{nl, Nl}, {qos, QoS}, {rap, Rap}, {subid, SubId}];
|
||||
{ok, #{nl := Nl, qos := QoS, rap := Rap}} ->
|
||||
|
@@ -943,6 +954,3 @@ age(Now, Ts) -> Now - Ts.
 set_field(Name, Value, Session) ->
     Pos = emqx_utils:index_of(Name, record_info(fields, session)),
     setelement(Pos + 1, Session, Value).
-
-get_mqueue(#session{mqueue = Q}) ->
-    emqx_mqueue:to_list(Q).
@@ -272,7 +272,7 @@ remove_subscription(_TopicFilter, _SessionID, false = _IsPersistent) ->
 %% Must be called inside a emqx_cm_locker transaction.
 -spec resume(emqx_types:clientinfo(), emqx_types:conninfo(), emqx_session:session()) ->
     {emqx_session:session(), [emqx_types:deliver()]}.
-resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) ->
+resume(ClientInfo, ConnInfo, Session) ->
     SessionID = emqx_session:info(id, Session),
     ?tp(ps_resuming, #{from => db, sid => SessionID}),
||||
|
@ -281,7 +281,6 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) ->
|
|||
%% 1. Get pending messages from DB.
|
||||
?tp(ps_initial_pendings, #{sid => SessionID}),
|
||||
Pendings1 = pending(SessionID),
|
||||
Pendings2 = emqx_session:ignore_local(ClientInfo, Pendings1, ClientID, Session),
|
||||
?tp(ps_got_initial_pendings, #{
|
||||
sid => SessionID,
|
||||
msgs => Pendings1
|
||||
|
@@ -290,11 +289,11 @@ resume(ClientInfo, ConnInfo, Session) ->
     %% 2. Enqueue messages to mimic that the process was alive
     %% when the messages were delivered.
     ?tp(ps_persist_pendings, #{sid => SessionID}),
-    Session1 = emqx_session:enqueue(ClientInfo, Pendings2, Session),
+    Session1 = emqx_session:enqueue(ClientInfo, Pendings1, Session),
     Session2 = persist(ClientInfo, ConnInfo, Session1),
-    mark_as_delivered(SessionID, Pendings2),
+    mark_as_delivered(SessionID, Pendings1),
     ?tp(ps_persist_pendings_msgs, #{
-        msgs => Pendings2,
+        msgs => Pendings1,
         sid => SessionID
     }),
@@ -312,11 +311,10 @@ resume(ClientInfo, ConnInfo, Session) ->
     %% 5. Get pending messages from DB until we find all markers.
     ?tp(ps_marker_pendings, #{sid => SessionID}),
     MarkerIDs = [Marker || {_, Marker} <- NodeMarkers],
-    Pendings3 = pending(SessionID, MarkerIDs),
-    Pendings4 = emqx_session:ignore_local(ClientInfo, Pendings3, ClientID, Session),
+    Pendings2 = pending(SessionID, MarkerIDs),
     ?tp(ps_marker_pendings_msgs, #{
         sid => SessionID,
-        msgs => Pendings4
+        msgs => Pendings2
     }),

     %% 6. Get pending messages from writers.
@@ -329,7 +327,7 @@ resume(ClientInfo, ConnInfo, Session) ->

     %% 7. Drain the inbox and usort the messages
     %% with the pending messages. (Should be done by caller.)
-    {Session2, Pendings4 ++ WriterPendings}.
+    {Session2, Pendings2 ++ WriterPendings}.

 resume_begin(Nodes, SessionID) ->
     Res = emqx_persistent_session_proto_v1:resume_begin(Nodes, self(), SessionID),
|
@ -584,7 +584,7 @@ t_handle_deliver(_) ->
|
|||
|
||||
t_handle_deliver_nl(_) ->
|
||||
ClientInfo = clientinfo(#{clientid => <<"clientid">>}),
|
||||
Session = session(#{subscriptions => #{<<"t1">> => #{nl => 1}}}),
|
||||
Session = session(ClientInfo, #{subscriptions => #{<<"t1">> => #{nl => 1}}}),
|
||||
Channel = channel(#{clientinfo => ClientInfo, session => Session}),
|
||||
Msg = emqx_message:make(<<"clientid">>, ?QOS_1, <<"t1">>, <<"qos1">>),
|
||||
NMsg = emqx_message:set_flag(nl, Msg),
|
||||
|
@@ -1070,11 +1070,14 @@ connpkt(Props) ->
         password = <<"passwd">>
     }.

-session() -> session(#{}).
-session(InitFields) when is_map(InitFields) ->
+session() -> session(#{zone => default, clientid => <<"fake-test">>}, #{}).
+session(InitFields) -> session(#{zone => default, clientid => <<"fake-test">>}, InitFields).
+session(ClientInfo, InitFields) when is_map(InitFields) ->
     Conf = emqx_cm:get_session_confs(
-        #{zone => default, clientid => <<"fake-test">>}, #{
-            receive_maximum => 0, expiry_interval => 0
+        ClientInfo,
+        #{
+            receive_maximum => 0,
+            expiry_interval => 0
         }
     ),
     Session = emqx_session:init(Conf),
@ -0,0 +1,116 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%%     http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------

-module(emqx_persistent_messages_SUITE).

-include_lib("stdlib/include/assert.hrl").

-compile(export_all).
-compile(nowarn_export_all).

-define(NOW,
    (calendar:system_time_to_rfc3339(erlang:system_time(millisecond), [{unit, millisecond}]))
).

all() ->
    emqx_common_test_helpers:all(?MODULE).

init_per_suite(Config) ->
    {ok, _} = application:ensure_all_started(emqx_durable_storage),
    ok = emqx_common_test_helpers:start_apps([], fun
        (emqx) ->
            emqx_common_test_helpers:boot_modules(all),
            emqx_config:init_load(emqx_schema, <<"persistent_session_store.ds = true">>),
            emqx_app:set_config_loader(?MODULE);
        (_) ->
            ok
    end),
    Config.

end_per_suite(_Config) ->
    emqx_common_test_helpers:stop_apps([]),
    application:stop(emqx_durable_storage),
    ok.

t_messages_persisted(_Config) ->
    C1 = connect(<<?MODULE_STRING "1">>, true, 30),
    C2 = connect(<<?MODULE_STRING "2">>, false, 60),
    C3 = connect(<<?MODULE_STRING "3">>, false, undefined),
    C4 = connect(<<?MODULE_STRING "4">>, false, 0),

    CP = connect(<<?MODULE_STRING "-pub">>, true, undefined),

    {ok, _, [1]} = emqtt:subscribe(C1, <<"client/+/topic">>, qos1),
    {ok, _, [0]} = emqtt:subscribe(C2, <<"client/+/topic">>, qos0),
    {ok, _, [1]} = emqtt:subscribe(C2, <<"random/+">>, qos1),
    {ok, _, [2]} = emqtt:subscribe(C3, <<"client/#">>, qos2),
    {ok, _, [0]} = emqtt:subscribe(C4, <<"random/#">>, qos0),

    Messages = [
        M1 = {<<"client/1/topic">>, <<"1">>},
        M2 = {<<"client/2/topic">>, <<"2">>},
        M3 = {<<"client/3/topic/sub">>, <<"3">>},
        M4 = {<<"client/4">>, <<"4">>},
        M5 = {<<"random/5">>, <<"5">>},
        M6 = {<<"random/6/topic">>, <<"6">>},
        M7 = {<<"client/7/topic">>, <<"7">>},
        M8 = {<<"client/8/topic/sub">>, <<"8">>},
        M9 = {<<"random/9">>, <<"9">>},
        M10 = {<<"random/10">>, <<"10">>}
    ],

    Results = [emqtt:publish(CP, Topic, Payload, 1) || {Topic, Payload} <- Messages],

    ct:pal("Results = ~p", [Results]),

    Persisted = consume(<<"local">>, {['#'], 0}),

    ct:pal("Persisted = ~p", [Persisted]),

    ?assertEqual(
        % [M1, M2, M5, M7, M9, M10],
        [M1, M2, M3, M4, M5, M6, M7, M8, M9, M10],
        [{emqx_message:topic(M), emqx_message:payload(M)} || M <- Persisted]
    ),

    ok.

%%

connect(ClientId, CleanStart, EI) ->
    {ok, Client} = emqtt:start_link([
        {clientid, ClientId},
        {proto_ver, v5},
        {clean_start, CleanStart},
        {properties,
            maps:from_list(
                [{'Session-Expiry-Interval', EI} || is_integer(EI)]
            )}
    ]),
    {ok, _} = emqtt:connect(Client),
    Client.

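%% Usage sketch (illustration only, not part of the original suite):
%% consume/2 below replays everything persisted in a durable-storage shard,
%% e.g. as invoked from t_messages_persisted/1 above:
%%   Persisted = consume(<<"local">>, {['#'], 0}),
%%   [emqx_message:topic(M) || M <- Persisted].
%% The shard name <<"local">> and the {TopicFilter, StartTime} replay tuple
%% mirror the call made in the test case.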
consume(Shard, Replay) ->
    {ok, It} = emqx_ds_storage_layer:make_iterator(Shard, Replay),
    consume(It).

consume(It) ->
    case emqx_ds_storage_layer:next(It) of
        {value, Msg, NIt} ->
            [emqx_persistent_session_ds:deserialize_message(Msg) | consume(NIt)];
        none ->
            []
    end.
@ -31,7 +31,10 @@

all() ->
    [
        {group, persistent_store_enabled},
        % NOTE
        % Tests are disabled while existing session persistence impl is being
        % phased out.
        % {group, persistent_store_enabled},
        {group, persistent_store_disabled}
    ].
@ -758,13 +758,16 @@ t_qos1_random_dispatch_if_all_members_are_down(Config) when is_list(Config) ->

    {ok, _} = emqtt:publish(ConnPub, Topic, <<"hello11">>, 1),
    ct:sleep(100),
    {ok, Msgs1} = gen_server:call(Pid1, get_mqueue),
    {ok, Msgs2} = gen_server:call(Pid2, get_mqueue),
    Msgs1 = emqx_mqueue:to_list(get_mqueue(Pid1)),
    Msgs2 = emqx_mqueue:to_list(get_mqueue(Pid2)),
    %% assert that the message is in the mqueue (because the socket is closed)
    ?assertMatch([#message{payload = <<"hello11">>}], Msgs1 ++ Msgs2),
    emqtt:stop(ConnPub),
    ok.

get_mqueue(ConnPid) ->
    emqx_connection:info({channel, {session, mqueue}}, sys:get_state(ConnPid)).

%% No ack, QoS 2 subscriptions:
%% client1 receives one message, sends PUBREC, then suspends;
%% client2 acts normally (auto_ack=true)
@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_bridge, [
    {description, "EMQX bridges"},
    {vsn, "0.1.23"},
    {vsn, "0.1.24"},
    {registered, [emqx_bridge_sup]},
    {mod, {emqx_bridge_app, []}},
    {applications, [
@ -88,7 +88,9 @@
    T == sqlserver;
    T == pulsar_producer;
    T == oracle;
    T == iotdb
    T == iotdb;
    T == kinesis_producer;
    T == greptimedb
).

-define(ROOT_KEY, bridges).
@ -546,7 +546,12 @@ schema("/bridges_probe") ->
            ?NO_CONTENT;
        {error, #{kind := validation_error} = Reason} ->
            ?BAD_REQUEST('TEST_FAILED', map_to_json(Reason));
        {error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' ->
        {error, Reason0} when not is_tuple(Reason0); element(1, Reason0) =/= 'exit' ->
            Reason =
                case Reason0 of
                    {unhealthy_target, Message} -> Message;
                    _ -> Reason0
                end,
            ?BAD_REQUEST('TEST_FAILED', Reason)
    end;
BadRequest ->
@ -374,6 +374,8 @@ parse_confs(<<"kafka">> = _Type, Name, Conf) ->
    Conf#{bridge_name => Name};
parse_confs(<<"pulsar_producer">> = _Type, Name, Conf) ->
    Conf#{bridge_name => Name};
parse_confs(<<"kinesis_producer">> = _Type, Name, Conf) ->
    Conf#{bridge_name => Name};
parse_confs(_Type, _Name, Conf) ->
    Conf.
@ -20,8 +20,7 @@ api_schemas(Method) ->
        %% We need to map the `type' field of a request (binary) to a
        %% bridge schema module.
        api_ref(emqx_bridge_gcp_pubsub, <<"gcp_pubsub">>, Method ++ "_producer"),
        %% TODO: un-hide for e5.2.0...
        %% api_ref(emqx_bridge_gcp_pubsub, <<"gcp_pubsub_consumer">>, Method ++ "_consumer"),
        api_ref(emqx_bridge_gcp_pubsub, <<"gcp_pubsub_consumer">>, Method ++ "_consumer"),
        api_ref(emqx_bridge_kafka, <<"kafka_consumer">>, Method ++ "_consumer"),
        %% TODO: rename this to `kafka_producer' after alias support is added
        %% to hocon; keeping this as just `kafka' for backwards compatibility.

@ -50,7 +49,9 @@ api_schemas(Method) ->
        api_ref(emqx_bridge_pulsar, <<"pulsar_producer">>, Method ++ "_producer"),
        api_ref(emqx_bridge_oracle, <<"oracle">>, Method),
        api_ref(emqx_bridge_iotdb, <<"iotdb">>, Method),
        api_ref(emqx_bridge_rabbitmq, <<"rabbitmq">>, Method)
        api_ref(emqx_bridge_rabbitmq, <<"rabbitmq">>, Method),
        api_ref(emqx_bridge_kinesis, <<"kinesis_producer">>, Method ++ "_producer"),
        api_ref(emqx_bridge_greptimedb, <<"greptimedb">>, Method ++ "_grpc_v1")
    ].

schema_modules() ->

@ -75,7 +76,9 @@ schema_modules() ->
        emqx_bridge_pulsar,
        emqx_bridge_oracle,
        emqx_bridge_iotdb,
        emqx_bridge_rabbitmq
        emqx_bridge_rabbitmq,
        emqx_bridge_kinesis,
        emqx_bridge_greptimedb
    ].

examples(Method) ->

@ -120,7 +123,9 @@ resource_type(opents) -> emqx_bridge_opents_connector;
resource_type(pulsar_producer) -> emqx_bridge_pulsar_impl_producer;
resource_type(oracle) -> emqx_oracle;
resource_type(iotdb) -> emqx_bridge_iotdb_impl;
resource_type(rabbitmq) -> emqx_bridge_rabbitmq_connector.
resource_type(rabbitmq) -> emqx_bridge_rabbitmq_connector;
resource_type(kinesis_producer) -> emqx_bridge_kinesis_impl_producer;
resource_type(greptimedb) -> emqx_bridge_greptimedb_connector.

fields(bridges) ->
    [

@ -201,7 +206,9 @@ fields(bridges) ->
    ] ++ kafka_structs() ++ pulsar_structs() ++ gcp_pubsub_structs() ++ mongodb_structs() ++
        influxdb_structs() ++
        redis_structs() ++
        pgsql_structs() ++ clickhouse_structs() ++ sqlserver_structs() ++ rabbitmq_structs().
        pgsql_structs() ++ clickhouse_structs() ++ sqlserver_structs() ++ rabbitmq_structs() ++
        kinesis_structs() ++
        greptimedb_structs().

mongodb_structs() ->
    [

@ -265,7 +272,6 @@ gcp_pubsub_structs() ->
                hoconsc:map(name, ref(emqx_bridge_gcp_pubsub, "config_consumer")),
                #{
                    desc => <<"EMQX Enterprise Config">>,
                    importance => ?IMPORTANCE_HIDDEN,
                    required => false
                }
            )}

@ -287,6 +293,21 @@ influxdb_structs() ->
        ]
    ].

greptimedb_structs() ->
    [
        {Protocol,
            mk(
                hoconsc:map(name, ref(emqx_bridge_greptimedb, Protocol)),
                #{
                    desc => <<"GreptimeDB Bridge Config">>,
                    required => false
                }
            )}
     || Protocol <- [
            greptimedb
        ]
    ].

redis_structs() ->
    [
        {Type,

@ -367,6 +388,18 @@ rabbitmq_structs() ->
        )}
    ].

kinesis_structs() ->
    [
        {kinesis_producer,
            mk(
                hoconsc:map(name, ref(emqx_bridge_kinesis, "config_producer")),
                #{
                    desc => <<"Amazon Kinesis Producer Bridge Config">>,
                    required => false
                }
            )}
    ].

api_ref(Module, Type, Method) ->
    {Type, ref(Module, Method)}.
@ -1,6 +1,6 @@
%% -*- mode: erlang; -*-
{erl_opts, [debug_info]}.
{deps, [ {erlcloud, {git, "https://github.com/emqx/erlcloud", {tag, "3.7.0-emqx-1"}}}
{deps, [ {erlcloud, {git, "https://github.com/emqx/erlcloud", {tag, "3.7.0-emqx-2"}}}
       , {emqx_connector, {path, "../../apps/emqx_connector"}}
       , {emqx_resource, {path, "../../apps/emqx_resource"}}
       , {emqx_bridge, {path, "../../apps/emqx_bridge"}}
@ -1,6 +1,6 @@
{application, emqx_bridge_gcp_pubsub, [
    {description, "EMQX Enterprise GCP Pub/Sub Bridge"},
    {vsn, "0.1.4"},
    {vsn, "0.1.5"},
    {registered, []},
    {applications, [
        kernel,
@ -249,6 +249,8 @@ check_workers(InstanceId, Client) ->
            #{return_values => true}
        )
    of
        {ok, []} ->
            connecting;
        {ok, Values} ->
            AllOk = lists:all(fun(S) -> S =:= subscription_ok end, Values),
            case AllOk of
@ -908,16 +908,15 @@ t_consume_ok(Config) ->
        ?assertEqual(3, emqx_resource_metrics:received_get(ResourceId))
    ),

    %% FIXME: uncomment after API spec is un-hidden...
    %% %% Check that the bridge probe API doesn't leak atoms.
    %% ProbeRes0 = probe_bridge_api(Config),
    %% ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
    %% AtomsBefore = erlang:system_info(atom_count),
    %% %% Probe again; shouldn't have created more atoms.
    %% ProbeRes1 = probe_bridge_api(Config),
    %% ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
    %% AtomsAfter = erlang:system_info(atom_count),
    %% ?assertEqual(AtomsBefore, AtomsAfter),
    %% Check that the bridge probe API doesn't leak atoms.
    ProbeRes0 = probe_bridge_api(Config),
    ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
    AtomsBefore = erlang:system_info(atom_count),
    %% Probe again; shouldn't have created more atoms.
    ProbeRes1 = probe_bridge_api(Config),
    ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
    AtomsAfter = erlang:system_info(atom_count),
    ?assertEqual(AtomsBefore, AtomsAfter),

    assert_non_received_metrics(BridgeName),
    ?block_until(

@ -1010,11 +1009,31 @@ t_bridge_rule_action_source(Config) ->
    ok.

t_on_get_status(Config) ->
    ResourceId = resource_id(Config),
    emqx_bridge_testlib:t_on_get_status(Config, #{failure_status => connecting}),
    %% no workers alive
    ?retry(
        _Interval0 = 200,
        _NAttempts0 = 20,
        ?assertMatch({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    WorkerPids = get_pull_worker_pids(Config),
    emqx_utils:pmap(
        fun(Pid) ->
            Ref = monitor(process, Pid),
            exit(Pid, kill),
            receive
                {'DOWN', Ref, process, Pid, killed} ->
                    ok
            end
        end,
        WorkerPids
    ),
    ?assertMatch({ok, connecting}, emqx_resource_manager:health_check(ResourceId)),
    ok.

t_create_via_http_api(_Config) ->
    ct:comment("FIXME: implement after API specs are un-hidden in e5.2.0..."),
t_create_update_via_http_api(Config) ->
    emqx_bridge_testlib:t_create_via_http(Config),
    ok.

t_multiple_topic_mappings(Config) ->

@ -1197,7 +1216,7 @@ t_nonexistent_topic(Config) ->
        emqx_resource_manager:health_check(ResourceId)
    ),
    ?assertMatch(
        {ok, _Group, #{error := "GCP PubSub topics are invalid" ++ _}},
        {ok, _Group, #{error := {unhealthy_target, "GCP PubSub topics are invalid" ++ _}}},
        emqx_resource_manager:lookup_cached(ResourceId)
    ),
    %% now create the topic and restart the bridge
@ -0,0 +1,19 @@
.rebar3
_*
.eunit
*.o
*.beam
*.plt
*.swp
*.swo
.erlang.cookie
ebin
log
erl_crash.dump
.rebar
logs
_build
.idea
*.iml
rebar3.crashdump
*~
@ -0,0 +1,94 @@
Business Source License 1.1

Licensor:             Hangzhou EMQ Technologies Co., Ltd.
Licensed Work:        EMQX Enterprise Edition
                      The Licensed Work is (c) 2023
                      Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
                      modify, and create derivative work for research
                      or education.
Change Date:          2027-02-01
Change License:       Apache License, Version 2.0

For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact

Notice

The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.

License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.

-----------------------------------------------------------------------------

Business Source License 1.1

Terms

The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.

Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.

If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.

All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.

You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.

Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.

This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).

TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.

MariaDB hereby grants you permission to use this License’s text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.

Covenants of Licensor

In consideration of the right to use this License’s text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:

1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.

2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.

3. To specify a Change Date.

4. Not to modify this License in any other way.
@ -0,0 +1,27 @@
# emqx_bridge_greptimedb
This application houses the GreptimeDB data integration for EMQX.
It provides the means to connect to GreptimeDB and publish messages to it.

It implements connection management and interaction without the need for a
separate connector app, since it is not shared with the authentication and
authorization applications.

## Docs

For more information about GreptimeDB, please refer to the [official
documentation](https://docs.greptime.com/).

## Configurations

The configuration is similar to that of the InfluxDB data bridge, with a few different parameters. Below are several important ones (see the example after this list):
- `server`: The IPv4 or IPv6 address or the hostname to connect to.
- `dbname`: The GreptimeDB database name.
- `write_syntax`: Like `write_syntax` in the InfluxDB configuration, this is the InfluxDB line protocol template used to write data points. It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point; placeholders are supported.

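A minimal configuration sketch (illustrative only; the field names come from
the schema in `emqx_bridge_greptimedb`, while the bridge name `demo` and all
values are made-up assumptions):

```hocon
bridges.greptimedb.demo {
  enable = true
  server = "127.0.0.1:4001"
  dbname = "public"
  username = "greptime_user"
  password = "greptime_pwd"
  precision = "ms"
  write_syntax = "${topic},clientid=${clientid} payload=${payload},bool=${payload.bool}"
  resource_opts {
    batch_size = 100
    batch_time = "20ms"
  }
}
```
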
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).

# License

See [BSL](./BSL.txt).
@ -0,0 +1,2 @@
toxiproxy
greptimedb
@ -0,0 +1,12 @@
{erl_opts, [
    debug_info
]}.

{deps, [
    {emqx_connector, {path, "../../apps/emqx_connector"}},
    {emqx_resource, {path, "../../apps/emqx_resource"}},
    {emqx_bridge, {path, "../../apps/emqx_bridge"}},
    {greptimedb, {git, "https://github.com/GreptimeTeam/greptimedb-client-erl", {tag, "v0.1.2"}}}
]}.
{plugins, [rebar3_path_deps]}.
{project_plugins, [erlfmt]}.
@ -0,0 +1,14 @@
{application, emqx_bridge_greptimedb, [
    {description, "EMQX GreptimeDB Bridge"},
    {vsn, "0.1.0"},
    {registered, []},
    {applications, [
        kernel,
        stdlib,
        emqx_resource,
        greptimedb
    ]},
    {env, []},
    {modules, []},
    {links, []}
]}.
@ -0,0 +1,299 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_greptimedb).

-include_lib("emqx/include/logger.hrl").
-include_lib("emqx_connector/include/emqx_connector.hrl").
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").

-import(hoconsc, [mk/2, enum/1, ref/2]).

-export([
    conn_bridge_examples/1
]).

-export([
    namespace/0,
    roots/0,
    fields/1,
    desc/1
]).

-type write_syntax() :: list().
-reflect_type([write_syntax/0]).
-typerefl_from_string({write_syntax/0, ?MODULE, to_influx_lines}).
-export([to_influx_lines/1]).

%% -------------------------------------------------------------------------------------------------
%% api

conn_bridge_examples(Method) ->
    [
        #{
            <<"greptimedb">> => #{
                summary => <<"Greptimedb HTTP API V2 Bridge">>,
                value => values("greptimedb", Method)
            }
        }
    ].

values(Protocol, get) ->
    values(Protocol, post);
values("greptimedb", post) ->
    SupportUint = <<"uint_value=${payload.uint_key}u,">>,
    TypeOpts = #{
        bucket => <<"example_bucket">>,
        org => <<"example_org">>,
        token => <<"example_token">>,
        server => <<"127.0.0.1:4001">>
    },
    values(common, "greptimedb", SupportUint, TypeOpts);
values(Protocol, put) ->
    values(Protocol, post).

values(common, Protocol, SupportUint, TypeOpts) ->
    CommonConfigs = #{
        type => list_to_atom(Protocol),
        name => <<"demo">>,
        enable => true,
        local_topic => <<"local/topic/#">>,
        write_syntax =>
            <<"${topic},clientid=${clientid}", " ", "payload=${payload},",
                "${clientid}_int_value=${payload.int_key}i,", SupportUint/binary,
                "bool=${payload.bool}">>,
        precision => ms,
        resource_opts => #{
            batch_size => 100,
            batch_time => <<"20ms">>
        },
        server => <<"127.0.0.1:4001">>,
        ssl => #{enable => false}
    },
    maps:merge(TypeOpts, CommonConfigs).

%% -------------------------------------------------------------------------------------------------
%% Hocon Schema Definitions
namespace() -> "bridge_greptimedb".

roots() -> [].

fields("post_grpc_v1") ->
    method_fields(post, greptimedb);
fields("put_grpc_v1") ->
    method_fields(put, greptimedb);
fields("get_grpc_v1") ->
    method_fields(get, greptimedb);
fields(Type) when
    Type == greptimedb
->
    greptimedb_bridge_common_fields() ++
        connector_fields(Type).

method_fields(post, ConnectorType) ->
    greptimedb_bridge_common_fields() ++
        connector_fields(ConnectorType) ++
        type_name_fields(ConnectorType);
method_fields(get, ConnectorType) ->
    greptimedb_bridge_common_fields() ++
        connector_fields(ConnectorType) ++
        type_name_fields(ConnectorType) ++
        emqx_bridge_schema:status_fields();
method_fields(put, ConnectorType) ->
    greptimedb_bridge_common_fields() ++
        connector_fields(ConnectorType).

greptimedb_bridge_common_fields() ->
    emqx_bridge_schema:common_bridge_fields() ++
        [
            {local_topic, mk(binary(), #{desc => ?DESC("local_topic")})},
            {write_syntax, fun write_syntax/1}
        ] ++
        emqx_resource_schema:fields("resource_opts").

connector_fields(Type) ->
    emqx_bridge_greptimedb_connector:fields(Type).

type_name_fields(Type) ->
    [
        {type, mk(Type, #{required => true, desc => ?DESC("desc_type")})},
        {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}
    ].

desc("config") ->
    ?DESC("desc_config");
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
    ["Configuration for Greptimedb using `", string:to_upper(Method), "` method."];
desc(greptimedb) ->
    ?DESC(emqx_bridge_greptimedb_connector, "greptimedb");
desc(_) ->
    undefined.

write_syntax(type) ->
    ?MODULE:write_syntax();
write_syntax(required) ->
    true;
write_syntax(validator) ->
    [?NOT_EMPTY("the value of the field 'write_syntax' cannot be empty")];
write_syntax(converter) ->
    fun to_influx_lines/1;
write_syntax(desc) ->
    ?DESC("write_syntax");
write_syntax(format) ->
    <<"sql">>;
write_syntax(_) ->
    undefined.

to_influx_lines(RawLines) ->
    try
        influx_lines(str(RawLines), [])
    catch
        _:Reason:Stacktrace ->
            Msg = lists:flatten(
                io_lib:format("Unable to parse Greptimedb line protocol: ~p", [RawLines])
            ),
            ?SLOG(error, #{msg => Msg, error_reason => Reason, stacktrace => Stacktrace}),
            throw(Msg)
    end.
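%% Parsing sketch (illustration only, not part of the original module):
%% given one line of write syntax, e.g.
%%   to_influx_lines("weather,location=us temp=${payload.temp} ${timestamp}")
%% the parser below yields roughly:
%%   [#{measurement => "weather",
%%      tags => [{"location", "us"}],
%%      fields => [{"temp", "${payload.temp}"}],
%%      timestamp => "${timestamp}"}]
%% Placeholders are kept verbatim at this stage; they are resolved per
%% message later, in emqx_bridge_greptimedb_connector.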

-define(MEASUREMENT_ESC_CHARS, [$,, $\s]).
-define(TAG_FIELD_KEY_ESC_CHARS, [$,, $=, $\s]).
-define(FIELD_VAL_ESC_CHARS, [$", $\\]).
% Common separator for both tags and fields
-define(SEP, $\s).
-define(MEASUREMENT_TAG_SEP, $,).
-define(KEY_SEP, $=).
-define(VAL_SEP, $,).
-define(NON_EMPTY, [_ | _]).

influx_lines([] = _RawLines, Acc) ->
    ?NON_EMPTY = lists:reverse(Acc);
influx_lines(RawLines, Acc) ->
    {Acc1, RawLines1} = influx_line(string:trim(RawLines, leading, "\s\n"), Acc),
    influx_lines(RawLines1, Acc1).

influx_line([], Acc) ->
    {Acc, []};
influx_line(Line, Acc) ->
    {?NON_EMPTY = Measurement, Line1} = measurement(Line),
    {Tags, Line2} = tags(Line1),
    {?NON_EMPTY = Fields, Line3} = influx_fields(Line2),
    {Timestamp, Line4} = timestamp(Line3),
    {
        [
            #{
                measurement => Measurement,
                tags => Tags,
                fields => Fields,
                timestamp => Timestamp
            }
            | Acc
        ],
        Line4
    }.

measurement(Line) ->
    unescape(?MEASUREMENT_ESC_CHARS, [?MEASUREMENT_TAG_SEP, ?SEP], Line, []).

tags([?MEASUREMENT_TAG_SEP | Line]) ->
    tags1(Line, []);
tags(Line) ->
    {[], Line}.

%% An empty line is invalid, as fields are required after tags;
%% we need to break the recursion here and fail later on parsing fields
tags1([] = Line, Acc) ->
    {lists:reverse(Acc), Line};
%% Matching a non-empty Acc treats lines like "m, field=field_val" as invalid
tags1([?SEP | _] = Line, ?NON_EMPTY = Acc) ->
    {lists:reverse(Acc), Line};
tags1(Line, Acc) ->
    {Tag, Line1} = tag(Line),
    tags1(Line1, [Tag | Acc]).

tag(Line) ->
    {?NON_EMPTY = Key, Line1} = key(Line),
    {?NON_EMPTY = Val, Line2} = tag_val(Line1),
    {{Key, Val}, Line2}.

tag_val(Line) ->
    {Val, Line1} = unescape(?TAG_FIELD_KEY_ESC_CHARS, [?VAL_SEP, ?SEP], Line, []),
    {Val, strip_l(Line1, ?VAL_SEP)}.

influx_fields([?SEP | Line]) ->
    fields1(string:trim(Line, leading, "\s"), []).

%% The timestamp is optional, so fields may be at the very end of the line
fields1([Ch | _] = Line, Acc) when Ch =:= ?SEP; Ch =:= $\n ->
    {lists:reverse(Acc), Line};
fields1([] = Line, Acc) ->
    {lists:reverse(Acc), Line};
fields1(Line, Acc) ->
    {Field, Line1} = field(Line),
    fields1(Line1, [Field | Acc]).

field(Line) ->
    {?NON_EMPTY = Key, Line1} = key(Line),
    {Val, Line2} = field_val(Line1),
    {{Key, Val}, Line2}.

field_val([$" | Line]) ->
    {Val, [$" | Line1]} = unescape(?FIELD_VAL_ESC_CHARS, [$"], Line, []),
    %% A quoted value can be empty
    {Val, strip_l(Line1, ?VAL_SEP)};
field_val(Line) ->
    %% An unquoted value should not be un-escaped according to the Greptimedb protocol,
    %% as it can only hold a float, integer, uinteger or boolean value.
    %% However, as templates are possible, un-escaping is applied here,
    %% which also helps to detect some invalid lines, e.g.: "m,tag=1 field= ${timestamp}"
    {Val, Line1} = unescape(?TAG_FIELD_KEY_ESC_CHARS, [?VAL_SEP, ?SEP, $\n], Line, []),
    {?NON_EMPTY = Val, strip_l(Line1, ?VAL_SEP)}.

timestamp([?SEP | Line]) ->
    Line1 = string:trim(Line, leading, "\s"),
    %% Similarly to an unquoted field value, un-escape the timestamp to validate and handle
    %% potentially escaped characters in a template
    {T, Line2} = unescape(?TAG_FIELD_KEY_ESC_CHARS, [?SEP, $\n], Line1, []),
    {timestamp1(T), Line2};
timestamp(Line) ->
    {undefined, Line}.

timestamp1(?NON_EMPTY = Ts) -> Ts;
timestamp1(_Ts) -> undefined.

%% Common for both tag and field keys
key(Line) ->
    {Key, Line1} = unescape(?TAG_FIELD_KEY_ESC_CHARS, [?KEY_SEP], Line, []),
    {Key, strip_l(Line1, ?KEY_SEP)}.

%% Only strip a character between pairs; don't strip it (and let it fail)
%% if the char to be stripped is at the end, e.g.: m,tag=val, field=val
strip_l([Ch, Ch1 | Str], Ch) when Ch1 =/= ?SEP ->
    [Ch1 | Str];
strip_l(Str, _Ch) ->
    Str.

unescape(EscapeChars, SepChars, [$\\, Char | T], Acc) ->
    ShouldEscapeBackslash = lists:member($\\, EscapeChars),
    Acc1 =
        case lists:member(Char, EscapeChars) of
            true -> [Char | Acc];
            false when not ShouldEscapeBackslash -> [Char, $\\ | Acc]
        end,
    unescape(EscapeChars, SepChars, T, Acc1);
unescape(EscapeChars, SepChars, [Char | T] = L, Acc) ->
    IsEscapeChar = lists:member(Char, EscapeChars),
    case lists:member(Char, SepChars) of
        true -> {lists:reverse(Acc), L};
        false when not IsEscapeChar -> unescape(EscapeChars, SepChars, T, [Char | Acc])
    end;
unescape(_EscapeChars, _SepChars, [] = L, Acc) ->
    {lists:reverse(Acc), L}.

str(A) when is_atom(A) ->
    atom_to_list(A);
str(B) when is_binary(B) ->
    binary_to_list(B);
str(S) when is_list(S) ->
    S.
@ -0,0 +1,636 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_greptimedb_connector).

-include_lib("emqx_connector/include/emqx_connector.hrl").

-include_lib("hocon/include/hoconsc.hrl").
-include_lib("typerefl/include/types.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").

-import(hoconsc, [mk/2, enum/1, ref/2]).

-behaviour(emqx_resource).

%% callbacks of behaviour emqx_resource
-export([
    callback_mode/0,
    on_start/2,
    on_stop/2,
    on_query/3,
    on_batch_query/3,
    on_get_status/2
]).

-export([
    roots/0,
    namespace/0,
    fields/1,
    desc/1
]).

%% only for test
-ifdef(TEST).
-export([is_unrecoverable_error/1]).
-endif.

-type ts_precision() :: ns | us | ms | s.

%% Allocatable resources
-define(greptime_client, greptime_client).

-define(GREPTIMEDB_DEFAULT_PORT, 4001).

-define(DEFAULT_DB, <<"public">>).

-define(GREPTIMEDB_HOST_OPTIONS, #{
    default_port => ?GREPTIMEDB_DEFAULT_PORT
}).

-define(DEFAULT_TIMESTAMP_TMPL, "${timestamp}").

-define(AUTO_RECONNECT_S, 1).

%% -------------------------------------------------------------------------------------------------
%% resource callback
callback_mode() -> always_sync.

on_start(InstId, Config) ->
    %% The InstId is used as the pool name by the greptimedb client,
    %% so there is no need to allocate a pool_name here.
    %% See: greptimedb:start_client/1
    start_client(InstId, Config).

on_stop(InstId, _State) ->
    case emqx_resource:get_allocated_resources(InstId) of
        #{?greptime_client := Client} ->
            greptimedb:stop_client(Client);
        _ ->
            ok
    end.

on_query(InstId, {send_message, Data}, _State = #{write_syntax := SyntaxLines, client := Client}) ->
    case data_to_points(Data, SyntaxLines) of
        {ok, Points} ->
            ?tp(
                greptimedb_connector_send_query,
                #{points => Points, batch => false, mode => sync}
            ),
            do_query(InstId, Client, Points);
        {error, ErrorPoints} ->
            ?tp(
                greptimedb_connector_send_query_error,
                #{batch => false, mode => sync, error => ErrorPoints}
            ),
            log_error_points(InstId, ErrorPoints),
            {error, ErrorPoints}
    end.

%% If any of the batched data fails to convert to points,
%% the whole batch query fails.
on_batch_query(InstId, BatchData, _State = #{write_syntax := SyntaxLines, client := Client}) ->
    case parse_batch_data(InstId, BatchData, SyntaxLines) of
        {ok, Points} ->
            ?tp(
                greptimedb_connector_send_query,
                #{points => Points, batch => true, mode => sync}
            ),
            do_query(InstId, Client, Points);
        {error, Reason} ->
            ?tp(
                greptimedb_connector_send_query_error,
                #{batch => true, mode => sync, error => Reason}
            ),
            {error, {unrecoverable_error, Reason}}
    end.

on_get_status(_InstId, #{client := Client}) ->
    case greptimedb:is_alive(Client) of
        true ->
            connected;
        false ->
            disconnected
    end.

%% -------------------------------------------------------------------------------------------------
%% schema
namespace() -> connector_greptimedb.

roots() ->
    [
        {config, #{
            type => hoconsc:union(
                [
                    hoconsc:ref(?MODULE, greptimedb)
                ]
            )
        }}
    ].

fields(common) ->
    [
        {server, server()},
        {precision,
            %% GreptimeDB only supports these 4 precisions
            mk(enum([ns, us, ms, s]), #{
                required => false, default => ms, desc => ?DESC("precision")
            })}
    ];
fields(greptimedb) ->
    fields(common) ++
        [
            {dbname, mk(binary(), #{required => true, desc => ?DESC("dbname")})},
            {username, mk(binary(), #{desc => ?DESC("username")})},
            {password,
                mk(binary(), #{
                    desc => ?DESC("password"),
                    format => <<"password">>,
                    sensitive => true,
                    converter => fun emqx_schema:password_converter/2
                })}
        ] ++ emqx_connector_schema_lib:ssl_fields().

server() ->
    Meta = #{
        required => false,
        default => <<"127.0.0.1:4001">>,
        desc => ?DESC("server"),
        converter => fun convert_server/2
    },
    emqx_schema:servers_sc(Meta, ?GREPTIMEDB_HOST_OPTIONS).

desc(common) ->
    ?DESC("common");
desc(greptimedb) ->
    ?DESC("greptimedb").

%% -------------------------------------------------------------------------------------------------
%% internal functions

start_client(InstId, Config) ->
    ClientConfig = client_config(InstId, Config),
    ?SLOG(info, #{
        msg => "starting greptimedb connector",
        connector => InstId,
        config => emqx_utils:redact(Config),
        client_config => emqx_utils:redact(ClientConfig)
    }),
    try do_start_client(InstId, ClientConfig, Config) of
        Res = {ok, #{client := Client}} ->
            ok = emqx_resource:allocate_resource(InstId, ?greptime_client, Client),
            Res;
        {error, Reason} ->
            {error, Reason}
    catch
        E:R:S ->
            ?tp(greptimedb_connector_start_exception, #{error => {E, R}}),
            ?SLOG(warning, #{
                msg => "start greptimedb connector error",
                connector => InstId,
                error => E,
                reason => emqx_utils:redact(R),
                stack => emqx_utils:redact(S)
            }),
            {error, R}
    end.

do_start_client(
    InstId,
    ClientConfig,
    Config = #{write_syntax := Lines}
) ->
    Precision = maps:get(precision, Config, ms),
    case greptimedb:start_client(ClientConfig) of
        {ok, Client} ->
            case greptimedb:is_alive(Client, true) of
                true ->
                    State = #{
                        client => Client,
                        dbname => proplists:get_value(dbname, ClientConfig, ?DEFAULT_DB),
                        write_syntax => to_config(Lines, Precision)
                    },
                    ?SLOG(info, #{
                        msg => "starting greptimedb connector success",
                        connector => InstId,
                        client => redact_auth(Client),
                        state => redact_auth(State)
                    }),
                    {ok, State};
                {false, Reason} ->
                    ?tp(greptimedb_connector_start_failed, #{
                        error => greptimedb_client_not_alive, reason => Reason
                    }),
                    ?SLOG(warning, #{
                        msg => "failed_to_start_greptimedb_connector",
                        connector => InstId,
                        client => redact_auth(Client),
                        reason => Reason
                    }),
                    %% no leak
                    _ = greptimedb:stop_client(Client),
                    {error, greptimedb_client_not_alive}
            end;
        {error, {already_started, Client0}} ->
            ?tp(greptimedb_connector_start_already_started, #{}),
            ?SLOG(info, #{
                msg => "restarting greptimedb connector, found already started client",
                connector => InstId,
                old_client => redact_auth(Client0)
            }),
            _ = greptimedb:stop_client(Client0),
            do_start_client(InstId, ClientConfig, Config);
        {error, Reason} ->
            ?tp(greptimedb_connector_start_failed, #{error => Reason}),
            ?SLOG(warning, #{
                msg => "failed_to_start_greptimedb_connector",
                connector => InstId,
                reason => Reason
            }),
            {error, Reason}
    end.

client_config(
    InstId,
    Config = #{
        server := Server
    }
) ->
    #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?GREPTIMEDB_HOST_OPTIONS),
    [
        {endpoints, [{http, str(Host), Port}]},
        {pool_size, erlang:system_info(schedulers)},
        {pool, InstId},
        {pool_type, random},
        {auto_reconnect, ?AUTO_RECONNECT_S},
        {timeunit, maps:get(precision, Config, ms)}
    ] ++ protocol_config(Config).

protocol_config(
    #{
        dbname := DbName,
        ssl := SSL
    } = Config
) ->
    [
        {dbname, str(DbName)}
    ] ++ auth(Config) ++
        ssl_config(SSL).

ssl_config(#{enable := false}) ->
    [
        {https_enabled, false}
    ];
ssl_config(SSL = #{enable := true}) ->
    [
        {https_enabled, true},
        {transport, ssl},
        {transport_opts, emqx_tls_lib:to_client_opts(SSL)}
    ].

auth(#{username := Username, password := Password}) ->
    [
        {auth, {basic, #{username => str(Username), password => str(Password)}}}
    ];
auth(_) ->
    [].
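%% Sketch (illustration only): for server <<"127.0.0.1:4001">>, dbname
%% <<"public">>, SSL disabled and no credentials, client_config/2 above
%% produces roughly the following option list (pool_size depends on the
%% number of schedulers on the host):
%%   [{endpoints, [{http, "127.0.0.1", 4001}]},
%%    {pool_size, 8},
%%    {pool, InstId},
%%    {pool_type, random},
%%    {auto_reconnect, 1},
%%    {timeunit, ms},
%%    {dbname, "public"},
%%    {https_enabled, false}]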

redact_auth(Term) ->
    emqx_utils:redact(Term, fun is_auth_key/1).

is_auth_key(Key) when is_binary(Key) ->
    string:equal("authorization", Key, true);
is_auth_key(_) ->
    false.

%% -------------------------------------------------------------------------------------------------
%% Query
do_query(InstId, Client, Points) ->
    case greptimedb:write_batch(Client, Points) of
        {ok, #{response := {affected_rows, #{value := Rows}}}} ->
            ?SLOG(debug, #{
                msg => "greptimedb write point success",
                connector => InstId,
                points => Points
            }),
            {ok, {affected_rows, Rows}};
        {error, {unauth, _, _}} ->
            ?tp(greptimedb_connector_do_query_failure, #{error => <<"authorization failure">>}),
            ?SLOG(error, #{
                msg => "greptimedb_authorization_failed",
                client => redact_auth(Client),
                connector => InstId
            }),
            {error, {unrecoverable_error, <<"authorization failure">>}};
        {error, Reason} = Err ->
            ?tp(greptimedb_connector_do_query_failure, #{error => Reason}),
            ?SLOG(error, #{
                msg => "greptimedb write point failed",
                connector => InstId,
                reason => Reason
            }),
            case is_unrecoverable_error(Err) of
                true ->
                    {error, {unrecoverable_error, Reason}};
                false ->
                    {error, {recoverable_error, Reason}}
            end
    end.

%% -------------------------------------------------------------------------------------------------
%% Tags & Fields Config Trans

to_config(Lines, Precision) ->
    to_config(Lines, [], Precision).

to_config([], Acc, _Precision) ->
    lists:reverse(Acc);
to_config([Item0 | Rest], Acc, Precision) ->
    Ts0 = maps:get(timestamp, Item0, ?DEFAULT_TIMESTAMP_TMPL),
    {Ts, FromPrecision, ToPrecision} = preproc_tmpl_timestamp(Ts0, Precision),
    Item = #{
        measurement => emqx_placeholder:preproc_tmpl(maps:get(measurement, Item0)),
        timestamp => Ts,
        precision => {FromPrecision, ToPrecision},
        tags => to_kv_config(maps:get(tags, Item0)),
        fields => to_kv_config(maps:get(fields, Item0))
    },
    to_config(Rest, [Item | Acc], Precision).

%% Pre-process the timestamp template.
%% Returns a tuple of three elements:
%% 1. The timestamp template itself.
%% 2. The source timestamp precision (ms if the template ${timestamp} is used).
%% 3. The target timestamp precision (configured for the client).
preproc_tmpl_timestamp(undefined, Precision) ->
    %% not configured, we default it to the message timestamp
    preproc_tmpl_timestamp(?DEFAULT_TIMESTAMP_TMPL, Precision);
preproc_tmpl_timestamp(Ts, Precision) when is_integer(Ts) ->
    %% a constant value is used, which is unusual, but we have to handle it specially
    {Ts, Precision, Precision};
preproc_tmpl_timestamp(Ts, Precision) when is_list(Ts) ->
    preproc_tmpl_timestamp(iolist_to_binary(Ts), Precision);
preproc_tmpl_timestamp(<<?DEFAULT_TIMESTAMP_TMPL>> = Ts, Precision) ->
    {emqx_placeholder:preproc_tmpl(Ts), ms, Precision};
preproc_tmpl_timestamp(Ts, Precision) when is_binary(Ts) ->
    %% a placeholder is in use, e.g. ${payload.my_timestamp};
    %% we can only hope the value will be of the same precision as configured
    {emqx_placeholder:preproc_tmpl(Ts), Precision, Precision}.
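%% Sketch (illustration only): with a client precision of `s', the default
%% template resolves as
%%   preproc_tmpl_timestamp(undefined, s)
%%   %= {<tokens for "${timestamp}">, ms, s}
%% i.e. the millisecond message timestamp is later converted to seconds by
%% maybe_convert_time_unit/2.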

to_kv_config(KVfields) ->
    lists:foldl(
        fun({K, V}, Acc) -> to_maps_config(K, V, Acc) end,
        #{},
        KVfields
    ).

to_maps_config(K, V, Res) ->
    NK = emqx_placeholder:preproc_tmpl(bin(K)),
    NV = emqx_placeholder:preproc_tmpl(bin(V)),
    Res#{NK => NV}.

%% -------------------------------------------------------------------------------------------------
%% Tags & Fields Data Trans
parse_batch_data(InstId, BatchData, SyntaxLines) ->
    {Points, Errors} = lists:foldl(
        fun({send_message, Data}, {ListOfPoints, ErrAccIn}) ->
            case data_to_points(Data, SyntaxLines) of
                {ok, Points} ->
                    {[Points | ListOfPoints], ErrAccIn};
                {error, ErrorPoints} ->
                    log_error_points(InstId, ErrorPoints),
                    {ListOfPoints, ErrAccIn + 1}
            end
        end,
        {[], 0},
        BatchData
    ),
    case Errors of
        0 ->
            {ok, lists:flatten(Points)};
        _ ->
            ?SLOG(error, #{
                msg => io_lib:format("Greptimedb trans point failed, count: ~p", [Errors]),
                connector => InstId,
                reason => points_trans_failed
            }),
            {error, points_trans_failed}
    end.

-spec data_to_points(map(), [
    #{
        fields := [{binary(), binary()}],
        measurement := binary(),
        tags := [{binary(), binary()}],
        timestamp := emqx_placeholder:tmpl_token() | integer(),
        precision := {From :: ts_precision(), To :: ts_precision()}
    }
]) -> {ok, [map()]} | {error, term()}.
data_to_points(Data, SyntaxLines) ->
    lines_to_points(Data, SyntaxLines, [], []).

%% When converting multiple rows of data into GreptimeDB line protocol, the rows
%% are considered strongly correlated: once one row fails to convert, all of
%% them are considered to have failed.
lines_to_points(_, [], Points, ErrorPoints) ->
    case ErrorPoints of
        [] ->
            {ok, Points};
        _ ->
            %% discard the points that converted successfully
            {error, ErrorPoints}
    end;
lines_to_points(Data, [#{timestamp := Ts} = Item | Rest], ResultPointsAcc, ErrorPointsAcc) when
    is_list(Ts)
->
    TransOptions = #{return => rawlist, var_trans => fun data_filter/1},
    case parse_timestamp(emqx_placeholder:proc_tmpl(Ts, Data, TransOptions)) of
        {ok, TsInt} ->
            Item1 = Item#{timestamp => TsInt},
            continue_lines_to_points(Data, Item1, Rest, ResultPointsAcc, ErrorPointsAcc);
        {error, BadTs} ->
            lines_to_points(Data, Rest, ResultPointsAcc, [
                {error, {bad_timestamp, BadTs}} | ErrorPointsAcc
            ])
    end;
lines_to_points(Data, [#{timestamp := Ts} = Item | Rest], ResultPointsAcc, ErrorPointsAcc) when
    is_integer(Ts)
->
    continue_lines_to_points(Data, Item, Rest, ResultPointsAcc, ErrorPointsAcc).

parse_timestamp([TsInt]) when is_integer(TsInt) ->
    {ok, TsInt};
parse_timestamp([TsBin]) ->
    try
        {ok, binary_to_integer(TsBin)}
    catch
        _:_ ->
            {error, TsBin}
    end.

continue_lines_to_points(Data, Item, Rest, ResultPointsAcc, ErrorPointsAcc) ->
    case line_to_point(Data, Item) of
        {_, [#{fields := Fields}]} when map_size(Fields) =:= 0 ->
            %% the greptimedb client doesn't like empty field maps...
            ErrorPointsAcc1 = [{error, no_fields} | ErrorPointsAcc],
            lines_to_points(Data, Rest, ResultPointsAcc, ErrorPointsAcc1);
        Point ->
            lines_to_points(Data, Rest, [Point | ResultPointsAcc], ErrorPointsAcc)
    end.

line_to_point(
    Data,
    #{
        measurement := Measurement,
        tags := Tags,
        fields := Fields,
        timestamp := Ts,
        precision := Precision
    } = Item
) ->
    {_, EncodedTags} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Tags),
    {_, EncodedFields} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Fields),
    TableName = emqx_placeholder:proc_tmpl(Measurement, Data),
    {TableName, [
        maps:without([precision, measurement], Item#{
            tags => EncodedTags,
            fields => EncodedFields,
            timestamp => maybe_convert_time_unit(Ts, Precision)
        })
    ]}.

maybe_convert_time_unit(Ts, {FromPrecision, ToPrecision}) ->
    erlang:convert_time_unit(Ts, time_unit(FromPrecision), time_unit(ToPrecision)).

time_unit(s) -> second;
time_unit(ms) -> millisecond;
time_unit(us) -> microsecond;
time_unit(ns) -> nanosecond.

maps_config_to_data(K, V, {Data, Res}) ->
    KTransOptions = #{return => rawlist, var_trans => fun key_filter/1},
    VTransOptions = #{return => rawlist, var_trans => fun data_filter/1},
    NK0 = emqx_placeholder:proc_tmpl(K, Data, KTransOptions),
    NV = emqx_placeholder:proc_tmpl(V, Data, VTransOptions),
    case {NK0, NV} of
        {[undefined], _} ->
            {Data, Res};
        %% undefined value in normal format [undefined] or int/uint format [undefined, <<"i">>]
        {_, [undefined | _]} ->
            {Data, Res};
        _ ->
            NK = list_to_binary(NK0),
            {Data, Res#{NK => value_type(NV)}}
    end.

value_type([Int, <<"i">>]) when
    is_integer(Int)
->
    greptimedb_values:int64_value(Int);
value_type([UInt, <<"u">>]) when
    is_integer(UInt)
->
    greptimedb_values:uint64_value(UInt);
value_type([<<"t">>]) ->
    greptimedb_values:boolean_value(true);
value_type([<<"T">>]) ->
    greptimedb_values:boolean_value(true);
value_type([true]) ->
    greptimedb_values:boolean_value(true);
value_type([<<"TRUE">>]) ->
    greptimedb_values:boolean_value(true);
value_type([<<"True">>]) ->
    greptimedb_values:boolean_value(true);
value_type([<<"f">>]) ->
    greptimedb_values:boolean_value(false);
value_type([<<"F">>]) ->
    greptimedb_values:boolean_value(false);
value_type([false]) ->
    greptimedb_values:boolean_value(false);
value_type([<<"FALSE">>]) ->
    greptimedb_values:boolean_value(false);
value_type([<<"False">>]) ->
    greptimedb_values:boolean_value(false);
value_type([Float]) when is_float(Float) ->
    Float;
value_type(Val) ->
    #{values => #{string_values => Val}, datatype => 'STRING'}.
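%% Sketch (illustration only): the type suffix carried over from the write
%% syntax selects the GreptimeDB value type, e.g.
%%   value_type([42, <<"i">>])  %= greptimedb_values:int64_value(42)
%%   value_type([42, <<"u">>])  %= greptimedb_values:uint64_value(42)
%%   value_type([<<"t">>])      %= boolean true
%%   value_type([1.5])          %= plain float
%%   value_type([<<"hi">>])     %= falls through to a 'STRING' value map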

key_filter(undefined) -> undefined;
key_filter(Value) -> emqx_utils_conv:bin(Value).

data_filter(undefined) -> undefined;
data_filter(Int) when is_integer(Int) -> Int;
data_filter(Number) when is_number(Number) -> Number;
data_filter(Bool) when is_boolean(Bool) -> Bool;
data_filter(Data) -> bin(Data).

bin(Data) -> emqx_utils_conv:bin(Data).

%% helper funcs
log_error_points(InstId, Errs) ->
    lists:foreach(
        fun({error, Reason}) ->
            ?SLOG(error, #{
                msg => "greptimedb trans point failed",
                connector => InstId,
                reason => Reason
            })
        end,
        Errs
    ).

convert_server(<<"http://", Server/binary>>, HoconOpts) ->
    convert_server(Server, HoconOpts);
convert_server(<<"https://", Server/binary>>, HoconOpts) ->
    convert_server(Server, HoconOpts);
convert_server(Server, HoconOpts) ->
    emqx_schema:convert_servers(Server, HoconOpts).

str(A) when is_atom(A) ->
    atom_to_list(A);
str(B) when is_binary(B) ->
    binary_to_list(B);
str(S) when is_list(S) ->
    S.

is_unrecoverable_error({error, {unrecoverable_error, _}}) ->
    true;
is_unrecoverable_error(_) ->
    false.

%%===================================================================
%% eunit tests
%%===================================================================

-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").

is_auth_key_test_() ->
    [
        ?_assert(is_auth_key(<<"Authorization">>)),
        ?_assertNot(is_auth_key(<<"Something">>)),
        ?_assertNot(is_auth_key(89))
    ].

%% for coverage
desc_test_() ->
    [
        ?_assertMatch(
            {desc, _, _},
            desc(common)
        ),
        ?_assertMatch(
            {desc, _, _},
            desc(greptimedb)
        ),
        ?_assertMatch(
            {desc, _, _},
            hocon_schema:field_schema(server(), desc)
        ),
        ?_assertMatch(
            connector_greptimedb,
            namespace()
        )
    ].
-endif.
@ -0,0 +1,939 @@
|
|||
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_greptimedb_SUITE).

-compile(nowarn_export_all).
-compile(export_all).

-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("emqx/include/logger.hrl").

%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------

all() ->
    [
        {group, with_batch},
        {group, without_batch}
    ].

groups() ->
    TCs = emqx_common_test_helpers:all(?MODULE),
    [
        {with_batch, [
            {group, sync_query}
        ]},
        {without_batch, [
            {group, sync_query}
        ]},
        {sync_query, [
            {group, grpcv1_tcp}
            %% uncomment tls when we are ready
            %% {group, grpcv1_tls}
        ]},
        {grpcv1_tcp, TCs}
        %%{grpcv1_tls, TCs}
    ].

init_per_suite(Config) ->
    Config.

end_per_suite(_Config) ->
    delete_all_bridges(),
    emqx_mgmt_api_test_util:end_suite(),
    ok = emqx_connector_test_helpers:stop_apps([
        emqx_conf, emqx_bridge, emqx_resource, emqx_rule_engine
    ]),
    _ = application:stop(emqx_connector),
    ok.

init_per_group(GreptimedbType, Config0) when
    GreptimedbType =:= grpcv1_tcp;
    GreptimedbType =:= grpcv1_tls
->
    #{
        host := GreptimedbHost,
        port := GreptimedbPort,
        http_port := GreptimedbHttpPort,
        use_tls := UseTLS,
        proxy_name := ProxyName
    } =
        case GreptimedbType of
            grpcv1_tcp ->
                #{
                    host => os:getenv("GREPTIMEDB_GRPCV1_TCP_HOST", "toxiproxy"),
                    port => list_to_integer(os:getenv("GREPTIMEDB_GRPCV1_TCP_PORT", "4001")),
                    http_port => list_to_integer(os:getenv("GREPTIMEDB_HTTP_PORT", "4000")),
                    use_tls => false,
                    proxy_name => "greptimedb_grpc"
                };
            grpcv1_tls ->
                #{
                    host => os:getenv("GREPTIMEDB_GRPCV1_TLS_HOST", "toxiproxy"),
                    port => list_to_integer(os:getenv("GREPTIMEDB_GRPCV1_TLS_PORT", "4001")),
                    http_port => list_to_integer(os:getenv("GREPTIMEDB_HTTP_PORT", "4000")),
                    use_tls => true,
                    proxy_name => "greptimedb_tls"
                }
        end,
    case emqx_common_test_helpers:is_tcp_server_available(GreptimedbHost, GreptimedbHttpPort) of
        true ->
            ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
            ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
            emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
            ok = start_apps(),
            {ok, _} = application:ensure_all_started(emqx_connector),
            {ok, _} = application:ensure_all_started(greptimedb),
            emqx_mgmt_api_test_util:init_suite(),
            Config = [{use_tls, UseTLS} | Config0],
            {Name, ConfigString, GreptimedbConfig} = greptimedb_config(
                grpcv1, GreptimedbHost, GreptimedbPort, Config
            ),
            EHttpcPoolNameBin = <<(atom_to_binary(?MODULE))/binary, "_http">>,
            EHttpcPoolName = binary_to_atom(EHttpcPoolNameBin),
            {EHttpcTransport, EHttpcTransportOpts} =
                case UseTLS of
                    true -> {tls, [{verify, verify_none}]};
                    false -> {tcp, []}
                end,
            EHttpcPoolOpts = [
                {host, GreptimedbHost},
                {port, GreptimedbHttpPort},
                {pool_size, 1},
                {transport, EHttpcTransport},
                {transport_opts, EHttpcTransportOpts}
            ],
            {ok, _} = ehttpc_sup:start_pool(EHttpcPoolName, EHttpcPoolOpts),
            [
                {proxy_host, ProxyHost},
                {proxy_port, ProxyPort},
                {proxy_name, ProxyName},
                {greptimedb_host, GreptimedbHost},
                {greptimedb_port, GreptimedbPort},
                {greptimedb_http_port, GreptimedbHttpPort},
                {greptimedb_type, grpcv1},
                {greptimedb_config, GreptimedbConfig},
                {greptimedb_config_string, ConfigString},
                {ehttpc_pool_name, EHttpcPoolName},
                {greptimedb_name, Name}
                | Config
            ];
        false ->
            {skip, no_greptimedb}
    end;
init_per_group(sync_query, Config) ->
    [{query_mode, sync} | Config];
init_per_group(with_batch, Config) ->
    [{batch_size, 100} | Config];
init_per_group(without_batch, Config) ->
    [{batch_size, 1} | Config];
init_per_group(_Group, Config) ->
    Config.

end_per_group(Group, Config) when
    Group =:= grpcv1_tcp;
    Group =:= grpcv1_tls
->
    ProxyHost = ?config(proxy_host, Config),
    ProxyPort = ?config(proxy_port, Config),
    EHttpcPoolName = ?config(ehttpc_pool_name, Config),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    ehttpc_sup:stop_pool(EHttpcPoolName),
    delete_bridge(Config),
    _ = application:stop(greptimedb),
    ok;
end_per_group(_Group, _Config) ->
    ok.

init_per_testcase(_Testcase, Config) ->
    delete_all_rules(),
    delete_all_bridges(),
    Config.

end_per_testcase(_Testcase, Config) ->
    ProxyHost = ?config(proxy_host, Config),
    ProxyPort = ?config(proxy_port, Config),
    ok = snabbkaffe:stop(),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    delete_all_rules(),
    delete_all_bridges(),
    ok.

%%------------------------------------------------------------------------------
%% Helper fns
%%------------------------------------------------------------------------------

start_apps() ->
    %% some configs in emqx_conf app are mandatory
    %% we want to make sure they are loaded before
    %% ekka start in emqx_common_test_helpers:start_apps/1
    emqx_common_test_helpers:render_and_load_app_config(emqx_conf),
    ok = emqx_common_test_helpers:start_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_bridge, emqx_rule_engine]).

example_write_syntax() ->
    %% N.B.: this single space character is relevant
    <<"${topic},clientid=${clientid}", " ", "payload=${payload},",
        "${clientid}_int_value=${payload.int_key}i,",
        "uint_value=${payload.uint_key}u,"
        "float_value=${payload.float_key},", "undef_value=${payload.undef},",
        "${undef_key}=\"hard-coded-value\",", "bool=${payload.bool}">>.
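%% For reference: the template above follows the InfluxDB line protocol layout
%% (the format parsed by to_influx_lines/1, exercised in
%% emqx_bridge_greptimedb_tests):
%%   <measurement>[,<tag_key>=<tag_value>...] <field_key>=<field_value>[,...] [timestamp]
%% which is why the single space separators between the segments matter.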

greptimedb_config(grpcv1 = Type, GreptimedbHost, GreptimedbPort, Config) ->
    BatchSize = proplists:get_value(batch_size, Config, 100),
    QueryMode = proplists:get_value(query_mode, Config, sync),
    UseTLS = proplists:get_value(use_tls, Config, false),
    Name = atom_to_binary(?MODULE),
    WriteSyntax = example_write_syntax(),
    ConfigString =
        io_lib:format(
            "bridges.greptimedb.~s {\n"
            " enable = true\n"
            " server = \"~p:~b\"\n"
            " dbname = public\n"
            " username = greptime_user\n"
            " password = greptime_pwd\n"
            " precision = ns\n"
            " write_syntax = \"~s\"\n"
            " resource_opts = {\n"
            " request_ttl = 1s\n"
            " query_mode = ~s\n"
            " batch_size = ~b\n"
            " }\n"
            " ssl {\n"
            " enable = ~p\n"
            " verify = verify_none\n"
            " }\n"
            "}\n",
            [
                Name,
                GreptimedbHost,
                GreptimedbPort,
                WriteSyntax,
                QueryMode,
                BatchSize,
                UseTLS
            ]
        ),
    {Name, ConfigString, parse_and_check(ConfigString, Type, Name)}.

parse_and_check(ConfigString, Type, Name) ->
    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
    TypeBin = greptimedb_type_bin(Type),
    hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
    #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf,
    Config.

greptimedb_type_bin(grpcv1) ->
    <<"greptimedb">>.

create_bridge(Config) ->
    create_bridge(Config, _Overrides = #{}).

create_bridge(Config, Overrides) ->
    Type = greptimedb_type_bin(?config(greptimedb_type, Config)),
    Name = ?config(greptimedb_name, Config),
    GreptimedbConfig0 = ?config(greptimedb_config, Config),
    GreptimedbConfig = emqx_utils_maps:deep_merge(GreptimedbConfig0, Overrides),
    emqx_bridge:create(Type, Name, GreptimedbConfig).

delete_bridge(Config) ->
    Type = greptimedb_type_bin(?config(greptimedb_type, Config)),
    Name = ?config(greptimedb_name, Config),
    emqx_bridge:remove(Type, Name).

delete_all_bridges() ->
    lists:foreach(
        fun(#{name := Name, type := Type}) ->
            emqx_bridge:remove(Type, Name)
        end,
        emqx_bridge:list()
    ).

delete_all_rules() ->
    lists:foreach(
        fun(#{id := RuleId}) ->
            ok = emqx_rule_engine:delete_rule(RuleId)
        end,
        emqx_rule_engine:get_rules()
    ).

create_rule_and_action_http(Config) ->
    create_rule_and_action_http(Config, _Overrides = #{}).

create_rule_and_action_http(Config, Overrides) ->
    GreptimedbName = ?config(greptimedb_name, Config),
    Type = greptimedb_type_bin(?config(greptimedb_type, Config)),
    BridgeId = emqx_bridge_resource:bridge_id(Type, GreptimedbName),
    Params0 = #{
        enable => true,
        sql => <<"SELECT * FROM \"t/topic\"">>,
        actions => [BridgeId]
    },
    Params = emqx_utils_maps:deep_merge(Params0, Overrides),
    Path = emqx_mgmt_api_test_util:api_path(["rules"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of
        {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])};
        Error -> Error
    end.

send_message(Config, Payload) ->
    Name = ?config(greptimedb_name, Config),
    Type = greptimedb_type_bin(?config(greptimedb_type, Config)),
    BridgeId = emqx_bridge_resource:bridge_id(Type, Name),
    Resp = emqx_bridge:send_message(BridgeId, Payload),
    Resp.

query_by_clientid(Topic, ClientId, Config) ->
    GreptimedbHost = ?config(greptimedb_host, Config),
    GreptimedbPort = ?config(greptimedb_http_port, Config),
    EHttpcPoolName = ?config(ehttpc_pool_name, Config),
    UseTLS = ?config(use_tls, Config),
    Path = <<"/v1/sql?db=public">>,
    Scheme =
        case UseTLS of
            true -> <<"https://">>;
            false -> <<"http://">>
        end,
    URI = iolist_to_binary([
        Scheme,
        list_to_binary(GreptimedbHost),
        ":",
        integer_to_binary(GreptimedbPort),
        Path
    ]),
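    %% Note: the Basic credential below should be base64("greptime_user:greptime_pwd"),
    %% matching the static user provider configured for the test GreptimeDB instance.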
    Headers = [
        {"Authorization", "Basic Z3JlcHRpbWVfdXNlcjpncmVwdGltZV9wd2Q="},
        {"Content-Type", "application/x-www-form-urlencoded"}
    ],
    Body = <<"sql=select * from \"", Topic/binary, "\" where clientid='", ClientId/binary, "'">>,
    {ok, 200, _Headers, RawBody0} =
        ehttpc:request(
            EHttpcPoolName,
            post,
            {URI, Headers, Body},
            _Timeout = 10_000,
            _Retry = 0
        ),

    case emqx_utils_json:decode(RawBody0, [return_maps]) of
        #{
            <<"code">> := 0,
            <<"output">> := [
                #{
                    <<"records">> := #{
                        <<"rows">> := Rows,
                        <<"schema">> := Schema
                    }
                }
            ]
        } ->
            make_row(Schema, Rows);
        #{
            <<"code">> := Code,
            <<"error">> := Error
        } ->
            GreptimedbName = ?config(greptimedb_name, Config),
            Type = greptimedb_type_bin(?config(greptimedb_type, Config)),
            BridgeId = emqx_bridge_resource:bridge_id(Type, GreptimedbName),

            ?SLOG(error, #{
                msg => io_lib:format("Failed to query: ~p, ~p", [Code, Error]),
                connector => BridgeId,
                reason => Error
            }),
            %% TODO(dennis): check the error by code
            case binary:match(Error, <<"Table not found">>) of
                nomatch ->
                    {error, Error};
                _ ->
                    %% Table not found
                    #{}
            end
    end.

make_row(null, _Rows) ->
    #{};
make_row(_Schema, []) ->
    #{};
make_row(#{<<"column_schemas">> := ColumnsSchemas}, [Row]) ->
    Columns = lists:map(fun(#{<<"name">> := Name}) -> Name end, ColumnsSchemas),
    maps:from_list(lists:zip(Columns, Row)).

assert_persisted_data(ClientId, Expected, PersistedData) ->
    ClientIdIntKey = <<ClientId/binary, "_int_value">>,
    maps:foreach(
        fun
            (int_value, ExpectedValue) ->
                ?assertMatch(
                    ExpectedValue,
                    maps:get(ClientIdIntKey, PersistedData)
                );
            (Key, ExpectedValue) ->
                ?assertMatch(
                    ExpectedValue,
                    maps:get(atom_to_binary(Key), PersistedData),
                    #{expected => ExpectedValue}
                )
        end,
        Expected
    ),
    ok.

resource_id(Config) ->
    Type = greptimedb_type_bin(?config(greptimedb_type, Config)),
    Name = ?config(greptimedb_name, Config),
    emqx_bridge_resource:resource_id(Type, Name).

%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------

t_start_ok(Config) ->
    QueryMode = ?config(query_mode, Config),
    ?assertMatch(
        {ok, _},
        create_bridge(Config)
    ),
    ClientId = emqx_guid:to_hexstr(emqx_guid:gen()),
    Payload = #{
        int_key => -123,
        bool => true,
        float_key => 24.5,
        uint_key => 123
    },
    SentData = #{
        <<"clientid">> => ClientId,
        <<"topic">> => atom_to_binary(?FUNCTION_NAME),
        <<"payload">> => Payload,
        <<"timestamp">> => erlang:system_time(millisecond)
    },
    ?check_trace(
        begin
            case QueryMode of
                sync ->
                    ?assertMatch({ok, _}, send_message(Config, SentData))
            end,
            PersistedData = query_by_clientid(atom_to_binary(?FUNCTION_NAME), ClientId, Config),
            Expected = #{
                bool => true,
                int_value => -123,
                uint_value => 123,
                float_value => 24.5,
                payload => emqx_utils_json:encode(Payload)
            },
            assert_persisted_data(ClientId, Expected, PersistedData),
            ok
        end,
        fun(Trace0) ->
            Trace = ?of_kind(greptimedb_connector_send_query, Trace0),
            ?assertMatch([#{points := [_]}], Trace),
            [#{points := [Point0]}] = Trace,
            {Measurement, [Point]} = Point0,
            ct:pal("sent point: ~p", [Point]),
            ?assertMatch(
                <<_/binary>>,
                Measurement
            ),
            ?assertMatch(
                #{
                    fields := #{},
                    tags := #{},
                    timestamp := TS
                } when is_integer(TS),
                Point
            ),
            #{fields := Fields} = Point,
            ?assert(lists:all(fun is_binary/1, maps:keys(Fields))),
            ?assertNot(maps:is_key(<<"undefined">>, Fields)),
            ?assertNot(maps:is_key(<<"undef_value">>, Fields)),
            ok
        end
    ),
    ok.

t_start_already_started(Config) ->
    Type = greptimedb_type_bin(?config(greptimedb_type, Config)),
    Name = ?config(greptimedb_name, Config),
    GreptimedbConfigString = ?config(greptimedb_config_string, Config),
    ?assertMatch(
        {ok, _},
        create_bridge(Config)
    ),
    ResourceId = resource_id(Config),
    TypeAtom = binary_to_atom(Type),
    NameAtom = binary_to_atom(Name),
    {ok, #{bridges := #{TypeAtom := #{NameAtom := GreptimedbConfigMap}}}} = emqx_hocon:check(
        emqx_bridge_schema, GreptimedbConfigString
    ),
    ?check_trace(
        emqx_bridge_greptimedb_connector:on_start(ResourceId, GreptimedbConfigMap),
        fun(Result, Trace) ->
            ?assertMatch({ok, _}, Result),
            ?assertMatch([_], ?of_kind(greptimedb_connector_start_already_started, Trace)),
            ok
        end
    ),
    ok.

t_start_ok_timestamp_write_syntax(Config) ->
    GreptimedbType = ?config(greptimedb_type, Config),
    GreptimedbName = ?config(greptimedb_name, Config),
    GreptimedbConfigString0 = ?config(greptimedb_config_string, Config),
    GreptimedbTypeCfg =
        case GreptimedbType of
            grpcv1 -> "greptimedb"
        end,
    WriteSyntax =
        %% N.B.: these single space characters are relevant
        <<"${topic},clientid=${clientid}", " ", "payload=${payload},",
            "${clientid}_int_value=${payload.int_key}i,",
            "uint_value=${payload.uint_key}u,"
            "bool=${payload.bool}", " ", "${timestamp}">>,
    %% append this to override the config
    GreptimedbConfigString1 =
        io_lib:format(
            "bridges.~s.~s {\n"
            " write_syntax = \"~s\"\n"
            "}\n",
            [GreptimedbTypeCfg, GreptimedbName, WriteSyntax]
        ),
    GreptimedbConfig1 = parse_and_check(
        GreptimedbConfigString0 ++ GreptimedbConfigString1,
        GreptimedbType,
        GreptimedbName
    ),
    Config1 = [{greptimedb_config, GreptimedbConfig1} | Config],
    ?assertMatch(
        {ok, _},
        create_bridge(Config1)
    ),
    ok.

t_start_ok_no_subject_tags_write_syntax(Config) ->
    GreptimedbType = ?config(greptimedb_type, Config),
    GreptimedbName = ?config(greptimedb_name, Config),
    GreptimedbConfigString0 = ?config(greptimedb_config_string, Config),
    GreptimedbTypeCfg =
        case GreptimedbType of
            grpcv1 -> "greptimedb"
        end,
    WriteSyntax =
        %% N.B.: these single space characters are relevant
        <<"${topic}", " ", "payload=${payload},", "${clientid}_int_value=${payload.int_key}i,",
            "uint_value=${payload.uint_key}u,"
            "bool=${payload.bool}", " ", "${timestamp}">>,
    %% append this to override the config
    GreptimedbConfigString1 =
        io_lib:format(
            "bridges.~s.~s {\n"
            " write_syntax = \"~s\"\n"
            "}\n",
            [GreptimedbTypeCfg, GreptimedbName, WriteSyntax]
        ),
    GreptimedbConfig1 = parse_and_check(
        GreptimedbConfigString0 ++ GreptimedbConfigString1,
        GreptimedbType,
        GreptimedbName
    ),
    Config1 = [{greptimedb_config, GreptimedbConfig1} | Config],
    ?assertMatch(
        {ok, _},
        create_bridge(Config1)
    ),
    ok.

t_const_timestamp(Config) ->
    QueryMode = ?config(query_mode, Config),
    Const = erlang:system_time(nanosecond),
    ConstBin = integer_to_binary(Const),
    ?assertMatch(
        {ok, _},
        create_bridge(
            Config,
            #{
                <<"write_syntax">> =>
                    <<"mqtt,clientid=${clientid} foo=${payload.foo}i,bar=5i ", ConstBin/binary>>
            }
        )
    ),
    ClientId = emqx_guid:to_hexstr(emqx_guid:gen()),
    Payload = #{<<"foo">> => 123},
    SentData = #{
        <<"clientid">> => ClientId,
        <<"topic">> => atom_to_binary(?FUNCTION_NAME),
        <<"payload">> => Payload,
        <<"timestamp">> => erlang:system_time(millisecond)
    },
    case QueryMode of
        sync ->
            ?assertMatch({ok, _}, send_message(Config, SentData))
    end,
    PersistedData = query_by_clientid(<<"mqtt">>, ClientId, Config),
    Expected = #{foo => 123},
    assert_persisted_data(ClientId, Expected, PersistedData),
    TimeReturned = maps:get(<<"greptime_timestamp">>, PersistedData),
    ?assertEqual(Const, TimeReturned).

t_boolean_variants(Config) ->
    QueryMode = ?config(query_mode, Config),
    ?assertMatch(
        {ok, _},
        create_bridge(Config)
    ),
    BoolVariants = #{
        true => true,
        false => false,
        <<"t">> => true,
        <<"f">> => false,
        <<"T">> => true,
        <<"F">> => false,
        <<"TRUE">> => true,
        <<"FALSE">> => false,
        <<"True">> => true,
        <<"False">> => false
    },
    maps:foreach(
        fun(BoolVariant, Translation) ->
            ClientId = emqx_guid:to_hexstr(emqx_guid:gen()),
            Payload = #{
                int_key => -123,
                bool => BoolVariant,
                uint_key => 123
            },
            SentData = #{
                <<"clientid">> => ClientId,
                <<"topic">> => atom_to_binary(?FUNCTION_NAME),
                <<"timestamp">> => erlang:system_time(millisecond),
                <<"payload">> => Payload
            },
            case QueryMode of
                sync ->
                    ?assertMatch({ok, _}, send_message(Config, SentData))
            end,
            case QueryMode of
                sync -> ok
            end,
            PersistedData = query_by_clientid(atom_to_binary(?FUNCTION_NAME), ClientId, Config),
            Expected = #{
                bool => Translation,
                int_value => -123,
                uint_value => 123,
                payload => emqx_utils_json:encode(Payload)
            },
            assert_persisted_data(ClientId, Expected, PersistedData),
            ok
        end,
        BoolVariants
    ),
    ok.

t_bad_timestamp(Config) ->
    GreptimedbType = ?config(greptimedb_type, Config),
    GreptimedbName = ?config(greptimedb_name, Config),
    QueryMode = ?config(query_mode, Config),
    BatchSize = ?config(batch_size, Config),
    GreptimedbConfigString0 = ?config(greptimedb_config_string, Config),
    GreptimedbTypeCfg =
        case GreptimedbType of
            grpcv1 -> "greptimedb"
        end,
    WriteSyntax =
        %% N.B.: these single space characters are relevant
        <<"${topic}", " ", "payload=${payload},", "${clientid}_int_value=${payload.int_key}i,",
            "uint_value=${payload.uint_key}u,"
            "bool=${payload.bool}", " ", "bad_timestamp">>,
    %% append this to override the config
    GreptimedbConfigString1 =
        io_lib:format(
            "bridges.~s.~s {\n"
            " write_syntax = \"~s\"\n"
            "}\n",
            [GreptimedbTypeCfg, GreptimedbName, WriteSyntax]
        ),
    GreptimedbConfig1 = parse_and_check(
        GreptimedbConfigString0 ++ GreptimedbConfigString1,
        GreptimedbType,
        GreptimedbName
    ),
    Config1 = [{greptimedb_config, GreptimedbConfig1} | Config],
    ?assertMatch(
        {ok, _},
        create_bridge(Config1)
    ),
    ClientId = emqx_guid:to_hexstr(emqx_guid:gen()),
    Payload = #{
        int_key => -123,
        bool => false,
        uint_key => 123
    },
    SentData = #{
        <<"clientid">> => ClientId,
        <<"topic">> => atom_to_binary(?FUNCTION_NAME),
        <<"timestamp">> => erlang:system_time(millisecond),
        <<"payload">> => Payload
    },
    ?check_trace(
        ?wait_async_action(
            send_message(Config1, SentData),
            #{?snk_kind := greptimedb_connector_send_query_error},
            10_000
        ),
        fun(Result, _Trace) ->
            ?assertMatch({_, {ok, _}}, Result),
            {Return, {ok, _}} = Result,
            IsBatch = BatchSize > 1,
            case {QueryMode, IsBatch} of
                {sync, false} ->
                    ?assertEqual(
                        {error, [
                            {error, {bad_timestamp, <<"bad_timestamp">>}}
                        ]},
                        Return
                    );
                {sync, true} ->
                    ?assertEqual({error, {unrecoverable_error, points_trans_failed}}, Return)
            end,
            ok
        end
    ),
    ok.

t_get_status(Config) ->
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    ProxyName = ?config(proxy_name, Config),
    {ok, _} = create_bridge(Config),
    ResourceId = resource_id(Config),
    ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)),
    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
        ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId))
    end),
    ok.

t_create_disconnected(Config) ->
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    ProxyName = ?config(proxy_name, Config),
    ?check_trace(
        emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
            ?assertMatch({ok, _}, create_bridge(Config))
        end),
        fun(Trace) ->
            ?assertMatch(
                [#{error := greptimedb_client_not_alive, reason := _SomeReason}],
                ?of_kind(greptimedb_connector_start_failed, Trace)
            ),
            ok
        end
    ),
    ok.

t_start_error(Config) ->
    %% simulate client start error
    ?check_trace(
        emqx_common_test_helpers:with_mock(
            greptimedb,
            start_client,
            fun(_Config) -> {error, some_error} end,
            fun() ->
                ?wait_async_action(
                    ?assertMatch({ok, _}, create_bridge(Config)),
                    #{?snk_kind := greptimedb_connector_start_failed},
                    10_000
                )
            end
        ),
        fun(Trace) ->
            ?assertMatch(
                [#{error := some_error}],
                ?of_kind(greptimedb_connector_start_failed, Trace)
            ),
            ok
        end
    ),
    ok.

t_start_exception(Config) ->
    %% simulate client start exception
    ?check_trace(
        emqx_common_test_helpers:with_mock(
            greptimedb,
            start_client,
            fun(_Config) -> error(boom) end,
            fun() ->
                ?wait_async_action(
                    ?assertMatch({ok, _}, create_bridge(Config)),
                    #{?snk_kind := greptimedb_connector_start_exception},
                    10_000
                )
            end
        ),
        fun(Trace) ->
            ?assertMatch(
                [#{error := {error, boom}}],
                ?of_kind(greptimedb_connector_start_exception, Trace)
            ),
            ok
        end
    ),
    ok.

t_write_failure(Config) ->
    ProxyName = ?config(proxy_name, Config),
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    QueryMode = ?config(query_mode, Config),
    {ok, _} = create_bridge(Config),
    ClientId = emqx_guid:to_hexstr(emqx_guid:gen()),
    Payload = #{
        int_key => -123,
        bool => true,
        float_key => 24.5,
        uint_key => 123
    },
    SentData = #{
        <<"clientid">> => ClientId,
        <<"topic">> => atom_to_binary(?FUNCTION_NAME),
        <<"timestamp">> => erlang:system_time(millisecond),
        <<"payload">> => Payload
    },
    ?check_trace(
        emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
            case QueryMode of
                sync ->
                    ?wait_async_action(
                        ?assertMatch(
                            {error, {resource_error, #{reason := timeout}}},
                            send_message(Config, SentData)
                        ),
                        #{?snk_kind := greptimedb_connector_do_query_failure, action := nack},
                        16_000
                    )
            end
        end),
        fun(Trace) ->
            case QueryMode of
                sync ->
                    ?assertMatch(
                        [#{error := _} | _],
                        ?of_kind(greptimedb_connector_do_query_failure, Trace)
                    )
            end,
            ok
        end
    ),
    ok.

t_missing_field(Config) ->
    BatchSize = ?config(batch_size, Config),
    IsBatch = BatchSize > 1,
    {ok, _} =
        create_bridge(
            Config,
            #{
                <<"resource_opts">> => #{<<"worker_pool_size">> => 1},
                <<"write_syntax">> => <<"${clientid} foo=${foo}i">>
            }
        ),
    %% note: we don't select foo here, but we interpolate it in the
    %% fields, so it'll become undefined.
    {ok, _} = create_rule_and_action_http(Config, #{sql => <<"select * from \"t/topic\"">>}),
    ClientId0 = emqx_guid:to_hexstr(emqx_guid:gen()),
    ClientId1 = emqx_guid:to_hexstr(emqx_guid:gen()),
    %% Message with the field that we "forgot" to select in the rule
    Msg0 = emqx_message:make(ClientId0, <<"t/topic">>, emqx_utils_json:encode(#{foo => 123})),
    %% Message without any fields
    Msg1 = emqx_message:make(ClientId1, <<"t/topic">>, emqx_utils_json:encode(#{})),
    ?check_trace(
        begin
            emqx:publish(Msg0),
            emqx:publish(Msg1),
            NEvents = 1,
            {ok, _} =
                snabbkaffe:block_until(
                    ?match_n_events(NEvents, #{
                        ?snk_kind := greptimedb_connector_send_query_error
                    }),
                    _Timeout1 = 16_000
                ),
            ok
        end,
        fun(Trace) ->
            PersistedData0 = query_by_clientid(ClientId0, ClientId0, Config),
            PersistedData1 = query_by_clientid(ClientId1, ClientId1, Config),
            case IsBatch of
                true ->
                    ?assertMatch(
                        [#{error := points_trans_failed} | _],
                        ?of_kind(greptimedb_connector_send_query_error, Trace)
                    );
                false ->
                    ?assertMatch(
                        [#{error := [{error, no_fields}]} | _],
                        ?of_kind(greptimedb_connector_send_query_error, Trace)
                    )
            end,
            %% nothing should have been persisted
            ?assertEqual(#{}, PersistedData0),
            ?assertEqual(#{}, PersistedData1),
            ok
        end
    ),
    ok.

t_authentication_error_on_send_message(Config0) ->
    ResourceId = resource_id(Config0),
    QueryMode = proplists:get_value(query_mode, Config0, sync),
    GreptimedbType = ?config(greptimedb_type, Config0),
    GreptimeConfig0 = proplists:get_value(greptimedb_config, Config0),
    GreptimeConfig =
        case GreptimedbType of
            grpcv1 -> GreptimeConfig0#{<<"password">> => <<"wrong_password">>}
        end,
    Config = lists:keyreplace(greptimedb_config, 1, Config0, {greptimedb_config, GreptimeConfig}),

    % Fake initialization to simulate credential update after bridge was created.
    emqx_common_test_helpers:with_mock(
        greptimedb,
        check_auth,
        fun(_) ->
            ok
        end,
        fun() ->
            {ok, _} = create_bridge(Config),
            ?retry(
                _Sleep = 1_000,
                _Attempts = 10,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            )
        end
    ),

    % Now back to wrong credentials
    ClientId = emqx_guid:to_hexstr(emqx_guid:gen()),
    Payload = #{
        int_key => -123,
        bool => true,
        float_key => 24.5,
        uint_key => 123
    },
    SentData = #{
        <<"clientid">> => ClientId,
        <<"topic">> => atom_to_binary(?FUNCTION_NAME),
        <<"timestamp">> => erlang:system_time(millisecond),
        <<"payload">> => Payload
    },
    case QueryMode of
        sync ->
            ?assertMatch(
                {error, {unrecoverable_error, <<"authorization failure">>}},
                send_message(Config, SentData)
            )
    end,
    ok.
|
@ -0,0 +1,155 @@
|
|||
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------

-module(emqx_bridge_greptimedb_connector_SUITE).

-compile(nowarn_export_all).
-compile(export_all).

-include_lib("emqx_connector/include/emqx_connector.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").

-define(GREPTIMEDB_RESOURCE_MOD, emqx_bridge_greptimedb_connector).

all() ->
    emqx_common_test_helpers:all(?MODULE).

groups() ->
    [].

init_per_suite(Config) ->
    GreptimedbTCPHost = os:getenv("GREPTIMEDB_GRPCV1_TCP_HOST", "toxiproxy"),
    GreptimedbTCPPort = list_to_integer(os:getenv("GREPTIMEDB_GRPCV1_TCP_PORT", "4001")),
    Servers = [{GreptimedbTCPHost, GreptimedbTCPPort}],
    case emqx_common_test_helpers:is_all_tcp_servers_available(Servers) of
        true ->
            ok = emqx_common_test_helpers:start_apps([emqx_conf]),
            ok = emqx_connector_test_helpers:start_apps([emqx_resource]),
            {ok, _} = application:ensure_all_started(emqx_connector),
            {ok, _} = application:ensure_all_started(greptimedb),
            [
                {greptimedb_tcp_host, GreptimedbTCPHost},
                {greptimedb_tcp_port, GreptimedbTCPPort}
                | Config
            ];
        false ->
            case os:getenv("IS_CI") of
                "yes" ->
                    throw(no_greptimedb);
                _ ->
                    {skip, no_greptimedb}
            end
    end.

end_per_suite(_Config) ->
    ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:stop_apps([emqx_resource]),
    _ = application:stop(emqx_connector),
    _ = application:stop(greptimedb),
    ok.

init_per_testcase(_, Config) ->
    Config.

end_per_testcase(_, _Config) ->
    ok.

% %%------------------------------------------------------------------------------
% %% Testcases
% %%------------------------------------------------------------------------------

t_lifecycle(Config) ->
    Host = ?config(greptimedb_tcp_host, Config),
    Port = ?config(greptimedb_tcp_port, Config),
    perform_lifecycle_check(
        <<"emqx_bridge_greptimedb_connector_SUITE">>,
        greptimedb_config(Host, Port)
    ).

perform_lifecycle_check(PoolName, InitialConfig) ->
    {ok, #{config := CheckedConfig}} =
        emqx_resource:check_config(?GREPTIMEDB_RESOURCE_MOD, InitialConfig),
    % We need to add a write_syntax to the config since the connector
    % expects this
    FullConfig = CheckedConfig#{write_syntax => greptimedb_write_syntax()},
    {ok, #{
        state := #{client := #{pool := ReturnedPoolName}} = State,
        status := InitialStatus
    }} = emqx_resource:create_local(
        PoolName,
        ?CONNECTOR_RESOURCE_GROUP,
        ?GREPTIMEDB_RESOURCE_MOD,
        FullConfig,
        #{}
    ),
    ?assertEqual(InitialStatus, connected),
    % Instance should match the state and status of the just started resource
    {ok, ?CONNECTOR_RESOURCE_GROUP, #{
        state := State,
        status := InitialStatus
    }} =
        emqx_resource:get_instance(PoolName),
    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
    % % Perform query as further check that the resource is working as expected
    ?assertMatch({ok, _}, emqx_resource:query(PoolName, test_query())),
    ?assertEqual(ok, emqx_resource:stop(PoolName)),
    % Resource will be listed still, but state will be changed and healthcheck will fail
    % as the worker no longer exists.
    {ok, ?CONNECTOR_RESOURCE_GROUP, #{
        state := State,
        status := StoppedStatus
    }} =
        emqx_resource:get_instance(PoolName),
    ?assertEqual(stopped, StoppedStatus),
    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)),
    % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
    % Can call stop/1 again on an already stopped instance
    ?assertEqual(ok, emqx_resource:stop(PoolName)),
    % Make sure it can be restarted and the healthchecks and queries work properly
    ?assertEqual(ok, emqx_resource:restart(PoolName)),
    % async restart, need to wait for the resource
    timer:sleep(500),
    {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
        emqx_resource:get_instance(PoolName),
    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
    ?assertMatch({ok, _}, emqx_resource:query(PoolName, test_query())),
    % Stop and remove the resource in one go.
    ?assertEqual(ok, emqx_resource:remove_local(PoolName)),
    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
    % Should not even be able to get the resource data out of ets now unlike just stopping.
    ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)).

% %%------------------------------------------------------------------------------
% %% Helpers
% %%------------------------------------------------------------------------------

greptimedb_config(Host, Port) ->
    Server = list_to_binary(io_lib:format("~s:~b", [Host, Port])),
    ResourceConfig = #{
        <<"dbname">> => <<"public">>,
        <<"server">> => Server,
        <<"username">> => <<"greptime_user">>,
        <<"password">> => <<"greptime_pwd">>
    },
    #{<<"config">> => ResourceConfig}.

greptimedb_write_syntax() ->
    [
        #{
            measurement => "${topic}",
            tags => [{"clientid", "${clientid}"}],
            fields => [{"payload", "${payload}"}],
            timestamp => undefined
        }
    ].

test_query() ->
    {send_message, #{
        <<"clientid">> => <<"something">>,
        <<"payload">> => #{bool => true},
        <<"topic">> => <<"connector_test">>,
        <<"timestamp">> => 1678220316257
    }}.
|
@ -0,0 +1,348 @@
|
|||
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_greptimedb_tests).

-include_lib("eunit/include/eunit.hrl").

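%% These cases exercise the parsing of InfluxDB-line-protocol-style
%% write_syntax templates (via the to_influx_lines/1 wrapper at the
%% bottom of this module).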
-define(INVALID_LINES, [
    " ",
    " \n",
    " \n\n\n ",
    "\n",
    "  \n\n    \n  \n",
    "measurement",
    "measurement ",
    "measurement,tag",
    "measurement field",
    "measurement,tag field",
    "measurement,tag field ${timestamp}",
    "measurement,tag=",
    "measurement,tag=tag1",
    "measurement,tag =",
    "measurement field=",
    "measurement field= ",
    "measurement field = ",
    "measurement, tag = field = ",
    "measurement, tag = field = ",
    "measurement, tag = tag_val field = field_val",
    "measurement, tag = tag_val field = field_val ${timestamp}",
    "measurement,= = ${timestamp}",
    "measurement,t=a, f=a, ${timestamp}",
    "measurement,t=a,t1=b, f=a,f1=b, ${timestamp}",
    "measurement,t=a,t1=b, f=a,f1=b,",
    "measurement,t=a, t1=b, f=a,f1=b,",
    "measurement,t=a,,t1=b, f=a,f1=b,",
    "measurement,t=a,,t1=b f=a,,f1=b",
    "measurement,t=a,,t1=b f=a,f1=b ${timestamp}",
    "measurement, f=a,f1=b",
    "measurement, f=a,f1=b ${timestamp}",
    "measurement,, f=a,f1=b ${timestamp}",
    "measurement,, f=a,f1=b",
    "measurement,, f=a,f1=b,, ${timestamp}",
    "measurement f=a,f1=b,, ${timestamp}",
    "measurement,t=a f=a,f1=b,, ${timestamp}",
    "measurement,t=a f=a,f1=b,, ",
    "measurement,t=a f=a,f1=b,,",
    "measurement, t=a f=a,f1=b",
    "measurement,t=a f=a, f1=b",
    "measurement,t=a f=a, f1=b ${timestamp}",
    "measurement, t=a f=a, f1=b ${timestamp}",
    "measurement,t= a f=a,f1=b ${timestamp}",
    "measurement,t= a f=a,f1 =b ${timestamp}",
    "measurement, t = a f = a,f1 = b ${timestamp}",
    "measurement,t=a f=a,f1=b \n ${timestamp}",
    "measurement,t=a \n f=a,f1=b \n ${timestamp}",
    "measurement,t=a \n f=a,f1=b \n ",
    "\n measurement,t=a \n f=a,f1=b \n ${timestamp}",
    "\n measurement,t=a \n f=a,f1=b \n",
    %% a non-escaped backslash in a quoted field value is invalid
    "measurement,tag=1 field=\"val\\1\""
]).

-define(VALID_LINE_PARSED_PAIRS, [
    {"m1,tag=tag1 field=field1 ${timestamp1}", #{
        measurement => "m1",
        tags => [{"tag", "tag1"}],
        fields => [{"field", "field1"}],
        timestamp => "${timestamp1}"
    }},
    {"m2,tag=tag2 field=field2", #{
        measurement => "m2",
        tags => [{"tag", "tag2"}],
        fields => [{"field", "field2"}],
        timestamp => undefined
    }},
    {"m3 field=field3 ${timestamp3}", #{
        measurement => "m3",
        tags => [],
        fields => [{"field", "field3"}],
        timestamp => "${timestamp3}"
    }},
    {"m4 field=field4", #{
        measurement => "m4",
        tags => [],
        fields => [{"field", "field4"}],
        timestamp => undefined
    }},
    {"m5,tag=tag5,tag_a=tag5a,tag_b=tag5b field=field5,field_a=field5a,field_b=field5b ${timestamp5}",
        #{
            measurement => "m5",
            tags => [{"tag", "tag5"}, {"tag_a", "tag5a"}, {"tag_b", "tag5b"}],
            fields => [{"field", "field5"}, {"field_a", "field5a"}, {"field_b", "field5b"}],
            timestamp => "${timestamp5}"
        }},
    {"m6,tag=tag6,tag_a=tag6a,tag_b=tag6b field=field6,field_a=field6a,field_b=field6b", #{
        measurement => "m6",
        tags => [{"tag", "tag6"}, {"tag_a", "tag6a"}, {"tag_b", "tag6b"}],
        fields => [{"field", "field6"}, {"field_a", "field6a"}, {"field_b", "field6b"}],
        timestamp => undefined
    }},
    {"m7,tag=tag7,tag_a=\"tag7a\",tag_b=tag7b field=\"field7\",field_a=field7a,field_b=\"field7b\"",
        #{
            measurement => "m7",
            tags => [{"tag", "tag7"}, {"tag_a", "\"tag7a\""}, {"tag_b", "tag7b"}],
            fields => [{"field", "field7"}, {"field_a", "field7a"}, {"field_b", "field7b"}],
            timestamp => undefined
        }},
    {"m8,tag=tag8,tag_a=\"tag8a\",tag_b=tag8b field=\"field8\",field_a=field8a,field_b=\"field8b\" ${timestamp8}",
        #{
            measurement => "m8",
            tags => [{"tag", "tag8"}, {"tag_a", "\"tag8a\""}, {"tag_b", "tag8b"}],
            fields => [{"field", "field8"}, {"field_a", "field8a"}, {"field_b", "field8b"}],
            timestamp => "${timestamp8}"
        }},
    {"m9,tag=tag9,tag_a=\"tag9a\",tag_b=tag9b field=\"field9\",field_a=field9a,field_b=\"\" ${timestamp9}",
        #{
            measurement => "m9",
            tags => [{"tag", "tag9"}, {"tag_a", "\"tag9a\""}, {"tag_b", "tag9b"}],
            fields => [{"field", "field9"}, {"field_a", "field9a"}, {"field_b", ""}],
            timestamp => "${timestamp9}"
        }},
    {"m10 field=\"\" ${timestamp10}", #{
        measurement => "m10",
        tags => [],
        fields => [{"field", ""}],
        timestamp => "${timestamp10}"
    }}
]).

-define(VALID_LINE_EXTRA_SPACES_PARSED_PAIRS, [
    {"\n m1,tag=tag1 field=field1 ${timestamp1}  \n", #{
        measurement => "m1",
        tags => [{"tag", "tag1"}],
        fields => [{"field", "field1"}],
        timestamp => "${timestamp1}"
    }},
    {"  m2,tag=tag2 field=field2  ", #{
        measurement => "m2",
        tags => [{"tag", "tag2"}],
        fields => [{"field", "field2"}],
        timestamp => undefined
    }},
    {" m3 field=field3 ${timestamp3} ", #{
        measurement => "m3",
        tags => [],
        fields => [{"field", "field3"}],
        timestamp => "${timestamp3}"
    }},
    {" \n m4 field=field4\n ", #{
        measurement => "m4",
        tags => [],
        fields => [{"field", "field4"}],
        timestamp => undefined
    }},
    {" \n m5,tag=tag5,tag_a=tag5a,tag_b=tag5b field=field5,field_a=field5a,field_b=field5b ${timestamp5} \n",
        #{
            measurement => "m5",
            tags => [{"tag", "tag5"}, {"tag_a", "tag5a"}, {"tag_b", "tag5b"}],
            fields => [{"field", "field5"}, {"field_a", "field5a"}, {"field_b", "field5b"}],
            timestamp => "${timestamp5}"
        }},
    {" m6,tag=tag6,tag_a=tag6a,tag_b=tag6b field=field6,field_a=field6a,field_b=field6b\n ", #{
        measurement => "m6",
        tags => [{"tag", "tag6"}, {"tag_a", "tag6a"}, {"tag_b", "tag6b"}],
        fields => [{"field", "field6"}, {"field_a", "field6a"}, {"field_b", "field6b"}],
        timestamp => undefined
    }}
]).

-define(VALID_LINE_PARSED_ESCAPED_CHARS_PAIRS, [
    {"m\\ =1\\,,\\,tag\\ \\==\\=tag\\ 1\\, \\,fie\\ ld\\ =\\ field\\,1 ${timestamp1}", #{
        measurement => "m =1,",
        tags => [{",tag =", "=tag 1,"}],
        fields => [{",fie ld ", " field,1"}],
        timestamp => "${timestamp1}"
    }},
    {"m2,tag=tag2 field=\"field \\\"2\\\",\n\"", #{
        measurement => "m2",
        tags => [{"tag", "tag2"}],
        fields => [{"field", "field \"2\",\n"}],
        timestamp => undefined
    }},
    {"m\\ 3 field=\"field3\" ${payload.timestamp\\ 3}", #{
        measurement => "m 3",
        tags => [],
        fields => [{"field", "field3"}],
        timestamp => "${payload.timestamp 3}"
    }},
    {"m4 field=\"\\\"field\\\\4\\\"\"", #{
        measurement => "m4",
        tags => [],
        fields => [{"field", "\"field\\4\""}],
        timestamp => undefined
    }},
    {
        "m5\\,mA,tag=\\=tag5\\=,\\,tag_a\\,=tag\\ 5a,tag_b=tag5b \\ field\\ =field5,"
        "field\\ _\\ a=field5a,\\,field_b\\ =\\=\\,\\ field5b ${timestamp5}",
        #{
            measurement => "m5,mA",
            tags => [{"tag", "=tag5="}, {",tag_a,", "tag 5a"}, {"tag_b", "tag5b"}],
            fields => [
                {" field ", "field5"}, {"field _ a", "field5a"}, {",field_b ", "=, field5b"}
            ],
            timestamp => "${timestamp5}"
        }
    },
    {"m6,tag=tag6,tag_a=tag6a,tag_b=tag6b field=\"field6\",field_a=\"field6a\",field_b=\"field6b\"",
        #{
            measurement => "m6",
            tags => [{"tag", "tag6"}, {"tag_a", "tag6a"}, {"tag_b", "tag6b"}],
            fields => [{"field", "field6"}, {"field_a", "field6a"}, {"field_b", "field6b"}],
            timestamp => undefined
        }},
    {
        "\\ \\ m7\\ \\ ,tag=\\ tag\\,7\\ ,tag_a=\"tag7a\",tag_b\\,tag1=tag7b field=\"field7\","
        "field_a=field7a,field_b=\"field7b\\\\\n\"",
        #{
            measurement => "  m7  ",
            tags => [{"tag", " tag,7 "}, {"tag_a", "\"tag7a\""}, {"tag_b,tag1", "tag7b"}],
            fields => [{"field", "field7"}, {"field_a", "field7a"}, {"field_b", "field7b\\\n"}],
            timestamp => undefined
        }
    },
    {
        "m8,tag=tag8,tag_a=\"tag8a\",tag_b=tag8b field=\"field8\",field_a=field8a,"
        "field_b=\"\\\"field\\\" = 8b\" ${timestamp8}",
        #{
            measurement => "m8",
            tags => [{"tag", "tag8"}, {"tag_a", "\"tag8a\""}, {"tag_b", "tag8b"}],
            fields => [{"field", "field8"}, {"field_a", "field8a"}, {"field_b", "\"field\" = 8b"}],
            timestamp => "${timestamp8}"
        }
    },
    {"m\\9,tag=tag9,tag_a=\"tag9a\",tag_b=tag9b field\\=field=\"field9\",field_a=field9a,field_b=\"\" ${timestamp9}",
        #{
            measurement => "m\\9",
            tags => [{"tag", "tag9"}, {"tag_a", "\"tag9a\""}, {"tag_b", "tag9b"}],
            fields => [{"field=field", "field9"}, {"field_a", "field9a"}, {"field_b", ""}],
            timestamp => "${timestamp9}"
        }},
    {"m\\,10 \"field\\\\\"=\"\" ${timestamp10}", #{
        measurement => "m,10",
        tags => [],
        %% backslash should not be un-escaped in tag key
        fields => [{"\"field\\\\\"", ""}],
        timestamp => "${timestamp10}"
    }}
]).

-define(VALID_LINE_PARSED_ESCAPED_CHARS_EXTRA_SPACES_PAIRS, [
    {" \n m\\ =1\\,,\\,tag\\ \\==\\=tag\\ 1\\, \\,fie\\ ld\\ =\\ field\\,1 ${timestamp1} ", #{
        measurement => "m =1,",
        tags => [{",tag =", "=tag 1,"}],
        fields => [{",fie ld ", " field,1"}],
        timestamp => "${timestamp1}"
    }},
    {" m2,tag=tag2 field=\"field \\\"2\\\",\n\" ", #{
        measurement => "m2",
        tags => [{"tag", "tag2"}],
        fields => [{"field", "field \"2\",\n"}],
        timestamp => undefined
    }},
    {" m\\ 3 field=\"field3\" ${payload.timestamp\\ 3} ", #{
        measurement => "m 3",
        tags => [],
        fields => [{"field", "field3"}],
        timestamp => "${payload.timestamp 3}"
    }},
    {" m4 field=\"\\\"field\\\\4\\\"\" ", #{
        measurement => "m4",
        tags => [],
        fields => [{"field", "\"field\\4\""}],
        timestamp => undefined
    }},
    {
        " m5\\,mA,tag=\\=tag5\\=,\\,tag_a\\,=tag\\ 5a,tag_b=tag5b \\ field\\ =field5,"
        "field\\ _\\ a=field5a,\\,field_b\\ =\\=\\,\\ field5b ${timestamp5} ",
        #{
            measurement => "m5,mA",
            tags => [{"tag", "=tag5="}, {",tag_a,", "tag 5a"}, {"tag_b", "tag5b"}],
            fields => [
                {" field ", "field5"}, {"field _ a", "field5a"}, {",field_b ", "=, field5b"}
            ],
            timestamp => "${timestamp5}"
        }
    },
    {" m6,tag=tag6,tag_a=tag6a,tag_b=tag6b field=\"field6\",field_a=\"field6a\",field_b=\"field6b\" ",
        #{
            measurement => "m6",
            tags => [{"tag", "tag6"}, {"tag_a", "tag6a"}, {"tag_b", "tag6b"}],
            fields => [{"field", "field6"}, {"field_a", "field6a"}, {"field_b", "field6b"}],
            timestamp => undefined
        }}
]).

invalid_write_syntax_line_test_() ->
    [?_assertThrow(_, to_influx_lines(L)) || L <- ?INVALID_LINES].

invalid_write_syntax_multiline_test_() ->
    LinesList = [
        join("\n", ?INVALID_LINES),
        join("\n\n\n", ?INVALID_LINES),
        join("\n\n", lists:reverse(?INVALID_LINES))
    ],
    [?_assertThrow(_, to_influx_lines(Lines)) || Lines <- LinesList].

valid_write_syntax_test_() ->
    test_pairs(?VALID_LINE_PARSED_PAIRS).

valid_write_syntax_with_extra_spaces_test_() ->
    test_pairs(?VALID_LINE_EXTRA_SPACES_PARSED_PAIRS).

valid_write_syntax_escaped_chars_test_() ->
    test_pairs(?VALID_LINE_PARSED_ESCAPED_CHARS_PAIRS).

valid_write_syntax_escaped_chars_with_extra_spaces_test_() ->
    test_pairs(?VALID_LINE_PARSED_ESCAPED_CHARS_EXTRA_SPACES_PAIRS).

test_pairs(PairsList) ->
    {Lines, AllExpected} = lists:unzip(PairsList),
    JoinedLines = join("\n", Lines),
    JoinedLines1 = join("\n\n\n", Lines),
    JoinedLines2 = join("\n\n", lists:reverse(Lines)),
    SingleLineTests =
        [
            ?_assertEqual([Expected], to_influx_lines(Line))
         || {Line, Expected} <- PairsList
        ],
    JoinedLinesTests =
        [
            ?_assertEqual(AllExpected, to_influx_lines(JoinedLines)),
            ?_assertEqual(AllExpected, to_influx_lines(JoinedLines1)),
            ?_assertEqual(lists:reverse(AllExpected), to_influx_lines(JoinedLines2))
        ],
    SingleLineTests ++ JoinedLinesTests.

join(Sep, LinesList) ->
    lists:flatten(lists:join(Sep, LinesList)).

to_influx_lines(RawLines) ->
    OldLevel = emqx_logger:get_primary_log_level(),
    try
        %% mute error logs from this call
        emqx_logger:set_primary_log_level(none),
        emqx_bridge_greptimedb:to_influx_lines(RawLines)
    after
        emqx_logger:set_primary_log_level(OldLevel)
    end.
|
@ -215,6 +215,7 @@ bridge_async_config(#{port := Port} = Config) ->
|
|||
    ConnectTimeout = maps:get(connect_timeout, Config, "1s"),
    RequestTimeout = maps:get(request_timeout, Config, "10s"),
    ResumeInterval = maps:get(resume_interval, Config, "1s"),
    HealthCheckInterval = maps:get(health_check_interval, Config, "200ms"),
    ResourceRequestTTL = maps:get(resource_request_ttl, Config, "infinity"),
    LocalTopic =
        case maps:find(local_topic, Config) of
|
@ -239,7 +240,7 @@ bridge_async_config(#{port := Port} = Config) ->
|
|||
        " body = \"${id}\"\n"
        " resource_opts {\n"
        " inflight_window = 100\n"
        " health_check_interval = \"200ms\"\n"
        " health_check_interval = \"~s\"\n"
        " max_buffer_bytes = \"1GB\"\n"
        " query_mode = \"~s\"\n"
        " request_ttl = \"~p\"\n"
|
@ -262,6 +263,7 @@ bridge_async_config(#{port := Port} = Config) ->
|
|||
        LocalTopic,
        PoolSize,
        RequestTimeout,
        HealthCheckInterval,
        QueryMode,
        ResourceRequestTTL,
        ResumeInterval
|
@ -358,19 +360,27 @@ t_send_async_connection_timeout(Config) ->
|
|||
        port => Port,
        pool_size => 1,
        query_mode => "async",
        connect_timeout => integer_to_list(ResponseDelayMS * 2) ++ "s",
        connect_timeout => integer_to_list(ResponseDelayMS * 2) ++ "ms",
        request_timeout => "10s",
        resume_interval => "200ms",
        health_check_interval => "200ms",
        resource_request_ttl => "infinity"
    }),
    ResourceId = emqx_bridge_resource:resource_id(BridgeID),
    ?retry(
        _Interval0 = 200,
        _NAttempts0 = 20,
        ?assertMatch({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    NumberOfMessagesToSend = 10,
    [
        emqx_bridge:send_message(BridgeID, #{<<"id">> => Id})
     || Id <- lists:seq(1, NumberOfMessagesToSend)
    ],
    %% Make sure server recive all messages
    %% Make sure server receives all messages
    ct:pal("Sent messages\n"),
    MessageIDs = maps:from_keys(lists:seq(1, NumberOfMessagesToSend), void),
    receive_request_notifications(MessageIDs, ResponseDelayMS),
    receive_request_notifications(MessageIDs, ResponseDelayMS, []),
    ok.

t_async_free_retries(Config) ->
|
@ -632,15 +642,16 @@ do_t_async_retries(TestContext, Error, Fn) ->
|
|||
    ),
    ok.

receive_request_notifications(MessageIDs, _ResponseDelay) when map_size(MessageIDs) =:= 0 ->
receive_request_notifications(MessageIDs, _ResponseDelay, _Acc) when map_size(MessageIDs) =:= 0 ->
    ok;
receive_request_notifications(MessageIDs, ResponseDelay) ->
receive_request_notifications(MessageIDs, ResponseDelay, Acc) ->
    receive
        {http_server, received, Req} ->
            RemainingMessageIDs = remove_message_id(MessageIDs, Req),
            receive_request_notifications(RemainingMessageIDs, ResponseDelay)
            receive_request_notifications(RemainingMessageIDs, ResponseDelay, [Req | Acc])
    after (30 * 1000) ->
        ct:pal("Waited to long time but did not get any message\n"),
        ct:pal("Waited a long time but did not get any message"),
        ct:pal("Messages received so far:\n  ~p", [Acc]),
        ct:fail("All requests did not reach server at least once")
    end.

|
@ -0,0 +1,94 @@
|
|||
Business Source License 1.1

Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0

For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact

Notice

The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.

License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.

-----------------------------------------------------------------------------

Business Source License 1.1

Terms

The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.

Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.

If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.

All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.

You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.

Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.

This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).

TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.

MariaDB hereby grants you permission to use this License’s text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.

Covenants of Licensor

In consideration of the right to use this License’s text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:

1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.

2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.

3. To specify a Change Date.

4. Not to modify this License in any other way.
|
@ -0,0 +1,22 @@
|
|||
# Amazon Kinesis Data Integration Bridge

This application houses the Amazon Kinesis Producer data
integration bridge for EMQX Enterprise Edition. It provides the means to
connect to Amazon Kinesis Data Streams and publish messages to it.

# Documentation links

For more information about Amazon Kinesis Data Streams, please see its
[official site](https://aws.amazon.com/kinesis/data-streams/).

# Configurations

Please see [Ingest Data into Kinesis](https://docs.emqx.com/en/enterprise/v5.1/data-integration/data-bridge-kinesis.html) for more detailed info.
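
For orientation, a minimal producer bridge configuration could look roughly
like the sketch below. The field names come from the schema added in this
change; the concrete values (stream name, credentials, endpoint) are
illustrative assumptions only, so consult the documentation above for
authoritative settings.

```hocon
bridges.kinesis_producer.my_kinesis_bridge {
  enable = true
  # illustrative credentials, replace with your own
  aws_access_key_id = "AKIA..."
  aws_secret_access_key = "******"
  endpoint = "https://kinesis.us-east-1.amazonaws.com"
  stream_name = "my-stream"
  partition_key = "${clientid}"
  local_topic = "t/kinesis/#"
  payload_template = "${payload}"
}
```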

# Contributing

Please see our [contributing.md](../../CONTRIBUTING.md).

# License

EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).
|
@ -0,0 +1,2 @@
|
|||
toxiproxy
kinesis
|
@ -0,0 +1,11 @@
|
|||
%% -*- mode: erlang; -*-
{erl_opts, [debug_info]}.
{deps, [ {erlcloud, {git, "https://github.com/emqx/erlcloud", {tag, "3.7.0-emqx-2"}}}
       , {emqx_connector, {path, "../../apps/emqx_connector"}}
       , {emqx_resource, {path, "../../apps/emqx_resource"}}
       , {emqx_bridge, {path, "../../apps/emqx_bridge"}}
       ]}.

{shell, [
    {apps, [emqx_bridge_kinesis]}
]}.
|
@ -0,0 +1,13 @@
|
|||
{application, emqx_bridge_kinesis, [
    {description, "EMQX Enterprise Amazon Kinesis Bridge"},
    {vsn, "0.1.0"},
    {registered, []},
    {applications, [
        kernel,
        stdlib,
        erlcloud
    ]},
    {env, []},
    {modules, []},
    {links, []}
]}.
|
@@ -0,0 +1,167 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------

-module(emqx_bridge_kinesis).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").

%% hocon_schema API
-export([
    namespace/0,
    roots/0,
    fields/1,
    desc/1
]).

-export([
    conn_bridge_examples/1
]).

%%-------------------------------------------------------------------------------------------------
%% `hocon_schema' API
%%-------------------------------------------------------------------------------------------------

namespace() ->
    "bridge_kinesis".

roots() ->
    [].

fields("config_producer") ->
    emqx_bridge_schema:common_bridge_fields() ++
        emqx_resource_schema:fields("resource_opts") ++
        fields(connector_config) ++ fields(producer);
fields(connector_config) ->
    [
        {aws_access_key_id,
            mk(
                binary(),
                #{
                    required => true,
                    desc => ?DESC("aws_access_key_id")
                }
            )},
        {aws_secret_access_key,
            mk(
                binary(),
                #{
                    required => true,
                    desc => ?DESC("aws_secret_access_key"),
                    sensitive => true
                }
            )},
        {endpoint,
            mk(
                binary(),
                #{
                    default => <<"https://kinesis.us-east-1.amazonaws.com">>,
                    desc => ?DESC("endpoint")
                }
            )},
        {max_retries,
            mk(
                non_neg_integer(),
                #{
                    required => false,
                    default => 2,
                    desc => ?DESC("max_retries")
                }
            )},
        {pool_size,
            sc(
                pos_integer(),
                #{
                    default => 8,
                    desc => ?DESC("pool_size")
                }
            )}
    ];
fields(producer) ->
    [
        {payload_template,
            sc(
                binary(),
                #{
                    default => <<>>,
                    desc => ?DESC("payload_template")
                }
            )},
        {local_topic,
            sc(
                binary(),
                #{
                    desc => ?DESC("local_topic")
                }
            )},
        {stream_name,
            sc(
                binary(),
                #{
                    required => true,
                    desc => ?DESC("stream_name")
                }
            )},
        {partition_key,
            sc(
                binary(),
                #{
                    required => true,
                    desc => ?DESC("partition_key")
                }
            )}
    ];
fields("get_producer") ->
    emqx_bridge_schema:status_fields() ++ fields("post_producer");
fields("post_producer") ->
    [type_field_producer(), name_field() | fields("config_producer")];
fields("put_producer") ->
    fields("config_producer").

desc("config_producer") ->
    ?DESC("desc_config");
desc(_) ->
    undefined.

conn_bridge_examples(Method) ->
    [
        #{
            <<"kinesis_producer">> => #{
                summary => <<"Amazon Kinesis Producer Bridge">>,
                value => values(producer, Method)
            }
        }
    ].

values(producer, _Method) ->
    #{
        aws_access_key_id => <<"aws_access_key_id">>,
        aws_secret_access_key => <<"******">>,
        endpoint => <<"https://kinesis.us-east-1.amazonaws.com">>,
        max_retries => 3,
        stream_name => <<"stream_name">>,
        partition_key => <<"key">>,
        resource_opts => #{
            worker_pool_size => 1,
            health_check_interval => 15000,
            query_mode => async,
            inflight_window => 100,
            max_buffer_bytes => 100 * 1024 * 1024
        }
    }.

%%-------------------------------------------------------------------------------------------------
%% Helper fns
%%-------------------------------------------------------------------------------------------------

sc(Type, Meta) -> hoconsc:mk(Type, Meta).

mk(Type, Meta) -> hoconsc:mk(Type, Meta).

enum(OfSymbols) -> hoconsc:enum(OfSymbols).

type_field_producer() ->
    {type, mk(enum([kinesis_producer]), #{required => true, desc => ?DESC("desc_type")})}.

name_field() ->
    {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.
@@ -0,0 +1,178 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------

-module(emqx_bridge_kinesis_connector_client).

-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").
-include_lib("erlcloud/include/erlcloud_aws.hrl").

-behaviour(gen_server).

-type state() :: #{
    instance_id := resource_id(),
    partition_key := binary(),
    stream_name := binary()
}.
-type record() :: {Data :: binary(), PartitionKey :: binary()}.

-define(DEFAULT_PORT, 443).

%% API
-export([
    start_link/1,
    connection_status/1,
    query/2
]).

%% gen_server callbacks
-export([
    init/1,
    handle_call/3,
    handle_cast/2,
    handle_info/2,
    terminate/2,
    code_change/3
]).

-ifdef(TEST).
-export([execute/2]).
-endif.

%% The default timeout for Kinesis API calls is 10 seconds, but the default
%% `gen_server:call' timeout is only 5 seconds, so we raise the call timeout
%% above the API timeout.
-define(HEALTH_CHECK_TIMEOUT, 15000).

%%%===================================================================
%%% API
%%%===================================================================
connection_status(Pid) ->
    try
        gen_server:call(Pid, connection_status, ?HEALTH_CHECK_TIMEOUT)
    catch
        _:_ ->
            {error, timeout}
    end.

query(Pid, Records) ->
    gen_server:call(Pid, {query, Records}, infinity).

%%--------------------------------------------------------------------
%% @doc
%% Starts a bridge worker that communicates with Amazon Kinesis Data Streams.
%% @end
%%--------------------------------------------------------------------
start_link(Options) ->
    gen_server:start_link(?MODULE, Options, []).

%%%===================================================================
%%% gen_server callbacks
%%%===================================================================

%% Initialize the Kinesis connector
-spec init(emqx_bridge_kinesis_impl_producer:config()) -> {ok, state()}.
init(#{
    aws_access_key_id := AwsAccessKey,
    aws_secret_access_key := AwsSecretAccessKey,
    endpoint := Endpoint,
    partition_key := PartitionKey,
    stream_name := StreamName,
    max_retries := MaxRetries,
    instance_id := InstanceId
}) ->
    process_flag(trap_exit, true),

    #{scheme := Scheme, hostname := Host, port := Port} =
        emqx_schema:parse_server(
            Endpoint,
            #{
                default_port => ?DEFAULT_PORT,
                supported_schemes => ["http", "https"]
            }
        ),
    State = #{
        instance_id => InstanceId,
        partition_key => PartitionKey,
        stream_name => StreamName
    },
    New =
        fun(AccessKeyID, SecretAccessKey, HostAddr, HostPort, ConnectionScheme) ->
            Config0 = erlcloud_kinesis:new(
                AccessKeyID,
                SecretAccessKey,
                HostAddr,
                HostPort,
                ConnectionScheme ++ "://"
            ),
            Config0#aws_config{retry_num = MaxRetries}
        end,
    erlcloud_config:configure(
        to_str(AwsAccessKey), to_str(AwsSecretAccessKey), Host, Port, Scheme, New
    ),
    {ok, State}.

handle_call(connection_status, _From, #{stream_name := StreamName} = State) ->
    Status =
        case erlcloud_kinesis:describe_stream(StreamName) of
            {ok, _} ->
                {ok, connected};
            {error, {<<"ResourceNotFoundException">>, _}} ->
                {error, unhealthy_target};
            Error ->
                {error, Error}
        end,
    {reply, Status, State};
handle_call({query, Records}, _From, #{stream_name := StreamName} = State) ->
    Result = do_query(StreamName, Records),
    {reply, Result, State};
handle_call(_Request, _From, State) ->
    {reply, {error, unknown_call}, State}.

handle_cast(_Request, State) ->
    {noreply, State}.

handle_info(_Info, State) ->
    {noreply, State}.

terminate(Reason, #{instance_id := InstanceId} = _State) ->
    ?tp(kinesis_stop, #{instance_id => InstanceId, reason => Reason}),
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%%%===================================================================
%%% Internal functions
%%%===================================================================

-spec do_query(binary(), [record()]) ->
    {ok, jsx:json_term() | binary()}
    | {error, {unrecoverable_error, term()}}
    | {error, term()}.
do_query(StreamName, Records) ->
    try
        execute(put_record, {StreamName, Records})
    catch
        _Type:Reason ->
            {error, {unrecoverable_error, {invalid_request, Reason}}}
    end.

-spec execute(put_record, {binary(), [record()]}) ->
    {ok, jsx:json_term() | binary()}
    | {error, term()}.
execute(put_record, {StreamName, [{Data, PartitionKey}] = Record}) ->
    Result = erlcloud_kinesis:put_record(StreamName, PartitionKey, Data),
    ?tp(kinesis_put_record, #{records => Record, result => Result}),
    Result;
execute(put_record, {StreamName, Items}) when is_list(Items) ->
    Result = erlcloud_kinesis:put_records(StreamName, Items),
    ?tp(kinesis_put_record, #{records => Items, result => Result}),
    Result.

-spec to_str(list() | binary()) -> list().
to_str(List) when is_list(List) ->
    List;
to_str(Bin) when is_binary(Bin) ->
    erlang:binary_to_list(Bin).
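
For orientation, a minimal usage sketch of the client above (the option values
are illustrative; the keys are exactly the ones matched in init/1, and query/2
takes a list of {Data, PartitionKey} records):

    {ok, Pid} = emqx_bridge_kinesis_connector_client:start_link(#{
        aws_access_key_id => <<"my_access_key_id">>,
        aws_secret_access_key => <<"my_secret_access_key">>,
        endpoint => <<"https://kinesis.us-east-1.amazonaws.com">>,
        partition_key => <<"key">>,
        stream_name => <<"my_stream">>,
        max_retries => 2,
        instance_id => <<"bridge:kinesis_producer:my_kinesis">>
    }),
    {ok, connected} = emqx_bridge_kinesis_connector_client:connection_status(Pid),
    {ok, _} = emqx_bridge_kinesis_connector_client:query(Pid, [{<<"data">>, <<"key">>}]).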
@@ -0,0 +1,247 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------

-module(emqx_bridge_kinesis_impl_producer).

-include_lib("emqx/include/logger.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").

-define(HEALTH_CHECK_TIMEOUT, 15000).
-define(TOPIC_MESSAGE,
    "Kinesis stream is invalid. Please check if the stream exists in the Kinesis account."
).

-type config() :: #{
    aws_access_key_id := binary(),
    aws_secret_access_key := binary(),
    endpoint := binary(),
    stream_name := binary(),
    partition_key := binary(),
    payload_template := binary(),
    max_retries := non_neg_integer(),
    pool_size := non_neg_integer(),
    instance_id => resource_id(),
    any() => term()
}.
-type templates() :: #{
    partition_key := list(),
    send_message := list()
}.
-type state() :: #{
    pool_name := resource_id(),
    templates := templates()
}.
-export_type([config/0]).

%% `emqx_resource' API
-export([
    callback_mode/0,
    on_start/2,
    on_stop/2,
    on_query/3,
    on_batch_query/3,
    on_get_status/2
]).

-export([
    connect/1
]).

%%-------------------------------------------------------------------------------------------------
%% `emqx_resource' API
%%-------------------------------------------------------------------------------------------------

callback_mode() -> always_sync.

-spec on_start(resource_id(), config()) -> {ok, state()} | {error, term()}.
on_start(
    InstanceId,
    #{
        pool_size := PoolSize
    } = Config0
) ->
    ?SLOG(info, #{
        msg => "starting_kinesis_bridge",
        connector => InstanceId,
        config => redact(Config0)
    }),
    Config = Config0#{instance_id => InstanceId},
    Options = [
        {config, Config},
        {pool_size, PoolSize}
    ],
    Templates = parse_template(Config),
    State = #{
        pool_name => InstanceId,
        templates => Templates
    },

    case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of
        ok ->
            ?tp(emqx_bridge_kinesis_impl_producer_start_ok, #{config => Config}),
            {ok, State};
        Error ->
            ?tp(emqx_bridge_kinesis_impl_producer_start_failed, #{config => Config}),
            Error
    end.

-spec on_stop(resource_id(), state()) -> ok | {error, term()}.
on_stop(InstanceId, _State) ->
    emqx_resource_pool:stop(InstanceId).

-spec on_get_status(resource_id(), state()) ->
    connected | disconnected | {disconnected, state(), {unhealthy_target, string()}}.
on_get_status(_InstanceId, #{pool_name := Pool} = State) ->
    case
        emqx_resource_pool:health_check_workers(
            Pool,
            {emqx_bridge_kinesis_connector_client, connection_status, []},
            ?HEALTH_CHECK_TIMEOUT,
            #{return_values => true}
        )
    of
        {ok, Values} ->
            AllOk = lists:all(fun(S) -> S =:= {ok, connected} end, Values),
            case AllOk of
                true ->
                    connected;
                false ->
                    Unhealthy = lists:any(fun(S) -> S =:= {error, unhealthy_target} end, Values),
                    case Unhealthy of
                        true -> {disconnected, State, {unhealthy_target, ?TOPIC_MESSAGE}};
                        false -> disconnected
                    end
            end;
        {error, _} ->
            disconnected
    end.

-spec on_query(
    resource_id(),
    {send_message, map()},
    state()
) ->
    {ok, map()}
    | {error, {recoverable_error, term()}}
    | {error, term()}.
on_query(ResourceId, {send_message, Message}, State) ->
    Requests = [{send_message, Message}],
    ?tp(emqx_bridge_kinesis_impl_producer_sync_query, #{message => Message}),
    do_send_requests_sync(ResourceId, Requests, State).

-spec on_batch_query(
    resource_id(),
    [{send_message, map()}],
    state()
) ->
    {ok, map()}
    | {error, {recoverable_error, term()}}
    | {error, term()}.
%% We only support batch insert.
on_batch_query(ResourceId, [{send_message, _} | _] = Requests, State) ->
    ?tp(emqx_bridge_kinesis_impl_producer_sync_batch_query, #{requests => Requests}),
    do_send_requests_sync(ResourceId, Requests, State).

connect(Opts) ->
    Options = proplists:get_value(config, Opts),
    emqx_bridge_kinesis_connector_client:start_link(Options).

%%-------------------------------------------------------------------------------------------------
%% Helper fns
%%-------------------------------------------------------------------------------------------------

-spec do_send_requests_sync(
    resource_id(),
    [{send_message, map()}],
    state()
) ->
    {ok, jsx:json_term() | binary()}
    | {error, {recoverable_error, term()}}
    | {error, {unrecoverable_error, {invalid_request, term()}}}
    | {error, {unrecoverable_error, {unhealthy_target, string()}}}
    | {error, {unrecoverable_error, term()}}
    | {error, term()}.
do_send_requests_sync(
    InstanceId,
    Requests,
    #{pool_name := PoolName, templates := Templates}
) ->
    Records = render_records(Requests, Templates),
    Result = ecpool:pick_and_do(
        PoolName,
        {emqx_bridge_kinesis_connector_client, query, [Records]},
        no_handover
    ),
    handle_result(Result, Requests, InstanceId).

handle_result({ok, _} = Result, _Requests, _InstanceId) ->
    Result;
handle_result({error, {<<"ResourceNotFoundException">>, _} = Reason}, Requests, InstanceId) ->
    ?SLOG(error, #{
        msg => "kinesis_error_response",
        request => Requests,
        connector => InstanceId,
        reason => Reason
    }),
    {error, {unrecoverable_error, {unhealthy_target, ?TOPIC_MESSAGE}}};
handle_result(
    {error, {<<"ProvisionedThroughputExceededException">>, _} = Reason}, Requests, InstanceId
) ->
    ?SLOG(error, #{
        msg => "kinesis_error_response",
        request => Requests,
        connector => InstanceId,
        reason => Reason
    }),
    {error, {recoverable_error, Reason}};
handle_result({error, {<<"InvalidArgumentException">>, _} = Reason}, Requests, InstanceId) ->
    ?SLOG(error, #{
        msg => "kinesis_error_response",
        request => Requests,
        connector => InstanceId,
        reason => Reason
    }),
    {error, {unrecoverable_error, Reason}};
handle_result({error, {econnrefused = Reason, _}}, Requests, InstanceId) ->
    ?SLOG(error, #{
        msg => "kinesis_error_response",
        request => Requests,
        connector => InstanceId,
        reason => Reason
    }),
    {error, {recoverable_error, Reason}};
handle_result({error, Reason} = Error, Requests, InstanceId) ->
    ?SLOG(error, #{
        msg => "kinesis_error_response",
        request => Requests,
        connector => InstanceId,
        reason => Reason
    }),
    Error.

parse_template(Config) ->
    #{payload_template := PayloadTemplate, partition_key := PartitionKeyTemplate} = Config,
    Templates = #{send_message => PayloadTemplate, partition_key => PartitionKeyTemplate},
    maps:map(fun(_K, V) -> emqx_placeholder:preproc_tmpl(V) end, Templates).

render_records(Items, Templates) ->
    PartitionKeyTemplate = maps:get(partition_key, Templates),
    MsgTemplate = maps:get(send_message, Templates),
    render_messages(Items, {MsgTemplate, PartitionKeyTemplate}, []).

render_messages([], _Templates, RenderedMsgs) ->
    RenderedMsgs;
render_messages(
    [{send_message, Msg} | Others],
    {MsgTemplate, PartitionKeyTemplate} = Templates,
    RenderedMsgs
) ->
    Data = emqx_placeholder:proc_tmpl(MsgTemplate, Msg),
    PartitionKey = emqx_placeholder:proc_tmpl(PartitionKeyTemplate, Msg),
    RenderedMsg = {Data, PartitionKey},
    render_messages(Others, Templates, [RenderedMsg | RenderedMsgs]).

redact(Config) ->
    emqx_utils:redact(Config, fun(Any) -> Any =:= aws_secret_access_key end).
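
A sketch of what the templating above does at runtime (the message map is
illustrative; emqx_placeholder:preproc_tmpl/1 and proc_tmpl/2 are the same
calls used in parse_template/1 and render_messages/3):

    KeyTemplate = emqx_placeholder:preproc_tmpl(<<"${payload.key}">>),
    Msg = #{payload => #{<<"key">> => <<"my_key">>}},
    %% proc_tmpl/2 renders the tokens against the message map to a binary.
    <<"my_key">> = emqx_placeholder:proc_tmpl(KeyTemplate, Msg).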
@@ -0,0 +1,817 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------

-module(emqx_bridge_kinesis_impl_producer_SUITE).

-compile(nowarn_export_all).
-compile(export_all).

-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").

-define(PRODUCER, emqx_bridge_kinesis_impl_producer).
-define(BRIDGE_TYPE, kinesis_producer).
-define(BRIDGE_TYPE_BIN, <<"kinesis_producer">>).
-define(KINESIS_PORT, 4566).
-define(TOPIC, <<"t/topic">>).

%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------

all() ->
    [
        {group, with_batch},
        {group, without_batch}
    ].

groups() ->
    TCs = emqx_common_test_helpers:all(?MODULE),
    [
        {with_batch, TCs},
        {without_batch, TCs}
    ].

init_per_suite(Config) ->
    ProxyHost = os:getenv("PROXY_HOST", "toxiproxy.emqx.net"),
    ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
    ProxyName = "kinesis",
    ok = emqx_common_test_helpers:start_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_bridge, emqx_rule_engine]),
    {ok, _} = application:ensure_all_started(emqx_connector),
    emqx_mgmt_api_test_util:init_suite(),
    [
        {proxy_host, ProxyHost},
        {proxy_port, ProxyPort},
        {kinesis_port, ?KINESIS_PORT},
        {proxy_name, ProxyName}
        | Config
    ].

end_per_suite(_Config) ->
    emqx_mgmt_api_test_util:end_suite(),
    ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:stop_apps([emqx_bridge, emqx_resource, emqx_rule_engine]),
    _ = application:stop(emqx_connector),
    ok.

init_per_group(with_batch, Config) ->
    [{batch_size, 100} | Config];
init_per_group(without_batch, Config) ->
    [{batch_size, 1} | Config];
init_per_group(_Group, Config) ->
    Config.

end_per_group(_Group, _Config) ->
    ok.

init_per_testcase(TestCase, Config0) ->
    ok = snabbkaffe:start_trace(),
    ProxyHost = ?config(proxy_host, Config0),
    ProxyPort = ?config(proxy_port, Config0),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    TimeTrap =
        case TestCase of
            t_wrong_server -> 60;
            _ -> 30
        end,
    ct:timetrap({seconds, TimeTrap}),
    delete_all_bridges(),
    Tid = install_telemetry_handler(TestCase),
    put(telemetry_table, Tid),
    Config = generate_config(Config0),
    create_stream(Config),
    [{telemetry_table, Tid} | Config].

end_per_testcase(_TestCase, Config) ->
    ok = snabbkaffe:stop(),
    delete_all_bridges(),
    delete_stream(Config),
    emqx_common_test_helpers:call_janitor(),
    ok.

%%------------------------------------------------------------------------------
%% Helper fns
%%------------------------------------------------------------------------------

generate_config(Config0) ->
    #{
        name := Name,
        config_string := ConfigString,
        kinesis_config := KinesisConfig
    } = kinesis_config(Config0),
    Endpoint = map_get(<<"endpoint">>, KinesisConfig),
    #{scheme := Scheme, hostname := Host, port := Port} =
        emqx_schema:parse_server(
            Endpoint,
            #{
                default_port => 443,
                supported_schemes => ["http", "https"]
            }
        ),
    ErlcloudConfig = erlcloud_kinesis:new("access_key", "secret", Host, Port, Scheme ++ "://"),
    ResourceId = emqx_bridge_resource:resource_id(?BRIDGE_TYPE_BIN, Name),
    BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, Name),
    [
        {kinesis_name, Name},
        {connection_scheme, Scheme},
        {kinesis_config, KinesisConfig},
        {kinesis_config_string, ConfigString},
        {resource_id, ResourceId},
        {bridge_id, BridgeId},
        {erlcloud_config, ErlcloudConfig}
        | Config0
    ].

kinesis_config(Config) ->
    QueryMode = proplists:get_value(query_mode, Config, async),
    Scheme = proplists:get_value(connection_scheme, Config, "http"),
    ProxyHost = proplists:get_value(proxy_host, Config),
    KinesisPort = proplists:get_value(kinesis_port, Config),
    BatchSize = proplists:get_value(batch_size, Config, 100),
    BatchTime = proplists:get_value(batch_time, Config, <<"500ms">>),
    PayloadTemplate = proplists:get_value(payload_template, Config, "${payload}"),
    StreamName = proplists:get_value(stream_name, Config, <<"mystream">>),
    PartitionKey = proplists:get_value(partition_key, Config, <<"key">>),
    MaxRetries = proplists:get_value(max_retries, Config, 3),
    GUID = emqx_guid:to_hexstr(emqx_guid:gen()),
    Name = <<(atom_to_binary(?MODULE))/binary, (GUID)/binary>>,
    ConfigString =
        io_lib:format(
            "bridges.kinesis_producer.~s {\n"
            "  enable = true\n"
            "  aws_access_key_id = \"aws_access_key_id\"\n"
            "  aws_secret_access_key = \"aws_secret_access_key\"\n"
            "  endpoint = \"~s://~s:~b\"\n"
            "  stream_name = \"~s\"\n"
            "  partition_key = \"~s\"\n"
            "  payload_template = \"~s\"\n"
            "  max_retries = ~b\n"
            "  pool_size = 1\n"
            "  resource_opts = {\n"
            "    health_check_interval = \"3s\"\n"
            "    request_ttl = 30s\n"
            "    resume_interval = 1s\n"
            "    metrics_flush_interval = \"700ms\"\n"
            "    worker_pool_size = 1\n"
            "    query_mode = ~s\n"
            "    batch_size = ~b\n"
            "    batch_time = \"~s\"\n"
            "  }\n"
            "}\n",
            [
                Name,
                Scheme,
                ProxyHost,
                KinesisPort,
                StreamName,
                PartitionKey,
                PayloadTemplate,
                MaxRetries,
                QueryMode,
                BatchSize,
                BatchTime
            ]
        ),
    #{
        name => Name,
        config_string => ConfigString,
        kinesis_config => parse_and_check(ConfigString, Name)
    }.

parse_and_check(ConfigString, Name) ->
    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
    TypeBin = <<"kinesis_producer">>,
    hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
    #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf,
    Config.

delete_all_bridges() ->
    ct:pal("deleting all bridges"),
    lists:foreach(
        fun(#{name := Name, type := Type}) ->
            emqx_bridge:remove(Type, Name)
        end,
        emqx_bridge:list()
    ).

delete_bridge(Config) ->
    Type = ?BRIDGE_TYPE,
    Name = ?config(kinesis_name, Config),
    ct:pal("deleting bridge ~p", [{Type, Name}]),
    emqx_bridge:remove(Type, Name).

create_bridge_http(Config) ->
    create_bridge_http(Config, _KinesisConfigOverrides = #{}).

create_bridge_http(Config, KinesisConfigOverrides) ->
    TypeBin = ?BRIDGE_TYPE_BIN,
    Name = ?config(kinesis_name, Config),
    KinesisConfig0 = ?config(kinesis_config, Config),
    KinesisConfig = emqx_utils_maps:deep_merge(KinesisConfig0, KinesisConfigOverrides),
    Params = KinesisConfig#{<<"type">> => TypeBin, <<"name">> => Name},
    Path = emqx_mgmt_api_test_util:api_path(["bridges"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    ProbePath = emqx_mgmt_api_test_util:api_path(["bridges_probe"]),
    ProbeResult = emqx_mgmt_api_test_util:request_api(post, ProbePath, "", AuthHeader, Params),
    ct:pal("creating bridge (via http): ~p", [Params]),
    ct:pal("probe result: ~p", [ProbeResult]),
    Res =
        case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of
            {ok, Res0} -> {ok, emqx_utils_json:decode(Res0, [return_maps])};
            Error -> Error
        end,
    ct:pal("bridge creation result: ~p", [Res]),
    ?assertEqual(element(1, ProbeResult), element(1, Res)),
    Res.

create_bridge(Config) ->
    create_bridge(Config, _KinesisConfigOverrides = #{}).

create_bridge(Config, KinesisConfigOverrides) ->
    TypeBin = ?BRIDGE_TYPE_BIN,
    Name = ?config(kinesis_name, Config),
    KinesisConfig0 = ?config(kinesis_config, Config),
    KinesisConfig = emqx_utils_maps:deep_merge(KinesisConfig0, KinesisConfigOverrides),
    ct:pal("creating bridge: ~p", [KinesisConfig]),
    Res = emqx_bridge:create(TypeBin, Name, KinesisConfig),
    ct:pal("bridge creation result: ~p", [Res]),
    Res.

create_rule_and_action_http(Config) ->
    BridgeId = ?config(bridge_id, Config),
    Params = #{
        enable => true,
        sql => <<"SELECT * FROM \"", ?TOPIC/binary, "\"">>,
        actions => [BridgeId]
    },
    Path = emqx_mgmt_api_test_util:api_path(["rules"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of
        {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])};
        Error -> Error
    end.

create_stream(Config) ->
    KinesisConfig = ?config(kinesis_config, Config),
    ErlcloudConfig = ?config(erlcloud_config, Config),
    StreamName = map_get(<<"stream_name">>, KinesisConfig),
    {ok, _} = application:ensure_all_started(erlcloud),
    delete_stream(StreamName, ErlcloudConfig),
    {ok, _} = erlcloud_kinesis:create_stream(StreamName, 1, ErlcloudConfig),
    ?retry(
        _Sleep = 100,
        _Attempts = 10,
        begin
            {ok, [{<<"StreamDescription">>, StreamInfo}]} =
                erlcloud_kinesis:describe_stream(StreamName, ErlcloudConfig),
            ?assertEqual(
                <<"ACTIVE">>,
                proplists:get_value(<<"StreamStatus">>, StreamInfo)
            )
        end
    ),
    ok.

delete_stream(Config) ->
    KinesisConfig = ?config(kinesis_config, Config),
    ErlcloudConfig = ?config(erlcloud_config, Config),
    StreamName = map_get(<<"stream_name">>, KinesisConfig),
    {ok, _} = application:ensure_all_started(erlcloud),
    delete_stream(StreamName, ErlcloudConfig),
    ok.

delete_stream(StreamName, ErlcloudConfig) ->
    case erlcloud_kinesis:delete_stream(StreamName, ErlcloudConfig) of
        {ok, _} ->
            ?retry(
                _Sleep = 100,
                _Attempts = 10,
                ?assertMatch(
                    {error, {<<"ResourceNotFoundException">>, _}},
                    erlcloud_kinesis:describe_stream(StreamName, ErlcloudConfig)
                )
            );
        _ ->
            ok
    end,
    ok.

wait_record(Config, ShardIt, Timeout, Attempts) ->
    [Record] = wait_records(Config, ShardIt, 1, Timeout, Attempts),
    Record.

wait_records(Config, ShardIt, Count, Timeout, Attempts) ->
    ErlcloudConfig = ?config(erlcloud_config, Config),
    ?retry(
        Timeout,
        Attempts,
        begin
            {ok, Ret} = erlcloud_kinesis:get_records(ShardIt, ErlcloudConfig),
            Records = proplists:get_value(<<"Records">>, Ret),
            Count = length(Records),
            Records
        end
    ).

get_shard_iterator(Config) ->
    get_shard_iterator(Config, 1).

get_shard_iterator(Config, Index) ->
    KinesisConfig = ?config(kinesis_config, Config),
    ErlcloudConfig = ?config(erlcloud_config, Config),
    StreamName = map_get(<<"stream_name">>, KinesisConfig),
    {ok, [{<<"Shards">>, Shards}]} = erlcloud_kinesis:list_shards(StreamName, ErlcloudConfig),
    Shard = lists:nth(Index, lists:sort(Shards)),
    ShardId = proplists:get_value(<<"ShardId">>, Shard),
    {ok, [{<<"ShardIterator">>, ShardIt}]} =
        erlcloud_kinesis:get_shard_iterator(StreamName, ShardId, <<"LATEST">>, ErlcloudConfig),
    ShardIt.

install_telemetry_handler(TestCase) ->
    Tid = ets:new(TestCase, [ordered_set, public]),
    HandlerId = TestCase,
    TestPid = self(),
    _ = telemetry:attach_many(
        HandlerId,
        emqx_resource_metrics:events(),
        fun(EventName, Measurements, Metadata, _Config) ->
            Data = #{
                name => EventName,
                measurements => Measurements,
                metadata => Metadata
            },
            ets:insert(Tid, {erlang:monotonic_time(), Data}),
            TestPid ! {telemetry, Data},
            ok
        end,
        unused_config
    ),
    emqx_common_test_helpers:on_exit(fun() ->
        telemetry:detach(HandlerId),
        ets:delete(Tid)
    end),
    Tid.

current_metrics(ResourceId) ->
    Mapping = metrics_mapping(),
    maps:from_list([
        {Metric, F(ResourceId)}
     || {Metric, F} <- maps:to_list(Mapping)
    ]).

metrics_mapping() ->
    #{
        dropped => fun emqx_resource_metrics:dropped_get/1,
        dropped_expired => fun emqx_resource_metrics:dropped_expired_get/1,
        dropped_other => fun emqx_resource_metrics:dropped_other_get/1,
        dropped_queue_full => fun emqx_resource_metrics:dropped_queue_full_get/1,
        dropped_resource_not_found => fun emqx_resource_metrics:dropped_resource_not_found_get/1,
        dropped_resource_stopped => fun emqx_resource_metrics:dropped_resource_stopped_get/1,
        late_reply => fun emqx_resource_metrics:late_reply_get/1,
        failed => fun emqx_resource_metrics:failed_get/1,
        inflight => fun emqx_resource_metrics:inflight_get/1,
        matched => fun emqx_resource_metrics:matched_get/1,
        queuing => fun emqx_resource_metrics:queuing_get/1,
        retried => fun emqx_resource_metrics:retried_get/1,
        retried_failed => fun emqx_resource_metrics:retried_failed_get/1,
        retried_success => fun emqx_resource_metrics:retried_success_get/1,
        success => fun emqx_resource_metrics:success_get/1
    }.

assert_metrics(ExpectedMetrics, ResourceId) ->
    Mapping = metrics_mapping(),
    Metrics =
        lists:foldl(
            fun(Metric, Acc) ->
                #{Metric := Fun} = Mapping,
                Value = Fun(ResourceId),
                Acc#{Metric => Value}
            end,
            #{},
            maps:keys(ExpectedMetrics)
        ),
    CurrentMetrics = current_metrics(ResourceId),
    TelemetryTable = get(telemetry_table),
    RecordedEvents = ets:tab2list(TelemetryTable),
    ?assertEqual(ExpectedMetrics, Metrics, #{
        current_metrics => CurrentMetrics, recorded_events => RecordedEvents
    }),
    ok.

assert_empty_metrics(ResourceId) ->
    Mapping = metrics_mapping(),
    ExpectedMetrics =
        lists:foldl(
            fun(Metric, Acc) ->
                Acc#{Metric => 0}
            end,
            #{},
            maps:keys(Mapping)
        ),
    assert_metrics(ExpectedMetrics, ResourceId).

wait_telemetry_event(TelemetryTable, EventName, ResourceId) ->
    wait_telemetry_event(TelemetryTable, EventName, ResourceId, #{timeout => 5_000, n_events => 1}).

wait_telemetry_event(
    TelemetryTable,
    EventName,
    ResourceId,
    _Opts = #{
        timeout := Timeout,
        n_events := NEvents
    }
) ->
    wait_n_events(TelemetryTable, ResourceId, NEvents, Timeout, EventName).

wait_n_events(_TelemetryTable, _ResourceId, NEvents, _Timeout, _EventName) when NEvents =< 0 ->
    ok;
wait_n_events(TelemetryTable, ResourceId, NEvents, Timeout, EventName) ->
    receive
        {telemetry, #{name := [_, _, EventName], measurements := #{counter_inc := Inc}} = Event} ->
            ct:pal("telemetry event: ~p", [Event]),
            wait_n_events(TelemetryTable, ResourceId, NEvents - Inc, Timeout, EventName)
    after Timeout ->
        RecordedEvents = ets:tab2list(TelemetryTable),
        CurrentMetrics = current_metrics(ResourceId),
        ct:pal("recorded events: ~p", [RecordedEvents]),
        ct:pal("current metrics: ~p", [CurrentMetrics]),
        error({timeout_waiting_for_telemetry, EventName})
    end.

wait_until_gauge_is(GaugeName, ExpectedValue, Timeout) ->
    Events = receive_all_events(GaugeName, Timeout),
    case length(Events) > 0 andalso lists:last(Events) of
        #{measurements := #{gauge_set := ExpectedValue}} ->
            ok;
        #{measurements := #{gauge_set := Value}} ->
            ct:pal("events: ~p", [Events]),
            ct:fail(
                "gauge ~p didn't reach expected value ~p; last value: ~p",
                [GaugeName, ExpectedValue, Value]
            );
        false ->
            ct:pal("no ~p gauge events received!", [GaugeName])
    end.

receive_all_events(EventName, Timeout) ->
    receive_all_events(EventName, Timeout, _MaxEvents = 10, _Count = 0, _Acc = []).

receive_all_events(_EventName, _Timeout, MaxEvents, Count, Acc) when Count >= MaxEvents ->
    lists:reverse(Acc);
receive_all_events(EventName, Timeout, MaxEvents, Count, Acc) ->
    receive
        {telemetry, #{name := [_, _, EventName]} = Event} ->
            receive_all_events(EventName, Timeout, MaxEvents, Count + 1, [Event | Acc])
    after Timeout ->
        lists:reverse(Acc)
    end.

to_str(List) when is_list(List) ->
    List;
to_str(Bin) when is_binary(Bin) ->
    erlang:binary_to_list(Bin);
to_str(Int) when is_integer(Int) ->
    erlang:integer_to_list(Int).

to_bin(Str) when is_list(Str) ->
    erlang:list_to_binary(Str).

%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------

t_create_via_http(Config) ->
    ?assertMatch({ok, _}, create_bridge_http(Config)),
    ok.

t_start_failed_then_fix(Config) ->
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    ProxyName = ?config(proxy_name, Config),
    ResourceId = ?config(resource_id, Config),
    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
        ct:sleep(1000),
        ?wait_async_action(
            create_bridge(Config),
            #{?snk_kind := emqx_bridge_kinesis_impl_producer_start_failed},
            20_000
        )
    end),
    ?retry(
        _Sleep1 = 1_000,
        _Attempts1 = 30,
        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    ok.

t_stop(Config) ->
    Name = ?config(kinesis_name, Config),
    {ok, _} = create_bridge(Config),
    ?check_trace(
        ?wait_async_action(
            emqx_bridge_resource:stop(?BRIDGE_TYPE, Name),
            #{?snk_kind := kinesis_stop},
            5_000
        ),
        fun(Trace) ->
            ?assertMatch([_], ?of_kind(kinesis_stop, Trace)),
            ok
        end
    ),
    ok.

t_get_status_ok(Config) ->
    ResourceId = ?config(resource_id, Config),
    {ok, _} = create_bridge(Config),
    ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)),
    ok.

t_create_unhealthy(Config) ->
    delete_stream(Config),
    ResourceId = ?config(resource_id, Config),
    {ok, _} = create_bridge(Config),
    ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)),
    ?assertMatch(
        {ok, _, #{error := {unhealthy_target, _}}},
        emqx_resource_manager:lookup_cached(ResourceId)
    ),
    ok.

t_get_status_unhealthy(Config) ->
    delete_stream(Config),
    ResourceId = ?config(resource_id, Config),
    {ok, _} = create_bridge(Config),
    ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)),
    ?assertMatch(
        {ok, _, #{error := {unhealthy_target, _}}},
        emqx_resource_manager:lookup_cached(ResourceId)
    ),
    ok.

t_publish_success(Config) ->
    ResourceId = ?config(resource_id, Config),
    TelemetryTable = ?config(telemetry_table, Config),
    ?assertMatch({ok, _}, create_bridge(Config)),
    {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
    emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
    assert_empty_metrics(ResourceId),
    ShardIt = get_shard_iterator(Config),
    Payload = <<"payload">>,
    Message = emqx_message:make(?TOPIC, Payload),
    emqx:publish(Message),
    %% to avoid test flakiness
    wait_telemetry_event(TelemetryTable, success, ResourceId),
    wait_until_gauge_is(queuing, 0, 500),
    wait_until_gauge_is(inflight, 0, 500),
    assert_metrics(
        #{
            dropped => 0,
            failed => 0,
            inflight => 0,
            matched => 1,
            queuing => 0,
            retried => 0,
            success => 1
        },
        ResourceId
    ),
    Record = wait_record(Config, ShardIt, 100, 10),
    ?assertEqual(Payload, proplists:get_value(<<"Data">>, Record)),
    ok.

t_publish_success_with_template(Config) ->
    ResourceId = ?config(resource_id, Config),
    TelemetryTable = ?config(telemetry_table, Config),
    Overrides =
        #{
            <<"payload_template">> => <<"${payload.data}">>,
            <<"partition_key">> => <<"${payload.key}">>
        },
    ?assertMatch({ok, _}, create_bridge(Config, Overrides)),
    {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
    emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
    assert_empty_metrics(ResourceId),
    ShardIt = get_shard_iterator(Config),
    Payload = <<"{\"key\":\"my_key\", \"data\":\"my_data\"}">>,
    Message = emqx_message:make(?TOPIC, Payload),
    emqx:publish(Message),
    %% to avoid test flakiness
    wait_telemetry_event(TelemetryTable, success, ResourceId),
    wait_until_gauge_is(queuing, 0, 500),
    wait_until_gauge_is(inflight, 0, 500),
    assert_metrics(
        #{
            dropped => 0,
            failed => 0,
            inflight => 0,
            matched => 1,
            queuing => 0,
            retried => 0,
            success => 1
        },
        ResourceId
    ),
    Record = wait_record(Config, ShardIt, 100, 10),
    ?assertEqual(<<"my_data">>, proplists:get_value(<<"Data">>, Record)),
    ok.

t_publish_multiple_msgs_success(Config) ->
    ResourceId = ?config(resource_id, Config),
    TelemetryTable = ?config(telemetry_table, Config),
    ?assertMatch({ok, _}, create_bridge(Config)),
    {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
    emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
    assert_empty_metrics(ResourceId),
    ShardIt = get_shard_iterator(Config),
    lists:foreach(
        fun(I) ->
            Payload = "payload_" ++ to_str(I),
            Message = emqx_message:make(?TOPIC, Payload),
            emqx:publish(Message)
        end,
        lists:seq(1, 10)
    ),
    Records = wait_records(Config, ShardIt, 10, 100, 10),
    ReceivedPayloads =
        lists:map(fun(Record) -> proplists:get_value(<<"Data">>, Record) end, Records),
    lists:foreach(
        fun(I) ->
            ExpectedPayload = to_bin("payload_" ++ to_str(I)),
            ?assertEqual(
                {ExpectedPayload, true},
                {ExpectedPayload, lists:member(ExpectedPayload, ReceivedPayloads)}
            )
        end,
        lists:seq(1, 10)
    ),
    %% to avoid test flakiness
    wait_telemetry_event(TelemetryTable, success, ResourceId),
    wait_until_gauge_is(queuing, 0, 500),
    wait_until_gauge_is(inflight, 0, 500),
    assert_metrics(
        #{
            dropped => 0,
            failed => 0,
            inflight => 0,
            matched => 10,
            queuing => 0,
            retried => 0,
            success => 10
        },
        ResourceId
    ),
    ok.

t_publish_unhealthy(Config) ->
    ResourceId = ?config(resource_id, Config),
    TelemetryTable = ?config(telemetry_table, Config),
    ?assertMatch({ok, _}, create_bridge(Config)),
    {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
    emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
    assert_empty_metrics(ResourceId),
    ShardIt = get_shard_iterator(Config),
    Payload = <<"payload">>,
    Message = emqx_message:make(?TOPIC, Payload),
    delete_stream(Config),
    emqx:publish(Message),
    ?assertError(
        {badmatch, {error, {<<"ResourceNotFoundException">>, _}}},
        wait_record(Config, ShardIt, 100, 10)
    ),
    %% to avoid test flakiness
    wait_telemetry_event(TelemetryTable, failed, ResourceId),
    wait_until_gauge_is(queuing, 0, 500),
    wait_until_gauge_is(inflight, 0, 500),
    assert_metrics(
        #{
            dropped => 0,
            failed => 1,
            inflight => 0,
            matched => 1,
            queuing => 0,
            retried => 0,
            success => 0
        },
        ResourceId
    ),
    ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)),
    ?assertMatch(
        {ok, _, #{error := {unhealthy_target, _}}},
        emqx_resource_manager:lookup_cached(ResourceId)
    ),
    ok.

t_publish_big_msg(Config) ->
    ResourceId = ?config(resource_id, Config),
    TelemetryTable = ?config(telemetry_table, Config),
    ?assertMatch({ok, _}, create_bridge(Config)),
    {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
    emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
    assert_empty_metrics(ResourceId),
    % Maximum size is 1MB. Using 1MB + 1 here.
    Payload = binary:copy(<<"a">>, 1 * 1024 * 1024 + 1),
    Message = emqx_message:make(?TOPIC, Payload),
    emqx:publish(Message),
    %% to avoid test flakiness
    wait_telemetry_event(TelemetryTable, failed, ResourceId),
    wait_until_gauge_is(queuing, 0, 500),
    wait_until_gauge_is(inflight, 0, 500),
    assert_metrics(
        #{
            dropped => 0,
            failed => 1,
            inflight => 0,
            matched => 1,
            queuing => 0,
            retried => 0,
            success => 0
        },
        ResourceId
    ),
    ok.

t_publish_connection_down(Config0) ->
    Config = generate_config([{max_retries, 2} | Config0]),
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    ProxyName = ?config(proxy_name, Config),
    ResourceId = ?config(resource_id, Config),
    TelemetryTable = ?config(telemetry_table, Config),
    ?assertMatch({ok, _}, create_bridge(Config)),
    {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
    ?retry(
        _Sleep1 = 1_000,
        _Attempts1 = 30,
        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
    assert_empty_metrics(ResourceId),
    ShardIt = get_shard_iterator(Config),
    Payload = <<"payload">>,
    Message = emqx_message:make(?TOPIC, Payload),
    Kind =
        case proplists:get_value(batch_size, Config) of
            1 -> emqx_bridge_kinesis_impl_producer_sync_query;
            _ -> emqx_bridge_kinesis_impl_producer_sync_batch_query
        end,
    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
        ct:sleep(1000),
        ?wait_async_action(
            emqx:publish(Message),
            #{?snk_kind := Kind},
            5_000
        ),
        ct:sleep(1000)
    end),
    % Wait for reconnection.
    ?retry(
        _Sleep3 = 1_000,
        _Attempts3 = 20,
        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    Record = wait_record(Config, ShardIt, 2000, 10),
    %% to avoid test flakiness
    wait_telemetry_event(TelemetryTable, retried_success, ResourceId),
    wait_until_gauge_is(queuing, 0, 500),
    wait_until_gauge_is(inflight, 0, 500),
    assert_metrics(
        #{
            dropped => 0,
            failed => 0,
            inflight => 0,
            matched => 1,
            queuing => 0,
            retried => 1,
            success => 1,
            retried_success => 1
        },
        ResourceId
    ),
    Data = proplists:get_value(<<"Data">>, Record),
    ?assertEqual(Payload, Data),
    ok.

t_wrong_server(Config) ->
    Name = ?config(kinesis_name, Config),
    ResourceId = ?config(resource_id, Config),
    Overrides =
        #{
            <<"max_retries">> => 0,
            <<"endpoint">> => <<"https://wrong_server:12345">>,
            <<"resource_opts">> => #{
                <<"health_check_interval">> => <<"60s">>
            }
        },
    ?wait_async_action(
        create_bridge(Config, Overrides),
        #{?snk_kind := emqx_bridge_kinesis_impl_producer_start_ok},
        30_000
    ),
    ?assertEqual({error, timeout}, emqx_resource_manager:health_check(ResourceId)),
    emqx_bridge_resource:stop(?BRIDGE_TYPE, Name),
    emqx_bridge_resource:remove(?BRIDGE_TYPE, Name),
    ok.
@@ -16,6 +16,7 @@
-module(emqx_ds).

%% API:
-export([ensure_shard/2]).
%% Messages:
-export([message_store/2, message_store/1, message_stats/0]).
%% Iterator:
@@ -79,6 +80,18 @@
%% API functions
%%================================================================================

-spec ensure_shard(shard(), emqx_ds_storage_layer:options()) ->
    ok | {error, _Reason}.
ensure_shard(Shard, Options) ->
    case emqx_ds_storage_layer_sup:start_shard(Shard, Options) of
        {ok, _Pid} ->
            ok;
        {error, {already_started, _Pid}} ->
            ok;
        {error, Reason} ->
            {error, Reason}
    end.

%%--------------------------------------------------------------------------------
%% Message
%%--------------------------------------------------------------------------------
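
A usage sketch for the new two-argument API (the shard name and directory are
illustrative; the dir option comes from emqx_ds_storage_layer:options()
introduced further below in this commit):

    ok = emqx_ds:ensure_shard(<<"my-shard">>, #{dir => "/var/lib/emqx/ds/my-shard"}),
    %% Repeated calls are idempotent: {error, {already_started, _}} is mapped to ok.
    ok = emqx_ds:ensure_shard(<<"my-shard">>, #{dir => "/var/lib/emqx/ds/my-shard"}).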
@@ -4,6 +4,8 @@

-module(emqx_ds_app).

-dialyzer({nowarn_function, storage/0}).

-export([start/2]).

-include("emqx_ds_int.hrl").
@@ -13,13 +15,22 @@ start(_Type, _Args) ->
    emqx_ds_sup:start_link().

init_mnesia() ->
    %% FIXME: This is a temporary workaround to avoid crashes when starting on Windows
    ok = mria:create_table(
        ?SESSION_TAB,
        [
            {rlog_shard, ?DS_SHARD},
            {type, set},
            {storage, rocksdb_copies},
            {storage, storage()},
            {record_name, session},
            {attributes, record_info(fields, session)}
        ]
    ).

storage() ->
    case mria:rocksdb_backend_available() of
        true ->
            rocksdb_copies;
        _ ->
            disc_copies
    end.
@@ -175,7 +175,7 @@
    cf :: rocksdb:cf_handle(),
    keymapper :: keymapper(),
    write_options = [{sync, true}] :: emqx_ds_storage_layer:db_write_options(),
    read_options = [] :: emqx_ds_storage_layer:db_write_options()
    read_options = [] :: emqx_ds_storage_layer:db_read_options()
}).

-record(it, {
@@ -6,7 +6,7 @@
-behaviour(gen_server).

%% API:
-export([start_link/1]).
-export([start_link/2]).
-export([create_generation/3]).

-export([store/5]).
@@ -18,7 +18,8 @@
%% behaviour callbacks:
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).

-export_type([cf_refs/0, gen_id/0, db_write_options/0, state/0, iterator/0]).
-export_type([cf_refs/0, gen_id/0, options/0, state/0, iterator/0]).
-export_type([db_options/0, db_write_options/0, db_read_options/0]).

-compile({inline, [meta_lookup/2]}).
@@ -26,10 +27,16 @@
%% Type declarations
%%================================================================================

%% see rocksdb:db_options()
% -type options() :: proplists:proplist().
-type options() :: #{
    dir => file:filename()
}.

%% see rocksdb:db_options()
-type db_options() :: proplists:proplist().
%% see rocksdb:write_options()
-type db_write_options() :: proplists:proplist().
%% see rocksdb:read_options()
-type db_read_options() :: proplists:proplist().

-type cf_refs() :: [{string(), rocksdb:cf_handle()}].
@@ -110,18 +117,16 @@
%% API functions
%%================================================================================

-spec start_link(emqx_ds:shard()) -> {ok, pid()}.
start_link(Shard) ->
    gen_server:start_link(?REF(Shard), ?MODULE, [Shard], []).
-spec start_link(emqx_ds:shard(), emqx_ds_storage_layer:options()) -> {ok, pid()}.
start_link(Shard, Options) ->
    gen_server:start_link(?REF(Shard), ?MODULE, {Shard, Options}, []).

-spec create_generation(emqx_ds:shard(), emqx_ds:time(), emqx_ds_conf:backend_config()) ->
    {ok, gen_id()} | {error, nonmonotonic}.
create_generation(Shard, Since, Config = {_Module, _Options}) ->
    gen_server:call(?REF(Shard), {create_generation, Since, Config}).

-spec store(
    emqx_ds:shard(), emqx_guid:guid(), emqx_ds:time(), emqx_ds:topic(), binary()
) ->
-spec store(emqx_ds:shard(), emqx_guid:guid(), emqx_ds:time(), emqx_ds:topic(), binary()) ->
    ok | {error, _}.
store(Shard, GUID, Time, Topic, Msg) ->
    {_GenId, #{module := Mod, data := Data}} = meta_lookup_gen(Shard, Time),
@@ -181,9 +186,9 @@ discard_iterator(Shard, ReplayID) ->
%% behaviour callbacks
%%================================================================================

init([Shard]) ->
init({Shard, Options}) ->
    process_flag(trap_exit, true),
    {ok, S0} = open_db(Shard),
    {ok, S0} = open_db(Shard, Options),
    S = ensure_current_generation(S0),
    ok = populate_metadata(S),
    {ok, S}.
@@ -265,16 +270,17 @@ create_gen(GenId, Since, {Module, Options}, S = #s{db = DBHandle, cf_generations
    },
    {ok, Gen, S#s{cf_generations = NewCFs ++ CFs}}.

-spec open_db(emqx_ds:shard()) -> {ok, state()} | {error, _TODO}.
open_db(Shard) ->
    Filename = binary_to_list(Shard),
-spec open_db(emqx_ds:shard(), options()) -> {ok, state()} | {error, _TODO}.
open_db(Shard, Options) ->
    DBDir = unicode:characters_to_list(maps:get(dir, Options, Shard)),
    DBOptions = [
        {create_if_missing, true},
        {create_missing_column_families, true}
        | emqx_ds_conf:db_options()
    ],
    _ = filelib:ensure_dir(DBDir),
    ExistingCFs =
        case rocksdb:list_column_families(Filename, DBOptions) of
        case rocksdb:list_column_families(DBDir, DBOptions) of
            {ok, CFs} ->
                [{Name, []} || Name <- CFs, Name /= ?DEFAULT_CF, Name /= ?ITERATOR_CF];
            % DB is not present. First start
@@ -286,7 +292,7 @@ open_db(Shard) ->
            {?ITERATOR_CF, ?ITERATOR_CF_OPTS}
            | ExistingCFs
        ],
    case rocksdb:open(Filename, DBOptions, ColumnFamilies) of
    case rocksdb:open(DBDir, DBOptions, ColumnFamilies) of
        {ok, DBHandle, [_CFDefault, CFIterator | CFRefs]} ->
            {CFNames, _} = lists:unzip(ExistingCFs),
            {ok, #s{
@@ -6,7 +6,7 @@
-behaviour(supervisor).

%% API:
-export([start_link/0, start_shard/1, stop_shard/1]).
-export([start_link/0, start_shard/2, stop_shard/1]).

%% behaviour callbacks:
-export([init/1]).
@@ -25,9 +25,10 @@
start_link() ->
    supervisor:start_link({local, ?SUP}, ?MODULE, []).

-spec start_shard(emqx_ds:shard()) -> supervisor:startchild_ret().
start_shard(Shard) ->
    supervisor:start_child(?SUP, shard_child_spec(Shard)).
-spec start_shard(emqx_ds:shard(), emqx_ds_storage_layer:options()) ->
    supervisor:startchild_ret().
start_shard(Shard, Options) ->
    supervisor:start_child(?SUP, shard_child_spec(Shard, Options)).

-spec stop_shard(emqx_ds:shard()) -> ok | {error, _}.
stop_shard(Shard) ->
@@ -51,11 +52,12 @@ init([]) ->
 %% Internal functions
 %%================================================================================

--spec shard_child_spec(emqx_ds:shard()) -> supervisor:child_spec().
-shard_child_spec(Shard) ->
+-spec shard_child_spec(emqx_ds:shard(), emqx_ds_storage_layer:options()) ->
+    supervisor:child_spec().
+shard_child_spec(Shard, Options) ->
     #{
         id => Shard,
-        start => {emqx_ds_storage_layer, start_link, [Shard]},
+        start => {emqx_ds_storage_layer, start_link, [Shard, Options]},
         shutdown => 5_000,
         restart => permanent,
         type => worker
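Taken together, these hunks thread an options map from the public supervisor API down to RocksDB: start_shard/2 passes it through shard_child_spec/2 into emqx_ds_storage_layer:start_link/2, and open_db/2 reads an optional `dir` key to decide where the database lives, defaulting to the shard name. A minimal usage sketch (not part of the diff; the shard name and directory are hypothetical):

%% Minimal sketch, assuming the start_shard/2 and open_db/2 signatures
%% introduced above. The shard name and data directory are examples.
start_shard_with_dir() ->
    Shard = <<"example_shard">>,
    %% `dir` is optional: open_db/2 falls back to the shard name.
    Options = #{dir => "/var/lib/emqx/ds/example_shard"},
    {ok, _Pid} = emqx_ds_storage_layer_sup:start_shard(Shard, Options).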
@@ -2,7 +2,7 @@
 {application, emqx_durable_storage, [
     {description, "Message persistence and subscription replays for EMQX"},
     % strict semver, bump manually!
-    {vsn, "0.1.0"},
+    {vsn, "0.1.2"},
     {modules, []},
     {registered, []},
     {applications, [kernel, stdlib, rocksdb, gproc, mria]},
@@ -33,7 +33,7 @@
 %% Smoke test for opening and reopening the database
 t_open(_Config) ->
     ok = emqx_ds_storage_layer_sup:stop_shard(?SHARD),
-    {ok, _} = emqx_ds_storage_layer_sup:start_shard(?SHARD).
+    {ok, _} = emqx_ds_storage_layer_sup:start_shard(?SHARD, #{}).

 %% Smoke test of store function
 t_store(_Config) ->
@@ -263,7 +263,7 @@ end_per_suite(_Config) ->

 init_per_testcase(TC, Config) ->
     ok = set_shard_config(shard(TC), ?DEFAULT_CONFIG),
-    {ok, _} = emqx_ds_storage_layer_sup:start_shard(shard(TC)),
+    {ok, _} = emqx_ds_storage_layer_sup:start_shard(shard(TC), #{}),
     Config.

 end_per_testcase(TC, _Config) ->
@@ -1,6 +1,6 @@
 {application, emqx_eviction_agent, [
     {description, "EMQX Eviction Agent"},
-    {vsn, "5.0.1"},
+    {vsn, "5.1.0"},
     {registered, [
         emqx_eviction_agent_sup,
         emqx_eviction_agent,
@@ -165,9 +165,8 @@ handle_cast(Msg, Channel) ->
     ?SLOG(error, #{msg => "unexpected_cast", cast => Msg}),
     {noreply, Channel}.

-terminate(Reason, #{conninfo := ConnInfo, clientinfo := ClientInfo, session := Session} = Channel) ->
+terminate(Reason, #{clientinfo := ClientInfo, session := Session} = Channel) ->
     ok = cancel_expiry_timer(Channel),
-    (Reason =:= expired) andalso emqx_persistent_session:persist(ClientInfo, ConnInfo, Session),
     emqx_session:terminate(ClientInfo, Reason, Session).

 code_change(_OldVsn, Channel, _Extra) ->
@@ -181,34 +180,25 @@ handle_deliver(
     Delivers,
     #{
         takeover := true,
-        pendings := Pendings,
-        session := Session,
-        clientinfo := #{clientid := ClientId} = ClientInfo
+        pendings := Pendings
     } = Channel
 ) ->
     %% NOTE: Order is important here. While the takeover is in
     %% progress, the session cannot enqueue messages, since it already
     %% passed on the queue to the new connection in the session state.
-    NPendings = lists:append(
-        Pendings,
-        emqx_session:ignore_local(ClientInfo, emqx_channel:maybe_nack(Delivers), ClientId, Session)
-    ),
+    NPendings = lists:append(Pendings, emqx_channel:maybe_nack(Delivers)),
     Channel#{pendings => NPendings};
 handle_deliver(
     Delivers,
     #{
         takeover := false,
         session := Session,
-        clientinfo := #{clientid := ClientId} = ClientInfo
+        clientinfo := ClientInfo
     } = Channel
 ) ->
     Delivers1 = emqx_channel:maybe_nack(Delivers),
-    Delivers2 = emqx_session:ignore_local(ClientInfo, Delivers1, ClientId, Session),
-    NSession = emqx_session:enqueue(ClientInfo, Delivers2, Session),
-    NChannel = persist(NSession, Channel),
-    %% We consider queued/dropped messages as delivered since they are now in the session state.
-    emqx_channel:maybe_mark_as_delivered(Session, Delivers),
-    NChannel.
+    NSession = emqx_session:enqueue(ClientInfo, Delivers1, Session),
+    Channel#{session := NSession}.

 cancel_expiry_timer(#{expiry_timer := TRef}) when is_reference(TRef) ->
     _ = erlang:cancel_timer(TRef),
@@ -251,13 +241,8 @@ open_session(ConnInfo, #{clientid := ClientId} = ClientInfo) ->
     ),
     Pendings1 = lists:usort(lists:append(Pendings0, emqx_utils:drain_deliver())),
     NSession = emqx_session:enqueue(
         ClientInfo,
-        emqx_session:ignore_local(
-            ClientInfo,
-            emqx_channel:maybe_nack(Pendings1),
-            ClientId,
-            Session
-        ),
+        emqx_channel:maybe_nack(Pendings1),
         Session
     ),
     NChannel = Channel#{session => NSession},
@@ -334,10 +319,6 @@ channel(ConnInfo, ClientInfo) ->
         pendings => []
     }.

-persist(Session, #{clientinfo := ClientInfo, conninfo := ConnInfo} = Channel) ->
-    Session1 = emqx_persistent_session:persist(ClientInfo, ConnInfo, Session),
-    Channel#{session => Session1}.
-
 info(Channel) ->
     #{
         conninfo => maps:get(conninfo, Channel, undefined),
@@ -30,19 +30,12 @@ init_per_suite(Config) ->
 end_per_suite(_Config) ->
     emqx_common_test_helpers:stop_apps([emqx_eviction_agent, emqx_conf]).

-init_per_testcase(t_persistence, Config) ->
-    emqx_config:put([persistent_session_store, enabled], true),
-    {ok, _} = emqx_persistent_session_sup:start_link(),
-    emqx_persistent_session:init_db_backend(),
-    ?assert(emqx_persistent_session:is_store_enabled()),
-    Config;
+init_per_testcase(t_persistence, _Config) ->
+    {skip, "Existing session persistence implementation is being phased out"};
 init_per_testcase(_TestCase, Config) ->
     Config.

-end_per_testcase(t_persistence, Config) ->
-    emqx_config:put([persistent_session_store, enabled], false),
-    emqx_persistent_session:init_db_backend(),
-    ?assertNot(emqx_persistent_session:is_store_enabled()),
-    Config;
 end_per_testcase(_TestCase, _Config) ->
     ok.
@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, emqx_gateway, [
     {description, "The Gateway management application"},
-    {vsn, "0.1.21"},
+    {vsn, "0.1.22"},
     {registered, []},
     {mod, {emqx_gateway_app, []}},
     {applications, [kernel, stdlib, emqx, emqx_authn, emqx_ctl]},
@@ -823,7 +823,11 @@ code_change(_OldVsn, State, _Extra) ->
 do_unregister_channel_task(Items, GwName, CmTabs) ->
     lists:foreach(
         fun({ChanPid, ClientId}) ->
+            try
                 do_unregister_channel(GwName, {ClientId, ChanPid}, CmTabs)
+            catch
+                error:badarg -> ok
+            end
         end,
         Items
     ).
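The `try ... catch error:badarg` wrapper above makes channel unregistration idempotent. A standalone sketch of the same pattern (not part of the diff; that the `badarg` originates from ETS operations on an already-deleted table is an assumption):

%% ets:delete/2 raises error:badarg when the table no longer exists,
%% e.g. because its owner died during shutdown; swallowing the error
%% makes repeated cleanup calls safe.
safe_ets_delete(Tab, Key) ->
    try
        ets:delete(Tab, Key),
        ok
    catch
        error:badarg -> ok
    end.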
@@ -85,6 +85,7 @@
     emqx_bridge_opents,
     emqx_bridge_clickhouse,
     emqx_bridge_dynamo,
+    emqx_bridge_greptimedb,
     emqx_bridge_hstreamdb,
     emqx_bridge_influxdb,
     emqx_bridge_iotdb,
@@ -97,6 +98,7 @@
     emqx_bridge_tdengine,
     emqx_bridge_timescale,
     emqx_bridge_sqlserver,
+    emqx_bridge_kinesis,
     emqx_oracle,
     emqx_bridge_oracle,
     emqx_bridge_rabbitmq,
@@ -3,7 +3,7 @@
     {id, "emqx_machine"},
     {description, "The EMQX Machine"},
     % strict semver, bump manually!
-    {vsn, "0.2.8"},
+    {vsn, "0.2.9"},
     {modules, []},
     {registered, []},
     {applications, [kernel, stdlib, emqx_ctl]},
@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, emqx_resource, [
     {description, "Manager for all external resources"},
-    {vsn, "0.1.20"},
+    {vsn, "0.1.21"},
     {registered, []},
     {mod, {emqx_resource_app, []}},
     {applications, [
@@ -642,7 +642,6 @@ status_to_error(_) ->
     {error, undefined}.

-
 %% Compatibility
 external_error({error, {unhealthy_target, Message}}) -> Message;
 external_error({error, Reason}) -> Reason;
 external_error(Other) -> Other.
@@ -1,6 +1,6 @@
 {deps, [
     {emqx, {path, "../../apps/emqx"}},
-    {erlcloud, {git, "https://github.com/emqx/erlcloud", {tag, "3.7.0-emqx-1"}}},
+    {erlcloud, {git, "https://github.com/emqx/erlcloud", {tag, "3.7.0-emqx-2"}}},
     {emqx_bridge_http, {path, "../emqx_bridge_http"}}
 ]}.
bin/emqx
@@ -811,8 +811,8 @@ is_down() {
     PID="$1"
     if ps -p "$PID" >/dev/null; then
         # still around
-        # shellcheck disable=SC2009 # this grep pattern is not a part of the progra names
-        if ps -p "$PID" | $GREP -q 'defunct'; then
+        # shellcheck disable=SC2009 # this grep pattern is not a part of the program names
+        if ps -efp "$PID" | $GREP -q 'defunct'; then
             # zombie state, print parent pid
             parent="$(ps -o ppid= -p "$PID" | tr -d ' ')"
             logwarn "$PID is marked <defunct>, parent: $(ps -p "$parent")"
@@ -974,7 +974,7 @@ maybe_warn_default_cookie() {
     ## using Mnesia DB backend.
     if [[ "$IS_BOOT_COMMAND" == 'yes' && "$(get_boot_config 'node.db_backend')" == "rlog" ]]; then
         if ! (echo -e "$COMPATIBILITY_INFO" | $GREP -q 'MNESIA_OK'); then
-            logerr "DB Backend is RLOG, but an incompatible OTP version has been detected. Falling back to using Mnesia DB backend."
+            logwarn "DB Backend is RLOG, but an incompatible OTP version has been detected. Falling back to using Mnesia DB backend."
             export EMQX_NODE__DB_BACKEND=mnesia
             export EMQX_NODE__DB_ROLE=core
         fi
@@ -0,0 +1 @@
+Release packages for Amazon Linux 2023.
@@ -0,0 +1 @@
+Release packages for Debian 12.
@@ -0,0 +1 @@
+Updated the `jq` dependency to version 0.3.10, which includes an `oniguruma` library update to version 6.9.8 with a few minor security fixes.
@@ -0,0 +1 @@
+Updated RocksDB version to 1.8.0-emqx-1 via an ekka update to 0.15.6.
@@ -0,0 +1 @@
+Avoid logging irrelevant error messages during EMQX shutdown.
@@ -0,0 +1,4 @@
+Import additional configurations from the EMQX backup file (`emqx ctl import` command):
+- rule_engine (previously not imported due to a bug)
+- topic_metrics (previously not implemented)
+- slow_subs (previously not implemented).
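A hedged usage sketch: the changelog entry above confirms only the `emqx ctl import` command name, so the backup file path below is illustrative.

# Hypothetical invocation: import a previously exported backup file,
# now including rule_engine, topic_metrics, and slow_subs settings.
emqx ctl import /path/to/emqx-backup.json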
@@ -0,0 +1,3 @@
+Update ekka to version 0.15.8, mria to version 0.15.8, and optvar to 1.0.5.
+This fixes occasional assertion failures:
+`{{badmatch,noproc},[{optvar,read,2,[{file,"optvar.erl"},{line,140}]},{optvar,read,1,[{file,"optvar.erl"},{line,124}]},...`