Merge branch 'master' into file-transfer
* master: (194 commits)
  fix(limiter): update change && fix deprecated version
  chore: update changes
  perf(limiter): simplify the memory represent of limiter configuration
  ci(perf test): update tf variable name and set job timeout
  ci: fix artifact name in scheduled packages workflow
  fix: build_packages_cron.yaml workflow
  ci: move scheduled builds to a separate workflow
  build: check mnesia compatibility when generating mria config
  docs: fix a typo in api doc description
  feat(./dev): use command style and added 'ctl' command
  test: fix delayed-pubish test case flakyness
  refactor: remove raw_with_default config load option
  chore: add changelog for trace timestrap
  feat: increase the time precision of trace logs to microseconds
  chore: make sure topic_metrics/rewrite's default is []
  docs: Update changes/ce/perf-10417.en.md
  chore: bump `snabbkaffe` to 1.0.8
  ci: run static checks in separate jobs
  chore(schema): mark deprecated quic listener fields ?IMPORTANCE_HIDDEN
  chore: remove unused mqtt cap 'subscription_identifiers'
  ...

commit dd3471bc22
@@ -8,6 +8,7 @@ TDENGINE_TAG=3.0.2.4
 DYNAMO_TAG=1.21.0
 CASSANDRA_TAG=3.11.6
 MINIO_TAG=RELEASE.2023-03-20T20-16-18Z
+OPENTS_TAG=9aa7f88
 
 MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server
 SQLSERVER_TAG=2019-CU19-ubuntu-20.04
@@ -0,0 +1,9 @@
+version: '3.9'
+
+services:
+  opents_server:
+    container_name: opents
+    image: petergrace/opentsdb-docker:${OPENTS_TAG}
+    restart: always
+    networks:
+      - emqx_bridge
@@ -0,0 +1,11 @@
+version: '3.9'
+
+services:
+  oracle_server:
+    container_name: oracle
+    image: oracleinanutshell/oracle-xe-11g:1.0.0
+    restart: always
+    environment:
+      ORACLE_DISABLE_ASYNCH_IO: true
+    networks:
+      - emqx_bridge
@@ -0,0 +1,32 @@
+version: '3'
+
+services:
+  pulsar:
+    container_name: pulsar
+    image: apachepulsar/pulsar:2.11.0
+    # ports:
+    #   - 6650:6650
+    #   - 8080:8080
+    networks:
+      emqx_bridge:
+    volumes:
+      - ../../apps/emqx/etc/certs/cert.pem:/etc/certs/server.pem
+      - ../../apps/emqx/etc/certs/key.pem:/etc/certs/key.pem
+      - ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.pem
+    restart: always
+    command:
+      - bash
+      - "-c"
+      - |
+        sed -i 's/^advertisedAddress=/#advertisedAddress=/' conf/standalone.conf
+        sed -ie 's/^brokerServicePort=.*/brokerServicePort=6649/' conf/standalone.conf
+        sed -i 's/^bindAddress=/#bindAddress=/' conf/standalone.conf
+        sed -i 's#^bindAddresses=#bindAddresses=plain:pulsar://0.0.0.0:6650,ssl:pulsar+ssl://0.0.0.0:6651,toxiproxy:pulsar://0.0.0.0:6652,toxiproxy_ssl:pulsar+ssl://0.0.0.0:6653#' conf/standalone.conf
+        sed -i 's#^advertisedAddress=#advertisedAddress=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653#' conf/standalone.conf
+        sed -i 's#^tlsCertificateFilePath=#tlsCertificateFilePath=/etc/certs/server.pem#' conf/standalone.conf
+        sed -i 's#^tlsTrustCertsFilePath=#tlsTrustCertsFilePath=/etc/certs/ca.pem#' conf/standalone.conf
+        sed -i 's#^tlsKeyFilePath=#tlsKeyFilePath=/etc/certs/key.pem#' conf/standalone.conf
+        sed -i 's#^tlsProtocols=#tlsProtocols=TLSv1.3,TLSv1.2#' conf/standalone.conf
+        sed -i 's#^tlsCiphers=#tlsCiphers=TLS_AES_256_GCM_SHA384#' conf/standalone.conf
+        echo 'advertisedListeners=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653' >> conf/standalone.conf
+        bin/pulsar standalone -nfw -nss
@@ -43,6 +43,8 @@ services:
       - 19000:19000
       # S3 TLS
       - 19100:19100
+      # IOTDB
+      - 14242:4242
     command:
       - "-host=0.0.0.0"
      - "-config=/config/toxiproxy.json"
@@ -20,8 +20,8 @@ esac
 
 {
     echo "HOCON_ENV_OVERRIDE_PREFIX=EMQX_"
-    echo "EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s"
-    echo "EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10"
+    echo "EMQX_MQTT__RETRY_INTERVAL=2s"
+    echo "EMQX_MQTT__MAX_TOPIC_ALIAS=10"
    echo "EMQX_AUTHORIZATION__SOURCES=[]"
    echo "EMQX_AUTHORIZATION__NO_MATCH=allow"
 } >> .ci/docker-compose-file/conf.cluster.env
@@ -102,6 +102,30 @@
     "upstream": "sqlserver:1433",
     "enabled": true
   },
+  {
+    "name": "opents",
+    "listen": "0.0.0.0:4242",
+    "upstream": "opents:4242",
+    "enabled": true
+  },
+  {
+    "name": "pulsar_plain",
+    "listen": "0.0.0.0:6652",
+    "upstream": "pulsar:6652",
+    "enabled": true
+  },
+  {
+    "name": "pulsar_tls",
+    "listen": "0.0.0.0:6653",
+    "upstream": "pulsar:6653",
+    "enabled": true
+  },
+  {
+    "name": "oracle",
+    "listen": "0.0.0.0:1521",
+    "upstream": "oracle:1521",
+    "enabled": true
+  },
   {
     "name": "minio_tcp",
     "listen": "0.0.0.0:19000",
@@ -5,8 +5,6 @@ concurrency:
   cancel-in-progress: true
 
 on:
-  schedule:
-    - cron: '0 */6 * * *'
   push:
     branches:
       - 'ci/**'
@@ -23,7 +21,6 @@ on:
 jobs:
   prepare:
     runs-on: ubuntu-22.04
-    if: (github.repository_owner == 'emqx' && github.event_name == 'schedule') || github.event_name != 'schedule'
     container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04
     outputs:
       BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }}
@@ -134,14 +131,6 @@ jobs:
         with:
           name: ${{ matrix.profile }}
           path: source/_packages/${{ matrix.profile }}/
-      - name: Send notification to Slack
-        uses: slackapi/slack-github-action@v1.23.0
-        if: failure() && github.event_name == 'schedule'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
-        with:
-          payload: |
-            {"text": "Scheduled run of ${{ github.workflow }}@Windows failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
 
   mac:
     needs: prepare
@@ -182,14 +171,6 @@ jobs:
         with:
           name: ${{ matrix.profile }}
           path: _packages/${{ matrix.profile }}/
-      - name: Send notification to Slack
-        uses: slackapi/slack-github-action@v1.23.0
-        if: failure() && github.event_name == 'schedule'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
-        with:
-          payload: |
-            {"text": "Scheduled run of ${{ github.workflow }}@${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
 
   linux:
     needs: prepare
@@ -304,19 +285,11 @@ jobs:
         with:
           name: ${{ matrix.profile }}
           path: source/_packages/${{ matrix.profile }}/
-      - name: Send notification to Slack
-        uses: slackapi/slack-github-action@v1.23.0
-        if: failure() && github.event_name == 'schedule'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
-        with:
-          payload: |
-            {"text": "Scheduled run of ${{ github.workflow }}@${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
 
   publish_artifacts:
     runs-on: ubuntu-22.04
     needs: [prepare, mac, linux]
-    if: needs.prepare.outputs.IS_EXACT_TAG && github.event_name != 'schedule'
+    if: needs.prepare.outputs.IS_EXACT_TAG
     strategy:
       fail-fast: false
       matrix:
@@ -0,0 +1,153 @@
+name: Scheduled build packages
+
+concurrency:
+  group: build-${{ github.event_name }}-${{ github.ref }}
+  cancel-in-progress: true
+
+on:
+  schedule:
+    - cron: '0 */6 * * *'
+
+jobs:
+  prepare:
+    runs-on: aws-amd64
+    if: github.repository_owner == 'emqx'
+    container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04
+    strategy:
+      fail-fast: false
+      matrix:
+        profile:
+          - ['emqx', 'master']
+          - ['emqx-enterprise', 'release-50']
+
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ matrix.profile[1] }}
+          path: source
+          fetch-depth: 0
+      - name: get_all_deps
+        run: |
+          make -C source deps-all
+          zip -ryq source.zip source/* source/.[^.]*
+      - uses: actions/upload-artifact@v3
+        with:
+          name: source-${{ matrix.profile[0] }}
+          path: source.zip
+
+  linux:
+    needs: prepare
+    runs-on: aws-${{ matrix.arch }}
+
+    strategy:
+      fail-fast: false
+      matrix:
+        profile:
+          - emqx
+          - emqx-enterprise
+        otp:
+          - 24.3.4.2-3
+        arch:
+          - amd64
+        os:
+          - debian10
+          - amzn2
+        builder:
+          - 5.0-34
+        elixir:
+          - 1.13.4
+
+    defaults:
+      run:
+        shell: bash
+
+    steps:
+      - uses: AutoModality/action-clean@v1
+      - uses: actions/download-artifact@v3
+        with:
+          name: source-${{ matrix.profile }}
+          path: .
+      - name: unzip source code
+        run: unzip -q source.zip
+      - name: build emqx packages
+        working-directory: source
+        env:
+          BUILDER: ${{ matrix.builder }}
+          ELIXIR: ${{ matrix.elixir }}
+          OTP: ${{ matrix.otp }}
+          PROFILE: ${{ matrix.profile[0] }}
+          ARCH: ${{ matrix.arch }}
+          OS: ${{ matrix.os }}
+        run: |
+          set -eu
+          PKGTYPES="tgz pkg"
+          IS_ELIXIR="no"
+          for PKGTYPE in ${PKGTYPES};
+          do
+            ./scripts/buildx.sh \
+              --profile "${PROFILE}" \
+              --pkgtype "${PKGTYPE}" \
+              --arch "${ARCH}" \
+              --elixir "${IS_ELIXIR}" \
+              --builder "ghcr.io/emqx/emqx-builder/${BUILDER}:${ELIXIR}-${OTP}-${OS}"
+          done
+      - uses: actions/upload-artifact@v3
+        if: success()
+        with:
+          name: ${{ matrix.profile }}
+          path: source/_packages/${{ matrix.profile }}/
+      - name: Send notification to Slack
+        uses: slackapi/slack-github-action@v1.23.0
+        if: failure()
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+        with:
+          payload: |
+            {"text": "Scheduled build of ${{ matrix.profile }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
+
+  mac:
+    needs: prepare
+    strategy:
+      fail-fast: false
+      matrix:
+        profile:
+          - emqx
+        otp:
+          - 24.3.4.2-3
+        os:
+          - macos-12
+          - macos-12-arm64
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: emqx/self-hosted-cleanup-action@v1.0.3
+      - uses: actions/download-artifact@v3
+        with:
+          name: source-${{ matrix.profile }}
+          path: .
+      - name: unzip source code
+        run: |
+          ln -s . source
+          unzip -o -q source.zip
+          rm source source.zip
+      - uses: ./.github/actions/package-macos
+        with:
+          profile: ${{ matrix.profile }}
+          otp: ${{ matrix.otp }}
+          os: ${{ matrix.os }}
+          apple_id_password: ${{ secrets.APPLE_ID_PASSWORD }}
+          apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
+          apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
+          apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
+      - uses: actions/upload-artifact@v3
+        if: success()
+        with:
+          name: ${{ matrix.profile }}
+          path: _packages/${{ matrix.profile }}/
+      - name: Send notification to Slack
+        uses: slackapi/slack-github-action@v1.23.0
+        if: failure()
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+        with:
+          payload: |
+            {"text": "Scheduled build of ${{ matrix.profile }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
@@ -194,15 +194,12 @@ jobs:
         run: |
           CID=$(docker run -d --rm -P $EMQX_IMAGE_TAG)
           HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' $CID)
-          export EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS='yes'
           ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT
           docker stop $CID
       - name: test two nodes cluster with proto_dist=inet_tls in docker
         run: |
           ./scripts/test/start-two-nodes-in-docker.sh -P $EMQX_IMAGE_TAG $EMQX_IMAGE_OLD_VERSION_TAG
           HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' haproxy)
-          # versions before 5.0.22 have hidden fields included in the API spec
-          export EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS='no'
           ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT
           # cleanup
           ./scripts/test/start-two-nodes-in-docker.sh -c
@@ -0,0 +1,127 @@
+name: Performance Test Suite
+
+on:
+  push:
+    branches:
+      - 'perf/**'
+  schedule:
+    - cron: '0 1 * * *'
+  workflow_dispatch:
+    inputs:
+      ref:
+        required: false
+
+jobs:
+  prepare:
+    runs-on: ubuntu-latest
+    container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu20.04
+    outputs:
+      BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }}
+      PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }}
+
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          ref: ${{ github.event.inputs.ref }}
+      - name: Work around https://github.com/actions/checkout/issues/766
+        run: |
+          git config --global --add safe.directory "$GITHUB_WORKSPACE"
+      - id: prepare
+        run: |
+          echo "EMQX_NAME=emqx" >> $GITHUB_ENV
+          echo "CODE_PATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV
+          echo "BENCH_ID=$(date --utc +%F)/emqx-$(./pkg-vsn.sh emqx)" >> $GITHUB_OUTPUT
+      - name: Build deb package
+        run: |
+          make ${EMQX_NAME}-pkg
+          ./scripts/pkg-tests.sh ${EMQX_NAME}-pkg
+      - name: Get package file name
+        id: package_file
+        run: |
+          echo "PACKAGE_FILE=$(find _packages/emqx -name 'emqx-*.deb' | head -n 1 | xargs basename)" >> $GITHUB_OUTPUT
+      - uses: actions/upload-artifact@v3
+        with:
+          name: emqx-ubuntu20.04
+          path: _packages/emqx/${{ steps.package_file.outputs.PACKAGE_FILE }}
+
+  tf_emqx_perf_test:
+    runs-on: ubuntu-latest
+    needs:
+      - prepare
+    env:
+      TF_VAR_bench_id: ${{ needs.prepare.outputs.BENCH_ID }}
+      TF_VAR_package_file: ${{ needs.prepare.outputs.PACKAGE_FILE }}
+      TF_VAR_test_duration: 300
+      TF_VAR_grafana_api_key: ${{ secrets.TF_EMQX_PERF_TEST_GRAFANA_API_KEY }}
+      TF_AWS_REGION: eu-north-1
+
+    steps:
+      - name: Configure AWS Credentials
+        uses: aws-actions/configure-aws-credentials@v2
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PERF_TEST }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
+          aws-region: eu-north-1
+      - name: Checkout tf-emqx-performance-test
+        uses: actions/checkout@v3
+        with:
+          repository: emqx/tf-emqx-performance-test
+          path: tf-emqx-performance-test
+      - uses: actions/download-artifact@v3
+        with:
+          name: emqx-ubuntu20.04
+          path: tf-emqx-performance-test/
+      - name: Setup Terraform
+        uses: hashicorp/setup-terraform@v2
+        with:
+          terraform_wrapper: false
+      - name: terraform init
+        working-directory: ./tf-emqx-performance-test
+        run: |
+          terraform init
+      - name: terraform apply
+        working-directory: ./tf-emqx-performance-test
+        run: |
+          terraform apply -auto-approve
+      - name: Wait for test results
+        timeout-minutes: 30
+        working-directory: ./tf-emqx-performance-test
+        id: test-results
+        run: |
+          sleep $TF_VAR_test_duration
+          until aws s3api head-object --bucket tf-emqx-performance-test --key "$TF_VAR_bench_id/DONE" > /dev/null 2>&1
+          do
+            printf '.'
+            sleep 10
+          done
+          echo
+          aws s3 cp "s3://tf-emqx-performance-test/$TF_VAR_bench_id/metrics.json" ./
+          aws s3 cp "s3://tf-emqx-performance-test/$TF_VAR_bench_id/stats.json" ./
+          echo MESSAGES_DELIVERED=$(cat metrics.json | jq '[.[]."messages.delivered"] | add') >> $GITHUB_OUTPUT
+          echo MESSAGES_DROPPED=$(cat metrics.json | jq '[.[]."messages.dropped"] | add') >> $GITHUB_OUTPUT
+      - name: Send notification to Slack
+        if: success()
+        uses: slackapi/slack-github-action@v1.23.0
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+        with:
+          payload: |
+            {"text": "EMQX performance test completed.\nMessages delivered: ${{ steps.test-results.outputs.MESSAGES_DELIVERED }}.\nMessages dropped: ${{ steps.test-results.outputs.MESSAGES_DROPPED }}.\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
+      - name: terraform destroy
+        if: always()
+        working-directory: ./tf-emqx-performance-test
+        run: |
+          terraform destroy -auto-approve
+      - uses: actions/upload-artifact@v3
+        if: success()
+        with:
+          name: test-results
+          path: "./tf-emqx-performance-test/*.json"
+      - uses: actions/upload-artifact@v3
+        if: always()
+        with:
+          name: terraform
+          path: |
+            ./tf-emqx-performance-test/.terraform
+            ./tf-emqx-performance-test/*.tfstate
@@ -167,8 +167,8 @@ jobs:
           --set image.pullPolicy=Never \
           --set image.tag=$EMQX_TAG \
           --set emqxAclConfig="" \
-          --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \
-          --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \
+          --set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \
+          --set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \
           --set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \
           --set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \
           deploy/charts/${{ matrix.profile }} \
@@ -185,8 +185,8 @@ jobs:
           --set image.pullPolicy=Never \
           --set image.tag=$EMQX_TAG \
           --set emqxAclConfig="" \
-          --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \
-          --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \
+          --set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \
+          --set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \
           --set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \
           --set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \
           deploy/charts/${{ matrix.profile }} \
@@ -14,6 +14,9 @@ on:
       - e*
   pull_request:
 
+env:
+  IS_CI: "yes"
+
 jobs:
   build-matrix:
     runs-on: ubuntu-22.04
@@ -69,21 +72,14 @@ jobs:
       - uses: actions/checkout@v3
         with:
           path: source
-      - uses: actions/cache@v3
-        id: cache
-        with:
-          path: "$HOME/.cache/rebar3/rebar3_${{ matrix.otp }}_plt"
-          key: rebar3-dialyzer-plt-${{ matrix.otp }}
       - name: get_all_deps
         working-directory: source
         env:
           PROFILE: ${{ matrix.profile }}
-          #DIAGNOSTIC: 1
         run: |
           make ensure-rebar3
           # fetch all deps and compile
-          make ${{ matrix.profile }}
-          make static_checks
+          make ${{ matrix.profile }}-compile
           make test-compile
           cd ..
           zip -ryq source.zip source/* source/.[^.]*
@@ -92,6 +88,34 @@ jobs:
           name: source-${{ matrix.profile }}-${{ matrix.otp }}
           path: source.zip
 
+  static_checks:
+    needs:
+      - build-matrix
+      - prepare
+    runs-on: ${{ needs.build-matrix.outputs.runs-on }}
+    strategy:
+      fail-fast: false
+      matrix:
+        include: ${{ fromJson(needs.build-matrix.outputs.prepare) }}
+    container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04"
+    steps:
+      - uses: AutoModality/action-clean@v1
+      - uses: actions/download-artifact@v3
+        with:
+          name: source-${{ matrix.profile }}-${{ matrix.otp }}
+          path: .
+      - name: unzip source code
+        run: unzip -o -q source.zip
+      - uses: actions/cache@v3
+        with:
+          path: "source/emqx_dialyzer_${{ matrix.otp }}_plt"
+          key: rebar3-dialyzer-plt-${{ matrix.profile }}-${{ matrix.otp }}
+      - name: run static checks
+        env:
+          PROFILE: ${{ matrix.profile }}
+        working-directory: source
+        run: make static_checks
+
   eunit_and_proper:
     needs:
       - build-matrix
@@ -168,6 +192,7 @@ jobs:
       REDIS_TAG: "7.0"
       INFLUXDB_TAG: "2.5.0"
       TDENGINE_TAG: "3.0.2.4"
+      OPENTS_TAG: "9aa7f88"
       MINIO_TAG: "RELEASE.2023-03-20T20-16-18Z"
       PROFILE: ${{ matrix.profile }}
       CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
@@ -43,8 +43,7 @@ tmp/
 _packages
 elvis
 emqx_dialyzer_*_plt
-*/emqx_dashboard/priv/www
-*/emqx_dashboard/priv/i18n.conf
+*/emqx_dashboard/priv/
 dist.zip
 scripts/git-token
 apps/*/etc/*.all
@@ -71,3 +70,5 @@ apps/emqx/test/emqx_static_checks_data/master.bpapi
 lux_logs/
 /.prepare
 bom.json
+ct_run*/
+apps/emqx_conf/etc/emqx.conf.all.rendered*
LICENSE
@@ -1,7 +1,7 @@
 Source code in this repository is variously licensed under below licenses.
 
-For EMQX: Apache License 2.0, see APL.txt,
-which applies to all source files except for lib-ee sub-directory.
+For Default: Apache License 2.0, see APL.txt,
+which applies to all source files except for folders applied with Business Source License.
 
 For EMQX Enterprise (since version 5.0): Business Source License 1.1,
-see lib-ee/BSL.txt, which applies to source code in lib-ee sub-directory.
+see apps/emqx_bridge_kafka/BSL.txt as an example, please check license files under sub directory of apps.
Makefile
@@ -7,7 +7,8 @@ export EMQX_DEFAULT_RUNNER = debian:11-slim
 export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
 export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
 export EMQX_DASHBOARD_VERSION ?= v1.2.3
-export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6-beta.1
+export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6-beta.2
 
 export EMQX_REL_FORM ?= tgz
 export QUICER_DOWNLOAD_FROM_RELEASE = 1
 ifeq ($(OS),Windows_NT)
@@ -73,6 +74,10 @@ proper: $(REBAR)
 test-compile: $(REBAR) merge-config
 	$(REBAR) as test compile
 
+.PHONY: $(REL_PROFILES:%=%-compile)
+$(REL_PROFILES:%=%-compile): $(REBAR) merge-config
+	$(REBAR) as $(@:%-compile=%) compile
+
 .PHONY: ct
 ct: $(REBAR) merge-config
 	@ENABLE_COVER_COMPILE=1 $(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(CT_COVER_EXPORT_PREFIX)-ct
@@ -88,10 +93,9 @@ APPS=$(shell $(SCRIPTS)/find-apps.sh)
 
 .PHONY: $(APPS:%=%-ct)
 define gen-app-ct-target
-$1-ct: $(REBAR)
+$1-ct: $(REBAR) merge-config
 	$(eval SUITES := $(shell $(SCRIPTS)/find-suites.sh $1))
 ifneq ($(SUITES),)
-	@$(SCRIPTS)/pre-compile.sh $(PROFILE)
 	@ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \
 		--readable=$(CT_READABLE) \
 		--name $(CT_NODE_NAME) \
@@ -139,6 +143,11 @@ COMMON_DEPS := $(REBAR)
 $(REL_PROFILES:%=%): $(COMMON_DEPS)
 	@$(BUILD) $(@) rel
 
+.PHONY: compile $(PROFILES:%=compile-%)
+compile: $(PROFILES:%=compile-%)
+$(PROFILES:%=compile-%):
+	@$(BUILD) $(@:compile-%=%) apps
+
 ## Not calling rebar3 clean because
 ## 1. rebar3 clean relies on rebar3, meaning it reads config, fetches dependencies etc.
 ## 2. it's slow
@@ -222,11 +231,11 @@ endef
 $(foreach pt,$(PKG_PROFILES),$(eval $(call gen-pkg-target,$(pt))))
 
 .PHONY: run
-run: $(PROFILE) quickrun
+run: compile-$(PROFILE) quickrun
 
 .PHONY: quickrun
 quickrun:
-	./_build/$(PROFILE)/rel/emqx/bin/emqx console
+	./dev -p $(PROFILE)
 
 ## Take the currently set PROFILE
 docker:
@@ -1,43 +0,0 @@
-listeners.tcp.default {
-    bind = "0.0.0.0:1883"
-    max_connections = 1024000
-}
-
-listeners.ssl.default {
-    bind = "0.0.0.0:8883"
-    max_connections = 512000
-    ssl_options {
-        keyfile = "{{ platform_etc_dir }}/certs/key.pem"
-        certfile = "{{ platform_etc_dir }}/certs/cert.pem"
-        cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem"
-    }
-}
-
-listeners.ws.default {
-    bind = "0.0.0.0:8083"
-    max_connections = 1024000
-    websocket.mqtt_path = "/mqtt"
-}
-
-listeners.wss.default {
-    bind = "0.0.0.0:8084"
-    max_connections = 512000
-    websocket.mqtt_path = "/mqtt"
-    ssl_options {
-        keyfile = "{{ platform_etc_dir }}/certs/key.pem"
-        certfile = "{{ platform_etc_dir }}/certs/cert.pem"
-        cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem"
-    }
-}
-
-# listeners.quic.default {
-#     enabled = true
-#     bind = "0.0.0.0:14567"
-#     max_connections = 1024000
-#     ssl_options {
-#     verify = verify_none
-#         keyfile = "{{ platform_etc_dir }}/certs/key.pem"
-#         certfile = "{{ platform_etc_dir }}/certs/cert.pem"
-#         cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem"
-#     }
-# }
@@ -32,10 +32,10 @@
 %% `apps/emqx/src/bpapi/README.md'
 
 %% Community edition
--define(EMQX_RELEASE_CE, "5.0.23").
+-define(EMQX_RELEASE_CE, "5.0.24").
 
 %% Enterprise edition
--define(EMQX_RELEASE_EE, "5.0.3-alpha.1").
+-define(EMQX_RELEASE_EE, "5.0.3-alpha.5").
 
 %% the HTTP API version
 -define(EMQX_API_VERSION, "5.0").
@@ -0,0 +1,23 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%     http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+
+-ifndef(EMQX_SCHEMA_HRL).
+-define(EMQX_SCHEMA_HRL, true).
+
+-define(TOMBSTONE_TYPE, marked_for_deletion).
+-define(TOMBSTONE_VALUE, <<"marked_for_deletion">>).
+-define(TOMBSTONE_CONFIG_CHANGE_REQ, mark_it_for_deletion).
+
+-endif.
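Note: the tombstone macros above are what several later hunks build on: instead of deleting a config entry outright, it is overwritten with a marker value. A minimal sketch of the idea, under the assumption that tombstoned entries are simply filtered out when the config is read back (module and function names here are hypothetical, not part of the diff):

    -module(tombstone_sketch).
    -export([prune/1]).

    -define(TOMBSTONE_VALUE, <<"marked_for_deletion">>).

    %% Drop every entry whose raw value carries the tombstone marker,
    %% keeping all other entries intact.
    prune(RawConf) when is_map(RawConf) ->
        maps:filter(fun(_Key, Value) -> Value =/= ?TOMBSTONE_VALUE end, RawConf).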
@@ -27,13 +27,13 @@
     {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}},
     {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
     {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}},
-    {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.6"}}},
+    {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.1"}}},
     {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
-    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.2"}}},
+    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.4"}}},
     {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}},
     {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
     {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
-    {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.7"}}}
+    {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.8"}}}
 ]}.
 
 {plugins, [{rebar3_proper, "0.12.1"}, rebar3_path_deps]}.
@@ -3,7 +3,7 @@
     {id, "emqx"},
     {description, "EMQX Core"},
     % strict semver, bump manually!
-    {vsn, "5.0.24"},
+    {vsn, "5.0.25"},
     {modules, []},
     {registered, []},
     {applications, [
@@ -89,7 +89,7 @@
     %% Authentication Data Cache
     auth_cache :: maybe(map()),
     %% Quota checkers
-    quota :: maybe(emqx_limiter_container:limiter()),
+    quota :: emqx_limiter_container:limiter(),
     %% Timers
     timers :: #{atom() => disabled | maybe(reference())},
     %% Conn State
@@ -768,7 +768,7 @@ do_finish_publish(PacketId, PubRes, RC, Channel) ->
     NChannel = ensure_quota(PubRes, Channel),
     handle_out(puback, {PacketId, RC}, NChannel).
 
-ensure_quota(_, Channel = #channel{quota = undefined}) ->
+ensure_quota(_, Channel = #channel{quota = infinity}) ->
     Channel;
 ensure_quota(PubRes, Channel = #channel{quota = Limiter}) ->
     Cnt = lists:foldl(
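Note: `ensure_quota/2` now matches on `infinity` instead of `undefined` for the "no quota configured" case, so the channel state reuses the limiter's own representation rather than a second sentinel. A stripped-down sketch of the same dispatch (hypothetical module, plain integers standing in for the limiter):

    -module(quota_sketch).
    -export([consume/2]).

    %% infinity means no quota is configured: consuming is a no-op.
    consume(_N, infinity) ->
        infinity;
    %% otherwise spend N tokens from the current budget.
    consume(N, Tokens) when is_integer(Tokens), Tokens >= N ->
        Tokens - N.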
@@ -22,7 +22,6 @@
 -export([
     init_load/1,
     init_load/2,
-    init_load/3,
     read_override_conf/1,
     has_deprecated_file/0,
     delete_override_conf_files/0,
@@ -35,7 +34,6 @@
     save_to_config_map/2,
     save_to_override_conf/3
 ]).
--export([raw_conf_with_default/4]).
 -export([merge_envs/2]).
 
 -export([
@@ -90,7 +88,7 @@
 ]).
 
 -ifdef(TEST).
--export([erase_schema_mod_and_names/0]).
+-export([erase_all/0]).
 -endif.
 
 -include("logger.hrl").
@@ -103,6 +101,8 @@
 -define(ZONE_CONF_PATH(ZONE, PATH), [zones, ZONE | PATH]).
 -define(LISTENER_CONF_PATH(TYPE, LISTENER, PATH), [listeners, TYPE, LISTENER | PATH]).
 
+-define(CONFIG_NOT_FOUND_MAGIC, '$0tFound').
+
 -export_type([
     update_request/0,
     raw_config/0,
@@ -164,9 +164,8 @@ get(KeyPath, Default) -> do_get(?CONF, KeyPath, Default).
 -spec find(emqx_utils_maps:config_key_path()) ->
     {ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}.
 find([]) ->
-    Ref = make_ref(),
-    case do_get(?CONF, [], Ref) of
-        Ref -> {not_found, []};
+    case do_get(?CONF, [], ?CONFIG_NOT_FOUND_MAGIC) of
+        ?CONFIG_NOT_FOUND_MAGIC -> {not_found, []};
         Res -> {ok, Res}
     end;
 find(KeyPath) ->
@@ -179,9 +178,8 @@ find(KeyPath) ->
 -spec find_raw(emqx_utils_maps:config_key_path()) ->
     {ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}.
 find_raw([]) ->
-    Ref = make_ref(),
-    case do_get_raw([], Ref) of
-        Ref -> {not_found, []};
+    case do_get_raw([], ?CONFIG_NOT_FOUND_MAGIC) of
+        ?CONFIG_NOT_FOUND_MAGIC -> {not_found, []};
         Res -> {ok, Res}
     end;
 find_raw(KeyPath) ->
@@ -315,45 +313,38 @@ put_raw(KeyPath, Config) ->
 %%============================================================================
 init_load(SchemaMod) ->
     ConfFiles = application:get_env(emqx, config_files, []),
-    init_load(SchemaMod, ConfFiles, #{raw_with_default => true}).
-
-init_load(SchemaMod, Opts) when is_map(Opts) ->
-    ConfFiles = application:get_env(emqx, config_files, []),
-    init_load(SchemaMod, ConfFiles, Opts);
-init_load(SchemaMod, ConfFiles) ->
-    init_load(SchemaMod, ConfFiles, #{raw_with_default => false}).
+    init_load(SchemaMod, ConfFiles).
 
 %% @doc Initial load of the given config files.
 %% NOTE: The order of the files is significant, configs from files ordered
 %% in the rear of the list overrides prior values.
 -spec init_load(module(), [string()] | binary() | hocon:config()) -> ok.
-init_load(SchemaMod, Conf, Opts) when is_list(Conf) orelse is_binary(Conf) ->
+init_load(SchemaMod, Conf) when is_list(Conf) orelse is_binary(Conf) ->
+    ok = save_schema_mod_and_names(SchemaMod),
     HasDeprecatedFile = has_deprecated_file(),
-    RawConf = parse_hocon(HasDeprecatedFile, Conf),
-    init_load(HasDeprecatedFile, SchemaMod, RawConf, Opts).
+    RawConf0 = load_config_files(HasDeprecatedFile, Conf),
+    RawConf1 =
+        case HasDeprecatedFile of
+            true ->
+                overlay_v0(SchemaMod, RawConf0);
+            false ->
+                overlay_v1(SchemaMod, RawConf0)
+        end,
+    RawConf = fill_defaults_for_all_roots(SchemaMod, RawConf1),
+    %% check configs against the schema
+    {AppEnvs, CheckedConf} = check_config(SchemaMod, RawConf, #{}),
+    save_to_app_env(AppEnvs),
+    ok = save_to_config_map(CheckedConf, RawConf).
 
-init_load(true, SchemaMod, RawConf, Opts) when is_map(RawConf) ->
-    %% deprecated conf will be removed in 5.1
-    %% Merge environment variable overrides on top
+%% Merge environment variable overrides on top, then merge with overrides.
+overlay_v0(SchemaMod, RawConf) when is_map(RawConf) ->
     RawConfWithEnvs = merge_envs(SchemaMod, RawConf),
     Overrides = read_override_confs(),
-    RawConfWithOverrides = hocon:deep_merge(RawConfWithEnvs, Overrides),
-    RootNames = get_root_names(),
-    RawConfAll = raw_conf_with_default(SchemaMod, RootNames, RawConfWithOverrides, Opts),
-    %% check configs against the schema
-    {AppEnvs, CheckedConf} = check_config(SchemaMod, RawConfAll, #{}),
-    save_to_app_env(AppEnvs),
-    ok = save_to_config_map(CheckedConf, RawConfAll);
-init_load(false, SchemaMod, RawConf, Opts) when is_map(RawConf) ->
-    ok = save_schema_mod_and_names(SchemaMod),
-    RootNames = get_root_names(),
-    %% Merge environment variable overrides on top
-    RawConfWithEnvs = merge_envs(SchemaMod, RawConf),
-    RawConfAll = raw_conf_with_default(SchemaMod, RootNames, RawConfWithEnvs, Opts),
-    %% check configs against the schema
-    {AppEnvs, CheckedConf} = check_config(SchemaMod, RawConfAll, #{}),
-    save_to_app_env(AppEnvs),
-    ok = save_to_config_map(CheckedConf, RawConfAll).
+    hocon:deep_merge(RawConfWithEnvs, Overrides).
+
+%% Merge environment variable overrides on top.
+overlay_v1(SchemaMod, RawConf) when is_map(RawConf) ->
+    merge_envs(SchemaMod, RawConf).
 
 %% @doc Read merged cluster + local overrides.
 read_override_confs() ->
@@ -362,47 +353,58 @@ read_override_confs() ->
     hocon:deep_merge(ClusterOverrides, LocalOverrides).
 
 %% keep the raw and non-raw conf has the same keys to make update raw conf easier.
-raw_conf_with_default(SchemaMod, RootNames, RawConf, #{raw_with_default := true}) ->
-    Fun = fun(Name, Acc) ->
-        case maps:is_key(Name, RawConf) of
-            true ->
-                Acc;
-            false ->
-                case lists:keyfind(Name, 1, hocon_schema:roots(SchemaMod)) of
-                    false ->
-                        Acc;
-                    {_, {_, Schema}} ->
-                        Acc#{Name => schema_default(Schema)}
-                end
-        end
-    end,
-    RawDefault = lists:foldl(Fun, #{}, RootNames),
-    maps:merge(RawConf, fill_defaults(SchemaMod, RawDefault, #{}));
-raw_conf_with_default(_SchemaMod, _RootNames, RawConf, _Opts) ->
-    RawConf.
+fill_defaults_for_all_roots(SchemaMod, RawConf0) ->
+    RootSchemas = hocon_schema:roots(SchemaMod),
+    %% the roots which are missing from the loaded configs
+    MissingRoots = lists:filtermap(
+        fun({BinName, Sc}) ->
+            case maps:is_key(BinName, RawConf0) orelse is_already_loaded(BinName) of
+                true -> false;
+                false -> {true, Sc}
+            end
+        end,
+        RootSchemas
+    ),
+    RawConf = lists:foldl(
+        fun({RootName, Schema}, Acc) ->
+            Acc#{bin(RootName) => seed_default(Schema)}
+        end,
+        RawConf0,
+        MissingRoots
+    ),
+    fill_defaults(RawConf).
 
-schema_default(Schema) ->
-    case hocon_schema:field_schema(Schema, type) of
-        ?ARRAY(_) ->
-            [];
-        _ ->
-            #{}
+%% So far, this can only return true when testing.
+%% e.g. when testing an app, we need to load its config first
+%% then start emqx_conf application which will load the
+%% possibly empty config again (then filled with defaults).
+is_already_loaded(Name) ->
+    ?MODULE:get_raw([Name], #{}) =/= #{}.
+
+%% if a root is not found in the raw conf, fill it with default values.
+seed_default(Schema) ->
+    case hocon_schema:field_schema(Schema, default) of
+        undefined ->
+            %% so far all roots without a default value are objects
+            #{};
+        Value ->
+            Value
     end.
 
-parse_hocon(HasDeprecatedFile, Conf) ->
+load_config_files(HasDeprecatedFile, Conf) ->
     IncDirs = include_dirs(),
     case do_parse_hocon(HasDeprecatedFile, Conf, IncDirs) of
         {ok, HoconMap} ->
             HoconMap;
         {error, Reason} ->
             ?SLOG(error, #{
-                msg => "failed_to_load_hocon_file",
+                msg => "failed_to_load_config_file",
                 reason => Reason,
                 pwd => file:get_cwd(),
                 include_dirs => IncDirs,
                 config_file => Conf
             }),
-            error(failed_to_load_hocon_file)
+            error(failed_to_load_config_file)
     end.
 
 do_parse_hocon(true, Conf, IncDirs) ->
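Note: the removed `raw_conf_with_default/4` and its replacement `fill_defaults_for_all_roots/2` address the same need: every schema root must exist in the raw config even when absent from the loaded files, seeded from its schema default. A reduced sketch of that seeding step with plain maps instead of hocon schemas (all names here are illustrative only):

    -module(seed_sketch).
    -export([seed_missing_roots/2]).

    %% Defaults maps each root name to its default value; any root missing
    %% from RawConf is inserted, mirroring the filtermap/foldl pair above.
    seed_missing_roots(RawConf, Defaults) ->
        maps:fold(
            fun(Root, Default, Acc) ->
                case maps:is_key(Root, Acc) of
                    true -> Acc;
                    false -> Acc#{Root => Default}
                end
            end,
            RawConf,
            Defaults
        ).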
@@ -547,7 +549,9 @@ save_schema_mod_and_names(SchemaMod) ->
     }).
 
 -ifdef(TEST).
-erase_schema_mod_and_names() ->
+erase_all() ->
+    Names = get_root_names(),
+    lists:foreach(fun erase/1, Names),
     persistent_term:erase(?PERSIS_SCHEMA_MODS).
 -endif.

@@ -665,11 +669,9 @@ do_get_raw(Path, Default) ->
     do_get(?RAW_CONF, Path, Default).
 
 do_get(Type, KeyPath) ->
-    Ref = make_ref(),
-    Res = do_get(Type, KeyPath, Ref),
-    case Res =:= Ref of
-        true -> error({config_not_found, KeyPath});
-        false -> Res
+    case do_get(Type, KeyPath, ?CONFIG_NOT_FOUND_MAGIC) of
+        ?CONFIG_NOT_FOUND_MAGIC -> error({config_not_found, KeyPath});
+        Res -> Res
     end.
 
 do_get(Type, [], Default) ->
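Note: both `find/1` and `do_get/2` switch from a per-call `make_ref()` sentinel to the shared `?CONFIG_NOT_FOUND_MAGIC` atom. The pattern is unchanged: probe with a value that can never be a legitimate config value, then match on it. A standalone illustration (hypothetical module):

    -module(sentinel_sketch).
    -export([fetch/2]).

    -define(NOT_FOUND, '$0tFound').

    %% Returns {ok, Value} or error with a single lookup.
    fetch(Key, Map) ->
        case maps:get(Key, Map, ?NOT_FOUND) of
            ?NOT_FOUND -> error;
            Value -> {ok, Value}
        end.

Unlike a fresh reference, the atom can appear in a pattern position, which is what lets the `case` clauses above match on it directly; it stays safe as long as the atom can never collide with a stored value.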
@@ -18,6 +18,7 @@
 -module(emqx_config_handler).
 
 -include("logger.hrl").
+-include("emqx_schema.hrl").
 -include_lib("hocon/include/hoconsc.hrl").
 
 -behaviour(gen_server).

@@ -447,11 +448,17 @@ merge_to_override_config(RawConf, Opts) ->
 up_req({remove, _Opts}) -> '$remove';
 up_req({{update, Req}, _Opts}) -> Req.
 
-return_change_result(ConfKeyPath, {{update, _Req}, Opts}) ->
-    #{
-        config => emqx_config:get(ConfKeyPath),
-        raw_config => return_rawconf(ConfKeyPath, Opts)
-    };
+return_change_result(ConfKeyPath, {{update, Req}, Opts}) ->
+    case Req =/= ?TOMBSTONE_CONFIG_CHANGE_REQ of
+        true ->
+            #{
+                config => emqx_config:get(ConfKeyPath),
+                raw_config => return_rawconf(ConfKeyPath, Opts)
+            };
+        false ->
+            %% like remove, nothing to return
+            #{}
+    end;
 return_change_result(_ConfKeyPath, {remove, _Opts}) ->
     #{}.

@@ -111,7 +111,7 @@
     listener :: {Type :: atom(), Name :: atom()},
 
     %% Limiter
-    limiter :: maybe(limiter()),
+    limiter :: limiter(),
 
     %% limiter buffer for overload use
     limiter_buffer :: queue:queue(pending_req()),

@@ -974,55 +974,61 @@ handle_cast(Req, State) ->
     list(any()),
     state()
 ) -> _.
+check_limiter(
+    _Needs,
+    Data,
+    WhenOk,
+    Msgs,
+    #state{limiter = infinity} = State
+) ->
+    WhenOk(Data, Msgs, State);
 check_limiter(
     Needs,
     Data,
     WhenOk,
     Msgs,
-    #state{
-        limiter = Limiter,
-        limiter_timer = LimiterTimer,
-        limiter_buffer = Cache
-    } = State
-) when Limiter =/= undefined ->
-    case LimiterTimer of
-        undefined ->
-            case emqx_limiter_container:check_list(Needs, Limiter) of
-                {ok, Limiter2} ->
-                    WhenOk(Data, Msgs, State#state{limiter = Limiter2});
-                {pause, Time, Limiter2} ->
-                    ?SLOG(debug, #{
-                        msg => "pause_time_dueto_rate_limit",
-                        needs => Needs,
-                        time_in_ms => Time
-                    }),
-
-                    Retry = #retry{
-                        types = [Type || {_, Type} <- Needs],
-                        data = Data,
-                        next = WhenOk
-                    },
-
-                    Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2),
-
-                    TRef = start_timer(Time, limit_timeout),
-
-                    {ok, State#state{
-                        limiter = Limiter3,
-                        limiter_timer = TRef
-                    }};
-                {drop, Limiter2} ->
-                    {ok, State#state{limiter = Limiter2}}
-            end;
-        _ ->
-            %% if there has a retry timer,
-            %% cache the operation and execute it after the retry is over
-            %% the maximum length of the cache queue is equal to the active_n
-            New = #pending_req{need = Needs, data = Data, next = WhenOk},
-            {ok, State#state{limiter_buffer = queue:in(New, Cache)}}
-    end;
-check_limiter(_, Data, WhenOk, Msgs, State) ->
-    WhenOk(Data, Msgs, State).
+    #state{limiter_timer = undefined, limiter = Limiter} = State
+) ->
+    case emqx_limiter_container:check_list(Needs, Limiter) of
+        {ok, Limiter2} ->
+            WhenOk(Data, Msgs, State#state{limiter = Limiter2});
+        {pause, Time, Limiter2} ->
+            ?SLOG(debug, #{
+                msg => "pause_time_dueto_rate_limit",
+                needs => Needs,
+                time_in_ms => Time
+            }),
+
+            Retry = #retry{
+                types = [Type || {_, Type} <- Needs],
+                data = Data,
+                next = WhenOk
+            },
+
+            Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2),
+
+            TRef = start_timer(Time, limit_timeout),
+
+            {ok, State#state{
+                limiter = Limiter3,
+                limiter_timer = TRef
+            }};
+        {drop, Limiter2} ->
+            {ok, State#state{limiter = Limiter2}}
+    end;
+check_limiter(
+    Needs,
+    Data,
+    WhenOk,
+    _Msgs,
+    #state{limiter_buffer = Cache} = State
+) ->
+    %% if there has a retry timer,
+    %% cache the operation and execute it after the retry is over
+    %% the maximum length of the cache queue is equal to the active_n
+    New = #pending_req{need = Needs, data = Data, next = WhenOk},
+    {ok, State#state{limiter_buffer = queue:in(New, Cache)}}.
 
 %% try to perform a retry
 -spec retry_limiter(state()) -> _.
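Note: the `check_limiter` refactor above replaces a nested `case` on `limiter_timer` with three function clauses: `infinity` (no limiting at all), no retry timer pending (check immediately), and a catch-all that buffers the request while a retry timer runs. A stripped-down sketch of the same clause ordering (hypothetical names; a failed check here simply falls through to buffering, where the real code also schedules a pause):

    -module(limiter_dispatch_sketch).
    -export([check/3]).

    %% Clause order matters: infinity first, then "no timer pending",
    %% then the catch-all that queues work while a retry is in flight.
    check(_Need, Fun, #{limiter := infinity} = St) ->
        Fun(St);
    check(Need, Fun, #{timer := undefined, tokens := T} = St) when T >= Need ->
        Fun(St#{tokens := T - Need});
    check(Need, Fun, #{buffer := Q} = St) ->
        {buffered, St#{buffer := queue:in({Need, Fun}, Q)}}.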
@@ -22,7 +22,7 @@
 
 %% API
 -export([
-    make_token_bucket_limiter/2,
+    make_local_limiter/2,
     make_ref_limiter/2,
     check/2,
     consume/2,

@@ -32,12 +32,11 @@
     make_future/1,
     available/1
 ]).
--export_type([token_bucket_limiter/0]).
+-export_type([local_limiter/0]).
 
-%% a token bucket limiter with a limiter server's bucket reference
-%% the number of tokens currently available
--type token_bucket_limiter() :: #{
+%% a token bucket limiter which may or not contains a reference to another limiter,
+%% and can be used in a client alone
+-type local_limiter() :: #{
     tokens := non_neg_integer(),
     rate := decimal(),
     capacity := decimal(),

@@ -58,12 +57,12 @@
     retry_ctx =>
         undefined
         %% the retry context
-        | retry_context(token_bucket_limiter()),
+        | retry_context(local_limiter()),
     %% allow to add other keys
     atom => any()
 }.
 
-%% a limiter server's bucket reference
+%% a limiter instance which only contains a reference to another limiter(bucket)
 -type ref_limiter() :: #{
     max_retry_time := non_neg_integer(),
     failure_strategy := failure_strategy(),

@@ -88,7 +87,7 @@
 }.
 
 -type bucket() :: emqx_limiter_bucket_ref:bucket_ref().
--type limiter() :: token_bucket_limiter() | ref_limiter() | infinity.
+-type limiter() :: local_limiter() | ref_limiter() | infinity.
 -type millisecond() :: non_neg_integer().
 
 -type pause_type() :: pause | partial.

@@ -116,7 +115,7 @@
     rate := decimal(),
     initial := non_neg_integer(),
     low_watermark := non_neg_integer(),
-    capacity := decimal(),
+    burst := decimal(),
     divisible := boolean(),
     max_retry_time := non_neg_integer(),
     failure_strategy := failure_strategy()

@@ -134,8 +133,8 @@
 %% API
 %%--------------------------------------------------------------------
 %%@doc create a limiter
--spec make_token_bucket_limiter(limiter_bucket_cfg(), bucket()) -> _.
-make_token_bucket_limiter(Cfg, Bucket) ->
+-spec make_local_limiter(limiter_bucket_cfg(), bucket()) -> _.
+make_local_limiter(Cfg, Bucket) ->
     Cfg#{
         tokens => emqx_limiter_server:get_initial_val(Cfg),
         lasttime => ?NOW,

@@ -312,8 +311,8 @@ on_failure(throw, Limiter) ->
     Message = io_lib:format("limiter consume failed, limiter:~p~n", [Limiter]),
     erlang:throw({rate_check_fail, Message}).
 
--spec do_check_with_parent_limiter(pos_integer(), token_bucket_limiter()) ->
-    inner_check_result(token_bucket_limiter()).
+-spec do_check_with_parent_limiter(pos_integer(), local_limiter()) ->
+    inner_check_result(local_limiter()).
 do_check_with_parent_limiter(
     Need,
     #{

@@ -336,7 +335,7 @@ do_check_with_parent_limiter(
         )
     end.
 
--spec do_reset(pos_integer(), token_bucket_limiter()) -> inner_check_result(token_bucket_limiter()).
+-spec do_reset(pos_integer(), local_limiter()) -> inner_check_result(local_limiter()).
 do_reset(
     Need,
     #{
|
|
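The renames in these emqx_htb_limiter hunks (`token_bucket_limiter` -> `local_limiter`, `make_token_bucket_limiter/2` -> `make_local_limiter/2`) are mechanical, but the intent matters: a local limiter is a token bucket held in the client process itself, optionally backed by a server-side bucket reference, whereas a `ref_limiter` only points at a shared bucket. A hedged usage sketch — the config literal is illustrative, not a documented default:

    %% build a client-local limiter over the always-full bucket and check it
    Cfg = #{rate => 100, initial => 0, low_watermark => 0, burst => 0,
            divisible => false, max_retry_time => timer:seconds(10),
            failure_strategy => force},
    Limiter = emqx_htb_limiter:make_local_limiter(
        Cfg, emqx_limiter_bucket_ref:infinity_bucket()),
    case emqx_htb_limiter:check(10, Limiter) of
        {ok, _Limiter2} -> allowed;
        _PauseOrDrop -> limited
    end.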
@@ -34,16 +34,18 @@

 -export_type([container/0, check_result/0]).

--type container() :: #{
-    limiter_type() => undefined | limiter(),
-    %% the retry context of the limiter
-    retry_key() =>
-        undefined
-        | retry_context()
-        | future(),
-    %% the retry context of the container
-    retry_ctx := undefined | any()
-}.
+-type container() ::
+    infinity
+    | #{
+        limiter_type() => undefined | limiter(),
+        %% the retry context of the limiter
+        retry_key() =>
+            undefined
+            | retry_context()
+            | future(),
+        %% the retry context of the container
+        retry_ctx := undefined | any()
+    }.

 -type future() :: pos_integer().
 -type limiter_id() :: emqx_limiter_schema:limiter_id().

@@ -78,7 +80,20 @@ get_limiter_by_types(Id, Types, BucketCfgs) ->
         {ok, Limiter} = emqx_limiter_server:connect(Id, Type, BucketCfgs),
         add_new(Type, Limiter, Acc)
     end,
-    lists:foldl(Init, #{retry_ctx => undefined}, Types).
+    Container = lists:foldl(Init, #{retry_ctx => undefined}, Types),
+    case
+        lists:all(
+            fun(Type) ->
+                maps:get(Type, Container) =:= infinity
+            end,
+            Types
+        )
+    of
+        true ->
+            infinity;
+        _ ->
+            Container
+    end.

 -spec add_new(limiter_type(), limiter(), container()) -> container().
 add_new(Type, Limiter, Container) ->

@@ -89,11 +104,15 @@ add_new(Type, Limiter, Container) ->

 %% @doc check the specified limiter
 -spec check(pos_integer(), limiter_type(), container()) -> check_result().
+check(_Need, _Type, infinity) ->
+    {ok, infinity};
 check(Need, Type, Container) ->
     check_list([{Need, Type}], Container).

 %% @doc check multiple limiters
 -spec check_list(list({pos_integer(), limiter_type()}), container()) -> check_result().
+check_list(_Need, infinity) ->
+    {ok, infinity};
 check_list([{Need, Type} | T], Container) ->
     Limiter = maps:get(Type, Container),
     case emqx_htb_limiter:check(Need, Limiter) of

@@ -121,11 +140,15 @@ check_list([], Container) ->

 %% @doc retry the specified limiter
 -spec retry(limiter_type(), container()) -> check_result().
+retry(_Type, infinity) ->
+    {ok, infinity};
 retry(Type, Container) ->
     retry_list([Type], Container).

 %% @doc retry multiple limiters
 -spec retry_list(list(limiter_type()), container()) -> check_result().
+retry_list(_Types, infinity) ->
+    {ok, infinity};
 retry_list([Type | T], Container) ->
     Key = ?RETRY_KEY(Type),
     case Container of
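The behavioral point of these emqx_limiter_container hunks: a container whose requested types all resolve to infinity limiters now collapses to the bare atom `infinity`, and every hot-path entry point short-circuits on it. Directly from the added clauses:

    %% any check against an 'infinity' container succeeds without map lookups
    {ok, infinity} = emqx_limiter_container:check(1, bytes, infinity),
    {ok, infinity} = emqx_limiter_container:retry(bytes, infinity).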
@@ -30,6 +30,12 @@
     post_config_update/5
 ]).

+-export([
+    find_root/1,
+    insert_root/2,
+    delete_root/1
+]).
+
 -export([
     start_server/1,
     start_server/2,

@@ -62,6 +68,7 @@

 -define(UID(Id, Type), {Id, Type}).
 -define(TAB, emqx_limiter_counters).
+-define(ROOT_ID, root).

 %%--------------------------------------------------------------------
 %% API

@@ -104,9 +111,25 @@ insert_bucket(Id, Type, Bucket) ->
     ).

 -spec delete_bucket(limiter_id(), limiter_type()) -> true.
-delete_bucket(Type, Id) ->
+delete_bucket(Id, Type) ->
     ets:delete(?TAB, ?UID(Id, Type)).

+-spec find_root(limiter_type()) ->
+    {ok, bucket_ref()} | undefined.
+find_root(Type) ->
+    find_bucket(?ROOT_ID, Type).
+
+-spec insert_root(
+    limiter_type(),
+    bucket_ref()
+) -> boolean().
+insert_root(Type, Bucket) ->
+    insert_bucket(?ROOT_ID, Type, Bucket).
+
+-spec delete_root(limiter_type()) -> true.
+delete_root(Type) ->
+    delete_bucket(?ROOT_ID, Type).
+
 post_config_update([limiter], _Config, NewConf, _OldConf, _AppEnvs) ->
     Types = lists:delete(client, maps:keys(NewConf)),
     _ = [on_post_config_update(Type, NewConf) || Type <- Types],
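Two things happen in these emqx_limiter_manager hunks. First, `delete_bucket/2` had its head arguments swapped, so the old code built its `?UID(Id, Type)` key from reversed values and never deleted the row that `insert_bucket/3` had written. Second, the new `*_root` helpers file one node-wide bucket per limiter type under the reserved `?ROOT_ID`. A lifecycle sketch, assuming the manager's ETS table is already up (the `new/3` arguments mirror `init_tree` in the server hunks below; the rate literal is illustrative):

    Counter = counters:new(8, [write_concurrency]),
    Root = emqx_limiter_bucket_ref:new(Counter, 1, 1000),
    _ = emqx_limiter_manager:insert_root(bytes, Root),
    {ok, Root} = emqx_limiter_manager:find_root(bytes),
    true = emqx_limiter_manager:delete_root(bytes).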
@@ -32,15 +32,17 @@
     get_bucket_cfg_path/2,
     desc/1,
     types/0,
-    calc_capacity/1
+    calc_capacity/1,
+    extract_with_type/2,
+    default_client_config/0
 ]).

 -define(KILOBYTE, 1024).
--define(BUCKET_KEYS, [
-    {bytes, bucket_infinity},
-    {messages, bucket_infinity},
-    {connection, bucket_limit},
-    {message_routing, bucket_infinity}
+-define(LISTENER_BUCKET_KEYS, [
+    bytes,
+    messages,
+    connection,
+    message_routing
 ]).

 -type limiter_type() ::

@@ -94,30 +96,33 @@
 namespace() -> limiter.

 roots() ->
-    [{limiter, hoconsc:mk(hoconsc:ref(?MODULE, limiter), #{importance => ?IMPORTANCE_HIDDEN})}].
+    [
+        {limiter,
+            hoconsc:mk(hoconsc:ref(?MODULE, limiter), #{
+                importance => ?IMPORTANCE_HIDDEN
+            })}
+    ].

 fields(limiter) ->
     [
         {Type,
             ?HOCON(?R_REF(node_opts), #{
                 desc => ?DESC(Type),
-                default => #{},
                 importance => ?IMPORTANCE_HIDDEN,
                 aliases => alias_of_type(Type)
             })}
     || Type <- types()
     ] ++
         [
+            %% This is an undocumented feature, and it won't be support anymore
            {client,
                ?HOCON(
                    ?R_REF(client_fields),
                    #{
                        desc => ?DESC(client),
                        importance => ?IMPORTANCE_HIDDEN,
-                        default => maps:from_list([
-                            {erlang:atom_to_binary(Type), #{}}
-                            || Type <- types()
-                        ])
+                        required => {false, recursively},
+                        deprecated => {since, "5.0.25"}
                    }
                )}
        ];

@@ -131,11 +136,9 @@ fields(node_opts) ->
         })}
     ];
 fields(client_fields) ->
-    client_fields(types(), #{default => #{}});
-fields(bucket_infinity) ->
+    client_fields(types());
+fields(bucket_opts) ->
     fields_of_bucket(<<"infinity">>);
-fields(bucket_limit) ->
-    fields_of_bucket(<<"1000/s">>);
 fields(client_opts) ->
     [
         {rate, ?HOCON(rate(), #{default => <<"infinity">>, desc => ?DESC(rate)})},

@@ -194,10 +197,9 @@ fields(client_opts) ->
         )}
     ];
 fields(listener_fields) ->
-    composite_bucket_fields(?BUCKET_KEYS, listener_client_fields);
+    composite_bucket_fields(?LISTENER_BUCKET_KEYS, listener_client_fields);
 fields(listener_client_fields) ->
-    {Types, _} = lists:unzip(?BUCKET_KEYS),
-    client_fields(Types, #{required => false});
+    client_fields(?LISTENER_BUCKET_KEYS);
 fields(Type) ->
     simple_bucket_field(Type).

@@ -205,10 +207,8 @@ desc(limiter) ->
     "Settings for the rate limiter.";
 desc(node_opts) ->
     "Settings for the limiter of the node level.";
-desc(bucket_infinity) ->
+desc(bucket_opts) ->
     "Settings for the bucket.";
-desc(bucket_limit) ->
-    desc(bucket_infinity);
 desc(client_opts) ->
     "Settings for the client in bucket level.";
 desc(client_fields) ->

@@ -241,6 +241,31 @@ calc_capacity(#{rate := infinity}) ->
 calc_capacity(#{rate := Rate, burst := Burst}) ->
     erlang:floor(1000 * Rate / default_period()) + Burst.

+extract_with_type(_Type, undefined) ->
+    undefined;
+extract_with_type(Type, #{client := ClientCfg} = BucketCfg) ->
+    BucketVal = maps:find(Type, BucketCfg),
+    ClientVal = maps:find(Type, ClientCfg),
+    merge_client_bucket(Type, ClientVal, BucketVal);
+extract_with_type(Type, BucketCfg) ->
+    BucketVal = maps:find(Type, BucketCfg),
+    merge_client_bucket(Type, undefined, BucketVal).
+
+%% Since the client configuration can be absent and be a undefined value,
+%% but we must need some basic settings to control the behaviour of the limiter,
+%% so here add this helper function to generate a default setting.
+%% This is a temporary workaround until we found a better way to simplify.
+default_client_config() ->
+    #{
+        rate => infinity,
+        initial => 0,
+        low_watermark => 0,
+        burst => 0,
+        divisible => false,
+        max_retry_time => timer:seconds(10),
+        failure_strategy => force
+    }.
+
 %%--------------------------------------------------------------------
 %% Internal functions
 %%--------------------------------------------------------------------

@@ -360,14 +385,14 @@ apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit).

 %% A bucket with only one type
 simple_bucket_field(Type) when is_atom(Type) ->
-    fields(bucket_infinity) ++
+    fields(bucket_opts) ++
         [
             {client,
                 ?HOCON(
                     ?R_REF(?MODULE, client_opts),
                     #{
                         desc => ?DESC(client),
-                        required => false,
+                        required => {false, recursively},
                         importance => importance_of_type(Type),
                         aliases => alias_of_type(Type)
                     }

@@ -378,13 +403,13 @@ simple_bucket_field(Type) when is_atom(Type) ->
 composite_bucket_fields(Types, ClientRef) ->
     [
         {Type,
-            ?HOCON(?R_REF(?MODULE, Opts), #{
+            ?HOCON(?R_REF(?MODULE, bucket_opts), #{
                 desc => ?DESC(?MODULE, Type),
-                required => false,
+                required => {false, recursively},
                 importance => importance_of_type(Type),
                 aliases => alias_of_type(Type)
             })}
-     || {Type, Opts} <- Types
+     || Type <- Types
    ] ++
        [
            {client,

@@ -392,7 +417,7 @@ composite_bucket_fields(Types, ClientRef) ->
                 ?R_REF(?MODULE, ClientRef),
                 #{
                     desc => ?DESC(client),
-                    required => false
+                    required => {false, recursively}
                 }
             )}
        ].

@@ -415,11 +440,12 @@ fields_of_bucket(Default) ->
         })}
     ].

-client_fields(Types, Meta) ->
+client_fields(Types) ->
     [
         {Type,
-            ?HOCON(?R_REF(client_opts), Meta#{
+            ?HOCON(?R_REF(client_opts), #{
                 desc => ?DESC(Type),
+                required => false,
                 importance => importance_of_type(Type),
                 aliases => alias_of_type(Type)
             })}

@@ -441,3 +467,12 @@ alias_of_type(bytes) ->
     [bytes_in];
 alias_of_type(_) ->
     [].
+
+merge_client_bucket(Type, {ok, ClientVal}, {ok, BucketVal}) ->
+    #{Type => BucketVal, client => #{Type => ClientVal}};
+merge_client_bucket(Type, {ok, ClientVal}, _) ->
+    #{client => #{Type => ClientVal}};
+merge_client_bucket(Type, _, {ok, BucketVal}) ->
+    #{Type => BucketVal};
+merge_client_bucket(_, _, _) ->
+    undefined.
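The new schema helpers deserve a worked example. `extract_with_type/2` narrows a listener's full limiter config down to one type, keeping the matching `client` section alongside it, and `default_client_config/0` backfills the client side when it is absent. A sketch with an illustrative config literal:

    Limiter = #{connection => #{rate => 1000},
                client => #{connection => #{rate => 100}}},
    %% keeps only the 'connection' parts, bucket and client side by side
    #{connection := _, client := #{connection := _}} =
        emqx_limiter_schema:extract_with_type(connection, Limiter),
    %% no client section configured -> fall back to permissive defaults
    #{rate := infinity, failure_strategy := force} =
        emqx_limiter_schema:default_client_config().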
@@ -59,7 +59,8 @@
     burst := rate(),
     %% token generation interval(second)
     period := pos_integer(),
-    produced := float()
+    produced := float(),
+    correction := emqx_limiter_decimal:zero_or_float()
 }.

 -type bucket() :: #{

@@ -98,6 +99,7 @@
 %% minimum coefficient for overloaded limiter
 -define(OVERLOAD_MIN_ALLOC, 0.3).
 -define(COUNTER_SIZE, 8).
+-define(ROOT_COUNTER_IDX, 1).

 -export_type([index/0]).
 -import(emqx_limiter_decimal, [add/2, sub/2, mul/2, put_to_counter/3]).

@@ -110,47 +112,24 @@
 -spec connect(
     limiter_id(),
     limiter_type(),
-    bucket_name() | #{limiter_type() => bucket_name() | undefined}
+    hocons:config() | undefined
 ) ->
     {ok, emqx_htb_limiter:limiter()} | {error, _}.
-%% If no bucket path is set in config, there will be no limit
-connect(_Id, _Type, undefined) ->
-    {ok, emqx_htb_limiter:make_infinity_limiter()};
-connect(Id, Type, Cfg) ->
-    case find_limiter_cfg(Type, Cfg) of
-        {_ClientCfg, undefined, _NodeCfg} ->
-            {ok, emqx_htb_limiter:make_infinity_limiter()};
-        {#{rate := infinity}, #{rate := infinity}, #{rate := infinity}} ->
-            {ok, emqx_htb_limiter:make_infinity_limiter()};
-        {ClientCfg, #{rate := infinity}, #{rate := infinity}} ->
-            {ok,
-                emqx_htb_limiter:make_token_bucket_limiter(
-                    ClientCfg, emqx_limiter_bucket_ref:infinity_bucket()
-                )};
-        {
-            #{rate := CliRate} = ClientCfg,
-            #{rate := BucketRate} = BucketCfg,
-            _
-        } ->
-            case emqx_limiter_manager:find_bucket(Id, Type) of
-                {ok, Bucket} ->
-                    BucketSize = emqx_limiter_schema:calc_capacity(BucketCfg),
-                    CliSize = emqx_limiter_schema:calc_capacity(ClientCfg),
-                    {ok,
-                        if
-                            CliRate < BucketRate orelse CliSize < BucketSize ->
-                                emqx_htb_limiter:make_token_bucket_limiter(ClientCfg, Bucket);
-                            true ->
-                                emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket)
-                        end};
-                undefined ->
-                    ?SLOG(error, #{msg => "bucket_not_found", type => Type, id => Id}),
-                    {error, invalid_bucket}
-            end
-    end.
+%% undefined is the default situation, no limiter setting by default
+connect(Id, Type, undefined) ->
+    create_limiter(Id, Type, undefined, undefined);
+connect(Id, Type, #{rate := _} = Cfg) ->
+    create_limiter(Id, Type, maps:get(client, Cfg, undefined), Cfg);
+connect(Id, Type, Cfg) ->
+    create_limiter(
+        Id,
+        Type,
+        emqx_utils_maps:deep_get([client, Type], Cfg, undefined),
+        maps:get(Type, Cfg, undefined)
+    ).

 -spec add_bucket(limiter_id(), limiter_type(), hocons:config() | undefined) -> ok.
-add_bucket(_Id, _Type, undefine) ->
+add_bucket(_Id, _Type, undefined) ->
     ok;
 add_bucket(Id, Type, Cfg) ->
     ?CALL(Type, {add_bucket, Id, Cfg}).

@@ -288,7 +267,8 @@ handle_info(Info, State) ->
     Reason :: normal | shutdown | {shutdown, term()} | term(),
     State :: term()
 ) -> any().
-terminate(_Reason, _State) ->
+terminate(_Reason, #{type := Type}) ->
+    emqx_limiter_manager:delete_root(Type),
     ok.

 %%--------------------------------------------------------------------

@@ -343,10 +323,14 @@ oscillation(
     oscillate(Interval),
     Ordereds = get_ordered_buckets(Buckets),
     {Alloced, Buckets2} = transverse(Ordereds, Flow, 0.0, Buckets),
-    maybe_burst(State#{
-        buckets := Buckets2,
-        root := Root#{produced := Produced + Alloced}
-    }).
+    State2 = maybe_adjust_root_tokens(
+        State#{
+            buckets := Buckets2,
+            root := Root#{produced := Produced + Alloced}
+        },
+        Alloced
+    ),
+    maybe_burst(State2).

 %% @doc horizontal spread
 -spec transverse(

@@ -419,6 +403,24 @@ get_ordered_buckets(Buckets) ->
         Buckets
     ).

+-spec maybe_adjust_root_tokens(state(), float()) -> state().
+maybe_adjust_root_tokens(#{root := #{rate := infinity}} = State, _Alloced) ->
+    State;
+maybe_adjust_root_tokens(#{root := #{rate := Rate}} = State, Alloced) when Alloced >= Rate ->
+    State;
+maybe_adjust_root_tokens(#{root := #{rate := Rate} = Root, counter := Counter} = State, Alloced) ->
+    InFlow = Rate - Alloced,
+    Token = counters:get(Counter, ?ROOT_COUNTER_IDX),
+    case Token >= Rate of
+        true ->
+            State;
+        _ ->
+            Available = erlang:min(Rate - Token, InFlow),
+            {Inc, Root2} = emqx_limiter_correction:add(Available, Root),
+            counters:add(Counter, ?ROOT_COUNTER_IDX, Inc),
+            State#{root := Root2}
+    end.
+
 -spec maybe_burst(state()) -> state().
 maybe_burst(
     #{

@@ -482,12 +484,16 @@ init_tree(Type) when is_atom(Type) ->
     Cfg = emqx:get_config([limiter, Type]),
     init_tree(Type, Cfg).

-init_tree(Type, Cfg) ->
+init_tree(Type, #{rate := Rate} = Cfg) ->
+    Counter = counters:new(?COUNTER_SIZE, [write_concurrency]),
+    RootBucket = emqx_limiter_bucket_ref:new(Counter, ?ROOT_COUNTER_IDX, Rate),
+    emqx_limiter_manager:insert_root(Type, RootBucket),
     #{
         type => Type,
         root => make_root(Cfg),
-        counter => counters:new(?COUNTER_SIZE, [write_concurrency]),
-        index => 0,
+        counter => Counter,
+        %% The first slot is reserved for the root
+        index => ?ROOT_COUNTER_IDX,
         buckets => #{}
     }.

@@ -497,7 +503,8 @@ make_root(#{rate := Rate, burst := Burst}) ->
         rate => Rate,
         burst => Burst,
         period => emqx_limiter_schema:default_period(),
-        produced => 0.0
+        produced => 0.0,
+        correction => 0
     }.

 do_add_bucket(_Id, #{rate := infinity}, #{root := #{rate := infinity}} = State) ->

@@ -571,25 +578,61 @@ call(Type, Msg) ->
             gen_server:call(Pid, Msg)
     end.

-find_limiter_cfg(Type, #{rate := _} = Cfg) ->
-    {find_client_cfg(Type, maps:get(client, Cfg, undefined)), Cfg, find_node_cfg(Type)};
-find_limiter_cfg(Type, Cfg) ->
-    {
-        find_client_cfg(Type, emqx_utils_maps:deep_get([client, Type], Cfg, undefined)),
-        maps:get(Type, Cfg, undefined),
-        find_node_cfg(Type)
-    }.
-
-find_client_cfg(Type, BucketCfg) ->
-    NodeCfg = emqx:get_config([limiter, client, Type], undefined),
-    merge_client_cfg(NodeCfg, BucketCfg).
-
-merge_client_cfg(undefined, BucketCfg) ->
-    BucketCfg;
-merge_client_cfg(NodeCfg, undefined) ->
-    NodeCfg;
-merge_client_cfg(NodeCfg, BucketCfg) ->
-    maps:merge(NodeCfg, BucketCfg).
-
-find_node_cfg(Type) ->
-    emqx:get_config([limiter, Type], #{rate => infinity, burst => 0}).
+create_limiter(Id, Type, #{rate := Rate} = ClientCfg, BucketCfg) when Rate =/= infinity ->
+    create_limiter_with_client(Id, Type, ClientCfg, BucketCfg);
+create_limiter(Id, Type, _, BucketCfg) ->
+    create_limiter_without_client(Id, Type, BucketCfg).
+
+%% create a limiter with the client-level configuration
+create_limiter_with_client(Id, Type, ClientCfg, BucketCfg) ->
+    case find_referenced_bucket(Id, Type, BucketCfg) of
+        false ->
+            {ok, emqx_htb_limiter:make_local_limiter(ClientCfg, infinity)};
+        {ok, Bucket, RefCfg} ->
+            create_limiter_with_ref(Bucket, ClientCfg, RefCfg);
+        Error ->
+            Error
+    end.
+
+%% create a limiter only with the referenced configuration
+create_limiter_without_client(Id, Type, BucketCfg) ->
+    case find_referenced_bucket(Id, Type, BucketCfg) of
+        false ->
+            {ok, emqx_htb_limiter:make_infinity_limiter()};
+        {ok, Bucket, RefCfg} ->
+            ClientCfg = emqx_limiter_schema:default_client_config(),
+            create_limiter_with_ref(Bucket, ClientCfg, RefCfg);
+        Error ->
+            Error
+    end.
+
+create_limiter_with_ref(
+    Bucket,
+    #{rate := CliRate} = ClientCfg,
+    #{rate := RefRate}
+) when CliRate < RefRate ->
+    {ok, emqx_htb_limiter:make_local_limiter(ClientCfg, Bucket)};
+create_limiter_with_ref(Bucket, ClientCfg, _) ->
+    {ok, emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket)}.
+
+%% this is a listener(server)-level reference
+find_referenced_bucket(Id, Type, #{rate := Rate} = Cfg) when Rate =/= infinity ->
+    case emqx_limiter_manager:find_bucket(Id, Type) of
+        {ok, Bucket} ->
+            {ok, Bucket, Cfg};
+        _ ->
+            ?SLOG(error, #{msg => "bucket not found", type => Type, id => Id}),
+            {error, invalid_bucket}
+    end;
+%% this is a node-level reference
+find_referenced_bucket(Id, Type, _) ->
+    case emqx:get_config([limiter, Type], undefined) of
+        #{rate := infinity} ->
+            false;
+        undefined ->
+            ?SLOG(error, #{msg => "invalid limiter type", type => Type, id => Id}),
+            {error, invalid_bucket};
+        NodeCfg ->
+            {ok, Bucket} = emqx_limiter_manager:find_root(Type),
+            {ok, Bucket, NodeCfg}
+    end.
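The new `create_limiter` path in emqx_limiter_server replaces the old three-way config merge with an explicit two-level lookup: a listener-level bucket wins when its rate is finite; otherwise the node-wide root bucket is consulted; and only a rate-infinity node config means "no limit at all". A condensed decision sketch — a pure, hypothetical helper, not a function in this diff:

    %% resolution order for the bucket a client limiter should reference
    referenced_bucket(ListenerRate, _NodeRate) when ListenerRate =/= infinity ->
        listener_bucket;
    referenced_bucket(_, infinity) ->
        unlimited;
    referenced_bucket(_, _) ->
        node_root_bucket.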
@@ -20,6 +20,7 @@
 -elvis([{elvis_style, dont_repeat_yourself, #{min_complexity => 10000}}]).

 -include("emqx_mqtt.hrl").
+-include("emqx_schema.hrl").
 -include("logger.hrl").
 -include_lib("snabbkaffe/include/snabbkaffe.hrl").

@@ -33,7 +34,8 @@
     is_running/1,
     current_conns/2,
     max_conns/2,
-    id_example/0
+    id_example/0,
+    default_max_conn/0
 ]).

 -export([

@@ -61,8 +63,11 @@
 -export([certs_dir/2]).
 -endif.

+-type listener_id() :: atom() | binary().
+
 -define(CONF_KEY_PATH, [listeners, '?', '?']).
 -define(TYPES_STRING, ["tcp", "ssl", "ws", "wss", "quic"]).
+-define(MARK_DEL, ?TOMBSTONE_CONFIG_CHANGE_REQ).

 -spec id_example() -> atom().
 id_example() -> 'tcp:default'.

@@ -105,19 +110,22 @@ do_list_raw() ->

 format_raw_listeners({Type0, Conf}) ->
     Type = binary_to_atom(Type0),
-    lists:map(
-        fun({LName, LConf0}) when is_map(LConf0) ->
-            Bind = parse_bind(LConf0),
-            Running = is_running(Type, listener_id(Type, LName), LConf0#{bind => Bind}),
-            LConf1 = maps:remove(<<"authentication">>, LConf0),
-            LConf3 = maps:put(<<"running">>, Running, LConf1),
-            CurrConn =
-                case Running of
-                    true -> current_conns(Type, LName, Bind);
-                    false -> 0
-                end,
-            LConf4 = maps:put(<<"current_connections">>, CurrConn, LConf3),
-            {Type0, LName, LConf4}
+    lists:filtermap(
+        fun
+            ({LName, LConf0}) when is_map(LConf0) ->
+                Bind = parse_bind(LConf0),
+                Running = is_running(Type, listener_id(Type, LName), LConf0#{bind => Bind}),
+                LConf1 = maps:remove(<<"authentication">>, LConf0),
+                LConf2 = maps:put(<<"running">>, Running, LConf1),
+                CurrConn =
+                    case Running of
+                        true -> current_conns(Type, LName, Bind);
+                        false -> 0
+                    end,
+                LConf = maps:put(<<"current_connections">>, CurrConn, LConf2),
+                {true, {Type0, LName, LConf}};
+            ({_LName, _MarkDel}) ->
+                false
         end,
         maps:to_list(Conf)
     ).

@@ -195,7 +203,7 @@ start() ->
     ok = emqx_config_handler:add_handler(?CONF_KEY_PATH, ?MODULE),
     foreach_listeners(fun start_listener/3).

--spec start_listener(atom()) -> ok | {error, term()}.
+-spec start_listener(listener_id()) -> ok | {error, term()}.
 start_listener(ListenerId) ->
     apply_on_listener(ListenerId, fun start_listener/3).

@@ -246,7 +254,7 @@ start_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
 restart() ->
     foreach_listeners(fun restart_listener/3).

--spec restart_listener(atom()) -> ok | {error, term()}.
+-spec restart_listener(listener_id()) -> ok | {error, term()}.
 restart_listener(ListenerId) ->
     apply_on_listener(ListenerId, fun restart_listener/3).

@@ -271,7 +279,7 @@ stop() ->
     _ = emqx_config_handler:remove_handler(?CONF_KEY_PATH),
     foreach_listeners(fun stop_listener/3).

--spec stop_listener(atom()) -> ok | {error, term()}.
+-spec stop_listener(listener_id()) -> ok | {error, term()}.
 stop_listener(ListenerId) ->
     apply_on_listener(ListenerId, fun stop_listener/3).

@@ -419,7 +427,9 @@ do_start_listener(quic, ListenerName, #{bind := Bind} = Opts) ->
     end.

 %% Update the listeners at runtime
-pre_config_update([listeners, Type, Name], {create, NewConf}, undefined) ->
+pre_config_update([listeners, Type, Name], {create, NewConf}, V) when
+    V =:= undefined orelse V =:= ?TOMBSTONE_VALUE
+->
     CertsDir = certs_dir(Type, Name),
     {ok, convert_certs(CertsDir, NewConf)};
 pre_config_update([listeners, _Type, _Name], {create, _NewConf}, _RawConf) ->

@@ -434,6 +444,8 @@ pre_config_update([listeners, Type, Name], {update, Request}, RawConf) ->
 pre_config_update([listeners, _Type, _Name], {action, _Action, Updated}, RawConf) ->
     NewConf = emqx_utils_maps:deep_merge(RawConf, Updated),
     {ok, NewConf};
+pre_config_update([listeners, _Type, _Name], ?MARK_DEL, _RawConf) ->
+    {ok, ?TOMBSTONE_VALUE};
 pre_config_update(_Path, _Request, RawConf) ->
     {ok, RawConf}.

@@ -441,13 +453,15 @@ post_config_update([listeners, Type, Name], {create, _Request}, NewConf, undefin
     start_listener(Type, Name, NewConf);
 post_config_update([listeners, Type, Name], {update, _Request}, NewConf, OldConf, _AppEnvs) ->
     try_clear_ssl_files(certs_dir(Type, Name), NewConf, OldConf),
+    ok = maybe_unregister_ocsp_stapling_refresh(Type, Name, NewConf),
     case NewConf of
         #{enabled := true} -> restart_listener(Type, Name, {OldConf, NewConf});
         _ -> ok
     end;
-post_config_update([listeners, _Type, _Name], '$remove', undefined, undefined, _AppEnvs) ->
-    ok;
-post_config_update([listeners, Type, Name], '$remove', undefined, OldConf, _AppEnvs) ->
+post_config_update([listeners, Type, Name], Op, _, OldConf, _AppEnvs) when
+    Op =:= ?MARK_DEL andalso is_map(OldConf)
+->
+    ok = unregister_ocsp_stapling_refresh(Type, Name),
     case stop_listener(Type, Name, OldConf) of
         ok ->
             _ = emqx_authentication:delete_chain(listener_id(Type, Name)),

@@ -460,10 +474,18 @@ post_config_update([listeners, Type, Name], {action, _Action, _}, NewConf, OldCo
     #{enabled := NewEnabled} = NewConf,
     #{enabled := OldEnabled} = OldConf,
     case {NewEnabled, OldEnabled} of
-        {true, true} -> restart_listener(Type, Name, {OldConf, NewConf});
-        {true, false} -> start_listener(Type, Name, NewConf);
-        {false, true} -> stop_listener(Type, Name, OldConf);
-        {false, false} -> stop_listener(Type, Name, OldConf)
+        {true, true} ->
+            ok = maybe_unregister_ocsp_stapling_refresh(Type, Name, NewConf),
+            restart_listener(Type, Name, {OldConf, NewConf});
+        {true, false} ->
+            ok = maybe_unregister_ocsp_stapling_refresh(Type, Name, NewConf),
+            start_listener(Type, Name, NewConf);
+        {false, true} ->
+            ok = unregister_ocsp_stapling_refresh(Type, Name),
+            stop_listener(Type, Name, OldConf);
+        {false, false} ->
+            ok = unregister_ocsp_stapling_refresh(Type, Name),
+            stop_listener(Type, Name, OldConf)
     end;
 post_config_update(_Path, _Request, _NewConf, _OldConf, _AppEnvs) ->
     ok.

@@ -472,7 +494,7 @@ esockd_opts(ListenerId, Type, Opts0) ->
     Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0),
     Limiter = limiter(Opts0),
     Opts2 =
-        case maps:get(connection, Limiter, undefined) of
+        case emqx_limiter_schema:extract_with_type(connection, Limiter) of
             undefined ->
                 Opts1;
             BucketCfg ->

@@ -601,6 +623,7 @@ format_bind(Bin) when is_binary(Bin) ->
 listener_id(Type, ListenerName) ->
     list_to_atom(lists:append([str(Type), ":", str(ListenerName)])).

+-spec parse_listener_id(listener_id()) -> {ok, #{type => atom(), name => atom()}} | {error, term()}.
 parse_listener_id(Id) ->
     case string:split(str(Id), ":", leading) of
         [Type, Name] ->

@@ -616,7 +639,7 @@ zone(Opts) ->
     maps:get(zone, Opts, undefined).

 limiter(Opts) ->
-    maps:get(limiter, Opts, #{}).
+    maps:get(limiter, Opts, undefined).

 add_limiter_bucket(Id, #{limiter := Limiter}) ->
     maps:fold(

@@ -813,3 +836,22 @@ inject_crl_config(
     };
 inject_crl_config(Conf) ->
     Conf.
+
+maybe_unregister_ocsp_stapling_refresh(
+    ssl = Type, Name, #{ssl_options := #{ocsp := #{enable_ocsp_stapling := false}}} = _Conf
+) ->
+    unregister_ocsp_stapling_refresh(Type, Name),
+    ok;
+maybe_unregister_ocsp_stapling_refresh(_Type, _Name, _Conf) ->
+    ok.
+
+unregister_ocsp_stapling_refresh(Type, Name) ->
+    ListenerId = listener_id(Type, Name),
+    emqx_ocsp_cache:unregister_listener(ListenerId),
+    ok.
+
+%% There is currently an issue with frontend
+%% infinity is not a good value for it, so we use 5m for now
+default_max_conn() ->
+    %% TODO: <<"infinity">>
+    5_000_000.
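Deleted listeners are now kept in the raw config as tombstone markers rather than removed keys, which is why `format_raw_listeners` switches from `lists:map/2` to `lists:filtermap/2` and skips any value that is not a map. The pattern in isolation (the `marked_for_deletion` atom stands in for whatever `?TOMBSTONE_VALUE` expands to):

    Listeners = #{<<"default">> => #{<<"bind">> => 1883},
                  <<"old">> => marked_for_deletion},
    Live = lists:filtermap(
        fun({Name, Conf}) when is_map(Conf) -> {true, {Name, Conf}};
           ({_Name, _Tombstone}) -> false
        end,
        maps:to_list(Listeners)
    ).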
@@ -37,7 +37,6 @@
     max_qos_allowed => emqx_types:qos(),
     retain_available => boolean(),
     wildcard_subscription => boolean(),
-    subscription_identifiers => boolean(),
     shared_subscription => boolean(),
     exclusive_subscription => boolean()
 }.

@@ -58,18 +57,17 @@
     exclusive_subscription
 ]).

--define(DEFAULT_CAPS, #{
-    max_packet_size => ?MAX_PACKET_SIZE,
-    max_clientid_len => ?MAX_CLIENTID_LEN,
-    max_topic_alias => ?MAX_TOPIC_AlIAS,
-    max_topic_levels => ?MAX_TOPIC_LEVELS,
-    max_qos_allowed => ?QOS_2,
-    retain_available => true,
-    wildcard_subscription => true,
-    subscription_identifiers => true,
-    shared_subscription => true,
-    exclusive_subscription => false
-}).
+-define(DEFAULT_CAPS_KEYS, [
+    max_packet_size,
+    max_clientid_len,
+    max_topic_alias,
+    max_topic_levels,
+    max_qos_allowed,
+    retain_available,
+    wildcard_subscription,
+    shared_subscription,
+    exclusive_subscription
+]).

 -spec check_pub(
     emqx_types:zone(),

@@ -88,7 +86,7 @@ check_pub(Zone, Flags) when is_map(Flags) ->
         error ->
             Flags
     end,
-    maps:with(?PUBCAP_KEYS, get_caps(Zone))
+    get_caps(?PUBCAP_KEYS, Zone)
 ).

 do_check_pub(#{topic_levels := Levels}, #{max_topic_levels := Limit}) when

@@ -111,7 +109,7 @@ do_check_pub(_Flags, _Caps) ->
 ) ->
     ok_or_error(emqx_types:reason_code()).
 check_sub(ClientInfo = #{zone := Zone}, Topic, SubOpts) ->
-    Caps = maps:with(?SUBCAP_KEYS, get_caps(Zone)),
+    Caps = get_caps(?SUBCAP_KEYS, Zone),
     Flags = lists:foldl(
         fun
             (max_topic_levels, Map) ->

@@ -152,10 +150,12 @@ do_check_sub(_Flags, _Caps, _, _) ->
     ok.

 get_caps(Zone) ->
-    lists:foldl(
-        fun({K, V}, Acc) ->
-            Acc#{K => emqx_config:get_zone_conf(Zone, [mqtt, K], V)}
-        end,
-        #{},
-        maps:to_list(?DEFAULT_CAPS)
+    get_caps(?DEFAULT_CAPS_KEYS, Zone).
+
+get_caps(Keys, Zone) ->
+    maps:with(
+        Keys,
+        maps:merge(
+            emqx_config:get([mqtt]),
+            emqx_config:get_zone_conf(Zone, [mqtt])
+        )
     ).
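The caps lookup in emqx_mqtt_caps changes from per-key zone overrides with hardcoded fallbacks to a single merge of the global `mqtt` config with the zone's `mqtt` section, followed by a `maps:with/2` projection. The merge semantics in isolation (plain maps, values illustrative):

    Global = #{max_qos_allowed => 2, retain_available => true},
    ZoneMqtt = #{max_qos_allowed => 1},
    %% zone values win; anything the zone omits falls back to the global value
    #{max_qos_allowed := 1, retain_available := true} =
        maps:with([max_qos_allowed, retain_available], maps:merge(Global, ZoneMqtt)).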
@@ -30,6 +30,7 @@
     sni_fun/2,
     fetch_response/1,
     register_listener/2,
+    unregister_listener/1,
     inject_sni_fun/2
 ]).

@@ -107,6 +108,9 @@ fetch_response(ListenerID) ->
 register_listener(ListenerID, Opts) ->
     gen_server:call(?MODULE, {register_listener, ListenerID, Opts}, ?CALL_TIMEOUT).

+unregister_listener(ListenerID) ->
+    gen_server:cast(?MODULE, {unregister_listener, ListenerID}).
+
 -spec inject_sni_fun(emqx_listeners:listener_id(), map()) -> map().
 inject_sni_fun(ListenerID, Conf0) ->
     SNIFun = emqx_const_v1:make_sni_fun(ListenerID),

@@ -160,6 +164,18 @@ handle_call({register_listener, ListenerID, Conf}, _From, State0) ->
 handle_call(Call, _From, State) ->
     {reply, {error, {unknown_call, Call}}, State}.

+handle_cast({unregister_listener, ListenerID}, State0) ->
+    State2 =
+        case maps:take(?REFRESH_TIMER(ListenerID), State0) of
+            error ->
+                State0;
+            {TRef, State1} ->
+                emqx_utils:cancel_timer(TRef),
+                State1
+        end,
+    State = maps:remove({refresh_interval, ListenerID}, State2),
+    ?tp(ocsp_cache_listener_unregistered, #{listener_id => ListenerID}),
+    {noreply, State};
 handle_cast(_Cast, State) ->
     {noreply, State}.
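Note the asymmetry in emqx_ocsp_cache: unregistration is a `gen_server:cast`, so listener teardown never blocks on the cache process, while registration stays a synchronous `call` because the refresh timer must be in place before the listener serves TLS connections. From the stop path it is fire-and-forget:

    %% gen_server:cast/2 always returns ok, so this cannot block or fail here
    ok = emqx_ocsp_cache:unregister_listener('ssl:default').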
@@ -23,6 +23,7 @@
 -dialyzer(no_fail_call).
 -elvis([{elvis_style, invalid_dynamic_call, disable}]).

+-include("emqx_schema.hrl").
 -include("emqx_authentication.hrl").
 -include("emqx_access_control.hrl").
 -include_lib("typerefl/include/types.hrl").

@@ -42,7 +43,12 @@
 -type ip_port() :: tuple() | integer().
 -type cipher() :: map().
 -type port_number() :: 1..65536.
--type server_parse_option() :: #{default_port => port_number(), no_port => boolean()}.
+-type server_parse_option() :: #{
+    default_port => port_number(),
+    no_port => boolean(),
+    supported_schemes => [string()],
+    default_scheme => string()
+}.
 -type url() :: binary().
 -type json_binary() :: binary().

@@ -61,12 +67,19 @@
 -typerefl_from_string({url/0, emqx_schema, to_url}).
 -typerefl_from_string({json_binary/0, emqx_schema, to_json_binary}).

+-type parsed_server() :: #{
+    hostname := string(),
+    port => port_number(),
+    scheme => string()
+}.
+
 -export([
     validate_heap_size/1,
     user_lookup_fun_tr/2,
     validate_alarm_actions/1,
     non_empty_string/1,
-    validations/0
+    validations/0,
+    naive_env_interpolation/1
 ]).

 -export([qos/0]).

@@ -99,6 +112,12 @@
     convert_servers/2
 ]).

+%% tombstone types
+-export([
+    tombstone_map/2,
+    get_tombstone_map_value_type/1
+]).
+
 -behaviour(hocon_schema).

 -reflect_type([

@@ -776,41 +795,48 @@ fields("listeners") ->
     [
         {"tcp",
             sc(
-                map(name, ref("mqtt_tcp_listener")),
+                tombstone_map(name, ref("mqtt_tcp_listener")),
                 #{
                     desc => ?DESC(fields_listeners_tcp),
+                    converter => fun(X, _) ->
+                        ensure_default_listener(X, tcp)
+                    end,
                     required => {false, recursively}
                 }
             )},
         {"ssl",
             sc(
-                map(name, ref("mqtt_ssl_listener")),
+                tombstone_map(name, ref("mqtt_ssl_listener")),
                 #{
                     desc => ?DESC(fields_listeners_ssl),
+                    converter => fun(X, _) -> ensure_default_listener(X, ssl) end,
                     required => {false, recursively}
                 }
             )},
         {"ws",
             sc(
-                map(name, ref("mqtt_ws_listener")),
+                tombstone_map(name, ref("mqtt_ws_listener")),
                 #{
                     desc => ?DESC(fields_listeners_ws),
+                    converter => fun(X, _) -> ensure_default_listener(X, ws) end,
                     required => {false, recursively}
                 }
             )},
         {"wss",
             sc(
-                map(name, ref("mqtt_wss_listener")),
+                tombstone_map(name, ref("mqtt_wss_listener")),
                 #{
                     desc => ?DESC(fields_listeners_wss),
+                    converter => fun(X, _) -> ensure_default_listener(X, wss) end,
                     required => {false, recursively}
                 }
             )},
         {"quic",
             sc(
-                map(name, ref("mqtt_quic_listener")),
+                tombstone_map(name, ref("mqtt_quic_listener")),
                 #{
                     desc => ?DESC(fields_listeners_quic),
+                    converter => fun keep_default_tombstone/2,
                     required => {false, recursively}
                 }
             )}
@ -821,7 +847,7 @@ fields("crl_cache") ->
|
||||||
%% same URL. If they had diverging timeout options, it would be
|
%% same URL. If they had diverging timeout options, it would be
|
||||||
%% confusing.
|
%% confusing.
|
||||||
[
|
[
|
||||||
{"refresh_interval",
|
{refresh_interval,
|
||||||
sc(
|
sc(
|
||||||
duration(),
|
duration(),
|
||||||
#{
|
#{
|
||||||
|
@ -829,7 +855,7 @@ fields("crl_cache") ->
|
||||||
desc => ?DESC("crl_cache_refresh_interval")
|
desc => ?DESC("crl_cache_refresh_interval")
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
{"http_timeout",
|
{http_timeout,
|
||||||
sc(
|
sc(
|
||||||
duration(),
|
duration(),
|
||||||
#{
|
#{
|
||||||
|
@ -837,7 +863,7 @@ fields("crl_cache") ->
|
||||||
desc => ?DESC("crl_cache_refresh_http_timeout")
|
desc => ?DESC("crl_cache_refresh_http_timeout")
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
{"capacity",
|
{capacity,
|
||||||
sc(
|
sc(
|
||||||
pos_integer(),
|
pos_integer(),
|
||||||
#{
|
#{
|
||||||
|
@ -909,15 +935,17 @@ fields("mqtt_quic_listener") ->
|
||||||
string(),
|
string(),
|
||||||
#{
|
#{
|
||||||
%% TODO: deprecated => {since, "5.1.0"}
|
%% TODO: deprecated => {since, "5.1.0"}
|
||||||
desc => ?DESC(fields_mqtt_quic_listener_certfile)
|
desc => ?DESC(fields_mqtt_quic_listener_certfile),
|
||||||
|
importance => ?IMPORTANCE_HIDDEN
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
{"keyfile",
|
{"keyfile",
|
||||||
sc(
|
sc(
|
||||||
string(),
|
string(),
|
||||||
%% TODO: deprecated => {since, "5.1.0"}
|
|
||||||
#{
|
#{
|
||||||
desc => ?DESC(fields_mqtt_quic_listener_keyfile)
|
%% TODO: deprecated => {since, "5.1.0"}
|
||||||
|
desc => ?DESC(fields_mqtt_quic_listener_keyfile),
|
||||||
|
importance => ?IMPORTANCE_HIDDEN
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
{"ciphers", ciphers_schema(quic)},
|
{"ciphers", ciphers_schema(quic)},
|
||||||
|
@ -993,7 +1021,10 @@ fields("mqtt_quic_listener") ->
|
||||||
duration_ms(),
|
duration_ms(),
|
||||||
#{
|
#{
|
||||||
default => 0,
|
default => 0,
|
||||||
desc => ?DESC(fields_mqtt_quic_listener_idle_timeout)
|
desc => ?DESC(fields_mqtt_quic_listener_idle_timeout),
|
||||||
|
%% TODO: deprecated => {since, "5.1.0"}
|
||||||
|
%% deprecated, use idle_timeout_ms instead
|
||||||
|
importance => ?IMPORTANCE_HIDDEN
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
{"idle_timeout_ms",
|
{"idle_timeout_ms",
|
||||||
|
@ -1007,7 +1038,10 @@ fields("mqtt_quic_listener") ->
|
||||||
duration_ms(),
|
duration_ms(),
|
||||||
#{
|
#{
|
||||||
default => <<"10s">>,
|
default => <<"10s">>,
|
||||||
desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout)
|
desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout),
|
||||||
|
%% TODO: deprecated => {since, "5.1.0"}
|
||||||
|
%% use handshake_idle_timeout_ms
|
||||||
|
importance => ?IMPORTANCE_HIDDEN
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
{"handshake_idle_timeout_ms",
|
{"handshake_idle_timeout_ms",
|
||||||
|
@ -1021,7 +1055,10 @@ fields("mqtt_quic_listener") ->
|
||||||
duration_ms(),
|
duration_ms(),
|
||||||
#{
|
#{
|
||||||
default => 0,
|
default => 0,
|
||||||
desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval)
|
desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval),
|
||||||
|
%% TODO: deprecated => {since, "5.1.0"}
|
||||||
|
%% use keep_alive_interval_ms instead
|
||||||
|
importance => ?IMPORTANCE_HIDDEN
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
{"keep_alive_interval_ms",
|
{"keep_alive_interval_ms",
|
||||||
|
@ -1354,7 +1391,7 @@ fields("ssl_client_opts") ->
|
||||||
client_ssl_opts_schema(#{});
|
client_ssl_opts_schema(#{});
|
||||||
fields("ocsp") ->
|
fields("ocsp") ->
|
||||||
[
|
[
|
||||||
{"enable_ocsp_stapling",
|
{enable_ocsp_stapling,
|
||||||
sc(
|
sc(
|
||||||
boolean(),
|
boolean(),
|
||||||
#{
|
#{
|
||||||
|
@ -1362,7 +1399,7 @@ fields("ocsp") ->
|
||||||
desc => ?DESC("server_ssl_opts_schema_enable_ocsp_stapling")
|
desc => ?DESC("server_ssl_opts_schema_enable_ocsp_stapling")
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
{"responder_url",
|
{responder_url,
|
||||||
sc(
|
sc(
|
||||||
url(),
|
url(),
|
||||||
#{
|
#{
|
||||||
|
@ -1370,7 +1407,7 @@ fields("ocsp") ->
|
||||||
desc => ?DESC("server_ssl_opts_schema_ocsp_responder_url")
|
desc => ?DESC("server_ssl_opts_schema_ocsp_responder_url")
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
{"issuer_pem",
|
{issuer_pem,
|
||||||
sc(
|
sc(
|
||||||
binary(),
|
binary(),
|
||||||
#{
|
#{
|
||||||
|
@ -1378,7 +1415,7 @@ fields("ocsp") ->
|
||||||
desc => ?DESC("server_ssl_opts_schema_ocsp_issuer_pem")
|
desc => ?DESC("server_ssl_opts_schema_ocsp_issuer_pem")
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
{"refresh_interval",
|
{refresh_interval,
|
||||||
sc(
|
sc(
|
||||||
duration(),
|
duration(),
|
||||||
#{
|
#{
|
||||||
|
@ -1386,7 +1423,7 @@ fields("ocsp") ->
|
||||||
desc => ?DESC("server_ssl_opts_schema_ocsp_refresh_interval")
|
desc => ?DESC("server_ssl_opts_schema_ocsp_refresh_interval")
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
{"refresh_http_timeout",
|
{refresh_http_timeout,
|
||||||
sc(
|
sc(
|
||||||
duration(),
|
duration(),
|
||||||
#{
|
#{
|
||||||
|
@ -1489,10 +1526,8 @@ fields("broker") ->
|
||||||
sc(
|
sc(
|
||||||
boolean(),
|
boolean(),
|
||||||
#{
|
#{
|
||||||
%% TODO: deprecated => {since, "5.1.0"}
|
deprecated => {since, "5.1.0"},
|
||||||
%% in favor of session message re-dispatch at termination
|
importance => ?IMPORTANCE_HIDDEN,
|
||||||
%% we will stop supporting dispatch acks for shared
|
|
||||||
%% subscriptions.
|
|
||||||
default => false,
|
default => false,
|
||||||
desc => ?DESC(broker_shared_dispatch_ack_enabled)
|
desc => ?DESC(broker_shared_dispatch_ack_enabled)
|
||||||
}
|
}
|
||||||
|
@ -1938,7 +1973,7 @@ base_listener(Bind) ->
|
||||||
sc(
|
sc(
|
||||||
hoconsc:union([infinity, pos_integer()]),
|
hoconsc:union([infinity, pos_integer()]),
|
||||||
#{
|
#{
|
||||||
default => <<"infinity">>,
|
default => emqx_listeners:default_max_conn(),
|
||||||
desc => ?DESC(base_listener_max_connections)
|
desc => ?DESC(base_listener_max_connections)
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
|
@ -2314,12 +2349,12 @@ server_ssl_opts_schema(Defaults, IsRanchListener) ->
|
||||||
Field
|
Field
|
||||||
|| not IsRanchListener,
|
|| not IsRanchListener,
|
||||||
Field <- [
|
Field <- [
|
||||||
{"gc_after_handshake",
|
{gc_after_handshake,
|
||||||
sc(boolean(), #{
|
sc(boolean(), #{
|
||||||
default => false,
|
default => false,
|
||||||
desc => ?DESC(server_ssl_opts_schema_gc_after_handshake)
|
desc => ?DESC(server_ssl_opts_schema_gc_after_handshake)
|
||||||
})},
|
})},
|
||||||
{"ocsp",
|
{ocsp,
|
||||||
sc(
|
sc(
|
||||||
ref("ocsp"),
|
ref("ocsp"),
|
||||||
#{
|
#{
|
||||||
|
@ -2327,7 +2362,7 @@ server_ssl_opts_schema(Defaults, IsRanchListener) ->
|
||||||
validator => fun ocsp_inner_validator/1
|
validator => fun ocsp_inner_validator/1
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
{"enable_crl_check",
|
{enable_crl_check,
|
||||||
sc(
|
sc(
|
||||||
boolean(),
|
boolean(),
|
||||||
#{
|
#{
|
||||||
|
@ -2790,6 +2825,7 @@ authentication(Which) ->
|
||||||
hoconsc:mk(Type, #{
|
hoconsc:mk(Type, #{
|
||||||
desc => Desc,
|
desc => Desc,
|
||||||
converter => fun ensure_array/2,
|
converter => fun ensure_array/2,
|
||||||
|
default => [],
|
||||||
importance => Importance
|
importance => Importance
|
||||||
}).
|
}).
|
||||||
|
|
||||||
|
@@ -2898,7 +2934,7 @@ servers_validator(Opts, Required) ->
 %% `no_port': by default it's `false', when set to `true',
 %% a `throw' exception is raised if the port is found.
 -spec parse_server(undefined | string() | binary(), server_parse_option()) ->
-    {string(), port_number()}.
+    undefined | parsed_server().
 parse_server(Str, Opts) ->
     case parse_servers(Str, Opts) of
         undefined ->
@@ -2912,7 +2948,7 @@ parse_server(Str, Opts) ->
 %% @doc Parse comma separated `host[:port][,host[:port]]' endpoints
 %% into a list of `{Host, Port}' tuples or just `Host' string.
 -spec parse_servers(undefined | string() | binary(), server_parse_option()) ->
-    [{string(), port_number()}].
+    undefined | [parsed_server()].
 parse_servers(undefined, _Opts) ->
     %% should not parse 'undefined' as string,
     %% not to throw exception either,
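
A minimal usage sketch of the two reworked specs (not part of the commit; the host, port, and option values are assumptions):

    %% parse_server/2 now yields a map (parsed_server()) instead of a tuple:
    1> emqx_schema:parse_server("localhost:1883", #{default_port => 1883}).
    #{hostname => "localhost", port => 1883}
    %% and 'undefined' input stays 'undefined' rather than raising:
    2> emqx_schema:parse_servers(undefined, #{}).
    undefined
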
@@ -2958,6 +2994,9 @@ split_host_port(Str) ->
 do_parse_server(Str, Opts) ->
     DefaultPort = maps:get(default_port, Opts, undefined),
     NotExpectingPort = maps:get(no_port, Opts, false),
+    DefaultScheme = maps:get(default_scheme, Opts, undefined),
+    SupportedSchemes = maps:get(supported_schemes, Opts, []),
+    NotExpectingScheme = (not is_list(DefaultScheme)) andalso length(SupportedSchemes) =:= 0,
     case is_integer(DefaultPort) andalso NotExpectingPort of
         true ->
             %% either provide a default port from schema,
@@ -2966,22 +3005,129 @@ do_parse_server(Str, Opts) ->
         false ->
             ok
     end,
+    case is_list(DefaultScheme) andalso (not lists:member(DefaultScheme, SupportedSchemes)) of
+        true ->
+            %% inconsistent schema
+            error("bad_schema");
+        false ->
+            ok
+    end,
     %% do not split with space, there should be no space allowed between host and port
-    case string:tokens(Str, ":") of
-        [Hostname, Port] ->
-            NotExpectingPort andalso throw("not_expecting_port_number"),
-            {check_hostname(Hostname), parse_port(Port)};
-        [Hostname] ->
-            case is_integer(DefaultPort) of
-                true ->
-                    {check_hostname(Hostname), DefaultPort};
-                false when NotExpectingPort ->
-                    check_hostname(Hostname);
-                false ->
-                    throw("missing_port_number")
-            end;
-        _ ->
-            throw("bad_host_port")
+    Tokens = string:tokens(Str, ":"),
+    Context = #{
+        not_expecting_port => NotExpectingPort,
+        not_expecting_scheme => NotExpectingScheme,
+        default_port => DefaultPort,
+        default_scheme => DefaultScheme,
+        opts => Opts
+    },
+    check_server_parts(Tokens, Context).
+
+check_server_parts([Scheme, "//" ++ Hostname, Port], Context) ->
+    #{
+        not_expecting_scheme := NotExpectingScheme,
+        not_expecting_port := NotExpectingPort,
+        opts := Opts
+    } = Context,
+    NotExpectingPort andalso throw("not_expecting_port_number"),
+    NotExpectingScheme andalso throw("not_expecting_scheme"),
+    #{
+        scheme => check_scheme(Scheme, Opts),
+        hostname => check_hostname(Hostname),
+        port => parse_port(Port)
+    };
+check_server_parts([Scheme, "//" ++ Hostname], Context) ->
+    #{
+        not_expecting_scheme := NotExpectingScheme,
+        not_expecting_port := NotExpectingPort,
+        default_port := DefaultPort,
+        opts := Opts
+    } = Context,
+    NotExpectingScheme andalso throw("not_expecting_scheme"),
+    case is_integer(DefaultPort) of
+        true ->
+            #{
+                scheme => check_scheme(Scheme, Opts),
+                hostname => check_hostname(Hostname),
+                port => DefaultPort
+            };
+        false when NotExpectingPort ->
+            #{
+                scheme => check_scheme(Scheme, Opts),
+                hostname => check_hostname(Hostname)
+            };
+        false ->
+            throw("missing_port_number")
+    end;
+check_server_parts([Hostname, Port], Context) ->
+    #{
+        not_expecting_port := NotExpectingPort,
+        default_scheme := DefaultScheme
+    } = Context,
+    NotExpectingPort andalso throw("not_expecting_port_number"),
+    case is_list(DefaultScheme) of
+        false ->
+            #{
+                hostname => check_hostname(Hostname),
+                port => parse_port(Port)
+            };
+        true ->
+            #{
+                scheme => DefaultScheme,
+                hostname => check_hostname(Hostname),
+                port => parse_port(Port)
+            }
+    end;
+check_server_parts([Hostname], Context) ->
+    #{
+        not_expecting_scheme := NotExpectingScheme,
+        not_expecting_port := NotExpectingPort,
+        default_port := DefaultPort,
+        default_scheme := DefaultScheme
+    } = Context,
+    case is_integer(DefaultPort) orelse NotExpectingPort of
+        true ->
+            ok;
+        false ->
+            throw("missing_port_number")
+    end,
+    case is_list(DefaultScheme) orelse NotExpectingScheme of
+        true ->
+            ok;
+        false ->
+            throw("missing_scheme")
+    end,
+    case {is_integer(DefaultPort), is_list(DefaultScheme)} of
+        {true, true} ->
+            #{
+                scheme => DefaultScheme,
+                hostname => check_hostname(Hostname),
+                port => DefaultPort
+            };
+        {true, false} ->
+            #{
+                hostname => check_hostname(Hostname),
+                port => DefaultPort
+            };
+        {false, true} ->
+            #{
+                scheme => DefaultScheme,
+                hostname => check_hostname(Hostname)
+            };
+        {false, false} ->
+            #{hostname => check_hostname(Hostname)}
+    end;
+check_server_parts(_Tokens, _Context) ->
+    throw("bad_host_port").
+
+check_scheme(Str, Opts) ->
+    SupportedSchemes = maps:get(supported_schemes, Opts, []),
+    IsSupported = lists:member(Str, SupportedSchemes),
+    case IsSupported of
+        true ->
+            Str;
+        false ->
+            throw("unsupported_scheme")
     end.
 
 check_hostname(Str) ->
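
A hedged sketch of the scheme handling the new check_server_parts/2 clauses introduce (option names come from the hunk above; the concrete scheme strings are illustrative assumptions):

    1> Opts = #{supported_schemes => ["http", "https"], default_scheme => "http", default_port => 80}.
    2> emqx_schema:parse_server("https://example.com:443", Opts).
    #{scheme => "https", hostname => "example.com", port => 443}
    %% bare hostname: both defaults kick in
    3> emqx_schema:parse_server("example.com", Opts).
    #{scheme => "http", hostname => "example.com", port => 80}
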
@@ -3084,3 +3230,138 @@ assert_required_field(Conf, Key, ErrorMessage) ->
         _ ->
             ok
     end.
+
+default_listener(tcp) ->
+    #{
+        <<"bind">> => <<"0.0.0.0:1883">>
+    };
+default_listener(ws) ->
+    #{
+        <<"bind">> => <<"0.0.0.0:8083">>,
+        <<"websocket">> => #{<<"mqtt_path">> => <<"/mqtt">>}
+    };
+default_listener(SSLListener) ->
+    %% The env variable is resolved in emqx_tls_lib by calling naive_env_interpolate
+    CertFile = fun(Name) ->
+        iolist_to_binary("${EMQX_ETC_DIR}/" ++ filename:join(["certs", Name]))
+    end,
+    SslOptions = #{
+        <<"cacertfile">> => CertFile(<<"cacert.pem">>),
+        <<"certfile">> => CertFile(<<"cert.pem">>),
+        <<"keyfile">> => CertFile(<<"key.pem">>)
+    },
+    case SSLListener of
+        ssl ->
+            #{
+                <<"bind">> => <<"0.0.0.0:8883">>,
+                <<"ssl_options">> => SslOptions
+            };
+        wss ->
+            #{
+                <<"bind">> => <<"0.0.0.0:8084">>,
+                <<"ssl_options">> => SslOptions,
+                <<"websocket">> => #{<<"mqtt_path">> => <<"/mqtt">>}
+            }
+    end.
+
+%% @doc This function helps to perform a naive string interpolation which
+%% only looks at the first segment of the string and tries to replace it.
+%% For example
+%%  "$MY_FILE_PATH"
+%%  "${MY_FILE_PATH}"
+%%  "$ENV_VARIABLE/sub/path"
+%%  "${ENV_VARIABLE}/sub/path"
+%%  "${ENV_VARIABLE}\sub\path" # windows
+%% This function returns undefined if the input is undefined
+%% otherwise always return string.
+naive_env_interpolation(undefined) ->
+    undefined;
+naive_env_interpolation(Bin) when is_binary(Bin) ->
+    naive_env_interpolation(unicode:characters_to_list(Bin, utf8));
+naive_env_interpolation("$" ++ Maybe = Original) ->
+    {Env, Tail} = split_path(Maybe),
+    case resolve_env(Env) of
+        {ok, Path} ->
+            filename:join([Path, Tail]);
+        error ->
+            Original
+    end;
+naive_env_interpolation(Other) ->
+    Other.
+
+split_path(Path) ->
+    split_path(Path, []).
+
+split_path([], Acc) ->
+    {lists:reverse(Acc), []};
+split_path([Char | Rest], Acc) when Char =:= $/ orelse Char =:= $\\ ->
+    {lists:reverse(Acc), string:trim(Rest, leading, "/\\")};
+split_path([Char | Rest], Acc) ->
+    split_path(Rest, [Char | Acc]).
+
+resolve_env(Name0) ->
+    Name = string:trim(Name0, both, "{}"),
+    Value = os:getenv(Name),
+    case Value =/= false andalso Value =/= "" of
+        true ->
+            {ok, Value};
+        false ->
+            special_env(Name)
+    end.
+
+-ifdef(TEST).
+%% when running tests, we need to mock the env variables
+special_env("EMQX_ETC_DIR") ->
+    {ok, filename:join([code:lib_dir(emqx), etc])};
+special_env("EMQX_LOG_DIR") ->
+    {ok, "log"};
+special_env(_Name) ->
+    %% only in tests
+    error.
+-else.
+special_env(_Name) -> error.
+-endif.
+
+%% The tombstone atom.
+tombstone() ->
+    ?TOMBSTONE_TYPE.
+
+%% Make a map type, the value of which is allowed to be 'marked_for_deletion'
+%% 'marked_for_delition' is a special value which means the key is deleted.
+%% This is used to support the 'delete' operation in configs,
+%% since deleting the key would result in default value being used.
+tombstone_map(Name, Type) ->
+    %% marked_for_deletion must be the last member of the union
+    %% because we need to first union member to populate the default values
+    map(Name, ?UNION([Type, ?TOMBSTONE_TYPE])).
+
+%% inverse of mark_del_map
+get_tombstone_map_value_type(Schema) ->
+    %% TODO: violation of abstraction, expose an API in hoconsc
+    %% hoconsc:map_value_type(Schema)
+    ?MAP(_Name, Union) = hocon_schema:field_schema(Schema, type),
+    %% TODO: violation of abstraction, fix hoconsc:union_members/1
+    ?UNION(Members) = Union,
+    Tombstone = tombstone(),
+    [Type, Tombstone] = hoconsc:union_members(Members),
+    Type.
+
+%% Keep the 'default' tombstone, but delete others.
+keep_default_tombstone(Map, _Opts) when is_map(Map) ->
+    maps:filter(
+        fun(Key, Value) ->
+            Key =:= <<"default">> orelse Value =/= ?TOMBSTONE_VALUE
+        end,
+        Map
+    );
+keep_default_tombstone(Value, _Opts) ->
+    Value.
+
+ensure_default_listener(undefined, ListenerType) ->
+    %% let the schema's default value do its job
+    #{<<"default">> => default_listener(ListenerType)};
+ensure_default_listener(#{<<"default">> := _} = Map, _ListenerType) ->
+    keep_default_tombstone(Map, #{});
+ensure_default_listener(Map, ListenerType) ->
+    NewMap = Map#{<<"default">> => default_listener(ListenerType)},
+    keep_default_tombstone(NewMap, #{}).
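
A hedged sketch of naive_env_interpolation/1 in action (the environment value is an assumption for illustration):

    1> os:putenv("EMQX_ETC_DIR", "/opt/emqx/etc").
    true
    2> emqx_schema:naive_env_interpolation(<<"${EMQX_ETC_DIR}/certs/cert.pem">>).
    "/opt/emqx/etc/certs/cert.pem"
    %% unresolvable variables fall through unchanged:
    3> emqx_schema:naive_env_interpolation("$NO_SUCH_VAR/sub/path").
    "$NO_SUCH_VAR/sub/path"
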
@@ -165,7 +165,7 @@ strategy(Group) ->
 
 -spec ack_enabled() -> boolean().
 ack_enabled() ->
-    emqx:get_config([broker, shared_dispatch_ack_enabled]).
+    emqx:get_config([broker, shared_dispatch_ack_enabled], false).
 
 do_dispatch(SubPid, _Group, Topic, Msg, _Type) when SubPid =:= self() ->
     %% Deadlock otherwise
@@ -181,7 +181,7 @@ do_dispatch(SubPid, _Group, Topic, Msg, retry) ->
 do_dispatch(SubPid, Group, Topic, Msg, fresh) ->
     case ack_enabled() of
         true ->
-            %% FIXME: replace with `emqx_shared_sub_proto:dispatch_with_ack' in 5.2
+            %% TODO: delete this clase after 5.1.0
            do_dispatch_with_ack(SubPid, Group, Topic, Msg);
         false ->
            send(SubPid, Topic, {deliver, Topic, Msg})
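
A small sketch of why the added default matters (not from the commit): once the schema marks the key deprecated and hidden, freshly generated configs may omit it, so the read must not crash on a missing key:

    %% the second argument is the fallback when the key is absent
    emqx:get_config([broker, shared_dispatch_ack_enabled], false).
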
@@ -309,19 +309,19 @@ ensure_ssl_files(Dir, SSL, Opts) ->
     case ensure_ssl_file_key(SSL, RequiredKeys) of
         ok ->
             KeyPaths = ?SSL_FILE_OPT_PATHS ++ ?SSL_FILE_OPT_PATHS_A,
-            ensure_ssl_files(Dir, SSL, KeyPaths, Opts);
+            ensure_ssl_files_per_key(Dir, SSL, KeyPaths, Opts);
         {error, _} = Error ->
             Error
     end.
 
-ensure_ssl_files(_Dir, SSL, [], _Opts) ->
+ensure_ssl_files_per_key(_Dir, SSL, [], _Opts) ->
     {ok, SSL};
-ensure_ssl_files(Dir, SSL, [KeyPath | KeyPaths], Opts) ->
+ensure_ssl_files_per_key(Dir, SSL, [KeyPath | KeyPaths], Opts) ->
     case
         ensure_ssl_file(Dir, KeyPath, SSL, emqx_utils_maps:deep_get(KeyPath, SSL, undefined), Opts)
     of
         {ok, NewSSL} ->
-            ensure_ssl_files(Dir, NewSSL, KeyPaths, Opts);
+            ensure_ssl_files_per_key(Dir, NewSSL, KeyPaths, Opts);
         {error, Reason} ->
             {error, Reason#{which_options => [KeyPath]}}
     end.
@@ -472,7 +472,8 @@ hex_str(Bin) ->
     iolist_to_binary([io_lib:format("~2.16.0b", [X]) || <<X:8>> <= Bin]).
 
 %% @doc Returns 'true' when the file is a valid pem, otherwise {error, Reason}.
-is_valid_pem_file(Path) ->
+is_valid_pem_file(Path0) ->
+    Path = resolve_cert_path_for_read(Path0),
     case file:read_file(Path) of
         {ok, Pem} -> is_pem(Pem) orelse {error, not_pem};
         {error, Reason} -> {error, Reason}
@@ -513,10 +514,16 @@ do_drop_invalid_certs([KeyPath | KeyPaths], SSL) ->
 to_server_opts(Type, Opts) ->
     Versions = integral_versions(Type, maps:get(versions, Opts, undefined)),
     Ciphers = integral_ciphers(Versions, maps:get(ciphers, Opts, undefined)),
-    maps:to_list(Opts#{
-        ciphers => Ciphers,
-        versions => Versions
-    }).
+    Path = fun(Key) -> resolve_cert_path_for_read_strict(maps:get(Key, Opts, undefined)) end,
+    filter(
+        maps:to_list(Opts#{
+            keyfile => Path(keyfile),
+            certfile => Path(certfile),
+            cacertfile => Path(cacertfile),
+            ciphers => Ciphers,
+            versions => Versions
+        })
+    ).
 
 %% @doc Convert hocon-checked tls client options (map()) to
 %% proplist accepted by ssl library.
@@ -530,11 +537,12 @@ to_client_opts(Opts) ->
 to_client_opts(Type, Opts) ->
     GetD = fun(Key, Default) -> fuzzy_map_get(Key, Opts, Default) end,
     Get = fun(Key) -> GetD(Key, undefined) end,
+    Path = fun(Key) -> resolve_cert_path_for_read_strict(Get(Key)) end,
     case GetD(enable, false) of
         true ->
-            KeyFile = ensure_str(Get(keyfile)),
-            CertFile = ensure_str(Get(certfile)),
-            CAFile = ensure_str(Get(cacertfile)),
+            KeyFile = Path(keyfile),
+            CertFile = Path(certfile),
+            CAFile = Path(cacertfile),
             Verify = GetD(verify, verify_none),
             SNI = ensure_sni(Get(server_name_indication)),
             Versions = integral_versions(Type, Get(versions)),
@@ -556,6 +564,31 @@ to_client_opts(Type, Opts) ->
             []
     end.
 
+resolve_cert_path_for_read_strict(Path) ->
+    case resolve_cert_path_for_read(Path) of
+        undefined ->
+            undefined;
+        ResolvedPath ->
+            case filelib:is_regular(ResolvedPath) of
+                true ->
+                    ResolvedPath;
+                false ->
+                    PathToLog = ensure_str(Path),
+                    LogData =
+                        case PathToLog =:= ResolvedPath of
+                            true ->
+                                #{path => PathToLog};
+                            false ->
+                                #{path => PathToLog, resolved_path => ResolvedPath}
+                        end,
+                    ?SLOG(error, LogData#{msg => "cert_file_not_found"}),
+                    undefined
+            end
+    end.
+
+resolve_cert_path_for_read(Path) ->
+    emqx_schema:naive_env_interpolation(Path).
+
 filter([]) -> [];
 filter([{_, undefined} | T]) -> filter(T);
 filter([{_, ""} | T]) -> filter(T);
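
A hedged sketch of the strict resolver's effect on client options (the file name is an assumption): a cert path is env-interpolated first, then checked with filelib:is_regular/1; a miss logs "cert_file_not_found" and becomes 'undefined', which filter/1 silently drops from the proplist handed to ssl:

    1> Opts = #{enable => true, keyfile => <<"${EMQX_ETC_DIR}/certs/missing.pem">>}.
    2> proplists:get_value(keyfile, emqx_tls_lib:to_client_opts(tls, Opts)).
    undefined
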
@@ -27,7 +27,7 @@ format(
     #{level := debug, meta := Meta = #{trace_tag := Tag}, msg := Msg},
     #{payload_encode := PEncode}
 ) ->
-    Time = calendar:system_time_to_rfc3339(erlang:system_time(second)),
+    Time = calendar:system_time_to_rfc3339(erlang:system_time(microsecond), [{unit, microsecond}]),
     ClientId = to_iolist(maps:get(clientid, Meta, "")),
     Peername = maps:get(peername, Meta, ""),
     MetaBin = format_meta(Meta, PEncode),
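
A sketch of the precision change (the sample timestamps are illustrative, not captured output):

    1> calendar:system_time_to_rfc3339(erlang:system_time(microsecond), [{unit, microsecond}]).
    "2023-04-18T10:11:12.345678+08:00"
    %% versus the old second-level call:
    2> calendar:system_time_to_rfc3339(erlang:system_time(second)).
    "2023-04-18T10:11:12+08:00"
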
@@ -90,7 +90,7 @@
     listener :: {Type :: atom(), Name :: atom()},
 
     %% Limiter
-    limiter :: maybe(container()),
+    limiter :: container(),
 
     %% cache operation when overload
     limiter_cache :: queue:queue(cache()),
@@ -579,54 +579,61 @@ handle_timeout(TRef, TMsg, State) ->
     list(any()),
     state()
 ) -> state().
+check_limiter(
+    _Needs,
+    Data,
+    WhenOk,
+    Msgs,
+    #state{limiter = infinity} = State
+) ->
+    WhenOk(Data, Msgs, State);
 check_limiter(
     Needs,
     Data,
     WhenOk,
     Msgs,
-    #state{
-        limiter = Limiter,
-        limiter_timer = LimiterTimer,
-        limiter_cache = Cache
-    } = State
+    #state{limiter_timer = undefined, limiter = Limiter} = State
 ) ->
-    case LimiterTimer of
-        undefined ->
-            case emqx_limiter_container:check_list(Needs, Limiter) of
-                {ok, Limiter2} ->
-                    WhenOk(Data, Msgs, State#state{limiter = Limiter2});
-                {pause, Time, Limiter2} ->
-                    ?SLOG(debug, #{
-                        msg => "pause_time_due_to_rate_limit",
-                        needs => Needs,
-                        time_in_ms => Time
-                    }),
+    case emqx_limiter_container:check_list(Needs, Limiter) of
+        {ok, Limiter2} ->
+            WhenOk(Data, Msgs, State#state{limiter = Limiter2});
+        {pause, Time, Limiter2} ->
+            ?SLOG(debug, #{
+                msg => "pause_time_due_to_rate_limit",
+                needs => Needs,
+                time_in_ms => Time
+            }),
 
             Retry = #retry{
                 types = [Type || {_, Type} <- Needs],
                 data = Data,
                 next = WhenOk
             },
 
             Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2),
 
             TRef = start_timer(Time, limit_timeout),
 
             enqueue(
                 {active, false},
                 State#state{
                     sockstate = blocked,
                     limiter = Limiter3,
                     limiter_timer = TRef
                 }
             );
         {drop, Limiter2} ->
             {ok, State#state{limiter = Limiter2}}
     end;
-        _ ->
-            New = #cache{need = Needs, data = Data, next = WhenOk},
-            State#state{limiter_cache = queue:in(New, Cache)}
-    end.
+check_limiter(
+    Needs,
+    Data,
+    WhenOk,
+    _Msgs,
+    #state{limiter_cache = Cache} = State
+) ->
+    New = #cache{need = Needs, data = Data, next = WhenOk},
+    State#state{limiter_cache = queue:in(New, Cache)}.
 
 -spec retry_limiter(state()) -> state().
 retry_limiter(#state{limiter = Limiter} = State) ->
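
A minimal standalone sketch (not EMQX code) of the clause ordering the rewritten check_limiter/5 relies on: an 'infinity' limiter short-circuits straight to the success continuation, an idle timer means check now, and a pending timer means queue the request:

    -module(limiter_clause_sketch).
    -export([check/3]).

    check(_Needs, Fun, #{limiter := infinity} = St) ->
        %% no token accounting at all
        Fun(St);
    check(Needs, Fun, #{timer := undefined, tokens := Tokens} = St) when Needs =< Tokens ->
        %% stand-in for emqx_limiter_container:check_list/2 succeeding
        Fun(St#{tokens := Tokens - Needs});
    check(_Needs, _Fun, #{timer := undefined} = St) ->
        %% stand-in for the {pause, Time, _} branch: block and arm a timer
        St#{timer := pending};
    check(Needs, Fun, #{cache := Q} = St) ->
        %% a retry timer is already running: cache the request for later
        St#{cache := queue:in({Needs, Fun}, Q)}.
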
@@ -186,7 +186,7 @@ t_session_taken(_) ->
                 false
             end
         end,
-        6000
+        15_000
     ),
     Publish(),
 
@@ -267,13 +267,14 @@ t_chan_info(_) ->
 t_chan_caps(_) ->
     ?assertMatch(
         #{
+            exclusive_subscription := false,
+            max_packet_size := 1048576,
             max_clientid_len := 65535,
             max_qos_allowed := 2,
             max_topic_alias := 65535,
             max_topic_levels := Level,
             retain_available := true,
             shared_subscription := true,
-            subscription_identifiers := true,
             wildcard_subscription := true
         } when is_integer(Level),
         emqx_channel:caps(channel())
@@ -32,6 +32,7 @@
     start_apps/3,
     start_app/2,
     stop_apps/1,
+    stop_apps/2,
     reload/2,
     app_path/2,
     proj_root/0,
@@ -55,12 +56,12 @@
     is_tcp_server_available/2,
     is_tcp_server_available/3,
     load_config/2,
-    load_config/3,
     not_wait_mqtt_payload/1,
     read_schema_configs/2,
     render_config_file/2,
     wait_for/4,
-    wait_mqtt_payload/1
+    wait_mqtt_payload/1,
+    select_free_port/1
 ]).
 
 -export([
@@ -253,11 +254,20 @@ start_app(App, SpecAppConfig, Opts) ->
     case application:ensure_all_started(App) of
         {ok, _} ->
             ok = ensure_dashboard_listeners_started(App),
+            ok = wait_for_app_processes(App),
             ok;
         {error, Reason} ->
             error({failed_to_start_app, App, Reason})
     end.
 
+wait_for_app_processes(emqx_conf) ->
+    %% emqx_conf app has a gen_server which
+    %% initializes its state asynchronously
+    gen_server:call(emqx_cluster_rpc, dummy),
+    ok;
+wait_for_app_processes(_) ->
+    ok.
+
 app_conf_file(emqx_conf) -> "emqx.conf.all";
 app_conf_file(App) -> atom_to_list(App) ++ ".conf".
 
@@ -274,9 +284,9 @@ app_schema(App) ->
 mustache_vars(App, Opts) ->
     ExtraMustacheVars = maps:get(extra_mustache_vars, Opts, #{}),
     Defaults = #{
+        node_cookie => atom_to_list(erlang:get_cookie()),
         platform_data_dir => app_path(App, "data"),
-        platform_etc_dir => app_path(App, "etc"),
-        platform_log_dir => app_path(App, "log")
+        platform_etc_dir => app_path(App, "etc")
     },
     maps:merge(Defaults, ExtraMustacheVars).
 
@@ -304,12 +314,21 @@ generate_config(SchemaModule, ConfigFile) when is_atom(SchemaModule) ->
 
 -spec stop_apps(list()) -> ok.
 stop_apps(Apps) ->
+    stop_apps(Apps, #{}).
+
+stop_apps(Apps, Opts) ->
     [application:stop(App) || App <- Apps ++ [emqx, ekka, mria, mnesia]],
     ok = mria_mnesia:delete_schema(),
     %% to avoid inter-suite flakiness
     application:unset_env(emqx, init_config_load_done),
     persistent_term:erase(?EMQX_AUTHENTICATION_SCHEMA_MODULE_PT_KEY),
-    emqx_config:erase_schema_mod_and_names(),
+    case Opts of
+        #{erase_all_configs := false} ->
+            %% FIXME: this means inter-suite or inter-test dependencies
+            ok;
+        _ ->
+            emqx_config:erase_all()
+    end,
     ok = emqx_config:delete_override_conf_files(),
     application:unset_env(emqx, local_override_conf_file),
     application:unset_env(emqx, cluster_override_conf_file),
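
A hedged usage sketch of the new stop_apps/2 (option name taken from the hunk above); keeping configs alive across stop/start is exactly the inter-test coupling the FIXME warns about:

    %% preserve loaded configs for a follow-up case in the same suite
    emqx_common_test_helpers:stop_apps([emqx_conf], #{erase_all_configs => false}).
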
@@ -478,18 +497,14 @@ copy_certs(emqx_conf, Dest0) ->
 copy_certs(_, _) ->
     ok.
 
-load_config(SchemaModule, Config, Opts) ->
+load_config(SchemaModule, Config) ->
     ConfigBin =
         case is_map(Config) of
             true -> emqx_utils_json:encode(Config);
             false -> Config
         end,
     ok = emqx_config:delete_override_conf_files(),
-    ok = emqx_config:init_load(SchemaModule, ConfigBin, Opts),
-    ok.
-
-load_config(SchemaModule, Config) ->
-    load_config(SchemaModule, Config, #{raw_with_default => false}).
+    ok = emqx_config:init_load(SchemaModule, ConfigBin).
 
 -spec is_all_tcp_servers_available(Servers) -> Result when
     Servers :: [{Host, Port}],
@@ -665,6 +680,7 @@ start_slave(Name, Opts) when is_map(Opts) ->
     SlaveMod = maps:get(peer_mod, Opts, ct_slave),
     Node = node_name(Name),
     put_peer_mod(Node, SlaveMod),
+    Cookie = atom_to_list(erlang:get_cookie()),
     DoStart =
         fun() ->
             case SlaveMod of
@@ -676,7 +692,11 @@ start_slave(Name, Opts) when is_map(Opts) ->
                     {monitor_master, true},
                     {init_timeout, 20_000},
                     {startup_timeout, 20_000},
-                    {erl_flags, erl_flags()}
+                    {erl_flags, erl_flags()},
+                    {env, [
+                        {"HOCON_ENV_OVERRIDE_PREFIX", "EMQX_"},
+                        {"EMQX_NODE__COOKIE", Cookie}
+                    ]}
                 ]
             );
         slave ->
@@ -1241,3 +1261,34 @@ get_or_spawn_janitor() ->
 on_exit(Fun) ->
     Janitor = get_or_spawn_janitor(),
     ok = emqx_test_janitor:push_on_exit_callback(Janitor, Fun).
+
+%%-------------------------------------------------------------------------------
+%% Select a free transport port from the OS
+%%-------------------------------------------------------------------------------
+%% @doc get unused port from OS
+-spec select_free_port(tcp | udp | ssl | quic) -> inets:port_number().
+select_free_port(tcp) ->
+    select_free_port(gen_tcp, listen);
+select_free_port(udp) ->
+    select_free_port(gen_udp, open);
+select_free_port(ssl) ->
+    select_free_port(tcp);
+select_free_port(quic) ->
+    select_free_port(udp).
+
+select_free_port(GenModule, Fun) when
+    GenModule == gen_tcp orelse
+        GenModule == gen_udp
+->
+    {ok, S} = GenModule:Fun(0, [{reuseaddr, true}]),
+    {ok, Port} = inet:port(S),
+    ok = GenModule:close(S),
+    case os:type() of
+        {unix, darwin} ->
+            %% in MacOS, still get address_in_use after close port
+            timer:sleep(500);
+        _ ->
+            skip
+    end,
+    ct:pal("Select free OS port: ~p", [Port]),
+    Port.
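
A short sketch of the helper in use (the listener options are assumptions):

    %% pick an OS-assigned free port instead of hard-coding one
    Port = emqx_common_test_helpers:select_free_port(tcp),
    {ok, LSock} = gen_tcp:listen(Port, [{reuseaddr, true}]),
    ok = gen_tcp:close(LSock).
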
@@ -50,7 +50,6 @@ t_fill_default_values(_) ->
             },
             <<"route_batch_clean">> := false,
             <<"session_locking_strategy">> := quorum,
-            <<"shared_dispatch_ack_enabled">> := false,
             <<"shared_subscription_strategy">> := round_robin
         }
     },
@@ -59,3 +58,22 @@ t_fill_default_values(_) ->
     %% ensure JSON compatible
     _ = emqx_utils_json:encode(WithDefaults),
     ok.
+
+t_init_load(_Config) ->
+    ConfFile = "./test_emqx.conf",
+    ok = file:write_file(ConfFile, <<"">>),
+    ExpectRootNames = lists:sort(hocon_schema:root_names(emqx_schema)),
+    emqx_config:erase_all(),
+    {ok, DeprecatedFile} = application:get_env(emqx, cluster_override_conf_file),
+    ?assertEqual(false, filelib:is_regular(DeprecatedFile), DeprecatedFile),
+    %% Don't has deprecated file
+    ok = emqx_config:init_load(emqx_schema, [ConfFile]),
+    ?assertEqual(ExpectRootNames, lists:sort(emqx_config:get_root_names())),
+    ?assertMatch({ok, #{raw_config := 256}}, emqx:update_config([mqtt, max_topic_levels], 256)),
+    emqx_config:erase_all(),
+    %% Has deprecated file
+    ok = file:write_file(DeprecatedFile, <<"{}">>),
+    ok = emqx_config:init_load(emqx_schema, [ConfFile]),
+    ?assertEqual(ExpectRootNames, lists:sort(emqx_config:get_root_names())),
+    ?assertMatch({ok, #{raw_config := 128}}, emqx:update_config([mqtt, max_topic_levels], 128)),
+    ok = file:delete(DeprecatedFile).
@@ -38,8 +38,6 @@ init_per_suite(Config) ->
     ok = meck:new(emqx_cm, [passthrough, no_history, no_link]),
     ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end),
     ok = meck:expect(emqx_cm, mark_channel_disconnected, fun(_) -> ok end),
-    %% Meck Limiter
-    ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
     %% Meck Pd
     ok = meck:new(emqx_pd, [passthrough, no_history, no_link]),
     %% Meck Metrics
@@ -67,7 +65,6 @@ end_per_suite(_Config) ->
     ok = meck:unload(emqx_transport),
     catch meck:unload(emqx_channel),
     ok = meck:unload(emqx_cm),
-    ok = meck:unload(emqx_htb_limiter),
     ok = meck:unload(emqx_pd),
     ok = meck:unload(emqx_metrics),
     ok = meck:unload(emqx_hooks),
@@ -421,6 +418,14 @@ t_ensure_rate_limit(_) ->
     {ok, [], State1} = emqx_connection:check_limiter([], [], WhenOk, [], st(#{limiter => Limiter})),
     ?assertEqual(Limiter, emqx_connection:info(limiter, State1)),
 
+    ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
+
+    ok = meck:expect(
+        emqx_htb_limiter,
+        make_infinity_limiter,
+        fun() -> non_infinity end
+    ),
+
     ok = meck:expect(
         emqx_htb_limiter,
         check,
@@ -431,10 +436,10 @@ t_ensure_rate_limit(_) ->
         [],
         WhenOk,
         [],
-        st(#{limiter => Limiter})
+        st(#{limiter => init_limiter()})
     ),
     meck:unload(emqx_htb_limiter),
-    ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
     ?assertNotEqual(undefined, emqx_connection:info(limiter_timer, State2)).
 
 t_activate_socket(_) ->
@@ -495,6 +500,7 @@ t_get_conn_info(_) ->
     end).
 
 t_oom_shutdown(init, Config) ->
+    ok = snabbkaffe:stop(),
     ok = snabbkaffe:start_trace(),
     ok = meck:new(emqx_utils, [non_strict, passthrough, no_history, no_link]),
     meck:expect(
@@ -707,7 +713,14 @@ init_limiter() ->
 
 limiter_cfg() ->
     Cfg = bucket_cfg(),
-    Client = #{
+    Client = client_cfg(),
+    #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}.
+
+bucket_cfg() ->
+    #{rate => infinity, initial => 0, burst => 0}.
+
+client_cfg() ->
+    #{
         rate => infinity,
         initial => 0,
         burst => 0,
@@ -715,11 +728,7 @@ limiter_cfg() ->
         divisible => false,
         max_retry_time => timer:seconds(5),
         failure_strategy => force
-    },
-    #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}.
-
-bucket_cfg() ->
-    #{rate => infinity, initial => 0, burst => 0}.
+    }.
 
 add_bucket() ->
     Cfg = bucket_cfg(),
@@ -35,6 +35,7 @@ all() ->
 
 init_per_suite(Config) ->
     application:load(emqx),
+    {ok, _} = application:ensure_all_started(ssl),
     emqx_config:save_schema_mod_and_names(emqx_schema),
     emqx_common_test_helpers:boot_modules(all),
     Config.
@@ -328,7 +329,15 @@ drain_msgs() ->
 
 clear_crl_cache() ->
     %% reset the CRL cache
+    Ref = monitor(process, whereis(ssl_manager)),
     exit(whereis(ssl_manager), kill),
+    receive
+        {'DOWN', Ref, process, _, _} ->
+            ok
+    after 1_000 ->
+        ct:fail("ssl_manager didn't die")
+    end,
+    ensure_ssl_manager_alive(),
     ok.
 
 force_cacertfile(Cacertfile) ->
@@ -382,7 +391,6 @@ setup_crl_options(Config, #{is_cached := IsCached} = Opts) ->
         false ->
             %% ensure cache is empty
             clear_crl_cache(),
-            ct:sleep(200),
             ok
     end,
     drain_msgs(),
@@ -459,6 +467,13 @@ of_kinds(Trace0, Kinds0) ->
         Trace0
     ).
 
+ensure_ssl_manager_alive() ->
+    ?retry(
+        _Sleep0 = 200,
+        _Attempts0 = 50,
+        true = is_pid(whereis(ssl_manager))
+    ).
+
 %%--------------------------------------------------------------------
 %% Test cases
 %%--------------------------------------------------------------------
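
The kill-then-await-'DOWN' pattern above generalizes to any registered process; a minimal standalone sketch:

    kill_and_wait(Name) ->
        Pid = whereis(Name),
        Ref = monitor(process, Pid),
        exit(Pid, kill),
        receive
            {'DOWN', Ref, process, _, _} -> ok
        after 1_000 ->
            error({still_alive, Name})
        end.
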
@@ -47,13 +47,14 @@ init_per_testcase(Case, Config) when
     Case =:= t_max_conns_tcp; Case =:= t_current_conns_tcp
 ->
     catch emqx_config_handler:stop(),
+    Port = emqx_common_test_helpers:select_free_port(tcp),
     {ok, _} = emqx_config_handler:start_link(),
     PrevListeners = emqx_config:get([listeners], #{}),
     PureListeners = remove_default_limiter(PrevListeners),
     PureListeners2 = PureListeners#{
         tcp => #{
             listener_test => #{
-                bind => {"127.0.0.1", 9999},
+                bind => {"127.0.0.1", Port},
                 max_connections => 4321,
                 limiter => #{}
             }
@@ -63,19 +64,20 @@ init_per_testcase(Case, Config) when
 
     ok = emqx_listeners:start(),
     [
-        {prev_listener_conf, PrevListeners}
+        {prev_listener_conf, PrevListeners},
+        {tcp_port, Port}
         | Config
     ];
 init_per_testcase(t_wss_conn, Config) ->
     catch emqx_config_handler:stop(),
+    Port = emqx_common_test_helpers:select_free_port(ssl),
     {ok, _} = emqx_config_handler:start_link(),
 
     PrevListeners = emqx_config:get([listeners], #{}),
     PureListeners = remove_default_limiter(PrevListeners),
     PureListeners2 = PureListeners#{
         wss => #{
             listener_test => #{
-                bind => {{127, 0, 0, 1}, 9998},
+                bind => {{127, 0, 0, 1}, Port},
                 limiter => #{},
                 ssl_options => #{
                     cacertfile => ?CERTS_PATH("cacert.pem"),
@@ -89,7 +91,8 @@ init_per_testcase(t_wss_conn, Config) ->
 
     ok = emqx_listeners:start(),
     [
-        {prev_listener_conf, PrevListeners}
+        {prev_listener_conf, PrevListeners},
+        {wss_port, Port}
         | Config
     ];
 init_per_testcase(_, Config) ->
@@ -171,20 +174,30 @@ t_restart_listeners_with_hibernate_after_disabled(_Config) ->
     ok = emqx_listeners:stop(),
     emqx_config:put([listeners], OldLConf).
 
-t_max_conns_tcp(_) ->
+t_max_conns_tcp(Config) ->
     %% Note: Using a string representation for the bind address like
     %% "127.0.0.1" does not work
-    ?assertEqual(4321, emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})).
+    ?assertEqual(
+        4321,
+        emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, ?config(tcp_port, Config)})
+    ).
 
-t_current_conns_tcp(_) ->
-    ?assertEqual(0, emqx_listeners:current_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})).
+t_current_conns_tcp(Config) ->
+    ?assertEqual(
+        0,
+        emqx_listeners:current_conns('tcp:listener_test', {
+            {127, 0, 0, 1}, ?config(tcp_port, Config)
+        })
+    ).
 
-t_wss_conn(_) ->
-    {ok, Socket} = ssl:connect({127, 0, 0, 1}, 9998, [{verify, verify_none}], 1000),
+t_wss_conn(Config) ->
+    {ok, Socket} = ssl:connect(
+        {127, 0, 0, 1}, ?config(wss_port, Config), [{verify, verify_none}], 1000
+    ),
     ok = ssl:close(Socket).
 
 t_quic_conn(Config) ->
-    Port = 24568,
+    Port = emqx_common_test_helpers:select_free_port(quic),
     DataDir = ?config(data_dir, Config),
     SSLOpts = #{
         password => ?SERVER_KEY_PASSWORD,
@@ -207,7 +220,7 @@ t_quic_conn(Config) ->
     emqx_listeners:stop_listener(quic, ?FUNCTION_NAME, #{bind => Port}).
 
 t_ssl_password_cert(Config) ->
-    Port = 24568,
+    Port = emqx_common_test_helpers:select_free_port(ssl),
     DataDir = ?config(data_dir, Config),
     SSLOptsPWD = #{
         password => ?SERVER_KEY_PASSWORD,
@@ -266,8 +279,7 @@ render_config_file() ->
 mustache_vars() ->
     [
         {platform_data_dir, local_path(["data"])},
-        {platform_etc_dir, local_path(["etc"])},
-        {platform_log_dir, local_path(["log"])}
+        {platform_etc_dir, local_path(["etc"])}
     ].
 
 generate_config() ->
@@ -22,7 +22,6 @@
 -include_lib("eunit/include/eunit.hrl").
 
 -define(LOGGER, emqx_logger).
--define(a, "a").
 -define(SUPPORTED_LEVELS, [emergency, alert, critical, error, warning, notice, info, debug]).
 
 all() -> emqx_common_test_helpers:all(?MODULE).
@@ -254,10 +254,15 @@ does_module_exist(Mod) ->
     end.
 
 assert_no_http_get() ->
+    Timeout = 0,
+    Error = should_be_cached,
+    assert_no_http_get(Timeout, Error).
+
+assert_no_http_get(Timeout, Error) ->
     receive
         {http_get, _URL} ->
-            error(should_be_cached)
-    after 0 ->
+            error(Error)
+    after Timeout ->
         ok
     end.
 
@@ -702,7 +707,9 @@ do_t_update_listener(Config) ->
                     %% the API converts that to an internally
                     %% managed file
                     <<"issuer_pem">> => IssuerPem,
-                    <<"responder_url">> => <<"http://localhost:9877">>
+                    <<"responder_url">> => <<"http://localhost:9877">>,
+                    %% for quicker testing; min refresh in tests is 5 s.
+                    <<"refresh_interval">> => <<"5s">>
                 }
             }
        },
@@ -739,6 +746,70 @@ do_t_update_listener(Config) ->
         )
     ),
     assert_http_get(1, 5_000),
+
+    %% Disable OCSP Stapling; the periodic refreshes should stop
+    RefreshInterval = emqx_config:get([listeners, ssl, default, ssl_options, ocsp, refresh_interval]),
+    OCSPConfig1 =
+        #{
+            <<"ssl_options">> =>
+                #{
+                    <<"ocsp">> =>
+                        #{
+                            <<"enable_ocsp_stapling">> => false
+                        }
+                }
+        },
+    ListenerData3 = emqx_utils_maps:deep_merge(ListenerData2, OCSPConfig1),
+    {ok, {_, _, ListenerData4}} = update_listener_via_api(ListenerId, ListenerData3),
+    ?assertMatch(
+        #{
+            <<"ssl_options">> :=
+                #{
+                    <<"ocsp">> :=
+                        #{
+                            <<"enable_ocsp_stapling">> := false
+                        }
+                }
+        },
+        ListenerData4
+    ),
+
+    assert_no_http_get(2 * RefreshInterval, should_stop_refreshing),
+
+    ok.
+
+t_double_unregister(_Config) ->
+    ListenerID = <<"ssl:test_ocsp">>,
+    Conf = emqx_config:get_listener_conf(ssl, test_ocsp, []),
+    ?check_trace(
+        begin
+            {ok, {ok, _}} =
+                ?wait_async_action(
+                    emqx_ocsp_cache:register_listener(ListenerID, Conf),
+                    #{?snk_kind := ocsp_http_fetch_and_cache, listener_id := ListenerID},
+                    5_000
+                ),
+            assert_http_get(1),
+
+            {ok, {ok, _}} =
+                ?wait_async_action(
+                    emqx_ocsp_cache:unregister_listener(ListenerID),
+                    #{?snk_kind := ocsp_cache_listener_unregistered, listener_id := ListenerID},
+                    5_000
+                ),
+
+            %% Should be idempotent and not crash
+            {ok, {ok, _}} =
+                ?wait_async_action(
+                    emqx_ocsp_cache:unregister_listener(ListenerID),
+                    #{?snk_kind := ocsp_cache_listener_unregistered, listener_id := ListenerID},
+                    5_000
+                ),
+            ok
+        end,
+        []
+    ),
+
     ok.
 
 t_ocsp_responder_error_responses(_Config) ->
@@ -2026,18 +2026,7 @@ stop_emqx() ->
 %% select a random port picked by OS
 -spec select_port() -> inet:port_number().
 select_port() ->
-    {ok, S} = gen_udp:open(0, [{reuseaddr, true}]),
-    {ok, {_, Port}} = inet:sockname(S),
-    gen_udp:close(S),
-    case os:type() of
-        {unix, darwin} ->
-            %% in MacOS, still get address_in_use after close port
-            timer:sleep(500);
-        _ ->
-            skip
-    end,
-    ct:pal("select port: ~p", [Port]),
-    Port.
+    emqx_common_test_helpers:select_free_port(quic).
 
 -spec via_stream({quic, quicer:connection_handle(), quicer:stream_handle()}) ->
     quicer:stream_handle().
@@ -38,6 +38,7 @@
 -define(LOGT(Format, Args), ct:pal("TEST_SUITE: " ++ Format, Args)).
 -define(RATE(Rate), to_rate(Rate)).
 -define(NOW, erlang:system_time(millisecond)).
+-define(ROOT_COUNTER_IDX, 1).
 
 %%--------------------------------------------------------------------
 %% Setups
@@ -211,11 +212,11 @@ t_infinity_client(_) ->
     end,
     with_per_client(Fun, Case).
 
-t_try_restore_agg(_) ->
+t_try_restore_with_bucket(_) ->
     Fun = fun(#{client := Cli} = Bucket) ->
         Bucket2 = Bucket#{
-            rate := 1,
-            burst := 199,
+            rate := 100,
+            burst := 100,
             initial := 50
         },
         Cli2 = Cli#{
@@ -394,38 +395,6 @@ t_burst(_) ->
         Case
     ).
 
-t_limit_global_with_unlimit_other(_) ->
-    GlobalMod = fun(#{message_routing := MR} = Cfg) ->
-        Cfg#{message_routing := MR#{rate := ?RATE("600/1s")}}
-    end,
-
-    Bucket = fun(#{client := Cli} = Bucket) ->
-        Bucket2 = Bucket#{
-            rate := infinity,
-            initial := 0,
-            burst := 0
-        },
-        Cli2 = Cli#{
-            rate := infinity,
-            burst := 0,
-            initial := 0
-        },
-        Bucket2#{client := Cli2}
-    end,
-
-    Case = fun() ->
-        C1 = counters:new(1, []),
-        start_client({b1, Bucket}, ?NOW + 2000, C1, 20),
-        timer:sleep(2200),
-        check_average_rate(C1, 2, 600)
-    end,
-
-    with_global(
-        GlobalMod,
-        [{b1, Bucket}],
-        Case
-    ).
-
 %%--------------------------------------------------------------------
 %% Test Cases container
 %%--------------------------------------------------------------------
@@ -454,38 +423,6 @@ t_check_container(_) ->
     end,
     with_per_client(Cfg, Case).
 
-%%--------------------------------------------------------------------
-%% Test Override
-%%--------------------------------------------------------------------
-t_bucket_no_client(_) ->
-    Rate = ?RATE("1/s"),
-    GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) ->
-        Cfg#{client := Client#{message_routing := MR#{rate := Rate}}}
-    end,
-    BucketMod = fun(Bucket) ->
-        maps:remove(client, Bucket)
-    end,
-    Case = fun() ->
-        Limiter = connect(BucketMod(make_limiter_cfg())),
-        ?assertMatch(#{rate := Rate}, Limiter)
-    end,
-    with_global(GlobalMod, [BucketMod], Case).
-
-t_bucket_client(_) ->
-    GlobalRate = ?RATE("1/s"),
-    BucketRate = ?RATE("10/s"),
-    GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) ->
-        Cfg#{client := Client#{message_routing := MR#{rate := GlobalRate}}}
-    end,
-    BucketMod = fun(#{client := Client} = Bucket) ->
-        Bucket#{client := Client#{rate := BucketRate}}
-    end,
-    Case = fun() ->
-        Limiter = connect(BucketMod(make_limiter_cfg())),
-        ?assertMatch(#{rate := BucketRate}, Limiter)
-    end,
-    with_global(GlobalMod, [BucketMod], Case).
-
 %%--------------------------------------------------------------------
 %% Test Cases misc
 %%--------------------------------------------------------------------
@@ -574,7 +511,7 @@ t_schema_unit(_) ->
     ?assertEqual({ok, 100 * 1024 * 1024 * 1024}, M:to_capacity("100GB")),
     ok.
 
-compatibility_for_capacity(_) ->
+t_compatibility_for_capacity(_) ->
     CfgStr = <<
         ""
         "\n"
@@ -594,7 +531,7 @@ compatibility_for_capacity(_) ->
         parse_and_check(CfgStr)
     ).
 
-compatibility_for_message_in(_) ->
+t_compatibility_for_message_in(_) ->
     CfgStr = <<
         ""
         "\n"
@@ -614,7 +551,7 @@ compatibility_for_message_in(_) ->
         parse_and_check(CfgStr)
    ).
 
-compatibility_for_bytes_in(_) ->
+t_compatibility_for_bytes_in(_) ->
     CfgStr = <<
         ""
         "\n"
@@ -634,6 +571,174 @@ compatibility_for_bytes_in(_) ->
         parse_and_check(CfgStr)
     ).
+
+t_extract_with_type(_) ->
+    IsOnly = fun
+        (_Key, Cfg) when map_size(Cfg) =/= 1 ->
+            false;
+        (Key, Cfg) ->
+            maps:is_key(Key, Cfg)
+    end,
+    Checker = fun
+        (Type, #{client := Client} = Cfg) ->
+            Cfg2 = maps:remove(client, Cfg),
+            IsOnly(Type, Client) andalso
+                (IsOnly(Type, Cfg2) orelse
+                    map_size(Cfg2) =:= 0);
+        (Type, Cfg) ->
+            IsOnly(Type, Cfg)
+    end,
+    ?assertEqual(undefined, emqx_limiter_schema:extract_with_type(messages, undefined)),
+    ?assert(
+        Checker(
+            messages,
+            emqx_limiter_schema:extract_with_type(messages, #{
+                messages => #{rate => 1}, bytes => #{rate => 1}
+            })
+        )
+    ),
+    ?assert(
+        Checker(
+            messages,
+            emqx_limiter_schema:extract_with_type(messages, #{
+                messages => #{rate => 1},
+                bytes => #{rate => 1},
+                client => #{messages => #{rate => 2}}
+            })
+        )
+    ),
+    ?assert(
+        Checker(
+            messages,
+            emqx_limiter_schema:extract_with_type(messages, #{
+                client => #{messages => #{rate => 2}, bytes => #{rate => 1}}
+            })
+        )
+    ).
+
+%%--------------------------------------------------------------------
+%% Test Cases Create Instance
+%%--------------------------------------------------------------------
+t_create_instance_with_infinity_node(_) ->
+    emqx_limiter_manager:insert_bucket(?FUNCTION_NAME, bytes, ?FUNCTION_NAME),
+    Cases = make_create_test_data_with_infinity_node(?FUNCTION_NAME),
+    lists:foreach(
+        fun({Cfg, Expected}) ->
+            {ok, Result} = emqx_limiter_server:connect(?FUNCTION_NAME, bytes, Cfg),
+            IsMatched =
+                case is_atom(Expected) of
+                    true ->
+                        Result =:= Expected;
+                    _ ->
+                        Expected(Result)
+                end,
+            ?assert(
+                IsMatched,
+                lists:flatten(
+                    io_lib:format("Got unexpected:~p~n, Cfg:~p~n", [
+                        Result, Cfg
+                    ])
+                )
+            )
+        end,
+        Cases
+    ),
+    emqx_limiter_manager:delete_bucket(?FUNCTION_NAME, bytes),
+    ok.
+
+t_not_exists_instance(_) ->
+    Cfg = #{bytes => #{rate => 100, burst => 0, initial => 0}},
+    ?assertEqual(
+        {error, invalid_bucket},
+        emqx_limiter_server:connect(?FUNCTION_NAME, bytes, Cfg)
+    ),
+
+    ?assertEqual(
+        {error, invalid_bucket},
+        emqx_limiter_server:connect(?FUNCTION_NAME, not_exists, Cfg)
+    ),
+    ok.
+
+t_create_instance_with_node(_) ->
+    GlobalMod = fun(#{message_routing := MR} = Cfg) ->
+        Cfg#{
+            message_routing := MR#{rate := ?RATE("200/1s")},
+            messages := MR#{rate := ?RATE("200/1s")}
+        }
+    end,
+
+    B1 = fun(Bucket) ->
+        Bucket#{rate := ?RATE("400/1s")}
+    end,
+
+    B2 = fun(Bucket) ->
+        Bucket#{rate := infinity}
+    end,
+
+    IsRefLimiter = fun
+        ({ok, #{tokens := _}}, _IsRoot) ->
+            false;
+        ({ok, #{bucket := #{index := ?ROOT_COUNTER_IDX}}}, true) ->
+            true;
+        ({ok, #{bucket := #{index := Index}}}, false) when Index =/= ?ROOT_COUNTER_IDX ->
+            true;
+        (Result, _IsRoot) ->
+            ct:pal("The result is:~p~n", [Result]),
+            false
+    end,
+
+    Case = fun() ->
+        BucketCfg = make_limiter_cfg(),
+
+        ?assert(
+            IsRefLimiter(emqx_limiter_server:connect(b1, message_routing, B1(BucketCfg)), false)
+        ),
+        ?assert(
+            IsRefLimiter(emqx_limiter_server:connect(b2, message_routing, B2(BucketCfg)), true)
+        ),
+        ?assert(IsRefLimiter(emqx_limiter_server:connect(x, messages, undefined), true)),
|
||||||
|
?assertNot(IsRefLimiter(emqx_limiter_server:connect(x, bytes, undefined), false))
|
||||||
|
end,
|
||||||
|
|
||||||
|
with_global(
|
||||||
|
GlobalMod,
|
||||||
|
[{b1, B1}, {b2, B2}],
|
||||||
|
Case
|
||||||
|
),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% Test Cases emqx_esockd_htb_limiter
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
t_create_esockd_htb_limiter(_) ->
|
||||||
|
Opts = emqx_esockd_htb_limiter:new_create_options(?FUNCTION_NAME, bytes, undefined),
|
||||||
|
?assertMatch(
|
||||||
|
#{module := _, id := ?FUNCTION_NAME, type := bytes, bucket := undefined},
|
||||||
|
Opts
|
||||||
|
),
|
||||||
|
|
||||||
|
Limiter = emqx_esockd_htb_limiter:create(Opts),
|
||||||
|
?assertMatch(
|
||||||
|
#{module := _, name := bytes, limiter := infinity},
|
||||||
|
Limiter
|
||||||
|
),
|
||||||
|
|
||||||
|
?assertEqual(ok, emqx_esockd_htb_limiter:delete(Limiter)),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
t_esockd_htb_consume(_) ->
|
||||||
|
ClientCfg = emqx_limiter_schema:default_client_config(),
|
||||||
|
Cfg = #{client => #{bytes => ClientCfg#{rate := 50, max_retry_time := 0}}},
|
||||||
|
Opts = emqx_esockd_htb_limiter:new_create_options(?FUNCTION_NAME, bytes, Cfg),
|
||||||
|
Limiter = emqx_esockd_htb_limiter:create(Opts),
|
||||||
|
|
||||||
|
C1R = emqx_esockd_htb_limiter:consume(51, Limiter),
|
||||||
|
?assertMatch({pause, _Ms, _Limiter2}, C1R),
|
||||||
|
|
||||||
|
timer:sleep(300),
|
||||||
|
C2R = emqx_esockd_htb_limiter:consume(50, Limiter),
|
||||||
|
?assertMatch({ok, _}, C2R),
|
||||||
|
ok.
|
||||||
|
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
%%% Internal functions
|
%%% Internal functions
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
@ -877,3 +982,64 @@ apply_modifier(Pairs, #{default := Template}) ->
|
||||||
parse_and_check(ConfigString) ->
|
parse_and_check(ConfigString) ->
|
||||||
ok = emqx_common_test_helpers:load_config(emqx_schema, ConfigString),
|
ok = emqx_common_test_helpers:load_config(emqx_schema, ConfigString),
|
||||||
emqx:get_config([listeners, tcp, default, limiter]).
|
emqx:get_config([listeners, tcp, default, limiter]).
|
||||||
|
|
||||||
|
make_create_test_data_with_infinity_node(FakeInstnace) ->
|
||||||
|
Infinity = emqx_htb_limiter:make_infinity_limiter(),
|
||||||
|
ClientCfg = emqx_limiter_schema:default_client_config(),
|
||||||
|
InfinityRef = emqx_limiter_bucket_ref:infinity_bucket(),
|
||||||
|
MkC = fun(Rate) ->
|
||||||
|
#{client => #{bytes => ClientCfg#{rate := Rate}}}
|
||||||
|
end,
|
||||||
|
MkB = fun(Rate) ->
|
||||||
|
#{bytes => #{rate => Rate, burst => 0, initial => 0}}
|
||||||
|
end,
|
||||||
|
|
||||||
|
MkA = fun(Client, Bucket) ->
|
||||||
|
maps:merge(MkC(Client), MkB(Bucket))
|
||||||
|
end,
|
||||||
|
IsRefLimiter = fun(Expected) ->
|
||||||
|
fun
|
||||||
|
(#{tokens := _}) -> false;
|
||||||
|
(#{bucket := Bucket}) -> Bucket =:= Expected;
|
||||||
|
(_) -> false
|
||||||
|
end
|
||||||
|
end,
|
||||||
|
|
||||||
|
IsTokenLimiter = fun(Expected) ->
|
||||||
|
fun
|
||||||
|
(#{tokens := _, bucket := Bucket}) -> Bucket =:= Expected;
|
||||||
|
(_) -> false
|
||||||
|
end
|
||||||
|
end,
|
||||||
|
|
||||||
|
[
|
||||||
|
%% default situation, no limiter setting
|
||||||
|
{undefined, Infinity},
|
||||||
|
|
||||||
|
%% client = undefined bucket = undefined
|
||||||
|
{#{}, Infinity},
|
||||||
|
%% client = undefined bucket = infinity
|
||||||
|
{MkB(infinity), Infinity},
|
||||||
|
%% client = undefined bucket = other
|
||||||
|
{MkB(100), IsRefLimiter(FakeInstnace)},
|
||||||
|
|
||||||
|
%% client = infinity bucket = undefined
|
||||||
|
{MkC(infinity), Infinity},
|
||||||
|
%% client = infinity bucket = infinity
|
||||||
|
{MkA(infinity, infinity), Infinity},
|
||||||
|
|
||||||
|
%% client = infinity bucket = other
|
||||||
|
{MkA(infinity, 100), IsRefLimiter(FakeInstnace)},
|
||||||
|
|
||||||
|
%% client = other bucket = undefined
|
||||||
|
{MkC(100), IsTokenLimiter(InfinityRef)},
|
||||||
|
|
||||||
|
%% client = other bucket = infinity
|
||||||
|
{MkC(100), IsTokenLimiter(InfinityRef)},
|
||||||
|
|
||||||
|
%% client = C bucket = B C < B
|
||||||
|
{MkA(100, 1000), IsTokenLimiter(FakeInstnace)},
|
||||||
|
|
||||||
|
%% client = C bucket = B C > B
|
||||||
|
{MkA(1000, 100), IsRefLimiter(FakeInstnace)}
|
||||||
|
].
|
||||||
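Editor's note: the case table above encodes the selection rule exercised by the new create-instance tests: an absent or infinity setting on either side collapses to the infinity limiter, a finite bucket rate under an infinity client yields a bucket-reference limiter, and a finite client rate yields a token-bucket limiter unless the bucket is the tighter bound. A minimal sketch (not part of the diff) of how such a {Cfg, Expected} table can be driven, assuming only the emqx_limiter_server:connect/3 contract shown above:

    %% Hedged sketch: `Connect` stands in for a closure such as
    %% fun(Cfg) -> emqx_limiter_server:connect(my_id, bytes, Cfg) end.
    check_cases(Connect, Cases) ->
        lists:foreach(
            fun({Cfg, Expected}) ->
                {ok, Result} = Connect(Cfg),
                Matched =
                    case is_function(Expected, 1) of
                        true -> Expected(Result);
                        false -> Result =:= Expected
                    end,
                Matched orelse error({unexpected_limiter, Cfg, Result})
            end,
            Cases
        ).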

@@ -219,112 +219,124 @@ parse_server_test_() ->
         ?T(
             "single server, binary, no port",
             ?assertEqual(
-                [{"localhost", DefaultPort}],
+                [#{hostname => "localhost", port => DefaultPort}],
                 Parse(<<"localhost">>)
             )
         ),
         ?T(
             "single server, string, no port",
             ?assertEqual(
-                [{"localhost", DefaultPort}],
+                [#{hostname => "localhost", port => DefaultPort}],
                 Parse("localhost")
             )
         ),
         ?T(
             "single server, list(string), no port",
             ?assertEqual(
-                [{"localhost", DefaultPort}],
+                [#{hostname => "localhost", port => DefaultPort}],
                 Parse(["localhost"])
             )
         ),
         ?T(
             "single server, list(binary), no port",
             ?assertEqual(
-                [{"localhost", DefaultPort}],
+                [#{hostname => "localhost", port => DefaultPort}],
                 Parse([<<"localhost">>])
             )
         ),
         ?T(
             "single server, binary, with port",
             ?assertEqual(
-                [{"localhost", 9999}],
+                [#{hostname => "localhost", port => 9999}],
                 Parse(<<"localhost:9999">>)
             )
         ),
         ?T(
             "single server, list(string), with port",
             ?assertEqual(
-                [{"localhost", 9999}],
+                [#{hostname => "localhost", port => 9999}],
                 Parse(["localhost:9999"])
             )
         ),
         ?T(
             "single server, string, with port",
             ?assertEqual(
-                [{"localhost", 9999}],
+                [#{hostname => "localhost", port => 9999}],
                 Parse("localhost:9999")
             )
         ),
         ?T(
             "single server, list(binary), with port",
             ?assertEqual(
-                [{"localhost", 9999}],
+                [#{hostname => "localhost", port => 9999}],
                 Parse([<<"localhost:9999">>])
             )
         ),
         ?T(
             "multiple servers, string, no port",
             ?assertEqual(
-                [{"host1", DefaultPort}, {"host2", DefaultPort}],
+                [
+                    #{hostname => "host1", port => DefaultPort},
+                    #{hostname => "host2", port => DefaultPort}
+                ],
                 Parse("host1, host2")
             )
         ),
         ?T(
             "multiple servers, binary, no port",
             ?assertEqual(
-                [{"host1", DefaultPort}, {"host2", DefaultPort}],
+                [
+                    #{hostname => "host1", port => DefaultPort},
+                    #{hostname => "host2", port => DefaultPort}
+                ],
                 Parse(<<"host1, host2,,,">>)
             )
         ),
         ?T(
             "multiple servers, list(string), no port",
             ?assertEqual(
-                [{"host1", DefaultPort}, {"host2", DefaultPort}],
+                [
+                    #{hostname => "host1", port => DefaultPort},
+                    #{hostname => "host2", port => DefaultPort}
+                ],
                 Parse(["host1", "host2"])
             )
         ),
         ?T(
             "multiple servers, list(binary), no port",
             ?assertEqual(
-                [{"host1", DefaultPort}, {"host2", DefaultPort}],
+                [
+                    #{hostname => "host1", port => DefaultPort},
+                    #{hostname => "host2", port => DefaultPort}
+                ],
                 Parse([<<"host1">>, <<"host2">>])
             )
         ),
         ?T(
             "multiple servers, string, with port",
             ?assertEqual(
-                [{"host1", 1234}, {"host2", 2345}],
+                [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
                 Parse("host1:1234, host2:2345")
             )
         ),
         ?T(
             "multiple servers, binary, with port",
             ?assertEqual(
-                [{"host1", 1234}, {"host2", 2345}],
+                [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
                 Parse(<<"host1:1234, host2:2345, ">>)
             )
         ),
         ?T(
             "multiple servers, list(string), with port",
             ?assertEqual(
-                [{"host1", 1234}, {"host2", 2345}],
+                [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
                 Parse([" host1:1234 ", "host2:2345"])
             )
         ),
         ?T(
             "multiple servers, list(binary), with port",
             ?assertEqual(
-                [{"host1", 1234}, {"host2", 2345}],
+                [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
                 Parse([<<"host1:1234">>, <<"host2:2345">>])
             )
         ),
@@ -350,9 +362,9 @@ parse_server_test_() ->
             )
         ),
         ?T(
-            "multiple servers wihtout port, mixed list(binary|string)",
+            "multiple servers without port, mixed list(binary|string)",
             ?assertEqual(
-                ["host1", "host2"],
+                [#{hostname => "host1"}, #{hostname => "host2"}],
                 Parse2([<<"host1">>, "host2"], #{no_port => true})
             )
         ),
@@ -394,14 +406,18 @@ parse_server_test_() ->
         ?T(
             "single server map",
             ?assertEqual(
-                [{"host1.domain", 1234}],
+                [#{hostname => "host1.domain", port => 1234}],
                 HoconParse("host1.domain:1234")
             )
         ),
         ?T(
             "multiple servers map",
             ?assertEqual(
-                [{"host1.domain", 1234}, {"host2.domain", 2345}, {"host3.domain", 3456}],
+                [
+                    #{hostname => "host1.domain", port => 1234},
+                    #{hostname => "host2.domain", port => 2345},
+                    #{hostname => "host3.domain", port => 3456}
+                ],
                 HoconParse("host1.domain:1234,host2.domain:2345,host3.domain:3456")
             )
         ),
@@ -447,6 +463,171 @@ parse_server_test_() ->
             "bad_schema",
             emqx_schema:parse_server("whatever", #{default_port => 10, no_port => true})
         )
+        ),
+        ?T(
+            "scheme, hostname and port",
+            ?assertEqual(
+                #{scheme => "pulsar+ssl", hostname => "host", port => 6651},
+                emqx_schema:parse_server(
+                    "pulsar+ssl://host:6651",
+                    #{
+                        default_port => 6650,
+                        supported_schemes => ["pulsar", "pulsar+ssl"]
+                    }
+                )
+            )
+        ),
+        ?T(
+            "scheme and hostname, default port",
+            ?assertEqual(
+                #{scheme => "pulsar", hostname => "host", port => 6650},
+                emqx_schema:parse_server(
+                    "pulsar://host",
+                    #{
+                        default_port => 6650,
+                        supported_schemes => ["pulsar", "pulsar+ssl"]
+                    }
+                )
+            )
+        ),
+        ?T(
+            "scheme and hostname, no port",
+            ?assertEqual(
+                #{scheme => "pulsar", hostname => "host"},
+                emqx_schema:parse_server(
+                    "pulsar://host",
+                    #{
+                        no_port => true,
+                        supported_schemes => ["pulsar", "pulsar+ssl"]
+                    }
+                )
+            )
+        ),
+        ?T(
+            "scheme and hostname, missing port",
+            ?assertThrow(
+                "missing_port_number",
+                emqx_schema:parse_server(
+                    "pulsar://host",
+                    #{
+                        no_port => false,
+                        supported_schemes => ["pulsar", "pulsar+ssl"]
+                    }
+                )
+            )
+        ),
+        ?T(
+            "hostname, default scheme, no default port",
+            ?assertEqual(
+                #{scheme => "pulsar", hostname => "host"},
+                emqx_schema:parse_server(
+                    "host",
+                    #{
+                        default_scheme => "pulsar",
+                        no_port => true,
+                        supported_schemes => ["pulsar", "pulsar+ssl"]
+                    }
+                )
+            )
+        ),
+        ?T(
+            "hostname, default scheme, default port",
+            ?assertEqual(
+                #{scheme => "pulsar", hostname => "host", port => 6650},
+                emqx_schema:parse_server(
+                    "host",
+                    #{
+                        default_port => 6650,
+                        default_scheme => "pulsar",
+                        supported_schemes => ["pulsar", "pulsar+ssl"]
+                    }
+                )
+            )
+        ),
+        ?T(
+            "just hostname, expecting missing scheme",
+            ?assertThrow(
+                "missing_scheme",
+                emqx_schema:parse_server(
+                    "host",
+                    #{
+                        no_port => true,
+                        supported_schemes => ["pulsar", "pulsar+ssl"]
+                    }
+                )
+            )
+        ),
+        ?T(
+            "hostname, default scheme, defined port",
+            ?assertEqual(
+                #{scheme => "pulsar", hostname => "host", port => 6651},
+                emqx_schema:parse_server(
+                    "host:6651",
+                    #{
+                        default_port => 6650,
+                        default_scheme => "pulsar",
+                        supported_schemes => ["pulsar", "pulsar+ssl"]
+                    }
+                )
+            )
+        ),
+        ?T(
+            "inconsistent scheme opts",
+            ?assertError(
+                "bad_schema",
+                emqx_schema:parse_server(
+                    "pulsar+ssl://host:6651",
+                    #{
+                        default_port => 6650,
+                        default_scheme => "something",
+                        supported_schemes => ["not", "supported"]
+                    }
+                )
+            )
+        ),
+        ?T(
+            "hostname, default scheme, defined port",
+            ?assertEqual(
+                #{scheme => "pulsar", hostname => "host", port => 6651},
+                emqx_schema:parse_server(
+                    "host:6651",
+                    #{
+                        default_port => 6650,
+                        default_scheme => "pulsar",
+                        supported_schemes => ["pulsar", "pulsar+ssl"]
+                    }
+                )
+            )
+        ),
+        ?T(
+            "unsupported scheme",
+            ?assertThrow(
+                "unsupported_scheme",
+                emqx_schema:parse_server(
+                    "pulsar+quic://host:6651",
+                    #{
+                        default_port => 6650,
+                        supported_schemes => ["pulsar"]
+                    }
+                )
+            )
+        ),
+        ?T(
+            "multiple hostnames with schemes (1)",
+            ?assertEqual(
+                [
+                    #{scheme => "pulsar", hostname => "host", port => 6649},
+                    #{scheme => "pulsar+ssl", hostname => "other.host", port => 6651},
+                    #{scheme => "pulsar", hostname => "yet.another", port => 6650}
+                ],
+                emqx_schema:parse_servers(
+                    "pulsar://host:6649, pulsar+ssl://other.host:6651,pulsar://yet.another",
+                    #{
+                        default_port => 6650,
+                        supported_schemes => ["pulsar", "pulsar+ssl"]
+                    }
+                )
+            )
         )
     ].
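Editor's note: these assertions document the breaking change in this commit: emqx_schema:parse_server/2 and parse_servers/2 now return maps keyed by hostname, port, and (when requested) scheme instead of {Host, Port} tuples, so callers that pattern-matched on tuples must be adapted. A hedged sketch of the migration pattern, mirroring the adaptations made in the Cassandra connector later in this diff:

    %% Sketch only: convert the new map shape back to host/port tuples
    %% for drivers that still expect {Host, Port}.
    to_host_port_tuples(ServersRaw, Opts) ->
        [
            {Host, Port}
         || #{hostname := Host, port := Port} <-
                emqx_schema:parse_servers(ServersRaw, Opts)
        ].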

@@ -513,3 +694,81 @@ url_type_test_() ->
             typerefl:from_string(emqx_schema:url(), <<"">>)
         )
     ].
+
+env_test_() ->
+    Do = fun emqx_schema:naive_env_interpolation/1,
+    [
+        {"undefined", fun() -> ?assertEqual(undefined, Do(undefined)) end},
+        {"full env abs path",
+            with_env_fn(
+                "MY_FILE",
+                "/path/to/my/file",
+                fun() -> ?assertEqual("/path/to/my/file", Do("$MY_FILE")) end
+            )},
+        {"full env relative path",
+            with_env_fn(
+                "MY_FILE",
+                "path/to/my/file",
+                fun() -> ?assertEqual("path/to/my/file", Do("${MY_FILE}")) end
+            )},
+        %% we can not test windows style file join though
+        {"windows style",
+            with_env_fn(
+                "MY_FILE",
+                "path\\to\\my\\file",
+                fun() -> ?assertEqual("path\\to\\my\\file", Do("$MY_FILE")) end
+            )},
+        {"dir no {}",
+            with_env_fn(
+                "MY_DIR",
+                "/mydir",
+                fun() -> ?assertEqual("/mydir/foobar", Do(<<"$MY_DIR/foobar">>)) end
+            )},
+        {"dir with {}",
+            with_env_fn(
+                "MY_DIR",
+                "/mydir",
+                fun() -> ?assertEqual("/mydir/foobar", Do(<<"${MY_DIR}/foobar">>)) end
+            )},
+        %% a trailing / should not cause the sub path to become absolute
+        {"env dir with trailing /",
+            with_env_fn(
+                "MY_DIR",
+                "/mydir//",
+                fun() -> ?assertEqual("/mydir/foobar", Do(<<"${MY_DIR}/foobar">>)) end
+            )},
+        {"string dir with double /",
+            with_env_fn(
+                "MY_DIR",
+                "/mydir/",
+                fun() -> ?assertEqual("/mydir/foobar", Do(<<"${MY_DIR}//foobar">>)) end
+            )},
+        {"env not found",
+            with_env_fn(
+                "MY_DIR",
+                "/mydir/",
+                fun() -> ?assertEqual("${MY_DIR2}//foobar", Do(<<"${MY_DIR2}//foobar">>)) end
+            )}
+    ].
+
+with_env_fn(Name, Value, F) ->
+    fun() ->
+        with_envs(F, [{Name, Value}])
+    end.
+
+with_envs(Fun, Envs) ->
+    with_envs(Fun, [], Envs).
+
+with_envs(Fun, Args, [{_Name, _Value} | _] = Envs) ->
+    set_envs(Envs),
+    try
+        apply(Fun, Args)
+    after
+        unset_envs(Envs)
+    end.
+
+set_envs([{_Name, _Value} | _] = Envs) ->
+    lists:map(fun({Name, Value}) -> os:putenv(Name, Value) end, Envs).
+
+unset_envs([{_Name, _Value} | _] = Envs) ->
+    lists:map(fun({Name, _}) -> os:unsetenv(Name) end, Envs).
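Editor's note: taken together, the env_test_ cases pin down the contract of emqx_schema:naive_env_interpolation/1: both $VAR and ${VAR} forms are substituted, duplicate slashes at the join are collapsed, and an unresolved variable leaves the input unchanged rather than failing. A small illustration, assuming only the behavior asserted above:

    %% Illustration (values borrowed from the tests above).
    demo_env_interpolation() ->
        true = os:putenv("MY_DIR", "/mydir//"),
        "/mydir/foobar" = emqx_schema:naive_env_interpolation(<<"${MY_DIR}/foobar">>),
        %% an unknown variable is left as-is instead of raising
        "${MY_DIR2}//foobar" = emqx_schema:naive_env_interpolation(<<"${MY_DIR2}//foobar">>),
        os:unsetenv("MY_DIR").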

@@ -60,12 +60,12 @@ init(Parent) ->
     {ok, #{callbacks => [], owner => Parent}}.

 terminate(_Reason, #{callbacks := Callbacks}) ->
-    lists:foreach(fun(Fun) -> catch Fun() end, Callbacks).
+    do_terminate(Callbacks).

 handle_call({push, Callback}, _From, State = #{callbacks := Callbacks}) ->
     {reply, ok, State#{callbacks := [Callback | Callbacks]}};
 handle_call(terminate, _From, State = #{callbacks := Callbacks}) ->
-    lists:foreach(fun(Fun) -> catch Fun() end, Callbacks),
+    do_terminate(Callbacks),
     {stop, normal, ok, State};
 handle_call(_Req, _From, State) ->
     {reply, error, State}.
@@ -77,3 +77,23 @@ handle_info({'EXIT', Parent, _Reason}, State = #{owner := Parent}) ->
     {stop, normal, State};
 handle_info(_Msg, State) ->
     {noreply, State}.
+
+%%----------------------------------------------------------------------------------
+%% Internal fns
+%%----------------------------------------------------------------------------------
+
+do_terminate(Callbacks) ->
+    lists:foreach(
+        fun(Fun) ->
+            try
+                Fun()
+            catch
+                K:E:S ->
+                    ct:pal("error executing callback ~p: ~p", [Fun, {K, E}]),
+                    ct:pal("stacktrace: ~p", [S]),
+                    ok
+            end
+        end,
+        Callbacks
+    ),
+    ok.
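Editor's note: the new do_terminate/1 keeps the old per-callback isolation (one crashing callback cannot prevent the rest from running) but logs class, reason, and stacktrace instead of discarding them with a bare catch. Usage is unchanged; a hedged sketch with hypothetical callbacks:

    %% Hypothetical usage: both callbacks run at terminate, and the
    %% crash of the second is logged rather than silently swallowed.
    demo_terminate(Server) ->
        ok = gen_server:call(Server, {push, fun() -> io:format("cleanup~n") end}),
        ok = gen_server:call(Server, {push, fun() -> error(boom) end}),
        ok = gen_server:call(Server, terminate).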
@@ -138,13 +138,13 @@ end_per_testcase(t_ws_non_check_origin, Config) ->
     del_bucket(),
     PrevConfig = ?config(prev_config, Config),
     emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig),
-    emqx_common_test_helpers:stop_apps([]),
+    stop_apps(),
     ok;
 end_per_testcase(_, Config) ->
     del_bucket(),
     PrevConfig = ?config(prev_config, Config),
     emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig),
-    emqx_common_test_helpers:stop_apps([]),
+    stop_apps(),
     Config.

 init_per_suite(Config) ->
@@ -156,6 +156,10 @@ end_per_suite(_) ->
     emqx_common_test_helpers:stop_apps([]),
     ok.

+%% FIXME: this is a temporary fix to let tests share configs.
+stop_apps() ->
+    emqx_common_test_helpers:stop_apps([], #{erase_all_configs => false}).
+
 %%--------------------------------------------------------------------
 %% Test Cases
 %%--------------------------------------------------------------------
@@ -443,7 +447,12 @@ t_websocket_info_deliver(_) ->

 t_websocket_info_timeout_limiter(_) ->
     Ref = make_ref(),
-    LimiterT = init_limiter(),
+    {ok, Rate} = emqx_limiter_schema:to_rate("50MB"),
+    LimiterT = init_limiter(#{
+        bytes => bucket_cfg(),
+        messages => bucket_cfg(),
+        client => #{bytes => client_cfg(Rate)}
+    }),
     Next = fun emqx_ws_connection:when_msg_in/3,
     Limiter = emqx_limiter_container:set_retry_context({retry, [], [], Next}, LimiterT),
     Event = {timeout, Ref, limit_timeout},

@@ -67,7 +67,7 @@ init_per_suite(Config) ->
     emqx_config:erase(?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY),
     _ = application:load(emqx_conf),
     ok = emqx_mgmt_api_test_util:init_suite(
-        [emqx_authn]
+        [emqx_conf, emqx_authn]
     ),

     ?AUTHN:delete_chain(?GLOBAL),

@@ -42,15 +42,16 @@ init_per_testcase(_Case, Config) ->
         <<"backend">> => <<"built_in_database">>,
         <<"user_id_type">> => <<"clientid">>
     },
-    emqx:update_config(
+    {ok, _} = emqx:update_config(
         ?PATH,
         {create_authenticator, ?GLOBAL, AuthnConfig}
     ),
-    emqx_conf:update(
-        [listeners, tcp, listener_authn_enabled], {create, listener_mqtt_tcp_conf(18830, true)}, #{}
+    {ok, _} = emqx_conf:update(
+        [listeners, tcp, listener_authn_enabled],
+        {create, listener_mqtt_tcp_conf(18830, true)},
+        #{}
     ),
-    emqx_conf:update(
+    {ok, _} = emqx_conf:update(
         [listeners, tcp, listener_authn_disabled],
         {create, listener_mqtt_tcp_conf(18831, false)},
         #{}

@@ -37,7 +37,7 @@ init_per_testcase(_, Config) ->

 init_per_suite(Config) ->
     _ = application:load(emqx_conf),
-    emqx_common_test_helpers:start_apps([emqx_authn]),
+    emqx_common_test_helpers:start_apps([emqx_conf, emqx_authn]),
     application:ensure_all_started(emqx_resource),
     application:ensure_all_started(emqx_connector),
     Config.

@@ -78,7 +78,8 @@ t_check_schema(_Config) ->
     ).

 t_union_member_selector(_) ->
-    ?assertMatch(#{authentication := undefined}, check(undefined)),
+    %% default value for authentication
+    ?assertMatch(#{authentication := []}, check(undefined)),
     C1 = #{<<"backend">> => <<"built_in_database">>},
     ?assertThrow(
         #{

@@ -2,14 +2,4 @@ authorization {
   deny_action = ignore
   no_match = allow
   cache = { enable = true }
-  sources = [
-    {
-      type = file
-      enable = true
-      # This file is immutable to EMQX.
-      # Once new rules are created from dashboard UI or HTTP API,
-      # the file 'data/authz/acl.conf' is used instead of this one
-      path = "{{ platform_etc_dir }}/acl.conf"
-    }
-  ]
 }

@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, emqx_authz, [
     {description, "An OTP application"},
-    {vsn, "0.1.18"},
+    {vsn, "0.1.19"},
     {registered, []},
     {mod, {emqx_authz_app, []}},
     {applications, [

@@ -205,7 +205,7 @@ sources(get, _) ->
                 },
                 AccIn
             ) ->
-                case file:read_file(Path) of
+                case emqx_authz_file:read_file(Path) of
                     {ok, Rules} ->
                         lists:append(AccIn, [
                             #{
@@ -242,7 +242,7 @@ source(get, #{bindings := #{type := Type}}) ->
         Type,
         fun
             (#{<<"type">> := <<"file">>, <<"enable">> := Enable, <<"path">> := Path}) ->
-                case file:read_file(Path) of
+                case emqx_authz_file:read_file(Path) of
                     {ok, Rules} ->
                         {200, #{
                             type => file,

@@ -32,13 +32,15 @@
     create/1,
     update/1,
     destroy/1,
-    authorize/4
+    authorize/4,
+    read_file/1
 ]).

 description() ->
     "AuthZ with static rules".

-create(#{path := Path} = Source) ->
+create(#{path := Path0} = Source) ->
+    Path = filename(Path0),
     Rules =
         case file:consult(Path) of
             {ok, Terms} ->
@@ -63,3 +65,9 @@ destroy(_Source) -> ok.

 authorize(Client, PubSub, Topic, #{annotations := #{rules := Rules}}) ->
     emqx_authz_rule:matches(Client, PubSub, Topic, Rules).
+
+read_file(Path) ->
+    file:read_file(filename(Path)).
+
+filename(PathMaybeTemplate) ->
+    emqx_schema:naive_env_interpolation(PathMaybeTemplate).

@@ -491,7 +491,7 @@ authz_fields() ->
             ?HOCON(
                 ?ARRAY(?UNION(UnionMemberSelector)),
                 #{
-                    default => [],
+                    default => [default_authz()],
                     desc => ?DESC(sources),
                     %% doc_lift is force a root level reference instead of nesting sub-structs
                     extra => #{doc_lift => true},
@@ -501,3 +501,10 @@ authz_fields() ->
                 }
             )}
     ].
+
+default_authz() ->
+    #{
+        <<"type">> => <<"file">>,
+        <<"enable">> => true,
+        <<"path">> => <<"${EMQX_ETC_DIR}/acl.conf">>
+    }.
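Editor's note: with the default flipped from [] to [default_authz()], a fresh node ships with one enabled file source whose path is an environment template; it is resolved at read time by the filename/1 helper added to emqx_authz_file above. A sketch of that resolution, assuming EMQX_ETC_DIR is exported in the node's environment:

    %% Sketch: the default source path resolves through the same
    %% naive_env_interpolation used by emqx_authz_file:filename/1,
    %% assuming EMQX_ETC_DIR is set (e.g. to "/etc/emqx").
    resolved_default_acl_path() ->
        #{<<"path">> := Path} = #{
            <<"type">> => <<"file">>,
            <<"enable">> => true,
            <<"path">> => <<"${EMQX_ETC_DIR}/acl.conf">>
        },
        emqx_schema:naive_env_interpolation(Path).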
@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, emqx_bridge, [
     {description, "EMQX bridges"},
-    {vsn, "0.1.17"},
+    {vsn, "0.1.18"},
     {registered, [emqx_bridge_sup]},
     {mod, {emqx_bridge_app, []}},
     {applications, [

@@ -70,7 +70,9 @@
         T == dynamo;
         T == rocketmq;
         T == cassandra;
-        T == sqlserver
+        T == sqlserver;
+        T == pulsar_producer;
+        T == oracle
 ).

 load() ->

@@ -340,6 +340,8 @@ parse_confs(Type, Name, Conf) when ?IS_INGRESS_BRIDGE(Type) ->
 %% to hocon; keeping this as just `kafka' for backwards compatibility.
 parse_confs(<<"kafka">> = _Type, Name, Conf) ->
     Conf#{bridge_name => Name};
+parse_confs(<<"pulsar_producer">> = _Type, Name, Conf) ->
+    Conf#{bridge_name => Name};
 parse_confs(_Type, _Name, Conf) ->
     Conf.

@@ -230,7 +230,12 @@ webhook_bridge_converter(Conf0, _HoconOpts) ->
         undefined ->
             undefined;
         _ ->
-            do_convert_webhook_config(Conf1)
+            maps:map(
+                fun(_Name, Conf) ->
+                    do_convert_webhook_config(Conf)
+                end,
+                Conf1
+            )
     end.

 do_convert_webhook_config(
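Editor's note: the point of the maps:map/2 change above is that the webhook subtree is a map of bridge name to bridge config, so the converter must run once per named bridge rather than once over the whole subtree. A toy illustration of the per-entry semantics, with made-up names and a stand-in converter:

    %% Toy illustration (hypothetical names and converter): maps:map/2
    %% preserves the Name => Conf structure while rewriting each value.
    demo_convert() ->
        Conf1 = #{<<"b1">> => #{url => "http://one"}, <<"b2">> => #{url => "http://two"}},
        maps:map(
            fun(_Name, Conf) -> Conf#{converted => true} end,
            Conf1
        ).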
@@ -141,8 +141,7 @@ setup_fake_telemetry_data() ->
             }
         }
     },
-    Opts = #{raw_with_default => true},
-    ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf, Opts),
+    ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf),

     ok = snabbkaffe:start_trace(),
     Predicate = fun(#{?snk_kind := K}) -> K =:= emqx_bridge_loaded end,

@@ -11,6 +11,7 @@ The application is used to connect EMQX and Cassandra. User can create a rule
 and easily ingest IoT data into Cassandra by leveraging
 [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).

+<!---

 # Documentation

@@ -19,6 +20,7 @@ and easily ingest IoT data into Cassandra by leveraging
 - Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
   for the EMQX rules engine introduction.

+--->

 # HTTP APIs

@@ -1,6 +1,6 @@
 {application, emqx_bridge_cassandra, [
     {description, "EMQX Enterprise Cassandra Bridge"},
-    {vsn, "0.1.0"},
+    {vsn, "0.1.1"},
     {registered, []},
     {applications, [kernel, stdlib, ecql]},
     {env, []},

@@ -92,7 +92,7 @@ callback_mode() -> async_if_possible.
 on_start(
     InstId,
     #{
-        servers := Servers,
+        servers := Servers0,
         keyspace := Keyspace,
         username := Username,
         pool_size := PoolSize,
@@ -104,9 +104,16 @@ on_start(
         connector => InstId,
         config => emqx_utils:redact(Config)
     }),
+    Servers =
+        lists:map(
+            fun(#{hostname := Host, port := Port}) ->
+                {Host, Port}
+            end,
+            emqx_schema:parse_servers(Servers0, ?DEFAULT_SERVER_OPTION)
+        ),
+
     Options = [
-        {nodes, emqx_schema:parse_servers(Servers, ?DEFAULT_SERVER_OPTION)},
+        {nodes, Servers},
         {username, Username},
         {password, emqx_secret:wrap(maps:get(password, Config, ""))},
         {keyspace, Keyspace},
@@ -274,7 +281,7 @@ proc_cql_params(query, SQL, Params, _State) ->
 exec_cql_query(InstId, PoolName, Type, Async, PreparedKey, Data) when
     Type == query; Type == prepared_query
 ->
-    case ecpool:pick_and_do(PoolName, {?MODULE, Type, [Async, PreparedKey, Data]}, no_handover) of
+    case exec(PoolName, {?MODULE, Type, [Async, PreparedKey, Data]}) of
         {error, Reason} = Result ->
             ?tp(
                 error,
@@ -288,7 +295,7 @@ exec_cql_query(InstId, PoolName, Type, Async, PreparedKey, Data) when
     end.

 exec_cql_batch_query(InstId, PoolName, Async, CQLs) ->
-    case ecpool:pick_and_do(PoolName, {?MODULE, batch_query, [Async, CQLs]}, no_handover) of
+    case exec(PoolName, {?MODULE, batch_query, [Async, CQLs]}) of
         {error, Reason} = Result ->
             ?tp(
                 error,
@@ -301,6 +308,13 @@ exec_cql_batch_query(InstId, PoolName, Async, CQLs) ->
         Result
     end.

+%% Pick one of the pool members to do the query.
+%% Using the 'no_handover' strategy,
+%% meaning the buffer worker does the gen_server call or gen_server cast
+%% towards the connection process.
+exec(PoolName, Query) ->
+    ecpool:pick_and_do(PoolName, Query, no_handover).
+
 on_get_status(_InstId, #{pool_name := PoolName} = State) ->
     case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of
         true ->
@@ -339,17 +353,23 @@ do_check_prepares(State = #{pool_name := PoolName, prepare_cql := {error, Prepar
 query(Conn, sync, CQL, Params) ->
     ecql:query(Conn, CQL, Params);
 query(Conn, {async, Callback}, CQL, Params) ->
-    ecql:async_query(Conn, CQL, Params, one, Callback).
+    ok = ecql:async_query(Conn, CQL, Params, one, Callback),
+    %% return the connection pid for buffer worker to monitor
+    {ok, Conn}.

 prepared_query(Conn, sync, PreparedKey, Params) ->
     ecql:execute(Conn, PreparedKey, Params);
 prepared_query(Conn, {async, Callback}, PreparedKey, Params) ->
-    ecql:async_execute(Conn, PreparedKey, Params, Callback).
+    ok = ecql:async_execute(Conn, PreparedKey, Params, Callback),
+    %% return the connection pid for buffer worker to monitor
+    {ok, Conn}.

 batch_query(Conn, sync, Rows) ->
     ecql:batch(Conn, Rows);
 batch_query(Conn, {async, Callback}, Rows) ->
-    ecql:async_batch(Conn, Rows, Callback).
+    ok = ecql:async_batch(Conn, Rows, Callback),
+    %% return the connection pid for buffer worker to monitor
+    {ok, Conn}.

 %%--------------------------------------------------------------------
 %% callbacks for ecpool
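Editor's note: returning {ok, Conn} from the async query paths gives the resource buffer worker a connection pid to monitor while the ecql callback is still in flight, so a crashed connection is detected instead of the request hanging; the test updates below adjust the trace assertions from ok to {ok, _Pid} accordingly. A minimal sketch of the monitoring idea, assuming a QueryFun wrapping one of the async calls above:

    %% Sketch: monitor the pid returned by an async query so a connection
    %% crash is observable before the callback ever fires.
    async_with_monitor(QueryFun) ->
        {ok, Conn} = QueryFun(),
        MRef = erlang:monitor(process, Conn),
        receive
            {'DOWN', MRef, process, Conn, Reason} ->
                {error, {connection_down, Reason}}
        after 0 ->
            {ok, {Conn, MRef}}
        end.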
@@ -404,7 +404,7 @@ t_setup_via_config_and_publish(Config) ->
         end,
         fun(Trace0) ->
             Trace = ?of_kind(cassandra_connector_query_return, Trace0),
-            ?assertMatch([#{result := ok}], Trace),
+            ?assertMatch([#{result := {ok, _Pid}}], Trace),
             ok
         end
     ),
@@ -443,7 +443,7 @@ t_setup_via_http_api_and_publish(Config) ->
         end,
         fun(Trace0) ->
             Trace = ?of_kind(cassandra_connector_query_return, Trace0),
-            ?assertMatch([#{result := ok}], Trace),
+            ?assertMatch([#{result := {ok, _Pid}}], Trace),
             ok
         end
     ),
@@ -604,7 +604,7 @@ t_missing_data(Config) ->
         fun(Trace0) ->
             %% 1. ecql driver will return `ok` first in async query
             Trace = ?of_kind(cassandra_connector_query_return, Trace0),
-            ?assertMatch([#{result := ok}], Trace),
+            ?assertMatch([#{result := {ok, _Pid}}], Trace),
             %% 2. then it will return an error in callback function
             Trace1 = ?of_kind(handle_async_reply, Trace0),
             ?assertMatch([#{result := {error, {8704, _}}}], Trace1),

@@ -38,9 +38,14 @@ groups() ->
     [].

 cassandra_servers() ->
-    emqx_schema:parse_servers(
-        iolist_to_binary([?CASSANDRA_HOST, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]),
-        #{default_port => ?CASSANDRA_DEFAULT_PORT}
+    lists:map(
+        fun(#{hostname := Host, port := Port}) ->
+            {Host, Port}
+        end,
+        emqx_schema:parse_servers(
+            iolist_to_binary([?CASSANDRA_HOST, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]),
+            #{default_port => ?CASSANDRA_DEFAULT_PORT}
+        )
     ).

 init_per_suite(Config) ->

@@ -1,6 +1,6 @@
 {application, emqx_bridge_gcp_pubsub, [
     {description, "EMQX Enterprise GCP Pub/Sub Bridge"},
-    {vsn, "0.1.0"},
+    {vsn, "0.1.1"},
     {registered, []},
     {applications, [
         kernel,

@@ -38,7 +38,6 @@
 }.
 -type state() :: #{
     connect_timeout := timer:time(),
-    instance_id := manager_id(),
     jwt_worker_id := jwt_worker(),
     max_retries := non_neg_integer(),
     payload_template := emqx_plugin_libs_rule:tmpl_token(),
@@ -61,9 +60,9 @@ is_buffer_supported() -> false.

 callback_mode() -> async_if_possible.

--spec on_start(manager_id(), config()) -> {ok, state()} | {error, term()}.
+-spec on_start(resource_id(), config()) -> {ok, state()} | {error, term()}.
 on_start(
-    InstanceId,
+    ResourceId,
     #{
         connect_timeout := ConnectTimeout,
         max_retries := MaxRetries,
@@ -75,13 +74,13 @@ on_start(
 ) ->
     ?SLOG(info, #{
         msg => "starting_gcp_pubsub_bridge",
-        connector => InstanceId,
+        connector => ResourceId,
         config => Config
     }),
     %% emulating the emulator behavior
     %% https://cloud.google.com/pubsub/docs/emulator
     HostPort = os:getenv("PUBSUB_EMULATOR_HOST", "pubsub.googleapis.com:443"),
-    {Host, Port} = emqx_schema:parse_server(HostPort, #{default_port => 443}),
+    #{hostname := Host, port := Port} = emqx_schema:parse_server(HostPort, #{default_port => 443}),
     PoolType = random,
     Transport = tls,
     TransportOpts = emqx_tls_lib:to_client_opts(#{enable => true, verify => verify_none}),
@@ -100,14 +99,13 @@ on_start(
     #{
         jwt_worker_id := JWTWorkerId,
         project_id := ProjectId
-    } = ensure_jwt_worker(InstanceId, Config),
+    } = ensure_jwt_worker(ResourceId, Config),
     State = #{
         connect_timeout => ConnectTimeout,
-        instance_id => InstanceId,
         jwt_worker_id => JWTWorkerId,
         max_retries => MaxRetries,
         payload_template => emqx_plugin_libs_rule:preproc_tmpl(PayloadTemplate),
-        pool_name => InstanceId,
+        pool_name => ResourceId,
         project_id => ProjectId,
         pubsub_topic => PubSubTopic,
         request_timeout => RequestTimeout
@@ -115,39 +113,39 @@ on_start(
     ?tp(
         gcp_pubsub_on_start_before_starting_pool,
         #{
-            instance_id => InstanceId,
-            pool_name => InstanceId,
+            resource_id => ResourceId,
+            pool_name => ResourceId,
             pool_opts => PoolOpts
         }
     ),
-    ?tp(gcp_pubsub_starting_ehttpc_pool, #{pool_name => InstanceId}),
-    case ehttpc_sup:start_pool(InstanceId, PoolOpts) of
+    ?tp(gcp_pubsub_starting_ehttpc_pool, #{pool_name => ResourceId}),
+    case ehttpc_sup:start_pool(ResourceId, PoolOpts) of
         {ok, _} ->
             {ok, State};
         {error, {already_started, _}} ->
-            ?tp(gcp_pubsub_ehttpc_pool_already_started, #{pool_name => InstanceId}),
+            ?tp(gcp_pubsub_ehttpc_pool_already_started, #{pool_name => ResourceId}),
             {ok, State};
         {error, Reason} ->
             ?tp(gcp_pubsub_ehttpc_pool_start_failure, #{
-                pool_name => InstanceId,
+                pool_name => ResourceId,
                 reason => Reason
             }),
             {error, Reason}
     end.

--spec on_stop(manager_id(), state()) -> ok | {error, term()}.
+-spec on_stop(resource_id(), state()) -> ok | {error, term()}.
 on_stop(
-    InstanceId,
-    _State = #{jwt_worker_id := JWTWorkerId, pool_name := PoolName}
+    ResourceId,
+    _State = #{jwt_worker_id := JWTWorkerId}
 ) ->
-    ?tp(gcp_pubsub_stop, #{instance_id => InstanceId, jwt_worker_id => JWTWorkerId}),
+    ?tp(gcp_pubsub_stop, #{resource_id => ResourceId, jwt_worker_id => JWTWorkerId}),
     ?SLOG(info, #{
         msg => "stopping_gcp_pubsub_bridge",
-        connector => InstanceId
+        connector => ResourceId
     }),
     emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId),
-    emqx_connector_jwt:delete_jwt(?JWT_TABLE, InstanceId),
-    ehttpc_sup:stop_pool(PoolName).
+    emqx_connector_jwt:delete_jwt(?JWT_TABLE, ResourceId),
+    ehttpc_sup:stop_pool(ResourceId).

 -spec on_query(
     resource_id(),
@@ -213,9 +211,9 @@ on_batch_query_async(ResourceId, Requests, ReplyFunAndArgs, State) ->
     ),
     do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId).

--spec on_get_status(manager_id(), state()) -> connected | disconnected.
-on_get_status(InstanceId, #{connect_timeout := Timeout, pool_name := PoolName} = State) ->
-    case do_get_status(InstanceId, PoolName, Timeout) of
+-spec on_get_status(resource_id(), state()) -> connected | disconnected.
+on_get_status(ResourceId, #{connect_timeout := Timeout} = State) ->
+    case do_get_status(ResourceId, Timeout) of
         true ->
             connected;
         false ->
@@ -230,12 +228,12 @@ on_get_status(InstanceId, #{connect_timeout := Timeout, pool_name := PoolName} =
 %% Helper fns
 %%-------------------------------------------------------------------------------------------------

--spec ensure_jwt_worker(manager_id(), config()) ->
+-spec ensure_jwt_worker(resource_id(), config()) ->
     #{
         jwt_worker_id := jwt_worker(),
         project_id := binary()
     }.
-ensure_jwt_worker(InstanceId, #{
+ensure_jwt_worker(ResourceId, #{
     service_account_json := ServiceAccountJSON
 }) ->
     #{
@@ -250,7 +248,7 @@ ensure_jwt_worker(InstanceId, #{
     Alg = <<"RS256">>,
     Config = #{
         private_key => PrivateKeyPEM,
-        resource_id => InstanceId,
+        resource_id => ResourceId,
         expiration => ExpirationMS,
         table => ?JWT_TABLE,
         iss => ServiceAccountEmail,
@@ -260,14 +258,14 @@ ensure_jwt_worker(InstanceId, #{
         alg => Alg
     },

-    JWTWorkerId = <<"gcp_pubsub_jwt_worker:", InstanceId/binary>>,
+    JWTWorkerId = <<"gcp_pubsub_jwt_worker:", ResourceId/binary>>,
     Worker =
         case emqx_connector_jwt_sup:ensure_worker_present(JWTWorkerId, Config) of
             {ok, Worker0} ->
                 Worker0;
             Error ->
                 ?tp(error, "gcp_pubsub_bridge_jwt_worker_failed_to_start", #{
-                    connector => InstanceId,
+                    connector => ResourceId,
                     reason => Error
                 }),
                 _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId),
@@ -281,18 +279,18 @@ ensure_jwt_worker(InstanceId, #{
     %% produced by the worker.
     receive
         {Ref, token_created} ->
-            ?tp(gcp_pubsub_bridge_jwt_created, #{resource_id => InstanceId}),
+            ?tp(gcp_pubsub_bridge_jwt_created, #{resource_id => ResourceId}),
             demonitor(MRef, [flush]),
             ok;
         {'DOWN', MRef, process, Worker, Reason} ->
             ?tp(error, "gcp_pubsub_bridge_jwt_worker_failed_to_start", #{
-                connector => InstanceId,
+                connector => ResourceId,
                 reason => Reason
             }),
             _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId),
             throw(failed_to_start_jwt_worker)
     after 10_000 ->
-        ?tp(warning, "gcp_pubsub_bridge_jwt_timeout", #{connector => InstanceId}),
+        ?tp(warning, "gcp_pubsub_bridge_jwt_timeout", #{connector => ResourceId}),
         demonitor(MRef, [flush]),
         _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId),
         throw(timeout_creating_jwt)
@@ -325,8 +323,8 @@ publish_path(
     <<"/v1/projects/", ProjectId/binary, "/topics/", PubSubTopic/binary, ":publish">>.

 -spec get_jwt_authorization_header(resource_id()) -> [{binary(), binary()}].
-get_jwt_authorization_header(InstanceId) ->
-    case emqx_connector_jwt:lookup_jwt(?JWT_TABLE, InstanceId) of
+get_jwt_authorization_header(ResourceId) ->
+    case emqx_connector_jwt:lookup_jwt(?JWT_TABLE, ResourceId) of
         %% Since we synchronize the JWT creation during resource start
         %% (see `on_start/2'), this will be always be populated.
         {ok, JWT} ->
@@ -345,7 +343,6 @@ get_jwt_authorization_header(InstanceId) ->
 do_send_requests_sync(State, Requests, ResourceId) ->
     #{
         pool_name := PoolName,
-        instance_id := InstanceId,
         max_retries := MaxRetries,
         request_timeout := RequestTimeout
     } = State,
@@ -353,12 +350,11 @@ do_send_requests_sync(State, Requests, ResourceId) ->
         gcp_pubsub_bridge_do_send_requests,
         #{
             query_mode => sync,
-            instance_id => InstanceId,
             resource_id => ResourceId,
             requests => Requests
         }
     ),
-    Headers = get_jwt_authorization_header(InstanceId),
+    Headers = get_jwt_authorization_header(ResourceId),
     Payloads =
         lists:map(
             fun({send_message, Selected}) ->
@@ -471,19 +467,17 @@ do_send_requests_sync(State, Requests, ResourceId) ->
 do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId) ->
     #{
         pool_name := PoolName,
-        instance_id := InstanceId,
         request_timeout := RequestTimeout
     } = State,
     ?tp(
         gcp_pubsub_bridge_do_send_requests,
         #{
             query_mode => async,
-            instance_id => InstanceId,
             resource_id => ResourceId,
             requests => Requests
         }
     ),
-    Headers = get_jwt_authorization_header(InstanceId),
+    Headers = get_jwt_authorization_header(ResourceId),
     Payloads =
         lists:map(
             fun({send_message, Selected}) ->
@@ -541,9 +535,9 @@ reply_delegator(_ResourceId, ReplyFunAndArgs, Result) ->
         emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result)
     end.

--spec do_get_status(manager_id(), binary(), timer:time()) -> boolean().
-do_get_status(InstanceId, PoolName, Timeout) ->
-    Workers = [Worker || {_WorkerName, Worker} <- ehttpc:workers(PoolName)],
+-spec do_get_status(resource_id(), timer:time()) -> boolean().
+do_get_status(ResourceId, Timeout) ->
+    Workers = [Worker || {_WorkerName, Worker} <- ehttpc:workers(ResourceId)],
     DoPerWorker =
         fun(Worker) ->
             case ehttpc:health_check(Worker, Timeout) of
@@ -552,7 +546,7 @@ do_get_status(InstanceId, PoolName, Timeout) ->
                 {error, Reason} ->
                     ?SLOG(error, #{
                         msg => "ehttpc_health_check_failed",
-                        instance_id => InstanceId,
+                        connector => ResourceId,
||||||
reason => Reason,
|
reason => Reason,
|
||||||
worker => Worker
|
worker => Worker
|
||||||
}),
|
}),
|
||||||
|
|
|
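A quick aside on the pattern these GCP hunks standardize: the JWT produced by the worker is cached in ?JWT_TABLE under the resource id, and request paths fetch it back with emqx_connector_jwt:lookup_jwt/2. A minimal sketch of that lookup, assuming a conventional Bearer header and a defensive fallback clause (neither is spelled out in this diff):

    %% Sketch only: build HTTP auth headers from the JWT cached under ResourceId.
    auth_headers_sketch(ResourceId) ->
        case emqx_connector_jwt:lookup_jwt(?JWT_TABLE, ResourceId) of
            {ok, JWT} ->
                [{<<"Authorization">>, <<"Bearer ", JWT/binary>>}];
            _ ->
                %% on_start/2 creates the JWT synchronously, so in practice
                %% the lookup should always succeed.
                []
        end.
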
@@ -9,6 +9,7 @@ The application is used to connect EMQX and HStreamDB.
 User can create a rule and easily ingest IoT data into HStreamDB by leveraging
 [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).

+<!---

 # Documentation

@@ -18,6 +19,7 @@ User can create a rule and easily ingest IoT data into HStreamDB by leveraging
 - Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
   for the EMQX rules engine introduction.

+--->

 # HTTP APIs

@@ -10,10 +10,21 @@ workers from `emqx_resource`. It implements the connection management
 and interaction without need for a separate connector app, since it's
 not used by authentication and authorization applications.

-## Contributing
+# Documentation links

+For more information on Apache Kafka, please see its [official
+site](https://kafka.apache.org/).
+
+# Configurations
+
+Please see [our official
+documentation](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-kafka.html)
+for more detailed info.
+
+# Contributing
+
 Please see our [contributing.md](../../CONTRIBUTING.md).

-## License
+# License

-See [BSL](./BSL.txt).
+EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

@@ -1,6 +1,6 @@
 {application, emqx_bridge_kafka, [
     {description, "EMQX Enterprise Kafka Bridge"},
-    {vsn, "0.1.0"},
+    {vsn, "0.1.2"},
     {registered, [emqx_bridge_kafka_consumer_sup]},
     {applications, [
         kernel,

@@ -114,8 +114,8 @@ callback_mode() ->
 is_buffer_supported() ->
    true.

--spec on_start(manager_id(), config()) -> {ok, state()}.
-on_start(InstanceId, Config) ->
+-spec on_start(resource_id(), config()) -> {ok, state()}.
+on_start(ResourceId, Config) ->
    #{
        authentication := Auth,
        bootstrap_hosts := BootstrapHosts0,
@@ -133,7 +133,7 @@ on_start(InstanceId, Config) ->
    BootstrapHosts = emqx_bridge_kafka_impl:hosts(BootstrapHosts0),
    KafkaType = kafka_consumer,
    %% Note: this is distinct per node.
-   ClientID = make_client_id(InstanceId, KafkaType, BridgeName),
+   ClientID = make_client_id(ResourceId, KafkaType, BridgeName),
    ClientOpts0 =
        case Auth of
            none -> [];
@@ -144,26 +144,26 @@ on_start(InstanceId, Config) ->
        ok ->
            ?tp(
                kafka_consumer_client_started,
-               #{client_id => ClientID, instance_id => InstanceId}
+               #{client_id => ClientID, resource_id => ResourceId}
            ),
            ?SLOG(info, #{
                msg => "kafka_consumer_client_started",
-               instance_id => InstanceId,
+               resource_id => ResourceId,
                kafka_hosts => BootstrapHosts
            });
        {error, Reason} ->
            ?SLOG(error, #{
                msg => "failed_to_start_kafka_consumer_client",
-               instance_id => InstanceId,
+               resource_id => ResourceId,
                kafka_hosts => BootstrapHosts,
                reason => emqx_utils:redact(Reason)
            }),
            throw(?CLIENT_DOWN_MESSAGE)
    end,
-   start_consumer(Config, InstanceId, ClientID).
+   start_consumer(Config, ResourceId, ClientID).

--spec on_stop(manager_id(), state()) -> ok.
-on_stop(_InstanceID, State) ->
+-spec on_stop(resource_id(), state()) -> ok.
+on_stop(_ResourceID, State) ->
    #{
        subscriber_id := SubscriberId,
        kafka_client_id := ClientID
@@ -172,14 +172,19 @@ on_stop(_InstanceID, State) ->
    stop_client(ClientID),
    ok.

--spec on_get_status(manager_id(), state()) -> connected | disconnected.
-on_get_status(_InstanceID, State) ->
+-spec on_get_status(resource_id(), state()) -> connected | disconnected.
+on_get_status(_ResourceID, State) ->
    #{
        subscriber_id := SubscriberId,
        kafka_client_id := ClientID,
        kafka_topics := KafkaTopics
    } = State,
-   do_get_status(State, ClientID, KafkaTopics, SubscriberId).
+   case do_get_status(ClientID, KafkaTopics, SubscriberId) of
+       {disconnected, Message} ->
+           {disconnected, State, Message};
+       Res ->
+           Res
+   end.

 %%-------------------------------------------------------------------------------------
 %% `brod_group_subscriber' API
@@ -266,8 +271,8 @@ ensure_consumer_supervisor_started() ->
            ok
    end.

--spec start_consumer(config(), manager_id(), brod:client_id()) -> {ok, state()}.
-start_consumer(Config, InstanceId, ClientID) ->
+-spec start_consumer(config(), resource_id(), brod:client_id()) -> {ok, state()}.
+start_consumer(Config, ResourceId, ClientID) ->
    #{
        bootstrap_hosts := BootstrapHosts0,
        bridge_name := BridgeName,
@@ -287,7 +292,7 @@ start_consumer(Config, InstanceId, ClientID) ->
    InitialState = #{
        key_encoding_mode => KeyEncodingMode,
        hookpoint => Hookpoint,
-       resource_id => emqx_bridge_resource:resource_id(kafka_consumer, BridgeName),
+       resource_id => ResourceId,
        topic_mapping => TopicMapping,
        value_encoding_mode => ValueEncodingMode
    },
@@ -332,7 +337,7 @@ start_consumer(Config, InstanceId, ClientID) ->
        {ok, _ConsumerPid} ->
            ?tp(
                kafka_consumer_subscriber_started,
-               #{instance_id => InstanceId, subscriber_id => SubscriberId}
+               #{resource_id => ResourceId, subscriber_id => SubscriberId}
            ),
            {ok, #{
                subscriber_id => SubscriberId,
@@ -342,7 +347,7 @@ start_consumer(Config, InstanceId, ClientID) ->
        {error, Reason2} ->
            ?SLOG(error, #{
                msg => "failed_to_start_kafka_consumer",
-               instance_id => InstanceId,
+               resource_id => ResourceId,
                kafka_hosts => emqx_bridge_kafka_impl:hosts(BootstrapHosts0),
                reason => emqx_utils:redact(Reason2)
            }),
@@ -376,41 +381,41 @@ stop_client(ClientID) ->
    ),
    ok.

-do_get_status(State, ClientID, [KafkaTopic | RestTopics], SubscriberId) ->
+do_get_status(ClientID, [KafkaTopic | RestTopics], SubscriberId) ->
    case brod:get_partitions_count(ClientID, KafkaTopic) of
        {ok, NPartitions} ->
-           case do_get_status1(ClientID, KafkaTopic, SubscriberId, NPartitions) of
-               connected -> do_get_status(State, ClientID, RestTopics, SubscriberId);
+           case do_get_topic_status(ClientID, KafkaTopic, SubscriberId, NPartitions) of
+               connected -> do_get_status(ClientID, RestTopics, SubscriberId);
                disconnected -> disconnected
            end;
        {error, {client_down, Context}} ->
            case infer_client_error(Context) of
                auth_error ->
                    Message = "Authentication error. " ++ ?CLIENT_DOWN_MESSAGE,
-                   {disconnected, State, Message};
+                   {disconnected, Message};
                {auth_error, Message0} ->
                    Message = binary_to_list(Message0) ++ "; " ++ ?CLIENT_DOWN_MESSAGE,
-                   {disconnected, State, Message};
+                   {disconnected, Message};
                connection_refused ->
                    Message = "Connection refused. " ++ ?CLIENT_DOWN_MESSAGE,
-                   {disconnected, State, Message};
+                   {disconnected, Message};
                _ ->
-                   {disconnected, State, ?CLIENT_DOWN_MESSAGE}
+                   {disconnected, ?CLIENT_DOWN_MESSAGE}
            end;
        {error, leader_not_available} ->
            Message =
                "Leader connection not available. Please check the Kafka topic used,"
                " the connection parameters and Kafka cluster health",
-           {disconnected, State, Message};
+           {disconnected, Message};
        _ ->
            disconnected
    end;
-do_get_status(_State, _ClientID, _KafkaTopics = [], _SubscriberId) ->
+do_get_status(_ClientID, _KafkaTopics = [], _SubscriberId) ->
    connected.

--spec do_get_status1(brod:client_id(), binary(), subscriber_id(), pos_integer()) ->
+-spec do_get_topic_status(brod:client_id(), binary(), subscriber_id(), pos_integer()) ->
    connected | disconnected.
-do_get_status1(ClientID, KafkaTopic, SubscriberId, NPartitions) ->
+do_get_topic_status(ClientID, KafkaTopic, SubscriberId, NPartitions) ->
    Results =
        lists:map(
            fun(N) ->
@@ -466,19 +471,19 @@ consumer_group_id(BridgeName0) ->
    BridgeName = to_bin(BridgeName0),
    <<"emqx-kafka-consumer-", BridgeName/binary>>.

--spec is_dry_run(manager_id()) -> boolean().
-is_dry_run(InstanceId) ->
-   TestIdStart = string:find(InstanceId, ?TEST_ID_PREFIX),
+-spec is_dry_run(resource_id()) -> boolean().
+is_dry_run(ResourceId) ->
+   TestIdStart = string:find(ResourceId, ?TEST_ID_PREFIX),
    case TestIdStart of
        nomatch ->
            false;
        _ ->
-           string:equal(TestIdStart, InstanceId)
+           string:equal(TestIdStart, ResourceId)
    end.

--spec make_client_id(manager_id(), kafka_consumer, atom() | binary()) -> atom().
-make_client_id(InstanceId, KafkaType, KafkaName) ->
-   case is_dry_run(InstanceId) of
+-spec make_client_id(resource_id(), kafka_consumer, atom() | binary()) -> atom().
+make_client_id(ResourceId, KafkaType, KafkaName) ->
+   case is_dry_run(ResourceId) of
        false ->
            ClientID0 = emqx_bridge_kafka_impl:make_client_id(KafkaType, KafkaName),
            binary_to_atom(ClientID0);

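A note on `is_dry_run/1` above: `string:find/2` returns the suffix of its subject starting at the first occurrence of the pattern, so comparing that suffix against the whole id with `string:equal/2` is true exactly when the id begins with ?TEST_ID_PREFIX. A shell sketch with a made-up prefix value (the real ?TEST_ID_PREFIX is defined in the EMQX headers):

    1> string:find(<<"probe:kafka:x">>, <<"probe:">>).
    <<"probe:kafka:x">>
    2> string:equal(string:find(<<"probe:kafka:x">>, <<"probe:">>), <<"probe:kafka:x">>).
    true
    3> string:find(<<"kafka:probe:x">>, <<"probe:">>).
    <<"probe:x">>
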
@@ -1156,11 +1156,12 @@ t_start_and_consume_ok(Config) ->
    ),

    %% Check that the bridge probe API doesn't leak atoms.
-   ProbeRes = probe_bridge_api(Config),
-   ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes),
+   ProbeRes0 = probe_bridge_api(Config),
+   ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
    AtomsBefore = erlang:system_info(atom_count),
    %% Probe again; shouldn't have created more atoms.
-   ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes),
+   ProbeRes1 = probe_bridge_api(Config),
+   ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
    AtomsAfter = erlang:system_info(atom_count),
    ?assertEqual(AtomsBefore, AtomsAfter),

@@ -1259,11 +1260,12 @@ t_multiple_topic_mappings(Config) ->
    {ok, _} = snabbkaffe:receive_events(SRef0),

    %% Check that the bridge probe API doesn't leak atoms.
-   ProbeRes = probe_bridge_api(Config),
-   ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes),
+   ProbeRes0 = probe_bridge_api(Config),
+   ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
    AtomsBefore = erlang:system_info(atom_count),
    %% Probe again; shouldn't have created more atoms.
-   ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes),
+   ProbeRes1 = probe_bridge_api(Config),
+   ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
    AtomsAfter = erlang:system_info(atom_count),
    ?assertEqual(AtomsBefore, AtomsAfter),

@@ -1473,7 +1475,10 @@ do_t_receive_after_recovery(Config) ->
    ResourceId = resource_id(Config),
    ?check_trace(
        begin
-           {ok, _} = create_bridge(Config),
+           {ok, _} = create_bridge(
+               Config,
+               #{<<"kafka">> => #{<<"offset_reset_policy">> => <<"earliest">>}}
+           ),
            ping_until_healthy(Config, _Period = 1_500, _Timeout0 = 24_000),
            {ok, connected} = emqx_resource_manager:health_check(ResourceId),
            %% 0) ensure each partition commits its offset so it can

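Besides the rename, the probe hunks fix a real test bug: the old code asserted on the same `ProbeRes` twice and never issued a second probe, so the atom-leak check was vacuous. The repaired shape of the check, reduced to its essentials (a sketch, not a new test):

    AtomsBefore = erlang:system_info(atom_count),
    %% A fresh probe between the two measurements is what makes this meaningful.
    ?assertMatch({ok, {{_, 204, _}, _, _}}, probe_bridge_api(Config)),
    AtomsAfter = erlang:system_info(atom_count),
    ?assertEqual(AtomsBefore, AtomsAfter)
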
@@ -1,12 +1,12 @@
 # EMQX MatrixDB Bridge

-[MatrixDB](http://matrixdb.univ-lyon1.fr/) is a biological database focused on
-molecular interactions between extracellular proteins and polysaccharides.
+[YMatrix](https://www.ymatrix.cn/) is a hyper-converged database product developed by YMatrix based on the PostgreSQL / Greenplum classic open source database. In addition to being able to handle time series scenarios with ease, it also supports classic scenarios such as online transaction processing (OLTP) and online analytical processing (OLAP).

 The application is used to connect EMQX and MatrixDB.
 User can create a rule and easily ingest IoT data into MatrixDB by leveraging
 [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).

+<!---

 # Documentation

@@ -16,6 +16,7 @@ User can create a rule and easily ingest IoT data into MatrixDB by leveraging
 - Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
   for the EMQX rules engine introduction.

+--->

 # HTTP APIs

@@ -1,6 +1,6 @@
 # EMQX MySQL Bridge

-[MySQL](https://github.com/MySQL/MySQL) is a popular open-source relational database
+[MySQL](https://github.com/mysql/mysql-server) is a popular open-source relational database
 management system.

 The application is used to connect EMQX and MySQL.

@@ -0,0 +1,94 @@
+Business Source License 1.1
+
+Licensor: Hangzhou EMQ Technologies Co., Ltd.
+Licensed Work: EMQX Enterprise Edition
+ The Licensed Work is (c) 2023
+ Hangzhou EMQ Technologies Co., Ltd.
+Additional Use Grant: Students and educators are granted right to copy,
+ modify, and create derivative work for research
+ or education.
+Change Date: 2027-02-01
+Change License: Apache License, Version 2.0
+
+For information about alternative licensing arrangements for the Software,
+please contact Licensor: https://www.emqx.com/en/contact
+
+Notice
+
+The Business Source License (this document, or the “License”) is not an Open
+Source license. However, the Licensed Work will eventually be made available
+under an Open Source License, as stated in this License.
+
+License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
+“Business Source License” is a trademark of MariaDB Corporation Ab.
+
+-----------------------------------------------------------------------------
+
+Business Source License 1.1
+
+Terms
+
+The Licensor hereby grants you the right to copy, modify, create derivative
+works, redistribute, and make non-production use of the Licensed Work. The
+Licensor may make an Additional Use Grant, above, permitting limited
+production use.
+
+Effective on the Change Date, or the fourth anniversary of the first publicly
+available distribution of a specific version of the Licensed Work under this
+License, whichever comes first, the Licensor hereby grants you rights under
+the terms of the Change License, and the rights granted in the paragraph
+above terminate.
+
+If your use of the Licensed Work does not comply with the requirements
+currently in effect as described in this License, you must purchase a
+commercial license from the Licensor, its affiliated entities, or authorized
+resellers, or you must refrain from using the Licensed Work.
+
+All copies of the original and modified Licensed Work, and derivative works
+of the Licensed Work, are subject to this License. This License applies
+separately for each version of the Licensed Work and the Change Date may vary
+for each version of the Licensed Work released by Licensor.
+
+You must conspicuously display this License on each original or modified copy
+of the Licensed Work. If you receive the Licensed Work in original or
+modified form from a third party, the terms and conditions set forth in this
+License apply to your use of that work.
+
+Any use of the Licensed Work in violation of this License will automatically
+terminate your rights under this License for the current and all other
+versions of the Licensed Work.
+
+This License does not grant you any right in any trademark or logo of
+Licensor or its affiliates (provided that you may use a trademark or logo of
+Licensor as expressly required by this License).
+
+TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
+AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
+EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
+TITLE.
+
+MariaDB hereby grants you permission to use this License’s text to license
+your works, and to refer to it using the trademark “Business Source License”,
+as long as you comply with the Covenants of Licensor below.
+
+Covenants of Licensor
+
+In consideration of the right to use this License’s text and the “Business
+Source License” name and trademark, Licensor covenants to MariaDB, and to all
+other recipients of the licensed work to be provided by Licensor:
+
+1. To specify as the Change License the GPL Version 2.0 or any later version,
+or a license that is compatible with GPL Version 2.0 or a later version,
+where “compatible” means that software provided under the Change License can
+be included in a program with software provided under GPL Version 2.0 or a
+later version. Licensor may specify additional Change Licenses without
+limitation.
+
+2. To either: (a) specify an additional grant of rights to use that does not
+impose any additional restriction on the right granted in this License, as
+the Additional Use Grant; or (b) insert the text “None”.
+
+3. To specify a Change Date.
+
+4. Not to modify this License in any other way.

@@ -0,0 +1,36 @@
+# EMQX OpenTSDB Bridge
+
+[OpenTSDB](http://opentsdb.net) is a distributed, scalable Time Series Database (TSDB) written on top of HBase.
+
+OpenTSDB was written to address a common need: store, index and serve metrics collected from computer systems (network gear, operating systems, applications) at a large scale, and make this data easily accessible and graphable.
+
+OpenTSDB allows you to collect thousands of metrics from tens of thousands of hosts and applications, at a high rate (every few seconds).
+
+OpenTSDB will never delete or downsample data and can easily store hundreds of billions of data points.
+
+The application is used to connect EMQX and OpenTSDB. User can create a rule and easily ingest IoT data into OpenTSDB by leveraging the
+[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
+
+
+# Documentation
+
+- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
+  for the EMQX rules engine introduction.
+
+
+# HTTP APIs
+
+- Several APIs are provided for bridge management, which includes create bridge,
+  update bridge, get bridge, stop or restart bridge and list bridges etc.
+
+  Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information.
+
+
+# Contributing
+
+Please see our [contributing.md](../../CONTRIBUTING.md).
+
+
+# License
+
+EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

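Concretely, the payload a rule hands to this bridge is one OpenTSDB data point; the test suite added later in this diff builds exactly the shape below and pushes it through `emqx_bridge:send_message/2` (the bridge name here is a placeholder):

    Data = #{
        metric => <<"cpu">>,
        tags => #{<<"host">> => <<"serverA">>},
        value => 12
    },
    BridgeID = emqx_bridge_resource:bridge_id(<<"opents">>, <<"my_opents_bridge">>),
    emqx_bridge:send_message(BridgeID, Data).
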
@@ -0,0 +1,2 @@
+toxiproxy
+opents

@@ -0,0 +1,8 @@
+{erl_opts, [debug_info]}.
+
+{deps, [
+    {opentsdb, {git, "https://github.com/emqx/opentsdb-client-erl", {tag, "v0.5.1"}}},
+    {emqx_connector, {path, "../../apps/emqx_connector"}},
+    {emqx_resource, {path, "../../apps/emqx_resource"}},
+    {emqx_bridge, {path, "../../apps/emqx_bridge"}}
+]}.

@@ -0,0 +1,15 @@
+{application, emqx_bridge_opents, [
+    {description, "EMQX Enterprise OpenTSDB Bridge"},
+    {vsn, "0.1.0"},
+    {registered, []},
+    {applications, [
+        kernel,
+        stdlib,
+        opentsdb
+    ]},
+    {env, []},
+    {modules, []},
+
+    {licenses, ["BSL"]},
+    {links, []}
+]}.

@@ -0,0 +1,85 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+-module(emqx_bridge_opents).
+
+-include_lib("typerefl/include/types.hrl").
+-include_lib("hocon/include/hoconsc.hrl").
+-include_lib("emqx_resource/include/emqx_resource.hrl").
+
+-import(hoconsc, [mk/2, enum/1, ref/2]).
+
+-export([
+    conn_bridge_examples/1
+]).
+
+-export([
+    namespace/0,
+    roots/0,
+    fields/1,
+    desc/1
+]).
+
+%% -------------------------------------------------------------------------------------------------
+%% api
+conn_bridge_examples(Method) ->
+    [
+        #{
+            <<"opents">> => #{
+                summary => <<"OpenTSDB Bridge">>,
+                value => values(Method)
+            }
+        }
+    ].
+
+values(_Method) ->
+    #{
+        enable => true,
+        type => opents,
+        name => <<"foo">>,
+        server => <<"http://127.0.0.1:4242">>,
+        pool_size => 8,
+        resource_opts => #{
+            worker_pool_size => 1,
+            health_check_interval => ?HEALTHCHECK_INTERVAL_RAW,
+            auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW,
+            batch_size => ?DEFAULT_BATCH_SIZE,
+            batch_time => ?DEFAULT_BATCH_TIME,
+            query_mode => async,
+            max_buffer_bytes => ?DEFAULT_BUFFER_BYTES
+        }
+    }.
+
+%% -------------------------------------------------------------------------------------------------
+%% Hocon Schema Definitions
+namespace() -> "bridge_opents".
+
+roots() -> [].
+
+fields("config") ->
+    [
+        {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}
+    ] ++ emqx_resource_schema:fields("resource_opts") ++
+        emqx_bridge_opents_connector:fields(config);
+fields("post") ->
+    [type_field(), name_field() | fields("config")];
+fields("put") ->
+    fields("config");
+fields("get") ->
+    emqx_bridge_schema:status_fields() ++ fields("post").
+
+desc("config") ->
+    ?DESC("desc_config");
+desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
+    ["Configuration for OpenTSDB using `", string:to_upper(Method), "` method."];
+desc(_) ->
+    undefined.
+
+%% -------------------------------------------------------------------------------------------------
+%% internal
+
+type_field() ->
+    {type, mk(enum([opents]), #{required => true, desc => ?DESC("desc_type")})}.
+
+name_field() ->
+    {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.

@@ -0,0 +1,184 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_opents_connector).
+
+-behaviour(emqx_resource).
+
+-include_lib("emqx_resource/include/emqx_resource.hrl").
+-include_lib("typerefl/include/types.hrl").
+-include_lib("emqx/include/logger.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+-include_lib("hocon/include/hoconsc.hrl").
+
+-export([roots/0, fields/1]).
+
+%% `emqx_resource' API
+-export([
+    callback_mode/0,
+    is_buffer_supported/0,
+    on_start/2,
+    on_stop/2,
+    on_query/3,
+    on_batch_query/3,
+    on_get_status/2
+]).
+
+-export([connect/1]).
+
+-import(hoconsc, [mk/2, enum/1, ref/2]).
+
+%%=====================================================================
+%% Hocon schema
+roots() ->
+    [{config, #{type => hoconsc:ref(?MODULE, config)}}].
+
+fields(config) ->
+    [
+        {server, mk(binary(), #{required => true, desc => ?DESC("server")})},
+        {pool_size, fun emqx_connector_schema_lib:pool_size/1},
+        {summary, mk(boolean(), #{default => true, desc => ?DESC("summary")})},
+        {details, mk(boolean(), #{default => false, desc => ?DESC("details")})},
+        {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1}
+    ].
+
+%%========================================================================================
+%% `emqx_resource' API
+%%========================================================================================
+
+callback_mode() -> always_sync.
+
+is_buffer_supported() -> false.
+
+on_start(
+    InstanceId,
+    #{
+        server := Server,
+        pool_size := PoolSize,
+        summary := Summary,
+        details := Details,
+        resource_opts := #{batch_size := BatchSize}
+    } = Config
+) ->
+    ?SLOG(info, #{
+        msg => "starting_opents_connector",
+        connector => InstanceId,
+        config => emqx_utils:redact(Config)
+    }),
+
+    Options = [
+        {server, to_str(Server)},
+        {summary, Summary},
+        {details, Details},
+        {max_batch_size, BatchSize},
+        {pool_size, PoolSize}
+    ],
+
+    State = #{pool_name => InstanceId, server => Server},
+    case opentsdb_connectivity(Server) of
+        ok ->
+            case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of
+                ok ->
+                    {ok, State};
+                Error ->
+                    Error
+            end;
+        {error, Reason} = Error ->
+            ?SLOG(error, #{msg => "Initiate resource failed", reason => Reason}),
+            Error
+    end.
+
+on_stop(InstanceId, #{pool_name := PoolName} = _State) ->
+    ?SLOG(info, #{
+        msg => "stopping_opents_connector",
+        connector => InstanceId
+    }),
+    emqx_resource_pool:stop(PoolName).
+
+on_query(InstanceId, Request, State) ->
+    on_batch_query(InstanceId, [Request], State).
+
+on_batch_query(
+    InstanceId,
+    BatchReq,
+    State
+) ->
+    Datas = [format_opentsdb_msg(Msg) || {_Key, Msg} <- BatchReq],
+    do_query(InstanceId, Datas, State).
+
+on_get_status(_InstanceId, #{server := Server}) ->
+    Result =
+        case opentsdb_connectivity(Server) of
+            ok ->
+                connected;
+            {error, Reason} ->
+                ?SLOG(error, #{msg => "OpenTSDB lost connection", reason => Reason}),
+                connecting
+        end,
+    Result.
+
+%%========================================================================================
+%% Helper fns
+%%========================================================================================
+
+do_query(InstanceId, Query, #{pool_name := PoolName} = State) ->
+    ?TRACE(
+        "QUERY",
+        "opents_connector_received",
+        #{connector => InstanceId, query => Query, state => State}
+    ),
+    Result = ecpool:pick_and_do(PoolName, {opentsdb, put, [Query]}, no_handover),
+
+    case Result of
+        {error, Reason} ->
+            ?tp(
+                opents_connector_query_return,
+                #{error => Reason}
+            ),
+            ?SLOG(error, #{
+                msg => "opents_connector_do_query_failed",
+                connector => InstanceId,
+                query => Query,
+                reason => Reason
+            }),
+            Result;
+        _ ->
+            ?tp(
+                opents_connector_query_return,
+                #{result => Result}
+            ),
+            Result
+    end.
+
+connect(Opts) ->
+    opentsdb:start_link(Opts).
+
+to_str(List) when is_list(List) ->
+    List;
+to_str(Bin) when is_binary(Bin) ->
+    erlang:binary_to_list(Bin).
+
+opentsdb_connectivity(Server) ->
+    SvrUrl =
+        case Server of
+            <<"http://", _/binary>> -> Server;
+            <<"https://", _/binary>> -> Server;
+            _ -> "http://" ++ Server
+        end,
+    emqx_plugin_libs_rule:http_connectivity(SvrUrl).
+
+format_opentsdb_msg(Msg) ->
+    maps:with(
+        [
+            timestamp,
+            metric,
+            tags,
+            value,
+            <<"timestamp">>,
+            <<"metric">>,
+            <<"tags">>,
+            <<"value">>
+        ],
+        Msg
+    ).

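`do_query/3` above hands the write to a pooled client via `ecpool:pick_and_do(PoolName, {opentsdb, put, [Query]}, no_handover)`; ecpool prepends the picked worker's client pid, so the effective call is `opentsdb:put(Client, Query)`. A rough standalone sketch of the same call, assuming opentsdb-client-erl's `start_link/1` accepts the same option shapes as the `Options` list built in `on_start/2`:

    %% Sketch: using the client directly, outside the resource pool.
    {ok, Client} = opentsdb:start_link([{server, "http://127.0.0.1:4242"}]),
    opentsdb:put(Client, [
        #{metric => <<"cpu">>, tags => #{<<"host">> => <<"serverA">>}, value => 12}
    ]).
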
@@ -0,0 +1,363 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_opents_SUITE).
+
+-compile(nowarn_export_all).
+-compile(export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+
+% DB defaults
+-define(BATCH_SIZE, 10).
+
+%%------------------------------------------------------------------------------
+%% CT boilerplate
+%%------------------------------------------------------------------------------
+
+all() ->
+    [
+        {group, with_batch},
+        {group, without_batch}
+    ].
+
+groups() ->
+    TCs = emqx_common_test_helpers:all(?MODULE),
+    [
+        {with_batch, TCs},
+        {without_batch, TCs}
+    ].
+
+init_per_group(with_batch, Config0) ->
+    Config = [{batch_size, ?BATCH_SIZE} | Config0],
+    common_init(Config);
+init_per_group(without_batch, Config0) ->
+    Config = [{batch_size, 1} | Config0],
+    common_init(Config);
+init_per_group(_Group, Config) ->
+    Config.
+
+end_per_group(Group, Config) when Group =:= with_batch; Group =:= without_batch ->
+    ProxyHost = ?config(proxy_host, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+    ok;
+end_per_group(_Group, _Config) ->
+    ok.
+
+init_per_suite(Config) ->
+    Config.
+
+end_per_suite(_Config) ->
+    emqx_mgmt_api_test_util:end_suite(),
+    ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]),
+    ok.
+
+init_per_testcase(_Testcase, Config) ->
+    delete_bridge(Config),
+    snabbkaffe:start_trace(),
+    Config.
+
+end_per_testcase(_Testcase, Config) ->
+    ProxyHost = ?config(proxy_host, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+    ok = snabbkaffe:stop(),
+    delete_bridge(Config),
+    ok.
+
+%%------------------------------------------------------------------------------
+%% Helper fns
+%%------------------------------------------------------------------------------
+
+common_init(ConfigT) ->
+    Host = os:getenv("OPENTS_HOST", "toxiproxy"),
+    Port = list_to_integer(os:getenv("OPENTS_PORT", "4242")),
+
+    Config0 = [
+        {opents_host, Host},
+        {opents_port, Port},
+        {proxy_name, "opents"}
+        | ConfigT
+    ],
+
+    BridgeType = proplists:get_value(bridge_type, Config0, <<"opents">>),
+    case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of
+        true ->
+            % Setup toxiproxy
+            ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
+            ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
+            emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
+            % Ensure EE bridge module is loaded
+            _ = application:load(emqx_ee_bridge),
+            _ = emqx_ee_bridge:module_info(),
+            ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]),
+            emqx_mgmt_api_test_util:init_suite(),
+            {Name, OpenTSConf} = opents_config(BridgeType, Config0),
+            Config =
+                [
+                    {opents_config, OpenTSConf},
+                    {opents_bridge_type, BridgeType},
+                    {opents_name, Name},
+                    {proxy_host, ProxyHost},
+                    {proxy_port, ProxyPort}
+                    | Config0
+                ],
+            Config;
+        false ->
+            case os:getenv("IS_CI") of
+                "yes" ->
+                    throw(no_opents);
+                _ ->
+                    {skip, no_opents}
+            end
+    end.
+
+opents_config(BridgeType, Config) ->
+    Port = integer_to_list(?config(opents_port, Config)),
+    Server = "http://" ++ ?config(opents_host, Config) ++ ":" ++ Port,
+    Name = atom_to_binary(?MODULE),
+    BatchSize = ?config(batch_size, Config),
+    ConfigString =
+        io_lib:format(
+            "bridges.~s.~s {\n"
+            "  enable = true\n"
+            "  server = ~p\n"
+            "  resource_opts = {\n"
+            "    request_timeout = 500ms\n"
+            "    batch_size = ~b\n"
+            "    query_mode = sync\n"
+            "  }\n"
+            "}",
+            [
+                BridgeType,
+                Name,
+                Server,
+                BatchSize
+            ]
+        ),
+    {Name, parse_and_check(ConfigString, BridgeType, Name)}.
+
+parse_and_check(ConfigString, BridgeType, Name) ->
+    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
+    hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
+    #{<<"bridges">> := #{BridgeType := #{Name := Config}}} = RawConf,
+    Config.
+
+create_bridge(Config) ->
+    create_bridge(Config, _Overrides = #{}).
+
+create_bridge(Config, Overrides) ->
+    BridgeType = ?config(opents_bridge_type, Config),
+    Name = ?config(opents_name, Config),
+    Config0 = ?config(opents_config, Config),
+    Config1 = emqx_utils_maps:deep_merge(Config0, Overrides),
+    emqx_bridge:create(BridgeType, Name, Config1).
+
+delete_bridge(Config) ->
+    BridgeType = ?config(opents_bridge_type, Config),
+    Name = ?config(opents_name, Config),
+    emqx_bridge:remove(BridgeType, Name).
+
+create_bridge_http(Params) ->
+    Path = emqx_mgmt_api_test_util:api_path(["bridges"]),
+    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
+    case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of
+        {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])};
+        Error -> Error
+    end.
+
+send_message(Config, Payload) ->
+    Name = ?config(opents_name, Config),
+    BridgeType = ?config(opents_bridge_type, Config),
+    BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name),
+    emqx_bridge:send_message(BridgeID, Payload).
+
+query_resource(Config, Request) ->
+    query_resource(Config, Request, 1_000).
+
+query_resource(Config, Request, Timeout) ->
+    Name = ?config(opents_name, Config),
+    BridgeType = ?config(opents_bridge_type, Config),
+    ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
+    emqx_resource:query(ResourceID, Request, #{timeout => Timeout}).
+
+%%------------------------------------------------------------------------------
+%% Testcases
+%%------------------------------------------------------------------------------
+
+t_setup_via_config_and_publish(Config) ->
+    ?assertMatch(
+        {ok, _},
+        create_bridge(Config)
+    ),
+    SentData = make_data(),
+    ?check_trace(
+        begin
+            {_, {ok, #{result := Result}}} =
+                ?wait_async_action(
+                    send_message(Config, SentData),
+                    #{?snk_kind := buffer_worker_flush_ack},
+                    2_000
+                ),
+            ?assertMatch(
+                {ok, 200, #{failed := 0, success := 1}}, Result
+            ),
+            ok
+        end,
+        fun(Trace0) ->
+            Trace = ?of_kind(opents_connector_query_return, Trace0),
+            ?assertMatch([#{result := {ok, 200, #{failed := 0, success := 1}}}], Trace),
+            ok
+        end
+    ),
+    ok.
+
+t_setup_via_http_api_and_publish(Config) ->
+    BridgeType = ?config(opents_bridge_type, Config),
+    Name = ?config(opents_name, Config),
+    OpentsConfig0 = ?config(opents_config, Config),
+    OpentsConfig = OpentsConfig0#{
+        <<"name">> => Name,
+        <<"type">> => BridgeType
+    },
+    ?assertMatch(
+        {ok, _},
+        create_bridge_http(OpentsConfig)
+    ),
+    SentData = make_data(),
+    ?check_trace(
+        begin
+            Request = {send_message, SentData},
+            Res0 = query_resource(Config, Request, 2_500),
+            ?assertMatch(
+                {ok, 200, #{failed := 0, success := 1}}, Res0
+            ),
+            ok
+        end,
+        fun(Trace0) ->
+            Trace = ?of_kind(opents_connector_query_return, Trace0),
+            ?assertMatch([#{result := {ok, 200, #{failed := 0, success := 1}}}], Trace),
+            ok
+        end
+    ),
+    ok.
+
+t_get_status(Config) ->
+    ?assertMatch(
+        {ok, _},
+        create_bridge(Config)
+    ),
+
+    Name = ?config(opents_name, Config),
+    BridgeType = ?config(opents_bridge_type, Config),
+    ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
+
+    ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)),
+    ok.
+
+t_create_disconnected(Config) ->
+    BridgeType = proplists:get_value(bridge_type, Config, <<"opents">>),
+    Config1 = lists:keyreplace(opents_port, 1, Config, {opents_port, 61234}),
+    {_Name, OpenTSConf} = opents_config(BridgeType, Config1),
+
+    Config2 = lists:keyreplace(opents_config, 1, Config1, {opents_config, OpenTSConf}),
+    ?assertMatch({ok, _}, create_bridge(Config2)),
+
+    Name = ?config(opents_name, Config),
+    ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
+    ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceID)),
+    ok.
+
+t_write_failure(Config) ->
+    ProxyName = ?config(proxy_name, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    ProxyHost = ?config(proxy_host, Config),
+    {ok, _} = create_bridge(Config),
+    SentData = make_data(),
+    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
+        {_, {ok, #{result := Result}}} =
+            ?wait_async_action(
+                send_message(Config, SentData),
+                #{?snk_kind := buffer_worker_flush_ack},
+                2_000
+            ),
+        ?assertMatch({error, _}, Result),
+        ok
+    end),
+    ok.
+
+t_write_timeout(Config) ->
+    ProxyName = ?config(proxy_name, Config),
+    ProxyPort = ?config(proxy_port, Config),
+    ProxyHost = ?config(proxy_host, Config),
+    {ok, _} = create_bridge(
+        Config,
+        #{
+            <<"resource_opts">> => #{
+                <<"request_timeout">> => 500,
+                <<"resume_interval">> => 100,
+                <<"health_check_interval">> => 100
+            }
+        }
+    ),
+    SentData = make_data(),
+    emqx_common_test_helpers:with_failure(
+        timeout, ProxyName, ProxyHost, ProxyPort, fun() ->
+            ?assertMatch(
+                {error, {resource_error, #{reason := timeout}}},
+                query_resource(Config, {send_message, SentData})
+            )
+        end
+    ),
+    ok.
+
+t_missing_data(Config) ->
+    ?assertMatch(
+        {ok, _},
+        create_bridge(Config)
+    ),
+    {_, {ok, #{result := Result}}} =
+        ?wait_async_action(
+            send_message(Config, #{}),
+            #{?snk_kind := buffer_worker_flush_ack},
+            2_000
+        ),
+    ?assertMatch(
+        {error, {400, #{failed := 1, success := 0}}},
+        Result
+    ),
+    ok.
+
+t_bad_data(Config) ->
+    ?assertMatch(
+        {ok, _},
+        create_bridge(Config)
+    ),
+    Data = maps:without([metric], make_data()),
+    {_, {ok, #{result := Result}}} =
+        ?wait_async_action(
+            send_message(Config, Data),
+            #{?snk_kind := buffer_worker_flush_ack},
+            2_000
+        ),
+
+    ?assertMatch(
+        {error, {400, #{failed := 1, success := 0}}}, Result
+    ),
+    ok.
+
+make_data() ->
+    make_data(<<"cpu">>, 12).
+
+make_data(Metric, Value) ->
+    #{
+        metric => Metric,
+        tags => #{
+            <<"host">> => <<"serverA">>
+        },
+        value => Value
+    }.

@@ -0,0 +1,94 @@
+Business Source License 1.1
+
+Licensor: Hangzhou EMQ Technologies Co., Ltd.
+Licensed Work: EMQX Enterprise Edition
+ The Licensed Work is (c) 2023
+ Hangzhou EMQ Technologies Co., Ltd.
+Additional Use Grant: Students and educators are granted right to copy,
+ modify, and create derivative work for research
+ or education.
+Change Date: 2027-02-01
+Change License: Apache License, Version 2.0
+
+For information about alternative licensing arrangements for the Software,
+please contact Licensor: https://www.emqx.com/en/contact
+
+Notice
+
+The Business Source License (this document, or the “License”) is not an Open
+Source license. However, the Licensed Work will eventually be made available
+under an Open Source License, as stated in this License.
+
+License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
+“Business Source License” is a trademark of MariaDB Corporation Ab.
+
+-----------------------------------------------------------------------------
+
+Business Source License 1.1
+
+Terms
+
+The Licensor hereby grants you the right to copy, modify, create derivative
+works, redistribute, and make non-production use of the Licensed Work. The
+Licensor may make an Additional Use Grant, above, permitting limited
+production use.
+
+Effective on the Change Date, or the fourth anniversary of the first publicly
+available distribution of a specific version of the Licensed Work under this
+License, whichever comes first, the Licensor hereby grants you rights under
+the terms of the Change License, and the rights granted in the paragraph
+above terminate.
+
+If your use of the Licensed Work does not comply with the requirements
+currently in effect as described in this License, you must purchase a
+commercial license from the Licensor, its affiliated entities, or authorized
+resellers, or you must refrain from using the Licensed Work.
+
+All copies of the original and modified Licensed Work, and derivative works
+of the Licensed Work, are subject to this License. This License applies
+separately for each version of the Licensed Work and the Change Date may vary
+for each version of the Licensed Work released by Licensor.
+
+You must conspicuously display this License on each original or modified copy
+of the Licensed Work. If you receive the Licensed Work in original or
+modified form from a third party, the terms and conditions set forth in this
+License apply to your use of that work.
+
+Any use of the Licensed Work in violation of this License will automatically
+terminate your rights under this License for the current and all other
+versions of the Licensed Work.
+
+This License does not grant you any right in any trademark or logo of
+Licensor or its affiliates (provided that you may use a trademark or logo of
+Licensor as expressly required by this License).
+
+TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
+AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
+EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
+TITLE.
+
+MariaDB hereby grants you permission to use this License’s text to license
+your works, and to refer to it using the trademark “Business Source License”,
+as long as you comply with the Covenants of Licensor below.
+
+Covenants of Licensor
+
+In consideration of the right to use this License’s text and the “Business
+Source License” name and trademark, Licensor covenants to MariaDB, and to all
+other recipients of the licensed work to be provided by Licensor:
+
+1. To specify as the Change License the GPL Version 2.0 or any later version,
+or a license that is compatible with GPL Version 2.0 or a later version,
+where “compatible” means that software provided under the Change License can
+be included in a program with software provided under GPL Version 2.0 or a
+later version. Licensor may specify additional Change Licenses without
+limitation.
+
+2. To either: (a) specify an additional grant of rights to use that does not
+impose any additional restriction on the right granted in this License, as
+the Additional Use Grant; or (b) insert the text “None”.
+
+3. To specify a Change Date.
+
+4. Not to modify this License in any other way.

@ -0,0 +1,28 @@
# EMQX Oracle Database Bridge

This application houses the Oracle Database bridge for EMQX Enterprise Edition.
It implements the data bridge APIs for interacting with Oracle Database.

# Documentation

- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
  for an introduction to the EMQX rules engine; messages reach the bridge
  through a rule like the one sketched below.
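For instance, the test suite in this PR creates a rule whose SQL selects everything published to a topic and attaches the bridge as the rule's action (the topic name here is the one used in the tests):

```sql
SELECT * FROM "mqtt/rule"
```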
# HTTP APIs

- Several APIs are provided for bridge management: creating, updating,
  fetching, stopping or restarting, and listing bridges, among others.

Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information.
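As a sketch only (the endpoint, port, and credentials below are assumptions for illustration, not verified against this PR; the payload mirrors the example values in the bridge schema module), creating this bridge over HTTP looks roughly like:

```shell
# Hypothetical API key/secret and dashboard port; adjust to your deployment.
curl -u '<api-key>:<api-secret>' \
  -H 'Content-Type: application/json' \
  -X POST 'http://localhost:18083/api/v5/bridges' \
  -d '{
    "type": "oracle",
    "name": "foo",
    "server": "127.0.0.1:1521",
    "sid": "ORCL",
    "username": "system",
    "password": "oracle",
    "sql": "insert into t_mqtt_msg(msgid, topic, qos, payload) values (${id}, ${topic}, ${qos}, ${payload})"
  }'
```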
## Contributing

Please see our [contributing.md](../../CONTRIBUTING.md).

## License

See [BSL](./BSL.txt).
@ -0,0 +1,2 @@
toxiproxy
oracle
@ -0,0 +1,13 @@
%% -*- mode: erlang; -*-

{erl_opts, [debug_info]}.
{deps, [ {emqx_oracle, {path, "../../apps/emqx_oracle"}}
       , {emqx_connector, {path, "../../apps/emqx_connector"}}
       , {emqx_resource, {path, "../../apps/emqx_resource"}}
       , {emqx_bridge, {path, "../../apps/emqx_bridge"}}
       ]}.

{shell, [
    % {config, "config/sys.config"},
    {apps, [emqx_bridge_oracle]}
]}.
@ -0,0 +1,14 @@
{application, emqx_bridge_oracle, [
    {description, "EMQX Enterprise Oracle Database Bridge"},
    {vsn, "0.1.0"},
    {registered, []},
    {applications, [
        kernel,
        stdlib,
        emqx_oracle
    ]},
    {env, []},
    {modules, []},
    {links, []}
]}.
@ -0,0 +1,109 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_oracle).

-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx_bridge/include/emqx_bridge.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").

-export([
    conn_bridge_examples/1
]).

-export([
    namespace/0,
    roots/0,
    fields/1,
    desc/1
]).

%% Default insert template; the ${...} placeholders are substituted with
%% fields of the message being forwarded.
-define(DEFAULT_SQL, <<
    "insert into t_mqtt_msg(msgid, topic, qos, payload) "
    "values (${id}, ${topic}, ${qos}, ${payload})"
>>).

conn_bridge_examples(Method) ->
    [
        #{
            <<"oracle">> => #{
                summary => <<"Oracle Database Bridge">>,
                value => values(Method)
            }
        }
    ].

values(_Method) ->
    #{
        enable => true,
        type => oracle,
        name => <<"foo">>,
        server => <<"127.0.0.1:1521">>,
        pool_size => 8,
        database => <<"ORCL">>,
        sid => <<"ORCL">>,
        username => <<"root">>,
        password => <<"******">>,
        sql => ?DEFAULT_SQL,
        local_topic => <<"local/topic/#">>,
        resource_opts => #{
            worker_pool_size => 8,
            health_check_interval => ?HEALTHCHECK_INTERVAL_RAW,
            auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW,
            batch_size => ?DEFAULT_BATCH_SIZE,
            batch_time => ?DEFAULT_BATCH_TIME,
            query_mode => async,
            max_buffer_bytes => ?DEFAULT_BUFFER_BYTES
        }
    }.

%% -------------------------------------------------------------------------------------------------
%% Hocon Schema Definitions

namespace() -> "bridge_oracle".

roots() -> [].

fields("config") ->
    [
        {enable,
            hoconsc:mk(
                boolean(),
                #{desc => ?DESC("config_enable"), default => true}
            )},
        {sql,
            hoconsc:mk(
                binary(),
                #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>}
            )},
        {local_topic,
            hoconsc:mk(
                binary(),
                #{desc => ?DESC("local_topic"), default => undefined}
            )}
    ] ++ emqx_resource_schema:fields("resource_opts") ++
        (emqx_oracle_schema:fields(config) --
            emqx_connector_schema_lib:prepare_statement_fields());
fields("post") ->
    fields("post", oracle);
fields("put") ->
    fields("config");
fields("get") ->
    emqx_bridge_schema:status_fields() ++ fields("post").

fields("post", Type) ->
    [type_field(Type), name_field() | fields("config")].

desc("config") ->
    ?DESC("desc_config");
desc(_) ->
    undefined.

%% -------------------------------------------------------------------------------------------------

type_field(Type) ->
    {type, hoconsc:mk(hoconsc:enum([Type]), #{required => true, desc => ?DESC("desc_type")})}.

name_field() ->
    {name, hoconsc:mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.
@ -0,0 +1,514 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_oracle_SUITE).

-compile(nowarn_export_all).
-compile(export_all).

-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").

-import(emqx_common_test_helpers, [on_exit/1]).

-define(BRIDGE_TYPE_BIN, <<"oracle">>).
-define(APPS, [emqx_bridge, emqx_resource, emqx_rule_engine, emqx_oracle, emqx_bridge_oracle]).
-define(DATABASE, "XE").
-define(RULE_TOPIC, "mqtt/rule").
% -define(RULE_TOPIC_BIN, <<?RULE_TOPIC>>).

%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------

all() ->
    [
        {group, plain}
    ].

groups() ->
    AllTCs = emqx_common_test_helpers:all(?MODULE),
    [
        {plain, AllTCs}
    ].

only_once_tests() ->
    [t_create_via_http].

init_per_suite(Config) ->
    Config.

end_per_suite(_Config) ->
    emqx_mgmt_api_test_util:end_suite(),
    ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?APPS)),
    _ = application:stop(emqx_connector),
    ok.

init_per_group(plain = Type, Config) ->
    OracleHost = os:getenv("ORACLE_PLAIN_HOST", "toxiproxy.emqx.net"),
    OraclePort = list_to_integer(os:getenv("ORACLE_PLAIN_PORT", "1521")),
    ProxyName = "oracle",
    case emqx_common_test_helpers:is_tcp_server_available(OracleHost, OraclePort) of
        true ->
            Config1 = common_init_per_group(),
            [
                {proxy_name, ProxyName},
                {oracle_host, OracleHost},
                {oracle_port, OraclePort},
                {connection_type, Type}
                | Config1 ++ Config
            ];
        false ->
            case os:getenv("IS_CI") of
                "yes" ->
                    throw(no_oracle);
                _ ->
                    {skip, no_oracle}
            end
    end;
init_per_group(_Group, Config) ->
    Config.

end_per_group(Group, Config) when
    Group =:= plain
->
    common_end_per_group(Config),
    ok;
end_per_group(_Group, _Config) ->
    ok.

common_init_per_group() ->
    ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
    ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    application:load(emqx_bridge),
    ok = emqx_common_test_helpers:start_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:start_apps(?APPS),
    {ok, _} = application:ensure_all_started(emqx_connector),
    emqx_mgmt_api_test_util:init_suite(),
    UniqueNum = integer_to_binary(erlang:unique_integer()),
    MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>,
    [
        {proxy_host, ProxyHost},
        {proxy_port, ProxyPort},
        {mqtt_topic, MQTTTopic}
    ].

common_end_per_group(Config) ->
    ProxyHost = ?config(proxy_host, Config),
    ProxyPort = ?config(proxy_port, Config),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    delete_all_bridges(),
    ok.

init_per_testcase(TestCase, Config) ->
    common_init_per_testcase(TestCase, Config).

end_per_testcase(_Testcase, Config) ->
    common_end_per_testcase(_Testcase, Config).

common_init_per_testcase(TestCase, Config0) ->
    ct:timetrap(timer:seconds(60)),
    delete_all_bridges(),
    UniqueNum = integer_to_binary(erlang:unique_integer()),
    OracleTopic =
        <<
            (atom_to_binary(TestCase))/binary,
            UniqueNum/binary
        >>,
    ConnectionType = ?config(connection_type, Config0),
    Config = [{oracle_topic, OracleTopic} | Config0],
    {Name, ConfigString, OracleConfig} = oracle_config(
        TestCase, ConnectionType, Config
    ),
    ok = snabbkaffe:start_trace(),
    [
        {oracle_name, Name},
        {oracle_config_string, ConfigString},
        {oracle_config, OracleConfig}
        | Config
    ].

common_end_per_testcase(_Testcase, Config) ->
    case proplists:get_bool(skip_does_not_apply, Config) of
        true ->
            ok;
        false ->
            ProxyHost = ?config(proxy_host, Config),
            ProxyPort = ?config(proxy_port, Config),
            emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
            delete_all_bridges(),
            %% in CI, apparently this needs more time since the
            %% machines struggle with all the containers running...
            emqx_common_test_helpers:call_janitor(60_000),
            ok = snabbkaffe:stop(),
            ok
    end.

delete_all_bridges() ->
    lists:foreach(
        fun(#{name := Name, type := Type}) ->
            emqx_bridge:remove(Type, Name)
        end,
        emqx_bridge:list()
    ).

%%------------------------------------------------------------------------------
%% Helper fns
%%------------------------------------------------------------------------------

sql_insert_template_for_bridge() ->
    "INSERT INTO mqtt_test(topic, msgid, payload, retain) VALUES (${topic}, ${id}, ${payload}, ${retain})".

sql_create_table() ->
    "CREATE TABLE mqtt_test (topic VARCHAR2(255), msgid VARCHAR2(64), payload NCLOB, retain NUMBER(1))".

sql_drop_table() ->
    "DROP TABLE mqtt_test".

reset_table(Config) ->
    ResourceId = resource_id(Config),
    _ = emqx_resource:simple_sync_query(ResourceId, {sql, sql_drop_table()}),
    {ok, [{proc_result, 0, _}]} = emqx_resource:simple_sync_query(
        ResourceId, {sql, sql_create_table()}
    ),
    ok.

drop_table(Config) ->
    ResourceId = resource_id(Config),
    emqx_resource:simple_sync_query(ResourceId, {query, sql_drop_table()}),
    ok.

oracle_config(TestCase, _ConnectionType, Config) ->
    UniqueNum = integer_to_binary(erlang:unique_integer()),
    OracleHost = ?config(oracle_host, Config),
    OraclePort = ?config(oracle_port, Config),
    Name = <<
        (atom_to_binary(TestCase))/binary, UniqueNum/binary
    >>,
    ServerURL = iolist_to_binary([
        OracleHost,
        ":",
        integer_to_binary(OraclePort)
    ]),
    ConfigString =
        io_lib:format(
            "bridges.oracle.~s {\n"
            "  enable = true\n"
            "  database = \"~s\"\n"
            "  sid = \"~s\"\n"
            "  server = \"~s\"\n"
            "  username = \"system\"\n"
            "  password = \"oracle\"\n"
            "  pool_size = 1\n"
            "  sql = \"~s\"\n"
            "  resource_opts = {\n"
            "    auto_restart_interval = 5000\n"
            "    request_timeout = 30000\n"
            "    query_mode = \"async\"\n"
            "    enable_batch = true\n"
            "    batch_size = 3\n"
            "    batch_time = \"3s\"\n"
            "    worker_pool_size = 1\n"
            "  }\n"
            "}\n",
            [
                Name,
                ?DATABASE,
                ?DATABASE,
                ServerURL,
                sql_insert_template_for_bridge()
            ]
        ),
    {Name, ConfigString, parse_and_check(ConfigString, Name)}.

parse_and_check(ConfigString, Name) ->
    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
    TypeBin = ?BRIDGE_TYPE_BIN,
    hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
    #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf,
    Config.

resource_id(Config) ->
    Type = ?BRIDGE_TYPE_BIN,
    Name = ?config(oracle_name, Config),
    emqx_bridge_resource:resource_id(Type, Name).

create_bridge(Config) ->
    create_bridge(Config, _Overrides = #{}).

create_bridge(Config, Overrides) ->
    Type = ?BRIDGE_TYPE_BIN,
    Name = ?config(oracle_name, Config),
    OracleConfig0 = ?config(oracle_config, Config),
    OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides),
    emqx_bridge:create(Type, Name, OracleConfig).

create_bridge_api(Config) ->
    create_bridge_api(Config, _Overrides = #{}).

create_bridge_api(Config, Overrides) ->
    TypeBin = ?BRIDGE_TYPE_BIN,
    Name = ?config(oracle_name, Config),
    OracleConfig0 = ?config(oracle_config, Config),
    OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides),
    Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name},
    Path = emqx_mgmt_api_test_util:api_path(["bridges"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("creating bridge (via http): ~p", [Params]),
    Res =
        case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of
            {ok, {Status, Headers, Body0}} ->
                {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}};
            Error ->
                Error
        end,
    ct:pal("bridge create result: ~p", [Res]),
    Res.

update_bridge_api(Config) ->
    update_bridge_api(Config, _Overrides = #{}).

update_bridge_api(Config, Overrides) ->
    TypeBin = ?BRIDGE_TYPE_BIN,
    Name = ?config(oracle_name, Config),
    OracleConfig0 = ?config(oracle_config, Config),
    OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides),
    BridgeId = emqx_bridge_resource:bridge_id(TypeBin, Name),
    Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name},
    Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("updating bridge (via http): ~p", [Params]),
    Res =
        case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, Params, Opts) of
            {ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])};
            Error -> Error
        end,
    ct:pal("bridge update result: ~p", [Res]),
    Res.

probe_bridge_api(Config) ->
    probe_bridge_api(Config, _Overrides = #{}).

probe_bridge_api(Config, _Overrides) ->
    TypeBin = ?BRIDGE_TYPE_BIN,
    Name = ?config(oracle_name, Config),
    OracleConfig = ?config(oracle_config, Config),
    Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name},
    Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("probing bridge (via http): ~p", [Params]),
    Res =
        case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of
            {ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0};
            Error -> Error
        end,
    ct:pal("bridge probe result: ~p", [Res]),
    Res.

create_rule_and_action_http(Config) ->
    OracleName = ?config(oracle_name, Config),
    BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, OracleName),
    Params = #{
        enable => true,
        sql => <<"SELECT * FROM \"", ?RULE_TOPIC, "\"">>,
        actions => [BridgeId]
    },
    Path = emqx_mgmt_api_test_util:api_path(["rules"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    ct:pal("rule action params: ~p", [Params]),
    case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of
        {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])};
        Error -> Error
    end.

%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------

t_sync_query(Config) ->
    ResourceId = resource_id(Config),
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge_api(Config)),
            ?retry(
                _Sleep = 1_000,
                _Attempts = 20,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),
            reset_table(Config),
            MsgId = erlang:unique_integer(),
            Params = #{
                topic => ?config(mqtt_topic, Config),
                id => MsgId,
                payload => ?config(oracle_name, Config),
                retain => true
            },
            Message = {send_message, Params},
            ?assertEqual(
                {ok, [{affected_rows, 1}]}, emqx_resource:simple_sync_query(ResourceId, Message)
            ),
            ok
        end,
        []
    ),
    ok.

t_batch_sync_query(Config) ->
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    ProxyName = ?config(proxy_name, Config),
    ResourceId = resource_id(Config),
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge_api(Config)),
            ?retry(
                _Sleep = 1_000,
                _Attempts = 30,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),
            reset_table(Config),
            MsgId = erlang:unique_integer(),
            Params = #{
                topic => ?config(mqtt_topic, Config),
                id => MsgId,
                payload => ?config(oracle_name, Config),
                retain => false
            },
            %% Send 3 async messages while the resource is down. When it comes
            %% back, these messages will be delivered in a sync way. If we try to
            %% send sync messages directly, they will be sent async, as
            %% callback_mode is set to async_if_possible.
            Message = {send_message, Params},
            emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
                ct:sleep(1000),
                emqx_resource:query(ResourceId, Message),
                emqx_resource:query(ResourceId, Message),
                emqx_resource:query(ResourceId, Message)
            end),
            ?retry(
                _Sleep = 1_000,
                _Attempts = 30,
                ?assertMatch(
                    {ok, [{result_set, _, _, [[{3}]]}]},
                    emqx_resource:simple_sync_query(
                        ResourceId, {query, "SELECT COUNT(*) FROM mqtt_test"}
                    )
                )
            ),
            ok
        end,
        []
    ),
    ok.

t_create_via_http(Config) ->
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge_api(Config)),

            %% lightweight matrix testing some configs
            ?assertMatch(
                {ok, _},
                update_bridge_api(
                    Config,
                    #{
                        <<"resource_opts">> =>
                            #{<<"batch_size">> => 4}
                    }
                )
            ),
            ?assertMatch(
                {ok, _},
                update_bridge_api(
                    Config,
                    #{
                        <<"resource_opts">> =>
                            #{<<"batch_time">> => <<"4s">>}
                    }
                )
            ),
            ok
        end,
        []
    ),
    ok.

t_start_stop(Config) ->
    OracleName = ?config(oracle_name, Config),
    ResourceId = resource_id(Config),
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge(Config)),
            %% Since the connection process is async, we give it some time to
            %% stabilize and avoid flakiness.
            ?retry(
                _Sleep = 1_000,
                _Attempts = 20,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),

            %% Check that the bridge probe API doesn't leak atoms.
            ProbeRes0 = probe_bridge_api(
                Config,
                #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}}
            ),
            ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
            AtomsBefore = erlang:system_info(atom_count),
            %% Probe again; shouldn't have created more atoms.
            ProbeRes1 = probe_bridge_api(
                Config,
                #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}}
            ),

            ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
            AtomsAfter = erlang:system_info(atom_count),
            ?assertEqual(AtomsBefore, AtomsAfter),

            %% Now stop the bridge.
            ?assertMatch(
                {{ok, _}, {ok, _}},
                ?wait_async_action(
                    emqx_bridge:disable_enable(disable, ?BRIDGE_TYPE_BIN, OracleName),
                    #{?snk_kind := oracle_bridge_stopped},
                    5_000
                )
            ),

            ok
        end,
        fun(Trace) ->
            %% one for each probe, one for real
            ?assertMatch([_, _, _], ?of_kind(oracle_bridge_stopped, Trace)),
            ok
        end
    ),
    ok.

t_on_get_status(Config) ->
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    ProxyName = ?config(proxy_name, Config),
    ResourceId = resource_id(Config),
    ?assertMatch({ok, _}, create_bridge(Config)),
    %% Since the connection process is async, we give it some time to
    %% stabilize and avoid flakiness.
    ?retry(
        _Sleep = 1_000,
        _Attempts = 20,
        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
        ct:sleep(500),
        ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId))
    end),
    %% Check that it recovers itself.
    ?retry(
        _Sleep = 1_000,
        _Attempts = 20,
        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    ok.
@ -0,0 +1,19 @@
.rebar3
_*
.eunit
*.o
*.beam
*.plt
*.swp
*.swo
.erlang.cookie
ebin
log
erl_crash.dump
.rebar
logs
_build
.idea
*.iml
rebar3.crashdump
*~
@ -0,0 +1,94 @@
Business Source License 1.1

Licensor:             Hangzhou EMQ Technologies Co., Ltd.
Licensed Work:        EMQX Enterprise Edition
                      The Licensed Work is (c) 2023
                      Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
                      modify, and create derivative work for research
                      or education.
Change Date:          2027-02-01
Change License:       Apache License, Version 2.0

For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact

Notice

The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.

License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.

-----------------------------------------------------------------------------

Business Source License 1.1

Terms

The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.

Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.

If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.

All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.

You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.

Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.

This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).

TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.

MariaDB hereby grants you permission to use this License’s text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.

Covenants of Licensor

In consideration of the right to use this License’s text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:

1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.

2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.

3. To specify a Change Date.

4. Not to modify this License in any other way.
@ -0,0 +1,30 @@
# Pulsar Data Integration Bridge

This application houses the Pulsar Producer data integration bridge
for EMQX Enterprise Edition. It provides the means to connect to
Pulsar and publish messages to it.

Currently, our Pulsar Producer library has its own `replayq` buffering
implementation, so this bridge does not require buffer workers from
`emqx_resource`. It implements connection management and interaction
without the need for a separate connector app, since it's not used by
the authentication and authorization applications.

# Documentation links

For more information on Apache Pulsar, please see its [official
site](https://pulsar.apache.org/).

# Configurations

Please see [our official
documentation](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-pulsar.html)
for more detailed info. A minimal config sketch is shown below.
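As a rough illustration only (the field names below are assumptions for this sketch, not taken from this PR; the linked documentation has the authoritative schema), a Pulsar producer bridge is configured in HOCON along these lines:

```
# Hypothetical sketch of a Pulsar producer bridge configuration.
bridges.pulsar_producer.my_producer {
  enable = true
  servers = "pulsar://localhost:6650"
  pulsar_topic = "persistent://public/default/mqtt-messages"
  local_topic = "t/#"
}
```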
# Contributing

Please see our [contributing.md](../../CONTRIBUTING.md).

# License

EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).
@ -0,0 +1,2 @@
toxiproxy
pulsar