Merge pull request #10637 from zmstone/0508-prepare-for-e5.0.4

0508 prepare for e5.0.4
Zaiming (Stone) Shi, 2023-05-09 00:13:31 +02:00, committed by GitHub
commit 2cea5dc4d6
624 changed files with 30101 additions and 17322 deletions

View File

@@ -7,6 +7,7 @@ INFLUXDB_TAG=2.5.0
 TDENGINE_TAG=3.0.2.4
 DYNAMO_TAG=1.21.0
 CASSANDRA_TAG=3.11.6
+OPENTS_TAG=9aa7f88
 MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server
 SQLSERVER_TAG=2019-CU19-ubuntu-20.04

View File

@@ -0,0 +1,31 @@
version: '3.9'
services:
iotdb:
container_name: iotdb
hostname: iotdb
image: apache/iotdb:1.1.0-standalone
restart: always
environment:
- enable_rest_service=true
- cn_internal_address=iotdb
- cn_internal_port=10710
- cn_consensus_port=10720
- cn_target_config_node_list=iotdb:10710
- dn_rpc_address=iotdb
- dn_internal_address=iotdb
- dn_rpc_port=6667
- dn_mpp_data_exchange_port=10740
- dn_schema_region_consensus_port=10750
- dn_data_region_consensus_port=10760
- dn_target_config_node_list=iotdb:10710
# volumes:
# - ./data:/iotdb/data
# - ./logs:/iotdb/logs
expose:
- "18080"
# IoTDB's REST interface, uncomment for local testing
# ports:
# - "18080:18080"
networks:
- emqx_bridge

View File

@@ -0,0 +1,9 @@
version: '3.9'
services:
opents_server:
container_name: opents
image: petergrace/opentsdb-docker:${OPENTS_TAG}
restart: always
networks:
- emqx_bridge

View File

@@ -0,0 +1,11 @@
version: '3.9'
services:
oracle_server:
container_name: oracle
image: oracleinanutshell/oracle-xe-11g:1.0.0
restart: always
environment:
ORACLE_DISABLE_ASYNCH_IO: true
networks:
- emqx_bridge

View File

@@ -0,0 +1,32 @@
version: '3'
services:
pulsar:
container_name: pulsar
image: apachepulsar/pulsar:2.11.0
# ports:
# - 6650:6650
# - 8080:8080
networks:
emqx_bridge:
volumes:
- ../../apps/emqx/etc/certs/cert.pem:/etc/certs/server.pem
- ../../apps/emqx/etc/certs/key.pem:/etc/certs/key.pem
- ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.pem
restart: always
command:
- bash
- "-c"
- |
sed -i 's/^advertisedAddress=/#advertisedAddress=/' conf/standalone.conf
sed -ie 's/^brokerServicePort=.*/brokerServicePort=6649/' conf/standalone.conf
sed -i 's/^bindAddress=/#bindAddress=/' conf/standalone.conf
sed -i 's#^bindAddresses=#bindAddresses=plain:pulsar://0.0.0.0:6650,ssl:pulsar+ssl://0.0.0.0:6651,toxiproxy:pulsar://0.0.0.0:6652,toxiproxy_ssl:pulsar+ssl://0.0.0.0:6653#' conf/standalone.conf
sed -i 's#^advertisedAddress=#advertisedAddress=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653#' conf/standalone.conf
sed -i 's#^tlsCertificateFilePath=#tlsCertificateFilePath=/etc/certs/server.pem#' conf/standalone.conf
sed -i 's#^tlsTrustCertsFilePath=#tlsTrustCertsFilePath=/etc/certs/ca.pem#' conf/standalone.conf
sed -i 's#^tlsKeyFilePath=#tlsKeyFilePath=/etc/certs/key.pem#' conf/standalone.conf
sed -i 's#^tlsProtocols=#tlsProtocols=TLSv1.3,TLSv1.2#' conf/standalone.conf
sed -i 's#^tlsCiphers=#tlsCiphers=TLS_AES_256_GCM_SHA384#' conf/standalone.conf
echo 'advertisedListeners=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653' >> conf/standalone.conf
bin/pulsar standalone -nfw -nss

View File

@@ -26,6 +26,8 @@ services:
       - 19876:9876
       - 19042:9042
       - 19142:9142
+      - 14242:4242
+      - 28080:18080
     command:
       - "-host=0.0.0.0"
      - "-config=/config/toxiproxy.json"

View File

@@ -20,8 +20,8 @@ esac
 {
     echo "HOCON_ENV_OVERRIDE_PREFIX=EMQX_"
-    echo "EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s"
-    echo "EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10"
+    echo "EMQX_MQTT__RETRY_INTERVAL=2s"
+    echo "EMQX_MQTT__MAX_TOPIC_ALIAS=10"
     echo "EMQX_AUTHORIZATION__SOURCES=[]"
     echo "EMQX_AUTHORIZATION__NO_MATCH=allow"
 } >> .ci/docker-compose-file/conf.cluster.env
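
Note: with HOCON_ENV_OVERRIDE_PREFIX=EMQX_, an environment variable name maps to a config path by stripping the prefix, splitting on "__", and lowercasing; dropping the ZONES__DEFAULT segment is what moves these overrides from zones.default.mqtt.* to the root mqtt.* config. A minimal Erlang sketch of that naming rule (illustrative only, not EMQX code):

-module(env_path_sketch).
-export([env_to_path/1]).

%% env_to_path("EMQX_MQTT__RETRY_INTERVAL") -> ["mqtt", "retry_interval"]
env_to_path("EMQX_" ++ Rest) ->
    [string:lowercase(Seg) || Seg <- string:split(Rest, "__", all)].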

View File

@@ -101,5 +101,35 @@
     "listen": "0.0.0.0:1433",
     "upstream": "sqlserver:1433",
     "enabled": true
+  },
+  {
+    "name": "opents",
+    "listen": "0.0.0.0:4242",
+    "upstream": "opents:4242",
+    "enabled": true
+  },
+  {
+    "name": "pulsar_plain",
+    "listen": "0.0.0.0:6652",
+    "upstream": "pulsar:6652",
+    "enabled": true
+  },
+  {
+    "name": "pulsar_tls",
+    "listen": "0.0.0.0:6653",
+    "upstream": "pulsar:6653",
+    "enabled": true
+  },
+  {
+    "name": "oracle",
+    "listen": "0.0.0.0:1521",
+    "upstream": "oracle:1521",
+    "enabled": true
+  },
+  {
+    "name": "iotdb",
+    "listen": "0.0.0.0:18080",
+    "upstream": "iotdb:18080",
+    "enabled": true
   }
 ]

View File

@@ -35,10 +35,7 @@ jobs:
       - name: Get profile to build
         id: get_profile
         run: |
-          set -e
-          THISDIR="$(pwd)"
-          echo "Adding $THISDIR as safe dir for git"
-          git config --global --add safe.directory "${THISDIR}"
+          git config --global --add safe.directory "$GITHUB_WORKSPACE"
           tag=${{ github.ref }}
           if git describe --tags --match "[v|e]*" --exact; then
             echo "WARN: This is an exact git tag, will publish release"
@@ -233,7 +230,7 @@ jobs:
           ARCH: ${{ matrix.arch }}
         run: |
           set -eu
-          git config --global --add safe.directory "/__w/emqx/emqx"
+          git config --global --add safe.directory "$GITHUB_WORKSPACE"
           # Align path for CMake caches
           if [ ! "$PWD" = "/emqx" ]; then
             ln -s $PWD /emqx

View File

@@ -0,0 +1,130 @@
name: Scheduled build packages
concurrency:
group: build-${{ github.event_name }}-${{ github.ref }}
cancel-in-progress: true
on:
schedule:
- cron: '0 */6 * * *'
workflow_dispatch:
jobs:
linux:
if: github.repository_owner == 'emqx'
runs-on: aws-${{ matrix.arch }}
# always run in builder container because the host might have the wrong OTP version etc.
# otherwise buildx.sh does not run docker if arch and os matches the target arch and os.
container:
image: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"
strategy:
fail-fast: false
matrix:
profile:
- ['emqx', 'master']
- ['emqx-enterprise', 'release-50']
branch:
- master
- release-50
otp:
- 24.3.4.2-3
arch:
- amd64
os:
- debian10
- amzn2
builder:
- 5.0-34
elixir:
- 1.13.4
defaults:
run:
shell: bash
steps:
- uses: emqx/self-hosted-cleanup-action@v1.0.3
- uses: actions/checkout@v3
with:
ref: ${{ matrix.profile[1] }}
fetch-depth: 0
- name: build emqx packages
env:
ELIXIR: ${{ matrix.elixir }}
PROFILE: ${{ matrix.profile[0] }}
ARCH: ${{ matrix.arch }}
run: |
set -eu
git config --global --add safe.directory "$GITHUB_WORKSPACE"
PKGTYPES="tgz pkg"
IS_ELIXIR="no"
for PKGTYPE in ${PKGTYPES};
do
./scripts/buildx.sh \
--profile "${PROFILE}" \
--pkgtype "${PKGTYPE}" \
--arch "${ARCH}" \
--elixir "${IS_ELIXIR}" \
--builder "force_host"
done
- uses: actions/upload-artifact@v3
if: success()
with:
name: ${{ matrix.profile[0] }}
path: _packages/${{ matrix.profile[0] }}/
- name: Send notification to Slack
uses: slackapi/slack-github-action@v1.23.0
if: failure()
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
with:
payload: |
{"text": "Scheduled build of ${{ matrix.profile[0] }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
mac:
runs-on: ${{ matrix.os }}
if: github.repository_owner == 'emqx'
strategy:
fail-fast: false
matrix:
profile:
- emqx
branch:
- master
otp:
- 24.3.4.2-3
os:
- macos-12
- macos-12-arm64
steps:
- uses: emqx/self-hosted-cleanup-action@v1.0.3
- uses: actions/checkout@v3
with:
ref: ${{ matrix.branch }}
fetch-depth: 0
- uses: ./.github/actions/package-macos
with:
profile: ${{ matrix.profile }}
otp: ${{ matrix.otp }}
os: ${{ matrix.os }}
apple_id_password: ${{ secrets.APPLE_ID_PASSWORD }}
apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
- uses: actions/upload-artifact@v3
if: success()
with:
name: ${{ matrix.profile }}
path: _packages/${{ matrix.profile }}/
- name: Send notification to Slack
uses: slackapi/slack-github-action@v1.23.0
if: failure()
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
with:
payload: |
{"text": "Scheduled build of ${{ matrix.profile }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}

View File

@@ -194,15 +194,12 @@
         run: |
           CID=$(docker run -d --rm -P $EMQX_IMAGE_TAG)
           HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' $CID)
-          export EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS='yes'
           ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT
           docker stop $CID
       - name: test two nodes cluster with proto_dist=inet_tls in docker
         run: |
           ./scripts/test/start-two-nodes-in-docker.sh -P $EMQX_IMAGE_TAG $EMQX_IMAGE_OLD_VERSION_TAG
           HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' haproxy)
-          # versions before 5.0.22 have hidden fields included in the API spec
-          export EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS='no'
           ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT
           # cleanup
           ./scripts/test/start-two-nodes-in-docker.sh -c

.github/workflows/performance_test.yaml (new file, 128 lines)
View File

@@ -0,0 +1,128 @@
name: Performance Test Suite
on:
push:
branches:
- 'perf/**'
schedule:
- cron: '0 1 * * *'
workflow_dispatch:
inputs:
ref:
required: false
jobs:
prepare:
runs-on: ubuntu-latest
if: github.repository_owner == 'emqx'
container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu20.04
outputs:
BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }}
PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
ref: ${{ github.event.inputs.ref }}
- name: Work around https://github.com/actions/checkout/issues/766
run: |
git config --global --add safe.directory "$GITHUB_WORKSPACE"
- id: prepare
run: |
echo "EMQX_NAME=emqx" >> $GITHUB_ENV
echo "CODE_PATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV
echo "BENCH_ID=$(date --utc +%F)/emqx-$(./pkg-vsn.sh emqx)" >> $GITHUB_OUTPUT
- name: Build deb package
run: |
make ${EMQX_NAME}-pkg
./scripts/pkg-tests.sh ${EMQX_NAME}-pkg
- name: Get package file name
id: package_file
run: |
echo "PACKAGE_FILE=$(find _packages/emqx -name 'emqx-*.deb' | head -n 1 | xargs basename)" >> $GITHUB_OUTPUT
- uses: actions/upload-artifact@v3
with:
name: emqx-ubuntu20.04
path: _packages/emqx/${{ steps.package_file.outputs.PACKAGE_FILE }}
tf_emqx_perf_test:
runs-on: ubuntu-latest
needs:
- prepare
env:
TF_VAR_bench_id: ${{ needs.prepare.outputs.BENCH_ID }}
TF_VAR_package_file: ${{ needs.prepare.outputs.PACKAGE_FILE }}
TF_VAR_test_duration: 300
TF_VAR_grafana_api_key: ${{ secrets.TF_EMQX_PERF_TEST_GRAFANA_API_KEY }}
TF_AWS_REGION: eu-north-1
steps:
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PERF_TEST }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-north-1
- name: Checkout tf-emqx-performance-test
uses: actions/checkout@v3
with:
repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test
- uses: actions/download-artifact@v3
with:
name: emqx-ubuntu20.04
path: tf-emqx-performance-test/
- name: Setup Terraform
uses: hashicorp/setup-terraform@v2
with:
terraform_wrapper: false
- name: terraform init
working-directory: ./tf-emqx-performance-test
run: |
terraform init
- name: terraform apply
working-directory: ./tf-emqx-performance-test
run: |
terraform apply -auto-approve
- name: Wait for test results
timeout-minutes: 30
working-directory: ./tf-emqx-performance-test
id: test-results
run: |
sleep $TF_VAR_test_duration
until aws s3api head-object --bucket tf-emqx-performance-test --key "$TF_VAR_bench_id/DONE" > /dev/null 2>&1
do
printf '.'
sleep 10
done
echo
aws s3 cp "s3://tf-emqx-performance-test/$TF_VAR_bench_id/metrics.json" ./
aws s3 cp "s3://tf-emqx-performance-test/$TF_VAR_bench_id/stats.json" ./
echo MESSAGES_DELIVERED=$(cat metrics.json | jq '[.[]."messages.delivered"] | add') >> $GITHUB_OUTPUT
echo MESSAGES_DROPPED=$(cat metrics.json | jq '[.[]."messages.dropped"] | add') >> $GITHUB_OUTPUT
- name: Send notification to Slack
if: success()
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
with:
payload: |
{"text": "EMQX performance test completed.\nMessages delivered: ${{ steps.test-results.outputs.MESSAGES_DELIVERED }}.\nMessages dropped: ${{ steps.test-results.outputs.MESSAGES_DROPPED }}.\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
- name: terraform destroy
if: always()
working-directory: ./tf-emqx-performance-test
run: |
terraform destroy -auto-approve
- uses: actions/upload-artifact@v3
if: success()
with:
name: test-results
path: "./tf-emqx-performance-test/*.json"
- uses: actions/upload-artifact@v3
if: always()
with:
name: terraform
path: |
./tf-emqx-performance-test/.terraform
./tf-emqx-performance-test/*.tfstate

View File

@@ -167,8 +167,8 @@ jobs:
             --set image.pullPolicy=Never \
             --set image.tag=$EMQX_TAG \
             --set emqxAclConfig="" \
-            --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \
-            --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \
+            --set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \
+            --set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \
             --set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \
             --set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \
             deploy/charts/${{ matrix.profile }} \
@@ -185,8 +185,8 @@ jobs:
             --set image.pullPolicy=Never \
             --set image.tag=$EMQX_TAG \
             --set emqxAclConfig="" \
-            --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \
-            --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \
+            --set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \
+            --set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \
             --set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \
             --set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \
             deploy/charts/${{ matrix.profile }} \

View File

@@ -14,6 +14,9 @@ on:
       - e*
   pull_request:

+env:
+  IS_CI: "yes"
+
 jobs:
   build-matrix:
     runs-on: ubuntu-22.04
@@ -69,21 +72,14 @@ jobs:
       - uses: actions/checkout@v3
         with:
           path: source
-      - uses: actions/cache@v3
-        id: cache
-        with:
-          path: "$HOME/.cache/rebar3/rebar3_${{ matrix.otp }}_plt"
-          key: rebar3-dialyzer-plt-${{ matrix.otp }}
      - name: get_all_deps
        working-directory: source
        env:
          PROFILE: ${{ matrix.profile }}
-          #DIAGNOSTIC: 1
        run: |
          make ensure-rebar3
          # fetch all deps and compile
-          make ${{ matrix.profile }}
-          make static_checks
+          make ${{ matrix.profile }}-compile
          make test-compile
          cd ..
          zip -ryq source.zip source/* source/.[^.]*
@@ -92,6 +88,34 @@ jobs:
          name: source-${{ matrix.profile }}-${{ matrix.otp }}
          path: source.zip

+  static_checks:
+    needs:
+      - build-matrix
+      - prepare
+    runs-on: ${{ needs.build-matrix.outputs.runs-on }}
+    strategy:
+      fail-fast: false
+      matrix:
+        include: ${{ fromJson(needs.build-matrix.outputs.prepare) }}
+    container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04"
+    steps:
+      - uses: AutoModality/action-clean@v1
+      - uses: actions/download-artifact@v3
+        with:
+          name: source-${{ matrix.profile }}-${{ matrix.otp }}
+          path: .
+      - name: unzip source code
+        run: unzip -o -q source.zip
+      - uses: actions/cache@v3
+        with:
+          path: "source/emqx_dialyzer_${{ matrix.otp }}_plt"
+          key: rebar3-dialyzer-plt-${{ matrix.profile }}-${{ matrix.otp }}
+      - name: run static checks
+        env:
+          PROFILE: ${{ matrix.profile }}
+        working-directory: source
+        run: make static_checks
+
   eunit_and_proper:
     needs:
       - build-matrix
@@ -168,6 +192,7 @@ jobs:
          REDIS_TAG: "7.0"
          INFLUXDB_TAG: "2.5.0"
          TDENGINE_TAG: "3.0.2.4"
+          OPENTS_TAG: "9aa7f88"
          PROFILE: ${{ matrix.profile }}
          CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
        run: ./scripts/ct/run.sh --ci --app ${{ matrix.app }}

View File

@@ -186,7 +186,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.

-   Copyright {yyyy} {name of copyright owner}
+   Copyright (c) 2016-2023 EMQ Technologies Co., Ltd.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.

View File

@@ -1,7 +1,7 @@
 Source code in this repository is variously licensed under below licenses.

-For EMQX: Apache License 2.0, see APL.txt,
-which applies to all source files except for lib-ee sub-directory.
+For Default: Apache License 2.0, see APL.txt,
+which applies to all source files except for folders applied with Business Source License.

 For EMQX Enterprise (since version 5.0): Business Source License 1.1,
-see lib-ee/BSL.txt, which applies to source code in lib-ee sub-directory.
+see apps/emqx_bridge_kafka/BSL.txt as an example, please check license files under sub directory of apps.

View File

@@ -4,10 +4,6 @@ SCRIPTS = $(CURDIR)/scripts
 export EMQX_RELUP ?= true
 export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-debian11
 export EMQX_DEFAULT_RUNNER = debian:11-slim
-export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
-export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
-export EMQX_DASHBOARD_VERSION ?= v1.2.1
-export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6
 export EMQX_REL_FORM ?= tgz
 export QUICER_DOWNLOAD_FROM_RELEASE = 1
 ifeq ($(OS),Windows_NT)
@@ -17,6 +13,22 @@ else
 	FIND=find
 endif

+# Dashboard version
+# from https://github.com/emqx/emqx-dashboard5
+export EMQX_DASHBOARD_VERSION ?= v1.2.4
+export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6
+
+# `:=` should be used here, otherwise the `$(shell ...)` would be executed every time the variable is used.
+# In make 4.4+, for backward compatibility the value from the original environment is used,
+# so the shell script would be executed many times.
+# https://github.com/emqx/emqx/pull/10627
+ifeq ($(strip $(OTP_VSN)),)
+  export OTP_VSN := $(shell $(SCRIPTS)/get-otp-vsn.sh)
+endif
+ifeq ($(strip $(ELIXIR_VSN)),)
+  export ELIXIR_VSN := $(shell $(SCRIPTS)/get-elixir-vsn.sh)
+endif
+
 PROFILE ?= emqx
 REL_PROFILES := emqx emqx-enterprise
 PKG_PROFILES := emqx-pkg emqx-enterprise-pkg
@@ -73,6 +85,10 @@ proper: $(REBAR)
 test-compile: $(REBAR) merge-config
 	$(REBAR) as test compile

+.PHONY: $(REL_PROFILES:%=%-compile)
+$(REL_PROFILES:%=%-compile): $(REBAR) merge-config
+	$(REBAR) as $(@:%-compile=%) compile
+
 .PHONY: ct
 ct: $(REBAR) merge-config
 	@ENABLE_COVER_COMPILE=1 $(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(CT_COVER_EXPORT_PREFIX)-ct
@@ -88,13 +104,17 @@ APPS=$(shell $(SCRIPTS)/find-apps.sh)
 .PHONY: $(APPS:%=%-ct)
 define gen-app-ct-target
-$1-ct: $(REBAR)
-	@$(SCRIPTS)/pre-compile.sh $(PROFILE)
+$1-ct: $(REBAR) merge-config
+	$(eval SUITES := $(shell $(SCRIPTS)/find-suites.sh $1))
+ifneq ($(SUITES),)
 	@ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \
 		--readable=$(CT_READABLE) \
 		--name $(CT_NODE_NAME) \
 		--cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1) \
-		--suite $(shell $(SCRIPTS)/find-suites.sh $1)
+		--suite $(SUITES)
+else
+	@echo 'No suites found for $1'
+endif
 endef
 $(foreach app,$(APPS),$(eval $(call gen-app-ct-target,$(app))))
@@ -134,6 +154,11 @@ COMMON_DEPS := $(REBAR)
 $(REL_PROFILES:%=%): $(COMMON_DEPS)
 	@$(BUILD) $(@) rel

+.PHONY: compile $(PROFILES:%=compile-%)
+compile: $(PROFILES:%=compile-%)
+$(PROFILES:%=compile-%):
+	@$(BUILD) $(@:compile-%=%) apps
+
 ## Not calling rebar3 clean because
 ## 1. rebar3 clean relies on rebar3, meaning it reads config, fetches dependencies etc.
 ## 2. it's slow
@@ -217,11 +242,11 @@ endef
 $(foreach pt,$(PKG_PROFILES),$(eval $(call gen-pkg-target,$(pt))))

 .PHONY: run
-run: $(PROFILE) quickrun
+run: compile-$(PROFILE) quickrun

 .PHONY: quickrun
 quickrun:
-	./_build/$(PROFILE)/rel/emqx/bin/emqx console
+	./dev -p $(PROFILE)

 ## Take the currently set PROFILE
 docker:
@@ -239,7 +264,6 @@ $(foreach zt,$(ALL_DOCKERS),$(eval $(call gen-docker-target,$(zt))))
 .PHONY:
 merge-config:
 	@$(SCRIPTS)/merge-config.escript
-	@$(SCRIPTS)/merge-i18n.escript

 ## elixir target is to create release packages using Elixir's Mix
 .PHONY: $(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir)

View File

@@ -1,4 +1,4 @@
-%% This additional config file is used when the config 'cluster.proto_dis' in emqx.conf is set to 'inet_tls'.
+%% This additional config file is used when the config 'cluster.proto_dist' in emqx.conf is set to 'inet_tls'.
 %% Which means the EMQX nodes will connect to each other over TLS.
 %% For more information about inter-broker security, see: https://docs.emqx.com/en/enterprise/v5.0/deploy/cluster/security.html

View File

@@ -32,10 +32,10 @@
 %% `apps/emqx/src/bpapi/README.md'

 %% Community edition
--define(EMQX_RELEASE_CE, "5.0.22").
+-define(EMQX_RELEASE_CE, "5.0.25-rc.1").

 %% Enterprise edition
--define(EMQX_RELEASE_EE, "5.0.3").
+-define(EMQX_RELEASE_EE, "5.0.4-alpha.1").

 %% the HTTP API version
 -define(EMQX_API_VERSION, "5.0").

View File

@@ -57,16 +57,16 @@
 -define(ERROR_CODES, [
     {?BAD_USERNAME_OR_PWD, <<"Bad username or password">>},
     {?BAD_API_KEY_OR_SECRET, <<"Bad API key or secret">>},
-    {'BAD_REQUEST', <<"Request parameters are not legal">>},
+    {'BAD_REQUEST', <<"Request parameters are invalid">>},
     {'NOT_MATCH', <<"Conditions are not matched">>},
     {'ALREADY_EXISTS', <<"Resource already existed">>},
-    {'BAD_CONFIG_SCHEMA', <<"Configuration data is not legal">>},
+    {'BAD_CONFIG_SCHEMA', <<"Configuration data is invalid">>},
     {'BAD_LISTENER_ID', <<"Bad listener ID">>},
     {'BAD_NODE_NAME', <<"Bad Node Name">>},
     {'BAD_RPC', <<"RPC Failed. Check the cluster status and the requested node status">>},
     {'BAD_TOPIC', <<"Topic syntax error, Topic needs to comply with the MQTT protocol standard">>},
     {'EXCEED_LIMIT', <<"Create resources that exceed the maximum limit or minimum limit">>},
-    {'INVALID_PARAMETER', <<"Request parameters is not legal and exceeds the boundary value">>},
+    {'INVALID_PARAMETER', <<"Request parameters is invalid and exceeds the boundary value">>},
     {'CONFLICT', <<"Conflicting request resources">>},
     {'NO_DEFAULT_VALUE', <<"Request parameters do not use default values">>},
     {'DEPENDENCY_EXISTS', <<"Resource is dependent by another resource">>},

View File

@@ -27,9 +27,9 @@
     {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}},
     {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
     {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}},
-    {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.6"}}},
+    {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.1"}}},
     {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
-    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.38.2"}}},
+    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.4"}}},
     {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}},
     {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
     {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},

View File

@@ -3,7 +3,7 @@
     {id, "emqx"},
     {description, "EMQX Core"},
     % strict semver, bump manually!
-    {vsn, "5.0.23"},
+    {vsn, "5.0.25"},
     {modules, []},
     {registered, []},
     {applications, [

View File

@@ -30,6 +30,12 @@
     stop/0
 ]).

+%% Cluster API
+-export([
+    cluster_nodes/1,
+    running_nodes/0
+]).
+
 %% PubSub API
 -export([
     subscribe/1,
@@ -102,6 +108,18 @@ is_running() ->
         _ -> true
     end.

+%%--------------------------------------------------------------------
+%% Cluster API
+%%--------------------------------------------------------------------
+
+-spec running_nodes() -> [node()].
+running_nodes() ->
+    mria:running_nodes().
+
+-spec cluster_nodes(all | running | cores | stopped) -> [node()].
+cluster_nodes(Type) ->
+    mria:cluster_nodes(Type).
+
 %%--------------------------------------------------------------------
 %% PubSub API
 %%--------------------------------------------------------------------
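
The two new functions are thin proxies over mria, as the diff shows. A minimal usage sketch (hypothetical caller module, not part of this PR):

-module(cluster_info_sketch).
-export([print/0]).

%% emqx:running_nodes/0 delegates to mria:running_nodes/0;
%% emqx:cluster_nodes/1 accepts all | running | cores | stopped.
print() ->
    io:format("running: ~p~n", [emqx:running_nodes()]),
    io:format("stopped: ~p~n", [emqx:cluster_nodes(stopped)]).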

View File

@@ -42,7 +42,9 @@
     get_alarms/0,
     get_alarms/1,
     format/1,
-    format/2
+    format/2,
+    safe_activate/3,
+    safe_deactivate/1
 ]).

 %% gen_server callbacks
@@ -57,7 +59,6 @@

 %% Internal exports (RPC)
 -export([
-    create_activate_alarm/3,
     do_get_alarms/0
 ]).

@@ -123,6 +124,9 @@ activate(Name, Details) ->
 activate(Name, Details, Message) ->
     gen_server:call(?MODULE, {activate_alarm, Name, Details, Message}).

+safe_activate(Name, Details, Message) ->
+    safe_call({activate_alarm, Name, Details, Message}).
+
 -spec ensure_deactivated(binary() | atom()) -> ok.
 ensure_deactivated(Name) ->
     ensure_deactivated(Name, no_details).
@@ -155,6 +159,9 @@ deactivate(Name, Details) ->
 deactivate(Name, Details, Message) ->
     gen_server:call(?MODULE, {deactivate_alarm, Name, Details, Message}).

+safe_deactivate(Name) ->
+    safe_call({deactivate_alarm, Name, no_details, <<"">>}).
+
 -spec delete_all_deactivated_alarms() -> ok.
 delete_all_deactivated_alarms() ->
     gen_server:call(?MODULE, delete_all_deactivated_alarms).
@@ -218,17 +225,12 @@ init([]) ->
     {ok, #{}, get_validity_period()}.

 handle_call({activate_alarm, Name, Details, Message}, _From, State) ->
-    Res = mria:transaction(
-        mria:local_content_shard(),
-        fun ?MODULE:create_activate_alarm/3,
-        [Name, Details, Message]
-    ),
-    case Res of
-        {atomic, Alarm} ->
+    case create_activate_alarm(Name, Details, Message) of
+        {ok, Alarm} ->
             do_actions(activate, Alarm, emqx:get_config([alarm, actions])),
             {reply, ok, State, get_validity_period()};
-        {aborted, Reason} ->
-            {reply, Reason, State, get_validity_period()}
+        Err ->
+            {reply, Err, State, get_validity_period()}
     end;
 handle_call({deactivate_alarm, Name, Details, Message}, _From, State) ->
     case mnesia:dirty_read(?ACTIVATED_ALARM, Name) of
@@ -283,9 +285,9 @@ get_validity_period() ->
     emqx:get_config([alarm, validity_period]).

 create_activate_alarm(Name, Details, Message) ->
-    case mnesia:read(?ACTIVATED_ALARM, Name) of
+    case mnesia:dirty_read(?ACTIVATED_ALARM, Name) of
         [#activated_alarm{name = Name}] ->
-            mnesia:abort({error, already_existed});
+            {error, already_existed};
         [] ->
             Alarm = #activated_alarm{
                 name = Name,
@@ -293,8 +295,8 @@ create_activate_alarm(Name, Details, Message) ->
                 message = normalize_message(Name, iolist_to_binary(Message)),
                 activate_at = erlang:system_time(microsecond)
             },
-            ok = mnesia:write(?ACTIVATED_ALARM, Alarm, write),
-            Alarm
+            ok = mria:dirty_write(?ACTIVATED_ALARM, Alarm),
+            {ok, Alarm}
     end.

 do_get_alarms() ->
@@ -474,3 +476,19 @@ normalize_message(Name, <<"">>) ->
     list_to_binary(io_lib:format("~p", [Name]));
 normalize_message(_Name, Message) ->
     Message.
+
+safe_call(Req) ->
+    try
+        gen_server:call(?MODULE, Req)
+    catch
+        _:{timeout, _} = Reason ->
+            ?SLOG(warning, #{msg => "emqx_alarm_safe_call_timeout", reason => Reason}),
+            {error, timeout};
+        _:Reason:St ->
+            ?SLOG(error, #{
+                msg => "emqx_alarm_safe_call_exception",
+                reason => Reason,
+                stacktrace => St
+            }),
+            {error, Reason}
+    end.
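
The safe_* variants exist so callers that must not crash can tolerate an overloaded or restarting alarm server. A minimal caller sketch (hypothetical module and alarm name, illustrative only):

-module(alarm_caller_sketch).
-export([raise_high_disk_usage/1]).

%% With safe_activate/3 a gen_server timeout or crash is returned as
%% {error, Reason} (and logged) instead of propagating to the caller.
raise_high_disk_usage(UsedPercent) ->
    case emqx_alarm:safe_activate(
        high_disk_usage,
        #{used_percent => UsedPercent},
        <<"disk usage is high">>
    ) of
        ok -> ok;
        {error, timeout} -> ok; %% best effort
        {error, _Reason} -> ok
    end.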

View File

@@ -89,7 +89,7 @@
     %% Authentication Data Cache
     auth_cache :: maybe(map()),
     %% Quota checkers
-    quota :: maybe(emqx_limiter_container:limiter()),
+    quota :: emqx_limiter_container:limiter(),
     %% Timers
     timers :: #{atom() => disabled | maybe(reference())},
     %% Conn State
@@ -760,7 +760,7 @@ do_publish(
             handle_out(disconnect, RC, Channel)
     end.

-ensure_quota(_, Channel = #channel{quota = undefined}) ->
+ensure_quota(_, Channel = #channel{quota = infinity}) ->
     Channel;
 ensure_quota(PubRes, Channel = #channel{quota = Limiter}) ->
     Cnt = lists:foldl(

View File

@@ -18,11 +18,11 @@
 -compile({no_auto_import, [get/0, get/1, put/2, erase/1]}).
 -elvis([{elvis_style, god_modules, disable}]).
 -include("logger.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").

 -export([
     init_load/1,
     init_load/2,
-    init_load/3,
     read_override_conf/1,
     has_deprecated_file/0,
     delete_override_conf_files/0,
@@ -102,6 +102,8 @@
 -define(ZONE_CONF_PATH(ZONE, PATH), [zones, ZONE | PATH]).
 -define(LISTENER_CONF_PATH(TYPE, LISTENER, PATH), [listeners, TYPE, LISTENER | PATH]).

+-define(CONFIG_NOT_FOUND_MAGIC, '$0tFound').
+
 -export_type([
     update_request/0,
     raw_config/0,
@@ -150,7 +152,7 @@ get_root([RootName | _]) ->
 %% @doc For the given path, get raw root value enclosed in a single-key map.
 %% key is ensured to be binary.
 get_root_raw([RootName | _]) ->
-    #{bin(RootName) => do_get_raw([RootName], #{})}.
+    #{bin(RootName) => get_raw([RootName], #{})}.

 %% @doc Get a config value for the given path.
 %% The path should at least include root config name.
@@ -163,9 +165,8 @@ get(KeyPath, Default) -> do_get(?CONF, KeyPath, Default).
 -spec find(emqx_utils_maps:config_key_path()) ->
     {ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}.
 find([]) ->
-    Ref = make_ref(),
-    case do_get(?CONF, [], Ref) of
-        Ref -> {not_found, []};
+    case do_get(?CONF, [], ?CONFIG_NOT_FOUND_MAGIC) of
+        ?CONFIG_NOT_FOUND_MAGIC -> {not_found, []};
         Res -> {ok, Res}
     end;
 find(KeyPath) ->
@@ -178,9 +179,8 @@ find(KeyPath) ->
 -spec find_raw(emqx_utils_maps:config_key_path()) ->
     {ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}.
 find_raw([]) ->
-    Ref = make_ref(),
-    case do_get_raw([], Ref) of
-        Ref -> {not_found, []};
+    case do_get_raw([], ?CONFIG_NOT_FOUND_MAGIC) of
+        ?CONFIG_NOT_FOUND_MAGIC -> {not_found, []};
         Res -> {ok, Res}
     end;
 find_raw(KeyPath) ->
@@ -231,14 +231,14 @@ find_listener_conf(Type, Listener, KeyPath) ->
 put(Config) ->
     maps:fold(
         fun(RootName, RootValue, _) ->
-            ?MODULE:put([RootName], RootValue)
+            ?MODULE:put([atom(RootName)], RootValue)
         end,
         ok,
         Config
     ).

 erase(RootName) ->
-    persistent_term:erase(?PERSIS_KEY(?CONF, bin(RootName))),
+    persistent_term:erase(?PERSIS_KEY(?CONF, atom(RootName))),
     persistent_term:erase(?PERSIS_KEY(?RAW_CONF, bin(RootName))),
     ok.
@@ -287,9 +287,11 @@ get_default_value([RootName | _] = KeyPath) ->
     end.

 -spec get_raw(emqx_utils_maps:config_key_path()) -> term().
+get_raw([Root | T]) when is_atom(Root) -> get_raw([bin(Root) | T]);
 get_raw(KeyPath) -> do_get_raw(KeyPath).

 -spec get_raw(emqx_utils_maps:config_key_path(), term()) -> term().
+get_raw([Root | T], Default) when is_atom(Root) -> get_raw([bin(Root) | T], Default);
 get_raw(KeyPath, Default) -> do_get_raw(KeyPath, Default).

 -spec put_raw(map()) -> ok.
@@ -314,44 +316,39 @@ put_raw(KeyPath, Config) ->
 %%============================================================================
 init_load(SchemaMod) ->
     ConfFiles = application:get_env(emqx, config_files, []),
-    init_load(SchemaMod, ConfFiles, #{raw_with_default => true}).
-
-init_load(SchemaMod, Opts) when is_map(Opts) ->
-    ConfFiles = application:get_env(emqx, config_files, []),
-    init_load(SchemaMod, ConfFiles, Opts);
-init_load(SchemaMod, ConfFiles) ->
-    init_load(SchemaMod, ConfFiles, #{raw_with_default => false}).
+    init_load(SchemaMod, ConfFiles).

 %% @doc Initial load of the given config files.
 %% NOTE: The order of the files is significant, configs from files ordered
 %% in the rear of the list overrides prior values.
 -spec init_load(module(), [string()] | binary() | hocon:config()) -> ok.
-init_load(SchemaMod, Conf, Opts) when is_list(Conf) orelse is_binary(Conf) ->
-    HasDeprecatedFile = has_deprecated_file(),
-    RawConf = load_config_files(HasDeprecatedFile, Conf),
-    init_load(HasDeprecatedFile, SchemaMod, RawConf, Opts).
-
-init_load(true, SchemaMod, RawConf, Opts) when is_map(RawConf) ->
+init_load(SchemaMod, Conf) when is_list(Conf) orelse is_binary(Conf) ->
     ok = save_schema_mod_and_names(SchemaMod),
-    %% deprecated conf will be removed in 5.1
-    %% Merge environment variable overrides on top
+    HasDeprecatedFile = has_deprecated_file(),
+    RawConf0 = load_config_files(HasDeprecatedFile, Conf),
+    warning_deprecated_root_key(RawConf0),
+    RawConf1 =
+        case HasDeprecatedFile of
+            true ->
+                overlay_v0(SchemaMod, RawConf0);
+            false ->
+                overlay_v1(SchemaMod, RawConf0)
+        end,
+    RawConf = fill_defaults_for_all_roots(SchemaMod, RawConf1),
+    %% check configs against the schema
+    {AppEnvs, CheckedConf} = check_config(SchemaMod, RawConf, #{}),
+    save_to_app_env(AppEnvs),
+    ok = save_to_config_map(CheckedConf, RawConf).
+
+%% Merge environment variable overrides on top, then merge with overrides.
+overlay_v0(SchemaMod, RawConf) when is_map(RawConf) ->
     RawConfWithEnvs = merge_envs(SchemaMod, RawConf),
     Overrides = read_override_confs(),
-    RawConfWithOverrides = hocon:deep_merge(RawConfWithEnvs, Overrides),
-    RawConfAll = maybe_fill_defaults(SchemaMod, RawConfWithOverrides, Opts),
-    %% check configs against the schema
-    {AppEnvs, CheckedConf} = check_config(SchemaMod, RawConfAll, #{}),
-    save_to_app_env(AppEnvs),
-    ok = save_to_config_map(CheckedConf, RawConfAll);
-init_load(false, SchemaMod, RawConf, Opts) when is_map(RawConf) ->
-    ok = save_schema_mod_and_names(SchemaMod),
-    %% Merge environment variable overrides on top
-    RawConfWithEnvs = merge_envs(SchemaMod, RawConf),
-    RawConfAll = maybe_fill_defaults(SchemaMod, RawConfWithEnvs, Opts),
-    %% check configs against the schema
-    {AppEnvs, CheckedConf} = check_config(SchemaMod, RawConfAll, #{}),
-    save_to_app_env(AppEnvs),
-    ok = save_to_config_map(CheckedConf, RawConfAll).
+    hocon:deep_merge(RawConfWithEnvs, Overrides).
+
+%% Merge environment variable overrides on top.
+overlay_v1(SchemaMod, RawConf) when is_map(RawConf) ->
+    merge_envs(SchemaMod, RawConf).

 %% @doc Read merged cluster + local overrides.
 read_override_confs() ->
@@ -360,8 +357,7 @@ read_override_confs() ->
     hocon:deep_merge(ClusterOverrides, LocalOverrides).

 %% keep the raw and non-raw conf has the same keys to make update raw conf easier.
-%% TODO: remove raw_with_default as it's now always true.
-maybe_fill_defaults(SchemaMod, RawConf0, #{raw_with_default := true}) ->
+fill_defaults_for_all_roots(SchemaMod, RawConf0) ->
     RootSchemas = hocon_schema:roots(SchemaMod),
     %% the roots which are missing from the loaded configs
     MissingRoots = lists:filtermap(
@@ -380,9 +376,7 @@ fill_defaults_for_all_roots(SchemaMod, RawConf0) ->
         RawConf0,
         MissingRoots
     ),
-    fill_defaults(RawConf);
-maybe_fill_defaults(_SchemaMod, RawConf, _Opts) ->
-    RawConf.
+    fill_defaults(RawConf).

 %% So far, this can only return true when testing.
 %% e.g. when testing an app, we need to load its config first
@@ -679,11 +673,9 @@ do_get_raw(Path, Default) ->
     do_get(?RAW_CONF, Path, Default).

 do_get(Type, KeyPath) ->
-    Ref = make_ref(),
-    Res = do_get(Type, KeyPath, Ref),
-    case Res =:= Ref of
-        true -> error({config_not_found, KeyPath});
-        false -> Res
+    case do_get(Type, KeyPath, ?CONFIG_NOT_FOUND_MAGIC) of
+        ?CONFIG_NOT_FOUND_MAGIC -> error({config_not_found, KeyPath});
+        Res -> Res
     end.

 do_get(Type, [], Default) ->
@@ -702,9 +694,9 @@ do_get(Type, [], Default) ->
         false -> AllConf
     end;
 do_get(Type, [RootName], Default) ->
-    persistent_term:get(?PERSIS_KEY(Type, bin(RootName)), Default);
+    persistent_term:get(?PERSIS_KEY(Type, RootName), Default);
 do_get(Type, [RootName | KeyPath], Default) ->
-    RootV = persistent_term:get(?PERSIS_KEY(Type, bin(RootName)), #{}),
+    RootV = persistent_term:get(?PERSIS_KEY(Type, RootName), #{}),
     do_deep_get(Type, KeyPath, RootV, Default).

 do_put(Type, Putter, [], DeepValue) ->
@@ -718,7 +710,7 @@ do_put(Type, Putter, [], DeepValue) ->
 do_put(Type, Putter, [RootName | KeyPath], DeepValue) ->
     OldValue = do_get(Type, [RootName], #{}),
     NewValue = do_deep_put(Type, Putter, KeyPath, OldValue, DeepValue),
-    persistent_term:put(?PERSIS_KEY(Type, bin(RootName)), NewValue).
+    persistent_term:put(?PERSIS_KEY(Type, RootName), NewValue).

 do_deep_get(?CONF, KeyPath, Map, Default) ->
     atom_conf_path(
@@ -760,6 +752,22 @@ bin(Bin) when is_binary(Bin) -> Bin;
 bin(Str) when is_list(Str) -> list_to_binary(Str);
 bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8).

+warning_deprecated_root_key(RawConf) ->
+    case maps:keys(RawConf) -- get_root_names() of
+        [] ->
+            ok;
+        Keys ->
+            Unknowns = string:join([binary_to_list(K) || K <- Keys], ","),
+            ?tp(unknown_config_keys, #{unknown_config_keys => Unknowns}),
+            ?SLOG(
+                warning,
+                #{
+                    msg => "config_key_not_recognized",
+                    unknown_config_keys => Unknowns
+                }
+            )
+    end.
+
 conf_key(?CONF, RootName) ->
     atom(RootName);
 conf_key(?RAW_CONF, RootName) ->
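
The refactor above replaces a per-call make_ref() sentinel with the fixed atom ?CONFIG_NOT_FOUND_MAGIC, so "not found" can be matched directly in a case clause. A self-contained sketch of the pattern (illustrative module, not EMQX code):

-module(sentinel_sketch).
-export([find/1]).

%% A value real data never equals; matching on it directly avoids the
%% extra Res =:= Ref comparison the old make_ref() version needed.
-define(NOT_FOUND, '$0tFound').

find(Key) ->
    case persistent_term:get({?MODULE, Key}, ?NOT_FOUND) of
        ?NOT_FOUND -> {not_found, Key};
        Value -> {ok, Value}
    end.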

View File

@@ -111,7 +111,7 @@
     listener :: {Type :: atom(), Name :: atom()},

     %% Limiter
-    limiter :: maybe(limiter()),
+    limiter :: limiter(),

     %% limiter buffer for overload use
     limiter_buffer :: queue:queue(pending_req()),
@@ -182,10 +182,8 @@
 -define(ALARM_SOCK_STATS_KEYS, [send_pend, recv_cnt, recv_oct, send_cnt, send_oct]).
 -define(ALARM_SOCK_OPTS_KEYS, [high_watermark, high_msgq_watermark, sndbuf, recbuf, buffer]).

-%% use macro to do compile time limiter's type check
--define(LIMITER_BYTES_IN, bytes_in).
--define(LIMITER_MESSAGE_IN, message_in).
--define(EMPTY_QUEUE, {[], []}).
+-define(LIMITER_BYTES_IN, bytes).
+-define(LIMITER_MESSAGE_IN, messages).

 -dialyzer({no_match, [info/2]}).
 -dialyzer(
@@ -976,19 +974,22 @@ handle_cast(Req, State) ->
     list(any()),
     state()
 ) -> _.
+check_limiter(
+    _Needs,
+    Data,
+    WhenOk,
+    Msgs,
+    #state{limiter = infinity} = State
+) ->
+    WhenOk(Data, Msgs, State);
 check_limiter(
     Needs,
     Data,
     WhenOk,
     Msgs,
-    #state{
-        limiter = Limiter,
-        limiter_timer = LimiterTimer,
-        limiter_buffer = Cache
-    } = State
-) when Limiter =/= undefined ->
-    case LimiterTimer of
-        undefined ->
+    #state{limiter_timer = undefined, limiter = Limiter} = State
+) ->
     case emqx_limiter_container:check_list(Needs, Limiter) of
         {ok, Limiter2} ->
             WhenOk(Data, Msgs, State#state{limiter = Limiter2});
@@ -1016,15 +1017,18 @@ check_limiter(
         {drop, Limiter2} ->
             {ok, State#state{limiter = Limiter2}}
     end;
-        _ ->
+check_limiter(
+    Needs,
+    Data,
+    WhenOk,
+    _Msgs,
+    #state{limiter_buffer = Cache} = State
+) ->
     %% if there is a retry timer,
     %% cache the operation and execute it after the retry is over
     %% the maximum length of the cache queue is equal to the active_n
     New = #pending_req{need = Needs, data = Data, next = WhenOk},
-            {ok, State#state{limiter_buffer = queue:in(New, Cache)}}
-    end;
-check_limiter(_, Data, WhenOk, Msgs, State) ->
-    WhenOk(Data, Msgs, State).
+    {ok, State#state{limiter_buffer = queue:in(New, Cache)}}.

 %% try to perform a retry
 -spec retry_limiter(state()) -> _.
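
The three check_limiter clauses form a small dispatch: an infinity limiter bypasses checking entirely, a state without a pending retry timer checks immediately, and while a retry timer is running new requests are parked in limiter_buffer and replayed after the retry. A generic sketch of that parking pattern (illustrative, outside EMQX):

-module(park_while_retrying_sketch).
-export([submit/2]).

%% While a retry is pending, queue the work instead of executing it;
%% the queued entries are drained once the retry timer fires.
submit(Work, #{retrying := true, pending := Q} = State) ->
    State#{pending := queue:in(Work, Q)};
submit(Work, #{retrying := false} = State) ->
    Work(),
    State.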

View File

@@ -22,7 +22,7 @@

 %% API
 -export([
-    make_token_bucket_limiter/2,
+    make_local_limiter/2,
     make_ref_limiter/2,
     check/2,
     consume/2,
@@ -32,12 +32,11 @@
     make_future/1,
     available/1
 ]).
--export_type([token_bucket_limiter/0]).
+-export_type([local_limiter/0]).

-%% a token bucket limiter with a limiter server's bucket reference
-%% the number of tokens currently available
--type token_bucket_limiter() :: #{
+%% a token bucket limiter which may or may not contain a reference to
+%% another limiter, and can be used in a client alone
+-type local_limiter() :: #{
     tokens := non_neg_integer(),
     rate := decimal(),
     capacity := decimal(),
@@ -58,12 +57,12 @@
     retry_ctx =>
         undefined
         %% the retry context
-        | retry_context(token_bucket_limiter()),
+        | retry_context(local_limiter()),
     %% allow to add other keys
     atom => any()
 }.

-%% a limiter server's bucket reference
+%% a limiter instance which only contains a reference to another limiter (bucket)
 -type ref_limiter() :: #{
     max_retry_time := non_neg_integer(),
     failure_strategy := failure_strategy(),
@@ -88,7 +87,7 @@
 }.

 -type bucket() :: emqx_limiter_bucket_ref:bucket_ref().
--type limiter() :: token_bucket_limiter() | ref_limiter() | infinity.
+-type limiter() :: local_limiter() | ref_limiter() | infinity.
 -type millisecond() :: non_neg_integer().

 -type pause_type() :: pause | partial.
@@ -116,7 +115,7 @@
     rate := decimal(),
     initial := non_neg_integer(),
     low_watermark := non_neg_integer(),
-    capacity := decimal(),
+    burst := decimal(),
     divisible := boolean(),
     max_retry_time := non_neg_integer(),
     failure_strategy := failure_strategy()
@@ -134,12 +133,13 @@
 %% API
 %%--------------------------------------------------------------------
 %%@doc create a limiter
--spec make_token_bucket_limiter(limiter_bucket_cfg(), bucket()) -> _.
-make_token_bucket_limiter(Cfg, Bucket) ->
+-spec make_local_limiter(limiter_bucket_cfg(), bucket()) -> _.
+make_local_limiter(Cfg, Bucket) ->
     Cfg#{
         tokens => emqx_limiter_server:get_initial_val(Cfg),
         lasttime => ?NOW,
-        bucket => Bucket
+        bucket => Bucket,
+        capacity => emqx_limiter_schema:calc_capacity(Cfg)
     }.

 %%@doc create a limiter server's reference
@@ -311,8 +311,8 @@ on_failure(throw, Limiter) ->
     Message = io_lib:format("limiter consume failed, limiter:~p~n", [Limiter]),
     erlang:throw({rate_check_fail, Message}).

--spec do_check_with_parent_limiter(pos_integer(), token_bucket_limiter()) ->
-    inner_check_result(token_bucket_limiter()).
+-spec do_check_with_parent_limiter(pos_integer(), local_limiter()) ->
+    inner_check_result(local_limiter()).
 do_check_with_parent_limiter(
     Need,
     #{
@@ -335,7 +335,7 @@ do_check_with_parent_limiter(
     )
     end.

--spec do_reset(pos_integer(), token_bucket_limiter()) -> inner_check_result(token_bucket_limiter()).
+-spec do_reset(pos_integer(), local_limiter()) -> inner_check_result(local_limiter()).
 do_reset(
     Need,
     #{

View File

@@ -23,6 +23,7 @@
 %% API
 -export([
     new/3,
+    infinity_bucket/0,
     check/3,
     try_restore/2,
     available/1
@@ -58,6 +59,10 @@ new(Counter, Index, Rate) ->
         rate => Rate
     }.

+-spec infinity_bucket() -> bucket_ref().
+infinity_bucket() ->
+    infinity.
+
 %% @doc check tokens
 -spec check(pos_integer(), bucket_ref(), Divisible :: boolean()) ->
     HasToken ::

View File

@@ -34,7 +34,9 @@

 -export_type([container/0, check_result/0]).

--type container() :: #{
+-type container() ::
+    infinity
+    | #{
         limiter_type() => undefined | limiter(),
         %% the retry context of the limiter
         retry_key() =>
@@ -78,7 +80,20 @@ get_limiter_by_types(Id, Types, BucketCfgs) ->
         {ok, Limiter} = emqx_limiter_server:connect(Id, Type, BucketCfgs),
         add_new(Type, Limiter, Acc)
     end,
-    lists:foldl(Init, #{retry_ctx => undefined}, Types).
+    Container = lists:foldl(Init, #{retry_ctx => undefined}, Types),
+    case
+        lists:all(
+            fun(Type) ->
+                maps:get(Type, Container) =:= infinity
+            end,
+            Types
+        )
+    of
+        true ->
+            infinity;
+        _ ->
+            Container
+    end.

 -spec add_new(limiter_type(), limiter(), container()) -> container().
 add_new(Type, Limiter, Container) ->
@@ -89,11 +104,15 @@ add_new(Type, Limiter, Container) ->

 %% @doc check the specified limiter
 -spec check(pos_integer(), limiter_type(), container()) -> check_result().
+check(_Need, _Type, infinity) ->
+    {ok, infinity};
 check(Need, Type, Container) ->
     check_list([{Need, Type}], Container).

 %% @doc check multiple limiters
 -spec check_list(list({pos_integer(), limiter_type()}), container()) -> check_result().
+check_list(_Need, infinity) ->
+    {ok, infinity};
 check_list([{Need, Type} | T], Container) ->
     Limiter = maps:get(Type, Container),
     case emqx_htb_limiter:check(Need, Limiter) of
@@ -121,11 +140,15 @@ check_list([], Container) ->

 %% @doc retry the specified limiter
 -spec retry(limiter_type(), container()) -> check_result().
+retry(_Type, infinity) ->
+    {ok, infinity};
 retry(Type, Container) ->
     retry_list([Type], Container).

 %% @doc retry multiple limiters
 -spec retry_list(list(limiter_type()), container()) -> check_result().
+retry_list(_Types, infinity) ->
+    {ok, infinity};
 retry_list([Type | T], Container) ->
     Key = ?RETRY_KEY(Type),
     case Container of
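
When every requested type connects to an unlimited bucket, get_limiter_by_types/3 now collapses the whole container to the atom infinity, and the clauses above make every later check a constant-time no-op. A hedged caller sketch (illustrative module, not part of this PR):

-module(container_check_sketch).
-export([handle_check/3]).

handle_check(Need, Type, Container) ->
    case emqx_limiter_container:check(Need, Type, Container) of
        {ok, infinity} ->
            %% collapsed container: nothing to track or update
            {allow, Container};
        {ok, Container1} ->
            %% tokens consumed; keep the updated container
            {allow, Container1};
        Other ->
            %% pause/drop results are left to the caller's retry logic
            Other
    end.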

View File

@ -30,6 +30,12 @@
post_config_update/5 post_config_update/5
]). ]).
-export([
find_root/1,
insert_root/2,
delete_root/1
]).
-export([ -export([
start_server/1, start_server/1,
start_server/2, start_server/2,
@ -62,6 +68,7 @@
-define(UID(Id, Type), {Id, Type}). -define(UID(Id, Type), {Id, Type}).
-define(TAB, emqx_limiter_counters). -define(TAB, emqx_limiter_counters).
-define(ROOT_ID, root).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% API %% API
@ -104,9 +111,25 @@ insert_bucket(Id, Type, Bucket) ->
). ).
-spec delete_bucket(limiter_id(), limiter_type()) -> true. -spec delete_bucket(limiter_id(), limiter_type()) -> true.
delete_bucket(Type, Id) -> delete_bucket(Id, Type) ->
ets:delete(?TAB, ?UID(Id, Type)). ets:delete(?TAB, ?UID(Id, Type)).
-spec find_root(limiter_type()) ->
{ok, bucket_ref()} | undefined.
find_root(Type) ->
find_bucket(?ROOT_ID, Type).
-spec insert_root(
limiter_type(),
bucket_ref()
) -> boolean().
insert_root(Type, Bucket) ->
insert_bucket(?ROOT_ID, Type, Bucket).
-spec delete_root(limiter_type()) -> true.
delete_root(Type) ->
delete_bucket(?ROOT_ID, Type).
post_config_update([limiter], _Config, NewConf, _OldConf, _AppEnvs) -> post_config_update([limiter], _Config, NewConf, _OldConf, _AppEnvs) ->
Types = lists:delete(client, maps:keys(NewConf)), Types = lists:delete(client, maps:keys(NewConf)),
_ = [on_post_config_update(Type, NewConf) || Type <- Types], _ = [on_post_config_update(Type, NewConf) || Type <- Types],
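Usage sketch for the new root-bucket helpers; the construction mirrors `emqx_limiter_server:init_tree/2` further down (counter size and index are the illustrative values used there):

Counter = counters:new(8, [write_concurrency]),
RootBucket = emqx_limiter_bucket_ref:new(Counter, 1, infinity),
_ = emqx_limiter_manager:insert_root(bytes, RootBucket),
{ok, RootBucket} = emqx_limiter_manager:find_root(bytes),
true = emqx_limiter_manager:delete_root(bytes).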
View File
@ -24,6 +24,7 @@
fields/1, fields/1,
to_rate/1, to_rate/1,
to_capacity/1, to_capacity/1,
to_burst/1,
default_period/0, default_period/0,
to_burst_rate/1, to_burst_rate/1,
to_initial/1, to_initial/1,
@ -31,20 +32,22 @@
get_bucket_cfg_path/2, get_bucket_cfg_path/2,
desc/1, desc/1,
types/0, types/0,
infinity_value/0 calc_capacity/1,
extract_with_type/2,
default_client_config/0
]). ]).
-define(KILOBYTE, 1024). -define(KILOBYTE, 1024).
-define(BUCKET_KEYS, [ -define(LISTENER_BUCKET_KEYS, [
{bytes_in, bucket_infinity}, bytes,
{message_in, bucket_infinity}, messages,
{connection, bucket_limit}, connection,
{message_routing, bucket_infinity} message_routing
]). ]).
-type limiter_type() :: -type limiter_type() ::
bytes_in bytes
| message_in | messages
| connection | connection
| message_routing | message_routing
%% internal limiter for unclassified resources %% internal limiter for unclassified resources
@ -54,8 +57,10 @@
-type bucket_name() :: atom(). -type bucket_name() :: atom().
-type rate() :: infinity | float(). -type rate() :: infinity | float().
-type burst_rate() :: 0 | float(). -type burst_rate() :: 0 | float().
%% a compatibility type for the deprecated field and type `capacity`.
-type burst() :: burst_rate().
%% the capacity of the token bucket %% the capacity of the token bucket
-type capacity() :: non_neg_integer(). %%-type capacity() :: non_neg_integer().
%% initial capacity of the token bucket %% initial capacity of the token bucket
-type initial() :: non_neg_integer(). -type initial() :: non_neg_integer().
-type bucket_path() :: list(atom()). -type bucket_path() :: list(atom()).
@ -72,13 +77,13 @@
-typerefl_from_string({rate/0, ?MODULE, to_rate}). -typerefl_from_string({rate/0, ?MODULE, to_rate}).
-typerefl_from_string({burst_rate/0, ?MODULE, to_burst_rate}). -typerefl_from_string({burst_rate/0, ?MODULE, to_burst_rate}).
-typerefl_from_string({capacity/0, ?MODULE, to_capacity}). -typerefl_from_string({burst/0, ?MODULE, to_burst}).
-typerefl_from_string({initial/0, ?MODULE, to_initial}). -typerefl_from_string({initial/0, ?MODULE, to_initial}).
-reflect_type([ -reflect_type([
rate/0, rate/0,
burst_rate/0, burst_rate/0,
capacity/0, burst/0,
initial/0, initial/0,
failure_strategy/0, failure_strategy/0,
bucket_name/0 bucket_name/0
@ -90,27 +95,34 @@
namespace() -> limiter. namespace() -> limiter.
roots() -> [limiter]. roots() ->
[
{limiter,
hoconsc:mk(hoconsc:ref(?MODULE, limiter), #{
importance => ?IMPORTANCE_HIDDEN
})}
].
fields(limiter) -> fields(limiter) ->
[ [
{Type, {Type,
?HOCON(?R_REF(node_opts), #{ ?HOCON(?R_REF(node_opts), #{
desc => ?DESC(Type), desc => ?DESC(Type),
default => #{} importance => ?IMPORTANCE_HIDDEN,
aliases => alias_of_type(Type)
})} })}
|| Type <- types() || Type <- types()
] ++ ] ++
[ [
%% This is an undocumented feature, and it won't be supported anymore
{client, {client,
?HOCON( ?HOCON(
?R_REF(client_fields), ?R_REF(client_fields),
#{ #{
desc => ?DESC(client), desc => ?DESC(client),
default => maps:from_list([ importance => ?IMPORTANCE_HIDDEN,
{erlang:atom_to_binary(Type), #{}} required => {false, recursively},
|| Type <- types() deprecated => {since, "5.0.25"}
])
} }
)} )}
]; ];
@ -124,30 +136,18 @@ fields(node_opts) ->
})} })}
]; ];
fields(client_fields) -> fields(client_fields) ->
[ client_fields(types());
{Type, fields(bucket_opts) ->
?HOCON(?R_REF(client_opts), #{ fields_of_bucket(<<"infinity">>);
desc => ?DESC(Type),
default => #{}
})}
|| Type <- types()
];
fields(bucket_infinity) ->
[
{rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"infinity">>})},
{capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => <<"infinity">>})},
{initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})}
];
fields(bucket_limit) ->
[
{rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"1000/s">>})},
{capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => <<"1000">>})},
{initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})}
];
fields(client_opts) -> fields(client_opts) ->
[ [
{rate, ?HOCON(rate(), #{default => <<"infinity">>, desc => ?DESC(rate)})}, {rate, ?HOCON(rate(), #{default => <<"infinity">>, desc => ?DESC(rate)})},
{initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})}, {initial,
?HOCON(initial(), #{
default => <<"0">>,
desc => ?DESC(initial),
importance => ?IMPORTANCE_HIDDEN
})},
%% low_watermark add for emqx_channel and emqx_session %% low_watermark add for emqx_channel and emqx_session
%% both modules consume first and then check %% both modules consume first and then check
%% so we need to use this value to prevent excessive consumption %% so we need to use this value to prevent excessive consumption
@ -157,20 +157,24 @@ fields(client_opts) ->
initial(), initial(),
#{ #{
desc => ?DESC(low_watermark), desc => ?DESC(low_watermark),
default => <<"0">> default => <<"0">>,
importance => ?IMPORTANCE_HIDDEN
} }
)}, )},
{capacity, {burst,
?HOCON(capacity(), #{ ?HOCON(burst(), #{
desc => ?DESC(client_bucket_capacity), desc => ?DESC(burst),
default => <<"infinity">> default => <<"0">>,
importance => ?IMPORTANCE_HIDDEN,
aliases => [capacity]
})}, })},
{divisible, {divisible,
?HOCON( ?HOCON(
boolean(), boolean(),
#{ #{
desc => ?DESC(divisible), desc => ?DESC(divisible),
default => false default => false,
importance => ?IMPORTANCE_HIDDEN
} }
)}, )},
{max_retry_time, {max_retry_time,
@ -178,7 +182,8 @@ fields(client_opts) ->
emqx_schema:duration(), emqx_schema:duration(),
#{ #{
desc => ?DESC(max_retry_time), desc => ?DESC(max_retry_time),
default => <<"10s">> default => <<"10s">>,
importance => ?IMPORTANCE_HIDDEN
} }
)}, )},
{failure_strategy, {failure_strategy,
@ -186,25 +191,24 @@ fields(client_opts) ->
failure_strategy(), failure_strategy(),
#{ #{
desc => ?DESC(failure_strategy), desc => ?DESC(failure_strategy),
default => force default => force,
importance => ?IMPORTANCE_HIDDEN
} }
)} )}
]; ];
fields(listener_fields) -> fields(listener_fields) ->
bucket_fields(?BUCKET_KEYS, listener_client_fields); composite_bucket_fields(?LISTENER_BUCKET_KEYS, listener_client_fields);
fields(listener_client_fields) -> fields(listener_client_fields) ->
client_fields(?BUCKET_KEYS); client_fields(?LISTENER_BUCKET_KEYS);
fields(Type) -> fields(Type) ->
bucket_field(Type). simple_bucket_field(Type).
desc(limiter) -> desc(limiter) ->
"Settings for the rate limiter."; "Settings for the rate limiter.";
desc(node_opts) -> desc(node_opts) ->
"Settings for the limiter of the node level."; "Settings for the limiter of the node level.";
desc(bucket_infinity) -> desc(bucket_opts) ->
"Settings for the bucket."; "Settings for the bucket.";
desc(bucket_limit) ->
desc(bucket_infinity);
desc(client_opts) -> desc(client_opts) ->
"Settings for the client in bucket level."; "Settings for the client in bucket level.";
desc(client_fields) -> desc(client_fields) ->
@ -230,19 +234,37 @@ get_bucket_cfg_path(Type, BucketName) ->
[limiter, Type, bucket, BucketName]. [limiter, Type, bucket, BucketName].
types() -> types() ->
[bytes_in, message_in, connection, message_routing, internal]. [bytes, messages, connection, message_routing, internal].
%%-------------------------------------------------------------------- calc_capacity(#{rate := infinity}) ->
%% Internal functions infinity;
%%-------------------------------------------------------------------- calc_capacity(#{rate := Rate, burst := Burst}) ->
erlang:floor(1000 * Rate / default_period()) + Burst.
%% `infinity` to `infinity_value` rules: extract_with_type(_Type, undefined) ->
%% 1. all infinity capacity will change to infinity_value undefined;
%% 2. if the rate of global and bucket both are `infinity`, extract_with_type(Type, #{client := ClientCfg} = BucketCfg) ->
%% use `infinity_value` as bucket rate. see `emqx_limiter_server:get_counter_rate/2` BucketVal = maps:find(Type, BucketCfg),
infinity_value() -> ClientVal = maps:find(Type, ClientCfg),
%% 1 TB merge_client_bucket(Type, ClientVal, BucketVal);
1099511627776. extract_with_type(Type, BucketCfg) ->
BucketVal = maps:find(Type, BucketCfg),
merge_client_bucket(Type, undefined, BucketVal).
%% Since the client configuration can be absent or set to `undefined`,
%% but we still need some basic settings to control the behaviour of the limiter,
%% this helper function generates a default configuration.
%% This is a temporary workaround until we find a better way to simplify this.
default_client_config() ->
#{
rate => infinity,
initial => 0,
low_watermark => 0,
burst => 0,
divisible => false,
max_retry_time => timer:seconds(10),
failure_strategy => force
}.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Internal functions %% Internal functions
@ -251,6 +273,17 @@ infinity_value() ->
to_burst_rate(Str) -> to_burst_rate(Str) ->
to_rate(Str, false, true). to_rate(Str, false, true).
%% The default value of the deprecated `capacity` field is `infinity`,
%% but `capacity` has been replaced by `burst`, which must not be `infinity`
%% and defaults to 0, so `infinity` is converted to 0
to_burst(Str) ->
case to_rate(Str, true, true) of
{ok, infinity} ->
{ok, 0};
Any ->
Any
end.
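For illustration (the second value goes through the normal rate parser described below):

{ok, 0} = emqx_limiter_schema:to_burst("infinity"),
{ok, _Burst} = emqx_limiter_schema:to_burst("100/s").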
%% rate can be: 10 10MB 10MB/s 10MB/2s infinity %% rate can be: 10 10MB 10MB/s 10MB/2s infinity
%% e.g. the bytes_in regex tree is: %% e.g. the bytes_in regex tree is:
%% %%
@ -335,7 +368,7 @@ to_quota(Str, Regex) ->
{match, [Quota, ""]} -> {match, [Quota, ""]} ->
{ok, erlang:list_to_integer(Quota)}; {ok, erlang:list_to_integer(Quota)};
{match, ""} -> {match, ""} ->
{ok, infinity_value()}; {ok, infinity};
_ -> _ ->
{error, Str} {error, Str}
end end
@ -350,26 +383,33 @@ apply_unit("mb", Val) -> Val * ?KILOBYTE * ?KILOBYTE;
apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE; apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE;
apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit). apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit).
bucket_field(Type) when is_atom(Type) -> %% A bucket with only one type
fields(bucket_infinity) ++ simple_bucket_field(Type) when is_atom(Type) ->
fields(bucket_opts) ++
[ [
{client, {client,
?HOCON( ?HOCON(
?R_REF(?MODULE, client_opts), ?R_REF(?MODULE, client_opts),
#{ #{
desc => ?DESC(client), desc => ?DESC(client),
required => false required => {false, recursively},
importance => importance_of_type(Type),
aliases => alias_of_type(Type)
} }
)} )}
]. ].
bucket_fields(Types, ClientRef) ->
%% A bucket with multiple types
composite_bucket_fields(Types, ClientRef) ->
[ [
{Type, {Type,
?HOCON(?R_REF(?MODULE, Opts), #{ ?HOCON(?R_REF(?MODULE, bucket_opts), #{
desc => ?DESC(?MODULE, Type), desc => ?DESC(?MODULE, Type),
required => false required => {false, recursively},
importance => importance_of_type(Type),
aliases => alias_of_type(Type)
})} })}
|| {Type, Opts} <- Types || Type <- Types
] ++ ] ++
[ [
{client, {client,
@ -377,17 +417,62 @@ bucket_fields(Types, ClientRef) ->
?R_REF(?MODULE, ClientRef), ?R_REF(?MODULE, ClientRef),
#{ #{
desc => ?DESC(client), desc => ?DESC(client),
required => false required => {false, recursively}
} }
)} )}
]. ].
fields_of_bucket(Default) ->
[
{rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => Default})},
{burst,
?HOCON(burst(), #{
desc => ?DESC(burst),
default => <<"0">>,
importance => ?IMPORTANCE_HIDDEN,
aliases => [capacity]
})},
{initial,
?HOCON(initial(), #{
default => <<"0">>,
desc => ?DESC(initial),
importance => ?IMPORTANCE_HIDDEN
})}
].
client_fields(Types) -> client_fields(Types) ->
[ [
{Type, {Type,
?HOCON(?R_REF(client_opts), #{ ?HOCON(?R_REF(client_opts), #{
desc => ?DESC(Type), desc => ?DESC(Type),
required => false required => false,
importance => importance_of_type(Type),
aliases => alias_of_type(Type)
})} })}
|| {Type, _} <- Types || Type <- Types
]. ].
importance_of_type(interval) ->
?IMPORTANCE_HIDDEN;
importance_of_type(message_routing) ->
?IMPORTANCE_HIDDEN;
importance_of_type(connection) ->
?IMPORTANCE_HIDDEN;
importance_of_type(_) ->
?DEFAULT_IMPORTANCE.
alias_of_type(messages) ->
[message_in];
alias_of_type(bytes) ->
[bytes_in];
alias_of_type(_) ->
[].
merge_client_bucket(Type, {ok, ClientVal}, {ok, BucketVal}) ->
#{Type => BucketVal, client => #{Type => ClientVal}};
merge_client_bucket(Type, {ok, ClientVal}, _) ->
#{client => #{Type => ClientVal}};
merge_client_bucket(Type, _, {ok, BucketVal}) ->
#{Type => BucketVal};
merge_client_bucket(_, _, _) ->
undefined.
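A sketch of how `extract_with_type/2` slices a listener limiter map (map shapes are illustrative):

Cfg = #{
    connection => #{rate => 1000.0, burst => 0, initial => 0},
    client => #{connection => #{rate => 100.0}}
},
#{connection := _, client := #{connection := _}} =
    emqx_limiter_schema:extract_with_type(connection, Cfg),
%% absent settings stay absent
undefined = emqx_limiter_schema:extract_with_type(connection, #{}),
undefined = emqx_limiter_schema:extract_with_type(connection, undefined).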
View File
@ -59,7 +59,8 @@
burst := rate(), burst := rate(),
%% token generation interval(second) %% token generation interval(second)
period := pos_integer(), period := pos_integer(),
produced := float() produced := float(),
correction := emqx_limiter_decimal:zero_or_float()
}. }.
-type bucket() :: #{ -type bucket() :: #{
@ -98,6 +99,7 @@
%% minimum coefficient for overloaded limiter %% minimum coefficient for overloaded limiter
-define(OVERLOAD_MIN_ALLOC, 0.3). -define(OVERLOAD_MIN_ALLOC, 0.3).
-define(COUNTER_SIZE, 8). -define(COUNTER_SIZE, 8).
-define(ROOT_COUNTER_IDX, 1).
-export_type([index/0]). -export_type([index/0]).
-import(emqx_limiter_decimal, [add/2, sub/2, mul/2, put_to_counter/3]). -import(emqx_limiter_decimal, [add/2, sub/2, mul/2, put_to_counter/3]).
@ -110,40 +112,24 @@
-spec connect( -spec connect(
limiter_id(), limiter_id(),
limiter_type(), limiter_type(),
bucket_name() | #{limiter_type() => bucket_name() | undefined} hocons:config() | undefined
) -> ) ->
{ok, emqx_htb_limiter:limiter()} | {error, _}. {ok, emqx_htb_limiter:limiter()} | {error, _}.
%% If no bucket path is set in config, there will be no limit %% undefined is the default case: no limiter is configured
connect(_Id, _Type, undefined) -> connect(Id, Type, undefined) ->
{ok, emqx_htb_limiter:make_infinity_limiter()}; create_limiter(Id, Type, undefined, undefined);
connect(Id, Type, #{rate := _} = Cfg) ->
create_limiter(Id, Type, maps:get(client, Cfg, undefined), Cfg);
connect(Id, Type, Cfg) -> connect(Id, Type, Cfg) ->
case find_limiter_cfg(Type, Cfg) of create_limiter(
{undefined, _} -> Id,
{ok, emqx_htb_limiter:make_infinity_limiter()}; Type,
{ emqx_utils_maps:deep_get([client, Type], Cfg, undefined),
#{ maps:get(Type, Cfg, undefined)
rate := BucketRate, ).
capacity := BucketSize
},
#{rate := CliRate, capacity := CliSize} = ClientCfg
} ->
case emqx_limiter_manager:find_bucket(Id, Type) of
{ok, Bucket} ->
{ok,
if
CliRate < BucketRate orelse CliSize < BucketSize ->
emqx_htb_limiter:make_token_bucket_limiter(ClientCfg, Bucket);
true ->
emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket)
end};
undefined ->
?SLOG(error, #{msg => "bucket_not_found", type => Type, id => Id}),
{error, invalid_bucket}
end
end.
-spec add_bucket(limiter_id(), limiter_type(), hocons:config() | undefined) -> ok. -spec add_bucket(limiter_id(), limiter_type(), hocons:config() | undefined) -> ok.
add_bucket(_Id, _Type, undefine) -> add_bucket(_Id, _Type, undefined) ->
ok; ok;
add_bucket(Id, Type, Cfg) -> add_bucket(Id, Type, Cfg) ->
?CALL(Type, {add_bucket, Id, Cfg}). ?CALL(Type, {add_bucket, Id, Cfg}).
@ -281,7 +267,8 @@ handle_info(Info, State) ->
Reason :: normal | shutdown | {shutdown, term()} | term(), Reason :: normal | shutdown | {shutdown, term()} | term(),
State :: term() State :: term()
) -> any(). ) -> any().
terminate(_Reason, _State) -> terminate(_Reason, #{type := Type}) ->
emqx_limiter_manager:delete_root(Type),
ok. ok.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -336,10 +323,14 @@ oscillation(
oscillate(Interval), oscillate(Interval),
Ordereds = get_ordered_buckets(Buckets), Ordereds = get_ordered_buckets(Buckets),
{Alloced, Buckets2} = transverse(Ordereds, Flow, 0.0, Buckets), {Alloced, Buckets2} = transverse(Ordereds, Flow, 0.0, Buckets),
maybe_burst(State#{ State2 = maybe_adjust_root_tokens(
State#{
buckets := Buckets2, buckets := Buckets2,
root := Root#{produced := Produced + Alloced} root := Root#{produced := Produced + Alloced}
}). },
Alloced
),
maybe_burst(State2).
%% @doc horizontal spread %% @doc horizontal spread
-spec transverse( -spec transverse(
@ -412,6 +403,24 @@ get_ordered_buckets(Buckets) ->
Buckets Buckets
). ).
-spec maybe_adjust_root_tokens(state(), float()) -> state().
maybe_adjust_root_tokens(#{root := #{rate := infinity}} = State, _Alloced) ->
State;
maybe_adjust_root_tokens(#{root := #{rate := Rate}} = State, Alloced) when Alloced >= Rate ->
State;
maybe_adjust_root_tokens(#{root := #{rate := Rate} = Root, counter := Counter} = State, Alloced) ->
InFlow = Rate - Alloced,
Token = counters:get(Counter, ?ROOT_COUNTER_IDX),
case Token >= Rate of
true ->
State;
_ ->
Available = erlang:min(Rate - Token, InFlow),
{Inc, Root2} = emqx_limiter_correction:add(Available, Root),
counters:add(Counter, ?ROOT_COUNTER_IDX, Inc),
State#{root := Root2}
end.
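A worked example of the refill arithmetic, with illustrative numbers:

%% Rate = 100, Alloced = 30          => InFlow = 100 - 30 = 70
%% Token = counters:get(Counter, 1) = 20, which is < Rate, so refill:
%% Available = erlang:min(Rate - Token, InFlow) = erlang:min(80, 70) = 70
%% => counters:add(Counter, 1, 70), after decimal correction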
-spec maybe_burst(state()) -> state(). -spec maybe_burst(state()) -> state().
maybe_burst( maybe_burst(
#{ #{
@ -475,12 +484,16 @@ init_tree(Type) when is_atom(Type) ->
Cfg = emqx:get_config([limiter, Type]), Cfg = emqx:get_config([limiter, Type]),
init_tree(Type, Cfg). init_tree(Type, Cfg).
init_tree(Type, Cfg) -> init_tree(Type, #{rate := Rate} = Cfg) ->
Counter = counters:new(?COUNTER_SIZE, [write_concurrency]),
RootBucket = emqx_limiter_bucket_ref:new(Counter, ?ROOT_COUNTER_IDX, Rate),
emqx_limiter_manager:insert_root(Type, RootBucket),
#{ #{
type => Type, type => Type,
root => make_root(Cfg), root => make_root(Cfg),
counter => counters:new(?COUNTER_SIZE, [write_concurrency]), counter => Counter,
index => 0, %% The first slot is reserved for the root
index => ?ROOT_COUNTER_IDX,
buckets => #{} buckets => #{}
}. }.
@ -490,15 +503,18 @@ make_root(#{rate := Rate, burst := Burst}) ->
rate => Rate, rate => Rate,
burst => Burst, burst => Burst,
period => emqx_limiter_schema:default_period(), period => emqx_limiter_schema:default_period(),
produced => 0.0 produced => 0.0,
correction => 0
}. }.
do_add_bucket(Id, #{rate := Rate, capacity := Capacity} = Cfg, #{buckets := Buckets} = State) -> do_add_bucket(_Id, #{rate := infinity}, #{root := #{rate := infinity}} = State) ->
State;
do_add_bucket(Id, #{rate := Rate} = Cfg, #{buckets := Buckets} = State) ->
case maps:get(Id, Buckets, undefined) of case maps:get(Id, Buckets, undefined) of
undefined -> undefined ->
make_bucket(Id, Cfg, State); make_bucket(Id, Cfg, State);
Bucket -> Bucket ->
Bucket2 = Bucket#{rate := Rate, capacity := Capacity}, Bucket2 = Bucket#{rate := Rate, capacity := emqx_limiter_schema:calc_capacity(Cfg)},
State#{buckets := Buckets#{Id := Bucket2}} State#{buckets := Buckets#{Id := Bucket2}}
end. end.
@ -509,7 +525,7 @@ make_bucket(Id, Cfg, #{index := ?COUNTER_SIZE} = State) ->
}); });
make_bucket( make_bucket(
Id, Id,
#{rate := Rate, capacity := Capacity} = Cfg, #{rate := Rate} = Cfg,
#{type := Type, counter := Counter, index := Index, buckets := Buckets} = State #{type := Type, counter := Counter, index := Index, buckets := Buckets} = State
) -> ) ->
NewIndex = Index + 1, NewIndex = Index + 1,
@ -519,7 +535,7 @@ make_bucket(
rate => Rate, rate => Rate,
obtained => Initial, obtained => Initial,
correction => 0, correction => 0,
capacity => Capacity, capacity => emqx_limiter_schema:calc_capacity(Cfg),
counter => Counter, counter => Counter,
index => NewIndex index => NewIndex
}, },
@ -541,19 +557,14 @@ do_del_bucket(Id, #{type := Type, buckets := Buckets} = State) ->
get_initial_val( get_initial_val(
#{ #{
initial := Initial, initial := Initial,
rate := Rate, rate := Rate
capacity := Capacity
} }
) -> ) ->
%% initial will nevner be infinity(see the emqx_limiter_schema)
InfVal = emqx_limiter_schema:infinity_value(),
if if
Initial > 0 -> Initial > 0 ->
Initial; Initial;
Rate =/= infinity -> Rate =/= infinity ->
erlang:min(Rate, Capacity); Rate;
Capacity =/= infinity andalso Capacity =/= InfVal ->
Capacity;
true -> true ->
0 0
end. end.
@ -567,21 +578,61 @@ call(Type, Msg) ->
gen_server:call(Pid, Msg) gen_server:call(Pid, Msg)
end. end.
find_limiter_cfg(Type, #{rate := _} = Cfg) -> create_limiter(Id, Type, #{rate := Rate} = ClientCfg, BucketCfg) when Rate =/= infinity ->
{Cfg, find_client_cfg(Type, maps:get(client, Cfg, undefined))}; create_limiter_with_client(Id, Type, ClientCfg, BucketCfg);
find_limiter_cfg(Type, Cfg) -> create_limiter(Id, Type, _, BucketCfg) ->
{ create_limiter_without_client(Id, Type, BucketCfg).
maps:get(Type, Cfg, undefined),
find_client_cfg(Type, emqx_utils_maps:deep_get([client, Type], Cfg, undefined))
}.
find_client_cfg(Type, BucketCfg) -> %% create a limiter with the client-level configuration
NodeCfg = emqx:get_config([limiter, client, Type], undefined), create_limiter_with_client(Id, Type, ClientCfg, BucketCfg) ->
merge_client_cfg(NodeCfg, BucketCfg). case find_referenced_bucket(Id, Type, BucketCfg) of
false ->
{ok, emqx_htb_limiter:make_local_limiter(ClientCfg, infinity)};
{ok, Bucket, RefCfg} ->
create_limiter_with_ref(Bucket, ClientCfg, RefCfg);
Error ->
Error
end.
merge_client_cfg(undefined, BucketCfg) -> %% create a limiter only with the referenced configuration
BucketCfg; create_limiter_without_client(Id, Type, BucketCfg) ->
merge_client_cfg(NodeCfg, undefined) -> case find_referenced_bucket(Id, Type, BucketCfg) of
NodeCfg; false ->
merge_client_cfg(NodeCfg, BucketCfg) -> {ok, emqx_htb_limiter:make_infinity_limiter()};
maps:merge(NodeCfg, BucketCfg). {ok, Bucket, RefCfg} ->
ClientCfg = emqx_limiter_schema:default_client_config(),
create_limiter_with_ref(Bucket, ClientCfg, RefCfg);
Error ->
Error
end.
create_limiter_with_ref(
Bucket,
#{rate := CliRate} = ClientCfg,
#{rate := RefRate}
) when CliRate < RefRate ->
{ok, emqx_htb_limiter:make_local_limiter(ClientCfg, Bucket)};
create_limiter_with_ref(Bucket, ClientCfg, _) ->
{ok, emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket)}.
%% this is a listener(server)-level reference
find_referenced_bucket(Id, Type, #{rate := Rate} = Cfg) when Rate =/= infinity ->
case emqx_limiter_manager:find_bucket(Id, Type) of
{ok, Bucket} ->
{ok, Bucket, Cfg};
_ ->
?SLOG(error, #{msg => "bucket not found", type => Type, id => Id}),
{error, invalid_bucket}
end;
%% this is a node-level reference
find_referenced_bucket(Id, Type, _) ->
case emqx:get_config([limiter, Type], undefined) of
#{rate := infinity} ->
false;
undefined ->
?SLOG(error, #{msg => "invalid limiter type", type => Type, id => Id}),
{error, invalid_bucket};
NodeCfg ->
{ok, Bucket} = emqx_limiter_manager:find_root(Type),
{ok, Bucket, NodeCfg}
end.
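With nothing configured at either level, `connect/3` now degrades to an infinity limiter instead of an error; a sketch, assuming the default node-level limiter config (rate = infinity):

{ok, _InfinityLimiter} = emqx_limiter_server:connect('tcp:default', bytes, undefined).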
View File
@ -35,7 +35,8 @@
current_conns/2, current_conns/2,
max_conns/2, max_conns/2,
id_example/0, id_example/0,
default_max_conn/0 default_max_conn/0,
shutdown_count/2
]). ]).
-export([ -export([
@ -195,6 +196,17 @@ max_conns(Type, Name, _ListenOn) when Type =:= ws; Type =:= wss ->
max_conns(_, _, _) -> max_conns(_, _, _) ->
{error, not_support}. {error, not_support}.
shutdown_count(ID, ListenOn) ->
{ok, #{type := Type, name := Name}} = parse_listener_id(ID),
shutdown_count(Type, Name, ListenOn).
shutdown_count(Type, Name, ListenOn) when Type == tcp; Type == ssl ->
esockd:get_shutdown_count({listener_id(Type, Name), ListenOn});
shutdown_count(Type, _Name, _ListenOn) when Type =:= ws; Type =:= wss ->
[];
shutdown_count(_, _, _) ->
{error, not_support}.
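Usage sketch (see the new `t_sub_non_utf8_topic` test further down); `ListenOn` is whatever the listener was started with:

Counts = emqx_listeners:shutdown_count('tcp:default', {{0, 0, 0, 0}, 1883}),
proplists:get_value(topic_filter_invalid, Counts, 0).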
%% @doc Start all listeners. %% @doc Start all listeners.
-spec start() -> ok. -spec start() -> ok.
start() -> start() ->
@ -494,7 +506,7 @@ esockd_opts(ListenerId, Type, Opts0) ->
Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0), Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0),
Limiter = limiter(Opts0), Limiter = limiter(Opts0),
Opts2 = Opts2 =
case maps:get(connection, Limiter, undefined) of case emqx_limiter_schema:extract_with_type(connection, Limiter) of
undefined -> undefined ->
Opts1; Opts1;
BucketCfg -> BucketCfg ->
@ -639,7 +651,7 @@ zone(Opts) ->
maps:get(zone, Opts, undefined). maps:get(zone, Opts, undefined).
limiter(Opts) -> limiter(Opts) ->
maps:get(limiter, Opts, #{}). maps:get(limiter, Opts, undefined).
add_limiter_bucket(Id, #{limiter := Limiter}) -> add_limiter_bucket(Id, #{limiter := Limiter}) ->
maps:fold( maps:fold(
View File
@ -237,7 +237,7 @@ set_log_handler_level(HandlerId, Level) ->
end. end.
%% @doc Set both the primary and all handlers level in one command %% @doc Set both the primary and all handlers level in one command
-spec set_log_level(logger:handler_id()) -> ok | {error, term()}. -spec set_log_level(logger:level()) -> ok | {error, term()}.
set_log_level(Level) -> set_log_level(Level) ->
case set_primary_log_level(Level) of case set_primary_log_level(Level) of
ok -> set_all_log_handlers_level(Level); ok -> set_all_log_handlers_level(Level);
View File
@ -37,7 +37,6 @@
max_qos_allowed => emqx_types:qos(), max_qos_allowed => emqx_types:qos(),
retain_available => boolean(), retain_available => boolean(),
wildcard_subscription => boolean(), wildcard_subscription => boolean(),
subscription_identifiers => boolean(),
shared_subscription => boolean(), shared_subscription => boolean(),
exclusive_subscription => boolean() exclusive_subscription => boolean()
}. }.
@ -58,18 +57,17 @@
exclusive_subscription exclusive_subscription
]). ]).
-define(DEFAULT_CAPS, #{ -define(DEFAULT_CAPS_KEYS, [
max_packet_size => ?MAX_PACKET_SIZE, max_packet_size,
max_clientid_len => ?MAX_CLIENTID_LEN, max_clientid_len,
max_topic_alias => ?MAX_TOPIC_AlIAS, max_topic_alias,
max_topic_levels => ?MAX_TOPIC_LEVELS, max_topic_levels,
max_qos_allowed => ?QOS_2, max_qos_allowed,
retain_available => true, retain_available,
wildcard_subscription => true, wildcard_subscription,
subscription_identifiers => true, shared_subscription,
shared_subscription => true, exclusive_subscription
exclusive_subscription => false ]).
}).
-spec check_pub( -spec check_pub(
emqx_types:zone(), emqx_types:zone(),
@ -88,7 +86,7 @@ check_pub(Zone, Flags) when is_map(Flags) ->
error -> error ->
Flags Flags
end, end,
maps:with(?PUBCAP_KEYS, get_caps(Zone)) get_caps(?PUBCAP_KEYS, Zone)
). ).
do_check_pub(#{topic_levels := Levels}, #{max_topic_levels := Limit}) when do_check_pub(#{topic_levels := Levels}, #{max_topic_levels := Limit}) when
@ -111,7 +109,7 @@ do_check_pub(_Flags, _Caps) ->
) -> ) ->
ok_or_error(emqx_types:reason_code()). ok_or_error(emqx_types:reason_code()).
check_sub(ClientInfo = #{zone := Zone}, Topic, SubOpts) -> check_sub(ClientInfo = #{zone := Zone}, Topic, SubOpts) ->
Caps = maps:with(?SUBCAP_KEYS, get_caps(Zone)), Caps = get_caps(?SUBCAP_KEYS, Zone),
Flags = lists:foldl( Flags = lists:foldl(
fun fun
(max_topic_levels, Map) -> (max_topic_levels, Map) ->
@ -152,10 +150,12 @@ do_check_sub(_Flags, _Caps, _, _) ->
ok. ok.
get_caps(Zone) -> get_caps(Zone) ->
lists:foldl( get_caps(?DEFAULT_CAPS_KEYS, Zone).
fun({K, V}, Acc) -> get_caps(Keys, Zone) ->
Acc#{K => emqx_config:get_zone_conf(Zone, [mqtt, K], V)} maps:with(
end, Keys,
#{}, maps:merge(
maps:to_list(?DEFAULT_CAPS) emqx_config:get([mqtt]),
emqx_config:get_zone_conf(Zone, [mqtt])
)
). ).
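The rewritten `get_caps/2` is equivalent to layering zone overrides on the global `mqtt` config and then narrowing to the requested keys; a sketch for an assumed `default` zone:

Caps = maps:with(
    [max_packet_size, max_qos_allowed],  %% subset for illustration
    maps:merge(
        emqx_config:get([mqtt]),
        emqx_config:get_zone_conf(default, [mqtt])
    )
).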
View File
@ -43,7 +43,12 @@
-type ip_port() :: tuple() | integer(). -type ip_port() :: tuple() | integer().
-type cipher() :: map(). -type cipher() :: map().
-type port_number() :: 1..65536. -type port_number() :: 1..65536.
-type server_parse_option() :: #{default_port => port_number(), no_port => boolean()}. -type server_parse_option() :: #{
default_port => port_number(),
no_port => boolean(),
supported_schemes => [string()],
default_scheme => string()
}.
-type url() :: binary(). -type url() :: binary().
-type json_binary() :: binary(). -type json_binary() :: binary().
@ -62,6 +67,12 @@
-typerefl_from_string({url/0, emqx_schema, to_url}). -typerefl_from_string({url/0, emqx_schema, to_url}).
-typerefl_from_string({json_binary/0, emqx_schema, to_json_binary}). -typerefl_from_string({json_binary/0, emqx_schema, to_json_binary}).
-type parsed_server() :: #{
hostname := string(),
port => port_number(),
scheme => string()
}.
-export([ -export([
validate_heap_size/1, validate_heap_size/1,
user_lookup_fun_tr/2, user_lookup_fun_tr/2,
@ -172,7 +183,7 @@ roots(high) ->
} }
)}, )},
{?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, authentication(global)}, {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, authentication(global)},
%% NOTE: authorization schema here is only to keep emqx app prue %% NOTE: authorization schema here is only to keep emqx app pure
%% the full schema for EMQX node is injected in emqx_conf_schema. %% the full schema for EMQX node is injected in emqx_conf_schema.
{?EMQX_AUTHORIZATION_CONFIG_ROOT_NAME, {?EMQX_AUTHORIZATION_CONFIG_ROOT_NAME,
sc( sc(
@ -676,12 +687,13 @@ fields("force_shutdown") ->
desc => ?DESC(force_shutdown_enable) desc => ?DESC(force_shutdown_enable)
} }
)}, )},
{"max_message_queue_len", {"max_mailbox_size",
sc( sc(
range(0, inf), range(0, inf),
#{ #{
default => 1000, default => 1000,
desc => ?DESC(force_shutdown_max_message_queue_len) aliases => [max_message_queue_len],
desc => ?DESC(force_shutdown_max_mailbox_size)
} }
)}, )},
{"max_heap_size", {"max_heap_size",
@ -924,15 +936,17 @@ fields("mqtt_quic_listener") ->
string(), string(),
#{ #{
%% TODO: deprecated => {since, "5.1.0"} %% TODO: deprecated => {since, "5.1.0"}
desc => ?DESC(fields_mqtt_quic_listener_certfile) desc => ?DESC(fields_mqtt_quic_listener_certfile),
importance => ?IMPORTANCE_HIDDEN
} }
)}, )},
{"keyfile", {"keyfile",
sc( sc(
string(), string(),
%% TODO: deprecated => {since, "5.1.0"}
#{ #{
desc => ?DESC(fields_mqtt_quic_listener_keyfile) %% TODO: deprecated => {since, "5.1.0"}
desc => ?DESC(fields_mqtt_quic_listener_keyfile),
importance => ?IMPORTANCE_HIDDEN
} }
)}, )},
{"ciphers", ciphers_schema(quic)}, {"ciphers", ciphers_schema(quic)},
@ -1008,7 +1022,10 @@ fields("mqtt_quic_listener") ->
duration_ms(), duration_ms(),
#{ #{
default => 0, default => 0,
desc => ?DESC(fields_mqtt_quic_listener_idle_timeout) desc => ?DESC(fields_mqtt_quic_listener_idle_timeout),
%% TODO: deprecated => {since, "5.1.0"}
%% deprecated, use idle_timeout_ms instead
importance => ?IMPORTANCE_HIDDEN
} }
)}, )},
{"idle_timeout_ms", {"idle_timeout_ms",
@ -1022,7 +1039,10 @@ fields("mqtt_quic_listener") ->
duration_ms(), duration_ms(),
#{ #{
default => <<"10s">>, default => <<"10s">>,
desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout) desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout),
%% TODO: deprecated => {since, "5.1.0"}
%% use handshake_idle_timeout_ms
importance => ?IMPORTANCE_HIDDEN
} }
)}, )},
{"handshake_idle_timeout_ms", {"handshake_idle_timeout_ms",
@ -1036,7 +1056,10 @@ fields("mqtt_quic_listener") ->
duration_ms(), duration_ms(),
#{ #{
default => 0, default => 0,
desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval) desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval),
%% TODO: deprecated => {since, "5.1.0"}
%% use keep_alive_interval_ms instead
importance => ?IMPORTANCE_HIDDEN
} }
)}, )},
{"keep_alive_interval_ms", {"keep_alive_interval_ms",
@ -1504,10 +1527,8 @@ fields("broker") ->
sc( sc(
boolean(), boolean(),
#{ #{
%% TODO: deprecated => {since, "5.1.0"} deprecated => {since, "5.1.0"},
%% in favor of session message re-dispatch at termination importance => ?IMPORTANCE_HIDDEN,
%% we will stop supporting dispatch acks for shared
%% subscriptions.
default => false, default => false,
desc => ?DESC(broker_shared_dispatch_ack_enabled) desc => ?DESC(broker_shared_dispatch_ack_enabled)
} }
@ -2171,7 +2192,7 @@ common_ssl_opts_schema(Defaults) ->
D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end, D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end,
Df = fun(Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end, Df = fun(Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end,
Collection = maps:get(versions, Defaults, tls_all_available), Collection = maps:get(versions, Defaults, tls_all_available),
AvailableVersions = default_tls_vsns(Collection), DefaultVersions = default_tls_vsns(Collection),
[ [
{"cacertfile", {"cacertfile",
sc( sc(
@ -2233,6 +2254,7 @@ common_ssl_opts_schema(Defaults) ->
example => <<"">>, example => <<"">>,
format => <<"password">>, format => <<"password">>,
desc => ?DESC(common_ssl_opts_schema_password), desc => ?DESC(common_ssl_opts_schema_password),
importance => ?IMPORTANCE_LOW,
converter => fun password_converter/2 converter => fun password_converter/2
} }
)}, )},
@ -2240,9 +2262,10 @@ common_ssl_opts_schema(Defaults) ->
sc( sc(
hoconsc:array(typerefl:atom()), hoconsc:array(typerefl:atom()),
#{ #{
default => AvailableVersions, default => DefaultVersions,
desc => ?DESC(common_ssl_opts_schema_versions), desc => ?DESC(common_ssl_opts_schema_versions),
validator => fun(Inputs) -> validate_tls_versions(AvailableVersions, Inputs) end importance => ?IMPORTANCE_HIGH,
validator => fun(Input) -> validate_tls_versions(Collection, Input) end
} }
)}, )},
{"ciphers", ciphers_schema(D("ciphers"))}, {"ciphers", ciphers_schema(D("ciphers"))},
@ -2428,10 +2451,14 @@ client_ssl_opts_schema(Defaults) ->
)} )}
]. ].
default_tls_vsns(dtls_all_available) -> available_tls_vsns(dtls_all_available) -> emqx_tls_lib:available_versions(dtls);
emqx_tls_lib:available_versions(dtls); available_tls_vsns(tls_all_available) -> emqx_tls_lib:available_versions(tls).
default_tls_vsns(tls_all_available) ->
emqx_tls_lib:available_versions(tls). outdated_tls_vsn(dtls_all_available) -> [dtlsv1];
outdated_tls_vsn(tls_all_available) -> ['tlsv1.1', tlsv1].
default_tls_vsns(Key) ->
available_tls_vsns(Key) -- outdated_tls_vsn(Key).
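For example, on an OTP release where all four TLS versions are available:

%% available_tls_vsns(tls_all_available) -> ['tlsv1.3', 'tlsv1.2', 'tlsv1.1', tlsv1]
%% outdated_tls_vsn(tls_all_available)   -> ['tlsv1.1', tlsv1]
%% default_tls_vsns(tls_all_available)   -> ['tlsv1.3', 'tlsv1.2']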
-spec ciphers_schema(quic | dtls_all_available | tls_all_available | undefined) -> -spec ciphers_schema(quic | dtls_all_available | tls_all_available | undefined) ->
hocon_schema:field_schema(). hocon_schema:field_schema().
@ -2740,7 +2767,8 @@ validate_ciphers(Ciphers) ->
Bad -> {error, {bad_ciphers, Bad}} Bad -> {error, {bad_ciphers, Bad}}
end. end.
validate_tls_versions(AvailableVersions, Versions) -> validate_tls_versions(Collection, Versions) ->
AvailableVersions = available_tls_vsns(Collection),
case lists:filter(fun(V) -> not lists:member(V, AvailableVersions) end, Versions) of case lists:filter(fun(V) -> not lists:member(V, AvailableVersions) end, Versions) of
[] -> ok; [] -> ok;
Vs -> {error, {unsupported_tls_versions, Vs}} Vs -> {error, {unsupported_tls_versions, Vs}}
@ -2913,7 +2941,7 @@ servers_validator(Opts, Required) ->
%% `no_port': by default it's `false', when set to `true', %% `no_port': by default it's `false', when set to `true',
%% a `throw' exception is raised if the port is found. %% a `throw' exception is raised if the port is found.
-spec parse_server(undefined | string() | binary(), server_parse_option()) -> -spec parse_server(undefined | string() | binary(), server_parse_option()) ->
{string(), port_number()}. undefined | parsed_server().
parse_server(Str, Opts) -> parse_server(Str, Opts) ->
case parse_servers(Str, Opts) of case parse_servers(Str, Opts) of
undefined -> undefined ->
@ -2927,7 +2955,7 @@ parse_server(Str, Opts) ->
%% @doc Parse comma separated `host[:port][,host[:port]]' endpoints %% @doc Parse comma separated `host[:port][,host[:port]]' endpoints
%% into a list of `{Host, Port}' tuples or just `Host' string. %% into a list of `{Host, Port}' tuples or just `Host' string.
-spec parse_servers(undefined | string() | binary(), server_parse_option()) -> -spec parse_servers(undefined | string() | binary(), server_parse_option()) ->
[{string(), port_number()}]. undefined | [parsed_server()].
parse_servers(undefined, _Opts) -> parse_servers(undefined, _Opts) ->
%% should not parse 'undefined' as string, %% should not parse 'undefined' as string,
%% not to throw exception either, %% not to throw exception either,
@ -2973,6 +3001,9 @@ split_host_port(Str) ->
do_parse_server(Str, Opts) -> do_parse_server(Str, Opts) ->
DefaultPort = maps:get(default_port, Opts, undefined), DefaultPort = maps:get(default_port, Opts, undefined),
NotExpectingPort = maps:get(no_port, Opts, false), NotExpectingPort = maps:get(no_port, Opts, false),
DefaultScheme = maps:get(default_scheme, Opts, undefined),
SupportedSchemes = maps:get(supported_schemes, Opts, []),
NotExpectingScheme = (not is_list(DefaultScheme)) andalso length(SupportedSchemes) =:= 0,
case is_integer(DefaultPort) andalso NotExpectingPort of case is_integer(DefaultPort) andalso NotExpectingPort of
true -> true ->
%% either provide a default port from schema, %% either provide a default port from schema,
@ -2981,22 +3012,129 @@ do_parse_server(Str, Opts) ->
false -> false ->
ok ok
end, end,
case is_list(DefaultScheme) andalso (not lists:member(DefaultScheme, SupportedSchemes)) of
true ->
%% inconsistent schema
error("bad_schema");
false ->
ok
end,
%% do not split with space, there should be no space allowed between host and port %% do not split with space, there should be no space allowed between host and port
case string:tokens(Str, ":") of Tokens = string:tokens(Str, ":"),
[Hostname, Port] -> Context = #{
not_expecting_port => NotExpectingPort,
not_expecting_scheme => NotExpectingScheme,
default_port => DefaultPort,
default_scheme => DefaultScheme,
opts => Opts
},
check_server_parts(Tokens, Context).
check_server_parts([Scheme, "//" ++ Hostname, Port], Context) ->
#{
not_expecting_scheme := NotExpectingScheme,
not_expecting_port := NotExpectingPort,
opts := Opts
} = Context,
NotExpectingPort andalso throw("not_expecting_port_number"), NotExpectingPort andalso throw("not_expecting_port_number"),
{check_hostname(Hostname), parse_port(Port)}; NotExpectingScheme andalso throw("not_expecting_scheme"),
[Hostname] -> #{
scheme => check_scheme(Scheme, Opts),
hostname => check_hostname(Hostname),
port => parse_port(Port)
};
check_server_parts([Scheme, "//" ++ Hostname], Context) ->
#{
not_expecting_scheme := NotExpectingScheme,
not_expecting_port := NotExpectingPort,
default_port := DefaultPort,
opts := Opts
} = Context,
NotExpectingScheme andalso throw("not_expecting_scheme"),
case is_integer(DefaultPort) of case is_integer(DefaultPort) of
true -> true ->
{check_hostname(Hostname), DefaultPort}; #{
scheme => check_scheme(Scheme, Opts),
hostname => check_hostname(Hostname),
port => DefaultPort
};
false when NotExpectingPort -> false when NotExpectingPort ->
check_hostname(Hostname); #{
scheme => check_scheme(Scheme, Opts),
hostname => check_hostname(Hostname)
};
false -> false ->
throw("missing_port_number") throw("missing_port_number")
end; end;
_ -> check_server_parts([Hostname, Port], Context) ->
throw("bad_host_port") #{
not_expecting_port := NotExpectingPort,
default_scheme := DefaultScheme
} = Context,
NotExpectingPort andalso throw("not_expecting_port_number"),
case is_list(DefaultScheme) of
false ->
#{
hostname => check_hostname(Hostname),
port => parse_port(Port)
};
true ->
#{
scheme => DefaultScheme,
hostname => check_hostname(Hostname),
port => parse_port(Port)
}
end;
check_server_parts([Hostname], Context) ->
#{
not_expecting_scheme := NotExpectingScheme,
not_expecting_port := NotExpectingPort,
default_port := DefaultPort,
default_scheme := DefaultScheme
} = Context,
case is_integer(DefaultPort) orelse NotExpectingPort of
true ->
ok;
false ->
throw("missing_port_number")
end,
case is_list(DefaultScheme) orelse NotExpectingScheme of
true ->
ok;
false ->
throw("missing_scheme")
end,
case {is_integer(DefaultPort), is_list(DefaultScheme)} of
{true, true} ->
#{
scheme => DefaultScheme,
hostname => check_hostname(Hostname),
port => DefaultPort
};
{true, false} ->
#{
hostname => check_hostname(Hostname),
port => DefaultPort
};
{false, true} ->
#{
scheme => DefaultScheme,
hostname => check_hostname(Hostname)
};
{false, false} ->
#{hostname => check_hostname(Hostname)}
end;
check_server_parts(_Tokens, _Context) ->
throw("bad_host_port").
check_scheme(Str, Opts) ->
SupportedSchemes = maps:get(supported_schemes, Opts, []),
IsSupported = lists:member(Str, SupportedSchemes),
case IsSupported of
true ->
Str;
false ->
throw("unsupported_scheme")
end. end.
check_hostname(Str) -> check_hostname(Str) ->
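End-to-end sketch of the extended parser (options and host are illustrative):

Opts = #{
    default_port => 80,
    default_scheme => "http",
    supported_schemes => ["http", "https"]
},
#{scheme := "https", hostname := "example.com", port := 8443} =
    emqx_schema:parse_server(<<"https://example.com:8443">>, Opts),
%% scheme and port fall back to the defaults when omitted
#{scheme := "http", hostname := "example.com", port := 80} =
    emqx_schema:parse_server(<<"example.com">>, Opts).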
View File
@ -165,7 +165,7 @@ strategy(Group) ->
-spec ack_enabled() -> boolean(). -spec ack_enabled() -> boolean().
ack_enabled() -> ack_enabled() ->
emqx:get_config([broker, shared_dispatch_ack_enabled]). emqx:get_config([broker, shared_dispatch_ack_enabled], false).
do_dispatch(SubPid, _Group, Topic, Msg, _Type) when SubPid =:= self() -> do_dispatch(SubPid, _Group, Topic, Msg, _Type) when SubPid =:= self() ->
%% Deadlock otherwise %% Deadlock otherwise
@ -181,7 +181,7 @@ do_dispatch(SubPid, _Group, Topic, Msg, retry) ->
do_dispatch(SubPid, Group, Topic, Msg, fresh) -> do_dispatch(SubPid, Group, Topic, Msg, fresh) ->
case ack_enabled() of case ack_enabled() of
true -> true ->
%% FIXME: replace with `emqx_shared_sub_proto:dispatch_with_ack' in 5.2 %% TODO: delete this clause after 5.1.0
do_dispatch_with_ack(SubPid, Group, Topic, Msg); do_dispatch_with_ack(SubPid, Group, Topic, Msg);
false -> false ->
send(SubPid, Topic, {deliver, Topic, Msg}) send(SubPid, Topic, {deliver, Topic, Msg})
View File
@ -27,7 +27,7 @@ format(
#{level := debug, meta := Meta = #{trace_tag := Tag}, msg := Msg}, #{level := debug, meta := Meta = #{trace_tag := Tag}, msg := Msg},
#{payload_encode := PEncode} #{payload_encode := PEncode}
) -> ) ->
Time = calendar:system_time_to_rfc3339(erlang:system_time(second)), Time = calendar:system_time_to_rfc3339(erlang:system_time(microsecond), [{unit, microsecond}]),
ClientId = to_iolist(maps:get(clientid, Meta, "")), ClientId = to_iolist(maps:get(clientid, Meta, "")),
Peername = maps:get(peername, Meta, ""), Peername = maps:get(peername, Meta, ""),
MetaBin = format_meta(Meta, PEncode), MetaBin = format_meta(Meta, PEncode),
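The effect of the change, with a sample value:

%% before: "2023-05-09T00:13:31+02:00"
%% after:  "2023-05-09T00:13:31.123456+02:00"
Time = calendar:system_time_to_rfc3339(
    erlang:system_time(microsecond), [{unit, microsecond}]
).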
View File
@ -238,7 +238,7 @@
-type stats() :: [{atom(), term()}]. -type stats() :: [{atom(), term()}].
-type oom_policy() :: #{ -type oom_policy() :: #{
max_message_queue_len => non_neg_integer(), max_mailbox_size => non_neg_integer(),
max_heap_size => non_neg_integer(), max_heap_size => non_neg_integer(),
enable => boolean() enable => boolean()
}. }.
View File
@ -90,7 +90,7 @@
listener :: {Type :: atom(), Name :: atom()}, listener :: {Type :: atom(), Name :: atom()},
%% Limiter %% Limiter
limiter :: maybe(container()), limiter :: container(),
%% cache operation when overload %% cache operation when overload
limiter_cache :: queue:queue(cache()), limiter_cache :: queue:queue(cache()),
@ -121,8 +121,8 @@
-define(SOCK_STATS, [recv_oct, recv_cnt, send_oct, send_cnt]). -define(SOCK_STATS, [recv_oct, recv_cnt, send_oct, send_cnt]).
-define(ENABLED(X), (X =/= undefined)). -define(ENABLED(X), (X =/= undefined)).
-define(LIMITER_BYTES_IN, bytes_in). -define(LIMITER_BYTES_IN, bytes).
-define(LIMITER_MESSAGE_IN, message_in). -define(LIMITER_MESSAGE_IN, messages).
-dialyzer({no_match, [info/2]}). -dialyzer({no_match, [info/2]}).
-dialyzer({nowarn_function, [websocket_init/1]}). -dialyzer({nowarn_function, [websocket_init/1]}).
@ -579,19 +579,21 @@ handle_timeout(TRef, TMsg, State) ->
list(any()), list(any()),
state() state()
) -> state(). ) -> state().
check_limiter(
_Needs,
Data,
WhenOk,
Msgs,
#state{limiter = infinity} = State
) ->
WhenOk(Data, Msgs, State);
check_limiter( check_limiter(
Needs, Needs,
Data, Data,
WhenOk, WhenOk,
Msgs, Msgs,
#state{ #state{limiter_timer = undefined, limiter = Limiter} = State
limiter = Limiter,
limiter_timer = LimiterTimer,
limiter_cache = Cache
} = State
) -> ) ->
case LimiterTimer of
undefined ->
case emqx_limiter_container:check_list(Needs, Limiter) of case emqx_limiter_container:check_list(Needs, Limiter) of
{ok, Limiter2} -> {ok, Limiter2} ->
WhenOk(Data, Msgs, State#state{limiter = Limiter2}); WhenOk(Data, Msgs, State#state{limiter = Limiter2});
@ -623,10 +625,15 @@ check_limiter(
{drop, Limiter2} -> {drop, Limiter2} ->
{ok, State#state{limiter = Limiter2}} {ok, State#state{limiter = Limiter2}}
end; end;
_ -> check_limiter(
Needs,
Data,
WhenOk,
_Msgs,
#state{limiter_cache = Cache} = State
) ->
New = #cache{need = Needs, data = Data, next = WhenOk}, New = #cache{need = Needs, data = Data, next = WhenOk},
State#state{limiter_cache = queue:in(New, Cache)} State#state{limiter_cache = queue:in(New, Cache)}.
end.
-spec retry_limiter(state()) -> state(). -spec retry_limiter(state()) -> state().
retry_limiter(#state{limiter = Limiter} = State) -> retry_limiter(#state{limiter = Limiter} = State) ->
View File
@ -148,6 +148,14 @@ t_run_hook(_) ->
?assertEqual(3, emqx:run_fold_hook(foldl_filter2_hook, [arg], 1)), ?assertEqual(3, emqx:run_fold_hook(foldl_filter2_hook, [arg], 1)),
?assertEqual(2, emqx:run_fold_hook(foldl_filter2_hook, [arg1], 1)). ?assertEqual(2, emqx:run_fold_hook(foldl_filter2_hook, [arg1], 1)).
t_cluster_nodes(_) ->
Expected = [node()],
?assertEqual(Expected, emqx:running_nodes()),
?assertEqual(Expected, emqx:cluster_nodes(running)),
?assertEqual(Expected, emqx:cluster_nodes(all)),
?assertEqual(Expected, emqx:cluster_nodes(cores)),
?assertEqual([], emqx:cluster_nodes(stopped)).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Hook fun %% Hook fun
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
View File
@ -186,7 +186,7 @@ t_session_taken(_) ->
false false
end end
end, end,
6000 15_000
), ),
Publish(), Publish(),
View File
@ -31,7 +31,7 @@ force_gc_conf() ->
#{bytes => 16777216, count => 16000, enable => true}. #{bytes => 16777216, count => 16000, enable => true}.
force_shutdown_conf() -> force_shutdown_conf() ->
#{enable => true, max_heap_size => 4194304, max_message_queue_len => 1000}. #{enable => true, max_heap_size => 4194304, max_mailbox_size => 1000}.
rpc_conf() -> rpc_conf() ->
#{ #{
@ -162,8 +162,7 @@ limiter_conf() ->
Make = fun() -> Make = fun() ->
#{ #{
burst => 0, burst => 0,
rate => infinity, rate => infinity
capacity => infinity
} }
end, end,
@ -172,7 +171,7 @@ limiter_conf() ->
Acc#{Name => Make()} Acc#{Name => Make()}
end, end,
#{}, #{},
[bytes_in, message_in, message_routing, connection, internal] [bytes, messages, message_routing, connection, internal]
). ).
stats_conf() -> stats_conf() ->
@ -268,13 +267,14 @@ t_chan_info(_) ->
t_chan_caps(_) -> t_chan_caps(_) ->
?assertMatch( ?assertMatch(
#{ #{
exclusive_subscription := false,
max_packet_size := 1048576,
max_clientid_len := 65535, max_clientid_len := 65535,
max_qos_allowed := 2, max_qos_allowed := 2,
max_topic_alias := 65535, max_topic_alias := 65535,
max_topic_levels := Level, max_topic_levels := Level,
retain_available := true, retain_available := true,
shared_subscription := true, shared_subscription := true,
subscription_identifiers := true,
wildcard_subscription := true wildcard_subscription := true
} when is_integer(Level), } when is_integer(Level),
emqx_channel:caps(channel()) emqx_channel:caps(channel())
@ -1258,7 +1258,7 @@ limiter_cfg() ->
Client = #{ Client = #{
rate => 5, rate => 5,
initial => 0, initial => 0,
capacity => 5, burst => 0,
low_watermark => 1, low_watermark => 1,
divisible => false, divisible => false,
max_retry_time => timer:seconds(5), max_retry_time => timer:seconds(5),
@ -1270,7 +1270,7 @@ limiter_cfg() ->
}. }.
bucket_cfg() -> bucket_cfg() ->
#{rate => 10, initial => 0, capacity => 10}. #{rate => 10, initial => 0, burst => 0}.
add_bucket() -> add_bucket() ->
emqx_limiter_server:add_bucket(?MODULE, message_routing, bucket_cfg()). emqx_limiter_server:add_bucket(?MODULE, message_routing, bucket_cfg()).
View File
@ -67,7 +67,8 @@ groups() ->
%% t_keepalive, %% t_keepalive,
%% t_redelivery_on_reconnect, %% t_redelivery_on_reconnect,
%% subscribe_failure_test, %% subscribe_failure_test,
t_dollar_topics t_dollar_topics,
t_sub_non_utf8_topic
]}, ]},
{mqttv5, [non_parallel_tests], [t_basic_with_props_v5]}, {mqttv5, [non_parallel_tests], [t_basic_with_props_v5]},
{others, [non_parallel_tests], [ {others, [non_parallel_tests], [
@ -297,6 +298,36 @@ t_dollar_topics(_) ->
ok = emqtt:disconnect(C), ok = emqtt:disconnect(C),
ct:pal("$ topics test succeeded"). ct:pal("$ topics test succeeded").
t_sub_non_utf8_topic(_) ->
{ok, Socket} = gen_tcp:connect({127, 0, 0, 1}, 1883, [{active, true}, binary]),
ConnPacket = emqx_frame:serialize(#mqtt_packet{
header = #mqtt_packet_header{type = 1},
variable = #mqtt_packet_connect{
clientid = <<"abcdefg">>
}
}),
ok = gen_tcp:send(Socket, ConnPacket),
receive
{tcp, _, _ConnAck = <<32, 2, 0, 0>>} -> ok
after 3000 -> ct:fail({connect_ack_not_recv, process_info(self(), messages)})
end,
SubHeader = <<130, 18, 25, 178>>,
SubTopicLen = <<0, 13>>,
%% this is not a valid utf8 topic
SubTopic = <<128, 10, 10, 12, 178, 159, 162, 47, 115, 1, 1, 1, 1>>,
SubQoS = <<1>>,
SubPacket = <<SubHeader/binary, SubTopicLen/binary, SubTopic/binary, SubQoS/binary>>,
ok = gen_tcp:send(Socket, SubPacket),
receive
{tcp_closed, _} -> ok
after 3000 -> ct:fail({should_get_disconnected, process_info(self(), messages)})
end,
timer:sleep(1000),
ListenerCounts = emqx_listeners:shutdown_count('tcp:default', {{0, 0, 0, 0}, 1883}),
TopicInvalidCount = proplists:get_value(topic_filter_invalid, ListenerCounts),
?assert(is_integer(TopicInvalidCount) andalso TopicInvalidCount > 0),
ok.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Test cases for MQTT v5 %% Test cases for MQTT v5
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
View File
@ -55,12 +55,12 @@
is_tcp_server_available/2, is_tcp_server_available/2,
is_tcp_server_available/3, is_tcp_server_available/3,
load_config/2, load_config/2,
load_config/3,
not_wait_mqtt_payload/1, not_wait_mqtt_payload/1,
read_schema_configs/2, read_schema_configs/2,
render_config_file/2, render_config_file/2,
wait_for/4, wait_for/4,
wait_mqtt_payload/1 wait_mqtt_payload/1,
select_free_port/1
]). ]).
-export([ -export([
@ -280,6 +280,7 @@ app_schema(App) ->
mustache_vars(App, Opts) -> mustache_vars(App, Opts) ->
ExtraMustacheVars = maps:get(extra_mustache_vars, Opts, #{}), ExtraMustacheVars = maps:get(extra_mustache_vars, Opts, #{}),
Defaults = #{ Defaults = #{
node_cookie => atom_to_list(erlang:get_cookie()),
platform_data_dir => app_path(App, "data"), platform_data_dir => app_path(App, "data"),
platform_etc_dir => app_path(App, "etc") platform_etc_dir => app_path(App, "etc")
}, },
@ -497,18 +498,14 @@ copy_certs(emqx_conf, Dest0) ->
copy_certs(_, _) -> copy_certs(_, _) ->
ok. ok.
load_config(SchemaModule, Config, Opts) -> load_config(SchemaModule, Config) ->
ConfigBin = ConfigBin =
case is_map(Config) of case is_map(Config) of
true -> emqx_utils_json:encode(Config); true -> emqx_utils_json:encode(Config);
false -> Config false -> Config
end, end,
ok = emqx_config:delete_override_conf_files(), ok = emqx_config:delete_override_conf_files(),
ok = emqx_config:init_load(SchemaModule, ConfigBin, Opts), ok = emqx_config:init_load(SchemaModule, ConfigBin).
ok.
load_config(SchemaModule, Config) ->
load_config(SchemaModule, Config, #{raw_with_default => true}).
-spec is_all_tcp_servers_available(Servers) -> Result when -spec is_all_tcp_servers_available(Servers) -> Result when
Servers :: [{Host, Port}], Servers :: [{Host, Port}],
@ -684,6 +681,7 @@ start_slave(Name, Opts) when is_map(Opts) ->
SlaveMod = maps:get(peer_mod, Opts, ct_slave), SlaveMod = maps:get(peer_mod, Opts, ct_slave),
Node = node_name(Name), Node = node_name(Name),
put_peer_mod(Node, SlaveMod), put_peer_mod(Node, SlaveMod),
Cookie = atom_to_list(erlang:get_cookie()),
DoStart = DoStart =
fun() -> fun() ->
case SlaveMod of case SlaveMod of
@ -695,7 +693,11 @@ start_slave(Name, Opts) when is_map(Opts) ->
{monitor_master, true}, {monitor_master, true},
{init_timeout, 20_000}, {init_timeout, 20_000},
{startup_timeout, 20_000}, {startup_timeout, 20_000},
{erl_flags, erl_flags()} {erl_flags, erl_flags()},
{env, [
{"HOCON_ENV_OVERRIDE_PREFIX", "EMQX_"},
{"EMQX_NODE__COOKIE", Cookie}
]}
] ]
); );
slave -> slave ->
@ -782,6 +784,7 @@ setup_node(Node, Opts) when is_map(Opts) ->
load_apps => LoadApps, load_apps => LoadApps,
apps => Apps, apps => Apps,
env => Env, env => Env,
join_to => JoinTo,
start_apps => StartApps start_apps => StartApps
} }
] ]
@ -1259,3 +1262,34 @@ get_or_spawn_janitor() ->
on_exit(Fun) -> on_exit(Fun) ->
Janitor = get_or_spawn_janitor(), Janitor = get_or_spawn_janitor(),
ok = emqx_test_janitor:push_on_exit_callback(Janitor, Fun). ok = emqx_test_janitor:push_on_exit_callback(Janitor, Fun).
%%-------------------------------------------------------------------------------
%% Select a free transport port from the OS
%%-------------------------------------------------------------------------------
%% @doc get unused port from OS
-spec select_free_port(tcp | udp | ssl | quic) -> inets:port_number().
select_free_port(tcp) ->
select_free_port(gen_tcp, listen);
select_free_port(udp) ->
select_free_port(gen_udp, open);
select_free_port(ssl) ->
select_free_port(tcp);
select_free_port(quic) ->
select_free_port(udp).
select_free_port(GenModule, Fun) when
GenModule == gen_tcp orelse
GenModule == gen_udp
->
{ok, S} = GenModule:Fun(0, [{reuseaddr, true}]),
{ok, Port} = inet:port(S),
ok = GenModule:close(S),
case os:type() of
{unix, darwin} ->
%% on macOS, the address may still be in use right after closing the port
timer:sleep(500);
_ ->
skip
end,
ct:pal("Select free OS port: ~p", [Port]),
Port.
View File
@ -19,6 +19,7 @@
-compile(export_all). -compile(export_all).
-compile(nowarn_export_all). -compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl"). -include_lib("eunit/include/eunit.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
all() -> emqx_common_test_helpers:all(?MODULE). all() -> emqx_common_test_helpers:all(?MODULE).
@ -50,7 +51,6 @@ t_fill_default_values(_) ->
}, },
<<"route_batch_clean">> := false, <<"route_batch_clean">> := false,
<<"session_locking_strategy">> := quorum, <<"session_locking_strategy">> := quorum,
<<"shared_dispatch_ack_enabled">> := false,
<<"shared_subscription_strategy">> := round_robin <<"shared_subscription_strategy">> := round_robin
} }
}, },
@ -78,3 +78,21 @@ t_init_load(_Config) ->
?assertEqual(ExpectRootNames, lists:sort(emqx_config:get_root_names())), ?assertEqual(ExpectRootNames, lists:sort(emqx_config:get_root_names())),
?assertMatch({ok, #{raw_config := 128}}, emqx:update_config([mqtt, max_topic_levels], 128)), ?assertMatch({ok, #{raw_config := 128}}, emqx:update_config([mqtt, max_topic_levels], 128)),
ok = file:delete(DeprecatedFile). ok = file:delete(DeprecatedFile).
t_unknown_rook_keys(_) ->
?check_trace(
#{timetrap => 1000},
begin
ok = emqx_config:init_load(
emqx_schema, <<"test_1 {}\n test_2 {sub = 100}\n listeners {}">>
),
?block_until(#{?snk_kind := unknown_config_keys})
end,
fun(Trace) ->
?assertMatch(
[#{unknown_config_keys := "test_1,test_2"}],
?of_kind(unknown_config_keys, Trace)
)
end
),
ok.
View File
@ -177,7 +177,9 @@ t_sub_key_update_remove(_Config) ->
{ok, #{post_config_update => #{emqx_config_handler_SUITE => ok}}}, {ok, #{post_config_update => #{emqx_config_handler_SUITE => ok}}},
emqx:remove_config(KeyPath) emqx:remove_config(KeyPath)
), ),
?assertError({config_not_found, KeyPath}, emqx:get_raw_config(KeyPath)), ?assertError(
{config_not_found, [<<"sysmon">>, os, cpu_check_interval]}, emqx:get_raw_config(KeyPath)
),
OSKey = maps:keys(emqx:get_raw_config([sysmon, os])), OSKey = maps:keys(emqx:get_raw_config([sysmon, os])),
?assertEqual(false, lists:member(<<"cpu_check_interval">>, OSKey)), ?assertEqual(false, lists:member(<<"cpu_check_interval">>, OSKey)),
?assert(length(OSKey) > 0), ?assert(length(OSKey) > 0),
View File
@ -38,8 +38,6 @@ init_per_suite(Config) ->
ok = meck:new(emqx_cm, [passthrough, no_history, no_link]), ok = meck:new(emqx_cm, [passthrough, no_history, no_link]),
ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end), ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end),
ok = meck:expect(emqx_cm, mark_channel_disconnected, fun(_) -> ok end), ok = meck:expect(emqx_cm, mark_channel_disconnected, fun(_) -> ok end),
%% Meck Limiter
ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
%% Meck Pd %% Meck Pd
ok = meck:new(emqx_pd, [passthrough, no_history, no_link]), ok = meck:new(emqx_pd, [passthrough, no_history, no_link]),
%% Meck Metrics %% Meck Metrics
@ -67,7 +65,6 @@ end_per_suite(_Config) ->
ok = meck:unload(emqx_transport), ok = meck:unload(emqx_transport),
catch meck:unload(emqx_channel), catch meck:unload(emqx_channel),
ok = meck:unload(emqx_cm), ok = meck:unload(emqx_cm),
ok = meck:unload(emqx_htb_limiter),
ok = meck:unload(emqx_pd), ok = meck:unload(emqx_pd),
ok = meck:unload(emqx_metrics), ok = meck:unload(emqx_metrics),
ok = meck:unload(emqx_hooks), ok = meck:unload(emqx_hooks),
@ -421,20 +418,28 @@ t_ensure_rate_limit(_) ->
{ok, [], State1} = emqx_connection:check_limiter([], [], WhenOk, [], st(#{limiter => Limiter})), {ok, [], State1} = emqx_connection:check_limiter([], [], WhenOk, [], st(#{limiter => Limiter})),
?assertEqual(Limiter, emqx_connection:info(limiter, State1)), ?assertEqual(Limiter, emqx_connection:info(limiter, State1)),
ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
ok = meck:expect(
emqx_htb_limiter,
make_infinity_limiter,
fun() -> non_infinity end
),
ok = meck:expect( ok = meck:expect(
emqx_htb_limiter, emqx_htb_limiter,
check, check,
fun(_, Client) -> {pause, 3000, undefined, Client} end fun(_, Client) -> {pause, 3000, undefined, Client} end
), ),
{ok, State2} = emqx_connection:check_limiter( {ok, State2} = emqx_connection:check_limiter(
[{1000, bytes_in}], [{1000, bytes}],
[], [],
WhenOk, WhenOk,
[], [],
st(#{limiter => Limiter}) st(#{limiter => init_limiter()})
), ),
meck:unload(emqx_htb_limiter), meck:unload(emqx_htb_limiter),
ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
?assertNotEqual(undefined, emqx_connection:info(limiter_timer, State2)). ?assertNotEqual(undefined, emqx_connection:info(limiter_timer, State2)).
t_activate_socket(_) -> t_activate_socket(_) ->
@ -495,6 +500,7 @@ t_get_conn_info(_) ->
end). end).
t_oom_shutdown(init, Config) -> t_oom_shutdown(init, Config) ->
ok = snabbkaffe:stop(),
ok = snabbkaffe:start_trace(), ok = snabbkaffe:start_trace(),
ok = meck:new(emqx_utils, [non_strict, passthrough, no_history, no_link]), ok = meck:new(emqx_utils, [non_strict, passthrough, no_history, no_link]),
meck:expect( meck:expect(
@ -703,31 +709,32 @@ handle_call(Pid, Call, St) -> emqx_connection:handle_call(Pid, Call, St).
-define(LIMITER_ID, 'tcp:default'). -define(LIMITER_ID, 'tcp:default').
init_limiter() -> init_limiter() ->
emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], limiter_cfg()). emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes, messages], limiter_cfg()).
limiter_cfg() -> limiter_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
Cfg = bucket_cfg(), Cfg = bucket_cfg(),
Client = #{ Client = client_cfg(),
rate => Infinity, #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}.
bucket_cfg() ->
#{rate => infinity, initial => 0, burst => 0}.
client_cfg() ->
#{
rate => infinity,
initial => 0, initial => 0,
capacity => Infinity, burst => 0,
low_watermark => 1, low_watermark => 1,
divisible => false, divisible => false,
max_retry_time => timer:seconds(5), max_retry_time => timer:seconds(5),
failure_strategy => force failure_strategy => force
}, }.
#{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}.
bucket_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
#{rate => Infinity, initial => 0, capacity => Infinity}.
add_bucket() -> add_bucket() ->
Cfg = bucket_cfg(), Cfg = bucket_cfg(),
emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg), emqx_limiter_server:add_bucket(?LIMITER_ID, bytes, Cfg),
emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg). emqx_limiter_server:add_bucket(?LIMITER_ID, messages, Cfg).
del_bucket() -> del_bucket() ->
emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in), emqx_limiter_server:del_bucket(?LIMITER_ID, bytes),
emqx_limiter_server:del_bucket(?LIMITER_ID, message_in). emqx_limiter_server:del_bucket(?LIMITER_ID, messages).
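%% Lifecycle sketch (illustrative): the helpers above are meant to bracket a
%% test so each run sees a fresh bucket. The wrapper name is hypothetical.
with_limiter_bucket(TestFun) ->
    add_bucket(),
    try
        TestFun(init_limiter())
    after
        del_bucket()
    end.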

View File

@ -35,6 +35,7 @@ all() ->
init_per_suite(Config) -> init_per_suite(Config) ->
application:load(emqx), application:load(emqx),
{ok, _} = application:ensure_all_started(ssl),
emqx_config:save_schema_mod_and_names(emqx_schema), emqx_config:save_schema_mod_and_names(emqx_schema),
emqx_common_test_helpers:boot_modules(all), emqx_common_test_helpers:boot_modules(all),
Config. Config.
@ -328,7 +329,15 @@ drain_msgs() ->
clear_crl_cache() -> clear_crl_cache() ->
%% reset the CRL cache %% reset the CRL cache
Ref = monitor(process, whereis(ssl_manager)),
exit(whereis(ssl_manager), kill), exit(whereis(ssl_manager), kill),
receive
{'DOWN', Ref, process, _, _} ->
ok
after 1_000 ->
ct:fail("ssl_manager didn't die")
end,
ensure_ssl_manager_alive(),
ok. ok.
force_cacertfile(Cacertfile) -> force_cacertfile(Cacertfile) ->
@ -382,7 +391,6 @@ setup_crl_options(Config, #{is_cached := IsCached} = Opts) ->
false -> false ->
%% ensure cache is empty %% ensure cache is empty
clear_crl_cache(), clear_crl_cache(),
ct:sleep(200),
ok ok
end, end,
drain_msgs(), drain_msgs(),
@ -459,6 +467,13 @@ of_kinds(Trace0, Kinds0) ->
Trace0 Trace0
). ).
ensure_ssl_manager_alive() ->
?retry(
_Sleep0 = 200,
_Attempts0 = 50,
true = is_pid(whereis(ssl_manager))
).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Test cases %% Test cases
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------

View File

@ -47,13 +47,14 @@ init_per_testcase(Case, Config) when
Case =:= t_max_conns_tcp; Case =:= t_current_conns_tcp Case =:= t_max_conns_tcp; Case =:= t_current_conns_tcp
-> ->
catch emqx_config_handler:stop(), catch emqx_config_handler:stop(),
Port = emqx_common_test_helpers:select_free_port(tcp),
{ok, _} = emqx_config_handler:start_link(), {ok, _} = emqx_config_handler:start_link(),
PrevListeners = emqx_config:get([listeners], #{}), PrevListeners = emqx_config:get([listeners], #{}),
PureListeners = remove_default_limiter(PrevListeners), PureListeners = remove_default_limiter(PrevListeners),
PureListeners2 = PureListeners#{ PureListeners2 = PureListeners#{
tcp => #{ tcp => #{
listener_test => #{ listener_test => #{
bind => {"127.0.0.1", 9999}, bind => {"127.0.0.1", Port},
max_connections => 4321, max_connections => 4321,
limiter => #{} limiter => #{}
} }
@ -63,19 +64,20 @@ init_per_testcase(Case, Config) when
ok = emqx_listeners:start(), ok = emqx_listeners:start(),
[ [
{prev_listener_conf, PrevListeners} {prev_listener_conf, PrevListeners},
{tcp_port, Port}
| Config | Config
]; ];
init_per_testcase(t_wss_conn, Config) -> init_per_testcase(t_wss_conn, Config) ->
catch emqx_config_handler:stop(), catch emqx_config_handler:stop(),
Port = emqx_common_test_helpers:select_free_port(ssl),
{ok, _} = emqx_config_handler:start_link(), {ok, _} = emqx_config_handler:start_link(),
PrevListeners = emqx_config:get([listeners], #{}), PrevListeners = emqx_config:get([listeners], #{}),
PureListeners = remove_default_limiter(PrevListeners), PureListeners = remove_default_limiter(PrevListeners),
PureListeners2 = PureListeners#{ PureListeners2 = PureListeners#{
wss => #{ wss => #{
listener_test => #{ listener_test => #{
bind => {{127, 0, 0, 1}, 9998}, bind => {{127, 0, 0, 1}, Port},
limiter => #{}, limiter => #{},
ssl_options => #{ ssl_options => #{
cacertfile => ?CERTS_PATH("cacert.pem"), cacertfile => ?CERTS_PATH("cacert.pem"),
@ -89,7 +91,8 @@ init_per_testcase(t_wss_conn, Config) ->
ok = emqx_listeners:start(), ok = emqx_listeners:start(),
[ [
{prev_listener_conf, PrevListeners} {prev_listener_conf, PrevListeners},
{wss_port, Port}
| Config | Config
]; ];
init_per_testcase(_, Config) -> init_per_testcase(_, Config) ->
@ -171,20 +174,30 @@ t_restart_listeners_with_hibernate_after_disabled(_Config) ->
ok = emqx_listeners:stop(), ok = emqx_listeners:stop(),
emqx_config:put([listeners], OldLConf). emqx_config:put([listeners], OldLConf).
t_max_conns_tcp(_) -> t_max_conns_tcp(Config) ->
%% Note: Using a string representation for the bind address like %% Note: Using a string representation for the bind address like
%% "127.0.0.1" does not work %% "127.0.0.1" does not work
?assertEqual(4321, emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})). ?assertEqual(
4321,
emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, ?config(tcp_port, Config)})
).
t_current_conns_tcp(_) -> t_current_conns_tcp(Config) ->
?assertEqual(0, emqx_listeners:current_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})). ?assertEqual(
0,
emqx_listeners:current_conns('tcp:listener_test', {
{127, 0, 0, 1}, ?config(tcp_port, Config)
})
).
t_wss_conn(_) -> t_wss_conn(Config) ->
{ok, Socket} = ssl:connect({127, 0, 0, 1}, 9998, [{verify, verify_none}], 1000), {ok, Socket} = ssl:connect(
{127, 0, 0, 1}, ?config(wss_port, Config), [{verify, verify_none}], 1000
),
ok = ssl:close(Socket). ok = ssl:close(Socket).
t_quic_conn(Config) -> t_quic_conn(Config) ->
Port = 24568, Port = emqx_common_test_helpers:select_free_port(quic),
DataDir = ?config(data_dir, Config), DataDir = ?config(data_dir, Config),
SSLOpts = #{ SSLOpts = #{
password => ?SERVER_KEY_PASSWORD, password => ?SERVER_KEY_PASSWORD,
@ -207,7 +220,7 @@ t_quic_conn(Config) ->
emqx_listeners:stop_listener(quic, ?FUNCTION_NAME, #{bind => Port}). emqx_listeners:stop_listener(quic, ?FUNCTION_NAME, #{bind => Port}).
t_ssl_password_cert(Config) -> t_ssl_password_cert(Config) ->
Port = 24568, Port = emqx_common_test_helpers:select_free_port(ssl),
DataDir = ?config(data_dir, Config), DataDir = ?config(data_dir, Config),
SSLOptsPWD = #{ SSLOptsPWD = #{
password => ?SERVER_KEY_PASSWORD, password => ?SERVER_KEY_PASSWORD,

View File

@ -22,7 +22,16 @@
-include_lib("emqx/include/emqx_mqtt.hrl"). -include_lib("emqx/include/emqx_mqtt.hrl").
-include_lib("eunit/include/eunit.hrl"). -include_lib("eunit/include/eunit.hrl").
all() -> emqx_common_test_helpers:all(?MODULE). all() ->
emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
emqx_common_test_helpers:start_apps([]),
Config.
end_per_suite(_Config) ->
emqx_common_test_helpers:stop_apps([]),
ok.
t_check_pub(_) -> t_check_pub(_) ->
OldConf = emqx:get_config([zones], #{}), OldConf = emqx:get_config([zones], #{}),

View File

@ -2026,18 +2026,7 @@ stop_emqx() ->
%% select a free port picked by the OS %% select a free port picked by the OS
-spec select_port() -> inet:port_number(). -spec select_port() -> inet:port_number().
select_port() -> select_port() ->
{ok, S} = gen_udp:open(0, [{reuseaddr, true}]), emqx_common_test_helpers:select_free_port(quic).
{ok, {_, Port}} = inet:sockname(S),
gen_udp:close(S),
case os:type() of
{unix, darwin} ->
%% in MacOS, still get address_in_use after close port
timer:sleep(500);
_ ->
skip
end,
ct:pal("select port: ~p", [Port]),
Port.
-spec via_stream({quic, quicer:connection_handle(), quicer:stream_handle()}) -> -spec via_stream({quic, quicer:connection_handle(), quicer:stream_handle()}) ->
quicer:stream_handle(). quicer:stream_handle().

View File

@ -38,6 +38,7 @@
-define(LOGT(Format, Args), ct:pal("TEST_SUITE: " ++ Format, Args)). -define(LOGT(Format, Args), ct:pal("TEST_SUITE: " ++ Format, Args)).
-define(RATE(Rate), to_rate(Rate)). -define(RATE(Rate), to_rate(Rate)).
-define(NOW, erlang:system_time(millisecond)). -define(NOW, erlang:system_time(millisecond)).
-define(ROOT_COUNTER_IDX, 1).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Setups %% Setups
@ -72,7 +73,7 @@ t_consume(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
Cfg#{ Cfg#{
rate := 100, rate := 100,
capacity := 100, burst := 0,
initial := 100, initial := 100,
max_retry_time := 1000, max_retry_time := 1000,
failure_strategy := force failure_strategy := force
@ -89,7 +90,7 @@ t_retry(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
Cfg#{ Cfg#{
rate := 50, rate := 50,
capacity := 200, burst := 150,
initial := 0, initial := 0,
max_retry_time := 1000, max_retry_time := 1000,
failure_strategy := force failure_strategy := force
@ -109,7 +110,7 @@ t_restore(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
Cfg#{ Cfg#{
rate := 1, rate := 1,
capacity := 200, burst := 199,
initial := 50, initial := 50,
max_retry_time := 100, max_retry_time := 100,
failure_strategy := force failure_strategy := force
@ -129,7 +130,7 @@ t_max_retry_time(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
Cfg#{ Cfg#{
rate := 1, rate := 1,
capacity := 1, burst := 0,
max_retry_time := 500, max_retry_time := 500,
failure_strategy := drop failure_strategy := drop
} }
@ -139,8 +140,12 @@ t_max_retry_time(_) ->
Begin = ?NOW, Begin = ?NOW,
Result = emqx_htb_limiter:consume(101, Client), Result = emqx_htb_limiter:consume(101, Client),
?assertMatch({drop, _}, Result), ?assertMatch({drop, _}, Result),
Time = ?NOW - Begin, End = ?NOW,
?assert(Time >= 500 andalso Time < 550) Time = End - Begin,
?assert(
Time >= 500 andalso Time < 550,
lists:flatten(io_lib:format("Begin:~p, End:~p, Time:~p~n", [Begin, End, Time]))
)
end, end,
with_per_client(Cfg, Case). with_per_client(Cfg, Case).
@ -150,7 +155,7 @@ t_divisible(_) ->
divisible := true, divisible := true,
rate := ?RATE("1000/1s"), rate := ?RATE("1000/1s"),
initial := 600, initial := 600,
capacity := 600 burst := 0
} }
end, end,
Case = fun(BucketCfg) -> Case = fun(BucketCfg) ->
@ -176,7 +181,7 @@ t_low_watermark(_) ->
low_watermark := 400, low_watermark := 400,
rate := ?RATE("1000/1s"), rate := ?RATE("1000/1s"),
initial := 1000, initial := 1000,
capacity := 1000 burst := 0
} }
end, end,
Case = fun(BucketCfg) -> Case = fun(BucketCfg) ->
@ -201,23 +206,22 @@ t_infinity_client(_) ->
Fun = fun(Cfg) -> Cfg end, Fun = fun(Cfg) -> Cfg end,
Case = fun(Cfg) -> Case = fun(Cfg) ->
Client = connect(Cfg), Client = connect(Cfg),
InfVal = emqx_limiter_schema:infinity_value(), ?assertMatch(infinity, Client),
?assertMatch(#{bucket := #{rate := InfVal}}, Client),
Result = emqx_htb_limiter:check(100000, Client), Result = emqx_htb_limiter:check(100000, Client),
?assertEqual({ok, Client}, Result) ?assertEqual({ok, Client}, Result)
end, end,
with_per_client(Fun, Case). with_per_client(Fun, Case).
t_try_restore_agg(_) -> t_try_restore_with_bucket(_) ->
Fun = fun(#{client := Cli} = Bucket) -> Fun = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := 1, rate := 100,
capacity := 200, burst := 100,
initial := 50 initial := 50
}, },
Cli2 = Cli#{ Cli2 = Cli#{
rate := infinity, rate := infinity,
capacity := infinity, burst := 0,
divisible := true, divisible := true,
max_retry_time := 100, max_retry_time := 100,
failure_strategy := force failure_strategy := force
@ -239,11 +243,11 @@ t_short_board(_) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := ?RATE("100/1s"), rate := ?RATE("100/1s"),
initial := 0, initial := 0,
capacity := 100 burst := 0
}, },
Cli2 = Cli#{ Cli2 = Cli#{
rate := ?RATE("600/1s"), rate := ?RATE("600/1s"),
capacity := 600, burst := 0,
initial := 600 initial := 600
}, },
Bucket2#{client := Cli2} Bucket2#{client := Cli2}
@ -261,46 +265,45 @@ t_rate(_) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := ?RATE("100/100ms"), rate := ?RATE("100/100ms"),
initial := 0, initial := 0,
capacity := infinity burst := 0
}, },
Cli2 = Cli#{ Cli2 = Cli#{
rate := infinity, rate := infinity,
capacity := infinity, burst := 0,
initial := 0 initial := 0
}, },
Bucket2#{client := Cli2} Bucket2#{client := Cli2}
end, end,
Case = fun(Cfg) -> Case = fun(Cfg) ->
Time = 1000,
Client = connect(Cfg), Client = connect(Cfg),
Ts1 = erlang:system_time(millisecond),
C1 = emqx_htb_limiter:available(Client), C1 = emqx_htb_limiter:available(Client),
timer:sleep(1000), timer:sleep(1100),
Ts2 = erlang:system_time(millisecond),
C2 = emqx_htb_limiter:available(Client), C2 = emqx_htb_limiter:available(Client),
ShouldInc = floor((Ts2 - Ts1) / 100) * 100, ShouldInc = floor(Time / 100) * 100,
Inc = C2 - C1, Inc = C2 - C1,
?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate") ?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate")
end, end,
with_bucket(Fun, Case). with_bucket(Fun, Case).
t_capacity(_) -> t_capacity(_) ->
Capacity = 600, Capacity = 1200,
Fun = fun(#{client := Cli} = Bucket) -> Fun = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := ?RATE("100/100ms"), rate := ?RATE("100/100ms"),
initial := 0, initial := 0,
capacity := 600 burst := 200
}, },
Cli2 = Cli#{ Cli2 = Cli#{
rate := infinity, rate := infinity,
capacity := infinity, burst := 0,
initial := 0 initial := 0
}, },
Bucket2#{client := Cli2} Bucket2#{client := Cli2}
end, end,
Case = fun(Cfg) -> Case = fun(Cfg) ->
Client = connect(Cfg), Client = connect(Cfg),
timer:sleep(1000), timer:sleep(1500),
C1 = emqx_htb_limiter:available(Client), C1 = emqx_htb_limiter:available(Client),
?assertEqual(Capacity, C1, "test bucket capacity") ?assertEqual(Capacity, C1, "test bucket capacity")
end, end,
@ -318,11 +321,11 @@ t_collaborative_alloc(_) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := ?RATE("400/1s"), rate := ?RATE("400/1s"),
initial := 0, initial := 0,
capacity := 600 burst := 200
}, },
Cli2 = Cli#{ Cli2 = Cli#{
rate := ?RATE("50"), rate := ?RATE("50"),
capacity := 100, burst := 50,
initial := 100 initial := 100
}, },
Bucket2#{client := Cli2} Bucket2#{client := Cli2}
@ -363,11 +366,11 @@ t_burst(_) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := ?RATE("200/1s"), rate := ?RATE("200/1s"),
initial := 0, initial := 0,
capacity := 200 burst := 0
}, },
Cli2 = Cli#{ Cli2 = Cli#{
rate := ?RATE("50/1s"), rate := ?RATE("50/1s"),
capacity := 200, burst := 150,
divisible := true divisible := true
}, },
Bucket2#{client := Cli2} Bucket2#{client := Cli2}
@ -392,38 +395,6 @@ t_burst(_) ->
Case Case
). ).
t_limit_global_with_unlimit_other(_) ->
GlobalMod = fun(#{message_routing := MR} = Cfg) ->
Cfg#{message_routing := MR#{rate := ?RATE("600/1s")}}
end,
Bucket = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{
rate := infinity,
initial := 0,
capacity := infinity
},
Cli2 = Cli#{
rate := infinity,
capacity := infinity,
initial := 0
},
Bucket2#{client := Cli2}
end,
Case = fun() ->
C1 = counters:new(1, []),
start_client({b1, Bucket}, ?NOW + 2000, C1, 20),
timer:sleep(2100),
check_average_rate(C1, 2, 600)
end,
with_global(
GlobalMod,
[{b1, Bucket}],
Case
).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Test Cases container %% Test Cases container
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -432,7 +403,7 @@ t_check_container(_) ->
Cfg#{ Cfg#{
rate := ?RATE("1000/1s"), rate := ?RATE("1000/1s"),
initial := 1000, initial := 1000,
capacity := 1000 burst := 0
} }
end, end,
Case = fun(#{client := Client} = BucketCfg) -> Case = fun(#{client := Client} = BucketCfg) ->
@ -452,38 +423,6 @@ t_check_container(_) ->
end, end,
with_per_client(Cfg, Case). with_per_client(Cfg, Case).
%%--------------------------------------------------------------------
%% Test Override
%%--------------------------------------------------------------------
t_bucket_no_client(_) ->
Rate = ?RATE("1/s"),
GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) ->
Cfg#{client := Client#{message_routing := MR#{rate := Rate}}}
end,
BucketMod = fun(Bucket) ->
maps:remove(client, Bucket)
end,
Case = fun() ->
Limiter = connect(BucketMod(make_limiter_cfg())),
?assertMatch(#{rate := Rate}, Limiter)
end,
with_global(GlobalMod, [BucketMod], Case).
t_bucket_client(_) ->
GlobalRate = ?RATE("1/s"),
BucketRate = ?RATE("10/s"),
GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) ->
Cfg#{client := Client#{message_routing := MR#{rate := GlobalRate}}}
end,
BucketMod = fun(#{client := Client} = Bucket) ->
Bucket#{client := Client#{rate := BucketRate}}
end,
Case = fun() ->
Limiter = connect(BucketMod(make_limiter_cfg())),
?assertMatch(#{rate := BucketRate}, Limiter)
end,
with_global(GlobalMod, [BucketMod], Case).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Test Cases misc %% Test Cases misc
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -565,13 +504,241 @@ t_schema_unit(_) ->
?assertMatch({error, _}, M:to_rate("100MB/1")), ?assertMatch({error, _}, M:to_rate("100MB/1")),
?assertMatch({error, _}, M:to_rate("100/10x")), ?assertMatch({error, _}, M:to_rate("100/10x")),
?assertEqual({ok, emqx_limiter_schema:infinity_value()}, M:to_capacity("infinity")), ?assertEqual({ok, infinity}, M:to_capacity("infinity")),
?assertEqual({ok, 100}, M:to_capacity("100")), ?assertEqual({ok, 100}, M:to_capacity("100")),
?assertEqual({ok, 100 * 1024}, M:to_capacity("100KB")), ?assertEqual({ok, 100 * 1024}, M:to_capacity("100KB")),
?assertEqual({ok, 100 * 1024 * 1024}, M:to_capacity("100MB")), ?assertEqual({ok, 100 * 1024 * 1024}, M:to_capacity("100MB")),
?assertEqual({ok, 100 * 1024 * 1024 * 1024}, M:to_capacity("100GB")), ?assertEqual({ok, 100 * 1024 * 1024 * 1024}, M:to_capacity("100GB")),
ok. ok.
t_compatibility_for_capacity(_) ->
CfgStr = <<
""
"\n"
"listeners.tcp.default {\n"
" bind = \"0.0.0.0:1883\"\n"
" max_connections = 1024000\n"
" limiter.messages.capacity = infinity\n"
" limiter.client.messages.capacity = infinity\n"
"}\n"
""
>>,
?assertMatch(
#{
messages := #{burst := 0},
client := #{messages := #{burst := 0}}
},
parse_and_check(CfgStr)
).
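%% Conversion note (inferred from the updated fixtures in this suite, not
%% stated by the patch): the legacy `capacity` appears to translate to the
%% new fields as burst = capacity - rate (e.g. rate 50 / capacity 200
%% becomes burst 150 in t_retry above), with infinity collapsing to
%% burst 0 as asserted here. A sketch of that presumed mapping:
legacy_capacity_to_burst(_Rate, infinity) ->
    0;
legacy_capacity_to_burst(Rate, Capacity) when
    is_integer(Rate), is_integer(Capacity)
->
    max(Capacity - Rate, 0).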
t_compatibility_for_message_in(_) ->
CfgStr = <<
""
"\n"
"listeners.tcp.default {\n"
" bind = \"0.0.0.0:1883\"\n"
" max_connections = 1024000\n"
" limiter.message_in.rate = infinity\n"
" limiter.client.message_in.rate = infinity\n"
"}\n"
""
>>,
?assertMatch(
#{
messages := #{rate := infinity},
client := #{messages := #{rate := infinity}}
},
parse_and_check(CfgStr)
).
t_compatibility_for_bytes_in(_) ->
CfgStr = <<
""
"\n"
"listeners.tcp.default {\n"
" bind = \"0.0.0.0:1883\"\n"
" max_connections = 1024000\n"
" limiter.bytes_in.rate = infinity\n"
" limiter.client.bytes_in.rate = infinity\n"
"}\n"
""
>>,
?assertMatch(
#{
bytes := #{rate := infinity},
client := #{bytes := #{rate := infinity}}
},
parse_and_check(CfgStr)
).
t_extract_with_type(_) ->
IsOnly = fun
(_Key, Cfg) when map_size(Cfg) =/= 1 ->
false;
(Key, Cfg) ->
maps:is_key(Key, Cfg)
end,
Checker = fun
(Type, #{client := Client} = Cfg) ->
Cfg2 = maps:remove(client, Cfg),
IsOnly(Type, Client) andalso
(IsOnly(Type, Cfg2) orelse
map_size(Cfg2) =:= 0);
(Type, Cfg) ->
IsOnly(Type, Cfg)
end,
?assertEqual(undefined, emqx_limiter_schema:extract_with_type(messages, undefined)),
?assert(
Checker(
messages,
emqx_limiter_schema:extract_with_type(messages, #{
messages => #{rate => 1}, bytes => #{rate => 1}
})
)
),
?assert(
Checker(
messages,
emqx_limiter_schema:extract_with_type(messages, #{
messages => #{rate => 1},
bytes => #{rate => 1},
client => #{messages => #{rate => 2}}
})
)
),
?assert(
Checker(
messages,
emqx_limiter_schema:extract_with_type(messages, #{
client => #{messages => #{rate => 2}, bytes => #{rate => 1}}
})
)
).
%%--------------------------------------------------------------------
%% Test Cases Create Instance
%%--------------------------------------------------------------------
t_create_instance_with_infinity_node(_) ->
emqx_limiter_manager:insert_bucket(?FUNCTION_NAME, bytes, ?FUNCTION_NAME),
Cases = make_create_test_data_with_infinity_node(?FUNCTION_NAME),
lists:foreach(
fun({Cfg, Expected}) ->
{ok, Result} = emqx_limiter_server:connect(?FUNCTION_NAME, bytes, Cfg),
IsMatched =
case is_atom(Expected) of
true ->
Result =:= Expected;
_ ->
Expected(Result)
end,
?assert(
IsMatched,
lists:flatten(
io_lib:format("Got unexpected:~p~n, Cfg:~p~n", [
Result, Cfg
])
)
)
end,
Cases
),
emqx_limiter_manager:delete_bucket(?FUNCTION_NAME, bytes),
ok.
t_not_exists_instance(_) ->
Cfg = #{bytes => #{rate => 100, burst => 0, initial => 0}},
?assertEqual(
{error, invalid_bucket},
emqx_limiter_server:connect(?FUNCTION_NAME, bytes, Cfg)
),
?assertEqual(
{error, invalid_bucket},
emqx_limiter_server:connect(?FUNCTION_NAME, not_exists, Cfg)
),
ok.
t_create_instance_with_node(_) ->
GlobalMod = fun(#{message_routing := MR} = Cfg) ->
Cfg#{
message_routing := MR#{rate := ?RATE("200/1s")},
messages := MR#{rate := ?RATE("200/1s")}
}
end,
B1 = fun(Bucket) ->
Bucket#{rate := ?RATE("400/1s")}
end,
B2 = fun(Bucket) ->
Bucket#{rate := infinity}
end,
IsRefLimiter = fun
({ok, #{tokens := _}}, _IsRoot) ->
false;
({ok, #{bucket := #{index := ?ROOT_COUNTER_IDX}}}, true) ->
true;
({ok, #{bucket := #{index := Index}}}, false) when Index =/= ?ROOT_COUNTER_IDX ->
true;
(Result, _IsRoot) ->
ct:pal("The result is:~p~n", [Result]),
false
end,
Case = fun() ->
BucketCfg = make_limiter_cfg(),
?assert(
IsRefLimiter(emqx_limiter_server:connect(b1, message_routing, B1(BucketCfg)), false)
),
?assert(
IsRefLimiter(emqx_limiter_server:connect(b2, message_routing, B2(BucketCfg)), true)
),
?assert(IsRefLimiter(emqx_limiter_server:connect(x, messages, undefined), true)),
?assertNot(IsRefLimiter(emqx_limiter_server:connect(x, bytes, undefined), false))
end,
with_global(
GlobalMod,
[{b1, B1}, {b2, B2}],
Case
),
ok.
%%--------------------------------------------------------------------
%% Test Cases emqx_esockd_htb_limiter
%%--------------------------------------------------------------------
t_create_esockd_htb_limiter(_) ->
Opts = emqx_esockd_htb_limiter:new_create_options(?FUNCTION_NAME, bytes, undefined),
?assertMatch(
#{module := _, id := ?FUNCTION_NAME, type := bytes, bucket := undefined},
Opts
),
Limiter = emqx_esockd_htb_limiter:create(Opts),
?assertMatch(
#{module := _, name := bytes, limiter := infinity},
Limiter
),
?assertEqual(ok, emqx_esockd_htb_limiter:delete(Limiter)),
ok.
t_esockd_htb_consume(_) ->
ClientCfg = emqx_limiter_schema:default_client_config(),
Cfg = #{client => #{bytes => ClientCfg#{rate := 50, max_retry_time := 0}}},
Opts = emqx_esockd_htb_limiter:new_create_options(?FUNCTION_NAME, bytes, Cfg),
Limiter = emqx_esockd_htb_limiter:create(Opts),
C1R = emqx_esockd_htb_limiter:consume(51, Limiter),
?assertMatch({pause, _Ms, _Limiter2}, C1R),
timer:sleep(300),
C2R = emqx_esockd_htb_limiter:consume(50, Limiter),
?assertMatch({ok, _}, C2R),
ok.
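%% Retry sketch (illustrative): callers are expected to honor the
%% {pause, Ms, Limiter2} result by waiting before retrying, as the two
%% asserts above suggest. This loop is an assumption about caller behavior,
%% not suite code.
consume_blocking(Bytes, Limiter) ->
    case emqx_esockd_htb_limiter:consume(Bytes, Limiter) of
        {ok, _} = Ok ->
            Ok;
        {pause, Ms, Limiter2} ->
            timer:sleep(Ms),
            consume_blocking(Bytes, Limiter2)
    end.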
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%%% Internal functions %%% Internal functions
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -748,17 +915,16 @@ connect(Name, Cfg) ->
Limiter. Limiter.
make_limiter_cfg() -> make_limiter_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
Client = #{ Client = #{
rate => Infinity, rate => infinity,
initial => 0, initial => 0,
capacity => Infinity, burst => 0,
low_watermark => 0, low_watermark => 0,
divisible => false, divisible => false,
max_retry_time => timer:seconds(5), max_retry_time => timer:seconds(5),
failure_strategy => force failure_strategy => force
}, },
#{client => Client, rate => Infinity, initial => 0, capacity => Infinity}. #{client => Client, rate => infinity, initial => 0, burst => 0}.
add_bucket(Cfg) -> add_bucket(Cfg) ->
add_bucket(?MODULE, Cfg). add_bucket(?MODULE, Cfg).
@ -812,3 +978,68 @@ apply_modifier(Pairs, #{default := Template}) ->
Acc#{N => M(Template)} Acc#{N => M(Template)}
end, end,
lists:foldl(Fun, #{}, Pairs). lists:foldl(Fun, #{}, Pairs).
parse_and_check(ConfigString) ->
ok = emqx_common_test_helpers:load_config(emqx_schema, ConfigString),
emqx:get_config([listeners, tcp, default, limiter]).
make_create_test_data_with_infinity_node(FakeInstance) ->
Infinity = emqx_htb_limiter:make_infinity_limiter(),
ClientCfg = emqx_limiter_schema:default_client_config(),
InfinityRef = emqx_limiter_bucket_ref:infinity_bucket(),
MkC = fun(Rate) ->
#{client => #{bytes => ClientCfg#{rate := Rate}}}
end,
MkB = fun(Rate) ->
#{bytes => #{rate => Rate, burst => 0, initial => 0}}
end,
MkA = fun(Client, Bucket) ->
maps:merge(MkC(Client), MkB(Bucket))
end,
IsRefLimiter = fun(Expected) ->
fun
(#{tokens := _}) -> false;
(#{bucket := Bucket}) -> Bucket =:= Expected;
(_) -> false
end
end,
IsTokenLimiter = fun(Expected) ->
fun
(#{tokens := _, bucket := Bucket}) -> Bucket =:= Expected;
(_) -> false
end
end,
[
%% default situation, no limiter setting
{undefined, Infinity},
%% client = undefined bucket = undefined
{#{}, Infinity},
%% client = undefined bucket = infinity
{MkB(infinity), Infinity},
%% client = undefined bucket = other
{MkB(100), IsRefLimiter(FakeInstance)},
%% client = infinity bucket = undefined
{MkC(infinity), Infinity},
%% client = infinity bucket = infinity
{MkA(infinity, infinity), Infinity},
%% client = infinity bucket = other
{MkA(infinity, 100), IsRefLimiter(FakeInstance)},
%% client = other bucket = undefined
{MkC(100), IsTokenLimiter(InfinityRef)},
%% client = other bucket = infinity
{MkA(100, infinity), IsTokenLimiter(InfinityRef)},
%% client = C bucket = B C < B
{MkA(100, 1000), IsTokenLimiter(FakeInstance)},
%% client = C bucket = B C > B
{MkA(1000, 100), IsRefLimiter(FakeInstance)}
].

View File

@ -219,112 +219,124 @@ parse_server_test_() ->
?T( ?T(
"single server, binary, no port", "single server, binary, no port",
?assertEqual( ?assertEqual(
[{"localhost", DefaultPort}], [#{hostname => "localhost", port => DefaultPort}],
Parse(<<"localhost">>) Parse(<<"localhost">>)
) )
), ),
?T( ?T(
"single server, string, no port", "single server, string, no port",
?assertEqual( ?assertEqual(
[{"localhost", DefaultPort}], [#{hostname => "localhost", port => DefaultPort}],
Parse("localhost") Parse("localhost")
) )
), ),
?T( ?T(
"single server, list(string), no port", "single server, list(string), no port",
?assertEqual( ?assertEqual(
[{"localhost", DefaultPort}], [#{hostname => "localhost", port => DefaultPort}],
Parse(["localhost"]) Parse(["localhost"])
) )
), ),
?T( ?T(
"single server, list(binary), no port", "single server, list(binary), no port",
?assertEqual( ?assertEqual(
[{"localhost", DefaultPort}], [#{hostname => "localhost", port => DefaultPort}],
Parse([<<"localhost">>]) Parse([<<"localhost">>])
) )
), ),
?T( ?T(
"single server, binary, with port", "single server, binary, with port",
?assertEqual( ?assertEqual(
[{"localhost", 9999}], [#{hostname => "localhost", port => 9999}],
Parse(<<"localhost:9999">>) Parse(<<"localhost:9999">>)
) )
), ),
?T( ?T(
"single server, list(string), with port", "single server, list(string), with port",
?assertEqual( ?assertEqual(
[{"localhost", 9999}], [#{hostname => "localhost", port => 9999}],
Parse(["localhost:9999"]) Parse(["localhost:9999"])
) )
), ),
?T( ?T(
"single server, string, with port", "single server, string, with port",
?assertEqual( ?assertEqual(
[{"localhost", 9999}], [#{hostname => "localhost", port => 9999}],
Parse("localhost:9999") Parse("localhost:9999")
) )
), ),
?T( ?T(
"single server, list(binary), with port", "single server, list(binary), with port",
?assertEqual( ?assertEqual(
[{"localhost", 9999}], [#{hostname => "localhost", port => 9999}],
Parse([<<"localhost:9999">>]) Parse([<<"localhost:9999">>])
) )
), ),
?T( ?T(
"multiple servers, string, no port", "multiple servers, string, no port",
?assertEqual( ?assertEqual(
[{"host1", DefaultPort}, {"host2", DefaultPort}], [
#{hostname => "host1", port => DefaultPort},
#{hostname => "host2", port => DefaultPort}
],
Parse("host1, host2") Parse("host1, host2")
) )
), ),
?T( ?T(
"multiple servers, binary, no port", "multiple servers, binary, no port",
?assertEqual( ?assertEqual(
[{"host1", DefaultPort}, {"host2", DefaultPort}], [
#{hostname => "host1", port => DefaultPort},
#{hostname => "host2", port => DefaultPort}
],
Parse(<<"host1, host2,,,">>) Parse(<<"host1, host2,,,">>)
) )
), ),
?T( ?T(
"multiple servers, list(string), no port", "multiple servers, list(string), no port",
?assertEqual( ?assertEqual(
[{"host1", DefaultPort}, {"host2", DefaultPort}], [
#{hostname => "host1", port => DefaultPort},
#{hostname => "host2", port => DefaultPort}
],
Parse(["host1", "host2"]) Parse(["host1", "host2"])
) )
), ),
?T( ?T(
"multiple servers, list(binary), no port", "multiple servers, list(binary), no port",
?assertEqual( ?assertEqual(
[{"host1", DefaultPort}, {"host2", DefaultPort}], [
#{hostname => "host1", port => DefaultPort},
#{hostname => "host2", port => DefaultPort}
],
Parse([<<"host1">>, <<"host2">>]) Parse([<<"host1">>, <<"host2">>])
) )
), ),
?T( ?T(
"multiple servers, string, with port", "multiple servers, string, with port",
?assertEqual( ?assertEqual(
[{"host1", 1234}, {"host2", 2345}], [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
Parse("host1:1234, host2:2345") Parse("host1:1234, host2:2345")
) )
), ),
?T( ?T(
"multiple servers, binary, with port", "multiple servers, binary, with port",
?assertEqual( ?assertEqual(
[{"host1", 1234}, {"host2", 2345}], [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
Parse(<<"host1:1234, host2:2345, ">>) Parse(<<"host1:1234, host2:2345, ">>)
) )
), ),
?T( ?T(
"multiple servers, list(string), with port", "multiple servers, list(string), with port",
?assertEqual( ?assertEqual(
[{"host1", 1234}, {"host2", 2345}], [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
Parse([" host1:1234 ", "host2:2345"]) Parse([" host1:1234 ", "host2:2345"])
) )
), ),
?T( ?T(
"multiple servers, list(binary), with port", "multiple servers, list(binary), with port",
?assertEqual( ?assertEqual(
[{"host1", 1234}, {"host2", 2345}], [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
Parse([<<"host1:1234">>, <<"host2:2345">>]) Parse([<<"host1:1234">>, <<"host2:2345">>])
) )
), ),
@ -350,9 +362,9 @@ parse_server_test_() ->
) )
), ),
?T( ?T(
"multiple servers wihtout port, mixed list(binary|string)", "multiple servers without port, mixed list(binary|string)",
?assertEqual( ?assertEqual(
["host1", "host2"], [#{hostname => "host1"}, #{hostname => "host2"}],
Parse2([<<"host1">>, "host2"], #{no_port => true}) Parse2([<<"host1">>, "host2"], #{no_port => true})
) )
), ),
@ -394,14 +406,18 @@ parse_server_test_() ->
?T( ?T(
"single server map", "single server map",
?assertEqual( ?assertEqual(
[{"host1.domain", 1234}], [#{hostname => "host1.domain", port => 1234}],
HoconParse("host1.domain:1234") HoconParse("host1.domain:1234")
) )
), ),
?T( ?T(
"multiple servers map", "multiple servers map",
?assertEqual( ?assertEqual(
[{"host1.domain", 1234}, {"host2.domain", 2345}, {"host3.domain", 3456}], [
#{hostname => "host1.domain", port => 1234},
#{hostname => "host2.domain", port => 2345},
#{hostname => "host3.domain", port => 3456}
],
HoconParse("host1.domain:1234,host2.domain:2345,host3.domain:3456") HoconParse("host1.domain:1234,host2.domain:2345,host3.domain:3456")
) )
), ),
@ -447,6 +463,171 @@ parse_server_test_() ->
"bad_schema", "bad_schema",
emqx_schema:parse_server("whatever", #{default_port => 10, no_port => true}) emqx_schema:parse_server("whatever", #{default_port => 10, no_port => true})
) )
),
?T(
"scheme, hostname and port",
?assertEqual(
#{scheme => "pulsar+ssl", hostname => "host", port => 6651},
emqx_schema:parse_server(
"pulsar+ssl://host:6651",
#{
default_port => 6650,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"scheme and hostname, default port",
?assertEqual(
#{scheme => "pulsar", hostname => "host", port => 6650},
emqx_schema:parse_server(
"pulsar://host",
#{
default_port => 6650,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"scheme and hostname, no port",
?assertEqual(
#{scheme => "pulsar", hostname => "host"},
emqx_schema:parse_server(
"pulsar://host",
#{
no_port => true,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"scheme and hostname, missing port",
?assertThrow(
"missing_port_number",
emqx_schema:parse_server(
"pulsar://host",
#{
no_port => false,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"hostname, default scheme, no default port",
?assertEqual(
#{scheme => "pulsar", hostname => "host"},
emqx_schema:parse_server(
"host",
#{
default_scheme => "pulsar",
no_port => true,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"hostname, default scheme, default port",
?assertEqual(
#{scheme => "pulsar", hostname => "host", port => 6650},
emqx_schema:parse_server(
"host",
#{
default_port => 6650,
default_scheme => "pulsar",
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"just hostname, expecting missing scheme",
?assertThrow(
"missing_scheme",
emqx_schema:parse_server(
"host",
#{
no_port => true,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"hostname, default scheme, defined port",
?assertEqual(
#{scheme => "pulsar", hostname => "host", port => 6651},
emqx_schema:parse_server(
"host:6651",
#{
default_port => 6650,
default_scheme => "pulsar",
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"inconsistent scheme opts",
?assertError(
"bad_schema",
emqx_schema:parse_server(
"pulsar+ssl://host:6651",
#{
default_port => 6650,
default_scheme => "something",
supported_schemes => ["not", "supported"]
}
)
)
),
?T(
"hostname, default scheme, defined port",
?assertEqual(
#{scheme => "pulsar", hostname => "host", port => 6651},
emqx_schema:parse_server(
"host:6651",
#{
default_port => 6650,
default_scheme => "pulsar",
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"unsupported scheme",
?assertThrow(
"unsupported_scheme",
emqx_schema:parse_server(
"pulsar+quic://host:6651",
#{
default_port => 6650,
supported_schemes => ["pulsar"]
}
)
)
),
?T(
"multiple hostnames with schemes (1)",
?assertEqual(
[
#{scheme => "pulsar", hostname => "host", port => 6649},
#{scheme => "pulsar+ssl", hostname => "other.host", port => 6651},
#{scheme => "pulsar", hostname => "yet.another", port => 6650}
],
emqx_schema:parse_servers(
"pulsar://host:6649, pulsar+ssl://other.host:6651,pulsar://yet.another",
#{
default_port => 6650,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
) )
]. ].

View File

@ -60,12 +60,12 @@ init(Parent) ->
{ok, #{callbacks => [], owner => Parent}}. {ok, #{callbacks => [], owner => Parent}}.
terminate(_Reason, #{callbacks := Callbacks}) -> terminate(_Reason, #{callbacks := Callbacks}) ->
lists:foreach(fun(Fun) -> catch Fun() end, Callbacks). do_terminate(Callbacks).
handle_call({push, Callback}, _From, State = #{callbacks := Callbacks}) -> handle_call({push, Callback}, _From, State = #{callbacks := Callbacks}) ->
{reply, ok, State#{callbacks := [Callback | Callbacks]}}; {reply, ok, State#{callbacks := [Callback | Callbacks]}};
handle_call(terminate, _From, State = #{callbacks := Callbacks}) -> handle_call(terminate, _From, State = #{callbacks := Callbacks}) ->
lists:foreach(fun(Fun) -> catch Fun() end, Callbacks), do_terminate(Callbacks),
{stop, normal, ok, State}; {stop, normal, ok, State};
handle_call(_Req, _From, State) -> handle_call(_Req, _From, State) ->
{reply, error, State}. {reply, error, State}.
@ -77,3 +77,23 @@ handle_info({'EXIT', Parent, _Reason}, State = #{owner := Parent}) ->
{stop, normal, State}; {stop, normal, State};
handle_info(_Msg, State) -> handle_info(_Msg, State) ->
{noreply, State}. {noreply, State}.
%%----------------------------------------------------------------------------------
%% Internal fns
%%----------------------------------------------------------------------------------
do_terminate(Callbacks) ->
lists:foreach(
fun(Fun) ->
try
Fun()
catch
K:E:S ->
ct:pal("error executing callback ~p: ~p", [Fun, {K, E}]),
ct:pal("stacktrace: ~p", [S]),
ok
end
end,
Callbacks
),
ok.
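%% Usage sketch (illustrative): suites register cleanup through
%% emqx_common_test_helpers:on_exit/1; callbacks are pushed onto a list, so
%% they run newest-first, and a crashing callback no longer aborts the rest
%% thanks to the try/catch above.
demo_register_cleanup() ->
    emqx_common_test_helpers:on_exit(fun() -> ok end).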

View File

@ -229,7 +229,8 @@ ssl_files_handle_non_generated_file_test() ->
ok = emqx_tls_lib:delete_ssl_files(Dir, undefined, SSL2), ok = emqx_tls_lib:delete_ssl_files(Dir, undefined, SSL2),
%% verify the file is not delete and not changed, because it is not generated by %% verify the file is not delete and not changed, because it is not generated by
%% emqx_tls_lib %% emqx_tls_lib
?assertEqual({ok, KeyFileContent}, file:read_file(TmpKeyFile)). ?assertEqual({ok, KeyFileContent}, file:read_file(TmpKeyFile)),
ok = file:delete(TmpKeyFile).
ssl_file_replace_test() -> ssl_file_replace_test() ->
Key1 = bin(test_key()), Key1 = bin(test_key()),

View File

@ -447,7 +447,12 @@ t_websocket_info_deliver(_) ->
t_websocket_info_timeout_limiter(_) -> t_websocket_info_timeout_limiter(_) ->
Ref = make_ref(), Ref = make_ref(),
LimiterT = init_limiter(), {ok, Rate} = emqx_limiter_schema:to_rate("50MB"),
LimiterT = init_limiter(#{
bytes => bucket_cfg(),
messages => bucket_cfg(),
client => #{bytes => client_cfg(Rate)}
}),
Next = fun emqx_ws_connection:when_msg_in/3, Next = fun emqx_ws_connection:when_msg_in/3,
Limiter = emqx_limiter_container:set_retry_context({retry, [], [], Next}, LimiterT), Limiter = emqx_limiter_container:set_retry_context({retry, [], [], Next}, LimiterT),
Event = {timeout, Ref, limit_timeout}, Event = {timeout, Ref, limit_timeout},
@ -513,16 +518,16 @@ t_handle_timeout_emit_stats(_) ->
t_ensure_rate_limit(_) -> t_ensure_rate_limit(_) ->
{ok, Rate} = emqx_limiter_schema:to_rate("50MB"), {ok, Rate} = emqx_limiter_schema:to_rate("50MB"),
Limiter = init_limiter(#{ Limiter = init_limiter(#{
bytes_in => bucket_cfg(), bytes => bucket_cfg(),
message_in => bucket_cfg(), messages => bucket_cfg(),
client => #{bytes_in => client_cfg(Rate)} client => #{bytes => client_cfg(Rate)}
}), }),
St = st(#{limiter => Limiter}), St = st(#{limiter => Limiter}),
%% must be bigger than the value in emqx_ratelimit_SUITE %% must be bigger than the value in emqx_ratelimit_SUITE
{ok, Need} = emqx_limiter_schema:to_capacity("1GB"), {ok, Need} = emqx_limiter_schema:to_capacity("1GB"),
St1 = ?ws_conn:check_limiter( St1 = ?ws_conn:check_limiter(
[{Need, bytes_in}], [{Need, bytes}],
[], [],
fun(_, _, S) -> S end, fun(_, _, S) -> S end,
[], [],
@ -703,23 +708,21 @@ init_limiter() ->
init_limiter(limiter_cfg()). init_limiter(limiter_cfg()).
init_limiter(LimiterCfg) -> init_limiter(LimiterCfg) ->
emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], LimiterCfg). emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes, messages], LimiterCfg).
limiter_cfg() -> limiter_cfg() ->
Cfg = bucket_cfg(), Cfg = bucket_cfg(),
Client = client_cfg(), Client = client_cfg(),
#{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}. #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}.
client_cfg() -> client_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(), client_cfg(infinity).
client_cfg(Infinity).
client_cfg(Rate) -> client_cfg(Rate) ->
Infinity = emqx_limiter_schema:infinity_value(),
#{ #{
rate => Rate, rate => Rate,
initial => 0, initial => 0,
capacity => Infinity, burst => 0,
low_watermark => 1, low_watermark => 1,
divisible => false, divisible => false,
max_retry_time => timer:seconds(5), max_retry_time => timer:seconds(5),
@ -727,14 +730,13 @@ client_cfg(Rate) ->
}. }.
bucket_cfg() -> bucket_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(), #{rate => infinity, initial => 0, burst => 0}.
#{rate => Infinity, initial => 0, capacity => Infinity}.
add_bucket() -> add_bucket() ->
Cfg = bucket_cfg(), Cfg = bucket_cfg(),
emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg), emqx_limiter_server:add_bucket(?LIMITER_ID, bytes, Cfg),
emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg). emqx_limiter_server:add_bucket(?LIMITER_ID, messages, Cfg).
del_bucket() -> del_bucket() ->
emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in), emqx_limiter_server:del_bucket(?LIMITER_ID, bytes),
emqx_limiter_server:del_bucket(?LIMITER_ID, message_in). emqx_limiter_server:del_bucket(?LIMITER_ID, messages).

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_authn, [ {application, emqx_authn, [
{description, "EMQX Authentication"}, {description, "EMQX Authentication"},
{vsn, "0.1.17"}, {vsn, "0.1.18"},
{modules, []}, {modules, []},
{registered, [emqx_authn_sup, emqx_authn_registry]}, {registered, [emqx_authn_sup, emqx_authn_registry]},
{applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]}, {applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]},

View File

@ -28,6 +28,7 @@
parse_sql/2, parse_sql/2,
render_deep/2, render_deep/2,
render_str/2, render_str/2,
render_urlencoded_str/2,
render_sql_params/2, render_sql_params/2,
is_superuser/1, is_superuser/1,
bin/1, bin/1,
@ -129,6 +130,13 @@ render_str(Template, Credential) ->
#{return => full_binary, var_trans => fun handle_var/2} #{return => full_binary, var_trans => fun handle_var/2}
). ).
render_urlencoded_str(Template, Credential) ->
emqx_placeholder:proc_tmpl(
Template,
mapping_credential(Credential),
#{return => full_binary, var_trans => fun urlencode_var/2}
).
render_sql_params(ParamList, Credential) -> render_sql_params(ParamList, Credential) ->
emqx_placeholder:proc_tmpl( emqx_placeholder:proc_tmpl(
ParamList, ParamList,
@ -217,6 +225,11 @@ without_password(Credential, [Name | Rest]) ->
without_password(Credential, Rest) without_password(Credential, Rest)
end. end.
urlencode_var({var, _} = Var, Value) ->
emqx_http_lib:uri_encode(handle_var(Var, Value));
urlencode_var(Var, Value) ->
handle_var(Var, Value).
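%% Encoding sketch (illustrative values): with a path template such as
%% "/auth/${username}", each rendered variable is now percent-encoded in
%% place instead of re-encoding the whole path. The template and credential
%% below are assumptions; preproc_tmpl/1 is the usual counterpart of
%% proc_tmpl/2.
demo_urlencoded_path() ->
    Template = emqx_placeholder:preproc_tmpl(<<"/auth/${username}">>),
    %% expected to yield something like <<"/auth/user%20name">>
    emqx_authn_utils:render_urlencoded_str(Template, #{username => <<"user name">>}).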
handle_var({var, _Name}, undefined) -> handle_var({var, _Name}, undefined) ->
<<>>; <<>>;
handle_var({var, <<"peerhost">>}, PeerHost) -> handle_var({var, <<"peerhost">>}, PeerHost) ->

View File

@ -105,14 +105,16 @@ mnesia(boot) ->
%% Hocon Schema %% Hocon Schema
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
namespace() -> "authn-scram-builtin_db". namespace() -> "authn".
tags() -> tags() ->
[<<"Authentication">>]. [<<"Authentication">>].
roots() -> [?CONF_NS]. %% used for config check when the schema module is resolved
roots() ->
[{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, scram))}].
fields(?CONF_NS) -> fields(scram) ->
[ [
{mechanism, emqx_authn_schema:mechanism(scram)}, {mechanism, emqx_authn_schema:mechanism(scram)},
{backend, emqx_authn_schema:backend(built_in_database)}, {backend, emqx_authn_schema:backend(built_in_database)},
@ -120,7 +122,7 @@ fields(?CONF_NS) ->
{iteration_count, fun iteration_count/1} {iteration_count, fun iteration_count/1}
] ++ emqx_authn_schema:common_fields(). ] ++ emqx_authn_schema:common_fields().
desc(?CONF_NS) -> desc(scram) ->
"Settings for Salted Challenge Response Authentication Mechanism\n" "Settings for Salted Challenge Response Authentication Mechanism\n"
"(SCRAM) authentication."; "(SCRAM) authentication.";
desc(_) -> desc(_) ->
@ -141,7 +143,7 @@ iteration_count(_) -> undefined.
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
refs() -> refs() ->
[hoconsc:ref(?MODULE, ?CONF_NS)]. [hoconsc:ref(?MODULE, scram)].
create( create(
AuthenticatorID, AuthenticatorID,

View File

@ -53,34 +53,35 @@
%% Hocon Schema %% Hocon Schema
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
namespace() -> "authn-http". namespace() -> "authn".
tags() -> tags() ->
[<<"Authentication">>]. [<<"Authentication">>].
%% used for config check when the schema module is resolved
roots() -> roots() ->
[ [
{?CONF_NS, {?CONF_NS,
hoconsc:mk( hoconsc:mk(
hoconsc:union(fun union_member_selector/1), hoconsc:union(fun ?MODULE:union_member_selector/1),
#{} #{}
)} )}
]. ].
fields(get) -> fields(http_get) ->
[ [
{method, #{type => get, required => true, desc => ?DESC(method)}}, {method, #{type => get, required => true, desc => ?DESC(method)}},
{headers, fun headers_no_content_type/1} {headers, fun headers_no_content_type/1}
] ++ common_fields(); ] ++ common_fields();
fields(post) -> fields(http_post) ->
[ [
{method, #{type => post, required => true, desc => ?DESC(method)}}, {method, #{type => post, required => true, desc => ?DESC(method)}},
{headers, fun headers/1} {headers, fun headers/1}
] ++ common_fields(). ] ++ common_fields().
desc(get) -> desc(http_get) ->
?DESC(get); ?DESC(get);
desc(post) -> desc(http_post) ->
?DESC(post); ?DESC(post);
desc(_) -> desc(_) ->
undefined. undefined.
@ -158,8 +159,8 @@ request_timeout(_) -> undefined.
refs() -> refs() ->
[ [
hoconsc:ref(?MODULE, get), hoconsc:ref(?MODULE, http_get),
hoconsc:ref(?MODULE, post) hoconsc:ref(?MODULE, http_post)
]. ].
union_member_selector(all_union_members) -> union_member_selector(all_union_members) ->
@ -168,9 +169,9 @@ union_member_selector({value, Value}) ->
refs(Value). refs(Value).
refs(#{<<"method">> := <<"get">>}) -> refs(#{<<"method">> := <<"get">>}) ->
[hoconsc:ref(?MODULE, get)]; [hoconsc:ref(?MODULE, http_get)];
refs(#{<<"method">> := <<"post">>}) -> refs(#{<<"method">> := <<"post">>}) ->
[hoconsc:ref(?MODULE, post)]; [hoconsc:ref(?MODULE, http_post)];
refs(_) -> refs(_) ->
throw(#{ throw(#{
field_name => method, field_name => method,
@ -313,9 +314,9 @@ parse_url(Url) ->
BaseUrl = iolist_to_binary([Scheme, "//", HostPort]), BaseUrl = iolist_to_binary([Scheme, "//", HostPort]),
case string:split(Remaining, "?", leading) of case string:split(Remaining, "?", leading) of
[Path, QueryString] -> [Path, QueryString] ->
{BaseUrl, Path, QueryString}; {BaseUrl, <<"/", Path/binary>>, QueryString};
[Path] -> [Path] ->
{BaseUrl, Path, <<>>} {BaseUrl, <<"/", Path/binary>>, <<>>}
end; end;
[HostPort] -> [HostPort] ->
{iolist_to_binary([Scheme, "//", HostPort]), <<>>, <<>>} {iolist_to_binary([Scheme, "//", HostPort]), <<>>, <<>>}
@ -356,7 +357,7 @@ generate_request(Credential, #{
body_template := BodyTemplate body_template := BodyTemplate
}) -> }) ->
Headers = maps:to_list(Headers0), Headers = maps:to_list(Headers0),
Path = emqx_authn_utils:render_str(BasePathTemplate, Credential), Path = emqx_authn_utils:render_urlencoded_str(BasePathTemplate, Credential),
Query = emqx_authn_utils:render_deep(BaseQueryTemplate, Credential), Query = emqx_authn_utils:render_deep(BaseQueryTemplate, Credential),
Body = emqx_authn_utils:render_deep(BodyTemplate, Credential), Body = emqx_authn_utils:render_deep(BodyTemplate, Credential),
case Method of case Method of
@ -371,9 +372,9 @@ generate_request(Credential, #{
end. end.
append_query(Path, []) -> append_query(Path, []) ->
encode_path(Path); Path;
append_query(Path, Query) -> append_query(Path, Query) ->
encode_path(Path) ++ "?" ++ binary_to_list(qs(Query)). Path ++ "?" ++ binary_to_list(qs(Query)).
qs(KVs) -> qs(KVs) ->
qs(KVs, []). qs(KVs, []).
@ -435,10 +436,6 @@ parse_body(ContentType, _) ->
uri_encode(T) -> uri_encode(T) ->
emqx_http_lib:uri_encode(to_list(T)). emqx_http_lib:uri_encode(to_list(T)).
encode_path(Path) ->
Parts = string:split(Path, "/", all),
lists:flatten(["/" ++ Part || Part <- lists:map(fun uri_encode/1, Parts)]).
request_for_log(Credential, #{url := Url} = State) -> request_for_log(Credential, #{url := Url} = State) ->
SafeCredential = emqx_authn_utils:without_password(Credential), SafeCredential = emqx_authn_utils:without_password(Credential),
case generate_request(SafeCredential, State) of case generate_request(SafeCredential, State) of

View File

@ -35,18 +35,17 @@
callback_mode() -> always_sync. callback_mode() -> always_sync.
on_start(InstId, Opts) -> on_start(InstId, Opts) ->
PoolName = emqx_plugin_libs_pool:pool_name(InstId),
PoolOpts = [ PoolOpts = [
{pool_size, maps:get(pool_size, Opts, ?DEFAULT_POOL_SIZE)}, {pool_size, maps:get(pool_size, Opts, ?DEFAULT_POOL_SIZE)},
{connector_opts, Opts} {connector_opts, Opts}
], ],
case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, PoolOpts) of case emqx_resource_pool:start(InstId, ?MODULE, PoolOpts) of
ok -> {ok, #{pool_name => PoolName}}; ok -> {ok, #{pool_name => InstId}};
{error, Reason} -> {error, Reason} {error, Reason} -> {error, Reason}
end. end.
on_stop(_InstId, #{pool_name := PoolName}) -> on_stop(_InstId, #{pool_name := PoolName}) ->
emqx_plugin_libs_pool:stop_pool(PoolName). emqx_resource_pool:stop(PoolName).
on_query(InstId, get_jwks, #{pool_name := PoolName}) -> on_query(InstId, get_jwks, #{pool_name := PoolName}) ->
Result = ecpool:pick_and_do(PoolName, {emqx_authn_jwks_client, get_jwks, []}, no_handover), Result = ecpool:pick_and_do(PoolName, {emqx_authn_jwks_client, get_jwks, []}, no_handover),
@ -72,16 +71,15 @@ on_query(_InstId, {update, Opts}, #{pool_name := PoolName}) ->
ok. ok.
on_get_status(_InstId, #{pool_name := PoolName}) -> on_get_status(_InstId, #{pool_name := PoolName}) ->
Func = case emqx_resource_pool:health_check_workers(PoolName, fun health_check/1) of
fun(Conn) -> true -> connected;
false -> disconnected
end.
health_check(Conn) ->
case emqx_authn_jwks_client:get_jwks(Conn) of case emqx_authn_jwks_client:get_jwks(Conn) of
{ok, _} -> true; {ok, _} -> true;
_ -> false _ -> false
end
end,
case emqx_plugin_libs_pool:health_check_ecpool_workers(PoolName, Func) of
true -> connected;
false -> disconnected
end. end.
connect(Opts) -> connect(Opts) ->

View File

@ -43,36 +43,57 @@
%% Hocon Schema %% Hocon Schema
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
namespace() -> "authn-jwt". namespace() -> "authn".
tags() -> tags() ->
[<<"Authentication">>]. [<<"Authentication">>].
%% used for config check when the schema module is resolved
roots() -> roots() ->
[ [
{?CONF_NS, {?CONF_NS,
hoconsc:mk( hoconsc:mk(
hoconsc:union(fun union_member_selector/1), hoconsc:union(fun ?MODULE:union_member_selector/1),
#{} #{}
)} )}
]. ].
fields('hmac-based') -> fields(jwt_hmac) ->
[ [
{use_jwks, sc(hoconsc:enum([false]), #{required => true, desc => ?DESC(use_jwks)})}, %% for hmac, it's the 'algorithm' field which selects this type
%% use_jwks field can be ignored (kept for backward compatibility)
{use_jwks,
sc(
hoconsc:enum([false]),
#{
required => false,
desc => ?DESC(use_jwks),
importance => ?IMPORTANCE_HIDDEN
}
)},
{algorithm, {algorithm,
sc(hoconsc:enum(['hmac-based']), #{required => true, desc => ?DESC(algorithm)})}, sc(hoconsc:enum(['hmac-based']), #{required => true, desc => ?DESC(algorithm)})},
{secret, fun secret/1}, {secret, fun secret/1},
{secret_base64_encoded, fun secret_base64_encoded/1} {secret_base64_encoded, fun secret_base64_encoded/1}
] ++ common_fields(); ] ++ common_fields();
fields('public-key') -> fields(jwt_public_key) ->
[ [
{use_jwks, sc(hoconsc:enum([false]), #{required => true, desc => ?DESC(use_jwks)})}, %% for public-key, it's the 'algorithm' field which selects this type
%% use_jwks field can be ignored (kept for backward compatibility)
{use_jwks,
sc(
hoconsc:enum([false]),
#{
required => false,
desc => ?DESC(use_jwks),
importance => ?IMPORTANCE_HIDDEN
}
)},
{algorithm, {algorithm,
sc(hoconsc:enum(['public-key']), #{required => true, desc => ?DESC(algorithm)})}, sc(hoconsc:enum(['public-key']), #{required => true, desc => ?DESC(algorithm)})},
{public_key, fun public_key/1} {public_key, fun public_key/1}
] ++ common_fields(); ] ++ common_fields();
fields('jwks') -> fields(jwt_jwks) ->
[ [
{use_jwks, sc(hoconsc:enum([true]), #{required => true, desc => ?DESC(use_jwks)})}, {use_jwks, sc(hoconsc:enum([true]), #{required => true, desc => ?DESC(use_jwks)})},
{endpoint, fun endpoint/1}, {endpoint, fun endpoint/1},
@ -85,12 +106,12 @@ fields('jwks') ->
}} }}
] ++ common_fields(). ] ++ common_fields().
desc('hmac-based') -> desc(jwt_hmac) ->
?DESC('hmac-based'); ?DESC(jwt_hmac);
desc('public-key') -> desc(jwt_public_key) ->
?DESC('public-key'); ?DESC(jwt_public_key);
desc('jwks') -> desc(jwt_jwks) ->
?DESC('jwks'); ?DESC(jwt_jwks);
desc(undefined) -> desc(undefined) ->
undefined. undefined.
@ -160,9 +181,9 @@ from(_) -> undefined.
refs() -> refs() ->
[ [
hoconsc:ref(?MODULE, 'hmac-based'), hoconsc:ref(?MODULE, jwt_hmac),
hoconsc:ref(?MODULE, 'public-key'), hoconsc:ref(?MODULE, jwt_public_key),
hoconsc:ref(?MODULE, 'jwks') hoconsc:ref(?MODULE, jwt_jwks)
]. ].
union_member_selector(all_union_members) -> union_member_selector(all_union_members) ->
@ -179,11 +200,11 @@ boolean(<<"false">>) -> false;
boolean(Other) -> Other. boolean(Other) -> Other.
select_ref(true, _) -> select_ref(true, _) ->
[hoconsc:ref(?MODULE, 'jwks')]; [hoconsc:ref(?MODULE, 'jwt_jwks')];
select_ref(false, #{<<"public_key">> := _}) -> select_ref(false, #{<<"public_key">> := _}) ->
[hoconsc:ref(?MODULE, 'public-key')]; [hoconsc:ref(?MODULE, jwt_public_key)];
select_ref(false, _) -> select_ref(false, _) ->
[hoconsc:ref(?MODULE, 'hmac-based')]; [hoconsc:ref(?MODULE, jwt_hmac)];
select_ref(_, _) -> select_ref(_, _) ->
throw(#{ throw(#{
field_name => use_jwks, field_name => use_jwks,

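The union member here is chosen by inspecting raw config values before validation. A condensed sketch of the selection with the new ref names (simplified: the real module also coerces string booleans and throws a validation error on invalid use_jwks values):

    union_member_selector(all_union_members) ->
        [
            hoconsc:ref(?MODULE, jwt_jwks),
            hoconsc:ref(?MODULE, jwt_public_key),
            hoconsc:ref(?MODULE, jwt_hmac)
        ];
    union_member_selector({value, #{<<"use_jwks">> := true}}) ->
        [hoconsc:ref(?MODULE, jwt_jwks)];
    union_member_selector({value, #{<<"public_key">> := _}}) ->
        [hoconsc:ref(?MODULE, jwt_public_key)];
    union_member_selector({value, _}) ->
        [hoconsc:ref(?MODULE, jwt_hmac)].

User-facing config is unchanged: use_jwks and algorithm still decide the branch; only the internal ref names (and hence the matched_type strings in error reports, see the test updates further down) differ.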
View File

@ -107,14 +107,16 @@ mnesia(boot) ->
%% Hocon Schema %% Hocon Schema
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
namespace() -> "authn-builtin_db". namespace() -> "authn".
tags() -> tags() ->
[<<"Authentication">>]. [<<"Authentication">>].
roots() -> [?CONF_NS]. %% used for config check when the schema module is resolved
roots() ->
[{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, builtin_db))}].
fields(?CONF_NS) -> fields(builtin_db) ->
[ [
{mechanism, emqx_authn_schema:mechanism(password_based)}, {mechanism, emqx_authn_schema:mechanism(password_based)},
{backend, emqx_authn_schema:backend(built_in_database)}, {backend, emqx_authn_schema:backend(built_in_database)},
@ -122,8 +124,8 @@ fields(?CONF_NS) ->
{password_hash_algorithm, fun emqx_authn_password_hashing:type_rw/1} {password_hash_algorithm, fun emqx_authn_password_hashing:type_rw/1}
] ++ emqx_authn_schema:common_fields(). ] ++ emqx_authn_schema:common_fields().
desc(?CONF_NS) -> desc(builtin_db) ->
?DESC(?CONF_NS); ?DESC(builtin_db);
desc(_) -> desc(_) ->
undefined. undefined.
@ -138,7 +140,7 @@ user_id_type(_) -> undefined.
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
refs() -> refs() ->
[hoconsc:ref(?MODULE, ?CONF_NS)]. [hoconsc:ref(?MODULE, builtin_db)].
create(_AuthenticatorID, Config) -> create(_AuthenticatorID, Config) ->
create(Config). create(Config).

View File

@ -44,32 +44,33 @@
%% Hocon Schema %% Hocon Schema
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
namespace() -> "authn-mongodb". namespace() -> "authn".
tags() -> tags() ->
[<<"Authentication">>]. [<<"Authentication">>].
%% used for config check when the schema module is resolved
roots() -> roots() ->
[ [
{?CONF_NS, {?CONF_NS,
hoconsc:mk( hoconsc:mk(
hoconsc:union(fun union_member_selector/1), hoconsc:union(fun ?MODULE:union_member_selector/1),
#{} #{}
)} )}
]. ].
fields(standalone) -> fields(mongo_single) ->
common_fields() ++ emqx_connector_mongo:fields(single); common_fields() ++ emqx_connector_mongo:fields(single);
fields('replica-set') -> fields(mongo_rs) ->
common_fields() ++ emqx_connector_mongo:fields(rs); common_fields() ++ emqx_connector_mongo:fields(rs);
fields('sharded-cluster') -> fields(mongo_sharded) ->
common_fields() ++ emqx_connector_mongo:fields(sharded). common_fields() ++ emqx_connector_mongo:fields(sharded).
desc(standalone) -> desc(mongo_single) ->
?DESC(standalone); ?DESC(single);
desc('replica-set') -> desc(mongo_rs) ->
?DESC('replica-set'); ?DESC('replica-set');
desc('sharded-cluster') -> desc(mongo_sharded) ->
?DESC('sharded-cluster'); ?DESC('sharded-cluster');
desc(_) -> desc(_) ->
undefined. undefined.
@ -126,9 +127,9 @@ is_superuser_field(_) -> undefined.
refs() -> refs() ->
[ [
hoconsc:ref(?MODULE, standalone), hoconsc:ref(?MODULE, mongo_single),
hoconsc:ref(?MODULE, 'replica-set'), hoconsc:ref(?MODULE, mongo_rs),
hoconsc:ref(?MODULE, 'sharded-cluster') hoconsc:ref(?MODULE, mongo_sharded)
]. ].
create(_AuthenticatorID, Config) -> create(_AuthenticatorID, Config) ->
@ -254,11 +255,11 @@ union_member_selector({value, Value}) ->
refs(Value). refs(Value).
refs(#{<<"mongo_type">> := <<"single">>}) -> refs(#{<<"mongo_type">> := <<"single">>}) ->
[hoconsc:ref(?MODULE, standalone)]; [hoconsc:ref(?MODULE, mongo_single)];
refs(#{<<"mongo_type">> := <<"rs">>}) -> refs(#{<<"mongo_type">> := <<"rs">>}) ->
[hoconsc:ref(?MODULE, 'replica-set')]; [hoconsc:ref(?MODULE, mongo_rs)];
refs(#{<<"mongo_type">> := <<"sharded">>}) -> refs(#{<<"mongo_type">> := <<"sharded">>}) ->
[hoconsc:ref(?MODULE, 'sharded-cluster')]; [hoconsc:ref(?MODULE, mongo_sharded)];
refs(_) -> refs(_) ->
throw(#{ throw(#{
field_name => mongo_type, field_name => mongo_type,

View File

@ -45,14 +45,16 @@
%% Hocon Schema %% Hocon Schema
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
namespace() -> "authn-mysql". namespace() -> "authn".
tags() -> tags() ->
[<<"Authentication">>]. [<<"Authentication">>].
roots() -> [?CONF_NS]. %% used for config check when the schema module is resolved
roots() ->
[{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, mysql))}].
fields(?CONF_NS) -> fields(mysql) ->
[ [
{mechanism, emqx_authn_schema:mechanism(password_based)}, {mechanism, emqx_authn_schema:mechanism(password_based)},
{backend, emqx_authn_schema:backend(mysql)}, {backend, emqx_authn_schema:backend(mysql)},
@ -62,8 +64,8 @@ fields(?CONF_NS) ->
] ++ emqx_authn_schema:common_fields() ++ ] ++ emqx_authn_schema:common_fields() ++
proplists:delete(prepare_statement, emqx_connector_mysql:fields(config)). proplists:delete(prepare_statement, emqx_connector_mysql:fields(config)).
desc(?CONF_NS) -> desc(mysql) ->
?DESC(?CONF_NS); ?DESC(mysql);
desc(_) -> desc(_) ->
undefined. undefined.
@ -82,7 +84,7 @@ query_timeout(_) -> undefined.
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
refs() -> refs() ->
[hoconsc:ref(?MODULE, ?CONF_NS)]. [hoconsc:ref(?MODULE, mysql)].
create(_AuthenticatorID, Config) -> create(_AuthenticatorID, Config) ->
create(Config). create(Config).

View File

@ -49,14 +49,16 @@
%% Hocon Schema %% Hocon Schema
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
namespace() -> "authn-postgresql". namespace() -> "authn".
tags() -> tags() ->
[<<"Authentication">>]. [<<"Authentication">>].
roots() -> [?CONF_NS]. %% used for config check when the schema module is resolved
roots() ->
[{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, postgresql))}].
fields(?CONF_NS) -> fields(postgresql) ->
[ [
{mechanism, emqx_authn_schema:mechanism(password_based)}, {mechanism, emqx_authn_schema:mechanism(password_based)},
{backend, emqx_authn_schema:backend(postgresql)}, {backend, emqx_authn_schema:backend(postgresql)},
@ -66,8 +68,8 @@ fields(?CONF_NS) ->
emqx_authn_schema:common_fields() ++ emqx_authn_schema:common_fields() ++
proplists:delete(prepare_statement, emqx_connector_pgsql:fields(config)). proplists:delete(prepare_statement, emqx_connector_pgsql:fields(config)).
desc(?CONF_NS) -> desc(postgresql) ->
?DESC(?CONF_NS); ?DESC(postgresql);
desc(_) -> desc(_) ->
undefined. undefined.
@ -81,7 +83,7 @@ query(_) -> undefined.
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
refs() -> refs() ->
[hoconsc:ref(?MODULE, ?CONF_NS)]. [hoconsc:ref(?MODULE, postgresql)].
create(_AuthenticatorID, Config) -> create(_AuthenticatorID, Config) ->
create(Config). create(Config).

View File

@ -44,32 +44,33 @@
%% Hocon Schema %% Hocon Schema
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
namespace() -> "authn-redis". namespace() -> "authn".
tags() -> tags() ->
[<<"Authentication">>]. [<<"Authentication">>].
%% used for config check when the schema module is resolved
roots() -> roots() ->
[ [
{?CONF_NS, {?CONF_NS,
hoconsc:mk( hoconsc:mk(
hoconsc:union(fun union_member_selector/1), hoconsc:union(fun ?MODULE:union_member_selector/1),
#{} #{}
)} )}
]. ].
fields(standalone) -> fields(redis_single) ->
common_fields() ++ emqx_connector_redis:fields(single); common_fields() ++ emqx_connector_redis:fields(single);
fields(cluster) -> fields(redis_cluster) ->
common_fields() ++ emqx_connector_redis:fields(cluster); common_fields() ++ emqx_connector_redis:fields(cluster);
fields(sentinel) -> fields(redis_sentinel) ->
common_fields() ++ emqx_connector_redis:fields(sentinel). common_fields() ++ emqx_connector_redis:fields(sentinel).
desc(standalone) -> desc(redis_single) ->
?DESC(standalone); ?DESC(single);
desc(cluster) -> desc(redis_cluster) ->
?DESC(cluster); ?DESC(cluster);
desc(sentinel) -> desc(redis_sentinel) ->
?DESC(sentinel); ?DESC(sentinel);
desc(_) -> desc(_) ->
"". "".
@ -93,9 +94,9 @@ cmd(_) -> undefined.
refs() -> refs() ->
[ [
hoconsc:ref(?MODULE, standalone), hoconsc:ref(?MODULE, redis_single),
hoconsc:ref(?MODULE, cluster), hoconsc:ref(?MODULE, redis_cluster),
hoconsc:ref(?MODULE, sentinel) hoconsc:ref(?MODULE, redis_sentinel)
]. ].
union_member_selector(all_union_members) -> union_member_selector(all_union_members) ->
@ -104,11 +105,11 @@ union_member_selector({value, Value}) ->
refs(Value). refs(Value).
refs(#{<<"redis_type">> := <<"single">>}) -> refs(#{<<"redis_type">> := <<"single">>}) ->
[hoconsc:ref(?MODULE, standalone)]; [hoconsc:ref(?MODULE, redis_single)];
refs(#{<<"redis_type">> := <<"cluster">>}) -> refs(#{<<"redis_type">> := <<"cluster">>}) ->
[hoconsc:ref(?MODULE, cluster)]; [hoconsc:ref(?MODULE, redis_cluster)];
refs(#{<<"redis_type">> := <<"sentinel">>}) -> refs(#{<<"redis_type">> := <<"sentinel">>}) ->
[hoconsc:ref(?MODULE, sentinel)]; [hoconsc:ref(?MODULE, redis_sentinel)];
refs(_) -> refs(_) ->
throw(#{ throw(#{
field_name => redis_type, field_name => redis_type,

View File

@ -47,7 +47,6 @@
}) })
). ).
-define(SERVER_RESPONSE_URLENCODE(Result), ?SERVER_RESPONSE_URLENCODE(Result, false)).
-define(SERVER_RESPONSE_URLENCODE(Result, IsSuperuser), -define(SERVER_RESPONSE_URLENCODE(Result, IsSuperuser),
list_to_binary( list_to_binary(
"result=" ++ "result=" ++
@ -166,6 +165,54 @@ test_user_auth(#{
?GLOBAL ?GLOBAL
). ).
t_authenticate_path_placeholders(_Config) ->
ok = emqx_authn_http_test_server:stop(),
{ok, _} = emqx_authn_http_test_server:start_link(?HTTP_PORT, <<"/[...]">>),
ok = emqx_authn_http_test_server:set_handler(
fun(Req0, State) ->
Req =
case cowboy_req:path(Req0) of
<<"/my/p%20ath//us%20er/auth//">> ->
cowboy_req:reply(
200,
#{<<"content-type">> => <<"application/json">>},
emqx_utils_json:encode(#{result => allow, is_superuser => false}),
Req0
);
Path ->
ct:pal("Unexpected path: ~p", [Path]),
cowboy_req:reply(403, Req0)
end,
{ok, Req, State}
end
),
Credentials = ?CREDENTIALS#{
username => <<"us er">>
},
AuthConfig = maps:merge(
raw_http_auth_config(),
#{
<<"url">> => <<"http://127.0.0.1:32333/my/p%20ath//${username}/auth//">>,
<<"body">> => #{}
}
),
{ok, _} = emqx:update_config(
?PATH,
{create_authenticator, ?GLOBAL, AuthConfig}
),
?assertMatch(
{ok, #{is_superuser := false}},
emqx_access_control:authenticate(Credentials)
),
_ = emqx_authn_test_lib:delete_authenticators(
[authentication],
?GLOBAL
).
t_no_value_for_placeholder(_Config) -> t_no_value_for_placeholder(_Config) ->
Handler = fun(Req0, State) -> Handler = fun(Req0, State) ->
{ok, RawBody, Req1} = cowboy_req:read_body(Req0), {ok, RawBody, Req1} = cowboy_req:read_body(Req0),

View File

@ -107,7 +107,7 @@ t_update_with_invalid_config(_Config) ->
?assertMatch( ?assertMatch(
{error, #{ {error, #{
kind := validation_error, kind := validation_error,
matched_type := "authn-postgresql:authentication", matched_type := "authn:postgresql",
path := "authentication.1.server", path := "authentication.1.server",
reason := required_field reason := required_field
}}, }},

View File

@ -162,7 +162,7 @@ t_create_invalid_config(_Config) ->
?assertMatch( ?assertMatch(
{error, #{ {error, #{
kind := validation_error, kind := validation_error,
matched_type := "authn-redis:standalone", matched_type := "authn:redis_single",
path := "authentication.1.server", path := "authentication.1.server",
reason := required_field reason := required_field
}}, }},

View File

@ -53,7 +53,7 @@ t_check_schema(_Config) ->
?assertThrow( ?assertThrow(
#{ #{
path := "authentication.1.password_hash_algorithm.name", path := "authentication.1.password_hash_algorithm.name",
matched_type := "authn-builtin_db:authentication/authn-hash:simple", matched_type := "authn:builtin_db/authn-hash:simple",
reason := unable_to_convert_to_enum_symbol reason := unable_to_convert_to_enum_symbol
}, },
Check(ConfigNotOk) Check(ConfigNotOk)
@ -72,7 +72,7 @@ t_check_schema(_Config) ->
#{ #{
path := "authentication.1.password_hash_algorithm", path := "authentication.1.password_hash_algorithm",
reason := "algorithm_name_missing", reason := "algorithm_name_missing",
matched_type := "authn-builtin_db:authentication" matched_type := "authn:builtin_db"
}, },
Check(ConfigMissingAlgoName) Check(ConfigMissingAlgoName)
). ).

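All of these expectation updates follow one rule: matched_type appears to be built as the schema namespace joined to the ref name with a colon, with nested refs chained by a slash. The new strings can therefore be derived from the renames above, e.g.:

    %% namespace "authn" ++ ":" ++ ref name "builtin_db"
    "authn:builtin_db"
    %% a nested ref keeps its own namespace, joined with "/":
    "authn:builtin_db/authn-hash:simple"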
View File

@ -32,19 +32,19 @@ union_member_selector_mongo_test_() ->
end}, end},
{"single", fun() -> {"single", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-mongodb:standalone"}), ?ERR(#{matched_type := "authn:mongo_single"}),
Check("{mongo_type: single}") Check("{mongo_type: single}")
) )
end}, end},
{"replica-set", fun() -> {"replica-set", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-mongodb:replica-set"}), ?ERR(#{matched_type := "authn:mongo_rs"}),
Check("{mongo_type: rs}") Check("{mongo_type: rs}")
) )
end}, end},
{"sharded", fun() -> {"sharded", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-mongodb:sharded-cluster"}), ?ERR(#{matched_type := "authn:mongo_sharded"}),
Check("{mongo_type: sharded}") Check("{mongo_type: sharded}")
) )
end} end}
@ -61,19 +61,19 @@ union_member_selector_jwt_test_() ->
end}, end},
{"jwks", fun() -> {"jwks", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-jwt:jwks"}), ?ERR(#{matched_type := "authn:jwt_jwks"}),
Check("{use_jwks = true}") Check("{use_jwks = true}")
) )
end}, end},
{"publick-key", fun() -> {"publick-key", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-jwt:public-key"}), ?ERR(#{matched_type := "authn:jwt_public_key"}),
Check("{use_jwks = false, public_key = 1}") Check("{use_jwks = false, public_key = 1}")
) )
end}, end},
{"hmac-based", fun() -> {"hmac-based", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-jwt:hmac-based"}), ?ERR(#{matched_type := "authn:jwt_hmac"}),
Check("{use_jwks = false}") Check("{use_jwks = false}")
) )
end} end}
@ -90,19 +90,19 @@ union_member_selector_redis_test_() ->
end}, end},
{"single", fun() -> {"single", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-redis:standalone"}), ?ERR(#{matched_type := "authn:redis_single"}),
Check("{redis_type = single}") Check("{redis_type = single}")
) )
end}, end},
{"cluster", fun() -> {"cluster", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-redis:cluster"}), ?ERR(#{matched_type := "authn:redis_cluster"}),
Check("{redis_type = cluster}") Check("{redis_type = cluster}")
) )
end}, end},
{"sentinel", fun() -> {"sentinel", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-redis:sentinel"}), ?ERR(#{matched_type := "authn:redis_sentinel"}),
Check("{redis_type = sentinel}") Check("{redis_type = sentinel}")
) )
end} end}
@ -119,13 +119,13 @@ union_member_selector_http_test_() ->
end}, end},
{"get", fun() -> {"get", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-http:get"}), ?ERR(#{matched_type := "authn:http_get"}),
Check("{method = get}") Check("{method = get}")
) )
end}, end},
{"post", fun() -> {"post", fun() ->
?assertMatch( ?assertMatch(
?ERR(#{matched_type := "authn-http:post"}), ?ERR(#{matched_type := "authn:http_post"}),
Check("{method = post}") Check("{method = post}")
) )
end} end}

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_authz, [ {application, emqx_authz, [
{description, "An OTP application"}, {description, "An OTP application"},
{vsn, "0.1.17"}, {vsn, "0.1.19"},
{registered, []}, {registered, []},
{mod, {emqx_authz_app, []}}, {mod, {emqx_authz_app, []}},
{applications, [ {applications, [

View File

@ -161,9 +161,9 @@ parse_url(Url) ->
BaseUrl = iolist_to_binary([Scheme, "//", HostPort]), BaseUrl = iolist_to_binary([Scheme, "//", HostPort]),
case string:split(Remaining, "?", leading) of case string:split(Remaining, "?", leading) of
[Path, QueryString] -> [Path, QueryString] ->
{BaseUrl, Path, QueryString}; {BaseUrl, <<"/", Path/binary>>, QueryString};
[Path] -> [Path] ->
{BaseUrl, Path, <<>>} {BaseUrl, <<"/", Path/binary>>, <<>>}
end; end;
[HostPort] -> [HostPort] ->
{iolist_to_binary([Scheme, "//", HostPort]), <<>>, <<>>} {iolist_to_binary([Scheme, "//", HostPort]), <<>>, <<>>}
@ -185,7 +185,7 @@ generate_request(
} }
) -> ) ->
Values = client_vars(Client, PubSub, Topic), Values = client_vars(Client, PubSub, Topic),
Path = emqx_authz_utils:render_str(BasePathTemplate, Values), Path = emqx_authz_utils:render_urlencoded_str(BasePathTemplate, Values),
Query = emqx_authz_utils:render_deep(BaseQueryTemplate, Values), Query = emqx_authz_utils:render_deep(BaseQueryTemplate, Values),
Body = emqx_authz_utils:render_deep(BodyTemplate, Values), Body = emqx_authz_utils:render_deep(BodyTemplate, Values),
case Method of case Method of
@ -202,9 +202,9 @@ generate_request(
end. end.
append_query(Path, []) -> append_query(Path, []) ->
encode_path(Path); to_list(Path);
append_query(Path, Query) -> append_query(Path, Query) ->
encode_path(Path) ++ "?" ++ to_list(query_string(Query)). to_list(Path) ++ "?" ++ to_list(query_string(Query)).
query_string(Body) -> query_string(Body) ->
query_string(Body, []). query_string(Body, []).
@ -222,10 +222,6 @@ query_string([{K, V} | More], Acc) ->
uri_encode(T) -> uri_encode(T) ->
emqx_http_lib:uri_encode(to_list(T)). emqx_http_lib:uri_encode(to_list(T)).
encode_path(Path) ->
Parts = string:split(Path, "/", all),
lists:flatten(["/" ++ Part || Part <- lists:map(fun uri_encode/1, Parts)]).
serialize_body(<<"application/json">>, Body) -> serialize_body(<<"application/json">>, Body) ->
emqx_utils_json:encode(Body); emqx_utils_json:encode(Body);
serialize_body(<<"application/x-www-form-urlencoded">>, Body) -> serialize_body(<<"application/x-www-form-urlencoded">>, Body) ->

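The parse_url/1 change is easiest to see on a concrete (illustrative) URL: the path component now keeps its leading slash, so the removed encode_path/1 no longer has to re-split the path to add slashes back:

    %% previously: {<<"http://127.0.0.1:8080">>, <<"a/b">>, <<"x=1">>}
    %% now:        {<<"http://127.0.0.1:8080">>, <<"/a/b">>, <<"x=1">>}
    parse_url(<<"http://127.0.0.1:8080/a/b?x=1">>)

Per-placeholder encoding moves to render_urlencoded_str/2, added to emqx_authz_utils below.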
View File

@ -185,7 +185,7 @@ match_who(#{peerhost := IpAddress}, {ipaddrs, CIDRs}) ->
match_who(ClientInfo, {'and', Principals}) when is_list(Principals) -> match_who(ClientInfo, {'and', Principals}) when is_list(Principals) ->
lists:foldl( lists:foldl(
fun(Principal, Permission) -> fun(Principal, Permission) ->
match_who(ClientInfo, Principal) andalso Permission Permission andalso match_who(ClientInfo, Principal)
end, end,
true, true,
Principals Principals
@ -193,7 +193,7 @@ match_who(ClientInfo, {'and', Principals}) when is_list(Principals) ->
match_who(ClientInfo, {'or', Principals}) when is_list(Principals) -> match_who(ClientInfo, {'or', Principals}) when is_list(Principals) ->
lists:foldl( lists:foldl(
fun(Principal, Permission) -> fun(Principal, Permission) ->
match_who(ClientInfo, Principal) orelse Permission Permission orelse match_who(ClientInfo, Principal)
end, end,
false, false,
Principals Principals

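The operand swap is not cosmetic: andalso/orelse only short-circuit their right-hand side, so the accumulator must come first for the fold to stop calling match_who/2 once the result is settled. A self-contained sketch of the 'and' case (all_match and Pred are illustrative names, not code from this change):

    all_match(Pred, Principals) ->
        %% once Acc is false, Pred(P) is no longer evaluated for the
        %% remaining principals; lists:foldl/3 still walks the list
        lists:foldl(fun(P, Acc) -> Acc andalso Pred(P) end, true, Principals).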
View File

@ -54,7 +54,7 @@ type_names() ->
file, file,
http_get, http_get,
http_post, http_post,
mnesia, builtin_db,
mongo_single, mongo_single,
mongo_rs, mongo_rs,
mongo_sharded, mongo_sharded,
@ -93,7 +93,7 @@ fields(http_post) ->
{method, method(post)}, {method, method(post)},
{headers, fun headers/1} {headers, fun headers/1}
]; ];
fields(mnesia) -> fields(builtin_db) ->
authz_common_fields(built_in_database); authz_common_fields(built_in_database);
fields(mongo_single) -> fields(mongo_single) ->
authz_common_fields(mongodb) ++ authz_common_fields(mongodb) ++
@ -191,8 +191,8 @@ desc(http_get) ->
?DESC(http_get); ?DESC(http_get);
desc(http_post) -> desc(http_post) ->
?DESC(http_post); ?DESC(http_post);
desc(mnesia) -> desc(builtin_db) ->
?DESC(mnesia); ?DESC(builtin_db);
desc(mongo_single) -> desc(mongo_single) ->
?DESC(mongo_single); ?DESC(mongo_single);
desc(mongo_rs) -> desc(mongo_rs) ->
@ -459,7 +459,7 @@ select_union_member(#{<<"type">> := <<"http">>} = Value) ->
}) })
end; end;
select_union_member(#{<<"type">> := <<"built_in_database">>}) -> select_union_member(#{<<"type">> := <<"built_in_database">>}) ->
?R_REF(mnesia); ?R_REF(builtin_db);
select_union_member(#{<<"type">> := Type}) -> select_union_member(#{<<"type">> := Type}) ->
select_union_member_loop(Type, type_names()); select_union_member_loop(Type, type_names());
select_union_member(_) -> select_union_member(_) ->

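The rename is internal only: the user-facing type value stays built_in_database, it now simply resolves to the builtin_db ref. A minimal, illustrative source entry that validates the same before and after:

    authorization {
      sources = [
        {type = built_in_database, enable = true}
      ]
    }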
View File

@ -16,7 +16,6 @@
-module(emqx_authz_utils). -module(emqx_authz_utils).
-include_lib("emqx/include/emqx_placeholder.hrl").
-include_lib("emqx_authz.hrl"). -include_lib("emqx_authz.hrl").
-export([ -export([
@ -28,6 +27,7 @@
update_config/2, update_config/2,
parse_deep/2, parse_deep/2,
parse_str/2, parse_str/2,
render_urlencoded_str/2,
parse_sql/3, parse_sql/3,
render_deep/2, render_deep/2,
render_str/2, render_str/2,
@ -128,6 +128,13 @@ render_str(Template, Values) ->
#{return => full_binary, var_trans => fun handle_var/2} #{return => full_binary, var_trans => fun handle_var/2}
). ).
render_urlencoded_str(Template, Values) ->
emqx_placeholder:proc_tmpl(
Template,
client_vars(Values),
#{return => full_binary, var_trans => fun urlencode_var/2}
).
render_sql_params(ParamList, Values) -> render_sql_params(ParamList, Values) ->
emqx_placeholder:proc_tmpl( emqx_placeholder:proc_tmpl(
ParamList, ParamList,
@ -181,6 +188,11 @@ convert_client_var({dn, DN}) -> {cert_subject, DN};
convert_client_var({protocol, Proto}) -> {proto_name, Proto}; convert_client_var({protocol, Proto}) -> {proto_name, Proto};
convert_client_var(Other) -> Other. convert_client_var(Other) -> Other.
urlencode_var({var, _} = Var, Value) ->
emqx_http_lib:uri_encode(handle_var(Var, Value));
urlencode_var(Var, Value) ->
handle_var(Var, Value).
handle_var({var, _Name}, undefined) -> handle_var({var, _Name}, undefined) ->
<<>>; <<>>;
handle_var({var, <<"peerhost">>}, IpAddr) -> handle_var({var, <<"peerhost">>}, IpAddr) ->

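render_urlencoded_str/2 differs from render_str/2 only in the var_trans hook: placeholder values are percent-encoded while literal template text is left alone. Assuming a username of <<"user name">> and a topic of <<"t/1">>, a path template such as

    /authz/${username}/${topic}

renders as /authz/user%20name/t%2F1, so a value containing "/" can no longer inject extra path segments; the suite changes below exercise exactly this.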
View File

@ -199,7 +199,7 @@ t_query_params(_Config) ->
peerhost := <<"127.0.0.1">>, peerhost := <<"127.0.0.1">>,
proto_name := <<"MQTT">>, proto_name := <<"MQTT">>,
mountpoint := <<"MOUNTPOINT">>, mountpoint := <<"MOUNTPOINT">>,
topic := <<"t">>, topic := <<"t/1">>,
action := <<"publish">> action := <<"publish">>
} = cowboy_req:match_qs( } = cowboy_req:match_qs(
[ [
@ -241,7 +241,7 @@ t_query_params(_Config) ->
?assertEqual( ?assertEqual(
allow, allow,
emqx_access_control:authorize(ClientInfo, publish, <<"t">>) emqx_access_control:authorize(ClientInfo, publish, <<"t/1">>)
). ).
t_path(_Config) -> t_path(_Config) ->
@ -249,13 +249,13 @@ t_path(_Config) ->
fun(Req0, State) -> fun(Req0, State) ->
?assertEqual( ?assertEqual(
<< <<
"/authz/users/" "/authz/use%20rs/"
"user%20name/" "user%20name/"
"client%20id/" "client%20id/"
"127.0.0.1/" "127.0.0.1/"
"MQTT/" "MQTT/"
"MOUNTPOINT/" "MOUNTPOINT/"
"t/1/" "t%2F1/"
"publish" "publish"
>>, >>,
cowboy_req:path(Req0) cowboy_req:path(Req0)
@ -264,7 +264,7 @@ t_path(_Config) ->
end, end,
#{ #{
<<"url">> => << <<"url">> => <<
"http://127.0.0.1:33333/authz/users/" "http://127.0.0.1:33333/authz/use%20rs/"
"${username}/" "${username}/"
"${clientid}/" "${clientid}/"
"${peerhost}/" "${peerhost}/"

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_bridge, [ {application, emqx_bridge, [
{description, "EMQX bridges"}, {description, "EMQX bridges"},
{vsn, "0.1.16"}, {vsn, "0.1.18"},
{registered, [emqx_bridge_sup]}, {registered, [emqx_bridge_sup]},
{mod, {emqx_bridge_app, []}}, {mod, {emqx_bridge_app, []}},
{applications, [ {applications, [

View File

@ -70,7 +70,10 @@
T == dynamo; T == dynamo;
T == rocketmq; T == rocketmq;
T == cassandra; T == cassandra;
T == sqlserver T == sqlserver;
T == pulsar_producer;
T == oracle;
T == iotdb
). ).
load() -> load() ->

View File

@ -64,7 +64,7 @@
{BridgeType, BridgeName} -> {BridgeType, BridgeName} ->
EXPR EXPR
catch catch
throw:{invalid_bridge_id, Reason} -> throw:#{reason := Reason} ->
?NOT_FOUND(<<"Invalid bridge ID, ", Reason/binary>>) ?NOT_FOUND(<<"Invalid bridge ID, ", Reason/binary>>)
end end
). ).
@ -546,6 +546,8 @@ schema("/bridges_probe") ->
case emqx_bridge_resource:create_dry_run(ConnType, maps:remove(<<"type">>, Params1)) of case emqx_bridge_resource:create_dry_run(ConnType, maps:remove(<<"type">>, Params1)) of
ok -> ok ->
?NO_CONTENT; ?NO_CONTENT;
{error, #{kind := validation_error} = Reason} ->
?BAD_REQUEST('TEST_FAILED', map_to_json(Reason));
{error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' -> {error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' ->
?BAD_REQUEST('TEST_FAILED', Reason) ?BAD_REQUEST('TEST_FAILED', Reason)
end; end;

View File

@ -56,6 +56,11 @@
(TYPE) =:= <<"kafka_consumer">> orelse ?IS_BI_DIR_BRIDGE(TYPE) (TYPE) =:= <<"kafka_consumer">> orelse ?IS_BI_DIR_BRIDGE(TYPE)
). ).
%% [FIXME] this has no place here, it's used in parse_confs/3, which should
%% rather delegate to a behavior callback than implement domain knowledge
%% here (reversed dependency)
-define(INSERT_TABLET_PATH, "/rest/v2/insertTablet").
-if(?EMQX_RELEASE_EDITION == ee). -if(?EMQX_RELEASE_EDITION == ee).
bridge_to_resource_type(<<"mqtt">>) -> emqx_connector_mqtt; bridge_to_resource_type(<<"mqtt">>) -> emqx_connector_mqtt;
bridge_to_resource_type(mqtt) -> emqx_connector_mqtt; bridge_to_resource_type(mqtt) -> emqx_connector_mqtt;
@ -87,7 +92,7 @@ parse_bridge_id(BridgeId) ->
[Type, Name] -> [Type, Name] ->
{to_type_atom(Type), validate_name(Name)}; {to_type_atom(Type), validate_name(Name)};
_ -> _ ->
invalid_bridge_id( invalid_data(
<<"should be of pattern {type}:{name}, but got ", BridgeId/binary>> <<"should be of pattern {type}:{name}, but got ", BridgeId/binary>>
) )
end. end.
@ -108,14 +113,14 @@ validate_name(Name0) ->
true -> true ->
Name0; Name0;
false -> false ->
invalid_bridge_id(<<"bad name: ", Name0/binary>>) invalid_data(<<"bad name: ", Name0/binary>>)
end; end;
false -> false ->
invalid_bridge_id(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>) invalid_data(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>)
end. end.
-spec invalid_bridge_id(binary()) -> no_return(). -spec invalid_data(binary()) -> no_return().
invalid_bridge_id(Reason) -> throw({?FUNCTION_NAME, Reason}). invalid_data(Reason) -> throw(#{kind => validation_error, reason => Reason}).
is_id_char(C) when C >= $0 andalso C =< $9 -> true; is_id_char(C) when C >= $0 andalso C =< $9 -> true;
is_id_char(C) when C >= $a andalso C =< $z -> true; is_id_char(C) when C >= $a andalso C =< $z -> true;
@ -130,7 +135,7 @@ to_type_atom(Type) ->
erlang:binary_to_existing_atom(Type, utf8) erlang:binary_to_existing_atom(Type, utf8)
catch catch
_:_ -> _:_ ->
invalid_bridge_id(<<"unknown type: ", Type/binary>>) invalid_data(<<"unknown bridge type: ", Type/binary>>)
end. end.
reset_metrics(ResourceId) -> reset_metrics(ResourceId) ->
@ -243,12 +248,19 @@ create_dry_run(Type, Conf0) ->
{error, Reason} -> {error, Reason} ->
{error, Reason}; {error, Reason};
{ok, ConfNew} -> {ok, ConfNew} ->
try
ParseConf = parse_confs(bin(Type), TmpPath, ConfNew), ParseConf = parse_confs(bin(Type), TmpPath, ConfNew),
Res = emqx_resource:create_dry_run_local( Res = emqx_resource:create_dry_run_local(
bridge_to_resource_type(Type), ParseConf bridge_to_resource_type(Type), ParseConf
), ),
_ = maybe_clear_certs(TmpPath, ConfNew),
Res Res
catch
%% validation errors
throw:Reason ->
{error, Reason}
after
_ = maybe_clear_certs(TmpPath, ConfNew)
end
end. end.
remove(BridgeId) -> remove(BridgeId) ->
@ -300,10 +312,18 @@ parse_confs(
max_retries := Retry max_retries := Retry
} = Conf } = Conf
) -> ) ->
{BaseUrl, Path} = parse_url(Url), Url1 = bin(Url),
{ok, BaseUrl2} = emqx_http_lib:uri_parse(BaseUrl), {BaseUrl, Path} = parse_url(Url1),
BaseUrl1 =
case emqx_http_lib:uri_parse(BaseUrl) of
{ok, BUrl} ->
BUrl;
{error, Reason} ->
Reason1 = emqx_utils:readable_error_msg(Reason),
invalid_data(<<"Invalid URL: ", Url1/binary, ", details: ", Reason1/binary>>)
end,
Conf#{ Conf#{
base_url => BaseUrl2, base_url => BaseUrl1,
request => request =>
#{ #{
path => Path, path => Path,
@ -314,6 +334,30 @@ parse_confs(
max_retries => Retry max_retries => Retry
} }
}; };
parse_confs(<<"iotdb">>, Name, Conf) ->
#{
base_url := BaseURL,
authentication :=
#{
username := Username,
password := Password
}
} = Conf,
BasicToken = base64:encode(<<Username/binary, ":", Password/binary>>),
WebhookConfig =
Conf#{
method => <<"post">>,
url => <<BaseURL/binary, ?INSERT_TABLET_PATH>>,
headers => [
{<<"Content-type">>, <<"application/json">>},
{<<"Authorization">>, BasicToken}
]
},
parse_confs(
<<"webhook">>,
Name,
WebhookConfig
);
parse_confs(Type, Name, Conf) when ?IS_INGRESS_BRIDGE(Type) -> parse_confs(Type, Name, Conf) when ?IS_INGRESS_BRIDGE(Type) ->
%% For some drivers that can be used as data-sources, we need to provide a %% For some drivers that can be used as data-sources, we need to provide a
%% hookpoint. The underlying driver will run `emqx_hooks:run/3` when it %% hookpoint. The underlying driver will run `emqx_hooks:run/3` when it
@ -325,6 +369,8 @@ parse_confs(Type, Name, Conf) when ?IS_INGRESS_BRIDGE(Type) ->
%% to hocon; keeping this as just `kafka' for backwards compatibility. %% to hocon; keeping this as just `kafka' for backwards compatibility.
parse_confs(<<"kafka">> = _Type, Name, Conf) -> parse_confs(<<"kafka">> = _Type, Name, Conf) ->
Conf#{bridge_name => Name}; Conf#{bridge_name => Name};
parse_confs(<<"pulsar_producer">> = _Type, Name, Conf) ->
Conf#{bridge_name => Name};
parse_confs(_Type, _Name, Conf) -> parse_confs(_Type, _Name, Conf) ->
Conf. Conf.
@ -338,7 +384,7 @@ parse_url(Url) ->
{iolist_to_binary([Scheme, "//", HostPort]), <<>>} {iolist_to_binary([Scheme, "//", HostPort]), <<>>}
end; end;
[Url] -> [Url] ->
error({invalid_url, Url}) invalid_data(<<"Missing scheme in URL: ", Url/binary>>)
end. end.
str(Bin) when is_binary(Bin) -> binary_to_list(Bin); str(Bin) when is_binary(Bin) -> binary_to_list(Bin);

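The iotdb branch above only repackages the bridge as a webhook: IoTDB's REST API uses HTTP Basic auth, so the token is plain base64 of user:password. With assumed example credentials:

    1> base64:encode(<<"root", ":", "public">>).
    <<"cm9vdDpwdWJsaWM=">>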
View File

@ -141,8 +141,7 @@ setup_fake_telemetry_data() ->
} }
} }
}, },
Opts = #{raw_with_default => true}, ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf),
ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf, Opts),
ok = snabbkaffe:start_trace(), ok = snabbkaffe:start_trace(),
Predicate = fun(#{?snk_kind := K}) -> K =:= emqx_bridge_loaded end, Predicate = fun(#{?snk_kind := K}) -> K =:= emqx_bridge_loaded end,

View File

@ -414,6 +414,18 @@ t_http_crud_apis(Config) ->
}, },
json(maps:get(<<"message">>, PutFail2)) json(maps:get(<<"message">>, PutFail2))
), ),
{ok, 400, _} = request_json(
put,
uri(["bridges", BridgeID]),
?HTTP_BRIDGE(<<"localhost:1234/foo">>, Name),
Config
),
{ok, 400, _} = request_json(
put,
uri(["bridges", BridgeID]),
?HTTP_BRIDGE(<<"htpp://localhost:12341234/foo">>, Name),
Config
),
%% delete the bridge %% delete the bridge
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config), {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config),
@ -498,6 +510,22 @@ t_http_crud_apis(Config) ->
%% Try create bridge with bad characters as name %% Try create bridge with bad characters as name
{ok, 400, _} = request(post, uri(["bridges"]), ?HTTP_BRIDGE(URL1, <<"隋达"/utf8>>), Config), {ok, 400, _} = request(post, uri(["bridges"]), ?HTTP_BRIDGE(URL1, <<"隋达"/utf8>>), Config),
%% Missing scheme in URL
{ok, 400, _} = request(
post,
uri(["bridges"]),
?HTTP_BRIDGE(<<"localhost:1234/foo">>, <<"missing_url_scheme">>),
Config
),
%% Invalid port
{ok, 400, _} = request(
post,
uri(["bridges"]),
?HTTP_BRIDGE(<<"http://localhost:12341234/foo">>, <<"invalid_port">>),
Config
),
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config). {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config).
t_http_bridges_local_topic(Config) -> t_http_bridges_local_topic(Config) ->
@ -1016,6 +1044,34 @@ t_bridges_probe(Config) ->
) )
), ),
%% Missing scheme in URL
?assertMatch(
{ok, 400, #{
<<"code">> := <<"TEST_FAILED">>,
<<"message">> := _
}},
request_json(
post,
uri(["bridges_probe"]),
?HTTP_BRIDGE(<<"203.0.113.3:1234/foo">>),
Config
)
),
%% Invalid port
?assertMatch(
{ok, 400, #{
<<"code">> := <<"TEST_FAILED">>,
<<"message">> := _
}},
request_json(
post,
uri(["bridges_probe"]),
?HTTP_BRIDGE(<<"http://203.0.113.3:12341234/foo">>),
Config
)
),
{ok, 204, _} = request( {ok, 204, _} = request(
post, post,
uri(["bridges_probe"]), uri(["bridges_probe"]),

View File

@ -0,0 +1,350 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_testlib).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
%% ct setup helpers
init_per_suite(Config, Apps) ->
[{start_apps, Apps} | Config].
end_per_suite(Config) ->
emqx_mgmt_api_test_util:end_suite(),
ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?config(start_apps, Config))),
_ = application:stop(emqx_connector),
ok.
init_per_group(TestGroup, BridgeType, Config) ->
ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
application:load(emqx_bridge),
ok = emqx_common_test_helpers:start_apps([emqx_conf]),
ok = emqx_connector_test_helpers:start_apps(?config(start_apps, Config)),
{ok, _} = application:ensure_all_started(emqx_connector),
emqx_mgmt_api_test_util:init_suite(),
UniqueNum = integer_to_binary(erlang:unique_integer([positive])),
MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>,
[
{proxy_host, ProxyHost},
{proxy_port, ProxyPort},
{mqtt_topic, MQTTTopic},
{test_group, TestGroup},
{bridge_type, BridgeType}
| Config
].
end_per_group(Config) ->
ProxyHost = ?config(proxy_host, Config),
ProxyPort = ?config(proxy_port, Config),
emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
delete_all_bridges(),
ok.
init_per_testcase(TestCase, Config0, BridgeConfigCb) ->
ct:timetrap(timer:seconds(60)),
delete_all_bridges(),
UniqueNum = integer_to_binary(erlang:unique_integer()),
BridgeTopic =
<<
(atom_to_binary(TestCase))/binary,
UniqueNum/binary
>>,
TestGroup = ?config(test_group, Config0),
Config = [{bridge_topic, BridgeTopic} | Config0],
{Name, ConfigString, BridgeConfig} = BridgeConfigCb(
TestCase, TestGroup, Config
),
ok = snabbkaffe:start_trace(),
[
{bridge_name, Name},
{bridge_config_string, ConfigString},
{bridge_config, BridgeConfig}
| Config
].
end_per_testcase(_Testcase, Config) ->
case proplists:get_bool(skip_does_not_apply, Config) of
true ->
ok;
false ->
ProxyHost = ?config(proxy_host, Config),
ProxyPort = ?config(proxy_port, Config),
emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
delete_all_bridges(),
%% in CI, apparently this needs more time since the
%% machines struggle with all the containers running...
emqx_common_test_helpers:call_janitor(60_000),
ok = snabbkaffe:stop(),
ok
end.
delete_all_bridges() ->
lists:foreach(
fun(#{name := Name, type := Type}) ->
emqx_bridge:remove(Type, Name)
end,
emqx_bridge:list()
).
%% test helpers
parse_and_check(Config, ConfigString, Name) ->
BridgeType = ?config(bridge_type, Config),
{ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
#{<<"bridges">> := #{BridgeType := #{Name := BridgeConfig}}} = RawConf,
BridgeConfig.
resource_id(Config) ->
BridgeType = ?config(bridge_type, Config),
Name = ?config(bridge_name, Config),
emqx_bridge_resource:resource_id(BridgeType, Name).
create_bridge(Config) ->
create_bridge(Config, _Overrides = #{}).
create_bridge(Config, Overrides) ->
BridgeType = ?config(bridge_type, Config),
Name = ?config(bridge_name, Config),
BridgeConfig0 = ?config(bridge_config, Config),
BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
emqx_bridge:create(BridgeType, Name, BridgeConfig).
create_bridge_api(Config) ->
create_bridge_api(Config, _Overrides = #{}).
create_bridge_api(Config, Overrides) ->
BridgeType = ?config(bridge_type, Config),
Name = ?config(bridge_name, Config),
BridgeConfig0 = ?config(bridge_config, Config),
BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => Name},
Path = emqx_mgmt_api_test_util:api_path(["bridges"]),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
Opts = #{return_all => true},
ct:pal("creating bridge (via http): ~p", [Params]),
Res =
case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of
{ok, {Status, Headers, Body0}} ->
{ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}};
Error ->
Error
end,
ct:pal("bridge create result: ~p", [Res]),
Res.
update_bridge_api(Config) ->
update_bridge_api(Config, _Overrides = #{}).
update_bridge_api(Config, Overrides) ->
BridgeType = ?config(bridge_type, Config),
Name = ?config(bridge_name, Config),
BridgeConfig0 = ?config(bridge_config, Config),
BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
BridgeId = emqx_bridge_resource:bridge_id(BridgeType, Name),
Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => Name},
Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
Opts = #{return_all => true},
ct:pal("updating bridge (via http): ~p", [Params]),
Res =
case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, Params, Opts) of
{ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])};
Error -> Error
end,
ct:pal("bridge update result: ~p", [Res]),
Res.
probe_bridge_api(Config) ->
probe_bridge_api(Config, _Overrides = #{}).
probe_bridge_api(Config, _Overrides) ->
BridgeType = ?config(bridge_type, Config),
Name = ?config(bridge_name, Config),
BridgeConfig = ?config(bridge_config, Config),
Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => Name},
Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
Opts = #{return_all => true},
ct:pal("probing bridge (via http): ~p", [Params]),
Res =
case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of
{ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0};
Error -> Error
end,
ct:pal("bridge probe result: ~p", [Res]),
Res.
create_rule_and_action_http(BridgeType, RuleTopic, Config) ->
BridgeName = ?config(bridge_name, Config),
BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
Params = #{
enable => true,
sql => <<"SELECT * FROM \"", RuleTopic/binary, "\"">>,
actions => [BridgeId]
},
Path = emqx_mgmt_api_test_util:api_path(["rules"]),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
ct:pal("rule action params: ~p", [Params]),
case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of
{ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])};
Error -> Error
end.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
t_sync_query(Config, MakeMessageFun, IsSuccessCheck) ->
ResourceId = resource_id(Config),
?check_trace(
begin
?assertMatch({ok, _}, create_bridge_api(Config)),
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
Message = {send_message, MakeMessageFun()},
IsSuccessCheck(emqx_resource:simple_sync_query(ResourceId, Message)),
ok
end,
[]
),
ok.
t_async_query(Config, MakeMessageFun, IsSuccessCheck) ->
ResourceId = resource_id(Config),
ReplyFun =
fun(Pid, Result) ->
Pid ! {result, Result}
end,
?check_trace(
begin
?assertMatch({ok, _}, create_bridge_api(Config)),
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
Message = {send_message, MakeMessageFun()},
emqx_resource:query(ResourceId, Message, #{async_reply_fun => {ReplyFun, [self()]}}),
ok
end,
[]
),
receive
{result, Result} -> IsSuccessCheck(Result)
after 5_000 ->
throw(timeout)
end,
ok.
t_create_via_http(Config) ->
?check_trace(
begin
?assertMatch({ok, _}, create_bridge_api(Config)),
%% lightweight matrix testing some configs
?assertMatch(
{ok, _},
update_bridge_api(
Config
)
),
?assertMatch(
{ok, _},
update_bridge_api(
Config
)
),
ok
end,
[]
),
ok.
t_start_stop(Config, StopTracePoint) ->
BridgeType = ?config(bridge_type, Config),
BridgeName = ?config(bridge_name, Config),
ResourceId = resource_id(Config),
?check_trace(
begin
?assertMatch({ok, _}, create_bridge(Config)),
%% Since the connection process is async, we give it some time to
%% stabilize and avoid flakiness.
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
%% Check that the bridge probe API doesn't leak atoms.
ProbeRes0 = probe_bridge_api(
Config,
#{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}}
),
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
AtomsBefore = erlang:system_info(atom_count),
%% Probe again; shouldn't have created more atoms.
ProbeRes1 = probe_bridge_api(
Config,
#{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}}
),
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
AtomsAfter = erlang:system_info(atom_count),
?assertEqual(AtomsBefore, AtomsAfter),
%% Now stop the bridge.
?assertMatch(
{{ok, _}, {ok, _}},
?wait_async_action(
emqx_bridge:disable_enable(disable, BridgeType, BridgeName),
#{?snk_kind := StopTracePoint},
5_000
)
),
ok
end,
fun(Trace) ->
%% one for each probe, one for real
?assertMatch([_, _, _], ?of_kind(StopTracePoint, Trace)),
ok
end
),
ok.
t_on_get_status(Config) ->
ProxyPort = ?config(proxy_port, Config),
ProxyHost = ?config(proxy_host, Config),
ProxyName = ?config(proxy_name, Config),
ResourceId = resource_id(Config),
?assertMatch({ok, _}, create_bridge(Config)),
%% Since the connection process is async, we give it some time to
%% stabilize and avoid flakiness.
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
ct:sleep(500),
?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId))
end),
%% Check that it recovers itself.
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
ok.

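A sketch of how a concrete bridge suite is expected to delegate to this helper module; the suite name, app name, and trace point below are illustrative placeholders, not part of this change:

    -module(emqx_bridge_example_SUITE).
    -compile(nowarn_export_all).
    -compile(export_all).

    init_per_suite(Config) ->
        emqx_bridge_testlib:init_per_suite(Config, [emqx_bridge_example]).

    end_per_suite(Config) ->
        emqx_bridge_testlib:end_per_suite(Config).

    t_start_stop(Config) ->
        emqx_bridge_testlib:t_start_stop(Config, example_bridge_stopped).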
View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this License's text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this License's text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,41 @@
# EMQX Cassandra Bridge
[Apache Cassandra](https://github.com/apache/cassandra) is an open-source, distributed
NoSQL database management system that is designed to manage large amounts of structured
and semi-structured data across many commodity servers, providing high availability
with no single point of failure.
It is commonly used in web and mobile applications, IoT, and other systems that
require storing, querying, and analyzing large amounts of data.
This application connects EMQX to Cassandra. Users can create rules to
easily ingest IoT data into Cassandra by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
<!---
# Documentation
- Refer to [Ingest data into Cassandra](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-cassa.html)
for how to use EMQX dashboard to ingest IoT data into Cassandra.
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
--->
# HTTP APIs
- Several APIs are provided for bridge management, including creating,
updating, getting, stopping or restarting, and listing bridges.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,2 @@
toxiproxy
cassandra

View File

@ -0,0 +1,5 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-define(CASSANDRA_DEFAULT_PORT, 9042).

View File

@ -0,0 +1,11 @@
%% -*- mode: erlang; -*-
{erl_opts, [debug_info]}.
{deps, [ {ecql, {git, "https://github.com/emqx/ecql.git", {tag, "v0.5.1"}}}
, {emqx_connector, {path, "../../apps/emqx_connector"}}
, {emqx_resource, {path, "../../apps/emqx_resource"}}
, {emqx_bridge, {path, "../../apps/emqx_bridge"}}
]}.
{shell, [
{apps, [emqx_bridge_cassandra]}
]}.

View File

@ -0,0 +1,9 @@
{application, emqx_bridge_cassandra, [
{description, "EMQX Enterprise Cassandra Bridge"},
{vsn, "0.1.1"},
{registered, []},
{applications, [kernel, stdlib, ecql]},
{env, []},
{modules, []},
{links, []}
]}.

View File

@ -1,7 +1,7 @@
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. %% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-module(emqx_ee_bridge_cassa). -module(emqx_bridge_cassandra).
-include_lib("typerefl/include/types.hrl"). -include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl"). -include_lib("hocon/include/hoconsc.hrl").
@ -88,7 +88,7 @@ fields("config") ->
#{desc => ?DESC("local_topic"), default => undefined} #{desc => ?DESC("local_topic"), default => undefined}
)} )}
] ++ emqx_resource_schema:fields("resource_opts") ++ ] ++ emqx_resource_schema:fields("resource_opts") ++
(emqx_ee_connector_cassa:fields(config) -- (emqx_bridge_cassandra_connector:fields(config) --
emqx_connector_schema_lib:prepare_statement_fields()); emqx_connector_schema_lib:prepare_statement_fields());
fields("post") -> fields("post") ->
fields("post", cassandra); fields("post", cassandra);

View File

@ -2,12 +2,12 @@
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. %% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-module(emqx_ee_connector_cassa). -module(emqx_bridge_cassandra_connector).
-behaviour(emqx_resource). -behaviour(emqx_resource).
-include_lib("emqx_connector/include/emqx_connector.hrl"). -include_lib("emqx_connector/include/emqx_connector.hrl").
-include_lib("emqx_ee_connector/include/emqx_ee_connector.hrl"). -include("emqx_bridge_cassandra.hrl").
-include_lib("typerefl/include/types.hrl"). -include_lib("typerefl/include/types.hrl").
-include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/logger.hrl").
-include_lib("hocon/include/hoconsc.hrl"). -include_lib("hocon/include/hoconsc.hrl").
@ -44,7 +44,7 @@
-type state() :: -type state() ::
#{ #{
poolname := atom(), pool_name := binary(),
prepare_cql := prepares(), prepare_cql := prepares(),
params_tokens := params_tokens(), params_tokens := params_tokens(),
%% returned by ecql:prepare/2 %% returned by ecql:prepare/2
@ -92,7 +92,7 @@ callback_mode() -> async_if_possible.
on_start( on_start(
InstId, InstId,
#{ #{
servers := Servers, servers := Servers0,
keyspace := Keyspace, keyspace := Keyspace,
username := Username, username := Username,
pool_size := PoolSize, pool_size := PoolSize,
@ -104,9 +104,16 @@ on_start(
connector => InstId, connector => InstId,
config => emqx_utils:redact(Config) config => emqx_utils:redact(Config)
}), }),
Servers =
lists:map(
fun(#{hostname := Host, port := Port}) ->
{Host, Port}
end,
emqx_schema:parse_servers(Servers0, ?DEFAULT_SERVER_OPTION)
),
Options = [ Options = [
{nodes, emqx_schema:parse_servers(Servers, ?DEFAULT_SERVER_OPTION)}, {nodes, Servers},
{username, Username}, {username, Username},
{password, emqx_secret:wrap(maps:get(password, Config, ""))}, {password, emqx_secret:wrap(maps:get(password, Config, ""))},
{keyspace, Keyspace}, {keyspace, Keyspace},
@ -124,14 +131,10 @@ on_start(
false -> false ->
[] []
end, end,
%% Before:
%% use InstaId of binary type as Pool name, which is supported in ecpool.
PoolName = InstId,
Prepares = parse_prepare_cql(Config),
InitState = #{poolname => PoolName, prepare_statement => #{}},
State = maps:merge(InitState, Prepares),
case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of
    ok ->
        {ok, init_prepare(State)};
%% After:
State = parse_prepare_cql(Config),
case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of
    ok ->
        {ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})};
{error, Reason} -> {error, Reason} ->
?tp( ?tp(
cassandra_connector_start_failed, cassandra_connector_start_failed,
@ -140,12 +143,12 @@ on_start(
{error, Reason} {error, Reason}
end. end.
on_stop(InstId, #{poolname := PoolName}) -> on_stop(InstId, #{pool_name := PoolName}) ->
?SLOG(info, #{ ?SLOG(info, #{
msg => "stopping_cassandra_connector", msg => "stopping_cassandra_connector",
connector => InstId connector => InstId
}), }),
emqx_plugin_libs_pool:stop_pool(PoolName). emqx_resource_pool:stop(PoolName).
-type request() :: -type request() ::
% emqx_bridge.erl % emqx_bridge.erl
@ -184,7 +187,7 @@ do_single_query(
InstId, InstId,
Request, Request,
Async, Async,
#{poolname := PoolName} = State #{pool_name := PoolName} = State
) -> ) ->
{Type, PreparedKeyOrSQL, Params} = parse_request_to_cql(Request), {Type, PreparedKeyOrSQL, Params} = parse_request_to_cql(Request),
?tp( ?tp(
@ -232,7 +235,7 @@ do_batch_query(
InstId, InstId,
Requests, Requests,
Async, Async,
#{poolname := PoolName} = State #{pool_name := PoolName} = State
) -> ) ->
CQLs = CQLs =
lists:map( lists:map(
@ -312,8 +315,8 @@ exec_cql_batch_query(InstId, PoolName, Async, CQLs) ->
exec(PoolName, Query) -> exec(PoolName, Query) ->
ecpool:pick_and_do(PoolName, Query, no_handover). ecpool:pick_and_do(PoolName, Query, no_handover).
on_get_status(_InstId, #{poolname := Pool} = State) -> on_get_status(_InstId, #{pool_name := PoolName} = State) ->
case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of
true -> true ->
case do_check_prepares(State) of case do_check_prepares(State) of
ok -> ok ->
@ -334,7 +337,7 @@ do_get_status(Conn) ->
do_check_prepares(#{prepare_cql := Prepares}) when is_map(Prepares) -> do_check_prepares(#{prepare_cql := Prepares}) when is_map(Prepares) ->
ok; ok;
do_check_prepares(State = #{poolname := PoolName, prepare_cql := {error, Prepares}}) -> do_check_prepares(State = #{pool_name := PoolName, prepare_cql := {error, Prepares}}) ->
%% retry to prepare %% retry to prepare
case prepare_cql(Prepares, PoolName) of case prepare_cql(Prepares, PoolName) of
{ok, Sts} -> {ok, Sts} ->
@ -410,7 +413,7 @@ parse_prepare_cql([], Prepares, Tokens) ->
params_tokens => Tokens params_tokens => Tokens
}. }.
init_prepare(State = #{prepare_cql := Prepares, poolname := PoolName}) -> init_prepare(State = #{prepare_cql := Prepares, pool_name := PoolName}) ->
case maps:size(Prepares) of case maps:size(Prepares) of
0 -> 0 ->
State; State;
@@ -442,17 +445,17 @@ prepare_cql(Prepares, PoolName) ->
    end.

do_prepare_cql(Prepares, PoolName) ->
    do_prepare_cql(ecpool:workers(PoolName), Prepares, #{}).

do_prepare_cql([{_Name, Worker} | T], Prepares, _LastSts) ->
    {ok, Conn} = ecpool_worker:client(Worker),
    case prepare_cql_to_conn(Conn, Prepares) of
        {ok, Sts} ->
            do_prepare_cql(T, Prepares, Sts);
        Error ->
            Error
    end;
do_prepare_cql([], _Prepares, LastSts) ->
    {ok, LastSts}.

prepare_cql_to_conn(Conn, Prepares) ->
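With the pool name no longer threaded through the recursion, `do_prepare_cql/3` is effectively a fold over the pool's workers that keeps the last successful statement map and stops at the first error. A hedged, equivalent formulation (`prepare_on_workers/2` is an illustrative name inside the same module; it assumes `prepare_cql_to_conn/2` returns `{ok, Statements}` or an error term, as above):

%% Sketch of a drop-in equivalent; not part of the diff.
prepare_on_workers(Workers, Prepares) ->
    lists:foldl(
        fun({_Name, Worker}, {ok, _LastSts}) ->
                {ok, Conn} = ecpool_worker:client(Worker),
                prepare_cql_to_conn(Conn, Prepares);
            (_WorkerEntry, Error) ->
                Error
        end,
        {ok, #{}},
        Workers
    ).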

View File

@@ -2,7 +2,7 @@
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_cassandra_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
@@ -57,7 +57,7 @@
%% CASSA_TCP_HOST=127.0.0.1 CASSA_TCP_PORT=19042 \
%% CASSA_TLS_HOST=127.0.0.1 CASSA_TLS_PORT=19142 \
%% PROXY_HOST=127.0.0.1 ./rebar3 as test ct -c -v --name ct@127.0.0.1 \
%% --suite apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl
%%
%%------------------------------------------------------------------------------
@@ -530,15 +530,16 @@ t_write_failure(Config) ->
        fun(Trace0) ->
            ct:pal("trace: ~p", [Trace0]),
            Trace = ?of_kind(buffer_worker_flush_nack, Trace0),
            [#{result := Result} | _] = Trace,
            case Result of
                {async_return, {error, {resource_error, _}}} ->
                    ok;
                {async_return, {error, {recoverable_error, disconnected}}} ->
                    ok;
                {error, {resource_error, _}} ->
                    ok;
                _ ->
                    ct:fail("unexpected error: ~p", [Result])
            end
        end
    ),
@@ -589,7 +590,7 @@ t_missing_data(Config) ->
        {ok, _},
        create_bridge(Config)
    ),
    %% emqx_bridge_cassandra_connector will send missing data as a `null` atom
    %% to the ecql driver
    ?check_trace(
        begin

View File

@@ -2,13 +2,13 @@
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_cassandra_connector_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include("emqx_bridge_cassandra.hrl").
-include("emqx_connector/include/emqx_connector.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("emqx/include/emqx.hrl").
-include_lib("stdlib/include/assert.hrl").
@@ -16,7 +16,7 @@
%% Cassandra server defined at `.ci/docker-compose-file/docker-compose-cassandra-tcp.yaml`
%% You can change it to `127.0.0.1`, if you run this SUITE locally
-define(CASSANDRA_HOST, "cassandra").
-define(CASSANDRA_RESOURCE_MOD, emqx_bridge_cassandra_connector).
%% This test SUITE requires a running cassandra instance. If you don't want to
%% bring up the whole CI infrastructure with the `scripts/ct/run.sh` script
@@ -38,9 +38,14 @@ groups() ->
    [].

cassandra_servers() ->
    lists:map(
        fun(#{hostname := Host, port := Port}) ->
            {Host, Port}
        end,
        emqx_schema:parse_servers(
            iolist_to_binary([?CASSANDRA_HOST, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]),
            #{default_port => ?CASSANDRA_DEFAULT_PORT}
        )
    ).
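The new `lists:map/2` wrapper suggests `emqx_schema:parse_servers/2` now yields maps with `hostname` and `port` keys rather than ready-made tuples. A hedged sketch of just that conversion (the hard-coded list stands in for the parser's assumed result and is not taken from the diff):

%% Illustrative only.
servers_to_tuples() ->
    Parsed = [#{hostname => "cassandra", port => 9042}],
    lists:map(fun(#{hostname := Host, port := Port}) -> {Host, Port} end, Parsed).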
init_per_suite(Config) ->
@@ -101,15 +106,15 @@ show(Label, What) ->
    erlang:display({Label, What}),
    What.

perform_lifecycle_check(ResourceId, InitialConfig) ->
    {ok, #{config := CheckedConfig}} =
        emqx_resource:check_config(?CASSANDRA_RESOURCE_MOD, InitialConfig),
    {ok, #{
        state := #{pool_name := PoolName} = State,
        status := InitialStatus
    }} =
        emqx_resource:create_local(
            ResourceId,
            ?CONNECTOR_RESOURCE_GROUP,
            ?CASSANDRA_RESOURCE_MOD,
            CheckedConfig,
@@ -121,45 +126,45 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
        state := State,
        status := InitialStatus
    }} =
        emqx_resource:get_instance(ResourceId),
    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
    % % Perform query as further check that the resource is working as expected
    (fun() ->
        erlang:display({pool_name, ResourceId}),
        QueryNoParamsResWrapper = emqx_resource:query(ResourceId, test_query_no_params()),
        ?assertMatch({ok, _}, QueryNoParamsResWrapper)
    end)(),
    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
    % Resource will be listed still, but state will be changed and healthcheck will fail
    % as the worker no longer exists.
    {ok, ?CONNECTOR_RESOURCE_GROUP, #{
        state := State,
        status := StoppedStatus
    }} =
        emqx_resource:get_instance(ResourceId),
    ?assertEqual(stopped, StoppedStatus),
    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)),
    % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
    % Can call stop/1 again on an already stopped instance
    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
    % Make sure it can be restarted and the healthchecks and queries work properly
    ?assertEqual(ok, emqx_resource:restart(ResourceId)),
    % async restart, need to wait for the resource
    timer:sleep(500),
    {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
        emqx_resource:get_instance(ResourceId),
    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
    (fun() ->
        QueryNoParamsResWrapper =
            emqx_resource:query(ResourceId, test_query_no_params()),
        ?assertMatch({ok, _}, QueryNoParamsResWrapper)
    end)(),
    % Stop and remove the resource in one go.
    ?assertEqual(ok, emqx_resource:remove_local(ResourceId)),
    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
    % Should not even be able to get the resource data out of ets now unlike just stopping.
    ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)).
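The renamed variables make the contract clearer: `ResourceId` is the public handle passed to `emqx_resource`, while `pool_name` is connector-internal state. A hedged outline of the lifecycle the suite walks through (the `create_local/5` options map and the synchronous sequencing are assumptions; the suite itself sleeps after the async restart):

%% Illustrative outline only; mirrors the assertions above.
lifecycle_outline(Res, Group, Mod, Cfg) ->
    {ok, _} = emqx_resource:create_local(Res, Group, Mod, Cfg, #{}),
    {ok, connected} = emqx_resource:health_check(Res),
    ok = emqx_resource:stop(Res),
    {error, resource_is_stopped} = emqx_resource:health_check(Res),
    ok = emqx_resource:restart(Res),
    {ok, connected} = emqx_resource:health_check(Res),
    ok = emqx_resource:remove_local(Res),
    {error, not_found} = emqx_resource:get_instance(Res).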
%%--------------------------------------------------------------------
%% utils

View File

@@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this License’s text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this License’s text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@@ -0,0 +1,37 @@
# EMQX ClickHouse Bridge
[ClickHouse](https://github.com/ClickHouse/ClickHouse) is an open-source, column-oriented
database management system. It is designed for real-time processing of large volumes of
data and is known for its high performance and scalability.
This application is used to connect EMQX and ClickHouse.
Users can create a rule and easily ingest IoT data into ClickHouse by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html), as sketched below.
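For illustration only, a hedged Erlang sketch of creating such a rule through the management HTTP API; the listener address, the API key pair, and the bridge name `clickhouse:demo` are assumptions, not values from this repository:

```erlang
%% Hedged sketch: create a rule forwarding messages on t/# to a hypothetical
%% ClickHouse bridge named `clickhouse:demo`.
create_rule_sketch() ->
    {ok, _} = application:ensure_all_started(inets),
    Auth = "Basic " ++ base64:encode_to_string("ApiKey:Secret"),
    Body =
        "{\"sql\": \"SELECT * FROM \\\"t/#\\\"\","
        " \"actions\": [\"clickhouse:demo\"]}",
    httpc:request(
        post,
        {"http://127.0.0.1:18083/api/v5/rules", [{"authorization", Auth}],
            "application/json", Body},
        [],
        []
    ).
```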
# Documentation
- Refer to [Ingest data into ClickHouse](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-clickhouse.html)
for how to use EMQX dashboard to ingest IoT data into ClickHouse.
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
# HTTP APIs
- Several APIs are provided for bridge management, covering creating, updating,
  getting, stopping or restarting, and listing bridges.
  Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
  for more detailed information, and see the sketch after this list for a usage example.
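For example, a hedged sketch of listing bridges from Erlang; the address and API key are assumptions:

```erlang
%% Hedged sketch: list all bridges via the management HTTP API.
list_bridges_sketch() ->
    {ok, _} = application:ensure_all_started(inets),
    Auth = "Basic " ++ base64:encode_to_string("ApiKey:Secret"),
    {ok, {{_, 200, _}, _Headers, Json}} =
        httpc:request(
            get,
            {"http://127.0.0.1:18083/api/v5/bridges", [{"authorization", Auth}]},
            [],
            []
        ),
    io:format("~s~n", [Json]).
```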
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

Some files were not shown because too many files have changed in this diff.