Merge remote-tracking branch 'origin/master' into 0503-no-telemetry-app-for-ee

This commit is contained in:
Zaiming (Stone) Shi 2023-05-23 13:03:55 +02:00
commit 3bb1f7ab2b
378 changed files with 13530 additions and 1532 deletions

View File

@ -0,0 +1,31 @@
# Apache IoTDB (standalone) container, used as an integration-test
# dependency (networked on `emqx_bridge`, alongside the other services).
version: '3.9'

services:
  iotdb:
    container_name: iotdb
    hostname: iotdb
    image: apache/iotdb:1.1.0-standalone
    restart: always
    environment:
      # Turn on IoTDB's REST API (exposed below on port 18080).
      - enable_rest_service=true
      # ConfigNode (cn_*) settings: the single node addresses itself.
      - cn_internal_address=iotdb
      - cn_internal_port=10710
      - cn_consensus_port=10720
      - cn_target_config_node_list=iotdb:10710
      # DataNode (dn_*) settings; 6667 is the IoTDB client RPC port.
      - dn_rpc_address=iotdb
      - dn_internal_address=iotdb
      - dn_rpc_port=6667
      - dn_mpp_data_exchange_port=10740
      - dn_schema_region_consensus_port=10750
      - dn_data_region_consensus_port=10760
      - dn_target_config_node_list=iotdb:10710
    # Persistence is intentionally disabled for throwaway test runs;
    # uncomment to keep data/logs across container restarts.
    # volumes:
    #   - ./data:/iotdb/data
    #   - ./logs:/iotdb/logs
    expose:
      - "18080"
    # IoTDB's REST interface, uncomment for local testing
    # ports:
    #   - "18080:18080"
    networks:
      - emqx_bridge

View File

@ -0,0 +1,17 @@
# RabbitMQ broker (with the management plugin) used as an
# integration-test dependency on the `emqx_bridge` network.
version: '3.9'

services:
  rabbitmq:
    container_name: rabbitmq
    image: rabbitmq:3.11-management
    restart: always
    expose:
      # 15672: management HTTP API; 5672: AMQP client port.
      - "15672"
      - "5672"
    # We don't want to take ports from the host
    # ports:
    #   - "15672:15672"
    #   - "5672:5672"
    networks:
      - emqx_bridge

View File

@ -25,8 +25,8 @@ services:
- ./rocketmq/conf/broker.conf:/etc/rocketmq/broker.conf
environment:
NAMESRV_ADDR: "rocketmq_namesrv:9876"
JAVA_OPTS: " -Duser.home=/opt"
JAVA_OPT_EXT: "-server -Xms1024m -Xmx1024m -Xmn1024m"
JAVA_OPTS: " -Duser.home=/opt -Drocketmq.broker.diskSpaceWarningLevelRatio=0.99"
JAVA_OPT_EXT: "-server -Xms512m -Xmx512m -Xmn512m"
command: ./mqbroker -c /etc/rocketmq/broker.conf
depends_on:
- mqnamesrv

View File

@ -27,6 +27,7 @@ services:
- 19042:9042
- 19142:9142
- 14242:4242
- 28080:18080
command:
- "-host=0.0.0.0"
- "-config=/config/toxiproxy.json"

View File

@ -125,5 +125,11 @@
"listen": "0.0.0.0:1521",
"upstream": "oracle:1521",
"enabled": true
},
{
"name": "iotdb",
"listen": "0.0.0.0:18080",
"upstream": "iotdb:18080",
"enabled": true
}
]

View File

@ -25,7 +25,7 @@ jobs:
prepare:
runs-on: ubuntu-22.04
# prepare source with any OTP version, no need for a matrix
container: "ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04"
container: "ghcr.io/emqx/emqx-builder/5.0-35:1.13.4-24.3.4.2-3-ubuntu22.04"
outputs:
PROFILE: ${{ steps.get_profile.outputs.PROFILE }}
@ -121,7 +121,7 @@ jobs:
# NOTE: 'otp' and 'elixir' are to configure emqx-builder image
# only support latest otp and elixir, not a matrix
builder:
- 5.0-34 # update to latest
- 5.0-35 # update to latest
otp:
- 24.3.4.2-3 # switch to 25 once ready to release 5.1
elixir:

View File

@ -21,7 +21,7 @@ on:
jobs:
prepare:
runs-on: ubuntu-22.04
container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04
container: ghcr.io/emqx/emqx-builder/5.0-35:1.13.4-24.3.4.2-3-ubuntu22.04
outputs:
BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }}
IS_EXACT_TAG: ${{ steps.get_profile.outputs.IS_EXACT_TAG }}
@ -35,6 +35,7 @@ jobs:
- name: Get profile to build
id: get_profile
run: |
git config --global --add safe.directory "$GITHUB_WORKSPACE"
tag=${{ github.ref }}
if git describe --tags --match "[v|e]*" --exact; then
echo "WARN: This is an exact git tag, will publish release"
@ -183,7 +184,7 @@ jobs:
- aws-arm64
- ubuntu-22.04
builder:
- 5.0-34
- 5.0-35
elixir:
- 1.13.4
exclude:
@ -197,7 +198,7 @@ jobs:
arch: amd64
os: ubuntu22.04
build_machine: ubuntu-22.04
builder: 5.0-34
builder: 5.0-35
elixir: 1.13.4
release_with: elixir
- profile: emqx
@ -205,7 +206,7 @@ jobs:
arch: amd64
os: amzn2
build_machine: ubuntu-22.04
builder: 5.0-34
builder: 5.0-35
elixir: 1.13.4
release_with: elixir
@ -229,7 +230,7 @@ jobs:
ARCH: ${{ matrix.arch }}
run: |
set -eu
git config --global --add safe.directory "/__w/emqx/emqx"
git config --global --add safe.directory "$GITHUB_WORKSPACE"
# Align path for CMake caches
if [ ! "$PWD" = "/emqx" ]; then
ln -s $PWD /emqx
@ -305,35 +306,3 @@ jobs:
fi
aws s3 cp --recursive packages/$PROFILE s3://${{ secrets.AWS_S3_BUCKET }}/$s3dir/${{ github.ref_name }}
aws cloudfront create-invalidation --distribution-id ${{ secrets.AWS_CLOUDFRONT_ID }} --paths "/$s3dir/${{ github.ref_name }}/*"
- name: Push to packagecloud.io
env:
PROFILE: ${{ matrix.profile }}
VERSION: ${{ needs.prepare.outputs.VERSION }}
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
run: |
set -eu
REPO=$PROFILE
if [ $PROFILE = 'emqx-enterprise' ]; then
REPO='emqx-enterprise5'
fi
function push() {
docker run -t --rm -e PACKAGECLOUD_TOKEN=$PACKAGECLOUD_TOKEN -v $(pwd)/$2:/w/$2 -w /w ghcr.io/emqx/package_cloud push emqx/$REPO/$1 $2
}
push "debian/buster" "packages/$PROFILE/$PROFILE-$VERSION-debian10-amd64.deb"
push "debian/buster" "packages/$PROFILE/$PROFILE-$VERSION-debian10-arm64.deb"
push "debian/bullseye" "packages/$PROFILE/$PROFILE-$VERSION-debian11-amd64.deb"
push "debian/bullseye" "packages/$PROFILE/$PROFILE-$VERSION-debian11-arm64.deb"
push "ubuntu/bionic" "packages/$PROFILE/$PROFILE-$VERSION-ubuntu18.04-amd64.deb"
push "ubuntu/bionic" "packages/$PROFILE/$PROFILE-$VERSION-ubuntu18.04-arm64.deb"
push "ubuntu/focal" "packages/$PROFILE/$PROFILE-$VERSION-ubuntu20.04-amd64.deb"
push "ubuntu/focal" "packages/$PROFILE/$PROFILE-$VERSION-ubuntu20.04-arm64.deb"
push "ubuntu/jammy" "packages/$PROFILE/$PROFILE-$VERSION-ubuntu22.04-amd64.deb"
push "ubuntu/jammy" "packages/$PROFILE/$PROFILE-$VERSION-ubuntu22.04-arm64.deb"
push "el/6" "packages/$PROFILE/$PROFILE-$VERSION-amzn2-amd64.rpm"
push "el/6" "packages/$PROFILE/$PROFILE-$VERSION-amzn2-arm64.rpm"
push "el/7" "packages/$PROFILE/$PROFILE-$VERSION-el7-amd64.rpm"
push "el/7" "packages/$PROFILE/$PROFILE-$VERSION-el7-arm64.rpm"
push "el/8" "packages/$PROFILE/$PROFILE-$VERSION-el8-amd64.rpm"
push "el/8" "packages/$PROFILE/$PROFILE-$VERSION-el8-arm64.rpm"
push "el/9" "packages/$PROFILE/$PROFILE-$VERSION-el9-amd64.rpm"
push "el/9" "packages/$PROFILE/$PROFILE-$VERSION-el9-arm64.rpm"

View File

@ -24,9 +24,6 @@ jobs:
profile:
- ['emqx', 'master']
- ['emqx-enterprise', 'release-50']
branch:
- master
- release-50
otp:
- 24.3.4.2-3
arch:
@ -35,7 +32,7 @@ jobs:
- debian10
- amzn2
builder:
- 5.0-34
- 5.0-35
elixir:
- 1.13.4
@ -57,6 +54,7 @@ jobs:
ARCH: ${{ matrix.arch }}
run: |
set -eu
git config --global --add safe.directory "$GITHUB_WORKSPACE"
PKGTYPES="tgz pkg"
IS_ELIXIR="no"
for PKGTYPE in ${PKGTYPES};

View File

@ -35,7 +35,7 @@ jobs:
- ["emqx-enterprise", "24.3.4.2-3", "amzn2", "erlang"]
- ["emqx-enterprise", "25.1.2-3", "ubuntu20.04", "erlang"]
builder:
- 5.0-34
- 5.0-35
elixir:
- '1.13.4'

View File

@ -6,7 +6,7 @@ on:
jobs:
check_deps_integrity:
runs-on: ubuntu-22.04
container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu22.04
container: ghcr.io/emqx/emqx-builder/5.0-35:1.13.4-25.1.2-3-ubuntu22.04
steps:
- uses: actions/checkout@v3

View File

@ -5,7 +5,7 @@ on: [pull_request]
jobs:
code_style_check:
runs-on: ubuntu-22.04
container: "ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu22.04"
container: "ghcr.io/emqx/emqx-builder/5.0-35:1.13.4-25.1.2-3-ubuntu22.04"
steps:
- uses: actions/checkout@v3
with:

View File

@ -9,7 +9,7 @@ jobs:
elixir_apps_check:
runs-on: ubuntu-22.04
# just use the latest builder
container: "ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu22.04"
container: "ghcr.io/emqx/emqx-builder/5.0-35:1.13.4-25.1.2-3-ubuntu22.04"
strategy:
fail-fast: false

View File

@ -8,7 +8,7 @@ on:
jobs:
elixir_deps_check:
runs-on: ubuntu-22.04
container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu22.04
container: ghcr.io/emqx/emqx-builder/5.0-35:1.13.4-25.1.2-3-ubuntu22.04
steps:
- name: Checkout

View File

@ -17,7 +17,7 @@ jobs:
profile:
- emqx
- emqx-enterprise
container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu22.04
container: ghcr.io/emqx/emqx-builder/5.0-35:1.13.4-25.1.2-3-ubuntu22.04
steps:
- name: Checkout
uses: actions/checkout@v3

View File

@ -15,7 +15,7 @@ jobs:
prepare:
runs-on: ubuntu-latest
if: github.repository_owner == 'emqx'
container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu20.04
container: ghcr.io/emqx/emqx-builder/5.0-35:1.13.4-25.1.2-3-ubuntu20.04
outputs:
BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }}
PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }}
@ -51,11 +51,10 @@ jobs:
needs:
- prepare
env:
TF_VAR_bench_id: ${{ needs.prepare.outputs.BENCH_ID }}
TF_VAR_package_file: ${{ needs.prepare.outputs.PACKAGE_FILE }}
TF_VAR_test_duration: 300
TF_VAR_grafana_api_key: ${{ secrets.TF_EMQX_PERF_TEST_GRAFANA_API_KEY }}
TF_AWS_REGION: eu-north-1
TF_VAR_test_duration: 1800
steps:
- name: Configure AWS Credentials
@ -77,38 +76,37 @@ jobs:
uses: hashicorp/setup-terraform@v2
with:
terraform_wrapper: false
- name: terraform init
- name: 1on1 scenario
id: scenario_1on1
working-directory: ./tf-emqx-performance-test
timeout-minutes: 60
env:
TF_VAR_bench_id: "${{ needs.prepare.outputs.BENCH_ID }}/1on1"
TF_VAR_use_emqttb: 1
TF_VAR_use_emqtt_bench: 0
TF_VAR_emqttb_instance_count: 2
TF_VAR_emqttb_instance_type: "c5.large"
TF_VAR_emqttb_scenario: "@pub --topic 't/%n' --pubinterval 10ms --qos 1 --publatency 50ms --size 16 --num-clients 25000 @sub --topic 't/%n' --num-clients 25000"
TF_VAR_emqx_instance_type: "c5.xlarge"
TF_VAR_emqx_instance_count: 3
run: |
terraform init
- name: terraform apply
working-directory: ./tf-emqx-performance-test
run: |
terraform apply -auto-approve
- name: Wait for test results
timeout-minutes: 30
working-directory: ./tf-emqx-performance-test
id: test-results
run: |
sleep $TF_VAR_test_duration
until aws s3api head-object --bucket tf-emqx-performance-test --key "$TF_VAR_bench_id/DONE" > /dev/null 2>&1
do
printf '.'
sleep 10
done
echo
aws s3 cp "s3://tf-emqx-performance-test/$TF_VAR_bench_id/metrics.json" ./
aws s3 cp "s3://tf-emqx-performance-test/$TF_VAR_bench_id/stats.json" ./
echo MESSAGES_DELIVERED=$(cat metrics.json | jq '[.[]."messages.delivered"] | add') >> $GITHUB_OUTPUT
echo MESSAGES_DROPPED=$(cat metrics.json | jq '[.[]."messages.dropped"] | add') >> $GITHUB_OUTPUT
./wait-emqttb.sh
./fetch-metrics.sh
MESSAGES_RECEIVED=$(cat metrics.json | jq '[.[]."messages.received"] | add')
MESSAGES_SENT=$(cat metrics.json | jq '[.[]."messages.sent"] | add')
echo MESSAGES_DROPPED=$(cat metrics.json | jq '[.[]."messages.dropped"] | add') >> $GITHUB_OUTPUT
echo PUB_MSG_RATE=$(($MESSAGES_RECEIVED / $TF_VAR_test_duration)) >> $GITHUB_OUTPUT
echo SUB_MSG_RATE=$(($MESSAGES_SENT / $TF_VAR_test_duration)) >> $GITHUB_OUTPUT
terraform destroy -auto-approve
- name: Send notification to Slack
if: success()
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
with:
payload: |
{"text": "EMQX performance test completed.\nMessages delivered: ${{ steps.test-results.outputs.MESSAGES_DELIVERED }}.\nMessages dropped: ${{ steps.test-results.outputs.MESSAGES_DROPPED }}.\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
{"text": "Performance test result for 1on1 scenario (50k pub, 50k sub): ${{ job.status }}\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Pub message rate*: ${{ steps.scenario_1on1.outputs.PUB_MSG_RATE }}\n*Sub message rate*: ${{ steps.scenario_1on1.outputs.SUB_MSG_RATE }}\nDropped messages: ${{ steps.scenario_1on1.outputs.MESSAGES_DROPPED }}"}
- name: terraform destroy
if: always()
working-directory: ./tf-emqx-performance-test
@ -117,10 +115,10 @@ jobs:
- uses: actions/upload-artifact@v3
if: success()
with:
name: test-results
path: "./tf-emqx-performance-test/*.json"
name: metrics
path: "./tf-emqx-performance-test/metrics.json"
- uses: actions/upload-artifact@v3
if: always()
if: failure()
with:
name: terraform
path: |

View File

@ -15,7 +15,7 @@ on:
jobs:
upload:
runs-on: ubuntu-20.04
runs-on: ubuntu-22.04
strategy:
fail-fast: false
steps:
@ -53,16 +53,6 @@ jobs:
BUCKET=${{ secrets.AWS_S3_BUCKET }}
OUTPUT_DIR=${{ steps.profile.outputs.s3dir }}
aws s3 cp --recursive s3://$BUCKET/$OUTPUT_DIR/${{ github.ref_name }} packages
cd packages
DEFAULT_BEAM_PLATFORM='otp24.3.4.2-3'
# all packages including full-name and default-name are uploaded to s3
# but we only upload default-name packages (and elixir) as github artifacts
# so we rename (overwrite) non-default packages before uploading
while read -r fname; do
default_fname=$(echo "$fname" | sed "s/-${DEFAULT_BEAM_PLATFORM}//g")
echo "$fname -> $default_fname"
mv -f "$fname" "$default_fname"
done < <(find . -maxdepth 1 -type f | grep -E "emqx(-enterprise)?-5\.[0-9]+\.[0-9]+.*-${DEFAULT_BEAM_PLATFORM}" | grep -v elixir)
- uses: alexellis/upload-assets@0.4.0
env:
GITHUB_TOKEN: ${{ github.token }}
@ -79,3 +69,35 @@ jobs:
-X POST \
-d "{\"repo\":\"emqx/emqx\", \"tag\": \"${{ github.ref_name }}\" }" \
${{ secrets.EMQX_IO_RELEASE_API }}
- name: Push to packagecloud.io
env:
PROFILE: ${{ steps.profile.outputs.profile }}
VERSION: ${{ steps.profile.outputs.version }}
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
run: |
set -eu
REPO=$PROFILE
if [ $PROFILE = 'emqx-enterprise' ]; then
REPO='emqx-enterprise5'
fi
function push() {
docker run -t --rm -e PACKAGECLOUD_TOKEN=$PACKAGECLOUD_TOKEN -v $(pwd)/$2:/w/$2 -w /w ghcr.io/emqx/package_cloud push emqx/$REPO/$1 $2
}
push "debian/buster" "packages/$PROFILE-$VERSION-debian10-amd64.deb"
push "debian/buster" "packages/$PROFILE-$VERSION-debian10-arm64.deb"
push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-amd64.deb"
push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-arm64.deb"
push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-amd64.deb"
push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-arm64.deb"
push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-amd64.deb"
push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-arm64.deb"
push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-amd64.deb"
push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-arm64.deb"
push "el/6" "packages/$PROFILE-$VERSION-amzn2-amd64.rpm"
push "el/6" "packages/$PROFILE-$VERSION-amzn2-arm64.rpm"
push "el/7" "packages/$PROFILE-$VERSION-el7-amd64.rpm"
push "el/7" "packages/$PROFILE-$VERSION-el7-arm64.rpm"
push "el/8" "packages/$PROFILE-$VERSION-el8-amd64.rpm"
push "el/8" "packages/$PROFILE-$VERSION-el8-arm64.rpm"
push "el/9" "packages/$PROFILE-$VERSION-el9-amd64.rpm"
push "el/9" "packages/$PROFILE-$VERSION-el9-arm64.rpm"

View File

@ -12,7 +12,7 @@ jobs:
strategy:
matrix:
builder:
- 5.0-34
- 5.0-35
otp:
- 24.3.4.2-3
- 25.1.2-3

View File

@ -17,7 +17,7 @@ jobs:
prepare:
runs-on: ubuntu-22.04
# prepare source with any OTP version, no need for a matrix
container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-debian11
container: ghcr.io/emqx/emqx-builder/5.0-35:1.13.4-24.3.4.2-3-debian11
steps:
- uses: actions/checkout@v3
@ -50,7 +50,7 @@ jobs:
os:
- ["debian11", "debian:11-slim"]
builder:
- 5.0-34
- 5.0-35
otp:
- 24.3.4.2-3
elixir:
@ -123,7 +123,7 @@ jobs:
os:
- ["debian11", "debian:11-slim"]
builder:
- 5.0-34
- 5.0-35
otp:
- 24.3.4.2-3
elixir:

View File

@ -15,7 +15,7 @@ concurrency:
jobs:
relup_test_plan:
runs-on: ubuntu-22.04
container: "ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04"
container: "ghcr.io/emqx/emqx-builder/5.0-35:1.13.4-24.3.4.2-3-ubuntu22.04"
outputs:
CUR_EE_VSN: ${{ steps.find-versions.outputs.CUR_EE_VSN }}
OLD_VERSIONS: ${{ steps.find-versions.outputs.OLD_VERSIONS }}

View File

@ -34,12 +34,12 @@ jobs:
MATRIX="$(echo "${APPS}" | jq -c '
[
(.[] | select(.profile == "emqx") | . + {
builder: "5.0-34",
builder: "5.0-35",
otp: "25.1.2-3",
elixir: "1.13.4"
}),
(.[] | select(.profile == "emqx-enterprise") | . + {
builder: "5.0-34",
builder: "5.0-35",
otp: ["24.3.4.2-3", "25.1.2-3"][],
elixir: "1.13.4"
})
@ -109,7 +109,9 @@ jobs:
- uses: actions/cache@v3
with:
path: "source/emqx_dialyzer_${{ matrix.otp }}_plt"
key: rebar3-dialyzer-plt-${{ matrix.profile }}-${{ matrix.otp }}
key: rebar3-dialyzer-plt-${{ matrix.profile }}-${{ matrix.otp }}-${{ hashFiles('source/rebar.*', 'source/apps/*/rebar.*', 'source/lib-ee/*/rebar.*') }}
restore-keys: |
rebar3-dialyzer-plt-${{ matrix.profile }}-${{ matrix.otp }}-
- name: run static checks
env:
PROFILE: ${{ matrix.profile }}
@ -255,7 +257,7 @@ jobs:
- ct
- ct_docker
runs-on: ubuntu-22.04
container: "ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04"
container: "ghcr.io/emqx/emqx-builder/5.0-35:1.13.4-24.3.4.2-3-ubuntu22.04"
steps:
- uses: AutoModality/action-clean@v1
- uses: actions/download-artifact@v3

View File

@ -4,12 +4,6 @@ SCRIPTS = $(CURDIR)/scripts
export EMQX_RELUP ?= true
export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-debian11
export EMQX_DEFAULT_RUNNER = debian:11-slim
export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
export EMQX_DASHBOARD_VERSION ?= v1.2.4
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6
export EMQX_REL_FORM ?= tgz
export QUICER_DOWNLOAD_FROM_RELEASE = 1
ifeq ($(OS),Windows_NT)
@ -19,6 +13,22 @@ else
FIND=find
endif
# Dashbord version
# from https://github.com/emqx/emqx-dashboard5
export EMQX_DASHBOARD_VERSION ?= v1.2.4-1
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6
# `:=` should be used here, otherwise the `$(shell ...)` will be executed every time when the variable is used
# In make 4.4+, for backward-compatibility the value from the original environment is used.
# so the shell script will be executed tons of times.
# https://github.com/emqx/emqx/pull/10627
ifeq ($(strip $(OTP_VSN)),)
export OTP_VSN := $(shell $(SCRIPTS)/get-otp-vsn.sh)
endif
ifeq ($(strip $(ELIXIR_VSN)),)
export ELIXIR_VSN := $(shell $(SCRIPTS)/get-elixir-vsn.sh)
endif
PROFILE ?= emqx
REL_PROFILES := emqx emqx-enterprise
PKG_PROFILES := emqx-pkg emqx-enterprise-pkg
@ -169,6 +179,7 @@ clean-all:
@rm -f rebar.lock
@rm -rf deps
@rm -rf _build
@rm -f emqx_dialyzer_*_plt
.PHONY: deps-all
deps-all: $(REBAR) $(PROFILES:%=deps-%)

View File

@ -0,0 +1,61 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------

%% This file contains common macros for testing.
%% It must not be used anywhere except in test suites.

-include_lib("snabbkaffe/include/snabbkaffe.hrl").

%% Run `Code' and assert (via snabbkaffe's ?wait_async_action) that a
%% trace event matching `EventMatch' is observed within `Timeout' ms.
-define(assertWaitEvent(Code, EventMatch, Timeout),
    ?assertMatch(
        {_, {ok, EventMatch}},
        ?wait_async_action(
            Code,
            EventMatch,
            Timeout
        )
    )
).

%% Return (and remove) every message currently queued in the calling
%% process's mailbox, in arrival order; never blocks (`after 0').
%% Variable names are deliberately obfuscated (F__Flush_, X__Msg_) to
%% avoid capturing bindings at the macro expansion site.
-define(drainMailbox(),
    (fun F__Flush_() ->
        receive
            X__Msg_ -> [X__Msg_ | F__Flush_()]
        after 0 -> []
        end
    end)()
).

%% Assert that a message matching `PATTERN' arrives within 1000 ms;
%% returns the received message.
-define(assertReceive(PATTERN),
    ?assertReceive(PATTERN, 1000)
).

%% Assert that a message matching `PATTERN' arrives within `TIMEOUT' ms,
%% returning the matched message. On timeout, raises an
%% `{assertReceive, ...}' error carrying the source location, the
%% stringified pattern (`??PATTERN'), and the drained mailbox contents
%% to aid debugging.
-define(assertReceive(PATTERN, TIMEOUT),
    (fun() ->
        receive
            X__V = PATTERN -> X__V
        after TIMEOUT ->
            erlang:error(
                {assertReceive, [
                    {module, ?MODULE},
                    {line, ?LINE},
                    {expression, (??PATTERN)},
                    {mailbox, ?drainMailbox()}
                ]}
            )
        end
    end)()
).

View File

@ -0,0 +1,42 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2017-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------

%% Per-channel metric names, grouped as received (recv_*) and sent
%% (send_*) packet/message counters; dotted atoms (e.g.
%% 'recv_msg.qos0') are composite counter names broken down by QoS
%% and drop reason. NOTE(review): this list was previously a private
%% define in emqx_channel.erl and is shared via this header so other
%% channel implementations can reuse it — keep the two in sync if it
%% ever reappears inline.
-define(CHANNEL_METRICS, [
    recv_pkt,
    recv_msg,
    'recv_msg.qos0',
    'recv_msg.qos1',
    'recv_msg.qos2',
    'recv_msg.dropped',
    'recv_msg.dropped.await_pubrel_timeout',
    send_pkt,
    send_msg,
    'send_msg.qos0',
    'send_msg.qos1',
    'send_msg.qos2',
    'send_msg.dropped',
    'send_msg.dropped.expired',
    'send_msg.dropped.queue_full',
    'send_msg.dropped.too_large'
]).

%% Top-level channel info keys (connection info, connection state,
%% client info, session, will message).
-define(INFO_KEYS, [
    conninfo,
    conn_state,
    clientinfo,
    session,
    will_msg
]).

View File

@ -34,6 +34,7 @@
-define(HP_BRIDGE, 870).
-define(HP_DELAY_PUB, 860).
%% apps that can stop the hooks chain from continuing
-define(HP_NODE_REBALANCE, 110).
-define(HP_EXHOOK, 100).
%% == Lowest Priority = 0, don't change this value as the plugins may depend on it.

View File

@ -32,10 +32,10 @@
%% `apps/emqx/src/bpapi/README.md'
%% Community edition
-define(EMQX_RELEASE_CE, "5.0.24").
-define(EMQX_RELEASE_CE, "5.0.25").
%% Enterprise edition
-define(EMQX_RELEASE_EE, "5.0.3-rc.1").
-define(EMQX_RELEASE_EE, "5.0.4-alpha.1").
%% the HTTP API version
-define(EMQX_API_VERSION, "5.0").

View File

@ -13,6 +13,7 @@
{emqx_conf,2}.
{emqx_dashboard,1}.
{emqx_delayed,1}.
{emqx_eviction_agent,1}.
{emqx_exhook,1}.
{emqx_gateway_api_listeners,1}.
{emqx_gateway_cm,1}.
@ -26,6 +27,10 @@
{emqx_mgmt_cluster,1}.
{emqx_mgmt_trace,1}.
{emqx_mgmt_trace,2}.
{emqx_node_rebalance,1}.
{emqx_node_rebalance_api,1}.
{emqx_node_rebalance_evacuation,1}.
{emqx_node_rebalance_status,1}.
{emqx_persistent_session,1}.
{emqx_plugin_libs,1}.
{emqx_plugins,1}.

View File

@ -24,12 +24,12 @@
{deps, [
{emqx_utils, {path, "../emqx_utils"}},
{lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}},
{gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}},
{gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}},
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.1"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.4"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.6"}}},
{emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}},
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},

View File

@ -112,8 +112,8 @@ update_log_handler({Action, {handler, Id, Mod, Conf}}) ->
end,
ok.
id_for_log(console) -> "log.console_handler";
id_for_log(Other) -> "log.file_handlers." ++ atom_to_list(Other).
id_for_log(console) -> "log.console";
id_for_log(Other) -> "log.file." ++ atom_to_list(Other).
atom(Id) when is_binary(Id) -> binary_to_atom(Id, utf8);
atom(Id) when is_atom(Id) -> Id.
@ -126,12 +126,12 @@ tr_handlers(Conf) ->
%% For the default logger that outputs to console
tr_console_handler(Conf) ->
case conf_get("log.console_handler.enable", Conf) of
case conf_get("log.console.enable", Conf) of
true ->
ConsoleConf = conf_get("log.console_handler", Conf),
ConsoleConf = conf_get("log.console", Conf),
[
{handler, console, logger_std_h, #{
level => conf_get("log.console_handler.level", Conf),
level => conf_get("log.console.level", Conf),
config => (log_handler_conf(ConsoleConf))#{type => standard_io},
formatter => log_formatter(ConsoleConf),
filters => log_filter(ConsoleConf)
@ -150,14 +150,10 @@ tr_file_handler({HandlerName, SubConf}) ->
{handler, atom(HandlerName), logger_disk_log_h, #{
level => conf_get("level", SubConf),
config => (log_handler_conf(SubConf))#{
type =>
case conf_get("rotation.enable", SubConf) of
true -> wrap;
_ -> halt
end,
file => conf_get("file", SubConf),
max_no_files => conf_get("rotation.count", SubConf),
max_no_bytes => conf_get("max_size", SubConf)
type => wrap,
file => conf_get("to", SubConf),
max_no_files => conf_get("rotation_count", SubConf),
max_no_bytes => conf_get("rotation_size", SubConf)
},
formatter => log_formatter(SubConf),
filters => log_filter(SubConf),
@ -165,14 +161,11 @@ tr_file_handler({HandlerName, SubConf}) ->
}}.
logger_file_handlers(Conf) ->
Handlers = maps:to_list(conf_get("log.file_handlers", Conf, #{})),
lists:filter(
fun({_Name, Opts}) ->
B = conf_get("enable", Opts),
true = is_boolean(B),
B
fun({_Name, Handler}) ->
conf_get("enable", Handler, false)
end,
Handlers
maps:to_list(conf_get("log.file", Conf, #{}))
).
conf_get(Key, Conf) -> emqx_schema:conf_get(Key, Conf).
@ -237,12 +230,8 @@ log_filter(Conf) ->
end.
tr_level(Conf) ->
ConsoleLevel = conf_get("log.console_handler.level", Conf, undefined),
FileLevels = [
conf_get("level", SubConf)
|| {_, SubConf} <-
logger_file_handlers(Conf)
],
ConsoleLevel = conf_get("log.console.level", Conf, undefined),
FileLevels = [conf_get("level", SubConf) || {_, SubConf} <- logger_file_handlers(Conf)],
case FileLevels ++ [ConsoleLevel || ConsoleLevel =/= undefined] of
%% warning is the default level we should use
[] -> warning;

View File

@ -3,7 +3,7 @@
{id, "emqx"},
{description, "EMQX Core"},
% strict semver, bump manually!
{vsn, "5.0.25"},
{vsn, "5.0.26"},
{modules, []},
{registered, []},
{applications, [

View File

@ -184,11 +184,18 @@ run_fold_hook(HookPoint, Args, Acc) ->
-spec get_config(emqx_utils_maps:config_key_path()) -> term().
get_config(KeyPath) ->
emqx_config:get(KeyPath).
KeyPath1 = emqx_config:ensure_atom_conf_path(KeyPath, {raise_error, config_not_found}),
emqx_config:get(KeyPath1).
-spec get_config(emqx_utils_maps:config_key_path(), term()) -> term().
get_config(KeyPath, Default) ->
emqx_config:get(KeyPath, Default).
try
KeyPath1 = emqx_config:ensure_atom_conf_path(KeyPath, {raise_error, config_not_found}),
emqx_config:get(KeyPath1, Default)
catch
error:config_not_found ->
Default
end.
-spec get_raw_config(emqx_utils_maps:config_key_path()) -> term().
get_raw_config(KeyPath) ->

View File

@ -29,9 +29,13 @@
authn_type/1
]).
-ifdef(TEST).
-export([convert_certs/2, convert_certs/3, clear_certs/2]).
-endif.
%% Used in emqx_gateway
-export([
certs_dir/2,
convert_certs/2,
convert_certs/3,
clear_certs/2
]).
-export_type([config/0]).

View File

@ -18,6 +18,7 @@
-module(emqx_channel).
-include("emqx.hrl").
-include("emqx_channel.hrl").
-include("emqx_mqtt.hrl").
-include("logger.hrl").
-include("types.hrl").
@ -57,6 +58,12 @@
clear_keepalive/1
]).
%% Export for emqx_channel implementations
-export([
maybe_nack/1,
maybe_mark_as_delivered/2
]).
%% Exports for CT
-export([set_field/3]).
@ -69,7 +76,7 @@
]
).
-export_type([channel/0, opts/0]).
-export_type([channel/0, opts/0, conn_state/0]).
-record(channel, {
%% MQTT ConnInfo
@ -131,33 +138,6 @@
quota_timer => expire_quota_limit
}).
-define(CHANNEL_METRICS, [
recv_pkt,
recv_msg,
'recv_msg.qos0',
'recv_msg.qos1',
'recv_msg.qos2',
'recv_msg.dropped',
'recv_msg.dropped.await_pubrel_timeout',
send_pkt,
send_msg,
'send_msg.qos0',
'send_msg.qos1',
'send_msg.qos2',
'send_msg.dropped',
'send_msg.dropped.expired',
'send_msg.dropped.queue_full',
'send_msg.dropped.too_large'
]).
-define(INFO_KEYS, [
conninfo,
conn_state,
clientinfo,
session,
will_msg
]).
-define(LIMITER_ROUTING, message_routing).
-dialyzer({no_match, [shutdown/4, ensure_timer/2, interval/2]}).
@ -276,9 +256,7 @@ init(
),
{NClientInfo, NConnInfo} = take_ws_cookie(ClientInfo, ConnInfo),
#channel{
%% We remove the peercert because it duplicates to what's stored in the socket,
%% Saving a copy here causes unnecessary wast of memory (about 1KB per connection).
conninfo = maps:put(peercert, undefined, NConnInfo),
conninfo = NConnInfo,
clientinfo = NClientInfo,
topic_aliases = #{
inbound => #{},
@ -1078,10 +1056,12 @@ handle_out(unsuback, {PacketId, _ReasonCodes}, Channel) ->
handle_out(disconnect, ReasonCode, Channel) when is_integer(ReasonCode) ->
ReasonName = disconnect_reason(ReasonCode),
handle_out(disconnect, {ReasonCode, ReasonName}, Channel);
handle_out(disconnect, {ReasonCode, ReasonName}, Channel = ?IS_MQTT_V5) ->
Packet = ?DISCONNECT_PACKET(ReasonCode),
handle_out(disconnect, {ReasonCode, ReasonName}, Channel) ->
handle_out(disconnect, {ReasonCode, ReasonName, #{}}, Channel);
handle_out(disconnect, {ReasonCode, ReasonName, Props}, Channel = ?IS_MQTT_V5) ->
Packet = ?DISCONNECT_PACKET(ReasonCode, Props),
{ok, [{outgoing, Packet}, {close, ReasonName}], Channel};
handle_out(disconnect, {_ReasonCode, ReasonName}, Channel) ->
handle_out(disconnect, {_ReasonCode, ReasonName, _Props}, Channel) ->
{ok, {close, ReasonName}, Channel};
handle_out(auth, {ReasonCode, Properties}, Channel) ->
{ok, ?AUTH_PACKET(ReasonCode, Properties), Channel};
@ -1198,13 +1178,19 @@ handle_call(
{takeover, 'end'},
Channel = #channel{
session = Session,
pendings = Pendings
pendings = Pendings,
conninfo = #{clientid := ClientId}
}
) ->
ok = emqx_session:takeover(Session),
%% TODO: Should not drain deliver here (side effect)
Delivers = emqx_utils:drain_deliver(),
AllPendings = lists:append(Delivers, Pendings),
?tp(
debug,
emqx_channel_takeover_end,
#{clientid => ClientId}
),
disconnect_and_shutdown(takenover, AllPendings, Channel);
handle_call(list_authz_cache, Channel) ->
{reply, emqx_authz_cache:list_authz_cache(), Channel};
@ -1216,7 +1202,7 @@ handle_call(
}
) ->
ClientId = info(clientid, Channel),
NKeepalive = emqx_keepalive:set(interval, Interval * 1000, KeepAlive),
NKeepalive = emqx_keepalive:update(timer:seconds(Interval), KeepAlive),
NConnInfo = maps:put(keepalive, Interval, ConnInfo),
NChannel = Channel#channel{keepalive = NKeepalive, conninfo = NConnInfo},
SockInfo = maps:get(sockinfo, emqx_cm:get_chan_info(ClientId), #{}),
@ -1276,6 +1262,8 @@ handle_info(die_if_test = Info, Channel) ->
die_if_test_compiled(),
?SLOG(error, #{msg => "unexpected_info", info => Info}),
{ok, Channel};
handle_info({disconnect, ReasonCode, ReasonName, Props}, Channel) ->
handle_out(disconnect, {ReasonCode, ReasonName, Props}, Channel);
handle_info(Info, Channel) ->
?SLOG(error, #{msg => "unexpected_info", info => Info}),
{ok, Channel}.
@ -1999,10 +1987,21 @@ ensure_connected(
NConnInfo = ConnInfo#{connected_at => erlang:system_time(millisecond)},
ok = run_hooks('client.connected', [ClientInfo, NConnInfo]),
Channel#channel{
conninfo = NConnInfo,
conninfo = trim_conninfo(NConnInfo),
conn_state = connected
}.
trim_conninfo(ConnInfo) ->
maps:without(
[
%% NOTE
%% We remove the peercert because it duplicates what's stored in the socket,
%% otherwise it wastes about 1KB per connection.
peercert
],
ConnInfo
).
%%--------------------------------------------------------------------
%% Init Alias Maximum
@ -2035,9 +2034,9 @@ ensure_keepalive_timer(0, Channel) ->
ensure_keepalive_timer(disabled, Channel) ->
Channel;
ensure_keepalive_timer(Interval, Channel = #channel{clientinfo = #{zone := Zone}}) ->
Backoff = get_mqtt_conf(Zone, keepalive_backoff),
RecvOct = emqx_pd:get_counter(incoming_bytes),
Keepalive = emqx_keepalive:init(RecvOct, round(timer:seconds(Interval) * Backoff)),
Multiplier = get_mqtt_conf(Zone, keepalive_multiplier),
RecvCnt = emqx_pd:get_counter(recv_pkt),
Keepalive = emqx_keepalive:init(RecvCnt, round(timer:seconds(Interval) * Multiplier)),
ensure_timer(alive_timer, Channel#channel{keepalive = Keepalive}).
clear_keepalive(Channel = #channel{timers = Timers}) ->
@ -2146,7 +2145,8 @@ publish_will_msg(
ok;
false ->
NMsg = emqx_mountpoint:mount(MountPoint, Msg),
_ = emqx_broker:publish(NMsg),
NMsg2 = NMsg#message{timestamp = erlang:system_time(millisecond)},
_ = emqx_broker:publish(NMsg2),
ok
end.

View File

@ -23,6 +23,8 @@
-include("logger.hrl").
-include("types.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("stdlib/include/qlc.hrl").
-include_lib("stdlib/include/ms_transform.hrl").
-export([start_link/0]).
@ -72,6 +74,12 @@
get_session_confs/2
]).
%% Client management
-export([
channel_with_session_table/1,
live_connection_table/1
]).
%% gen_server callbacks
-export([
init/1,
@ -593,6 +601,40 @@ all_channels() ->
Pat = [{{'_', '$1'}, [], ['$1']}],
ets:select(?CHAN_TAB, Pat).
%% @doc Get clientinfo for all clients with sessions
%% Returns a QLC query handle yielding {ClientId, ConnState, ConnInfo, ClientInfo}
%% for every channel whose connection module is in ConnModuleList and whose
%% session is persistent (conninfo has clean_start := false).
channel_with_session_table(ConnModuleList) ->
    %% Match spec projecting channel-info rows down to {ClientId, Info};
    %% ets:fun2ms/1 is a parse transform (needs ms_transform.hrl, included above).
    Ms = ets:fun2ms(
        fun({{ClientId, _ChanPid}, Info, _Stats}) ->
            {ClientId, Info}
        end
    ),
    Table = ets:table(?CHAN_INFO_TAB, [{traverse, {select, Ms}}]),
    %% {version, 2} set representation for fast membership tests below.
    ConnModules = sets:from_list(ConnModuleList, [{version, 2}]),
    qlc:q([
        {ClientId, ConnState, ConnInfo, ClientInfo}
     || {ClientId, #{
            conn_state := ConnState,
            clientinfo := ClientInfo,
            conninfo := #{clean_start := false, conn_mod := ConnModule} = ConnInfo
        }} <-
            Table,
        sets:is_element(ConnModule, ConnModules)
    ]).
%% @doc Get all local connection query handle
%% Returns a QLC handle yielding {ClientId, ChanPid} for channels registered
%% under one of ConnModules whose process is still marked alive locally
%% (see is_channel_connected/1).
live_connection_table(ConnModules) ->
    %% One match spec per connection module; the table traversal selects
    %% only rows belonging to those modules.
    Ms = lists:map(fun live_connection_ms/1, ConnModules),
    Table = ets:table(?CHAN_CONN_TAB, [{traverse, {select, Ms}}]),
    qlc:q([{ClientId, ChanPid} || {ClientId, ChanPid} <- Table, is_channel_connected(ChanPid)]).
%% Build an ETS match spec selecting {ClientId, ChanPid} pairs from rows
%% of the shape {{ClientId, ChanPid}, ConnModule}.
live_connection_ms(ConnModule) ->
    MatchHead = {{'$1', '$2'}, ConnModule},
    Result = [{{'$1', '$2'}}],
    {MatchHead, [], Result}.
%% @doc Check whether a channel process is still marked alive.
%% Only local pids can be checked against the live-channel ETS table;
%% pids from remote nodes (or anything that fails the node/1 guard)
%% are conservatively reported as not connected.
is_channel_connected(ChanPid) when node(ChanPid) =:= node() ->
    ets:member(?CHAN_LIVE_TAB, ChanPid);
is_channel_connected(_ChanPid) ->
    false.
%% @doc Get all registered clientIDs. Debug/test interface
all_client_ids() ->
Pat = [{{'$1', '_'}, [], ['$1']}],
@ -693,7 +735,8 @@ code_change(_OldVsn, State, _Extra) ->
%%--------------------------------------------------------------------
clean_down({ChanPid, ClientId}) ->
do_unregister_channel({ClientId, ChanPid}).
do_unregister_channel({ClientId, ChanPid}),
ok = ?tp(debug, emqx_cm_clean_down, #{client_id => ClientId}).
%% Refresh every channel-related statistic; always returns ok.
stats_fun() ->
    ok = lists:foreach(fun(Stat) -> update_stats(Stat) end, ?CHAN_STATS).
@ -719,12 +762,12 @@ get_chann_conn_mod(ClientId, ChanPid) ->
wrap_rpc(emqx_cm_proto_v1:get_chann_conn_mod(ClientId, ChanPid)).
mark_channel_connected(ChanPid) ->
?tp(emqx_cm_connected_client_count_inc, #{}),
?tp(emqx_cm_connected_client_count_inc, #{chan_pid => ChanPid}),
ets:insert_new(?CHAN_LIVE_TAB, {ChanPid, true}),
ok.
mark_channel_disconnected(ChanPid) ->
?tp(emqx_cm_connected_client_count_dec, #{}),
?tp(emqx_cm_connected_client_count_dec, #{chan_pid => ChanPid}),
ets:delete(?CHAN_LIVE_TAB, ChanPid),
ok.

View File

@ -88,6 +88,8 @@
remove_handlers/0
]).
-export([ensure_atom_conf_path/2]).
-ifdef(TEST).
-export([erase_all/0]).
-endif.
@ -113,7 +115,8 @@
update_cmd/0,
update_args/0,
update_error/0,
update_result/0
update_result/0,
runtime_config_key_path/0
]).
-type update_request() :: term().
@ -144,6 +147,8 @@
-type config() :: #{atom() => term()} | list() | undefined.
-type app_envs() :: [proplists:property()].
-type runtime_config_key_path() :: [atom()].
%% @doc For the given path, get root value enclosed in a single-key map.
-spec get_root(emqx_utils_maps:config_key_path()) -> map().
get_root([RootName | _]) ->
@ -156,25 +161,21 @@ get_root_raw([RootName | _]) ->
%% @doc Get a config value for the given path.
%% The path should at least include root config name.
-spec get(emqx_utils_maps:config_key_path()) -> term().
-spec get(runtime_config_key_path()) -> term().
get(KeyPath) -> do_get(?CONF, KeyPath).
-spec get(emqx_utils_maps:config_key_path(), term()) -> term().
-spec get(runtime_config_key_path(), term()) -> term().
get(KeyPath, Default) -> do_get(?CONF, KeyPath, Default).
-spec find(emqx_utils_maps:config_key_path()) ->
-spec find(runtime_config_key_path()) ->
{ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}.
find([]) ->
case do_get(?CONF, [], ?CONFIG_NOT_FOUND_MAGIC) of
?CONFIG_NOT_FOUND_MAGIC -> {not_found, []};
Res -> {ok, Res}
end;
find(KeyPath) ->
atom_conf_path(
KeyPath,
fun(AtomKeyPath) -> emqx_utils_maps:deep_find(AtomKeyPath, get_root(KeyPath)) end,
{return, {not_found, KeyPath}}
).
find(AtomKeyPath) ->
emqx_utils_maps:deep_find(AtomKeyPath, get_root(AtomKeyPath)).
-spec find_raw(emqx_utils_maps:config_key_path()) ->
{ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}.
@ -712,21 +713,14 @@ do_put(Type, Putter, [RootName | KeyPath], DeepValue) ->
NewValue = do_deep_put(Type, Putter, KeyPath, OldValue, DeepValue),
persistent_term:put(?PERSIS_KEY(Type, RootName), NewValue).
do_deep_get(?CONF, KeyPath, Map, Default) ->
atom_conf_path(
KeyPath,
fun(AtomKeyPath) -> emqx_utils_maps:deep_get(AtomKeyPath, Map, Default) end,
{return, Default}
);
do_deep_get(?CONF, AtomKeyPath, Map, Default) ->
emqx_utils_maps:deep_get(AtomKeyPath, Map, Default);
do_deep_get(?RAW_CONF, KeyPath, Map, Default) ->
emqx_utils_maps:deep_get([bin(Key) || Key <- KeyPath], Map, Default).
do_deep_put(?CONF, Putter, KeyPath, Map, Value) ->
atom_conf_path(
KeyPath,
fun(AtomKeyPath) -> Putter(AtomKeyPath, Map, Value) end,
{raise_error, {not_found, KeyPath}}
);
AtomKeyPath = ensure_atom_conf_path(KeyPath, {raise_error, {not_found, KeyPath}}),
Putter(AtomKeyPath, Map, Value);
do_deep_put(?RAW_CONF, Putter, KeyPath, Map, Value) ->
Putter([bin(Key) || Key <- KeyPath], Map, Value).
@ -773,15 +767,24 @@ conf_key(?CONF, RootName) ->
conf_key(?RAW_CONF, RootName) ->
bin(RootName).
atom_conf_path(Path, ExpFun, OnFail) ->
try [atom(Key) || Key <- Path] of
AtomKeyPath -> ExpFun(AtomKeyPath)
%% Return Path unchanged when every segment is already an atom; otherwise
%% delegate to to_atom_conf_path/2, which applies OnFail on conversion failure.
ensure_atom_conf_path(Path, OnFail) ->
    AllAtoms = lists:all(fun erlang:is_atom/1, Path),
    if
        AllAtoms ->
            %% Already an atom path: avoid rebuilding it.
            Path;
        true ->
            to_atom_conf_path(Path, OnFail)
    end.
to_atom_conf_path(Path, OnFail) ->
try
[atom(Key) || Key <- Path]
catch
error:badarg ->
case OnFail of
{return, Val} ->
Val;
{raise_error, Err} ->
error(Err)
error(Err);
{return, V} ->
V
end
end.

View File

@ -22,7 +22,7 @@
info/1,
info/2,
check/2,
set/3
update/2
]).
-elvis([{elvis_style, no_if_expression, disable}]).
@ -31,66 +31,16 @@
-record(keepalive, {
interval :: pos_integer(),
statval :: non_neg_integer(),
repeat :: non_neg_integer()
statval :: non_neg_integer()
}).
-opaque keepalive() :: #keepalive{}.
-define(MAX_INTERVAL, 65535000).
%% @doc Init keepalive.
-spec init(Interval :: non_neg_integer()) -> keepalive().
init(Interval) -> init(0, Interval).
%% @doc Init keepalive.
-spec init(StatVal :: non_neg_integer(), Interval :: non_neg_integer()) -> keepalive().
init(StatVal, Interval) when Interval > 0 ->
#keepalive{
interval = Interval,
statval = StatVal,
repeat = 0
}.
%% @doc Get Info of the keepalive.
-spec info(keepalive()) -> emqx_types:infos().
info(#keepalive{
interval = Interval,
statval = StatVal,
repeat = Repeat
}) ->
#{
interval => Interval,
statval => StatVal,
repeat => Repeat
}.
-spec info(interval | statval | repeat, keepalive()) ->
non_neg_integer().
info(interval, #keepalive{interval = Interval}) ->
Interval;
info(statval, #keepalive{statval = StatVal}) ->
StatVal;
info(repeat, #keepalive{repeat = Repeat}) ->
Repeat.
%% @doc Check keepalive.
-spec check(non_neg_integer(), keepalive()) ->
{ok, keepalive()} | {error, timeout}.
check(
NewVal,
KeepAlive = #keepalive{
statval = OldVal,
repeat = Repeat
}
) ->
if
NewVal =/= OldVal ->
{ok, KeepAlive#keepalive{statval = NewVal, repeat = 0}};
Repeat < 1 ->
{ok, KeepAlive#keepalive{repeat = Repeat + 1}};
true ->
{error, timeout}
end.
%% from mqtt-v3.1.1 specific
%% A Keep Alive value of zero (0) has the effect of turning off the keep alive mechanism.
%% This means that, in this case, the Server is not required
@ -102,7 +52,43 @@ check(
%%The actual value of the Keep Alive is application specific;
%% typically this is a few minutes.
%% The maximum value is (65535s) 18 hours 12 minutes and 15 seconds.
%% @doc Update keepalive's interval
-spec set(interval, non_neg_integer(), keepalive()) -> keepalive().
set(interval, Interval, KeepAlive) when Interval >= 0 andalso Interval =< 65535000 ->
KeepAlive#keepalive{interval = Interval}.
%% @doc Init keepalive.
%% Returns `undefined` when the interval is 0 (keepalive disabled);
%% intervals above ?MAX_INTERVAL are clamped down to ?MAX_INTERVAL.
-spec init(StatVal :: non_neg_integer(), Interval :: non_neg_integer()) -> keepalive() | undefined.
init(_StatVal, 0) ->
    undefined;
init(StatVal, Interval) when Interval > ?MAX_INTERVAL ->
    init(StatVal, ?MAX_INTERVAL);
init(StatVal, Interval) when Interval > 0 ->
    #keepalive{interval = Interval, statval = StatVal}.
%% @doc Get Info of the keepalive.
%% Returns a map view of the keepalive record: the configured check
%% interval (milliseconds) and the statistic sampled at the last check.
-spec info(keepalive()) -> emqx_types:infos().
info(#keepalive{
    interval = Interval,
    statval = StatVal
}) ->
    #{
        interval => Interval,
        statval => StatVal
    }.
%% @doc Get a single keepalive field.
%% Also accepts `undefined` (keepalive disabled) and reports neutral values,
%% so callers need not special-case a disabled keepalive.
-spec info(interval | statval, keepalive() | undefined) ->
    non_neg_integer().
info(interval, #keepalive{interval = Interval}) ->
    Interval;
info(statval, #keepalive{statval = StatVal}) ->
    StatVal;
info(interval, undefined) ->
    0;
info(statval, undefined) ->
    %% Previously only `interval` handled `undefined`; querying `statval`
    %% crashed with function_clause. A disabled keepalive has no sampled
    %% statistic, so report 0 for symmetry with `interval`.
    0.
%% @doc Check keepalive.
%% If the observed statistic equals the value recorded at the previous
%% check, the peer is considered idle and the keepalive times out;
%% otherwise record the new value and continue.
-spec check(non_neg_integer(), keepalive()) ->
    {ok, keepalive()} | {error, timeout}.
check(Val, #keepalive{statval = Val}) -> {error, timeout};
check(Val, KeepAlive) -> {ok, KeepAlive#keepalive{statval = Val}}.
%% @doc Re-initialize a keepalive with a new interval.
%% The statistic value of the previous keepalive is carried over so that
%% regular checks resume from the next cycle; a missing keepalive starts
%% its statistic from 0.
-spec update(non_neg_integer(), keepalive() | undefined) -> keepalive() | undefined.
update(Interval, #keepalive{statval = StatVal}) ->
    init(StatVal, Interval);
update(Interval, undefined) ->
    init(0, Interval).

View File

@ -131,11 +131,9 @@ delete_root(Type) ->
delete_bucket(?ROOT_ID, Type).
post_config_update([limiter], _Config, NewConf, _OldConf, _AppEnvs) ->
Types = lists:delete(client, maps:keys(NewConf)),
_ = [on_post_config_update(Type, NewConf) || Type <- Types],
ok;
post_config_update([limiter, Type], _Config, NewConf, _OldConf, _AppEnvs) ->
on_post_config_update(Type, NewConf).
Conf = emqx_limiter_schema:convert_node_opts(NewConf),
_ = [on_post_config_update(Type, Cfg) || {Type, Cfg} <- maps:to_list(Conf)],
ok.
%%--------------------------------------------------------------------
%% @doc
@ -279,8 +277,7 @@ format_status(_Opt, Status) ->
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
on_post_config_update(Type, NewConf) ->
Config = maps:get(Type, NewConf),
on_post_config_update(Type, Config) ->
case emqx_limiter_server:whereis(Type) of
undefined ->
start_server(Type, Config);

View File

@ -32,9 +32,15 @@
get_bucket_cfg_path/2,
desc/1,
types/0,
short_paths/0,
calc_capacity/1,
extract_with_type/2,
default_client_config/0
default_client_config/0,
default_bucket_config/0,
short_paths_fields/1,
get_listener_opts/1,
get_node_opts/1,
convert_node_opts/1
]).
-define(KILOBYTE, 1024).
@ -56,7 +62,7 @@
-type limiter_id() :: atom().
-type bucket_name() :: atom().
-type rate() :: infinity | float().
-type burst_rate() :: 0 | float().
-type burst_rate() :: number().
%% this is a compatible type for the deprecated field and type `capacity`.
-type burst() :: burst_rate().
%% the capacity of the token bucket
@ -104,15 +110,17 @@ roots() ->
].
fields(limiter) ->
[
{Type,
?HOCON(?R_REF(node_opts), #{
desc => ?DESC(Type),
importance => ?IMPORTANCE_HIDDEN,
aliases => alias_of_type(Type)
})}
|| Type <- types()
] ++
short_paths_fields(?MODULE) ++
[
{Type,
?HOCON(?R_REF(node_opts), #{
desc => ?DESC(Type),
importance => ?IMPORTANCE_HIDDEN,
required => {false, recursively},
aliases => alias_of_type(Type)
})}
|| Type <- types()
] ++
[
%% This is an undocumented feature, and it won't be support anymore
{client,
@ -203,6 +211,14 @@ fields(listener_client_fields) ->
fields(Type) ->
simple_bucket_field(Type).
%% Schema fields for the short-path limiter settings (max_conn_rate,
%% messages_rate, bytes_rate); field descriptions are looked up in DesModule.
short_paths_fields(DesModule) ->
    Examples = [<<"1000/s">>, <<"1000/s">>, <<"100MB/s">>],
    lists:zipwith(
        fun(Name, Example) ->
            {Name,
                ?HOCON(rate(), #{desc => ?DESC(DesModule, Name), required => false, example => Example})}
        end,
        short_paths(),
        Examples
    ).
desc(limiter) ->
"Settings for the rate limiter.";
desc(node_opts) ->
@ -236,6 +252,9 @@ get_bucket_cfg_path(Type, BucketName) ->
%% All limiter types that can be configured at node level.
types() ->
    [bytes, messages, connection, message_routing, internal].
%% Short-hand, top-level config keys that map onto limiter types
%% (see short_path_name_to_type/1).
short_paths() ->
    [max_conn_rate, messages_rate, bytes_rate].
calc_capacity(#{rate := infinity}) ->
infinity;
calc_capacity(#{rate := Rate, burst := Burst}) ->
@ -266,6 +285,50 @@ default_client_config() ->
failure_strategy => force
}.
%% Default node-level bucket configuration: unlimited rate, no burst,
%% zero initial token count.
default_bucket_config() ->
    #{
        rate => infinity,
        burst => 0,
        initial => 0
    }.
%% Build the effective limiter options for a listener by combining the
%% explicit `limiter` section of Conf with any short-path settings in Conf.
get_listener_opts(Conf) ->
    ShortPathConf = maps:with(short_paths(), Conf),
    ExplicitLimiter = maps:get(limiter, Conf, undefined),
    get_listener_opts(ExplicitLimiter, ShortPathConf).
%% Node-level limiter options for Type. When the type has a short-path
%% alias and that alias is set in the config, its value overrides the
%% bucket's rate.
get_node_opts(Type) ->
    BaseOpts = emqx:get_config([limiter, Type], default_bucket_config()),
    case type_to_short_path_name(Type) of
        undefined ->
            %% This limiter type has no short-path alias.
            BaseOpts;
        ShortName ->
            case emqx:get_config([limiter, ShortName], undefined) of
                undefined -> BaseOpts;
                Rate -> BaseOpts#{rate := Rate}
            end
    end.
%% Normalize node-level limiter config: drop the deprecated `client`
%% section and expand short-path keys into full bucket configs.
convert_node_opts(Conf) ->
    maps:fold(fun convert_node_opt/3, #{}, Conf).

%% Fold step for convert_node_opts/1.
%% The `client` in the node options was deprecated, so it is skipped.
convert_node_opt(client, _Value, Acc) ->
    Acc;
convert_node_opt(Name, Value, Acc) ->
    case lists:member(Name, short_paths()) of
        true ->
            %% Short-path key: expand to a default bucket with this rate.
            Type = short_path_name_to_type(Name),
            Acc#{Type => (default_bucket_config())#{rate => Value}};
        false ->
            Acc#{Name => Value}
    end.
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
@ -476,3 +539,42 @@ merge_client_bucket(Type, _, {ok, BucketVal}) ->
#{Type => BucketVal};
merge_client_bucket(_, _, _) ->
undefined.
%% Map a short-path config key to its limiter type.
%% Intentionally has no catch-all clause: an unknown key is a programmer
%% error and should crash with function_clause.
short_path_name_to_type(max_conn_rate) ->
    connection;
short_path_name_to_type(messages_rate) ->
    messages;
short_path_name_to_type(bytes_rate) ->
    bytes.
%% Inverse of short_path_name_to_type/1; limiter types without a
%% short-path alias map to `undefined`.
type_to_short_path_name(Type) ->
    case Type of
        connection -> max_conn_rate;
        messages -> messages_rate;
        bytes -> bytes_rate;
        _ -> undefined
    end.
%% Combine an explicit listener limiter config with short-path settings.
%% No short paths present: keep the explicit config as-is (possibly undefined).
get_listener_opts(Limiter, ShortPaths) when map_size(ShortPaths) =:= 0 ->
    Limiter;
%% Only short paths given: expand them into a full limiter config.
get_listener_opts(undefined, ShortPaths) ->
    convert_listener_short_paths(ShortPaths);
%% Both present: deep-merge the expanded short paths into the explicit config.
get_listener_opts(Limiter, ShortPaths) ->
    Expanded = convert_listener_short_paths(ShortPaths),
    emqx_utils_maps:deep_merge(Limiter, Expanded).
%% Expand short-path settings into a listener limiter config.
%% `max_conn_rate` becomes a listener-level bucket; the message/byte rates
%% become per-client limiter settings under the `client` key.
convert_listener_short_paths(ShortPaths) ->
    maps:fold(fun add_short_path_opt/3, #{}, ShortPaths).

%% Fold step for convert_listener_short_paths/1.
add_short_path_opt(max_conn_rate = Name, Rate, Acc) ->
    Type = short_path_name_to_type(Name),
    Acc#{Type => (default_bucket_config())#{rate => Rate}};
add_short_path_opt(Name, Rate, Acc) ->
    Type = short_path_name_to_type(Name),
    Client = maps:get(client, Acc, #{}),
    Acc#{client => Client#{Type => (default_client_config())#{rate => Rate}}}.

View File

@ -131,6 +131,9 @@ connect(Id, Type, Cfg) ->
%% @doc Register a token bucket with the limiter server for Type.
%% No-op when the bucket config is absent.
-spec add_bucket(limiter_id(), limiter_type(), hocons:config() | undefined) -> ok.
add_bucket(_Id, _Type, undefined) ->
    ok;
%% a bucket with an infinity rate shouldn't be added to this server, because it is always full
add_bucket(_Id, _Type, #{rate := infinity}) ->
    ok;
add_bucket(Id, Type, Cfg) ->
    ?CALL(Type, {add_bucket, Id, Cfg}).
@ -481,7 +484,7 @@ dispatch_burst_to_buckets([], _, Alloced, Buckets) ->
-spec init_tree(emqx_limiter_schema:limiter_type()) -> state().
init_tree(Type) when is_atom(Type) ->
Cfg = emqx:get_config([limiter, Type]),
Cfg = emqx_limiter_schema:get_node_opts(Type),
init_tree(Type, Cfg).
init_tree(Type, #{rate := Rate} = Cfg) ->
@ -507,8 +510,6 @@ make_root(#{rate := Rate, burst := Burst}) ->
correction => 0
}.
do_add_bucket(_Id, #{rate := infinity}, #{root := #{rate := infinity}} = State) ->
State;
do_add_bucket(Id, #{rate := Rate} = Cfg, #{buckets := Buckets} = State) ->
case maps:get(Id, Buckets, undefined) of
undefined ->
@ -625,13 +626,10 @@ find_referenced_bucket(Id, Type, #{rate := Rate} = Cfg) when Rate =/= infinity -
{error, invalid_bucket}
end;
%% this is a node-level reference
find_referenced_bucket(Id, Type, _) ->
case emqx:get_config([limiter, Type], undefined) of
find_referenced_bucket(_Id, Type, _) ->
case emqx_limiter_schema:get_node_opts(Type) of
#{rate := infinity} ->
false;
undefined ->
?SLOG(error, #{msg => "invalid limiter type", type => Type, id => Id}),
{error, invalid_bucket};
NodeCfg ->
{ok, Bucket} = emqx_limiter_manager:find_root(Type),
{ok, Bucket, NodeCfg}

View File

@ -86,7 +86,7 @@ init([]) ->
%% Internal functions
%%--==================================================================
make_child(Type) ->
Cfg = emqx:get_config([limiter, Type]),
Cfg = emqx_limiter_schema:get_node_opts(Type),
make_child(Type, Cfg).
make_child(Type, Cfg) ->

View File

@ -35,7 +35,8 @@
current_conns/2,
max_conns/2,
id_example/0,
default_max_conn/0
default_max_conn/0,
shutdown_count/2
]).
-export([
@ -195,6 +196,17 @@ max_conns(Type, Name, _ListenOn) when Type =:= ws; Type =:= wss ->
max_conns(_, _, _) ->
{error, not_support}.
%% @doc Connection-shutdown counters for the given listener.
%% Supported for TCP/SSL listeners (delegated to esockd); WS/WSS listeners
%% expose no such counters and yield an empty list; anything else is
%% unsupported.
shutdown_count(ID, ListenOn) ->
    {ok, #{type := Type, name := Name}} = parse_listener_id(ID),
    shutdown_count(Type, Name, ListenOn).

%% NOTE: guards normalized to exact comparison (`=:=`) — the original mixed
%% `==` and `=:=`; for atoms the behavior is identical, but the file's
%% convention is exact comparison.
shutdown_count(Type, Name, ListenOn) when Type =:= tcp; Type =:= ssl ->
    esockd:get_shutdown_count({listener_id(Type, Name), ListenOn});
shutdown_count(Type, _Name, _ListenOn) when Type =:= ws; Type =:= wss ->
    [];
shutdown_count(_, _, _) ->
    {error, not_support}.
%% @doc Start all listeners.
-spec start() -> ok.
start() ->
@ -265,9 +277,8 @@ restart_listener(Type, ListenerName, Conf) ->
restart_listener(Type, ListenerName, Conf, Conf).
restart_listener(Type, ListenerName, OldConf, NewConf) ->
case do_stop_listener(Type, ListenerName, OldConf) of
case stop_listener(Type, ListenerName, OldConf) of
ok -> start_listener(Type, ListenerName, NewConf);
{error, not_found} -> start_listener(Type, ListenerName, NewConf);
{error, Reason} -> {error, Reason}
end.
@ -284,42 +295,63 @@ stop_listener(ListenerId) ->
apply_on_listener(ListenerId, fun stop_listener/3).
stop_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
case do_stop_listener(Type, ListenerName, Conf) of
Id = listener_id(Type, ListenerName),
ok = del_limiter_bucket(Id, Conf),
case do_stop_listener(Type, Id, Conf) of
ok ->
console_print(
"Listener ~ts on ~ts stopped.~n",
[listener_id(Type, ListenerName), format_bind(Bind)]
[Id, format_bind(Bind)]
),
ok;
{error, not_found} ->
?ELOG(
"Failed to stop listener ~ts on ~ts: ~0p~n",
[listener_id(Type, ListenerName), format_bind(Bind), already_stopped]
),
ok;
{error, Reason} ->
?ELOG(
"Failed to stop listener ~ts on ~ts: ~0p~n",
[listener_id(Type, ListenerName), format_bind(Bind), Reason]
[Id, format_bind(Bind), Reason]
),
{error, Reason}
end.
-spec do_stop_listener(atom(), atom(), map()) -> ok | {error, term()}.
do_stop_listener(Type, ListenerName, #{bind := ListenOn} = Conf) when Type == tcp; Type == ssl ->
Id = listener_id(Type, ListenerName),
del_limiter_bucket(Id, Conf),
do_stop_listener(Type, Id, #{bind := ListenOn}) when Type == tcp; Type == ssl ->
esockd:close(Id, ListenOn);
do_stop_listener(Type, ListenerName, Conf) when Type == ws; Type == wss ->
Id = listener_id(Type, ListenerName),
del_limiter_bucket(Id, Conf),
cowboy:stop_listener(Id);
do_stop_listener(quic, ListenerName, Conf) ->
Id = listener_id(quic, ListenerName),
del_limiter_bucket(Id, Conf),
do_stop_listener(Type, Id, #{bind := ListenOn}) when Type == ws; Type == wss ->
case cowboy:stop_listener(Id) of
ok ->
wait_listener_stopped(ListenOn);
Error ->
Error
end;
do_stop_listener(quic, Id, _Conf) ->
quicer:stop_listener(Id).
%% Wait until a stopped (cowboy) listener has actually released its port.
wait_listener_stopped(ListenOn) ->
    %% NOTE
    %% `cowboy:stop_listener/1` will not close the listening socket explicitly,
    %% it will be closed by the runtime system **only after** the process exits.
    Endpoint = maps:from_list(ip_port(ListenOn)),
    %% Probe the endpoint: once the acceptors are gone the connect attempt is
    %% refused. A bounded timeout keeps listener shutdown from blocking
    %% indefinitely (e.g. if the probe packet is silently dropped).
    case
        gen_tcp:connect(
            maps:get(ip, Endpoint, loopback),
            maps:get(port, Endpoint),
            [{active, false}],
            1000
        )
    of
        {error, _EConnrefusedOrTimeout} ->
            %% NOTE
            %% We should get `econnrefused` here because acceptors are already dead
            %% but don't want to crash if not, because this doesn't make any difference.
            ok;
        {ok, Socket} ->
            %% NOTE
            %% Tiny chance to get a connected socket here, when some other process
            %% concurrently binds to the same port.
            gen_tcp:close(Socket)
    end.
-ifndef(TEST).
%% Emit a formatted message to the console (production builds only; the
%% surrounding -ifndef(TEST) conditional selects a different TEST variant).
console_print(Fmt, Args) -> ?ULOG(Fmt, Args).
-else.
@ -335,7 +367,8 @@ do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when
Type == tcp; Type == ssl
->
Id = listener_id(Type, ListenerName),
add_limiter_bucket(Id, Opts),
Limiter = limiter(Opts),
add_limiter_bucket(Id, Limiter),
esockd:open(
Id,
ListenOn,
@ -344,7 +377,7 @@ do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when
#{
listener => {Type, ListenerName},
zone => zone(Opts),
limiter => limiter(Opts),
limiter => Limiter,
enable_authn => enable_authn(Opts)
}
]}
@ -354,9 +387,10 @@ do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when
Type == ws; Type == wss
->
Id = listener_id(Type, ListenerName),
add_limiter_bucket(Id, Opts),
Limiter = limiter(Opts),
add_limiter_bucket(Id, Limiter),
RanchOpts = ranch_opts(Type, ListenOn, Opts),
WsOpts = ws_opts(Type, ListenerName, Opts),
WsOpts = ws_opts(Type, ListenerName, Opts, Limiter),
case Type of
ws -> cowboy:start_clear(Id, RanchOpts, WsOpts);
wss -> cowboy:start_tls(Id, RanchOpts, WsOpts)
@ -403,20 +437,22 @@ do_start_listener(quic, ListenerName, #{bind := Bind} = Opts) ->
Password -> [{password, str(Password)}]
end ++
optional_quic_listener_opts(Opts),
Limiter = limiter(Opts),
ConnectionOpts = #{
conn_callback => emqx_quic_connection,
peer_unidi_stream_count => maps:get(peer_unidi_stream_count, Opts, 1),
peer_bidi_stream_count => maps:get(peer_bidi_stream_count, Opts, 10),
zone => zone(Opts),
listener => {quic, ListenerName},
limiter => limiter(Opts)
limiter => Limiter
},
StreamOpts = #{
stream_callback => emqx_quic_stream,
active => 1
},
Id = listener_id(quic, ListenerName),
add_limiter_bucket(Id, Opts),
add_limiter_bucket(Id, Limiter),
quicer:start_listener(
Id,
ListenOn,
@ -520,12 +556,12 @@ esockd_opts(ListenerId, Type, Opts0) ->
end
).
ws_opts(Type, ListenerName, Opts) ->
ws_opts(Type, ListenerName, Opts, Limiter) ->
WsPaths = [
{emqx_utils_maps:deep_get([websocket, mqtt_path], Opts, "/mqtt"), emqx_ws_connection, #{
zone => zone(Opts),
listener => {Type, ListenerName},
limiter => limiter(Opts),
limiter => Limiter,
enable_authn => enable_authn(Opts)
}}
],
%% Zone this listener belongs to; `undefined` when not configured.
zone(Opts) ->
    case maps:find(zone, Opts) of
        {ok, Zone} -> Zone;
        error -> undefined
    end.
limiter(Opts) ->
maps:get(limiter, Opts, undefined).
emqx_limiter_schema:get_listener_opts(Opts).
add_limiter_bucket(Id, #{limiter := Limiter}) ->
add_limiter_bucket(_Id, undefined) ->
ok;
add_limiter_bucket(Id, Limiter) ->
maps:fold(
fun(Type, Cfg, _) ->
emqx_limiter_server:add_bucket(Id, Type, Cfg)
end,
ok,
maps:without([client], Limiter)
);
add_limiter_bucket(_Id, _Cfg) ->
ok.
).
del_limiter_bucket(Id, #{limiter := Limiters}) ->
lists:foreach(
fun(Type) ->
emqx_limiter_server:del_bucket(Id, Type)
end,
maps:keys(Limiters)
);
del_limiter_bucket(_Id, _Cfg) ->
ok.
del_limiter_bucket(Id, Conf) ->
case limiter(Conf) of
undefined ->
ok;
Limiter ->
lists:foreach(
fun(Type) ->
emqx_limiter_server:del_bucket(Id, Type)
end,
maps:keys(Limiter)
)
end.
%% Whether authentication is enabled for this listener (defaults to true).
enable_authn(Opts) ->
    case maps:find(enable_authn, Opts) of
        {ok, Enabled} -> Enabled;
        error -> true
    end.

View File

@ -237,7 +237,7 @@ set_log_handler_level(HandlerId, Level) ->
end.
%% @doc Set both the primary and all handlers level in one command
-spec set_log_level(logger:handler_id()) -> ok | {error, term()}.
-spec set_log_level(logger:level()) -> ok | {error, term()}.
set_log_level(Level) ->
case set_primary_log_level(Level) of
ok -> set_all_log_handlers_level(Level);

View File

@ -23,8 +23,6 @@
-export([start_link/0]).
-export([
get_mem_check_interval/0,
set_mem_check_interval/1,
get_sysmem_high_watermark/0,
set_sysmem_high_watermark/1,
get_procmem_high_watermark/0,
@ -46,6 +44,9 @@
terminate/2,
code_change/3
]).
-ifdef(TEST).
-export([is_sysmem_check_supported/0]).
-endif.
-include("emqx.hrl").
@ -61,14 +62,6 @@ update(OS) ->
%% API
%%--------------------------------------------------------------------
get_mem_check_interval() ->
memsup:get_check_interval().
set_mem_check_interval(Seconds) when Seconds < 60000 ->
memsup:set_check_interval(1);
set_mem_check_interval(Seconds) ->
memsup:set_check_interval(Seconds div 60000).
%% Fetch the configured system-memory high watermark from the OS-monitor
%% server; blocks (no timeout) until the server replies.
get_sysmem_high_watermark() ->
    gen_server:call(?OS_MON, ?FUNCTION_NAME, infinity).
@ -103,11 +96,9 @@ init_os_monitor() ->
init_os_monitor(OS) ->
#{
sysmem_high_watermark := SysHW,
procmem_high_watermark := PHW,
mem_check_interval := MCI
procmem_high_watermark := PHW
} = OS,
set_procmem_high_watermark(PHW),
set_mem_check_interval(MCI),
ok = update_mem_alarm_status(SysHW),
SysHW.

View File

@ -167,9 +167,15 @@ handle_info(Info, State) ->
{noreply, State}.
terminate(_Reason, _State) ->
ok = ekka:unmonitor(membership),
emqx_stats:cancel_update(route_stats),
mnesia:unsubscribe({table, ?ROUTING_NODE, simple}).
try
ok = ekka:unmonitor(membership),
emqx_stats:cancel_update(route_stats),
mnesia:unsubscribe({table, ?ROUTING_NODE, simple})
catch
exit:{noproc, {gen_server, call, [mria_membership, _]}} ->
?SLOG(warning, #{msg => "mria_membership_down"}),
ok
end.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.

View File

@ -42,7 +42,7 @@
-type bar_separated_list() :: list().
-type ip_port() :: tuple() | integer().
-type cipher() :: map().
-type port_number() :: 1..65536.
-type port_number() :: 1..65535.
-type server_parse_option() :: #{
default_port => port_number(),
no_port => boolean(),
@ -77,6 +77,7 @@
validate_heap_size/1,
user_lookup_fun_tr/2,
validate_alarm_actions/1,
validate_keepalive_multiplier/1,
non_empty_string/1,
validations/0,
naive_env_interpolation/1
@ -109,7 +110,8 @@
servers_validator/2,
servers_sc/2,
convert_servers/1,
convert_servers/2
convert_servers/2,
mqtt_converter/2
]).
%% tombstone types
@ -135,7 +137,8 @@
cipher/0,
comma_separated_atoms/0,
url/0,
json_binary/0
json_binary/0,
port_number/0
]).
-export([namespace/0, roots/0, roots/1, fields/1, desc/1, tags/0]).
@ -149,6 +152,8 @@
-define(BIT(Bits), (1 bsl (Bits))).
-define(MAX_UINT(Bits), (?BIT(Bits) - 1)).
-define(DEFAULT_MULTIPLIER, 1.5).
-define(DEFAULT_BACKOFF, 0.75).
namespace() -> broker.
@ -171,6 +176,7 @@ roots(high) ->
ref("mqtt"),
#{
desc => ?DESC(mqtt),
converter => fun ?MODULE:mqtt_converter/2,
importance => ?IMPORTANCE_MEDIUM
}
)},
@ -521,8 +527,19 @@ fields("mqtt") ->
sc(
number(),
#{
default => 0.75,
desc => ?DESC(mqtt_keepalive_backoff)
default => ?DEFAULT_BACKOFF,
%% Must add required => false, zone schema has no default.
required => false,
importance => ?IMPORTANCE_HIDDEN
}
)},
{"keepalive_multiplier",
sc(
number(),
#{
default => ?DEFAULT_MULTIPLIER,
validator => fun ?MODULE:validate_keepalive_multiplier/1,
desc => ?DESC(mqtt_keepalive_multiplier)
}
)},
{"max_subscriptions",
@ -687,12 +704,13 @@ fields("force_shutdown") ->
desc => ?DESC(force_shutdown_enable)
}
)},
{"max_message_queue_len",
{"max_mailbox_size",
sc(
range(0, inf),
#{
default => 1000,
desc => ?DESC(force_shutdown_max_message_queue_len)
aliases => [max_message_queue_len],
desc => ?DESC(force_shutdown_max_mailbox_size)
}
)},
{"max_heap_size",
@ -2000,7 +2018,8 @@ base_listener(Bind) ->
listener_fields
),
#{
desc => ?DESC(base_listener_limiter)
desc => ?DESC(base_listener_limiter),
importance => ?IMPORTANCE_HIDDEN
}
)},
{"enable_authn",
@ -2011,7 +2030,7 @@ base_listener(Bind) ->
default => true
}
)}
].
] ++ emqx_limiter_schema:short_paths_fields(?MODULE).
desc("persistent_session_store") ->
"Settings for message persistence.";
@ -2186,8 +2205,8 @@ filter(Opts) ->
%% @private This function defines the SSL opts which are commonly used by
%% SSL listener and client.
-spec common_ssl_opts_schema(map()) -> hocon_schema:field_schema().
common_ssl_opts_schema(Defaults) ->
-spec common_ssl_opts_schema(map(), server | client) -> hocon_schema:field_schema().
common_ssl_opts_schema(Defaults, Type) ->
D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end,
Df = fun(Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end,
Collection = maps:get(versions, Defaults, tls_all_available),
@ -2197,7 +2216,7 @@ common_ssl_opts_schema(Defaults) ->
sc(
binary(),
#{
default => D("cacertfile"),
default => cert_file("cacert.pem", Type),
required => false,
desc => ?DESC(common_ssl_opts_schema_cacertfile)
}
@ -2206,7 +2225,7 @@ common_ssl_opts_schema(Defaults) ->
sc(
binary(),
#{
default => D("certfile"),
default => cert_file("cert.pem", Type),
required => false,
desc => ?DESC(common_ssl_opts_schema_certfile)
}
@ -2215,7 +2234,7 @@ common_ssl_opts_schema(Defaults) ->
sc(
binary(),
#{
default => D("keyfile"),
default => cert_file("key.pem", Type),
required => false,
desc => ?DESC(common_ssl_opts_schema_keyfile)
}
@ -2286,6 +2305,17 @@ common_ssl_opts_schema(Defaults) ->
desc => ?DESC(common_ssl_opts_schema_secure_renegotiate)
}
)},
{"log_level",
sc(
hoconsc:enum([
emergency, alert, critical, error, warning, notice, info, debug, none, all
]),
#{
default => notice,
desc => ?DESC(common_ssl_opts_schema_log_level),
importance => ?IMPORTANCE_LOW
}
)},
{"hibernate_after",
sc(
@ -2302,7 +2332,7 @@ common_ssl_opts_schema(Defaults) ->
server_ssl_opts_schema(Defaults, IsRanchListener) ->
D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end,
Df = fun(Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end,
common_ssl_opts_schema(Defaults) ++
common_ssl_opts_schema(Defaults, server) ++
[
{"dhfile",
sc(
@ -2428,7 +2458,7 @@ crl_outer_validator(_SSLOpts) ->
%% @doc Make schema for SSL client.
-spec client_ssl_opts_schema(map()) -> hocon_schema:field_schema().
client_ssl_opts_schema(Defaults) ->
common_ssl_opts_schema(Defaults) ++
common_ssl_opts_schema(Defaults, client) ++
[
{"enable",
sc(
@ -2730,6 +2760,13 @@ validate_heap_size(Siz) when is_integer(Siz) ->
validate_heap_size(_SizStr) ->
{error, invalid_heap_size}.
%% Schema validator: the keepalive multiplier must be a number within
%% [1.0, 65535.0]; anything else is rejected with a descriptive error map.
validate_keepalive_multiplier(M) when is_number(M), M >= 1.0, M =< 65535.0 ->
    ok;
validate_keepalive_multiplier(_Other) ->
    {error, #{reason => keepalive_multiplier_out_of_range, min => 1, max => 65535}}.
validate_alarm_actions(Actions) ->
UnSupported = lists:filter(
fun(Action) -> Action =/= log andalso Action =/= publish end, Actions
@ -3248,13 +3285,10 @@ default_listener(ws) ->
};
default_listener(SSLListener) ->
%% The env variable is resolved in emqx_tls_lib by calling naive_env_interpolate
CertFile = fun(Name) ->
iolist_to_binary("${EMQX_ETC_DIR}/" ++ filename:join(["certs", Name]))
end,
SslOptions = #{
<<"cacertfile">> => CertFile(<<"cacert.pem">>),
<<"certfile">> => CertFile(<<"cert.pem">>),
<<"keyfile">> => CertFile(<<"key.pem">>)
<<"cacertfile">> => cert_file(<<"cacert.pem">>, server),
<<"certfile">> => cert_file(<<"cert.pem">>, server),
<<"keyfile">> => cert_file(<<"key.pem">>, server)
},
case SSLListener of
ssl ->
@ -3371,3 +3405,23 @@ ensure_default_listener(#{<<"default">> := _} = Map, _ListenerType) ->
ensure_default_listener(Map, ListenerType) ->
NewMap = Map#{<<"default">> => default_listener(ListenerType)},
keep_default_tombstone(NewMap, #{}).
%% Default certificate file path for server-side SSL options; clients get
%% no default (undefined). The ${EMQX_ETC_DIR} prefix is interpolated later
%% (see the comment in default_listener/1).
cert_file(_File, client) ->
    undefined;
cert_file(File, server) ->
    Path = filename:join(["${EMQX_ETC_DIR}", "certs", File]),
    iolist_to_binary(Path).
%% Config converter for the `mqtt` root: migrates the deprecated
%% `keepalive_backoff` setting into `keepalive_multiplier`
%% (multiplier = backoff * 2).
mqtt_converter(#{<<"keepalive_multiplier">> := Multi} = Mqtt, _Opts) ->
    %% Compare at two-decimal precision to tolerate float representation noise.
    case round(Multi * 100) =:= round(?DEFAULT_MULTIPLIER * 100) of
        false ->
            %% Multiplier is provided, and it's not default value
            Mqtt;
        true ->
            %% Multiplier is default value, fallback to use Backoff value
            %% Backoff default value was half of Multiplier default value
            %% so there is no need to compare Backoff with its default.
            Backoff = maps:get(<<"keepalive_backoff">>, Mqtt, ?DEFAULT_BACKOFF),
            Mqtt#{<<"keepalive_multiplier">> => Backoff * 2}
    end;
%% Only the deprecated key is present: derive the multiplier from it.
mqtt_converter(#{<<"keepalive_backoff">> := Backoff} = Mqtt, _Opts) ->
    Mqtt#{<<"keepalive_multiplier">> => Backoff * 2};
%% Neither key present: nothing to convert.
mqtt_converter(Mqtt, _Opts) ->
    Mqtt.

View File

@ -291,16 +291,16 @@ stats(Session) -> info(?STATS_KEYS, Session).
ignore_local(ClientInfo, Delivers, Subscriber, Session) ->
Subs = info(subscriptions, Session),
lists:dropwhile(
lists:filter(
fun({deliver, Topic, #message{from = Publisher} = Msg}) ->
case maps:find(Topic, Subs) of
{ok, #{nl := 1}} when Subscriber =:= Publisher ->
ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, no_local]),
ok = emqx_metrics:inc('delivery.dropped'),
ok = emqx_metrics:inc('delivery.dropped.no_local'),
true;
false;
_ ->
false
true
end
end,
Delivers

View File

@ -158,9 +158,18 @@ dispatch(Group, Topic, Delivery = #delivery{message = Msg}, FailedSubs) ->
-spec strategy(emqx_topic:group()) -> strategy().
strategy(Group) ->
case emqx:get_config([broker, shared_subscription_group, Group, strategy], undefined) of
undefined -> emqx:get_config([broker, shared_subscription_strategy]);
Strategy -> Strategy
try
emqx:get_config([
broker,
shared_subscription_group,
binary_to_existing_atom(Group),
strategy
])
catch
error:{config_not_found, _} ->
get_default_shared_subscription_strategy();
error:badarg ->
get_default_shared_subscription_strategy()
end.
-spec ack_enabled() -> boolean().
@ -544,3 +553,6 @@ delete_route_if_needed({Group, Topic} = GroupTopic) ->
if_no_more_subscribers(GroupTopic, fun() ->
ok = emqx_router:do_delete_route(Topic, {Group, node()})
end).
%% Fallback used when no per-group strategy is configured (or the group
%% name is not an existing atom): the node-wide
%% `broker.shared_subscription_strategy' config value.
get_default_shared_subscription_strategy() ->
    emqx:get_config([broker, shared_subscription_strategy]).

View File

@ -129,7 +129,7 @@
socktype := socktype(),
sockname := peername(),
peername := peername(),
peercert := nossl | undefined | esockd_peercert:peercert(),
peercert => nossl | undefined | esockd_peercert:peercert(),
conn_mod := module(),
proto_name => binary(),
proto_ver => proto_ver(),
@ -238,7 +238,7 @@
-type stats() :: [{atom(), term()}].
-type oom_policy() :: #{
max_message_queue_len => non_neg_integer(),
max_mailbox_size => non_neg_integer(),
max_heap_size => non_neg_integer(),
enable => boolean()
}.

View File

@ -156,6 +156,19 @@ t_cluster_nodes(_) ->
?assertEqual(Expected, emqx:cluster_nodes(cores)),
?assertEqual([], emqx:cluster_nodes(stopped)).
%% Config lookup works with both atom paths and string/binary paths.
t_get_config(_) ->
    ?assertEqual(false, emqx:get_config([overload_protection, enable])),
    ?assertEqual(false, emqx:get_config(["overload_protection", <<"enable">>])).

%% Same lookups with an explicit default: existing keys ignore the default.
t_get_config_default_1(_) ->
    ?assertEqual(false, emqx:get_config([overload_protection, enable], undefined)),
    ?assertEqual(false, emqx:get_config(["overload_protection", <<"enable">>], undefined)).

%% Non-existent keys — whether reached via an atom path or a non-atom
%% path — return the supplied default instead of raising.
t_get_config_default_2(_) ->
    AtomPathRes = emqx:get_config([overload_protection, <<"_!no_@exist_">>], undefined),
    NonAtomPathRes = emqx:get_config(["doesnotexist", <<"db_backend">>], undefined),
    ?assertEqual(undefined, NonAtomPathRes),
    ?assertEqual(undefined, AtomPathRes).
%%--------------------------------------------------------------------
%% Hook fun
%%--------------------------------------------------------------------

View File

@ -116,7 +116,6 @@ clientinfo(InitProps) ->
username => <<"username">>,
password => <<"passwd">>,
is_superuser => false,
peercert => undefined,
mountpoint => undefined
},
InitProps

View File

@ -47,7 +47,9 @@
-type param_types() :: #{emqx_bpapi:var_name() => _Type}.
%% Applications and modules we wish to ignore in the analysis:
-define(IGNORED_APPS, "gen_rpc, recon, redbug, observer_cli, snabbkaffe, ekka, mria").
-define(IGNORED_APPS,
"gen_rpc, recon, redbug, observer_cli, snabbkaffe, ekka, mria, amqp_client, rabbit_common"
).
-define(IGNORED_MODULES, "emqx_rpc").
%% List of known RPC backend modules:
-define(RPC_MODULES, "gen_rpc, erpc, rpc, emqx_rpc").

View File

@ -31,7 +31,7 @@ force_gc_conf() ->
#{bytes => 16777216, count => 16000, enable => true}.
force_shutdown_conf() ->
#{enable => true, max_heap_size => 4194304, max_message_queue_len => 1000}.
#{enable => true, max_heap_size => 4194304, max_mailbox_size => 1000}.
rpc_conf() ->
#{
@ -1211,7 +1211,6 @@ clientinfo(InitProps) ->
clientid => <<"clientid">>,
username => <<"username">>,
is_superuser => false,
peercert => undefined,
mountpoint => undefined
},
InitProps

View File

@ -22,6 +22,8 @@
-import(lists, [nth/2]).
-include_lib("emqx/include/emqx_mqtt.hrl").
-include_lib("emqx/include/emqx_hooks.hrl").
-include_lib("emqx/include/asserts.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
@ -67,14 +69,16 @@ groups() ->
%% t_keepalive,
%% t_redelivery_on_reconnect,
%% subscribe_failure_test,
t_dollar_topics
t_dollar_topics,
t_sub_non_utf8_topic
]},
{mqttv5, [non_parallel_tests], [t_basic_with_props_v5]},
{others, [non_parallel_tests], [
t_username_as_clientid,
t_certcn_as_clientid_default_config_tls,
t_certcn_as_clientid_tlsv1_3,
t_certcn_as_clientid_tlsv1_2
t_certcn_as_clientid_tlsv1_2,
t_peercert_preserved_before_connected
]}
].
@ -297,6 +301,36 @@ t_dollar_topics(_) ->
ok = emqtt:disconnect(C),
ct:pal("$ topics test succeeded").
%% Subscribing with a topic filter that is not valid UTF-8 must get the
%% connection closed by the broker and be counted in the listener's
%% `topic_filter_invalid' shutdown counter.  Uses a raw TCP socket since
%% a well-behaved client library would refuse to send such a packet.
t_sub_non_utf8_topic(_) ->
    {ok, Socket} = gen_tcp:connect({127, 0, 0, 1}, 1883, [{active, true}, binary]),
    ConnPacket = emqx_frame:serialize(#mqtt_packet{
        header = #mqtt_packet_header{type = 1},
        variable = #mqtt_packet_connect{
            clientid = <<"abcdefg">>
        }
    }),
    ok = gen_tcp:send(Socket, ConnPacket),
    %% <<32,2,0,0>> is a success CONNACK (type 2, session-present 0, rc 0).
    receive
        {tcp, _, _ConnAck = <<32, 2, 0, 0>>} -> ok
    after 3000 -> ct:fail({connect_ack_not_recv, process_info(self(), messages)})
    end,
    %% Hand-crafted SUBSCRIBE: fixed header (130 = SUBSCRIBE with flags,
    %% remaining length 18) and what is presumably the packet id 25,178
    %% — TODO confirm against the MQTT framing spec.
    SubHeader = <<130, 18, 25, 178>>,
    SubTopicLen = <<0, 13>>,
    %% this is not a valid utf8 topic
    SubTopic = <<128, 10, 10, 12, 178, 159, 162, 47, 115, 1, 1, 1, 1>>,
    SubQoS = <<1>>,
    SubPacket = <<SubHeader/binary, SubTopicLen/binary, SubTopic/binary, SubQoS/binary>>,
    ok = gen_tcp:send(Socket, SubPacket),
    %% The broker should drop the connection rather than accept the filter.
    receive
        {tcp_closed, _} -> ok
    after 3000 -> ct:fail({should_get_disconnected, process_info(self(), messages)})
    end,
    %% Give the listener time to record the shutdown reason before reading
    %% the counters.
    timer:sleep(1000),
    ListenerCounts = emqx_listeners:shutdown_count('tcp:default', {{0, 0, 0, 0}, 1883}),
    TopicInvalidCount = proplists:get_value(topic_filter_invalid, ListenerCounts),
    ?assert(is_integer(TopicInvalidCount) andalso TopicInvalidCount > 0),
    ok.
%%--------------------------------------------------------------------
%% Test cases for MQTT v5
%%--------------------------------------------------------------------
@ -348,6 +382,42 @@ t_certcn_as_clientid_tlsv1_3(_) ->
t_certcn_as_clientid_tlsv1_2(_) ->
tls_certcn_as_clientid('tlsv1.2').
%% The client's TLS peer certificate must be visible (as a binary) in the
%% conninfo passed to the 'client.connect' and 'client.connected' hooks,
%% but must NOT be retained in the channel's stored conninfo afterwards
%% (to avoid keeping the certificate in memory per connection).
t_peercert_preserved_before_connected(_) ->
    ok = emqx_config:put_zone_conf(default, [mqtt], #{}),
    %% Forward the hook-time conninfo to this test process via on_hook/4.
    ok = emqx_hooks:add(
        'client.connect',
        {?MODULE, on_hook, ['client.connect', self()]},
        ?HP_HIGHEST
    ),
    ok = emqx_hooks:add(
        'client.connected',
        {?MODULE, on_hook, ['client.connected', self()]},
        ?HP_HIGHEST
    ),
    ClientId = atom_to_binary(?FUNCTION_NAME),
    SslConf = emqx_common_test_helpers:client_ssl_twoway(default),
    {ok, Client} = emqtt:start_link([
        {port, 8883},
        {clientid, ClientId},
        {ssl, true},
        {ssl_opts, SslConf}
    ]),
    {ok, _} = emqtt:connect(Client),
    %% Both hooks observed a binary peercert during the handshake.
    _ = ?assertReceive({'client.connect', #{peercert := PC}} when is_binary(PC)),
    _ = ?assertReceive({'client.connected', #{peercert := PC}} when is_binary(PC)),
    %% ...but the stored conninfo no longer carries the peercert key.
    [ConnPid] = emqx_cm:lookup_channels(ClientId),
    ?assertMatch(
        #{conninfo := ConnInfo} when not is_map_key(peercert, ConnInfo),
        emqx_connection:info(ConnPid)
    ).
%% Hook callback used by t_peercert_preserved_before_connected/1: forwards
%% the conninfo map seen at the hook point to the test process mailbox,
%% tagged with the hook point name.  Note the conninfo argument position
%% differs between the two hook points.
on_hook(ConnInfo, _, 'client.connect' = HookPoint, Pid) ->
    Pid ! {HookPoint, ConnInfo},
    ok;
on_hook(_ClientInfo, ConnInfo, 'client.connected' = HookPoint, Pid) ->
    Pid ! {HookPoint, ConnInfo},
    ok.
%%--------------------------------------------------------------------
%% Helper functions
%%--------------------------------------------------------------------
@ -390,10 +460,4 @@ tls_certcn_as_clientid(TLSVsn, RequiredTLSVsn) ->
{ok, _} = emqtt:connect(Client),
#{clientinfo := #{clientid := CN}} = emqx_cm:get_chan_info(CN),
confirm_tls_version(Client, RequiredTLSVsn),
%% verify that the peercert won't be stored in the conninfo
[ChannPid] = emqx_cm:lookup_channels(CN),
SysState = sys:get_state(ChannPid),
ChannelRecord = lists:keyfind(channel, 1, tuple_to_list(SysState)),
ConnInfo = lists:nth(2, tuple_to_list(ChannelRecord)),
?assertMatch(#{peercert := undefined}, ConnInfo),
emqtt:disconnect(Client).

View File

@ -231,22 +231,21 @@ render_and_load_app_config(App, Opts) ->
try
do_render_app_config(App, Schema, Conf, Opts)
catch
throw:skip ->
ok;
throw:E:St ->
%% turn throw into error
error({Conf, E, St})
end.
do_render_app_config(App, Schema, ConfigFile, Opts) ->
try
Vars = mustache_vars(App, Opts),
RenderedConfigFile = render_config_file(ConfigFile, Vars),
read_schema_configs(Schema, RenderedConfigFile),
force_set_config_file_paths(App, [RenderedConfigFile]),
copy_certs(App, RenderedConfigFile),
ok
catch
throw:skip ->
ok
end.
%% copy acl_conf must run before read_schema_configs
copy_acl_conf(),
Vars = mustache_vars(App, Opts),
RenderedConfigFile = render_config_file(ConfigFile, Vars),
read_schema_configs(Schema, RenderedConfigFile),
force_set_config_file_paths(App, [RenderedConfigFile]),
copy_certs(App, RenderedConfigFile),
ok.
start_app(App, SpecAppConfig, Opts) ->
render_and_load_app_config(App, Opts),
@ -255,6 +254,7 @@ start_app(App, SpecAppConfig, Opts) ->
{ok, _} ->
ok = ensure_dashboard_listeners_started(App),
ok = wait_for_app_processes(App),
ok = perform_sanity_checks(App),
ok;
{error, Reason} ->
error({failed_to_start_app, App, Reason})
@ -268,6 +268,27 @@ wait_for_app_processes(emqx_conf) ->
wait_for_app_processes(_) ->
ok.
%% These are checks to detect inter-suite or inter-testcase flakiness
%% early. For example, one suite might forget one application running
%% and stop others, and then the `application:start/2' callback is
%% never called again for this application.
%%
%% For the apps below, verify their config handlers are actually
%% registered after startup; all other apps need no check.
perform_sanity_checks(emqx_rule_engine) ->
    ensure_config_handler(emqx_rule_engine, [rule_engine, rules]),
    ok;
perform_sanity_checks(emqx_bridge) ->
    ensure_config_handler(emqx_bridge, [bridges]),
    ok;
perform_sanity_checks(_App) ->
    ok.
%% Assert that `Module' is registered as the config handler for
%% `ConfigPath' inside the running `emqx_config_handler' process; crash
%% with a descriptive error tuple otherwise.
%% NOTE(review): `{mod}' is a 1-tuple map key — presumably the key
%% emqx_config_handler uses to store the handler module at each node of
%% its handler tree; confirm against that module's state layout.
ensure_config_handler(Module, ConfigPath) ->
    #{handlers := Handlers} = sys:get_state(emqx_config_handler),
    case emqx_utils_maps:deep_get(ConfigPath, Handlers, not_found) of
        #{{mod} := Module} -> ok;
        _NotFound -> error({config_handler_missing, ConfigPath, Module})
    end,
    ok.
app_conf_file(emqx_conf) -> "emqx.conf.all";
app_conf_file(App) -> atom_to_list(App) ++ ".conf".
@ -503,6 +524,16 @@ copy_certs(emqx_conf, Dest0) ->
copy_certs(_, _) ->
ok.
%% Ensure an acl.conf exists under emqx's etc dir before configs are
%% read: copy it from emqx_authz when that app is on the code path,
%% otherwise create an empty placeholder (only if no file exists yet).
copy_acl_conf() ->
    Dest = filename:join([code:lib_dir(emqx), "etc/acl.conf"]),
    case code:lib_dir(emqx_authz) of
        {error, bad_name} ->
            %% emqx_authz is not available; an empty file is sufficient.
            %% Write result is intentionally ignored (best-effort).
            (not filelib:is_regular(Dest)) andalso file:write_file(Dest, <<"">>);
        _ ->
            {ok, _} = file:copy(deps_path(emqx_authz, "etc/acl.conf"), Dest)
    end,
    ok.
load_config(SchemaModule, Config) ->
ConfigBin =
case is_map(Config) of
@ -830,8 +861,8 @@ setup_node(Node, Opts) when is_map(Opts) ->
LoadSchema andalso
begin
%% to avoid sharing data between executions and/or
%% nodes. these variables might notbe in the
%% config file (e.g.: emqx_ee_conf_schema).
%% nodes. these variables might not be in the
%% config file (e.g.: emqx_enterprise_schema).
NodeDataDir = filename:join([
PrivDataDir,
node(),

View File

@ -676,7 +676,6 @@ channel(InitFields) ->
clientid => <<"clientid">>,
username => <<"username">>,
is_superuser => false,
peercert => undefined,
mountpoint => undefined
},
Conf = emqx_cm:get_session_confs(ClientInfo, #{

View File

@ -27,20 +27,14 @@ t_check(_) ->
Keepalive = emqx_keepalive:init(60),
?assertEqual(60, emqx_keepalive:info(interval, Keepalive)),
?assertEqual(0, emqx_keepalive:info(statval, Keepalive)),
?assertEqual(0, emqx_keepalive:info(repeat, Keepalive)),
Info = emqx_keepalive:info(Keepalive),
?assertEqual(
#{
interval => 60,
statval => 0,
repeat => 0
statval => 0
},
Info
),
{ok, Keepalive1} = emqx_keepalive:check(1, Keepalive),
?assertEqual(1, emqx_keepalive:info(statval, Keepalive1)),
?assertEqual(0, emqx_keepalive:info(repeat, Keepalive1)),
{ok, Keepalive2} = emqx_keepalive:check(1, Keepalive1),
?assertEqual(1, emqx_keepalive:info(statval, Keepalive2)),
?assertEqual(1, emqx_keepalive:info(repeat, Keepalive2)),
?assertEqual({error, timeout}, emqx_keepalive:check(1, Keepalive2)).
?assertEqual({error, timeout}, emqx_keepalive:check(1, Keepalive1)).

View File

@ -829,6 +829,42 @@ t_subscribe_no_local(Config) ->
?assertEqual(1, length(receive_messages(2))),
ok = emqtt:disconnect(Client1).
%% Mixed-publisher 'No Local' test: a subscription with nl=true must
%% filter out messages the subscriber itself published, while still
%% delivering messages published by other clients on the same topic.
t_subscribe_no_local_mixed(Config) ->
    ConnFun = ?config(conn_fun, Config),
    Topic = nth(1, ?TOPICS),
    {ok, Client1} = emqtt:start_link([{proto_ver, v5} | Config]),
    {ok, _} = emqtt:ConnFun(Client1),
    {ok, Client2} = emqtt:start_link([{proto_ver, v5} | Config]),
    {ok, _} = emqtt:ConnFun(Client2),
    %% Given two clients, and client1 subscribes to the topic with 'No Local' set to true
    {ok, _, [2]} = emqtt:subscribe(Client1, #{}, [{Topic, [{nl, true}, {qos, 2}]}]),
    %% When mixed publish traffic is sent from both clients (Client1 sends 6 and Client2 sends 2)
    CB = {fun emqtt:sync_publish_result/3, [self(), async_res]},
    ok = emqtt:publish_async(Client1, Topic, <<"t_subscribe_no_local_mixed1">>, 0, CB),
    ok = emqtt:publish_async(Client2, Topic, <<"t_subscribe_no_local_mixed2">>, 0, CB),
    ok = emqtt:publish_async(Client1, Topic, <<"t_subscribe_no_local_mixed3">>, 0, CB),
    ok = emqtt:publish_async(Client1, Topic, <<"t_subscribe_no_local_mixed4">>, 0, CB),
    ok = emqtt:publish_async(Client1, Topic, <<"t_subscribe_no_local_mixed5">>, 0, CB),
    ok = emqtt:publish_async(Client2, Topic, <<"t_subscribe_no_local_mixed6">>, 0, CB),
    ok = emqtt:publish_async(Client1, Topic, <<"t_subscribe_no_local_mixed7">>, 0, CB),
    ok = emqtt:publish_async(Client1, Topic, <<"t_subscribe_no_local_mixed8">>, 0, CB),
    %% Wait for all 8 publish acknowledgements before checking delivery.
    [
        receive
            {async_res, Res} -> ?assertEqual(ok, Res)
        end
     || _ <- lists:seq(1, 8)
    ],
    %% Then only the two messages from client 2 are received.
    %% Asking receive_messages/1 for 9 (more than can arrive) presumably
    %% makes it collect until its internal timeout — TODO confirm helper.
    PubRecvd = receive_messages(9),
    ct:pal("~p", [PubRecvd]),
    ?assertEqual(2, length(PubRecvd)),
    ok = emqtt:disconnect(Client1),
    ok = emqtt:disconnect(Client2).
t_subscribe_actions(Config) ->
ConnFun = ?config(conn_fun, Config),
Topic = nth(1, ?TOPICS),

View File

@ -967,20 +967,11 @@ do_t_validations(_Config) ->
{error, {_, _, ResRaw3}} = update_listener_via_api(ListenerId, ListenerData3),
#{<<"code">> := <<"BAD_REQUEST">>, <<"message">> := MsgRaw3} =
emqx_utils_json:decode(ResRaw3, [return_maps]),
%% we can't remove certfile now, because it has default value.
?assertMatch(
#{
<<"mismatches">> :=
#{
<<"listeners:ssl_not_required_bind">> :=
#{
<<"reason">> :=
<<"Server certificate must be defined when using OCSP stapling">>
}
}
},
emqx_utils_json:decode(MsgRaw3, [return_maps])
<<"{bad_ssl_config,#{file_read => enoent,pem_check => invalid_pem", _/binary>>,
MsgRaw3
),
ok.
t_unknown_error_fetching_ocsp_response(_Config) ->

View File

@ -43,8 +43,8 @@ init_per_testcase(t_cpu_check_alarm, Config) ->
{ok, _} = supervisor:restart_child(emqx_sys_sup, emqx_os_mon),
Config;
init_per_testcase(t_sys_mem_check_alarm, Config) ->
case os:type() of
{unix, linux} ->
case emqx_os_mon:is_sysmem_check_supported() of
true ->
SysMon = emqx_config:get([sysmon, os], #{}),
emqx_config:put([sysmon, os], SysMon#{
sysmem_high_watermark => 0.51,
@ -54,7 +54,7 @@ init_per_testcase(t_sys_mem_check_alarm, Config) ->
ok = supervisor:terminate_child(emqx_sys_sup, emqx_os_mon),
{ok, _} = supervisor:restart_child(emqx_sys_sup, emqx_os_mon),
Config;
_ ->
false ->
Config
end;
init_per_testcase(_, Config) ->
@ -63,12 +63,6 @@ init_per_testcase(_, Config) ->
Config.
t_api(_) ->
?assertEqual(60000, emqx_os_mon:get_mem_check_interval()),
?assertEqual(ok, emqx_os_mon:set_mem_check_interval(30000)),
?assertEqual(60000, emqx_os_mon:get_mem_check_interval()),
?assertEqual(ok, emqx_os_mon:set_mem_check_interval(122000)),
?assertEqual(120000, emqx_os_mon:get_mem_check_interval()),
?assertEqual(0.7, emqx_os_mon:get_sysmem_high_watermark()),
?assertEqual(ok, emqx_os_mon:set_sysmem_high_watermark(0.8)),
?assertEqual(0.8, emqx_os_mon:get_sysmem_high_watermark()),
@ -86,12 +80,29 @@ t_api(_) ->
gen_server:stop(emqx_os_mon),
ok.
%% Only runs where memsup-based system memory checking is supported
%% (as reported by emqx_os_mon); skipped elsewhere.
t_sys_mem_check_disable(Config) ->
    case emqx_os_mon:is_sysmem_check_supported() of
        true -> do_sys_mem_check_disable(Config);
        false -> skip
    end.
%% Changing `sysmon.os.mem_check_interval' must restart the memory-check
%% timer (new reference), and setting it to `disabled' must cancel the
%% timer entirely (mem_time_ref becomes undefined in the server state).
do_sys_mem_check_disable(_Config) ->
    MemRef0 = maps:get(mem_time_ref, sys:get_state(emqx_os_mon)),
    ?assertEqual(true, is_reference(MemRef0), MemRef0),
    emqx_config:put([sysmon, os, mem_check_interval], 1000),
    emqx_os_mon:update(emqx_config:get([sysmon, os])),
    %% The timer was rescheduled: still a reference, but a fresh one.
    MemRef1 = maps:get(mem_time_ref, sys:get_state(emqx_os_mon)),
    ?assertEqual(true, is_reference(MemRef1), {MemRef0, MemRef1}),
    ?assertNotEqual(MemRef0, MemRef1),
    emqx_config:put([sysmon, os, mem_check_interval], disabled),
    emqx_os_mon:update(emqx_config:get([sysmon, os])),
    ?assertEqual(undefined, maps:get(mem_time_ref, sys:get_state(emqx_os_mon))),
    ok.
t_sys_mem_check_alarm(Config) ->
case os:type() of
{unix, linux} ->
do_sys_mem_check_alarm(Config);
_ ->
skip
case emqx_os_mon:is_sysmem_check_supported() of
true -> do_sys_mem_check_alarm(Config);
false -> skip
end.
do_sys_mem_check_alarm(_Config) ->

View File

@ -47,7 +47,7 @@ all() ->
emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
ok = emqx_common_test_helpers:load_config(emqx_limiter_schema, ?BASE_CONF),
load_conf(),
emqx_common_test_helpers:start_apps([?APP]),
Config.
@ -55,13 +55,15 @@ end_per_suite(_Config) ->
emqx_common_test_helpers:stop_apps([?APP]).
init_per_testcase(_TestCase, Config) ->
emqx_config:erase(limiter),
load_conf(),
Config.
end_per_testcase(_TestCase, Config) ->
Config.
load_conf() ->
emqx_common_test_helpers:load_config(emqx_limiter_schema, ?BASE_CONF).
ok = emqx_common_test_helpers:load_config(emqx_limiter_schema, ?BASE_CONF).
init_config() ->
emqx_config:init_load(emqx_limiter_schema, ?BASE_CONF).
@ -313,8 +315,8 @@ t_capacity(_) ->
%% Test Cases Global Level
%%--------------------------------------------------------------------
t_collaborative_alloc(_) ->
GlobalMod = fun(#{message_routing := MR} = Cfg) ->
Cfg#{message_routing := MR#{rate := ?RATE("600/1s")}}
GlobalMod = fun(Cfg) ->
Cfg#{message_routing => #{rate => ?RATE("600/1s"), burst => 0}}
end,
Bucket1 = fun(#{client := Cli} = Bucket) ->
@ -353,11 +355,11 @@ t_collaborative_alloc(_) ->
).
t_burst(_) ->
GlobalMod = fun(#{message_routing := MR} = Cfg) ->
GlobalMod = fun(Cfg) ->
Cfg#{
message_routing := MR#{
rate := ?RATE("200/1s"),
burst := ?RATE("400/1s")
message_routing => #{
rate => ?RATE("200/1s"),
burst => ?RATE("400/1s")
}
}
end,
@ -615,6 +617,24 @@ t_extract_with_type(_) ->
)
).
%% Adding a bucket with no config or with the default (infinity-rate)
%% config must NOT register a bucket in the limiter server; only a
%% bucket with a concrete rate is stored, and del_bucket removes it.
t_add_bucket(_) ->
    %% Helper: assert the number of buckets held by the `bytes' limiter server.
    Checker = fun(Size) ->
        #{buckets := Buckets} = sys:get_state(emqx_limiter_server:whereis(bytes)),
        ?assertEqual(Size, maps:size(Buckets), Buckets)
    end,
    DefBucket = emqx_limiter_schema:default_bucket_config(),
    ?assertEqual(ok, emqx_limiter_server:add_bucket(?FUNCTION_NAME, bytes, undefined)),
    Checker(0),
    ?assertEqual(ok, emqx_limiter_server:add_bucket(?FUNCTION_NAME, bytes, DefBucket)),
    Checker(0),
    %% A non-default rate finally creates a real bucket.
    ?assertEqual(
        ok, emqx_limiter_server:add_bucket(?FUNCTION_NAME, bytes, DefBucket#{rate := 100})
    ),
    Checker(1),
    ?assertEqual(ok, emqx_limiter_server:del_bucket(?FUNCTION_NAME, bytes)),
    Checker(0),
    ok.
%%--------------------------------------------------------------------
%% Test Cases Create Instance
%%--------------------------------------------------------------------
@ -653,16 +673,16 @@ t_not_exists_instance(_) ->
),
?assertEqual(
{error, invalid_bucket},
{ok, infinity},
emqx_limiter_server:connect(?FUNCTION_NAME, not_exists, Cfg)
),
ok.
t_create_instance_with_node(_) ->
GlobalMod = fun(#{message_routing := MR} = Cfg) ->
GlobalMod = fun(Cfg) ->
Cfg#{
message_routing := MR#{rate := ?RATE("200/1s")},
messages := MR#{rate := ?RATE("200/1s")}
message_routing => #{rate => ?RATE("200/1s"), burst => 0},
messages => #{rate => ?RATE("200/1s"), burst => 0}
}
end,
@ -739,6 +759,68 @@ t_esockd_htb_consume(_) ->
?assertMatch({ok, _}, C2R),
ok.
%%--------------------------------------------------------------------
%% Test Cases short paths
%%--------------------------------------------------------------------
%% Node-level short-path limiter options (max_conn_rate, messages_rate,
%% bytes_rate) must be translated into per-type node opts without
%% creating entries under the regular `limiter.<type>' config paths.
%% NOTE(review): configured 1000/100/10 come back as 100.0/10.0/1.0 —
%% presumably normalized to the limiter's internal time window; confirm
%% against emqx_limiter_schema.
t_node_short_paths(_) ->
    CfgStr = <<"limiter {max_conn_rate = \"1000\", messages_rate = \"100\", bytes_rate = \"10\"}">>,
    ok = emqx_common_test_helpers:load_config(emqx_limiter_schema, CfgStr),
    Accessor = fun emqx_limiter_schema:get_node_opts/1,
    ?assertMatch(#{rate := 100.0}, Accessor(connection)),
    ?assertMatch(#{rate := 10.0}, Accessor(messages)),
    ?assertMatch(#{rate := 1.0}, Accessor(bytes)),
    %% Types not covered by a short path keep the unlimited default.
    ?assertMatch(#{rate := infinity}, Accessor(message_routing)),
    ?assertEqual(undefined, emqx:get_config([limiter, connection], undefined)).
%% When both a short path and the equivalent long-form path are present,
%% the short path takes precedence over the legacy long-form value.
t_compatibility_for_node_short_paths(_) ->
    CfgStr =
        <<"limiter {max_conn_rate = \"1000\", connection.rate = \"500\", bytes.rate = \"200\"}">>,
    ok = emqx_common_test_helpers:load_config(emqx_limiter_schema, CfgStr),
    Accessor = fun emqx_limiter_schema:get_node_opts/1,
    ?assertMatch(#{rate := 100.0}, Accessor(connection)),
    ?assertMatch(#{rate := 20.0}, Accessor(bytes)).
%% Listener-level short paths must expand into client-side message/bytes
%% limiters plus a listener connection limiter.
t_listener_short_paths(_) ->
    CfgStr = <<
        ""
        "listeners.tcp.default {max_conn_rate = \"1000\", messages_rate = \"100\", bytes_rate = \"10\"}"
        ""
    >>,
    ok = emqx_common_test_helpers:load_config(emqx_schema, CfgStr),
    ListenerOpt = emqx:get_config([listeners, tcp, default]),
    ?assertMatch(
        #{
            client := #{
                messages := #{rate := 10.0},
                bytes := #{rate := 1.0}
            },
            connection := #{rate := 100.0}
        },
        emqx_limiter_schema:get_listener_opts(ListenerOpt)
    ).
%% A listener short path wins over the legacy `limiter.connection.rate'
%% long form when both are given.
t_compatibility_for_listener_short_paths(_) ->
    CfgStr = <<
        "" "listeners.tcp.default {max_conn_rate = \"1000\", limiter.connection.rate = \"500\"}" ""
    >>,
    ok = emqx_common_test_helpers:load_config(emqx_schema, CfgStr),
    ListenerOpt = emqx:get_config([listeners, tcp, default]),
    ?assertMatch(
        #{
            connection := #{rate := 100.0}
        },
        emqx_limiter_schema:get_listener_opts(ListenerOpt)
    ).
%% With no limiter configuration at all, a listener must report no
%% limiter options (undefined) rather than some default structure.
t_no_limiter_for_listener(_) ->
    CfgStr = <<>>,
    ok = emqx_common_test_helpers:load_config(emqx_schema, CfgStr),
    ListenerOpt = emqx:get_config([listeners, tcp, default]),
    ?assertEqual(
        undefined,
        emqx_limiter_schema:get_listener_opts(ListenerOpt)
    ).
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
@ -1043,3 +1125,11 @@ make_create_test_data_with_infinity_node(FakeInstnace) ->
%% client = C bucket = B C > B
{MkA(1000, 100), IsRefLimiter(FakeInstnace)}
].
%% Parse a HOCON config string and check it against the limiter schema,
%% returning the checked plain map.  Crashes (badmatch) on a HOCON parse
%% error, which is fine for test code.
parse_schema(ConfigString) ->
    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
    hocon_tconf:check_plain(
        emqx_limiter_schema,
        RawConf,
        %% No required fields; keep keys as binaries rather than atoms.
        #{required => false, atom_key => false}
    ).

View File

@ -655,6 +655,43 @@ password_converter_test() ->
?assertThrow("must_quote", emqx_schema:password_converter(foobar, #{})),
ok.
%% Shorthand for an mqtt config map carrying both keepalive knobs.
-define(MQTT(B, M), #{<<"keepalive_backoff">> => B, <<"keepalive_multiplier">> => M}).

%% Covers emqx_schema:mqtt_converter/2's backoff->multiplier migration:
%% a non-default multiplier always wins; a default (or absent) multiplier
%% is derived from the backoff as `backoff * 2'; configs touching neither
%% key pass through unchanged.
keepalive_convert_test() ->
    ?assertEqual(undefined, emqx_schema:mqtt_converter(undefined, #{})),
    DefaultBackoff = 0.75,
    DefaultMultiplier = 1.5,
    Default = ?MQTT(DefaultBackoff, DefaultMultiplier),
    %% All-default input stays as-is.
    ?assertEqual(Default, emqx_schema:mqtt_converter(Default, #{})),
    ?assertEqual(?MQTT(1.5, 3), emqx_schema:mqtt_converter(?MQTT(1.5, 3), #{})),
    ?assertEqual(
        ?MQTT(DefaultBackoff, 3), emqx_schema:mqtt_converter(?MQTT(DefaultBackoff, 3), #{})
    ),
    %% Default multiplier + custom backoff: multiplier becomes backoff * 2.
    ?assertEqual(?MQTT(1, 2), emqx_schema:mqtt_converter(?MQTT(1, DefaultMultiplier), #{})),
    ?assertEqual(?MQTT(1.5, 3), emqx_schema:mqtt_converter(?MQTT(1.5, 3), #{})),
    ?assertEqual(#{}, emqx_schema:mqtt_converter(#{}, #{})),
    %% Backoff only: multiplier is synthesized from it.
    ?assertEqual(
        #{<<"keepalive_backoff">> => 1.5, <<"keepalive_multiplier">> => 3.0},
        emqx_schema:mqtt_converter(#{<<"keepalive_backoff">> => 1.5}, #{})
    ),
    ?assertEqual(
        #{<<"keepalive_multiplier">> => 5.0},
        emqx_schema:mqtt_converter(#{<<"keepalive_multiplier">> => 5.0}, #{})
    ),
    ?assertEqual(
        #{
            <<"keepalive_backoff">> => DefaultBackoff,
            <<"keepalive_multiplier">> => DefaultMultiplier
        },
        emqx_schema:mqtt_converter(#{<<"keepalive_backoff">> => DefaultBackoff}, #{})
    ),
    ?assertEqual(
        #{<<"keepalive_multiplier">> => DefaultMultiplier},
        emqx_schema:mqtt_converter(#{<<"keepalive_multiplier">> => DefaultMultiplier}, #{})
    ),
    ok.
url_type_test_() ->
[
?_assertEqual(

View File

@ -33,17 +33,6 @@
]
).
-define(STATS_KEYS, [
recv_oct,
recv_cnt,
send_oct,
send_cnt,
recv_pkt,
recv_msg,
send_pkt,
send_msg
]).
-define(ws_conn, emqx_ws_connection).
all() -> emqx_common_test_helpers:all(?MODULE).
@ -618,7 +607,6 @@ channel(InitFields) ->
clientid => <<"clientid">>,
username => <<"username">>,
is_superuser => false,
peercert => undefined,
mountpoint => undefined
},
Conf = emqx_cm:get_session_confs(ClientInfo, #{

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_authn, [
{description, "EMQX Authentication"},
{vsn, "0.1.18"},
{vsn, "0.1.20"},
{modules, []},
{registered, [emqx_authn_sup, emqx_authn_registry]},
{applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]},

View File

@ -228,6 +228,7 @@ schema("/listeners/:listener_id/authentication") ->
'operationId' => listener_authenticators,
get => #{
tags => ?API_TAGS_SINGLE,
deprecated => true,
description => ?DESC(listeners_listener_id_authentication_get),
parameters => [param_listener_id()],
responses => #{
@ -239,6 +240,7 @@ schema("/listeners/:listener_id/authentication") ->
},
post => #{
tags => ?API_TAGS_SINGLE,
deprecated => true,
description => ?DESC(listeners_listener_id_authentication_post),
parameters => [param_listener_id()],
'requestBody' => emqx_dashboard_swagger:schema_with_examples(
@ -260,6 +262,7 @@ schema("/listeners/:listener_id/authentication/:id") ->
'operationId' => listener_authenticator,
get => #{
tags => ?API_TAGS_SINGLE,
deprecated => true,
description => ?DESC(listeners_listener_id_authentication_id_get),
parameters => [param_listener_id(), param_auth_id()],
responses => #{
@ -272,6 +275,7 @@ schema("/listeners/:listener_id/authentication/:id") ->
},
put => #{
tags => ?API_TAGS_SINGLE,
deprecated => true,
description => ?DESC(listeners_listener_id_authentication_id_put),
parameters => [param_listener_id(), param_auth_id()],
'requestBody' => emqx_dashboard_swagger:schema_with_examples(
@ -287,6 +291,7 @@ schema("/listeners/:listener_id/authentication/:id") ->
},
delete => #{
tags => ?API_TAGS_SINGLE,
deprecated => true,
description => ?DESC(listeners_listener_id_authentication_id_delete),
parameters => [param_listener_id(), param_auth_id()],
responses => #{
@ -300,6 +305,7 @@ schema("/listeners/:listener_id/authentication/:id/status") ->
'operationId' => listener_authenticator_status,
get => #{
tags => ?API_TAGS_SINGLE,
deprecated => true,
description => ?DESC(listeners_listener_id_authentication_id_status_get),
parameters => [param_listener_id(), param_auth_id()],
responses => #{
@ -330,6 +336,7 @@ schema("/listeners/:listener_id/authentication/:id/position/:position") ->
'operationId' => listener_authenticator_position,
put => #{
tags => ?API_TAGS_SINGLE,
deprecated => true,
description => ?DESC(listeners_listener_id_authentication_id_position_put),
parameters => [param_listener_id(), param_auth_id(), param_position()],
responses => #{
@ -393,6 +400,7 @@ schema("/listeners/:listener_id/authentication/:id/users") ->
'operationId' => listener_authenticator_users,
post => #{
tags => ?API_TAGS_SINGLE,
deprecated => true,
description => ?DESC(listeners_listener_id_authentication_id_users_post),
parameters => [param_auth_id(), param_listener_id()],
'requestBody' => emqx_dashboard_swagger:schema_with_examples(
@ -410,6 +418,7 @@ schema("/listeners/:listener_id/authentication/:id/users") ->
},
get => #{
tags => ?API_TAGS_SINGLE,
deprecated => true,
description => ?DESC(listeners_listener_id_authentication_id_users_get),
parameters => [
param_listener_id(),
@ -479,6 +488,7 @@ schema("/listeners/:listener_id/authentication/:id/users/:user_id") ->
'operationId' => listener_authenticator_user,
get => #{
tags => ?API_TAGS_SINGLE,
deprecated => true,
description => ?DESC(listeners_listener_id_authentication_id_users_user_id_get),
parameters => [param_listener_id(), param_auth_id(), param_user_id()],
responses => #{
@ -491,6 +501,7 @@ schema("/listeners/:listener_id/authentication/:id/users/:user_id") ->
},
put => #{
tags => ?API_TAGS_SINGLE,
deprecated => true,
description => ?DESC(listeners_listener_id_authentication_id_users_user_id_put),
parameters => [param_listener_id(), param_auth_id(), param_user_id()],
'requestBody' => emqx_dashboard_swagger:schema_with_example(
@ -508,6 +519,7 @@ schema("/listeners/:listener_id/authentication/:id/users/:user_id") ->
},
delete => #{
tags => ?API_TAGS_SINGLE,
deprecated => true,
description => ?DESC(listeners_listener_id_authentication_id_users_user_id_delete),
parameters => [param_listener_id(), param_auth_id(), param_user_id()],
responses => #{
@ -793,7 +805,11 @@ with_listener(ListenerID, Fun) ->
find_listener(ListenerID) ->
case binary:split(ListenerID, <<":">>) of
[BType, BName] ->
case emqx_config:find([listeners, BType, BName]) of
case
emqx_config:find([
listeners, binary_to_existing_atom(BType), binary_to_existing_atom(BName)
])
of
{ok, _} ->
{ok, {BType, BName}};
{not_found, _, _} ->

View File

@ -72,7 +72,7 @@ chain_configs() ->
[global_chain_config() | listener_chain_configs()].
global_chain_config() ->
{?GLOBAL, emqx:get_config([?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY], [])}.
{?GLOBAL, emqx:get_config([?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_ATOM], [])}.
listener_chain_configs() ->
lists:map(
@ -83,9 +83,11 @@ listener_chain_configs() ->
).
auth_config_path(ListenerID) ->
[<<"listeners">>] ++
binary:split(atom_to_binary(ListenerID), <<":">>) ++
[?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY].
Names = [
binary_to_existing_atom(N, utf8)
|| N <- binary:split(atom_to_binary(ListenerID), <<":">>)
],
[listeners] ++ Names ++ [?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_ATOM].
provider_types() ->
lists:map(fun({Type, _Module}) -> Type end, emqx_authn:providers()).

View File

@ -72,6 +72,7 @@ schema("/listeners/:listener_id/authentication/:id/import_users") ->
'operationId' => listener_authenticator_import_users,
post => #{
tags => ?API_TAGS_SINGLE,
deprecated => true,
description => ?DESC(listeners_listener_id_authentication_id_import_users_post),
parameters => [emqx_authn_api:param_listener_id(), emqx_authn_api:param_auth_id()],
'requestBody' => emqx_dashboard_swagger:file_schema(filename),

View File

@ -100,7 +100,6 @@ common_fields() ->
maps:to_list(
maps:without(
[
base_url,
pool_type
],
maps:from_list(emqx_connector_http:fields(config))

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_authz, [
{description, "An OTP application"},
{vsn, "0.1.19"},
{vsn, "0.1.20"},
{registered, []},
{mod, {emqx_authz_app, []}},
{applications, [

View File

@ -140,7 +140,12 @@ update(Cmd, Sources) ->
emqx_authz_utils:update_config(?CONF_KEY_PATH, {Cmd, Sources}).
pre_config_update(_, Cmd, Sources) ->
{ok, do_pre_config_update(Cmd, Sources)}.
try do_pre_config_update(Cmd, Sources) of
{error, Reason} -> {error, Reason};
NSources -> {ok, NSources}
catch
_:Reason -> {error, Reason}
end.
do_pre_config_update({?CMD_MOVE, _, _} = Cmd, Sources) ->
do_move(Cmd, Sources);
@ -475,11 +480,14 @@ maybe_write_files(#{<<"type">> := <<"file">>} = Source) ->
maybe_write_files(NewSource) ->
maybe_write_certs(NewSource).
write_acl_file(#{<<"rules">> := Rules} = Source) ->
NRules = check_acl_file_rules(Rules),
Path = ?MODULE:acl_conf_file(),
{ok, _Filename} = write_file(Path, NRules),
maps:without([<<"rules">>], Source#{<<"path">> => Path}).
write_acl_file(#{<<"rules">> := Rules} = Source0) ->
AclPath = ?MODULE:acl_conf_file(),
%% Always check if the rules are valid before writing to the file
%% If the rules are invalid, the old file will be kept
ok = check_acl_file_rules(AclPath, Rules),
ok = write_file(AclPath, Rules),
Source1 = maps:remove(<<"rules">>, Source0),
maps:put(<<"path">>, AclPath, Source1).
%% @doc where the acl.conf file is stored.
acl_conf_file() ->
@ -506,7 +514,7 @@ write_file(Filename, Bytes) ->
ok = filelib:ensure_dir(Filename),
case file:write_file(Filename, Bytes) of
ok ->
{ok, iolist_to_binary(Filename)};
ok;
{error, Reason} ->
?SLOG(error, #{filename => Filename, msg => "write_file_error", reason => Reason}),
throw(Reason)
@ -528,6 +536,14 @@ get_source_by_type(Type, Sources) ->
update_authz_chain(Actions) ->
emqx_hooks:put('client.authorize', {?MODULE, authorize, [Actions]}, ?HP_AUTHZ).
check_acl_file_rules(RawRules) ->
%% TODO: make sure the bin rules checked
RawRules.
%% Validate ACL `Rules' before they are written over the live acl file:
%% write them to a sibling ".tmp" file, run the authz file validator on
%% it, and always delete the temp file afterwards.  Throws from
%% write_file/2 or emqx_authz_file:validate/1 propagate to the caller,
%% so on invalid rules the existing acl file is left untouched.
check_acl_file_rules(Path, Rules) ->
    TmpPath = Path ++ ".tmp",
    %% `try ... after' with no catch clause: the previous
    %% `catch throw:Reason -> throw(Reason)' was a no-op rethrow that only
    %% discarded the original stacktrace; letting the throw propagate
    %% directly is equivalent and keeps the stacktrace intact.
    try
        ok = write_file(TmpPath, Rules),
        {ok, _} = emqx_authz_file:validate(TmpPath),
        ok
    after
        %% Best-effort cleanup; ignore the delete result.
        _ = file:delete(TmpPath)
    end.

View File

@ -39,8 +39,10 @@ fields(file) ->
type => binary(),
required => true,
example =>
<<"{allow,{username,\"^dashboard?\"},", "subscribe,[\"$SYS/#\"]}.\n",
"{allow,{ipaddr,\"127.0.0.1\"},all,[\"$SYS/#\",\"#\"]}.">>,
<<
"{allow,{username,{re,\"^dashboard$\"}},subscribe,[\"$SYS/#\"]}.\n",
"{allow,{ipaddr,\"127.0.0.1\"},all,[\"$SYS/#\",\"#\"]}."
>>,
desc => ?DESC(rules)
}}
];
@ -114,7 +116,6 @@ authz_http_common_fields() ->
maps:to_list(
maps:without(
[
base_url,
pool_type
],
maps:from_list(emqx_connector_http:fields(config))

View File

@ -33,13 +33,14 @@
update/1,
destroy/1,
authorize/4,
validate/1,
read_file/1
]).
description() ->
"AuthZ with static rules".
create(#{path := Path0} = Source) ->
validate(Path0) ->
Path = filename(Path0),
Rules =
case file:consult(Path) of
@ -54,8 +55,12 @@ create(#{path := Path0} = Source) ->
throw(failed_to_read_acl_file);
{error, Reason} ->
?SLOG(alert, #{msg => bad_acl_file_content, path => Path, reason => Reason}),
throw(bad_acl_file_content)
throw({bad_acl_file_content, Reason})
end,
{ok, Rules}.
create(#{path := Path} = Source) ->
{ok, Rules} = validate(Path),
Source#{annotations => #{rules => Rules}}.
update(#{path := _Path} = Source) ->

View File

@ -68,7 +68,13 @@ compile({Permission, Who, Action, TopicFilters}) when
{atom(Permission), compile_who(Who), atom(Action), [
compile_topic(Topic)
|| Topic <- TopicFilters
]}.
]};
compile({Permission, _Who, _Action, _TopicFilter}) when not ?ALLOW_DENY(Permission) ->
throw({invalid_authorization_permission, Permission});
compile({_Permission, _Who, Action, _TopicFilter}) when not ?PUBSUB(Action) ->
throw({invalid_authorization_action, Action});
compile(BadRule) ->
throw({invalid_authorization_rule, BadRule}).
compile_who(all) ->
all;

View File

@ -78,7 +78,17 @@ fields("authorization") ->
authz_fields();
fields(file) ->
authz_common_fields(file) ++
[{path, ?HOCON(string(), #{required => true, desc => ?DESC(path)})}];
[
{path,
?HOCON(
string(),
#{
required => true,
validator => fun(Path) -> element(1, emqx_authz_file:validate(Path)) end,
desc => ?DESC(path)
}
)}
];
fields(http_get) ->
authz_common_fields(http) ++
http_common_fields() ++
@ -230,7 +240,6 @@ http_common_fields() ->
maps:to_list(
maps:without(
[
base_url,
pool_type
],
maps:from_list(connector_fields(http))
@ -496,7 +505,7 @@ authz_fields() ->
%% doc_lift is force a root level reference instead of nesting sub-structs
extra => #{doc_lift => true},
%% it is recommended to configure authz sources from dashboard
%% hance the importance level for config is low
%% hence the importance level for config is low
importance => ?IMPORTANCE_LOW
}
)}

View File

@ -155,22 +155,36 @@ set_special_configs(_App) ->
<<"ssl">> => #{<<"enable">> => false},
<<"cmd">> => <<"HGETALL mqtt_authz:", ?PH_USERNAME/binary>>
}).
-define(SOURCE6, #{
-define(FILE_SOURCE(Rules), #{
<<"type">> => <<"file">>,
<<"enable">> => true,
<<"rules">> =>
<<"rules">> => Rules
}).
-define(SOURCE6,
?FILE_SOURCE(
<<
"{allow,{username,\"^dashboard?\"},subscribe,[\"$SYS/#\"]}."
"\n{allow,{ipaddr,\"127.0.0.1\"},all,[\"$SYS/#\",\"#\"]}."
>>
}).
-define(SOURCE7, #{
)
).
-define(SOURCE7,
?FILE_SOURCE(
<<
"{allow,{username,\"some_client\"},publish,[\"some_client/lwt\"]}.\n"
"{deny, all}."
>>
)
).
-define(BAD_FILE_SOURCE2, #{
<<"type">> => <<"file">>,
<<"enable">> => true,
<<"rules">> =>
<<
"{allow,{username,\"some_client\"},publish,[\"some_client/lwt\"]}.\n"
"{deny, all}."
"{not_allow,{username,\"some_client\"},publish,[\"some_client/lwt\"]}."
>>
}).
@ -178,6 +192,40 @@ set_special_configs(_App) ->
%% Testcases
%%------------------------------------------------------------------------------
-define(UPDATE_ERROR(Err), {error, {pre_config_update, emqx_authz, Err}}).
%% Verify that invalid file-source configs (unparsable content, malformed
%% rule tuple, unknown permission, unknown action) are rejected by every
%% update command, and that the ACL file on disk is left unchanged.
t_bad_file_source(_) ->
    BadContent = ?FILE_SOURCE(<<"{allow,{username,\"bar\"}, publish, [\"test\"]}">>),
    BadContentErr = {bad_acl_file_content, {1, erl_parse, ["syntax error before: ", []]}},
    BadRule = ?FILE_SOURCE(<<"{allow,{username,\"bar\"},publish}.">>),
    BadRuleErr = {invalid_authorization_rule, {allow, {username, "bar"}, publish}},
    BadPermission = ?FILE_SOURCE(<<"{not_allow,{username,\"bar\"},publish,[\"test\"]}.">>),
    BadPermissionErr = {invalid_authorization_permission, not_allow},
    BadAction = ?FILE_SOURCE(<<"{allow,{username,\"bar\"},pubsub,[\"test\"]}.">>),
    BadActionErr = {invalid_authorization_action, pubsub},
    lists:foreach(
        fun({Source, Error}) ->
            File = emqx_authz:acl_conf_file(),
            {ok, Bin1} = file:read_file(File),
            ?assertEqual(?UPDATE_ERROR(Error), emqx_authz:update(?CMD_REPLACE, [Source])),
            ?assertEqual(?UPDATE_ERROR(Error), emqx_authz:update(?CMD_PREPEND, Source)),
            ?assertEqual(?UPDATE_ERROR(Error), emqx_authz:update(?CMD_APPEND, Source)),
            %% Check file content not changed if update failed
            {ok, Bin2} = file:read_file(File),
            ?assertEqual(Bin1, Bin2)
        end,
        [
            {BadContent, BadContentErr},
            {BadRule, BadRuleErr},
            {BadPermission, BadPermissionErr},
            {BadAction, BadActionErr}
        ]
    ),
    %% No source should have been installed by the failed updates.
    ?assertMatch(
        [],
        emqx_conf:get([authorization, sources], [])
    ).
t_update_source(_) ->
%% replace all
{ok, _} = emqx_authz:update(?CMD_REPLACE, [?SOURCE3]),

View File

@ -120,7 +120,9 @@ t_superuser(_Config) ->
t_invalid_file(_Config) ->
?assertMatch(
{error, bad_acl_file_content},
{error,
{pre_config_update, emqx_authz,
{bad_acl_file_content, {1, erl_parse, ["syntax error before: ", "term"]}}}},
emqx_authz:update(?CMD_REPLACE, [?RAW_SOURCE#{<<"rules">> => <<"{{invalid term">>}])
).

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_bridge, [
{description, "EMQX bridges"},
{vsn, "0.1.18"},
{vsn, "0.1.19"},
{registered, [emqx_bridge_sup]},
{mod, {emqx_bridge_app, []}},
{applications, [

View File

@ -72,7 +72,8 @@
T == cassandra;
T == sqlserver;
T == pulsar_producer;
T == oracle
T == oracle;
T == iotdb
).
load() ->

View File

@ -54,13 +54,14 @@
-define(BRIDGE_NOT_FOUND(BRIDGE_TYPE, BRIDGE_NAME),
?NOT_FOUND(
<<"Bridge lookup failed: bridge named '", (BRIDGE_NAME)/binary, "' of type ",
<<"Bridge lookup failed: bridge named '", (bin(BRIDGE_NAME))/binary, "' of type ",
(bin(BRIDGE_TYPE))/binary, " does not exist.">>
)
).
%% Don't turn bridge_name into an atom; it may not be an existing atom.
-define(TRY_PARSE_ID(ID, EXPR),
try emqx_bridge_resource:parse_bridge_id(Id) of
try emqx_bridge_resource:parse_bridge_id(Id, #{atom_name => false}) of
{BridgeType, BridgeName} ->
EXPR
catch
@ -686,11 +687,15 @@ get_metrics_from_local_node(BridgeType, BridgeName) ->
).
is_enabled_bridge(BridgeType, BridgeName) ->
try emqx:get_config([bridges, BridgeType, BridgeName]) of
try emqx:get_config([bridges, BridgeType, binary_to_existing_atom(BridgeName)]) of
ConfMap ->
maps:get(enable, ConfMap, false)
catch
error:{config_not_found, _} ->
throw(not_found);
error:badarg ->
%% catch the non-existing atom case:
%% a non-existing atom means the bridge is not available in config PT storage.
throw(not_found)
end.
@ -891,11 +896,18 @@ fill_defaults(Type, RawConf) ->
pack_bridge_conf(Type, RawConf) ->
#{<<"bridges">> => #{bin(Type) => #{<<"foo">> => RawConf}}}.
unpack_bridge_conf(Type, PackedConf) ->
#{<<"bridges">> := Bridges} = PackedConf,
#{<<"foo">> := RawConf} = maps:get(bin(Type), Bridges),
%% Hide webhook's resource_opts.request_timeout from user.
filter_raw_conf(<<"webhook">>, RawConf0) ->
emqx_utils_maps:deep_remove([<<"resource_opts">>, <<"request_timeout">>], RawConf0);
filter_raw_conf(_TypeBin, RawConf) ->
RawConf.
unpack_bridge_conf(Type, PackedConf) ->
TypeBin = bin(Type),
#{<<"bridges">> := Bridges} = PackedConf,
#{<<"foo">> := RawConf} = maps:get(TypeBin, Bridges),
filter_raw_conf(TypeBin, RawConf).
is_ok(ok) ->
ok;
is_ok(OkResult = {ok, _}) ->

View File

@ -25,6 +25,7 @@
resource_id/2,
bridge_id/2,
parse_bridge_id/1,
parse_bridge_id/2,
bridge_hookpoint/1,
bridge_hookpoint_to_bridge_id/1
]).
@ -56,6 +57,11 @@
(TYPE) =:= <<"kafka_consumer">> orelse ?IS_BI_DIR_BRIDGE(TYPE)
).
%% [FIXME] this has no place here, it's used in parse_confs/3, which should
%% rather delegate to a behavior callback than implementing domain knowledge
%% here (reversed dependency)
-define(INSERT_TABLET_PATH, "/rest/v2/insertTablet").
-if(?EMQX_RELEASE_EDITION == ee).
bridge_to_resource_type(<<"mqtt">>) -> emqx_connector_mqtt;
bridge_to_resource_type(mqtt) -> emqx_connector_mqtt;
@ -81,11 +87,15 @@ bridge_id(BridgeType, BridgeName) ->
Type = bin(BridgeType),
<<Type/binary, ":", Name/binary>>.
-spec parse_bridge_id(list() | binary() | atom()) -> {atom(), binary()}.
parse_bridge_id(BridgeId) ->
parse_bridge_id(BridgeId, #{atom_name => true}).
-spec parse_bridge_id(list() | binary() | atom(), #{atom_name => boolean()}) ->
{atom(), atom() | binary()}.
parse_bridge_id(BridgeId, Opts) ->
case string:split(bin(BridgeId), ":", all) of
[Type, Name] ->
{to_type_atom(Type), validate_name(Name)};
{to_type_atom(Type), validate_name(Name, Opts)};
_ ->
invalid_data(
<<"should be of pattern {type}:{name}, but got ", BridgeId/binary>>
@ -100,13 +110,16 @@ bridge_hookpoint_to_bridge_id(?BRIDGE_HOOKPOINT(BridgeId)) ->
bridge_hookpoint_to_bridge_id(_) ->
{error, bad_bridge_hookpoint}.
validate_name(Name0) ->
validate_name(Name0, Opts) ->
Name = unicode:characters_to_list(Name0, utf8),
case is_list(Name) andalso Name =/= [] of
true ->
case lists:all(fun is_id_char/1, Name) of
true ->
Name0;
case maps:get(atom_name, Opts, true) of
true -> list_to_existing_atom(Name);
false -> Name0
end;
false ->
invalid_data(<<"bad name: ", Name0/binary>>)
end;
@ -152,20 +165,20 @@ create(BridgeId, Conf) ->
create(Type, Name, Conf) ->
create(Type, Name, Conf, #{}).
create(Type, Name, Conf, Opts0) ->
create(Type, Name, Conf, Opts) ->
?SLOG(info, #{
msg => "create bridge",
type => Type,
name => Name,
config => emqx_utils:redact(Conf)
}),
Opts = override_start_after_created(Conf, Opts0),
TypeBin = bin(Type),
{ok, _Data} = emqx_resource:create_local(
resource_id(Type, Name),
<<"emqx_bridge">>,
bridge_to_resource_type(Type),
parse_confs(bin(Type), Name, Conf),
Opts
parse_confs(TypeBin, Name, Conf),
parse_opts(Conf, Opts)
),
ok.
@ -176,7 +189,7 @@ update(BridgeId, {OldConf, Conf}) ->
update(Type, Name, {OldConf, Conf}) ->
update(Type, Name, {OldConf, Conf}, #{}).
update(Type, Name, {OldConf, Conf}, Opts0) ->
update(Type, Name, {OldConf, Conf}, Opts) ->
%% TODO: sometimes its not necessary to restart the bridge connection.
%%
%% - if the connection related configs like `servers` is updated, we should restart/start
@ -185,7 +198,6 @@ update(Type, Name, {OldConf, Conf}, Opts0) ->
%% the `method` or `headers` of a WebHook is changed, then the bridge can be updated
%% without restarting the bridge.
%%
Opts = override_start_after_created(Conf, Opts0),
case emqx_utils_maps:if_only_to_toggle_enable(OldConf, Conf) of
false ->
?SLOG(info, #{
@ -228,11 +240,12 @@ recreate(Type, Name, Conf) ->
recreate(Type, Name, Conf, #{}).
recreate(Type, Name, Conf, Opts) ->
TypeBin = bin(Type),
emqx_resource:recreate_local(
resource_id(Type, Name),
bridge_to_resource_type(Type),
parse_confs(bin(Type), Name, Conf),
Opts
parse_confs(TypeBin, Name, Conf),
parse_opts(Conf, Opts)
).
create_dry_run(Type, Conf0) ->
@ -329,6 +342,30 @@ parse_confs(
max_retries => Retry
}
};
parse_confs(<<"iotdb">>, Name, Conf) ->
#{
base_url := BaseURL,
authentication :=
#{
username := Username,
password := Password
}
} = Conf,
BasicToken = base64:encode(<<Username/binary, ":", Password/binary>>),
WebhookConfig =
Conf#{
method => <<"post">>,
url => <<BaseURL/binary, ?INSERT_TABLET_PATH>>,
headers => [
{<<"Content-type">>, <<"application/json">>},
{<<"Authorization">>, BasicToken}
]
},
parse_confs(
<<"webhook">>,
Name,
WebhookConfig
);
parse_confs(Type, Name, Conf) when ?IS_INGRESS_BRIDGE(Type) ->
%% For some drivers that can be used as data-sources, we need to provide a
%% hookpoint. The underlying driver will run `emqx_hooks:run/3` when it
@ -365,6 +402,9 @@ bin(Bin) when is_binary(Bin) -> Bin;
bin(Str) when is_list(Str) -> list_to_binary(Str);
bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8).
parse_opts(Conf, Opts0) ->
override_start_after_created(Conf, Opts0).
override_start_after_created(Config, Opts) ->
Enabled = maps:get(enable, Config, true),
StartAfterCreated = Enabled andalso maps:get(start_after_created, Opts, Enabled),

View File

@ -238,36 +238,10 @@ webhook_bridge_converter(Conf0, _HoconOpts) ->
)
end.
%% We hide resource_opts.request_timeout from user.
do_convert_webhook_config(
#{<<"request_timeout">> := ReqT, <<"resource_opts">> := #{<<"request_timeout">> := ReqT}} = Conf
#{<<"request_timeout">> := ReqT, <<"resource_opts">> := ResOpts} = Conf
) ->
%% ok: same values
Conf;
do_convert_webhook_config(
#{
<<"request_timeout">> := ReqTRootRaw,
<<"resource_opts">> := #{<<"request_timeout">> := ReqTResourceRaw}
} = Conf0
) ->
%% different values; we set them to the same, if they are valid
%% durations
MReqTRoot = emqx_schema:to_duration_ms(ReqTRootRaw),
MReqTResource = emqx_schema:to_duration_ms(ReqTResourceRaw),
case {MReqTRoot, MReqTResource} of
{{ok, ReqTRoot}, {ok, ReqTResource}} ->
{_Parsed, ReqTRaw} = max({ReqTRoot, ReqTRootRaw}, {ReqTResource, ReqTResourceRaw}),
Conf1 = emqx_utils_maps:deep_merge(
Conf0,
#{
<<"request_timeout">> => ReqTRaw,
<<"resource_opts">> => #{<<"request_timeout">> => ReqTRaw}
}
),
Conf1;
_ ->
%% invalid values; let the type checker complain about
%% that.
Conf0
end;
Conf#{<<"resource_opts">> => ResOpts#{<<"request_timeout">> => ReqT}};
do_convert_webhook_config(Conf) ->
Conf.

View File

@ -40,12 +40,15 @@ fields("put") ->
fields("get") ->
emqx_bridge_schema:status_fields() ++ fields("post");
fields("creation_opts") ->
lists:filter(
fun({K, _V}) ->
not lists:member(K, unsupported_opts())
end,
emqx_resource_schema:fields("creation_opts")
).
[
hidden_request_timeout()
| lists:filter(
fun({K, _V}) ->
not lists:member(K, unsupported_opts())
end,
emqx_resource_schema:fields("creation_opts")
)
].
desc("config") ->
?DESC("desc_config");
@ -68,7 +71,7 @@ basic_config() ->
)}
] ++ webhook_creation_opts() ++
proplists:delete(
max_retries, proplists:delete(base_url, emqx_connector_http:fields(config))
max_retries, emqx_connector_http:fields(config)
).
request_config() ->
@ -163,7 +166,8 @@ unsupported_opts() ->
[
enable_batch,
batch_size,
batch_time
batch_time,
request_timeout
].
%%======================================================================================
@ -190,3 +194,13 @@ name_field() ->
method() ->
enum([post, put, get, delete]).
%% A `request_timeout' entry for the webhook creation_opts that is still
%% accepted (for config compatibility) but hidden from generated
%% documentation via ?IMPORTANCE_HIDDEN, and not required.
hidden_request_timeout() ->
    {request_timeout,
        mk(
            hoconsc:union([infinity, emqx_schema:duration_ms()]),
            #{
                required => false,
                importance => ?IMPORTANCE_HIDDEN
            }
        )}.

View File

@ -1284,21 +1284,43 @@ t_inconsistent_webhook_request_timeouts(Config) ->
<<"resource_opts">> => #{<<"request_timeout">> => <<"2s">>}
}
),
?assertMatch(
{ok, 201, #{
%% note: same value on both fields
<<"request_timeout">> := <<"2s">>,
<<"resource_opts">> := #{<<"request_timeout">> := <<"2s">>}
}},
{ok, 201, #{
<<"request_timeout">> := <<"1s">>,
<<"resource_opts">> := ResourceOpts
}} =
request_json(
post,
uri(["bridges"]),
BadBridgeParams,
Config
)
),
),
?assertNot(maps:is_key(<<"request_timeout">>, ResourceOpts)),
validate_resource_request_timeout(proplists:get_value(group, Config), 1000, Name),
ok.
%% For the `single' (non-cluster) test group: send one message through the
%% webhook bridge and verify that the async query carries the expected
%% `timeout' in its query options — checked both on the returned result and
%% on the captured `async_query' trace events.  Other groups are skipped.
validate_resource_request_timeout(single, Timeout, Name) ->
    SentData = #{payload => <<"Hello EMQX">>, timestamp => 1668602148000},
    BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_HTTP, Name),
    ResId = emqx_bridge_resource:resource_id(<<"webhook">>, Name),
    ?check_trace(
        begin
            {ok, Res} =
                ?wait_async_action(
                    emqx_bridge:send_message(BridgeID, SentData),
                    #{?snk_kind := async_query},
                    1000
                ),
            %% ResId and Timeout are already bound: this pins the exact values.
            ?assertMatch({ok, #{id := ResId, query_opts := #{timeout := Timeout}}}, Res)
        end,
        fun(Trace0) ->
            Trace = ?of_kind(async_query, Trace0),
            ?assertMatch([#{query_opts := #{timeout := Timeout}}], Trace),
            ok
        end
    );
validate_resource_request_timeout(_Cluster, _Timeout, _Name) ->
    ignore.
%%
request(Method, URL, Config) ->

View File

@ -59,27 +59,21 @@ webhook_config_test() ->
},
check(Conf2)
),
%% the converter should pick the greater of the two
%% request_timeouts and place them in the root and inside
%% resource_opts.
?assertMatch(
#{
<<"bridges">> := #{
<<"webhook">> := #{
<<"the_name">> :=
#{
<<"method">> := get,
<<"request_timeout">> := 60_000,
<<"resource_opts">> := #{<<"request_timeout">> := 60_000},
<<"body">> := <<"${payload}">>
}
}
#{
<<"bridges">> := #{
<<"webhook">> := #{
<<"the_name">> :=
#{
<<"method">> := get,
<<"request_timeout">> := RequestTime,
<<"resource_opts">> := ResourceOpts,
<<"body">> := <<"${payload}">>
}
}
},
check(Conf3)
),
}
} = check(Conf3),
?assertEqual(60_000, RequestTime),
?assertMatch(#{<<"request_timeout">> := 60_000}, ResourceOpts),
ok.
up(#{<<"bridges">> := Bridges0} = Conf0) ->
@ -129,7 +123,7 @@ assert_upgraded1(Map) ->
?assert(maps:is_key(<<"ssl">>, Map)).
check(Conf) when is_map(Conf) ->
hocon_tconf:check_plain(emqx_bridge_schema, Conf).
hocon_tconf:check_plain(emqx_bridge_schema, Conf, #{required => false}).
%% erlfmt-ignore
%% this is config generated from v5.0.11

View File

@ -100,17 +100,21 @@
?assertMetrics(Pat, true, BridgeID)
).
-define(assertMetrics(Pat, Guard, BridgeID),
?assertMatch(
#{
<<"metrics">> := Pat,
<<"node_metrics">> := [
#{
<<"node">> := _,
<<"metrics">> := Pat
}
]
} when Guard,
request_bridge_metrics(BridgeID)
?retry(
_Sleep = 300,
_Attempts0 = 20,
?assertMatch(
#{
<<"metrics">> := Pat,
<<"node_metrics">> := [
#{
<<"node">> := _,
<<"metrics">> := Pat
}
]
} when Guard,
request_bridge_metrics(BridgeID)
)
)
).

View File

@ -0,0 +1,350 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_testlib).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
%% ct setup helpers
%% Record the suite's connector applications in the CT config so that
%% `init_per_group/3' can start them and `end_per_suite/1' can stop them.
init_per_suite(Config, Apps) ->
    StartApps = {start_apps, Apps},
    [StartApps | Config].
%% Common `end_per_suite' for bridge suites: tears down the management API,
%% the core apps, and the connector apps recorded by `init_per_suite/2'.
end_per_suite(Config) ->
    emqx_mgmt_api_test_util:end_suite(),
    ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
    %% Stop the suite apps in reverse start order.
    ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?config(start_apps, Config))),
    _ = application:stop(emqx_connector),
    ok.
%% Common `init_per_group': resets toxiproxy, starts the EMQX core, the
%% suite's connector apps and the management API, then seeds the CT config
%% with the proxy coordinates, a unique MQTT topic, the test group and the
%% bridge type.
init_per_group(TestGroup, BridgeType, Config) ->
    ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
    ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    application:load(emqx_bridge),
    ok = emqx_common_test_helpers:start_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:start_apps(?config(start_apps, Config)),
    {ok, _} = application:ensure_all_started(emqx_connector),
    emqx_mgmt_api_test_util:init_suite(),
    %% Unique suffix avoids topic clashes between group runs.
    UniqueNum = integer_to_binary(erlang:unique_integer([positive])),
    MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>,
    [
        {proxy_host, ProxyHost},
        {proxy_port, ProxyPort},
        {mqtt_topic, MQTTTopic},
        {test_group, TestGroup},
        {bridge_type, BridgeType}
        | Config
    ].
%% Common `end_per_group': heals any toxiproxy faults and removes all
%% bridges left behind by the group's test cases.
end_per_group(Config) ->
    ProxyHost = ?config(proxy_host, Config),
    ProxyPort = ?config(proxy_port, Config),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    delete_all_bridges(),
    ok.
%% Common `init_per_testcase': removes stale bridges, builds a per-case
%% bridge topic, and delegates bridge config construction to
%% `BridgeConfigCb(TestCase, TestGroup, Config)', which must return
%% `{Name, ConfigString, BridgeConfig}'.  Also starts the snabbkaffe trace.
init_per_testcase(TestCase, Config0, BridgeConfigCb) ->
    ct:timetrap(timer:seconds(60)),
    delete_all_bridges(),
    %% Unique per-case suffix for the bridge topic.
    UniqueNum = integer_to_binary(erlang:unique_integer()),
    BridgeTopic =
        <<
            (atom_to_binary(TestCase))/binary,
            UniqueNum/binary
        >>,
    TestGroup = ?config(test_group, Config0),
    Config = [{bridge_topic, BridgeTopic} | Config0],
    {Name, ConfigString, BridgeConfig} = BridgeConfigCb(
        TestCase, TestGroup, Config
    ),
    ok = snabbkaffe:start_trace(),
    [
        {bridge_name, Name},
        {bridge_config_string, ConfigString},
        {bridge_config, BridgeConfig}
        | Config
    ].
%% Common `end_per_testcase': unless the case flagged itself with
%% `skip_does_not_apply', heals proxy faults, removes bridges, runs
%% janitor callbacks, and stops the snabbkaffe trace.
end_per_testcase(_Testcase, Config) ->
    case proplists:get_bool(skip_does_not_apply, Config) of
        true ->
            ok;
        false ->
            ProxyHost = ?config(proxy_host, Config),
            ProxyPort = ?config(proxy_port, Config),
            emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
            delete_all_bridges(),
            %% in CI, apparently this needs more time since the
            %% machines struggle with all the containers running...
            emqx_common_test_helpers:call_janitor(60_000),
            ok = snabbkaffe:stop(),
            ok
    end.
%% Remove every bridge currently known to the broker.
delete_all_bridges() ->
    Bridges = emqx_bridge:list(),
    Remove = fun(#{type := BridgeType, name := BridgeName}) ->
        emqx_bridge:remove(BridgeType, BridgeName)
    end,
    lists:foreach(Remove, Bridges).
%% test helpers
%% Parse a HOCON bridge config string and validate it against
%% `emqx_bridge_schema'.  Returns the raw (binary-keyed) config map of the
%% bridge named `Name' under the suite's bridge type.
parse_and_check(Config, ConfigString, Name) ->
    BridgeType = ?config(bridge_type, Config),
    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
    %% The schema check is for validation only; the unchecked raw map is returned.
    hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
    #{<<"bridges">> := #{BridgeType := #{Name := BridgeConfig}}} = RawConf,
    BridgeConfig.
%% Build the resource id for the suite's bridge type/name pair.
resource_id(Config) ->
    BridgeType = ?config(bridge_type, Config),
    Name = ?config(bridge_name, Config),
    emqx_bridge_resource:resource_id(BridgeType, Name).
%% Create the suite's bridge directly through `emqx_bridge' (no HTTP API).
create_bridge(Config) ->
    create_bridge(Config, _Overrides = #{}).

%% Same as `create_bridge/1', with `Overrides' deep-merged into the
%% suite's base bridge config.
create_bridge(Config, Overrides) ->
    BridgeType = ?config(bridge_type, Config),
    Name = ?config(bridge_name, Config),
    BridgeConfig0 = ?config(bridge_config, Config),
    BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
    emqx_bridge:create(BridgeType, Name, BridgeConfig).
%% Create the suite's bridge through the management HTTP API.
create_bridge_api(Config) ->
    create_bridge_api(Config, _Overrides = #{}).

%% Same as `create_bridge_api/1', with `Overrides' deep-merged into the
%% base config.  Returns `{ok, {Status, Headers, DecodedBody}}' or the raw
%% error from the API helper.
create_bridge_api(Config, Overrides) ->
    BridgeType = ?config(bridge_type, Config),
    Name = ?config(bridge_name, Config),
    BridgeConfig0 = ?config(bridge_config, Config),
    BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
    Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => Name},
    Path = emqx_mgmt_api_test_util:api_path(["bridges"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("creating bridge (via http): ~p", [Params]),
    Res =
        case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of
            {ok, {Status, Headers, Body0}} ->
                {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}};
            Error ->
                Error
        end,
    ct:pal("bridge create result: ~p", [Res]),
    Res.
%% Update the suite's bridge through the management HTTP API.
update_bridge_api(Config) ->
    update_bridge_api(Config, _Overrides = #{}).

%% Same as `update_bridge_api/1', with `Overrides' deep-merged into the
%% base config.  Returns `{ok, DecodedBody}' or the raw API error.
update_bridge_api(Config, Overrides) ->
    BridgeType = ?config(bridge_type, Config),
    Name = ?config(bridge_name, Config),
    BridgeConfig0 = ?config(bridge_config, Config),
    BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
    BridgeId = emqx_bridge_resource:bridge_id(BridgeType, Name),
    Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => Name},
    Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("updating bridge (via http): ~p", [Params]),
    Res =
        case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, Params, Opts) of
            {ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])};
            Error -> Error
        end,
    ct:pal("bridge update result: ~p", [Res]),
    Res.
%% Dry-run ("probe") the suite's bridge config through the HTTP API.
probe_bridge_api(Config) ->
    probe_bridge_api(Config, _Overrides = #{}).

%% Same as `probe_bridge_api/1', with `Overrides' deep-merged into the
%% base config.  Returns `{ok, {{_, 204, _}, Headers, Body}}' on success,
%% or the raw API error.
%%
%% Fix: the overrides were previously ignored (`_Overrides'), so callers
%% such as `t_start_stop/2' — which passes a `resource_opts' override —
%% actually probed the unmodified base config.  They are now merged the
%% same way as in `create_bridge_api/2'.
probe_bridge_api(Config, Overrides) ->
    BridgeType = ?config(bridge_type, Config),
    Name = ?config(bridge_name, Config),
    BridgeConfig0 = ?config(bridge_config, Config),
    BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
    Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => Name},
    Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("probing bridge (via http): ~p", [Params]),
    Res =
        case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of
            {ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0};
            Error -> Error
        end,
    ct:pal("bridge probe result: ~p", [Res]),
    Res.
%% Create a rule (via the rules HTTP API) that forwards messages from
%% `RuleTopic' to the suite's bridge.  Returns `{ok, DecodedBody}' or the
%% raw API error.
create_rule_and_action_http(BridgeType, RuleTopic, Config) ->
    BridgeName = ?config(bridge_name, Config),
    BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
    Params = #{
        enable => true,
        sql => <<"SELECT * FROM \"", RuleTopic/binary, "\"">>,
        actions => [BridgeId]
    },
    Path = emqx_mgmt_api_test_util:api_path(["rules"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    ct:pal("rule action params: ~p", [Params]),
    case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of
        {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])};
        Error -> Error
    end.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
%% Shared testcase: create the bridge, wait until it reports connected,
%% send one message built by `MakeMessageFun' via a synchronous query, and
%% let `IsSuccessCheck' assert on the query result.
t_sync_query(Config, MakeMessageFun, IsSuccessCheck) ->
    ResourceId = resource_id(Config),
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge_api(Config)),
            %% Connection establishment is async; poll until healthy.
            ?retry(
                _Sleep = 1_000,
                _Attempts = 20,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),
            Message = {send_message, MakeMessageFun()},
            IsSuccessCheck(emqx_resource:simple_sync_query(ResourceId, Message)),
            ok
        end,
        []
    ),
    ok.
%% Shared testcase: like `t_sync_query/3', but issues an asynchronous query
%% and waits for the reply to be delivered back to the test process before
%% running `IsSuccessCheck' on it.  Throws `timeout' if no reply arrives
%% within 5 seconds.
t_async_query(Config, MakeMessageFun, IsSuccessCheck) ->
    ResourceId = resource_id(Config),
    %% Forward the async result to the test process as a message.
    ReplyFun =
        fun(Pid, Result) ->
            Pid ! {result, Result}
        end,
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge_api(Config)),
            ?retry(
                _Sleep = 1_000,
                _Attempts = 20,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),
            Message = {send_message, MakeMessageFun()},
            emqx_resource:query(ResourceId, Message, #{async_reply_fun => {ReplyFun, [self()]}}),
            ok
        end,
        []
    ),
    receive
        {result, Result} -> IsSuccessCheck(Result)
    after 5_000 ->
        throw(timeout)
    end,
    ok.
%% Shared testcase: create the bridge via the HTTP API, then update it
%% twice (exercising repeated updates of the same config) expecting
%% success each time.
t_create_via_http(Config) ->
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge_api(Config)),
            %% lightweight matrix testing some configs
            ?assertMatch(
                {ok, _},
                update_bridge_api(
                    Config
                )
            ),
            ?assertMatch(
                {ok, _},
                update_bridge_api(
                    Config
                )
            ),
            ok
        end,
        []
    ),
    ok.
%% Shared testcase: create the bridge, wait until connected, probe it twice
%% (asserting that probing does not grow the atom table), then disable it
%% and wait for the `StopTracePoint' trace event.  The trace checker
%% expects three stop events: one per probe plus the real stop.
t_start_stop(Config, StopTracePoint) ->
    BridgeType = ?config(bridge_type, Config),
    BridgeName = ?config(bridge_name, Config),
    ResourceId = resource_id(Config),
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge(Config)),
            %% Since the connection process is async, we give it some time to
            %% stabilize and avoid flakiness.
            ?retry(
                _Sleep = 1_000,
                _Attempts = 20,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),
            %% Check that the bridge probe API doesn't leak atoms.
            ProbeRes0 = probe_bridge_api(
                Config,
                #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}}
            ),
            ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
            AtomsBefore = erlang:system_info(atom_count),
            %% Probe again; shouldn't have created more atoms.
            ProbeRes1 = probe_bridge_api(
                Config,
                #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}}
            ),
            ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
            AtomsAfter = erlang:system_info(atom_count),
            ?assertEqual(AtomsBefore, AtomsAfter),
            %% Now stop the bridge.
            ?assertMatch(
                {{ok, _}, {ok, _}},
                ?wait_async_action(
                    emqx_bridge:disable_enable(disable, BridgeType, BridgeName),
                    #{?snk_kind := StopTracePoint},
                    5_000
                )
            ),
            ok
        end,
        fun(Trace) ->
            %% one for each probe, one for real
            ?assertMatch([_, _, _], ?of_kind(StopTracePoint, Trace)),
            ok
        end
    ),
    ok.
%% Shared testcase: the bridge must report `connected', flip to
%% `disconnected' while the toxiproxy link is down, and recover to
%% `connected' once the fault is healed.
t_on_get_status(Config) ->
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    ProxyName = ?config(proxy_name, Config),
    ResourceId = resource_id(Config),
    ?assertMatch({ok, _}, create_bridge(Config)),
    %% Since the connection process is async, we give it some time to
    %% stabilize and avoid flakiness.
    ?retry(
        _Sleep = 1_000,
        _Attempts = 20,
        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
        ct:sleep(500),
        ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId))
    end),
    %% Check that it recovers itself.
    ?retry(
        _Sleep = 1_000,
        _Attempts = 20,
        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    ok.

View File

@ -23,6 +23,7 @@
-compile(export_all).
-import(emqx_mgmt_api_test_util, [request/3, uri/1]).
-import(emqx_common_test_helpers, [on_exit/1]).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
@ -52,6 +53,13 @@ end_per_suite(_Config) ->
suite() ->
[{timetrap, {seconds, 60}}].
%% Per-testcase setup: nothing to prepare.
init_per_testcase(_TestCase, Config) ->
    Config.

%% Per-testcase teardown: run cleanup callbacks registered via `on_exit/1'.
end_per_testcase(_TestCase, _Config) ->
    emqx_common_test_helpers:call_janitor(),
    ok.
%%------------------------------------------------------------------------------
%% HTTP server for testing
%% (Originally copied from emqx_bridge_api_SUITE)
@ -158,7 +166,8 @@ bridge_async_config(#{port := Port} = Config) ->
QueryMode = maps:get(query_mode, Config, "async"),
ConnectTimeout = maps:get(connect_timeout, Config, 1),
RequestTimeout = maps:get(request_timeout, Config, 10000),
ResourceRequestTimeout = maps:get(resouce_request_timeout, Config, "infinity"),
ResumeInterval = maps:get(resume_interval, Config, "1s"),
ResourceRequestTimeout = maps:get(resource_request_timeout, Config, "infinity"),
ConfigString = io_lib:format(
"bridges.~s.~s {\n"
" url = \"http://localhost:~p\"\n"
@ -177,7 +186,8 @@ bridge_async_config(#{port := Port} = Config) ->
" health_check_interval = \"15s\"\n"
" max_buffer_bytes = \"1GB\"\n"
" query_mode = \"~s\"\n"
" request_timeout = \"~s\"\n"
" request_timeout = \"~p\"\n"
" resume_interval = \"~s\"\n"
" start_after_created = \"true\"\n"
" start_timeout = \"5s\"\n"
" worker_pool_size = \"1\"\n"
@ -194,7 +204,8 @@ bridge_async_config(#{port := Port} = Config) ->
PoolSize,
RequestTimeout,
QueryMode,
ResourceRequestTimeout
ResourceRequestTimeout,
ResumeInterval
]
),
ct:pal(ConfigString),
@ -236,7 +247,7 @@ t_send_async_connection_timeout(_Config) ->
query_mode => "async",
connect_timeout => ResponseDelayMS * 2,
request_timeout => 10000,
resouce_request_timeout => "infinity"
resource_request_timeout => "infinity"
}),
NumberOfMessagesToSend = 10,
[
@ -250,6 +261,97 @@ t_send_async_connection_timeout(_Config) ->
stop_http_server(Server),
ok.
%% Errors treated as "free" retries (connection died: `normal' /
%% `{shutdown, normal}') must be retried by the HTTP connector until the
%% request finally succeeds — here 5 injected failures, success on the
%% 6th attempt.
t_async_free_retries(_Config) ->
    #{port := Port} = start_http_server(#{response_delay_ms => 0}),
    BridgeID = make_bridge(#{
        port => Port,
        pool_size => 1,
        query_mode => "sync",
        connect_timeout => 1_000,
        request_timeout => 10_000,
        resource_request_timeout => "10000s"
    }),
    %% Fail 5 times then succeed.
    Context = #{error_attempts => 5},
    ExpectedAttempts = 6,
    Fn = fun(Get, Error) ->
        ?assertMatch(
            {ok, 200, _, _},
            emqx_bridge:send_message(BridgeID, #{<<"hello">> => <<"world">>}),
            #{error => Error}
        ),
        ?assertEqual(ExpectedAttempts, Get(), #{error => Error})
    end,
    do_t_async_retries(Context, {error, normal}, Fn),
    do_t_async_retries(Context, {error, {shutdown, normal}}, Fn),
    ok.
%% With an always-failing reply (`error_attempts => infinity'), recoverable
%% errors still succeed via the buffer worker's own synchronous retries,
%% while unrecoverable errors surface to the caller after the connector
%% gives up (3 attempts observed in both cases).
t_async_common_retries(_Config) ->
    #{port := Port} = start_http_server(#{response_delay_ms => 0}),
    BridgeID = make_bridge(#{
        port => Port,
        pool_size => 1,
        query_mode => "sync",
        resume_interval => "100ms",
        connect_timeout => 1_000,
        request_timeout => 10_000,
        resource_request_timeout => "10000s"
    }),
    %% Keeps failing until connector gives up.
    Context = #{error_attempts => infinity},
    ExpectedAttempts = 3,
    FnSucceed = fun(Get, Error) ->
        ?assertMatch(
            {ok, 200, _, _},
            emqx_bridge:send_message(BridgeID, #{<<"hello">> => <<"world">>}),
            #{error => Error, attempts => Get()}
        ),
        ?assertEqual(ExpectedAttempts, Get(), #{error => Error})
    end,
    FnFail = fun(Get, Error) ->
        ?assertMatch(
            Error,
            emqx_bridge:send_message(BridgeID, #{<<"hello">> => <<"world">>}),
            #{error => Error, attempts => Get()}
        ),
        ?assertEqual(ExpectedAttempts, Get(), #{error => Error})
    end,
    %% These two succeed because they're further retried by the buffer
    %% worker synchronously, and we do not mock that call.
    do_t_async_retries(Context, {error, {closed, "The connection was lost."}}, FnSucceed),
    do_t_async_retries(Context, {error, {shutdown, closed}}, FnSucceed),
    %% This fails because this error is treated as unrecoverable.
    do_t_async_retries(Context, {error, something_else}, FnFail),
    ok.
%% Retry-test driver: mocks `emqx_connector_http:reply_delegator/3' so that
%% the first `error_attempts' replies are substituted with `Error' (the
%% real result passes through afterwards), counting the attempts in a
%% persistent_term; then runs `Fn(Get, Error)' to assert on the outcome.
%% Note: with `error_attempts => infinity', `Attempts > infinity' is never
%% true (Erlang term order: number < atom), so every reply is an error.
do_t_async_retries(TestContext, Error, Fn) ->
    #{error_attempts := ErrorAttempts} = TestContext,
    persistent_term:put({?MODULE, ?FUNCTION_NAME, attempts}, 0),
    %% Clean the counter up when the janitor runs after the testcase.
    on_exit(fun() -> persistent_term:erase({?MODULE, ?FUNCTION_NAME, attempts}) end),
    Get = fun() -> persistent_term:get({?MODULE, ?FUNCTION_NAME, attempts}) end,
    GetAndBump = fun() ->
        Attempts = persistent_term:get({?MODULE, ?FUNCTION_NAME, attempts}),
        persistent_term:put({?MODULE, ?FUNCTION_NAME, attempts}, Attempts + 1),
        Attempts + 1
    end,
    emqx_common_test_helpers:with_mock(
        emqx_connector_http,
        reply_delegator,
        fun(Context, ReplyFunAndArgs, Result) ->
            Attempts = GetAndBump(),
            case Attempts > ErrorAttempts of
                true ->
                    ct:pal("succeeding ~p : ~p", [Error, Attempts]),
                    meck:passthrough([Context, ReplyFunAndArgs, Result]);
                false ->
                    ct:pal("failing ~p : ~p", [Error, Attempts]),
                    meck:passthrough([Context, ReplyFunAndArgs, Error])
            end
        end,
        fun() -> Fn(Get, Error) end
    ),
    ok.
receive_request_notifications(MessageIDs, _ResponseDelay) when map_size(MessageIDs) =:= 0 ->
ok;
receive_request_notifications(MessageIDs, ResponseDelay) ->

View File

@ -11,7 +11,6 @@ The application is used to connect EMQX and Cassandra. User can create a rule
and easily ingest IoT data into Cassandra by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
<!---
# Documentation
@ -20,7 +19,6 @@ and easily ingest IoT data into Cassandra by leveraging
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
--->
# HTTP APIs

View File

@ -23,7 +23,7 @@ User can create a rule and easily ingest IoT data into ClickHouse by leveraging
- Several APIs are provided for bridge management, which includes create bridge,
update bridge, get bridge, stop or restart bridge and list bridges etc.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
- Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
for more detailed information.

View File

@ -0,0 +1 @@
clickhouse

View File

@ -0,0 +1,11 @@
%% -*- mode: erlang; -*-
{erl_opts, [debug_info]}.
{deps, [ {clickhouse, {git, "https://github.com/emqx/clickhouse-client-erl", {tag, "0.3"}}}
, {emqx_connector, {path, "../../apps/emqx_connector"}}
, {emqx_resource, {path, "../../apps/emqx_resource"}}
, {emqx_bridge, {path, "../../apps/emqx_bridge"}}
]}.
{shell, [
{apps, [emqx_bridge_clickhouse]}
]}.

View File

@ -1,8 +1,8 @@
{application, emqx_bridge_clickhouse, [
{description, "EMQX Enterprise ClickHouse Bridge"},
{vsn, "0.1.0"},
{vsn, "0.2.0"},
{registered, []},
{applications, [kernel, stdlib]},
{applications, [kernel, stdlib, clickhouse, emqx_resource]},
{env, []},
{modules, []},
{links, []}

View File

@ -1,9 +1,8 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_bridge_clickhouse).
-module(emqx_bridge_clickhouse).
-include_lib("emqx_bridge/include/emqx_bridge.hrl").
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").
@ -101,7 +100,7 @@ fields("config") ->
}
)}
] ++
emqx_ee_connector_clickhouse:fields(config);
emqx_bridge_clickhouse_connector:fields(config);
fields("creation_opts") ->
emqx_resource_schema:fields("creation_opts");
fields("post") ->

View File

@ -2,7 +2,7 @@
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_connector_clickhouse).
-module(emqx_bridge_clickhouse_connector).
-include_lib("emqx_connector/include/emqx_connector.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").

View File

@ -2,17 +2,17 @@
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_bridge_clickhouse_SUITE).
-module(emqx_bridge_clickhouse_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-define(APP, emqx_bridge_clickhouse).
-define(CLICKHOUSE_HOST, "clickhouse").
-define(CLICKHOUSE_RESOURCE_MOD, emqx_ee_connector_clickhouse).
-include_lib("emqx_connector/include/emqx_connector.hrl").
%% See comment in
%% lib-ee/emqx_ee_connector/test/ee_connector_clickhouse_SUITE.erl for how to
%% lib-ee/emqx_ee_connector/test/ee_bridge_clickhouse_connector_SUITE.erl for how to
%% run this without bringing up the whole CI infrastructure
%%------------------------------------------------------------------------------
@ -26,10 +26,7 @@ init_per_suite(Config) ->
true ->
emqx_common_test_helpers:render_and_load_app_config(emqx_conf),
ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]),
ok = emqx_connector_test_helpers:start_apps([emqx_resource]),
{ok, _} = application:ensure_all_started(emqx_connector),
{ok, _} = application:ensure_all_started(emqx_ee_connector),
{ok, _} = application:ensure_all_started(emqx_ee_bridge),
ok = emqx_connector_test_helpers:start_apps([emqx_resource, ?APP]),
snabbkaffe:fix_ct_logging(),
%% Create the db table
Conn = start_clickhouse_connection(),
@ -76,11 +73,8 @@ start_clickhouse_connection() ->
end_per_suite(Config) ->
ClickhouseConnection = proplists:get_value(clickhouse_connection, Config),
clickhouse:stop(ClickhouseConnection),
ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
ok = emqx_connector_test_helpers:stop_apps([emqx_resource]),
_ = application:stop(emqx_connector),
_ = application:stop(emqx_ee_connector),
_ = application:stop(emqx_bridge).
ok = emqx_connector_test_helpers:stop_apps([?APP, emqx_resource]),
ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]).
init_per_testcase(_, Config) ->
reset_table(Config),

View File

@ -2,18 +2,18 @@
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_connector_clickhouse_SUITE).
-module(emqx_bridge_clickhouse_connector_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include("emqx_connector.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("emqx/include/emqx.hrl").
-include_lib("stdlib/include/assert.hrl").
-define(APP, emqx_bridge_clickhouse).
-define(CLICKHOUSE_HOST, "clickhouse").
-define(CLICKHOUSE_RESOURCE_MOD, emqx_ee_connector_clickhouse).
-define(CLICKHOUSE_RESOURCE_MOD, emqx_bridge_clickhouse_connector).
%% This test SUITE requires a running clickhouse instance. If you don't want to
%% bring up the whole CI infrastructure with the `scripts/ct/run.sh` script
@ -21,7 +21,15 @@
%% from root of the EMQX directory.). You also need to set ?CLICKHOUSE_HOST and
%% ?CLICKHOUSE_PORT to appropriate values.
%%
%% docker run -d -p 18123:8123 -p19000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 -v "`pwd`/.ci/docker-compose-file/clickhouse/users.xml:/etc/clickhouse-server/users.xml" -v "`pwd`/.ci/docker-compose-file/clickhouse/config.xml:/etc/clickhouse-server/config.xml" clickhouse/clickhouse-server
%% docker run \
%% -d \
%% -p 18123:8123 \
%% -p 19000:9000 \
%% --name some-clickhouse-server \
%% --ulimit nofile=262144:262144 \
%% -v "`pwd`/.ci/docker-compose-file/clickhouse/users.xml:/etc/clickhouse-server/users.xml" \
%% -v "`pwd`/.ci/docker-compose-file/clickhouse/config.xml:/etc/clickhouse-server/config.xml" \
%% clickhouse/clickhouse-server
all() ->
emqx_common_test_helpers:all(?MODULE).
@ -43,9 +51,7 @@ init_per_suite(Config) ->
of
true ->
ok = emqx_common_test_helpers:start_apps([emqx_conf]),
ok = emqx_connector_test_helpers:start_apps([emqx_resource]),
{ok, _} = application:ensure_all_started(emqx_connector),
{ok, _} = application:ensure_all_started(emqx_ee_connector),
ok = emqx_connector_test_helpers:start_apps([emqx_resource, ?APP]),
%% Create the db table
{ok, Conn} =
clickhouse:start_link([
@ -68,8 +74,7 @@ init_per_suite(Config) ->
end_per_suite(_Config) ->
ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
ok = emqx_connector_test_helpers:stop_apps([emqx_resource]),
_ = application:stop(emqx_connector).
ok = emqx_connector_test_helpers:stop_apps([?APP, emqx_resource]).
init_per_testcase(_, Config) ->
Config.
@ -119,7 +124,6 @@ perform_lifecycle_check(ResourceID, InitialConfig) ->
?assertEqual({ok, connected}, emqx_resource:health_check(ResourceID)),
% % Perform query as further check that the resource is working as expected
(fun() ->
erlang:display({pool_name, ResourceID}),
QueryNoParamsResWrapper = emqx_resource:query(ResourceID, test_query_no_params()),
?assertMatch({ok, _}, QueryNoParamsResWrapper),
{_, QueryNoParamsRes} = QueryNoParamsResWrapper,

View File

@ -1,6 +1,6 @@
# EMQX DynamoDB Bridge
[Dynamodb](https://aws.amazon.com/dynamodb/) is a high-performance NoSQL database
[DynamoDB](https://aws.amazon.com/dynamodb/) is a high-performance NoSQL database
service provided by Amazon that's designed for scalability and low-latency access
to structured data.

View File

@ -0,0 +1,2 @@
toxiproxy
dynamo

Some files were not shown because too many files have changed in this diff Show More