Merge branch 'master' into file-transfer

* master: (194 commits)
  fix(limiter): update change && fix deprecated version
  chore: update changes
  perf(limiter): simplify the memory represent of limiter configuration
  ci(perf test): update tf variable name and set job timeout
  ci: fix artifact name in scheduled packages workflow
  fix: build_packages_cron.yaml workflow
  ci: move scheduled builds to a separate workflow
  build: check mnesia compatibility when generating mria config
  docs: fix a typo in api doc description
  feat(./dev): use command style and added 'ctl' command
  test: fix delayed-pubish test case flakyness
  refactor: remove raw_with_default config load option
  chore: add changelog for trace timestrap
  feat: increase the time precision of trace logs to microseconds
  chore: make sure topic_metrics/rewrite's default is []
  docs: Update changes/ce/perf-10417.en.md
  chore: bump `snabbkaffe` to 1.0.8
  ci: run static checks in separate jobs
  chore(schema): mark deprecated quic listener fields ?IMPORTANCE_HIDDEN
  chore: remove unused mqtt cap 'subscription_identifiers'
  ...
This commit is contained in:
Ilya Averyanov 2023-05-05 16:50:18 +03:00
commit dd3471bc22
251 changed files with 8989 additions and 1777 deletions

View File

@ -8,6 +8,7 @@ TDENGINE_TAG=3.0.2.4
DYNAMO_TAG=1.21.0
CASSANDRA_TAG=3.11.6
MINIO_TAG=RELEASE.2023-03-20T20-16-18Z
OPENTS_TAG=9aa7f88
MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server
SQLSERVER_TAG=2019-CU19-ubuntu-20.04

View File

@ -0,0 +1,9 @@
# OpenTSDB server used by EMQX data-bridge integration tests.
# NOTE(review): indentation reconstructed — the source rendering had
# flattened it, which is not valid YAML for docker-compose.
version: '3.9'

services:
  opents_server:
    container_name: opents
    # Tag is injected from the CI env file (OPENTS_TAG, e.g. "9aa7f88").
    image: petergrace/opentsdb-docker:${OPENTS_TAG}
    restart: always
    networks:
      # emqx_bridge is defined in the main compose file this service is
      # combined with — presumably via multiple -f flags; verify there.
      - emqx_bridge

View File

@ -0,0 +1,11 @@
# Oracle XE 11g server used by EMQX data-bridge integration tests.
# NOTE(review): indentation reconstructed — the source rendering had
# flattened it, which is not valid YAML for docker-compose.
version: '3.9'

services:
  oracle_server:
    container_name: oracle
    image: oracleinanutshell/oracle-xe-11g:1.0.0
    restart: always
    environment:
      # Quoted so the container receives the literal string "true";
      # an unquoted `true` is a YAML boolean and some compose versions
      # reject non-string environment values.
      ORACLE_DISABLE_ASYNCH_IO: "true"
    networks:
      # emqx_bridge is defined in the main compose file this service is
      # combined with — presumably via multiple -f flags; verify there.
      - emqx_bridge

View File

@ -0,0 +1,32 @@
# Apache Pulsar standalone broker used by EMQX bridge integration tests.
# The container rewrites its standalone.conf at startup to expose four
# listeners (plain/ssl, plus toxiproxy-facing variants) before launching.
# NOTE(review): indentation reconstructed — the source rendering had
# flattened it, which is not valid YAML for docker-compose.
version: '3'

services:
  pulsar:
    container_name: pulsar
    image: apachepulsar/pulsar:2.11.0
    # ports:
    #   - 6650:6650
    #   - 8080:8080
    networks:
      emqx_bridge:
    volumes:
      # Reuse the EMQX test certificates for the broker's TLS listeners.
      - ../../apps/emqx/etc/certs/cert.pem:/etc/certs/server.pem
      - ../../apps/emqx/etc/certs/key.pem:/etc/certs/key.pem
      - ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.pem
    restart: always
    command:
      - bash
      - "-c"
      - |
        sed -i 's/^advertisedAddress=/#advertisedAddress=/' conf/standalone.conf
        # was `sed -ie`: GNU sed parses that as -i with backup suffix "e",
        # leaving a stray standalone.confe file; `-i -e` is what was meant.
        sed -i -e 's/^brokerServicePort=.*/brokerServicePort=6649/' conf/standalone.conf
        sed -i 's/^bindAddress=/#bindAddress=/' conf/standalone.conf
        sed -i 's#^bindAddresses=#bindAddresses=plain:pulsar://0.0.0.0:6650,ssl:pulsar+ssl://0.0.0.0:6651,toxiproxy:pulsar://0.0.0.0:6652,toxiproxy_ssl:pulsar+ssl://0.0.0.0:6653#' conf/standalone.conf
        sed -i 's#^advertisedAddress=#advertisedAddress=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653#' conf/standalone.conf
        sed -i 's#^tlsCertificateFilePath=#tlsCertificateFilePath=/etc/certs/server.pem#' conf/standalone.conf
        sed -i 's#^tlsTrustCertsFilePath=#tlsTrustCertsFilePath=/etc/certs/ca.pem#' conf/standalone.conf
        sed -i 's#^tlsKeyFilePath=#tlsKeyFilePath=/etc/certs/key.pem#' conf/standalone.conf
        sed -i 's#^tlsProtocols=#tlsProtocols=TLSv1.3,TLSv1.2#' conf/standalone.conf
        sed -i 's#^tlsCiphers=#tlsCiphers=TLS_AES_256_GCM_SHA384#' conf/standalone.conf
        echo 'advertisedListeners=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653' >> conf/standalone.conf
        bin/pulsar standalone -nfw -nss

View File

@ -43,6 +43,8 @@ services:
- 19000:19000
# S3 TLS
- 19100:19100
# IOTDB
- 14242:4242
command:
- "-host=0.0.0.0"
- "-config=/config/toxiproxy.json"

View File

@ -20,8 +20,8 @@ esac
{
echo "HOCON_ENV_OVERRIDE_PREFIX=EMQX_"
echo "EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s"
echo "EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10"
echo "EMQX_MQTT__RETRY_INTERVAL=2s"
echo "EMQX_MQTT__MAX_TOPIC_ALIAS=10"
echo "EMQX_AUTHORIZATION__SOURCES=[]"
echo "EMQX_AUTHORIZATION__NO_MATCH=allow"
} >> .ci/docker-compose-file/conf.cluster.env

View File

@ -102,6 +102,30 @@
"upstream": "sqlserver:1433",
"enabled": true
},
{
"name": "opents",
"listen": "0.0.0.0:4242",
"upstream": "opents:4242",
"enabled": true
},
{
"name": "pulsar_plain",
"listen": "0.0.0.0:6652",
"upstream": "pulsar:6652",
"enabled": true
},
{
"name": "pulsar_tls",
"listen": "0.0.0.0:6653",
"upstream": "pulsar:6653",
"enabled": true
},
{
"name": "oracle",
"listen": "0.0.0.0:1521",
"upstream": "oracle:1521",
"enabled": true
},
{
"name": "minio_tcp",
"listen": "0.0.0.0:19000",

View File

@ -5,8 +5,6 @@ concurrency:
cancel-in-progress: true
on:
schedule:
- cron: '0 */6 * * *'
push:
branches:
- 'ci/**'
@ -23,7 +21,6 @@ on:
jobs:
prepare:
runs-on: ubuntu-22.04
if: (github.repository_owner == 'emqx' && github.event_name == 'schedule') || github.event_name != 'schedule'
container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04
outputs:
BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }}
@ -134,14 +131,6 @@ jobs:
with:
name: ${{ matrix.profile }}
path: source/_packages/${{ matrix.profile }}/
- name: Send notification to Slack
uses: slackapi/slack-github-action@v1.23.0
if: failure() && github.event_name == 'schedule'
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
with:
payload: |
{"text": "Scheduled run of ${{ github.workflow }}@Windows failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
mac:
needs: prepare
@ -182,14 +171,6 @@ jobs:
with:
name: ${{ matrix.profile }}
path: _packages/${{ matrix.profile }}/
- name: Send notification to Slack
uses: slackapi/slack-github-action@v1.23.0
if: failure() && github.event_name == 'schedule'
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
with:
payload: |
{"text": "Scheduled run of ${{ github.workflow }}@${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
linux:
needs: prepare
@ -304,19 +285,11 @@ jobs:
with:
name: ${{ matrix.profile }}
path: source/_packages/${{ matrix.profile }}/
- name: Send notification to Slack
uses: slackapi/slack-github-action@v1.23.0
if: failure() && github.event_name == 'schedule'
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
with:
payload: |
{"text": "Scheduled run of ${{ github.workflow }}@${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
publish_artifacts:
runs-on: ubuntu-22.04
needs: [prepare, mac, linux]
if: needs.prepare.outputs.IS_EXACT_TAG && github.event_name != 'schedule'
if: needs.prepare.outputs.IS_EXACT_TAG
strategy:
fail-fast: false
matrix:

View File

@ -0,0 +1,153 @@
name: Scheduled build packages
concurrency:
group: build-${{ github.event_name }}-${{ github.ref }}
cancel-in-progress: true
on:
schedule:
- cron: '0 */6 * * *'
jobs:
prepare:
runs-on: aws-amd64
if: github.repository_owner == 'emqx'
container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04
strategy:
fail-fast: false
matrix:
profile:
- ['emqx', 'master']
- ['emqx-enterprise', 'release-50']
steps:
- uses: actions/checkout@v3
with:
ref: ${{ matrix.profile[1] }}
path: source
fetch-depth: 0
- name: get_all_deps
run: |
make -C source deps-all
zip -ryq source.zip source/* source/.[^.]*
- uses: actions/upload-artifact@v3
with:
name: source-${{ matrix.profile[0] }}
path: source.zip
linux:
needs: prepare
runs-on: aws-${{ matrix.arch }}
strategy:
fail-fast: false
matrix:
profile:
- emqx
- emqx-enterprise
otp:
- 24.3.4.2-3
arch:
- amd64
os:
- debian10
- amzn2
builder:
- 5.0-34
elixir:
- 1.13.4
defaults:
run:
shell: bash
steps:
- uses: AutoModality/action-clean@v1
- uses: actions/download-artifact@v3
with:
name: source-${{ matrix.profile }}
path: .
- name: unzip source code
run: unzip -q source.zip
- name: build emqx packages
working-directory: source
env:
BUILDER: ${{ matrix.builder }}
ELIXIR: ${{ matrix.elixir }}
OTP: ${{ matrix.otp }}
PROFILE: ${{ matrix.profile[0] }}
ARCH: ${{ matrix.arch }}
OS: ${{ matrix.os }}
run: |
set -eu
PKGTYPES="tgz pkg"
IS_ELIXIR="no"
for PKGTYPE in ${PKGTYPES};
do
./scripts/buildx.sh \
--profile "${PROFILE}" \
--pkgtype "${PKGTYPE}" \
--arch "${ARCH}" \
--elixir "${IS_ELIXIR}" \
--builder "ghcr.io/emqx/emqx-builder/${BUILDER}:${ELIXIR}-${OTP}-${OS}
done
- uses: actions/upload-artifact@v3
if: success()
with:
name: ${{ matrix.profile }}
path: source/_packages/${{ matrix.profile }}/
- name: Send notification to Slack
uses: slackapi/slack-github-action@v1.23.0
if: failure()
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
with:
payload: |
{"text": "Scheduled build of ${{ matrix.profile }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
mac:
needs: prepare
strategy:
fail-fast: false
matrix:
profile:
- emqx
otp:
- 24.3.4.2-3
os:
- macos-12
- macos-12-arm64
runs-on: ${{ matrix.os }}
steps:
- uses: emqx/self-hosted-cleanup-action@v1.0.3
- uses: actions/download-artifact@v3
with:
name: source-${{ matrix.profile }}
path: .
- name: unzip source code
run: |
ln -s . source
unzip -o -q source.zip
rm source source.zip
- uses: ./.github/actions/package-macos
with:
profile: ${{ matrix.profile }}
otp: ${{ matrix.otp }}
os: ${{ matrix.os }}
apple_id_password: ${{ secrets.APPLE_ID_PASSWORD }}
apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
- uses: actions/upload-artifact@v3
if: success()
with:
name: ${{ matrix.profile }}
path: _packages/${{ matrix.profile }}/
- name: Send notification to Slack
uses: slackapi/slack-github-action@v1.23.0
if: failure()
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
with:
payload: |
{"text": "Scheduled build of ${{ matrix.profile }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}

View File

@ -194,15 +194,12 @@ jobs:
run: |
CID=$(docker run -d --rm -P $EMQX_IMAGE_TAG)
HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' $CID)
export EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS='yes'
./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT
docker stop $CID
- name: test two nodes cluster with proto_dist=inet_tls in docker
run: |
./scripts/test/start-two-nodes-in-docker.sh -P $EMQX_IMAGE_TAG $EMQX_IMAGE_OLD_VERSION_TAG
HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' haproxy)
# versions before 5.0.22 have hidden fields included in the API spec
export EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS='no'
./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT
# cleanup
./scripts/test/start-two-nodes-in-docker.sh -c

127
.github/workflows/performance_test.yaml vendored Normal file
View File

@ -0,0 +1,127 @@
name: Performance Test Suite
on:
push:
branches:
- 'perf/**'
schedule:
- cron: '0 1 * * *'
workflow_dispatch:
inputs:
ref:
required: false
jobs:
prepare:
runs-on: ubuntu-latest
container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu20.04
outputs:
BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }}
PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
ref: ${{ github.event.inputs.ref }}
- name: Work around https://github.com/actions/checkout/issues/766
run: |
git config --global --add safe.directory "$GITHUB_WORKSPACE"
- id: prepare
run: |
echo "EMQX_NAME=emqx" >> $GITHUB_ENV
echo "CODE_PATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV
echo "BENCH_ID=$(date --utc +%F)/emqx-$(./pkg-vsn.sh emqx)" >> $GITHUB_OUTPUT
- name: Build deb package
run: |
make ${EMQX_NAME}-pkg
./scripts/pkg-tests.sh ${EMQX_NAME}-pkg
- name: Get package file name
id: package_file
run: |
echo "PACKAGE_FILE=$(find _packages/emqx -name 'emqx-*.deb' | head -n 1 | xargs basename)" >> $GITHUB_OUTPUT
- uses: actions/upload-artifact@v3
with:
name: emqx-ubuntu20.04
path: _packages/emqx/${{ steps.package_file.outputs.PACKAGE_FILE }}
tf_emqx_perf_test:
runs-on: ubuntu-latest
needs:
- prepare
env:
TF_VAR_bench_id: ${{ needs.prepare.outputs.BENCH_ID }}
TF_VAR_package_file: ${{ needs.prepare.outputs.PACKAGE_FILE }}
TF_VAR_test_duration: 300
TF_VAR_grafana_api_key: ${{ secrets.TF_EMQX_PERF_TEST_GRAFANA_API_KEY }}
TF_AWS_REGION: eu-north-1
steps:
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PERF_TEST }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-north-1
- name: Checkout tf-emqx-performance-test
uses: actions/checkout@v3
with:
repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test
- uses: actions/download-artifact@v3
with:
name: emqx-ubuntu20.04
path: tf-emqx-performance-test/
- name: Setup Terraform
uses: hashicorp/setup-terraform@v2
with:
terraform_wrapper: false
- name: terraform init
working-directory: ./tf-emqx-performance-test
run: |
terraform init
- name: terraform apply
working-directory: ./tf-emqx-performance-test
run: |
terraform apply -auto-approve
- name: Wait for test results
timeout-minutes: 30
working-directory: ./tf-emqx-performance-test
id: test-results
run: |
sleep $TF_VAR_test_duration
until aws s3api head-object --bucket tf-emqx-performance-test --key "$TF_VAR_bench_id/DONE" > /dev/null 2>&1
do
printf '.'
sleep 10
done
echo
aws s3 cp "s3://tf-emqx-performance-test/$TF_VAR_bench_id/metrics.json" ./
aws s3 cp "s3://tf-emqx-performance-test/$TF_VAR_bench_id/stats.json" ./
echo MESSAGES_DELIVERED=$(cat metrics.json | jq '[.[]."messages.delivered"] | add') >> $GITHUB_OUTPUT
echo MESSAGES_DROPPED=$(cat metrics.json | jq '[.[]."messages.dropped"] | add') >> $GITHUB_OUTPUT
- name: Send notification to Slack
if: success()
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
with:
payload: |
{"text": "EMQX performance test completed.\nMessages delivered: ${{ steps.test-results.outputs.MESSAGES_DELIVERED }}.\nMessages dropped: ${{ steps.test-results.outputs.MESSAGES_DROPPED }}.\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
- name: terraform destroy
if: always()
working-directory: ./tf-emqx-performance-test
run: |
terraform destroy -auto-approve
- uses: actions/upload-artifact@v3
if: success()
with:
name: test-results
path: "./tf-emqx-performance-test/*.json"
- uses: actions/upload-artifact@v3
if: always()
with:
name: terraform
path: |
./tf-emqx-performance-test/.terraform
./tf-emqx-performance-test/*.tfstate

View File

@ -167,8 +167,8 @@ jobs:
--set image.pullPolicy=Never \
--set image.tag=$EMQX_TAG \
--set emqxAclConfig="" \
--set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \
--set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \
--set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \
--set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \
--set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \
--set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \
deploy/charts/${{ matrix.profile }} \
@ -185,8 +185,8 @@ jobs:
--set image.pullPolicy=Never \
--set image.tag=$EMQX_TAG \
--set emqxAclConfig="" \
--set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \
--set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \
--set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \
--set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \
--set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \
--set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \
deploy/charts/${{ matrix.profile }} \

View File

@ -14,6 +14,9 @@ on:
- e*
pull_request:
env:
IS_CI: "yes"
jobs:
build-matrix:
runs-on: ubuntu-22.04
@ -69,21 +72,14 @@ jobs:
- uses: actions/checkout@v3
with:
path: source
- uses: actions/cache@v3
id: cache
with:
path: "$HOME/.cache/rebar3/rebar3_${{ matrix.otp }}_plt"
key: rebar3-dialyzer-plt-${{ matrix.otp }}
- name: get_all_deps
working-directory: source
env:
PROFILE: ${{ matrix.profile }}
#DIAGNOSTIC: 1
run: |
make ensure-rebar3
# fetch all deps and compile
make ${{ matrix.profile }}
make static_checks
make ${{ matrix.profile }}-compile
make test-compile
cd ..
zip -ryq source.zip source/* source/.[^.]*
@ -92,6 +88,34 @@ jobs:
name: source-${{ matrix.profile }}-${{ matrix.otp }}
path: source.zip
static_checks:
needs:
- build-matrix
- prepare
runs-on: ${{ needs.build-matrix.outputs.runs-on }}
strategy:
fail-fast: false
matrix:
include: ${{ fromJson(needs.build-matrix.outputs.prepare) }}
container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04"
steps:
- uses: AutoModality/action-clean@v1
- uses: actions/download-artifact@v3
with:
name: source-${{ matrix.profile }}-${{ matrix.otp }}
path: .
- name: unzip source code
run: unzip -o -q source.zip
- uses: actions/cache@v3
with:
path: "source/emqx_dialyzer_${{ matrix.otp }}_plt"
key: rebar3-dialyzer-plt-${{ matrix.profile }}-${{ matrix.otp }}
- name: run static checks
env:
PROFILE: ${{ matrix.profile }}
working-directory: source
run: make static_checks
eunit_and_proper:
needs:
- build-matrix
@ -168,6 +192,7 @@ jobs:
REDIS_TAG: "7.0"
INFLUXDB_TAG: "2.5.0"
TDENGINE_TAG: "3.0.2.4"
OPENTS_TAG: "9aa7f88"
MINIO_TAG: "RELEASE.2023-03-20T20-16-18Z"
PROFILE: ${{ matrix.profile }}
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}

5
.gitignore vendored
View File

@ -43,8 +43,7 @@ tmp/
_packages
elvis
emqx_dialyzer_*_plt
*/emqx_dashboard/priv/www
*/emqx_dashboard/priv/i18n.conf
*/emqx_dashboard/priv/
dist.zip
scripts/git-token
apps/*/etc/*.all
@ -71,3 +70,5 @@ apps/emqx/test/emqx_static_checks_data/master.bpapi
lux_logs/
/.prepare
bom.json
ct_run*/
apps/emqx_conf/etc/emqx.conf.all.rendered*

View File

@ -1,7 +1,7 @@
Source code in this repository is variously licensed under below licenses.
For EMQX: Apache License 2.0, see APL.txt,
which applies to all source files except for lib-ee sub-directory.
For Default: Apache License 2.0, see APL.txt,
which applies to all source files except for folders applied with Business Source License.
For EMQX Enterprise (since version 5.0): Business Source License 1.1,
see lib-ee/BSL.txt, which applies to source code in lib-ee sub-directory.
see apps/emqx_bridge_kafka/BSL.txt as an example, please check license files under sub directory of apps.

View File

@ -7,7 +7,8 @@ export EMQX_DEFAULT_RUNNER = debian:11-slim
export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
export EMQX_DASHBOARD_VERSION ?= v1.2.3
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6-beta.1
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6-beta.2
export EMQX_REL_FORM ?= tgz
export QUICER_DOWNLOAD_FROM_RELEASE = 1
ifeq ($(OS),Windows_NT)
@ -73,6 +74,10 @@ proper: $(REBAR)
test-compile: $(REBAR) merge-config
$(REBAR) as test compile
.PHONY: $(REL_PROFILES:%=%-compile)
$(REL_PROFILES:%=%-compile): $(REBAR) merge-config
$(REBAR) as $(@:%-compile=%) compile
.PHONY: ct
ct: $(REBAR) merge-config
@ENABLE_COVER_COMPILE=1 $(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(CT_COVER_EXPORT_PREFIX)-ct
@ -88,10 +93,9 @@ APPS=$(shell $(SCRIPTS)/find-apps.sh)
.PHONY: $(APPS:%=%-ct)
define gen-app-ct-target
$1-ct: $(REBAR)
$1-ct: $(REBAR) merge-config
$(eval SUITES := $(shell $(SCRIPTS)/find-suites.sh $1))
ifneq ($(SUITES),)
@$(SCRIPTS)/pre-compile.sh $(PROFILE)
@ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \
--readable=$(CT_READABLE) \
--name $(CT_NODE_NAME) \
@ -139,6 +143,11 @@ COMMON_DEPS := $(REBAR)
$(REL_PROFILES:%=%): $(COMMON_DEPS)
@$(BUILD) $(@) rel
.PHONY: compile $(PROFILES:%=compile-%)
compile: $(PROFILES:%=compile-%)
$(PROFILES:%=compile-%):
@$(BUILD) $(@:compile-%=%) apps
## Not calling rebar3 clean because
## 1. rebar3 clean relies on rebar3, meaning it reads config, fetches dependencies etc.
## 2. it's slow
@ -222,11 +231,11 @@ endef
$(foreach pt,$(PKG_PROFILES),$(eval $(call gen-pkg-target,$(pt))))
.PHONY: run
run: $(PROFILE) quickrun
run: compile-$(PROFILE) quickrun
.PHONY: quickrun
quickrun:
./_build/$(PROFILE)/rel/emqx/bin/emqx console
./dev -p $(PROFILE)
## Take the currently set PROFILE
docker:

View File

@ -1,43 +0,0 @@
listeners.tcp.default {
bind = "0.0.0.0:1883"
max_connections = 1024000
}
listeners.ssl.default {
bind = "0.0.0.0:8883"
max_connections = 512000
ssl_options {
keyfile = "{{ platform_etc_dir }}/certs/key.pem"
certfile = "{{ platform_etc_dir }}/certs/cert.pem"
cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem"
}
}
listeners.ws.default {
bind = "0.0.0.0:8083"
max_connections = 1024000
websocket.mqtt_path = "/mqtt"
}
listeners.wss.default {
bind = "0.0.0.0:8084"
max_connections = 512000
websocket.mqtt_path = "/mqtt"
ssl_options {
keyfile = "{{ platform_etc_dir }}/certs/key.pem"
certfile = "{{ platform_etc_dir }}/certs/cert.pem"
cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem"
}
}
# listeners.quic.default {
# enabled = true
# bind = "0.0.0.0:14567"
# max_connections = 1024000
# ssl_options {
# verify = verify_none
# keyfile = "{{ platform_etc_dir }}/certs/key.pem"
# certfile = "{{ platform_etc_dir }}/certs/cert.pem"
# cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem"
# }
# }

View File

@ -32,10 +32,10 @@
%% `apps/emqx/src/bpapi/README.md'
%% Community edition
-define(EMQX_RELEASE_CE, "5.0.23").
-define(EMQX_RELEASE_CE, "5.0.24").
%% Enterprise edition
-define(EMQX_RELEASE_EE, "5.0.3-alpha.1").
-define(EMQX_RELEASE_EE, "5.0.3-alpha.5").
%% the HTTP API version
-define(EMQX_API_VERSION, "5.0").

View File

@ -0,0 +1,23 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% Standard include guard so this header is processed only once.
-ifndef(EMQX_SCHEMA_HRL).
-define(EMQX_SCHEMA_HRL, true).
%% Atom marker representing a deleted config entry (a "tombstone").
-define(TOMBSTONE_TYPE, marked_for_deletion).
%% Binary form of the tombstone marker, for raw (binary-keyed) configs.
-define(TOMBSTONE_VALUE, <<"marked_for_deletion">>).
%% Update-request tag that asks the config handler to delete an entry;
%% emqx_config_handler treats it like a remove (returns no change result).
-define(TOMBSTONE_CONFIG_CHANGE_REQ, mark_it_for_deletion).
-endif.

View File

@ -27,13 +27,13 @@
{gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}},
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.6"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.1"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.2"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.4"}}},
{emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}},
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
{snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.7"}}}
{snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.8"}}}
]}.
{plugins, [{rebar3_proper, "0.12.1"}, rebar3_path_deps]}.

View File

@ -3,7 +3,7 @@
{id, "emqx"},
{description, "EMQX Core"},
% strict semver, bump manually!
{vsn, "5.0.24"},
{vsn, "5.0.25"},
{modules, []},
{registered, []},
{applications, [

View File

@ -89,7 +89,7 @@
%% Authentication Data Cache
auth_cache :: maybe(map()),
%% Quota checkers
quota :: maybe(emqx_limiter_container:limiter()),
quota :: emqx_limiter_container:limiter(),
%% Timers
timers :: #{atom() => disabled | maybe(reference())},
%% Conn State
@ -768,7 +768,7 @@ do_finish_publish(PacketId, PubRes, RC, Channel) ->
NChannel = ensure_quota(PubRes, Channel),
handle_out(puback, {PacketId, RC}, NChannel).
ensure_quota(_, Channel = #channel{quota = undefined}) ->
ensure_quota(_, Channel = #channel{quota = infinity}) ->
Channel;
ensure_quota(PubRes, Channel = #channel{quota = Limiter}) ->
Cnt = lists:foldl(

View File

@ -22,7 +22,6 @@
-export([
init_load/1,
init_load/2,
init_load/3,
read_override_conf/1,
has_deprecated_file/0,
delete_override_conf_files/0,
@ -35,7 +34,6 @@
save_to_config_map/2,
save_to_override_conf/3
]).
-export([raw_conf_with_default/4]).
-export([merge_envs/2]).
-export([
@ -90,7 +88,7 @@
]).
-ifdef(TEST).
-export([erase_schema_mod_and_names/0]).
-export([erase_all/0]).
-endif.
-include("logger.hrl").
@ -103,6 +101,8 @@
-define(ZONE_CONF_PATH(ZONE, PATH), [zones, ZONE | PATH]).
-define(LISTENER_CONF_PATH(TYPE, LISTENER, PATH), [listeners, TYPE, LISTENER | PATH]).
-define(CONFIG_NOT_FOUND_MAGIC, '$0tFound').
-export_type([
update_request/0,
raw_config/0,
@ -164,9 +164,8 @@ get(KeyPath, Default) -> do_get(?CONF, KeyPath, Default).
-spec find(emqx_utils_maps:config_key_path()) ->
{ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}.
find([]) ->
Ref = make_ref(),
case do_get(?CONF, [], Ref) of
Ref -> {not_found, []};
case do_get(?CONF, [], ?CONFIG_NOT_FOUND_MAGIC) of
?CONFIG_NOT_FOUND_MAGIC -> {not_found, []};
Res -> {ok, Res}
end;
find(KeyPath) ->
@ -179,9 +178,8 @@ find(KeyPath) ->
-spec find_raw(emqx_utils_maps:config_key_path()) ->
{ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}.
find_raw([]) ->
Ref = make_ref(),
case do_get_raw([], Ref) of
Ref -> {not_found, []};
case do_get_raw([], ?CONFIG_NOT_FOUND_MAGIC) of
?CONFIG_NOT_FOUND_MAGIC -> {not_found, []};
Res -> {ok, Res}
end;
find_raw(KeyPath) ->
@ -315,45 +313,38 @@ put_raw(KeyPath, Config) ->
%%============================================================================
init_load(SchemaMod) ->
ConfFiles = application:get_env(emqx, config_files, []),
init_load(SchemaMod, ConfFiles, #{raw_with_default => true}).
init_load(SchemaMod, Opts) when is_map(Opts) ->
ConfFiles = application:get_env(emqx, config_files, []),
init_load(SchemaMod, ConfFiles, Opts);
init_load(SchemaMod, ConfFiles) ->
init_load(SchemaMod, ConfFiles, #{raw_with_default => false}).
init_load(SchemaMod, ConfFiles).
%% @doc Initial load of the given config files.
%% NOTE: The order of the files is significant, configs from files ordered
%% in the rear of the list overrides prior values.
-spec init_load(module(), [string()] | binary() | hocon:config()) -> ok.
init_load(SchemaMod, Conf, Opts) when is_list(Conf) orelse is_binary(Conf) ->
init_load(SchemaMod, Conf) when is_list(Conf) orelse is_binary(Conf) ->
ok = save_schema_mod_and_names(SchemaMod),
HasDeprecatedFile = has_deprecated_file(),
RawConf = parse_hocon(HasDeprecatedFile, Conf),
init_load(HasDeprecatedFile, SchemaMod, RawConf, Opts).
RawConf0 = load_config_files(HasDeprecatedFile, Conf),
RawConf1 =
case HasDeprecatedFile of
true ->
overlay_v0(SchemaMod, RawConf0);
false ->
overlay_v1(SchemaMod, RawConf0)
end,
RawConf = fill_defaults_for_all_roots(SchemaMod, RawConf1),
%% check configs against the schema
{AppEnvs, CheckedConf} = check_config(SchemaMod, RawConf, #{}),
save_to_app_env(AppEnvs),
ok = save_to_config_map(CheckedConf, RawConf).
init_load(true, SchemaMod, RawConf, Opts) when is_map(RawConf) ->
%% deprecated conf will be removed in 5.1
%% Merge environment variable overrides on top
%% Merge environment variable overrides on top, then merge with overrides.
overlay_v0(SchemaMod, RawConf) when is_map(RawConf) ->
RawConfWithEnvs = merge_envs(SchemaMod, RawConf),
Overrides = read_override_confs(),
RawConfWithOverrides = hocon:deep_merge(RawConfWithEnvs, Overrides),
RootNames = get_root_names(),
RawConfAll = raw_conf_with_default(SchemaMod, RootNames, RawConfWithOverrides, Opts),
%% check configs against the schema
{AppEnvs, CheckedConf} = check_config(SchemaMod, RawConfAll, #{}),
save_to_app_env(AppEnvs),
ok = save_to_config_map(CheckedConf, RawConfAll);
init_load(false, SchemaMod, RawConf, Opts) when is_map(RawConf) ->
ok = save_schema_mod_and_names(SchemaMod),
RootNames = get_root_names(),
%% Merge environment variable overrides on top
RawConfWithEnvs = merge_envs(SchemaMod, RawConf),
RawConfAll = raw_conf_with_default(SchemaMod, RootNames, RawConfWithEnvs, Opts),
%% check configs against the schema
{AppEnvs, CheckedConf} = check_config(SchemaMod, RawConfAll, #{}),
save_to_app_env(AppEnvs),
ok = save_to_config_map(CheckedConf, RawConfAll).
hocon:deep_merge(RawConfWithEnvs, Overrides).
%% Merge environment variable overrides on top.
overlay_v1(SchemaMod, RawConf) when is_map(RawConf) ->
merge_envs(SchemaMod, RawConf).
%% @doc Read merged cluster + local overrides.
read_override_confs() ->
@ -362,47 +353,58 @@ read_override_confs() ->
hocon:deep_merge(ClusterOverrides, LocalOverrides).
%% keep the raw and non-raw conf has the same keys to make update raw conf easier.
raw_conf_with_default(SchemaMod, RootNames, RawConf, #{raw_with_default := true}) ->
Fun = fun(Name, Acc) ->
case maps:is_key(Name, RawConf) of
true ->
Acc;
false ->
case lists:keyfind(Name, 1, hocon_schema:roots(SchemaMod)) of
false ->
Acc;
{_, {_, Schema}} ->
Acc#{Name => schema_default(Schema)}
end
fill_defaults_for_all_roots(SchemaMod, RawConf0) ->
RootSchemas = hocon_schema:roots(SchemaMod),
%% the roots which are missing from the loaded configs
MissingRoots = lists:filtermap(
fun({BinName, Sc}) ->
case maps:is_key(BinName, RawConf0) orelse is_already_loaded(BinName) of
true -> false;
false -> {true, Sc}
end
end,
RawDefault = lists:foldl(Fun, #{}, RootNames),
maps:merge(RawConf, fill_defaults(SchemaMod, RawDefault, #{}));
raw_conf_with_default(_SchemaMod, _RootNames, RawConf, _Opts) ->
RawConf.
RootSchemas
),
RawConf = lists:foldl(
fun({RootName, Schema}, Acc) ->
Acc#{bin(RootName) => seed_default(Schema)}
end,
RawConf0,
MissingRoots
),
fill_defaults(RawConf).
schema_default(Schema) ->
case hocon_schema:field_schema(Schema, type) of
?ARRAY(_) ->
[];
_ ->
#{}
%% So far, this can only return true when testing.
%% e.g. when testing an app, we need to load its config first
%% then start emqx_conf application which will load the
%% possibly empty config again (then filled with defaults).
is_already_loaded(Name) ->
?MODULE:get_raw([Name], #{}) =/= #{}.
%% if a root is not found in the raw conf, fill it with default values.
seed_default(Schema) ->
case hocon_schema:field_schema(Schema, default) of
undefined ->
%% so far all roots without a default value are objects
#{};
Value ->
Value
end.
parse_hocon(HasDeprecatedFile, Conf) ->
load_config_files(HasDeprecatedFile, Conf) ->
IncDirs = include_dirs(),
case do_parse_hocon(HasDeprecatedFile, Conf, IncDirs) of
{ok, HoconMap} ->
HoconMap;
{error, Reason} ->
?SLOG(error, #{
msg => "failed_to_load_hocon_file",
msg => "failed_to_load_config_file",
reason => Reason,
pwd => file:get_cwd(),
include_dirs => IncDirs,
config_file => Conf
}),
error(failed_to_load_hocon_file)
error(failed_to_load_config_file)
end.
do_parse_hocon(true, Conf, IncDirs) ->
@ -547,7 +549,9 @@ save_schema_mod_and_names(SchemaMod) ->
}).
-ifdef(TEST).
erase_schema_mod_and_names() ->
erase_all() ->
Names = get_root_names(),
lists:foreach(fun erase/1, Names),
persistent_term:erase(?PERSIS_SCHEMA_MODS).
-endif.
@ -665,11 +669,9 @@ do_get_raw(Path, Default) ->
do_get(?RAW_CONF, Path, Default).
do_get(Type, KeyPath) ->
Ref = make_ref(),
Res = do_get(Type, KeyPath, Ref),
case Res =:= Ref of
true -> error({config_not_found, KeyPath});
false -> Res
case do_get(Type, KeyPath, ?CONFIG_NOT_FOUND_MAGIC) of
?CONFIG_NOT_FOUND_MAGIC -> error({config_not_found, KeyPath});
Res -> Res
end.
do_get(Type, [], Default) ->

View File

@ -18,6 +18,7 @@
-module(emqx_config_handler).
-include("logger.hrl").
-include("emqx_schema.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-behaviour(gen_server).
@ -447,11 +448,17 @@ merge_to_override_config(RawConf, Opts) ->
up_req({remove, _Opts}) -> '$remove';
up_req({{update, Req}, _Opts}) -> Req.
return_change_result(ConfKeyPath, {{update, _Req}, Opts}) ->
return_change_result(ConfKeyPath, {{update, Req}, Opts}) ->
case Req =/= ?TOMBSTONE_CONFIG_CHANGE_REQ of
true ->
#{
config => emqx_config:get(ConfKeyPath),
raw_config => return_rawconf(ConfKeyPath, Opts)
};
false ->
%% like remove, nothing to return
#{}
end;
return_change_result(_ConfKeyPath, {remove, _Opts}) ->
#{}.

View File

@ -111,7 +111,7 @@
listener :: {Type :: atom(), Name :: atom()},
%% Limiter
limiter :: maybe(limiter()),
limiter :: limiter(),
%% limiter buffer for overload use
limiter_buffer :: queue:queue(pending_req()),
@ -974,19 +974,22 @@ handle_cast(Req, State) ->
list(any()),
state()
) -> _.
check_limiter(
_Needs,
Data,
WhenOk,
Msgs,
#state{limiter = infinity} = State
) ->
WhenOk(Data, Msgs, State);
check_limiter(
Needs,
Data,
WhenOk,
Msgs,
#state{
limiter = Limiter,
limiter_timer = LimiterTimer,
limiter_buffer = Cache
} = State
) when Limiter =/= undefined ->
case LimiterTimer of
undefined ->
#state{limiter_timer = undefined, limiter = Limiter} = State
) ->
case emqx_limiter_container:check_list(Needs, Limiter) of
{ok, Limiter2} ->
WhenOk(Data, Msgs, State#state{limiter = Limiter2});
@ -1014,15 +1017,18 @@ check_limiter(
{drop, Limiter2} ->
{ok, State#state{limiter = Limiter2}}
end;
_ ->
check_limiter(
Needs,
Data,
WhenOk,
_Msgs,
#state{limiter_buffer = Cache} = State
) ->
%% if there has a retry timer,
%% cache the operation and execute it after the retry is over
%% the maximum length of the cache queue is equal to the active_n
New = #pending_req{need = Needs, data = Data, next = WhenOk},
{ok, State#state{limiter_buffer = queue:in(New, Cache)}}
end;
check_limiter(_, Data, WhenOk, Msgs, State) ->
WhenOk(Data, Msgs, State).
{ok, State#state{limiter_buffer = queue:in(New, Cache)}}.
%% try to perform a retry
-spec retry_limiter(state()) -> _.

View File

@ -22,7 +22,7 @@
%% API
-export([
make_token_bucket_limiter/2,
make_local_limiter/2,
make_ref_limiter/2,
check/2,
consume/2,
@ -32,12 +32,11 @@
make_future/1,
available/1
]).
-export_type([token_bucket_limiter/0]).
-export_type([local_limiter/0]).
%% a token bucket limiter with a limiter server's bucket reference
%% the number of tokens currently available
-type token_bucket_limiter() :: #{
%% a token bucket limiter which may or not contains a reference to another limiter,
%% and can be used in a client alone
-type local_limiter() :: #{
tokens := non_neg_integer(),
rate := decimal(),
capacity := decimal(),
@ -58,12 +57,12 @@
retry_ctx =>
undefined
%% the retry context
| retry_context(token_bucket_limiter()),
| retry_context(local_limiter()),
%% allow to add other keys
atom => any()
}.
%% a limiter server's bucket reference
%% a limiter instance which only contains a reference to another limiter(bucket)
-type ref_limiter() :: #{
max_retry_time := non_neg_integer(),
failure_strategy := failure_strategy(),
@ -88,7 +87,7 @@
}.
-type bucket() :: emqx_limiter_bucket_ref:bucket_ref().
-type limiter() :: token_bucket_limiter() | ref_limiter() | infinity.
-type limiter() :: local_limiter() | ref_limiter() | infinity.
-type millisecond() :: non_neg_integer().
-type pause_type() :: pause | partial.
@ -116,7 +115,7 @@
rate := decimal(),
initial := non_neg_integer(),
low_watermark := non_neg_integer(),
capacity := decimal(),
burst := decimal(),
divisible := boolean(),
max_retry_time := non_neg_integer(),
failure_strategy := failure_strategy()
@ -134,8 +133,8 @@
%% API
%%--------------------------------------------------------------------
%%@doc create a limiter
-spec make_token_bucket_limiter(limiter_bucket_cfg(), bucket()) -> _.
make_token_bucket_limiter(Cfg, Bucket) ->
-spec make_local_limiter(limiter_bucket_cfg(), bucket()) -> _.
make_local_limiter(Cfg, Bucket) ->
Cfg#{
tokens => emqx_limiter_server:get_initial_val(Cfg),
lasttime => ?NOW,
@ -312,8 +311,8 @@ on_failure(throw, Limiter) ->
Message = io_lib:format("limiter consume failed, limiter:~p~n", [Limiter]),
erlang:throw({rate_check_fail, Message}).
-spec do_check_with_parent_limiter(pos_integer(), token_bucket_limiter()) ->
inner_check_result(token_bucket_limiter()).
-spec do_check_with_parent_limiter(pos_integer(), local_limiter()) ->
inner_check_result(local_limiter()).
do_check_with_parent_limiter(
Need,
#{
@ -336,7 +335,7 @@ do_check_with_parent_limiter(
)
end.
-spec do_reset(pos_integer(), token_bucket_limiter()) -> inner_check_result(token_bucket_limiter()).
-spec do_reset(pos_integer(), local_limiter()) -> inner_check_result(local_limiter()).
do_reset(
Need,
#{

View File

@ -34,7 +34,9 @@
-export_type([container/0, check_result/0]).
-type container() :: #{
-type container() ::
infinity
| #{
limiter_type() => undefined | limiter(),
%% the retry context of the limiter
retry_key() =>
@ -43,7 +45,7 @@
| future(),
%% the retry context of the container
retry_ctx := undefined | any()
}.
}.
-type future() :: pos_integer().
-type limiter_id() :: emqx_limiter_schema:limiter_id().
@ -78,7 +80,20 @@ get_limiter_by_types(Id, Types, BucketCfgs) ->
{ok, Limiter} = emqx_limiter_server:connect(Id, Type, BucketCfgs),
add_new(Type, Limiter, Acc)
end,
lists:foldl(Init, #{retry_ctx => undefined}, Types).
Container = lists:foldl(Init, #{retry_ctx => undefined}, Types),
case
lists:all(
fun(Type) ->
maps:get(Type, Container) =:= infinity
end,
Types
)
of
true ->
infinity;
_ ->
Container
end.
-spec add_new(limiter_type(), limiter(), container()) -> container().
add_new(Type, Limiter, Container) ->
@ -89,11 +104,15 @@ add_new(Type, Limiter, Container) ->
%% @doc check the specified limiter
-spec check(pos_integer(), limiter_type(), container()) -> check_result().
check(_Need, _Type, infinity) ->
{ok, infinity};
check(Need, Type, Container) ->
check_list([{Need, Type}], Container).
%% @doc check multiple limiters
-spec check_list(list({pos_integer(), limiter_type()}), container()) -> check_result().
check_list(_Need, infinity) ->
{ok, infinity};
check_list([{Need, Type} | T], Container) ->
Limiter = maps:get(Type, Container),
case emqx_htb_limiter:check(Need, Limiter) of
@ -121,11 +140,15 @@ check_list([], Container) ->
%% @doc retry the specified limiter
-spec retry(limiter_type(), container()) -> check_result().
retry(_Type, infinity) ->
{ok, infinity};
retry(Type, Container) ->
retry_list([Type], Container).
%% @doc retry multiple limiters
-spec retry_list(list(limiter_type()), container()) -> check_result().
retry_list(_Types, infinity) ->
{ok, infinity};
retry_list([Type | T], Container) ->
Key = ?RETRY_KEY(Type),
case Container of

View File

@ -30,6 +30,12 @@
post_config_update/5
]).
-export([
find_root/1,
insert_root/2,
delete_root/1
]).
-export([
start_server/1,
start_server/2,
@ -62,6 +68,7 @@
-define(UID(Id, Type), {Id, Type}).
-define(TAB, emqx_limiter_counters).
-define(ROOT_ID, root).
%%--------------------------------------------------------------------
%% API
@ -104,9 +111,25 @@ insert_bucket(Id, Type, Bucket) ->
).
-spec delete_bucket(limiter_id(), limiter_type()) -> true.
delete_bucket(Type, Id) ->
delete_bucket(Id, Type) ->
ets:delete(?TAB, ?UID(Id, Type)).
-spec find_root(limiter_type()) ->
{ok, bucket_ref()} | undefined.
find_root(Type) ->
find_bucket(?ROOT_ID, Type).
-spec insert_root(
limiter_type(),
bucket_ref()
) -> boolean().
insert_root(Type, Bucket) ->
insert_bucket(?ROOT_ID, Type, Bucket).
-spec delete_root(limiter_type()) -> true.
delete_root(Type) ->
delete_bucket(?ROOT_ID, Type).
post_config_update([limiter], _Config, NewConf, _OldConf, _AppEnvs) ->
Types = lists:delete(client, maps:keys(NewConf)),
_ = [on_post_config_update(Type, NewConf) || Type <- Types],

View File

@ -32,15 +32,17 @@
get_bucket_cfg_path/2,
desc/1,
types/0,
calc_capacity/1
calc_capacity/1,
extract_with_type/2,
default_client_config/0
]).
-define(KILOBYTE, 1024).
-define(BUCKET_KEYS, [
{bytes, bucket_infinity},
{messages, bucket_infinity},
{connection, bucket_limit},
{message_routing, bucket_infinity}
-define(LISTENER_BUCKET_KEYS, [
bytes,
messages,
connection,
message_routing
]).
-type limiter_type() ::
@ -94,30 +96,33 @@
namespace() -> limiter.
roots() ->
[{limiter, hoconsc:mk(hoconsc:ref(?MODULE, limiter), #{importance => ?IMPORTANCE_HIDDEN})}].
[
{limiter,
hoconsc:mk(hoconsc:ref(?MODULE, limiter), #{
importance => ?IMPORTANCE_HIDDEN
})}
].
fields(limiter) ->
[
{Type,
?HOCON(?R_REF(node_opts), #{
desc => ?DESC(Type),
default => #{},
importance => ?IMPORTANCE_HIDDEN,
aliases => alias_of_type(Type)
})}
|| Type <- types()
] ++
[
%% This is an undocumented feature, and it won't be support anymore
{client,
?HOCON(
?R_REF(client_fields),
#{
desc => ?DESC(client),
importance => ?IMPORTANCE_HIDDEN,
default => maps:from_list([
{erlang:atom_to_binary(Type), #{}}
|| Type <- types()
])
required => {false, recursively},
deprecated => {since, "5.0.25"}
}
)}
];
@ -131,11 +136,9 @@ fields(node_opts) ->
})}
];
fields(client_fields) ->
client_fields(types(), #{default => #{}});
fields(bucket_infinity) ->
client_fields(types());
fields(bucket_opts) ->
fields_of_bucket(<<"infinity">>);
fields(bucket_limit) ->
fields_of_bucket(<<"1000/s">>);
fields(client_opts) ->
[
{rate, ?HOCON(rate(), #{default => <<"infinity">>, desc => ?DESC(rate)})},
@ -194,10 +197,9 @@ fields(client_opts) ->
)}
];
fields(listener_fields) ->
composite_bucket_fields(?BUCKET_KEYS, listener_client_fields);
composite_bucket_fields(?LISTENER_BUCKET_KEYS, listener_client_fields);
fields(listener_client_fields) ->
{Types, _} = lists:unzip(?BUCKET_KEYS),
client_fields(Types, #{required => false});
client_fields(?LISTENER_BUCKET_KEYS);
fields(Type) ->
simple_bucket_field(Type).
@ -205,10 +207,8 @@ desc(limiter) ->
"Settings for the rate limiter.";
desc(node_opts) ->
"Settings for the limiter of the node level.";
desc(bucket_infinity) ->
desc(bucket_opts) ->
"Settings for the bucket.";
desc(bucket_limit) ->
desc(bucket_infinity);
desc(client_opts) ->
"Settings for the client in bucket level.";
desc(client_fields) ->
@ -241,6 +241,31 @@ calc_capacity(#{rate := infinity}) ->
calc_capacity(#{rate := Rate, burst := Burst}) ->
erlang:floor(1000 * Rate / default_period()) + Burst.
extract_with_type(_Type, undefined) ->
undefined;
extract_with_type(Type, #{client := ClientCfg} = BucketCfg) ->
BucketVal = maps:find(Type, BucketCfg),
ClientVal = maps:find(Type, ClientCfg),
merge_client_bucket(Type, ClientVal, BucketVal);
extract_with_type(Type, BucketCfg) ->
BucketVal = maps:find(Type, BucketCfg),
merge_client_bucket(Type, undefined, BucketVal).
%% Since the client configuration can be absent and be a undefined value,
%% but we must need some basic settings to control the behaviour of the limiter,
%% so here add this helper function to generate a default setting.
%% This is a temporary workaround until we found a better way to simplify.
default_client_config() ->
#{
rate => infinity,
initial => 0,
low_watermark => 0,
burst => 0,
divisible => false,
max_retry_time => timer:seconds(10),
failure_strategy => force
}.
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
@ -360,14 +385,14 @@ apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit).
%% A bucket with only one type
simple_bucket_field(Type) when is_atom(Type) ->
fields(bucket_infinity) ++
fields(bucket_opts) ++
[
{client,
?HOCON(
?R_REF(?MODULE, client_opts),
#{
desc => ?DESC(client),
required => false,
required => {false, recursively},
importance => importance_of_type(Type),
aliases => alias_of_type(Type)
}
@ -378,13 +403,13 @@ simple_bucket_field(Type) when is_atom(Type) ->
composite_bucket_fields(Types, ClientRef) ->
[
{Type,
?HOCON(?R_REF(?MODULE, Opts), #{
?HOCON(?R_REF(?MODULE, bucket_opts), #{
desc => ?DESC(?MODULE, Type),
required => false,
required => {false, recursively},
importance => importance_of_type(Type),
aliases => alias_of_type(Type)
})}
|| {Type, Opts} <- Types
|| Type <- Types
] ++
[
{client,
@ -392,7 +417,7 @@ composite_bucket_fields(Types, ClientRef) ->
?R_REF(?MODULE, ClientRef),
#{
desc => ?DESC(client),
required => false
required => {false, recursively}
}
)}
].
@ -415,11 +440,12 @@ fields_of_bucket(Default) ->
})}
].
client_fields(Types, Meta) ->
client_fields(Types) ->
[
{Type,
?HOCON(?R_REF(client_opts), Meta#{
?HOCON(?R_REF(client_opts), #{
desc => ?DESC(Type),
required => false,
importance => importance_of_type(Type),
aliases => alias_of_type(Type)
})}
@ -441,3 +467,12 @@ alias_of_type(bytes) ->
[bytes_in];
alias_of_type(_) ->
[].
merge_client_bucket(Type, {ok, ClientVal}, {ok, BucketVal}) ->
#{Type => BucketVal, client => #{Type => ClientVal}};
merge_client_bucket(Type, {ok, ClientVal}, _) ->
#{client => #{Type => ClientVal}};
merge_client_bucket(Type, _, {ok, BucketVal}) ->
#{Type => BucketVal};
merge_client_bucket(_, _, _) ->
undefined.

View File

@ -59,7 +59,8 @@
burst := rate(),
%% token generation interval(second)
period := pos_integer(),
produced := float()
produced := float(),
correction := emqx_limiter_decimal:zero_or_float()
}.
-type bucket() :: #{
@ -98,6 +99,7 @@
%% minimum coefficient for overloaded limiter
-define(OVERLOAD_MIN_ALLOC, 0.3).
-define(COUNTER_SIZE, 8).
-define(ROOT_COUNTER_IDX, 1).
-export_type([index/0]).
-import(emqx_limiter_decimal, [add/2, sub/2, mul/2, put_to_counter/3]).
@ -110,47 +112,24 @@
-spec connect(
limiter_id(),
limiter_type(),
bucket_name() | #{limiter_type() => bucket_name() | undefined}
hocons:config() | undefined
) ->
{ok, emqx_htb_limiter:limiter()} | {error, _}.
%% If no bucket path is set in config, there will be no limit
connect(_Id, _Type, undefined) ->
{ok, emqx_htb_limiter:make_infinity_limiter()};
%% undefined is the default situation, no limiter setting by default
connect(Id, Type, undefined) ->
create_limiter(Id, Type, undefined, undefined);
connect(Id, Type, #{rate := _} = Cfg) ->
create_limiter(Id, Type, maps:get(client, Cfg, undefined), Cfg);
connect(Id, Type, Cfg) ->
case find_limiter_cfg(Type, Cfg) of
{_ClientCfg, undefined, _NodeCfg} ->
{ok, emqx_htb_limiter:make_infinity_limiter()};
{#{rate := infinity}, #{rate := infinity}, #{rate := infinity}} ->
{ok, emqx_htb_limiter:make_infinity_limiter()};
{ClientCfg, #{rate := infinity}, #{rate := infinity}} ->
{ok,
emqx_htb_limiter:make_token_bucket_limiter(
ClientCfg, emqx_limiter_bucket_ref:infinity_bucket()
)};
{
#{rate := CliRate} = ClientCfg,
#{rate := BucketRate} = BucketCfg,
_
} ->
case emqx_limiter_manager:find_bucket(Id, Type) of
{ok, Bucket} ->
BucketSize = emqx_limiter_schema:calc_capacity(BucketCfg),
CliSize = emqx_limiter_schema:calc_capacity(ClientCfg),
{ok,
if
CliRate < BucketRate orelse CliSize < BucketSize ->
emqx_htb_limiter:make_token_bucket_limiter(ClientCfg, Bucket);
true ->
emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket)
end};
undefined ->
?SLOG(error, #{msg => "bucket_not_found", type => Type, id => Id}),
{error, invalid_bucket}
end
end.
create_limiter(
Id,
Type,
emqx_utils_maps:deep_get([client, Type], Cfg, undefined),
maps:get(Type, Cfg, undefined)
).
-spec add_bucket(limiter_id(), limiter_type(), hocons:config() | undefined) -> ok.
add_bucket(_Id, _Type, undefine) ->
add_bucket(_Id, _Type, undefined) ->
ok;
add_bucket(Id, Type, Cfg) ->
?CALL(Type, {add_bucket, Id, Cfg}).
@ -288,7 +267,8 @@ handle_info(Info, State) ->
Reason :: normal | shutdown | {shutdown, term()} | term(),
State :: term()
) -> any().
terminate(_Reason, _State) ->
terminate(_Reason, #{type := Type}) ->
emqx_limiter_manager:delete_root(Type),
ok.
%%--------------------------------------------------------------------
@ -343,10 +323,14 @@ oscillation(
oscillate(Interval),
Ordereds = get_ordered_buckets(Buckets),
{Alloced, Buckets2} = transverse(Ordereds, Flow, 0.0, Buckets),
maybe_burst(State#{
State2 = maybe_adjust_root_tokens(
State#{
buckets := Buckets2,
root := Root#{produced := Produced + Alloced}
}).
},
Alloced
),
maybe_burst(State2).
%% @doc horizontal spread
-spec transverse(
@ -419,6 +403,24 @@ get_ordered_buckets(Buckets) ->
Buckets
).
-spec maybe_adjust_root_tokens(state(), float()) -> state().
maybe_adjust_root_tokens(#{root := #{rate := infinity}} = State, _Alloced) ->
State;
maybe_adjust_root_tokens(#{root := #{rate := Rate}} = State, Alloced) when Alloced >= Rate ->
State;
maybe_adjust_root_tokens(#{root := #{rate := Rate} = Root, counter := Counter} = State, Alloced) ->
InFlow = Rate - Alloced,
Token = counters:get(Counter, ?ROOT_COUNTER_IDX),
case Token >= Rate of
true ->
State;
_ ->
Available = erlang:min(Rate - Token, InFlow),
{Inc, Root2} = emqx_limiter_correction:add(Available, Root),
counters:add(Counter, ?ROOT_COUNTER_IDX, Inc),
State#{root := Root2}
end.
-spec maybe_burst(state()) -> state().
maybe_burst(
#{
@ -482,12 +484,16 @@ init_tree(Type) when is_atom(Type) ->
Cfg = emqx:get_config([limiter, Type]),
init_tree(Type, Cfg).
init_tree(Type, Cfg) ->
init_tree(Type, #{rate := Rate} = Cfg) ->
Counter = counters:new(?COUNTER_SIZE, [write_concurrency]),
RootBucket = emqx_limiter_bucket_ref:new(Counter, ?ROOT_COUNTER_IDX, Rate),
emqx_limiter_manager:insert_root(Type, RootBucket),
#{
type => Type,
root => make_root(Cfg),
counter => counters:new(?COUNTER_SIZE, [write_concurrency]),
index => 0,
counter => Counter,
%% The first slot is reserved for the root
index => ?ROOT_COUNTER_IDX,
buckets => #{}
}.
@ -497,7 +503,8 @@ make_root(#{rate := Rate, burst := Burst}) ->
rate => Rate,
burst => Burst,
period => emqx_limiter_schema:default_period(),
produced => 0.0
produced => 0.0,
correction => 0
}.
do_add_bucket(_Id, #{rate := infinity}, #{root := #{rate := infinity}} = State) ->
@ -571,25 +578,61 @@ call(Type, Msg) ->
gen_server:call(Pid, Msg)
end.
find_limiter_cfg(Type, #{rate := _} = Cfg) ->
{find_client_cfg(Type, maps:get(client, Cfg, undefined)), Cfg, find_node_cfg(Type)};
find_limiter_cfg(Type, Cfg) ->
{
find_client_cfg(Type, emqx_utils_maps:deep_get([client, Type], Cfg, undefined)),
maps:get(Type, Cfg, undefined),
find_node_cfg(Type)
}.
create_limiter(Id, Type, #{rate := Rate} = ClientCfg, BucketCfg) when Rate =/= infinity ->
create_limiter_with_client(Id, Type, ClientCfg, BucketCfg);
create_limiter(Id, Type, _, BucketCfg) ->
create_limiter_without_client(Id, Type, BucketCfg).
find_client_cfg(Type, BucketCfg) ->
NodeCfg = emqx:get_config([limiter, client, Type], undefined),
merge_client_cfg(NodeCfg, BucketCfg).
%% create a limiter with the client-level configuration
create_limiter_with_client(Id, Type, ClientCfg, BucketCfg) ->
case find_referenced_bucket(Id, Type, BucketCfg) of
false ->
{ok, emqx_htb_limiter:make_local_limiter(ClientCfg, infinity)};
{ok, Bucket, RefCfg} ->
create_limiter_with_ref(Bucket, ClientCfg, RefCfg);
Error ->
Error
end.
merge_client_cfg(undefined, BucketCfg) ->
BucketCfg;
merge_client_cfg(NodeCfg, undefined) ->
NodeCfg;
merge_client_cfg(NodeCfg, BucketCfg) ->
maps:merge(NodeCfg, BucketCfg).
%% create a limiter only with the referenced configuration
create_limiter_without_client(Id, Type, BucketCfg) ->
case find_referenced_bucket(Id, Type, BucketCfg) of
false ->
{ok, emqx_htb_limiter:make_infinity_limiter()};
{ok, Bucket, RefCfg} ->
ClientCfg = emqx_limiter_schema:default_client_config(),
create_limiter_with_ref(Bucket, ClientCfg, RefCfg);
Error ->
Error
end.
find_node_cfg(Type) ->
emqx:get_config([limiter, Type], #{rate => infinity, burst => 0}).
create_limiter_with_ref(
Bucket,
#{rate := CliRate} = ClientCfg,
#{rate := RefRate}
) when CliRate < RefRate ->
{ok, emqx_htb_limiter:make_local_limiter(ClientCfg, Bucket)};
create_limiter_with_ref(Bucket, ClientCfg, _) ->
{ok, emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket)}.
%% this is a listener(server)-level reference
find_referenced_bucket(Id, Type, #{rate := Rate} = Cfg) when Rate =/= infinity ->
case emqx_limiter_manager:find_bucket(Id, Type) of
{ok, Bucket} ->
{ok, Bucket, Cfg};
_ ->
?SLOG(error, #{msg => "bucket not found", type => Type, id => Id}),
{error, invalid_bucket}
end;
%% this is a node-level reference
find_referenced_bucket(Id, Type, _) ->
case emqx:get_config([limiter, Type], undefined) of
#{rate := infinity} ->
false;
undefined ->
?SLOG(error, #{msg => "invalid limiter type", type => Type, id => Id}),
{error, invalid_bucket};
NodeCfg ->
{ok, Bucket} = emqx_limiter_manager:find_root(Type),
{ok, Bucket, NodeCfg}
end.

View File

@ -20,6 +20,7 @@
-elvis([{elvis_style, dont_repeat_yourself, #{min_complexity => 10000}}]).
-include("emqx_mqtt.hrl").
-include("emqx_schema.hrl").
-include("logger.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
@ -33,7 +34,8 @@
is_running/1,
current_conns/2,
max_conns/2,
id_example/0
id_example/0,
default_max_conn/0
]).
-export([
@ -61,8 +63,11 @@
-export([certs_dir/2]).
-endif.
-type listener_id() :: atom() | binary().
-define(CONF_KEY_PATH, [listeners, '?', '?']).
-define(TYPES_STRING, ["tcp", "ssl", "ws", "wss", "quic"]).
-define(MARK_DEL, ?TOMBSTONE_CONFIG_CHANGE_REQ).
-spec id_example() -> atom().
id_example() -> 'tcp:default'.
@ -105,19 +110,22 @@ do_list_raw() ->
format_raw_listeners({Type0, Conf}) ->
Type = binary_to_atom(Type0),
lists:map(
fun({LName, LConf0}) when is_map(LConf0) ->
lists:filtermap(
fun
({LName, LConf0}) when is_map(LConf0) ->
Bind = parse_bind(LConf0),
Running = is_running(Type, listener_id(Type, LName), LConf0#{bind => Bind}),
LConf1 = maps:remove(<<"authentication">>, LConf0),
LConf3 = maps:put(<<"running">>, Running, LConf1),
LConf2 = maps:put(<<"running">>, Running, LConf1),
CurrConn =
case Running of
true -> current_conns(Type, LName, Bind);
false -> 0
end,
LConf4 = maps:put(<<"current_connections">>, CurrConn, LConf3),
{Type0, LName, LConf4}
LConf = maps:put(<<"current_connections">>, CurrConn, LConf2),
{true, {Type0, LName, LConf}};
({_LName, _MarkDel}) ->
false
end,
maps:to_list(Conf)
).
@ -195,7 +203,7 @@ start() ->
ok = emqx_config_handler:add_handler(?CONF_KEY_PATH, ?MODULE),
foreach_listeners(fun start_listener/3).
-spec start_listener(atom()) -> ok | {error, term()}.
-spec start_listener(listener_id()) -> ok | {error, term()}.
start_listener(ListenerId) ->
apply_on_listener(ListenerId, fun start_listener/3).
@ -246,7 +254,7 @@ start_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
restart() ->
foreach_listeners(fun restart_listener/3).
-spec restart_listener(atom()) -> ok | {error, term()}.
-spec restart_listener(listener_id()) -> ok | {error, term()}.
restart_listener(ListenerId) ->
apply_on_listener(ListenerId, fun restart_listener/3).
@ -271,7 +279,7 @@ stop() ->
_ = emqx_config_handler:remove_handler(?CONF_KEY_PATH),
foreach_listeners(fun stop_listener/3).
-spec stop_listener(atom()) -> ok | {error, term()}.
-spec stop_listener(listener_id()) -> ok | {error, term()}.
stop_listener(ListenerId) ->
apply_on_listener(ListenerId, fun stop_listener/3).
@ -419,7 +427,9 @@ do_start_listener(quic, ListenerName, #{bind := Bind} = Opts) ->
end.
%% Update the listeners at runtime
pre_config_update([listeners, Type, Name], {create, NewConf}, undefined) ->
pre_config_update([listeners, Type, Name], {create, NewConf}, V) when
V =:= undefined orelse V =:= ?TOMBSTONE_VALUE
->
CertsDir = certs_dir(Type, Name),
{ok, convert_certs(CertsDir, NewConf)};
pre_config_update([listeners, _Type, _Name], {create, _NewConf}, _RawConf) ->
@ -434,6 +444,8 @@ pre_config_update([listeners, Type, Name], {update, Request}, RawConf) ->
pre_config_update([listeners, _Type, _Name], {action, _Action, Updated}, RawConf) ->
NewConf = emqx_utils_maps:deep_merge(RawConf, Updated),
{ok, NewConf};
pre_config_update([listeners, _Type, _Name], ?MARK_DEL, _RawConf) ->
{ok, ?TOMBSTONE_VALUE};
pre_config_update(_Path, _Request, RawConf) ->
{ok, RawConf}.
@ -441,13 +453,15 @@ post_config_update([listeners, Type, Name], {create, _Request}, NewConf, undefin
start_listener(Type, Name, NewConf);
post_config_update([listeners, Type, Name], {update, _Request}, NewConf, OldConf, _AppEnvs) ->
try_clear_ssl_files(certs_dir(Type, Name), NewConf, OldConf),
ok = maybe_unregister_ocsp_stapling_refresh(Type, Name, NewConf),
case NewConf of
#{enabled := true} -> restart_listener(Type, Name, {OldConf, NewConf});
_ -> ok
end;
post_config_update([listeners, _Type, _Name], '$remove', undefined, undefined, _AppEnvs) ->
ok;
post_config_update([listeners, Type, Name], '$remove', undefined, OldConf, _AppEnvs) ->
post_config_update([listeners, Type, Name], Op, _, OldConf, _AppEnvs) when
Op =:= ?MARK_DEL andalso is_map(OldConf)
->
ok = unregister_ocsp_stapling_refresh(Type, Name),
case stop_listener(Type, Name, OldConf) of
ok ->
_ = emqx_authentication:delete_chain(listener_id(Type, Name)),
@ -460,10 +474,18 @@ post_config_update([listeners, Type, Name], {action, _Action, _}, NewConf, OldCo
#{enabled := NewEnabled} = NewConf,
#{enabled := OldEnabled} = OldConf,
case {NewEnabled, OldEnabled} of
{true, true} -> restart_listener(Type, Name, {OldConf, NewConf});
{true, false} -> start_listener(Type, Name, NewConf);
{false, true} -> stop_listener(Type, Name, OldConf);
{false, false} -> stop_listener(Type, Name, OldConf)
{true, true} ->
ok = maybe_unregister_ocsp_stapling_refresh(Type, Name, NewConf),
restart_listener(Type, Name, {OldConf, NewConf});
{true, false} ->
ok = maybe_unregister_ocsp_stapling_refresh(Type, Name, NewConf),
start_listener(Type, Name, NewConf);
{false, true} ->
ok = unregister_ocsp_stapling_refresh(Type, Name),
stop_listener(Type, Name, OldConf);
{false, false} ->
ok = unregister_ocsp_stapling_refresh(Type, Name),
stop_listener(Type, Name, OldConf)
end;
post_config_update(_Path, _Request, _NewConf, _OldConf, _AppEnvs) ->
ok.
@ -472,7 +494,7 @@ esockd_opts(ListenerId, Type, Opts0) ->
Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0),
Limiter = limiter(Opts0),
Opts2 =
case maps:get(connection, Limiter, undefined) of
case emqx_limiter_schema:extract_with_type(connection, Limiter) of
undefined ->
Opts1;
BucketCfg ->
@ -601,6 +623,7 @@ format_bind(Bin) when is_binary(Bin) ->
listener_id(Type, ListenerName) ->
list_to_atom(lists:append([str(Type), ":", str(ListenerName)])).
-spec parse_listener_id(listener_id()) -> {ok, #{type => atom(), name => atom()}} | {error, term()}.
parse_listener_id(Id) ->
case string:split(str(Id), ":", leading) of
[Type, Name] ->
@ -616,7 +639,7 @@ zone(Opts) ->
maps:get(zone, Opts, undefined).
limiter(Opts) ->
maps:get(limiter, Opts, #{}).
maps:get(limiter, Opts, undefined).
add_limiter_bucket(Id, #{limiter := Limiter}) ->
maps:fold(
@ -813,3 +836,22 @@ inject_crl_config(
};
inject_crl_config(Conf) ->
Conf.
maybe_unregister_ocsp_stapling_refresh(
ssl = Type, Name, #{ssl_options := #{ocsp := #{enable_ocsp_stapling := false}}} = _Conf
) ->
unregister_ocsp_stapling_refresh(Type, Name),
ok;
maybe_unregister_ocsp_stapling_refresh(_Type, _Name, _Conf) ->
ok.
unregister_ocsp_stapling_refresh(Type, Name) ->
ListenerId = listener_id(Type, Name),
emqx_ocsp_cache:unregister_listener(ListenerId),
ok.
%% There is currently an issue with frontend
%% infinity is not a good value for it, so we use 5m for now
default_max_conn() ->
%% TODO: <<"infinity">>
5_000_000.

View File

@ -37,7 +37,6 @@
max_qos_allowed => emqx_types:qos(),
retain_available => boolean(),
wildcard_subscription => boolean(),
subscription_identifiers => boolean(),
shared_subscription => boolean(),
exclusive_subscription => boolean()
}.
@ -58,18 +57,17 @@
exclusive_subscription
]).
-define(DEFAULT_CAPS, #{
max_packet_size => ?MAX_PACKET_SIZE,
max_clientid_len => ?MAX_CLIENTID_LEN,
max_topic_alias => ?MAX_TOPIC_AlIAS,
max_topic_levels => ?MAX_TOPIC_LEVELS,
max_qos_allowed => ?QOS_2,
retain_available => true,
wildcard_subscription => true,
subscription_identifiers => true,
shared_subscription => true,
exclusive_subscription => false
}).
-define(DEFAULT_CAPS_KEYS, [
max_packet_size,
max_clientid_len,
max_topic_alias,
max_topic_levels,
max_qos_allowed,
retain_available,
wildcard_subscription,
shared_subscription,
exclusive_subscription
]).
-spec check_pub(
emqx_types:zone(),
@ -88,7 +86,7 @@ check_pub(Zone, Flags) when is_map(Flags) ->
error ->
Flags
end,
maps:with(?PUBCAP_KEYS, get_caps(Zone))
get_caps(?PUBCAP_KEYS, Zone)
).
do_check_pub(#{topic_levels := Levels}, #{max_topic_levels := Limit}) when
@ -111,7 +109,7 @@ do_check_pub(_Flags, _Caps) ->
) ->
ok_or_error(emqx_types:reason_code()).
check_sub(ClientInfo = #{zone := Zone}, Topic, SubOpts) ->
Caps = maps:with(?SUBCAP_KEYS, get_caps(Zone)),
Caps = get_caps(?SUBCAP_KEYS, Zone),
Flags = lists:foldl(
fun
(max_topic_levels, Map) ->
@ -152,10 +150,12 @@ do_check_sub(_Flags, _Caps, _, _) ->
ok.
get_caps(Zone) ->
lists:foldl(
fun({K, V}, Acc) ->
Acc#{K => emqx_config:get_zone_conf(Zone, [mqtt, K], V)}
end,
#{},
maps:to_list(?DEFAULT_CAPS)
get_caps(?DEFAULT_CAPS_KEYS, Zone).
get_caps(Keys, Zone) ->
maps:with(
Keys,
maps:merge(
emqx_config:get([mqtt]),
emqx_config:get_zone_conf(Zone, [mqtt])
)
).

View File

@ -30,6 +30,7 @@
sni_fun/2,
fetch_response/1,
register_listener/2,
unregister_listener/1,
inject_sni_fun/2
]).
@ -107,6 +108,9 @@ fetch_response(ListenerID) ->
register_listener(ListenerID, Opts) ->
gen_server:call(?MODULE, {register_listener, ListenerID, Opts}, ?CALL_TIMEOUT).
unregister_listener(ListenerID) ->
gen_server:cast(?MODULE, {unregister_listener, ListenerID}).
-spec inject_sni_fun(emqx_listeners:listener_id(), map()) -> map().
inject_sni_fun(ListenerID, Conf0) ->
SNIFun = emqx_const_v1:make_sni_fun(ListenerID),
@ -160,6 +164,18 @@ handle_call({register_listener, ListenerID, Conf}, _From, State0) ->
handle_call(Call, _From, State) ->
{reply, {error, {unknown_call, Call}}, State}.
handle_cast({unregister_listener, ListenerID}, State0) ->
State2 =
case maps:take(?REFRESH_TIMER(ListenerID), State0) of
error ->
State0;
{TRef, State1} ->
emqx_utils:cancel_timer(TRef),
State1
end,
State = maps:remove({refresh_interval, ListenerID}, State2),
?tp(ocsp_cache_listener_unregistered, #{listener_id => ListenerID}),
{noreply, State};
handle_cast(_Cast, State) ->
{noreply, State}.

View File

@ -23,6 +23,7 @@
-dialyzer(no_fail_call).
-elvis([{elvis_style, invalid_dynamic_call, disable}]).
-include("emqx_schema.hrl").
-include("emqx_authentication.hrl").
-include("emqx_access_control.hrl").
-include_lib("typerefl/include/types.hrl").
@ -42,7 +43,12 @@
-type ip_port() :: tuple() | integer().
-type cipher() :: map().
-type port_number() :: 1..65536.
-type server_parse_option() :: #{default_port => port_number(), no_port => boolean()}.
-type server_parse_option() :: #{
default_port => port_number(),
no_port => boolean(),
supported_schemes => [string()],
default_scheme => string()
}.
-type url() :: binary().
-type json_binary() :: binary().
@ -61,12 +67,19 @@
-typerefl_from_string({url/0, emqx_schema, to_url}).
-typerefl_from_string({json_binary/0, emqx_schema, to_json_binary}).
-type parsed_server() :: #{
hostname := string(),
port => port_number(),
scheme => string()
}.
-export([
validate_heap_size/1,
user_lookup_fun_tr/2,
validate_alarm_actions/1,
non_empty_string/1,
validations/0
validations/0,
naive_env_interpolation/1
]).
-export([qos/0]).
@ -99,6 +112,12 @@
convert_servers/2
]).
%% tombstone types
-export([
tombstone_map/2,
get_tombstone_map_value_type/1
]).
-behaviour(hocon_schema).
-reflect_type([
@ -776,41 +795,48 @@ fields("listeners") ->
[
{"tcp",
sc(
map(name, ref("mqtt_tcp_listener")),
tombstone_map(name, ref("mqtt_tcp_listener")),
#{
desc => ?DESC(fields_listeners_tcp),
converter => fun(X, _) ->
ensure_default_listener(X, tcp)
end,
required => {false, recursively}
}
)},
{"ssl",
sc(
map(name, ref("mqtt_ssl_listener")),
tombstone_map(name, ref("mqtt_ssl_listener")),
#{
desc => ?DESC(fields_listeners_ssl),
converter => fun(X, _) -> ensure_default_listener(X, ssl) end,
required => {false, recursively}
}
)},
{"ws",
sc(
map(name, ref("mqtt_ws_listener")),
tombstone_map(name, ref("mqtt_ws_listener")),
#{
desc => ?DESC(fields_listeners_ws),
converter => fun(X, _) -> ensure_default_listener(X, ws) end,
required => {false, recursively}
}
)},
{"wss",
sc(
map(name, ref("mqtt_wss_listener")),
tombstone_map(name, ref("mqtt_wss_listener")),
#{
desc => ?DESC(fields_listeners_wss),
converter => fun(X, _) -> ensure_default_listener(X, wss) end,
required => {false, recursively}
}
)},
{"quic",
sc(
map(name, ref("mqtt_quic_listener")),
tombstone_map(name, ref("mqtt_quic_listener")),
#{
desc => ?DESC(fields_listeners_quic),
converter => fun keep_default_tombstone/2,
required => {false, recursively}
}
)}
@ -821,7 +847,7 @@ fields("crl_cache") ->
%% same URL. If they had diverging timeout options, it would be
%% confusing.
[
{"refresh_interval",
{refresh_interval,
sc(
duration(),
#{
@ -829,7 +855,7 @@ fields("crl_cache") ->
desc => ?DESC("crl_cache_refresh_interval")
}
)},
{"http_timeout",
{http_timeout,
sc(
duration(),
#{
@ -837,7 +863,7 @@ fields("crl_cache") ->
desc => ?DESC("crl_cache_refresh_http_timeout")
}
)},
{"capacity",
{capacity,
sc(
pos_integer(),
#{
@ -909,15 +935,17 @@ fields("mqtt_quic_listener") ->
string(),
#{
%% TODO: deprecated => {since, "5.1.0"}
desc => ?DESC(fields_mqtt_quic_listener_certfile)
desc => ?DESC(fields_mqtt_quic_listener_certfile),
importance => ?IMPORTANCE_HIDDEN
}
)},
{"keyfile",
sc(
string(),
%% TODO: deprecated => {since, "5.1.0"}
#{
desc => ?DESC(fields_mqtt_quic_listener_keyfile)
%% TODO: deprecated => {since, "5.1.0"}
desc => ?DESC(fields_mqtt_quic_listener_keyfile),
importance => ?IMPORTANCE_HIDDEN
}
)},
{"ciphers", ciphers_schema(quic)},
@ -993,7 +1021,10 @@ fields("mqtt_quic_listener") ->
duration_ms(),
#{
default => 0,
desc => ?DESC(fields_mqtt_quic_listener_idle_timeout)
desc => ?DESC(fields_mqtt_quic_listener_idle_timeout),
%% TODO: deprecated => {since, "5.1.0"}
%% deprecated, use idle_timeout_ms instead
importance => ?IMPORTANCE_HIDDEN
}
)},
{"idle_timeout_ms",
@ -1007,7 +1038,10 @@ fields("mqtt_quic_listener") ->
duration_ms(),
#{
default => <<"10s">>,
desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout)
desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout),
%% TODO: deprecated => {since, "5.1.0"}
%% use handshake_idle_timeout_ms
importance => ?IMPORTANCE_HIDDEN
}
)},
{"handshake_idle_timeout_ms",
@ -1021,7 +1055,10 @@ fields("mqtt_quic_listener") ->
duration_ms(),
#{
default => 0,
desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval)
desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval),
%% TODO: deprecated => {since, "5.1.0"}
%% use keep_alive_interval_ms instead
importance => ?IMPORTANCE_HIDDEN
}
)},
{"keep_alive_interval_ms",
@ -1354,7 +1391,7 @@ fields("ssl_client_opts") ->
client_ssl_opts_schema(#{});
fields("ocsp") ->
[
{"enable_ocsp_stapling",
{enable_ocsp_stapling,
sc(
boolean(),
#{
@ -1362,7 +1399,7 @@ fields("ocsp") ->
desc => ?DESC("server_ssl_opts_schema_enable_ocsp_stapling")
}
)},
{"responder_url",
{responder_url,
sc(
url(),
#{
@ -1370,7 +1407,7 @@ fields("ocsp") ->
desc => ?DESC("server_ssl_opts_schema_ocsp_responder_url")
}
)},
{"issuer_pem",
{issuer_pem,
sc(
binary(),
#{
@ -1378,7 +1415,7 @@ fields("ocsp") ->
desc => ?DESC("server_ssl_opts_schema_ocsp_issuer_pem")
}
)},
{"refresh_interval",
{refresh_interval,
sc(
duration(),
#{
@ -1386,7 +1423,7 @@ fields("ocsp") ->
desc => ?DESC("server_ssl_opts_schema_ocsp_refresh_interval")
}
)},
{"refresh_http_timeout",
{refresh_http_timeout,
sc(
duration(),
#{
@ -1489,10 +1526,8 @@ fields("broker") ->
sc(
boolean(),
#{
%% TODO: deprecated => {since, "5.1.0"}
%% in favor of session message re-dispatch at termination
%% we will stop supporting dispatch acks for shared
%% subscriptions.
deprecated => {since, "5.1.0"},
importance => ?IMPORTANCE_HIDDEN,
default => false,
desc => ?DESC(broker_shared_dispatch_ack_enabled)
}
@ -1938,7 +1973,7 @@ base_listener(Bind) ->
sc(
hoconsc:union([infinity, pos_integer()]),
#{
default => <<"infinity">>,
default => emqx_listeners:default_max_conn(),
desc => ?DESC(base_listener_max_connections)
}
)},
@ -2314,12 +2349,12 @@ server_ssl_opts_schema(Defaults, IsRanchListener) ->
Field
|| not IsRanchListener,
Field <- [
{"gc_after_handshake",
{gc_after_handshake,
sc(boolean(), #{
default => false,
desc => ?DESC(server_ssl_opts_schema_gc_after_handshake)
})},
{"ocsp",
{ocsp,
sc(
ref("ocsp"),
#{
@ -2327,7 +2362,7 @@ server_ssl_opts_schema(Defaults, IsRanchListener) ->
validator => fun ocsp_inner_validator/1
}
)},
{"enable_crl_check",
{enable_crl_check,
sc(
boolean(),
#{
@ -2790,6 +2825,7 @@ authentication(Which) ->
hoconsc:mk(Type, #{
desc => Desc,
converter => fun ensure_array/2,
default => [],
importance => Importance
}).
@ -2898,7 +2934,7 @@ servers_validator(Opts, Required) ->
%% `no_port': by default it's `false', when set to `true',
%% a `throw' exception is raised if the port is found.
-spec parse_server(undefined | string() | binary(), server_parse_option()) ->
{string(), port_number()}.
undefined | parsed_server().
parse_server(Str, Opts) ->
case parse_servers(Str, Opts) of
undefined ->
@ -2912,7 +2948,7 @@ parse_server(Str, Opts) ->
%% @doc Parse comma separated `host[:port][,host[:port]]' endpoints
%% into a list of `{Host, Port}' tuples or just `Host' string.
-spec parse_servers(undefined | string() | binary(), server_parse_option()) ->
[{string(), port_number()}].
undefined | [parsed_server()].
parse_servers(undefined, _Opts) ->
%% should not parse 'undefined' as string,
%% not to throw exception either,
@ -2958,6 +2994,9 @@ split_host_port(Str) ->
do_parse_server(Str, Opts) ->
DefaultPort = maps:get(default_port, Opts, undefined),
NotExpectingPort = maps:get(no_port, Opts, false),
DefaultScheme = maps:get(default_scheme, Opts, undefined),
SupportedSchemes = maps:get(supported_schemes, Opts, []),
NotExpectingScheme = (not is_list(DefaultScheme)) andalso length(SupportedSchemes) =:= 0,
case is_integer(DefaultPort) andalso NotExpectingPort of
true ->
%% either provide a default port from schema,
@ -2966,22 +3005,129 @@ do_parse_server(Str, Opts) ->
false ->
ok
end,
case is_list(DefaultScheme) andalso (not lists:member(DefaultScheme, SupportedSchemes)) of
true ->
%% inconsistent schema
error("bad_schema");
false ->
ok
end,
%% do not split with space, there should be no space allowed between host and port
case string:tokens(Str, ":") of
[Hostname, Port] ->
Tokens = string:tokens(Str, ":"),
Context = #{
not_expecting_port => NotExpectingPort,
not_expecting_scheme => NotExpectingScheme,
default_port => DefaultPort,
default_scheme => DefaultScheme,
opts => Opts
},
check_server_parts(Tokens, Context).
check_server_parts([Scheme, "//" ++ Hostname, Port], Context) ->
#{
not_expecting_scheme := NotExpectingScheme,
not_expecting_port := NotExpectingPort,
opts := Opts
} = Context,
NotExpectingPort andalso throw("not_expecting_port_number"),
{check_hostname(Hostname), parse_port(Port)};
[Hostname] ->
NotExpectingScheme andalso throw("not_expecting_scheme"),
#{
scheme => check_scheme(Scheme, Opts),
hostname => check_hostname(Hostname),
port => parse_port(Port)
};
check_server_parts([Scheme, "//" ++ Hostname], Context) ->
#{
not_expecting_scheme := NotExpectingScheme,
not_expecting_port := NotExpectingPort,
default_port := DefaultPort,
opts := Opts
} = Context,
NotExpectingScheme andalso throw("not_expecting_scheme"),
case is_integer(DefaultPort) of
true ->
{check_hostname(Hostname), DefaultPort};
#{
scheme => check_scheme(Scheme, Opts),
hostname => check_hostname(Hostname),
port => DefaultPort
};
false when NotExpectingPort ->
check_hostname(Hostname);
#{
scheme => check_scheme(Scheme, Opts),
hostname => check_hostname(Hostname)
};
false ->
throw("missing_port_number")
end;
_ ->
throw("bad_host_port")
check_server_parts([Hostname, Port], Context) ->
#{
not_expecting_port := NotExpectingPort,
default_scheme := DefaultScheme
} = Context,
NotExpectingPort andalso throw("not_expecting_port_number"),
case is_list(DefaultScheme) of
false ->
#{
hostname => check_hostname(Hostname),
port => parse_port(Port)
};
true ->
#{
scheme => DefaultScheme,
hostname => check_hostname(Hostname),
port => parse_port(Port)
}
end;
check_server_parts([Hostname], Context) ->
#{
not_expecting_scheme := NotExpectingScheme,
not_expecting_port := NotExpectingPort,
default_port := DefaultPort,
default_scheme := DefaultScheme
} = Context,
case is_integer(DefaultPort) orelse NotExpectingPort of
true ->
ok;
false ->
throw("missing_port_number")
end,
case is_list(DefaultScheme) orelse NotExpectingScheme of
true ->
ok;
false ->
throw("missing_scheme")
end,
case {is_integer(DefaultPort), is_list(DefaultScheme)} of
{true, true} ->
#{
scheme => DefaultScheme,
hostname => check_hostname(Hostname),
port => DefaultPort
};
{true, false} ->
#{
hostname => check_hostname(Hostname),
port => DefaultPort
};
{false, true} ->
#{
scheme => DefaultScheme,
hostname => check_hostname(Hostname)
};
{false, false} ->
#{hostname => check_hostname(Hostname)}
end;
check_server_parts(_Tokens, _Context) ->
throw("bad_host_port").
check_scheme(Str, Opts) ->
SupportedSchemes = maps:get(supported_schemes, Opts, []),
IsSupported = lists:member(Str, SupportedSchemes),
case IsSupported of
true ->
Str;
false ->
throw("unsupported_scheme")
end.
check_hostname(Str) ->
@ -3084,3 +3230,138 @@ assert_required_field(Conf, Key, ErrorMessage) ->
_ ->
ok
end.
default_listener(tcp) ->
#{
<<"bind">> => <<"0.0.0.0:1883">>
};
default_listener(ws) ->
#{
<<"bind">> => <<"0.0.0.0:8083">>,
<<"websocket">> => #{<<"mqtt_path">> => <<"/mqtt">>}
};
default_listener(SSLListener) ->
%% The env variable is resolved in emqx_tls_lib by calling naive_env_interpolate
CertFile = fun(Name) ->
iolist_to_binary("${EMQX_ETC_DIR}/" ++ filename:join(["certs", Name]))
end,
SslOptions = #{
<<"cacertfile">> => CertFile(<<"cacert.pem">>),
<<"certfile">> => CertFile(<<"cert.pem">>),
<<"keyfile">> => CertFile(<<"key.pem">>)
},
case SSLListener of
ssl ->
#{
<<"bind">> => <<"0.0.0.0:8883">>,
<<"ssl_options">> => SslOptions
};
wss ->
#{
<<"bind">> => <<"0.0.0.0:8084">>,
<<"ssl_options">> => SslOptions,
<<"websocket">> => #{<<"mqtt_path">> => <<"/mqtt">>}
}
end.
%% @doc This function helps to perform a naive string interpolation which
%% only looks at the first segment of the string and tries to replace it.
%% For example
%% "$MY_FILE_PATH"
%% "${MY_FILE_PATH}"
%% "$ENV_VARIABLE/sub/path"
%% "${ENV_VARIABLE}/sub/path"
%% "${ENV_VARIABLE}\sub\path" # windows
%% This function returns undefined if the input is undefined
%% otherwise always return string.
naive_env_interpolation(undefined) ->
undefined;
naive_env_interpolation(Bin) when is_binary(Bin) ->
naive_env_interpolation(unicode:characters_to_list(Bin, utf8));
naive_env_interpolation("$" ++ Maybe = Original) ->
{Env, Tail} = split_path(Maybe),
case resolve_env(Env) of
{ok, Path} ->
filename:join([Path, Tail]);
error ->
Original
end;
naive_env_interpolation(Other) ->
Other.
split_path(Path) ->
split_path(Path, []).
split_path([], Acc) ->
{lists:reverse(Acc), []};
split_path([Char | Rest], Acc) when Char =:= $/ orelse Char =:= $\\ ->
{lists:reverse(Acc), string:trim(Rest, leading, "/\\")};
split_path([Char | Rest], Acc) ->
split_path(Rest, [Char | Acc]).
resolve_env(Name0) ->
Name = string:trim(Name0, both, "{}"),
Value = os:getenv(Name),
case Value =/= false andalso Value =/= "" of
true ->
{ok, Value};
false ->
special_env(Name)
end.
-ifdef(TEST).
%% when running tests, we need to mock the env variables
special_env("EMQX_ETC_DIR") ->
{ok, filename:join([code:lib_dir(emqx), etc])};
special_env("EMQX_LOG_DIR") ->
{ok, "log"};
special_env(_Name) ->
%% only in tests
error.
-else.
special_env(_Name) -> error.
-endif.
%% The tombstone atom.
tombstone() ->
?TOMBSTONE_TYPE.
%% Make a map type, the value of which is allowed to be 'marked_for_deletion'
%% 'marked_for_delition' is a special value which means the key is deleted.
%% This is used to support the 'delete' operation in configs,
%% since deleting the key would result in default value being used.
tombstone_map(Name, Type) ->
%% marked_for_deletion must be the last member of the union
%% because we need to first union member to populate the default values
map(Name, ?UNION([Type, ?TOMBSTONE_TYPE])).
%% inverse of mark_del_map
get_tombstone_map_value_type(Schema) ->
%% TODO: violation of abstraction, expose an API in hoconsc
%% hoconsc:map_value_type(Schema)
?MAP(_Name, Union) = hocon_schema:field_schema(Schema, type),
%% TODO: violation of abstraction, fix hoconsc:union_members/1
?UNION(Members) = Union,
Tombstone = tombstone(),
[Type, Tombstone] = hoconsc:union_members(Members),
Type.
%% Keep the 'default' tombstone, but delete others.
keep_default_tombstone(Map, _Opts) when is_map(Map) ->
maps:filter(
fun(Key, Value) ->
Key =:= <<"default">> orelse Value =/= ?TOMBSTONE_VALUE
end,
Map
);
keep_default_tombstone(Value, _Opts) ->
Value.
ensure_default_listener(undefined, ListenerType) ->
%% let the schema's default value do its job
#{<<"default">> => default_listener(ListenerType)};
ensure_default_listener(#{<<"default">> := _} = Map, _ListenerType) ->
keep_default_tombstone(Map, #{});
ensure_default_listener(Map, ListenerType) ->
NewMap = Map#{<<"default">> => default_listener(ListenerType)},
keep_default_tombstone(NewMap, #{}).

View File

@ -165,7 +165,7 @@ strategy(Group) ->
-spec ack_enabled() -> boolean().
ack_enabled() ->
emqx:get_config([broker, shared_dispatch_ack_enabled]).
emqx:get_config([broker, shared_dispatch_ack_enabled], false).
do_dispatch(SubPid, _Group, Topic, Msg, _Type) when SubPid =:= self() ->
%% Deadlock otherwise
@ -181,7 +181,7 @@ do_dispatch(SubPid, _Group, Topic, Msg, retry) ->
do_dispatch(SubPid, Group, Topic, Msg, fresh) ->
case ack_enabled() of
true ->
%% FIXME: replace with `emqx_shared_sub_proto:dispatch_with_ack' in 5.2
%% TODO: delete this clase after 5.1.0
do_dispatch_with_ack(SubPid, Group, Topic, Msg);
false ->
send(SubPid, Topic, {deliver, Topic, Msg})

View File

@ -309,19 +309,19 @@ ensure_ssl_files(Dir, SSL, Opts) ->
case ensure_ssl_file_key(SSL, RequiredKeys) of
ok ->
KeyPaths = ?SSL_FILE_OPT_PATHS ++ ?SSL_FILE_OPT_PATHS_A,
ensure_ssl_files(Dir, SSL, KeyPaths, Opts);
ensure_ssl_files_per_key(Dir, SSL, KeyPaths, Opts);
{error, _} = Error ->
Error
end.
ensure_ssl_files(_Dir, SSL, [], _Opts) ->
ensure_ssl_files_per_key(_Dir, SSL, [], _Opts) ->
{ok, SSL};
ensure_ssl_files(Dir, SSL, [KeyPath | KeyPaths], Opts) ->
ensure_ssl_files_per_key(Dir, SSL, [KeyPath | KeyPaths], Opts) ->
case
ensure_ssl_file(Dir, KeyPath, SSL, emqx_utils_maps:deep_get(KeyPath, SSL, undefined), Opts)
of
{ok, NewSSL} ->
ensure_ssl_files(Dir, NewSSL, KeyPaths, Opts);
ensure_ssl_files_per_key(Dir, NewSSL, KeyPaths, Opts);
{error, Reason} ->
{error, Reason#{which_options => [KeyPath]}}
end.
@ -472,7 +472,8 @@ hex_str(Bin) ->
iolist_to_binary([io_lib:format("~2.16.0b", [X]) || <<X:8>> <= Bin]).
%% @doc Returns 'true' when the file is a valid pem, otherwise {error, Reason}.
is_valid_pem_file(Path) ->
is_valid_pem_file(Path0) ->
Path = resolve_cert_path_for_read(Path0),
case file:read_file(Path) of
{ok, Pem} -> is_pem(Pem) orelse {error, not_pem};
{error, Reason} -> {error, Reason}
@ -513,10 +514,16 @@ do_drop_invalid_certs([KeyPath | KeyPaths], SSL) ->
to_server_opts(Type, Opts) ->
Versions = integral_versions(Type, maps:get(versions, Opts, undefined)),
Ciphers = integral_ciphers(Versions, maps:get(ciphers, Opts, undefined)),
Path = fun(Key) -> resolve_cert_path_for_read_strict(maps:get(Key, Opts, undefined)) end,
filter(
maps:to_list(Opts#{
keyfile => Path(keyfile),
certfile => Path(certfile),
cacertfile => Path(cacertfile),
ciphers => Ciphers,
versions => Versions
}).
})
).
%% @doc Convert hocon-checked tls client options (map()) to
%% proplist accepted by ssl library.
@ -530,11 +537,12 @@ to_client_opts(Opts) ->
to_client_opts(Type, Opts) ->
GetD = fun(Key, Default) -> fuzzy_map_get(Key, Opts, Default) end,
Get = fun(Key) -> GetD(Key, undefined) end,
Path = fun(Key) -> resolve_cert_path_for_read_strict(Get(Key)) end,
case GetD(enable, false) of
true ->
KeyFile = ensure_str(Get(keyfile)),
CertFile = ensure_str(Get(certfile)),
CAFile = ensure_str(Get(cacertfile)),
KeyFile = Path(keyfile),
CertFile = Path(certfile),
CAFile = Path(cacertfile),
Verify = GetD(verify, verify_none),
SNI = ensure_sni(Get(server_name_indication)),
Versions = integral_versions(Type, Get(versions)),
@ -556,6 +564,31 @@ to_client_opts(Type, Opts) ->
[]
end.
resolve_cert_path_for_read_strict(Path) ->
case resolve_cert_path_for_read(Path) of
undefined ->
undefined;
ResolvedPath ->
case filelib:is_regular(ResolvedPath) of
true ->
ResolvedPath;
false ->
PathToLog = ensure_str(Path),
LogData =
case PathToLog =:= ResolvedPath of
true ->
#{path => PathToLog};
false ->
#{path => PathToLog, resolved_path => ResolvedPath}
end,
?SLOG(error, LogData#{msg => "cert_file_not_found"}),
undefined
end
end.
resolve_cert_path_for_read(Path) ->
emqx_schema:naive_env_interpolation(Path).
filter([]) -> [];
filter([{_, undefined} | T]) -> filter(T);
filter([{_, ""} | T]) -> filter(T);

View File

@ -27,7 +27,7 @@ format(
#{level := debug, meta := Meta = #{trace_tag := Tag}, msg := Msg},
#{payload_encode := PEncode}
) ->
Time = calendar:system_time_to_rfc3339(erlang:system_time(second)),
Time = calendar:system_time_to_rfc3339(erlang:system_time(microsecond), [{unit, microsecond}]),
ClientId = to_iolist(maps:get(clientid, Meta, "")),
Peername = maps:get(peername, Meta, ""),
MetaBin = format_meta(Meta, PEncode),

View File

@ -90,7 +90,7 @@
listener :: {Type :: atom(), Name :: atom()},
%% Limiter
limiter :: maybe(container()),
limiter :: container(),
%% cache operation when overload
limiter_cache :: queue:queue(cache()),
@ -579,19 +579,21 @@ handle_timeout(TRef, TMsg, State) ->
list(any()),
state()
) -> state().
check_limiter(
_Needs,
Data,
WhenOk,
Msgs,
#state{limiter = infinity} = State
) ->
WhenOk(Data, Msgs, State);
check_limiter(
Needs,
Data,
WhenOk,
Msgs,
#state{
limiter = Limiter,
limiter_timer = LimiterTimer,
limiter_cache = Cache
} = State
#state{limiter_timer = undefined, limiter = Limiter} = State
) ->
case LimiterTimer of
undefined ->
case emqx_limiter_container:check_list(Needs, Limiter) of
{ok, Limiter2} ->
WhenOk(Data, Msgs, State#state{limiter = Limiter2});
@ -623,10 +625,15 @@ check_limiter(
{drop, Limiter2} ->
{ok, State#state{limiter = Limiter2}}
end;
_ ->
check_limiter(
Needs,
Data,
WhenOk,
_Msgs,
#state{limiter_cache = Cache} = State
) ->
New = #cache{need = Needs, data = Data, next = WhenOk},
State#state{limiter_cache = queue:in(New, Cache)}
end.
State#state{limiter_cache = queue:in(New, Cache)}.
-spec retry_limiter(state()) -> state().
retry_limiter(#state{limiter = Limiter} = State) ->

View File

@ -186,7 +186,7 @@ t_session_taken(_) ->
false
end
end,
6000
15_000
),
Publish(),

View File

@ -267,13 +267,14 @@ t_chan_info(_) ->
t_chan_caps(_) ->
?assertMatch(
#{
exclusive_subscription := false,
max_packet_size := 1048576,
max_clientid_len := 65535,
max_qos_allowed := 2,
max_topic_alias := 65535,
max_topic_levels := Level,
retain_available := true,
shared_subscription := true,
subscription_identifiers := true,
wildcard_subscription := true
} when is_integer(Level),
emqx_channel:caps(channel())

View File

@ -32,6 +32,7 @@
start_apps/3,
start_app/2,
stop_apps/1,
stop_apps/2,
reload/2,
app_path/2,
proj_root/0,
@ -55,12 +56,12 @@
is_tcp_server_available/2,
is_tcp_server_available/3,
load_config/2,
load_config/3,
not_wait_mqtt_payload/1,
read_schema_configs/2,
render_config_file/2,
wait_for/4,
wait_mqtt_payload/1
wait_mqtt_payload/1,
select_free_port/1
]).
-export([
@ -253,11 +254,20 @@ start_app(App, SpecAppConfig, Opts) ->
case application:ensure_all_started(App) of
{ok, _} ->
ok = ensure_dashboard_listeners_started(App),
ok = wait_for_app_processes(App),
ok;
{error, Reason} ->
error({failed_to_start_app, App, Reason})
end.
wait_for_app_processes(emqx_conf) ->
%% emqx_conf app has a gen_server which
%% initializes its state asynchronously
gen_server:call(emqx_cluster_rpc, dummy),
ok;
wait_for_app_processes(_) ->
ok.
app_conf_file(emqx_conf) -> "emqx.conf.all";
app_conf_file(App) -> atom_to_list(App) ++ ".conf".
@ -274,9 +284,9 @@ app_schema(App) ->
mustache_vars(App, Opts) ->
ExtraMustacheVars = maps:get(extra_mustache_vars, Opts, #{}),
Defaults = #{
node_cookie => atom_to_list(erlang:get_cookie()),
platform_data_dir => app_path(App, "data"),
platform_etc_dir => app_path(App, "etc"),
platform_log_dir => app_path(App, "log")
platform_etc_dir => app_path(App, "etc")
},
maps:merge(Defaults, ExtraMustacheVars).
@ -304,12 +314,21 @@ generate_config(SchemaModule, ConfigFile) when is_atom(SchemaModule) ->
-spec stop_apps(list()) -> ok.
stop_apps(Apps) ->
stop_apps(Apps, #{}).
stop_apps(Apps, Opts) ->
[application:stop(App) || App <- Apps ++ [emqx, ekka, mria, mnesia]],
ok = mria_mnesia:delete_schema(),
%% to avoid inter-suite flakiness
application:unset_env(emqx, init_config_load_done),
persistent_term:erase(?EMQX_AUTHENTICATION_SCHEMA_MODULE_PT_KEY),
emqx_config:erase_schema_mod_and_names(),
case Opts of
#{erase_all_configs := false} ->
%% FIXME: this means inter-suite or inter-test dependencies
ok;
_ ->
emqx_config:erase_all()
end,
ok = emqx_config:delete_override_conf_files(),
application:unset_env(emqx, local_override_conf_file),
application:unset_env(emqx, cluster_override_conf_file),
@ -478,18 +497,14 @@ copy_certs(emqx_conf, Dest0) ->
copy_certs(_, _) ->
ok.
load_config(SchemaModule, Config, Opts) ->
load_config(SchemaModule, Config) ->
ConfigBin =
case is_map(Config) of
true -> emqx_utils_json:encode(Config);
false -> Config
end,
ok = emqx_config:delete_override_conf_files(),
ok = emqx_config:init_load(SchemaModule, ConfigBin, Opts),
ok.
load_config(SchemaModule, Config) ->
load_config(SchemaModule, Config, #{raw_with_default => false}).
ok = emqx_config:init_load(SchemaModule, ConfigBin).
-spec is_all_tcp_servers_available(Servers) -> Result when
Servers :: [{Host, Port}],
@ -665,6 +680,7 @@ start_slave(Name, Opts) when is_map(Opts) ->
SlaveMod = maps:get(peer_mod, Opts, ct_slave),
Node = node_name(Name),
put_peer_mod(Node, SlaveMod),
Cookie = atom_to_list(erlang:get_cookie()),
DoStart =
fun() ->
case SlaveMod of
@ -676,7 +692,11 @@ start_slave(Name, Opts) when is_map(Opts) ->
{monitor_master, true},
{init_timeout, 20_000},
{startup_timeout, 20_000},
{erl_flags, erl_flags()}
{erl_flags, erl_flags()},
{env, [
{"HOCON_ENV_OVERRIDE_PREFIX", "EMQX_"},
{"EMQX_NODE__COOKIE", Cookie}
]}
]
);
slave ->
@ -1241,3 +1261,34 @@ get_or_spawn_janitor() ->
on_exit(Fun) ->
Janitor = get_or_spawn_janitor(),
ok = emqx_test_janitor:push_on_exit_callback(Janitor, Fun).
%%-------------------------------------------------------------------------------
%% Select a free transport port from the OS
%%-------------------------------------------------------------------------------
%% @doc get unused port from OS
-spec select_free_port(tcp | udp | ssl | quic) -> inets:port_number().
select_free_port(tcp) ->
select_free_port(gen_tcp, listen);
select_free_port(udp) ->
select_free_port(gen_udp, open);
select_free_port(ssl) ->
select_free_port(tcp);
select_free_port(quic) ->
select_free_port(udp).
select_free_port(GenModule, Fun) when
GenModule == gen_tcp orelse
GenModule == gen_udp
->
{ok, S} = GenModule:Fun(0, [{reuseaddr, true}]),
{ok, Port} = inet:port(S),
ok = GenModule:close(S),
case os:type() of
{unix, darwin} ->
%% in MacOS, still get address_in_use after close port
timer:sleep(500);
_ ->
skip
end,
ct:pal("Select free OS port: ~p", [Port]),
Port.

View File

@ -50,7 +50,6 @@ t_fill_default_values(_) ->
},
<<"route_batch_clean">> := false,
<<"session_locking_strategy">> := quorum,
<<"shared_dispatch_ack_enabled">> := false,
<<"shared_subscription_strategy">> := round_robin
}
},
@ -59,3 +58,22 @@ t_fill_default_values(_) ->
%% ensure JSON compatible
_ = emqx_utils_json:encode(WithDefaults),
ok.
t_init_load(_Config) ->
ConfFile = "./test_emqx.conf",
ok = file:write_file(ConfFile, <<"">>),
ExpectRootNames = lists:sort(hocon_schema:root_names(emqx_schema)),
emqx_config:erase_all(),
{ok, DeprecatedFile} = application:get_env(emqx, cluster_override_conf_file),
?assertEqual(false, filelib:is_regular(DeprecatedFile), DeprecatedFile),
%% Don't has deprecated file
ok = emqx_config:init_load(emqx_schema, [ConfFile]),
?assertEqual(ExpectRootNames, lists:sort(emqx_config:get_root_names())),
?assertMatch({ok, #{raw_config := 256}}, emqx:update_config([mqtt, max_topic_levels], 256)),
emqx_config:erase_all(),
%% Has deprecated file
ok = file:write_file(DeprecatedFile, <<"{}">>),
ok = emqx_config:init_load(emqx_schema, [ConfFile]),
?assertEqual(ExpectRootNames, lists:sort(emqx_config:get_root_names())),
?assertMatch({ok, #{raw_config := 128}}, emqx:update_config([mqtt, max_topic_levels], 128)),
ok = file:delete(DeprecatedFile).

View File

@ -38,8 +38,6 @@ init_per_suite(Config) ->
ok = meck:new(emqx_cm, [passthrough, no_history, no_link]),
ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end),
ok = meck:expect(emqx_cm, mark_channel_disconnected, fun(_) -> ok end),
%% Meck Limiter
ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
%% Meck Pd
ok = meck:new(emqx_pd, [passthrough, no_history, no_link]),
%% Meck Metrics
@ -67,7 +65,6 @@ end_per_suite(_Config) ->
ok = meck:unload(emqx_transport),
catch meck:unload(emqx_channel),
ok = meck:unload(emqx_cm),
ok = meck:unload(emqx_htb_limiter),
ok = meck:unload(emqx_pd),
ok = meck:unload(emqx_metrics),
ok = meck:unload(emqx_hooks),
@ -421,6 +418,14 @@ t_ensure_rate_limit(_) ->
{ok, [], State1} = emqx_connection:check_limiter([], [], WhenOk, [], st(#{limiter => Limiter})),
?assertEqual(Limiter, emqx_connection:info(limiter, State1)),
ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
ok = meck:expect(
emqx_htb_limiter,
make_infinity_limiter,
fun() -> non_infinity end
),
ok = meck:expect(
emqx_htb_limiter,
check,
@ -431,10 +436,10 @@ t_ensure_rate_limit(_) ->
[],
WhenOk,
[],
st(#{limiter => Limiter})
st(#{limiter => init_limiter()})
),
meck:unload(emqx_htb_limiter),
ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
?assertNotEqual(undefined, emqx_connection:info(limiter_timer, State2)).
t_activate_socket(_) ->
@ -495,6 +500,7 @@ t_get_conn_info(_) ->
end).
t_oom_shutdown(init, Config) ->
ok = snabbkaffe:stop(),
ok = snabbkaffe:start_trace(),
ok = meck:new(emqx_utils, [non_strict, passthrough, no_history, no_link]),
meck:expect(
@ -707,7 +713,14 @@ init_limiter() ->
limiter_cfg() ->
Cfg = bucket_cfg(),
Client = #{
Client = client_cfg(),
#{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}.
bucket_cfg() ->
#{rate => infinity, initial => 0, burst => 0}.
client_cfg() ->
#{
rate => infinity,
initial => 0,
burst => 0,
@ -715,11 +728,7 @@ limiter_cfg() ->
divisible => false,
max_retry_time => timer:seconds(5),
failure_strategy => force
},
#{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}.
bucket_cfg() ->
#{rate => infinity, initial => 0, burst => 0}.
}.
add_bucket() ->
Cfg = bucket_cfg(),

View File

@ -35,6 +35,7 @@ all() ->
init_per_suite(Config) ->
application:load(emqx),
{ok, _} = application:ensure_all_started(ssl),
emqx_config:save_schema_mod_and_names(emqx_schema),
emqx_common_test_helpers:boot_modules(all),
Config.
@ -328,7 +329,15 @@ drain_msgs() ->
clear_crl_cache() ->
%% reset the CRL cache
Ref = monitor(process, whereis(ssl_manager)),
exit(whereis(ssl_manager), kill),
receive
{'DOWN', Ref, process, _, _} ->
ok
after 1_000 ->
ct:fail("ssl_manager didn't die")
end,
ensure_ssl_manager_alive(),
ok.
force_cacertfile(Cacertfile) ->
@ -382,7 +391,6 @@ setup_crl_options(Config, #{is_cached := IsCached} = Opts) ->
false ->
%% ensure cache is empty
clear_crl_cache(),
ct:sleep(200),
ok
end,
drain_msgs(),
@ -459,6 +467,13 @@ of_kinds(Trace0, Kinds0) ->
Trace0
).
ensure_ssl_manager_alive() ->
?retry(
_Sleep0 = 200,
_Attempts0 = 50,
true = is_pid(whereis(ssl_manager))
).
%%--------------------------------------------------------------------
%% Test cases
%%--------------------------------------------------------------------

View File

@ -47,13 +47,14 @@ init_per_testcase(Case, Config) when
Case =:= t_max_conns_tcp; Case =:= t_current_conns_tcp
->
catch emqx_config_handler:stop(),
Port = emqx_common_test_helpers:select_free_port(tcp),
{ok, _} = emqx_config_handler:start_link(),
PrevListeners = emqx_config:get([listeners], #{}),
PureListeners = remove_default_limiter(PrevListeners),
PureListeners2 = PureListeners#{
tcp => #{
listener_test => #{
bind => {"127.0.0.1", 9999},
bind => {"127.0.0.1", Port},
max_connections => 4321,
limiter => #{}
}
@ -63,19 +64,20 @@ init_per_testcase(Case, Config) when
ok = emqx_listeners:start(),
[
{prev_listener_conf, PrevListeners}
{prev_listener_conf, PrevListeners},
{tcp_port, Port}
| Config
];
init_per_testcase(t_wss_conn, Config) ->
catch emqx_config_handler:stop(),
Port = emqx_common_test_helpers:select_free_port(ssl),
{ok, _} = emqx_config_handler:start_link(),
PrevListeners = emqx_config:get([listeners], #{}),
PureListeners = remove_default_limiter(PrevListeners),
PureListeners2 = PureListeners#{
wss => #{
listener_test => #{
bind => {{127, 0, 0, 1}, 9998},
bind => {{127, 0, 0, 1}, Port},
limiter => #{},
ssl_options => #{
cacertfile => ?CERTS_PATH("cacert.pem"),
@ -89,7 +91,8 @@ init_per_testcase(t_wss_conn, Config) ->
ok = emqx_listeners:start(),
[
{prev_listener_conf, PrevListeners}
{prev_listener_conf, PrevListeners},
{wss_port, Port}
| Config
];
init_per_testcase(_, Config) ->
@ -171,20 +174,30 @@ t_restart_listeners_with_hibernate_after_disabled(_Config) ->
ok = emqx_listeners:stop(),
emqx_config:put([listeners], OldLConf).
t_max_conns_tcp(_) ->
t_max_conns_tcp(Config) ->
%% Note: Using a string representation for the bind address like
%% "127.0.0.1" does not work
?assertEqual(4321, emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})).
?assertEqual(
4321,
emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, ?config(tcp_port, Config)})
).
t_current_conns_tcp(_) ->
?assertEqual(0, emqx_listeners:current_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})).
t_current_conns_tcp(Config) ->
?assertEqual(
0,
emqx_listeners:current_conns('tcp:listener_test', {
{127, 0, 0, 1}, ?config(tcp_port, Config)
})
).
t_wss_conn(_) ->
{ok, Socket} = ssl:connect({127, 0, 0, 1}, 9998, [{verify, verify_none}], 1000),
t_wss_conn(Config) ->
{ok, Socket} = ssl:connect(
{127, 0, 0, 1}, ?config(wss_port, Config), [{verify, verify_none}], 1000
),
ok = ssl:close(Socket).
t_quic_conn(Config) ->
Port = 24568,
Port = emqx_common_test_helpers:select_free_port(quic),
DataDir = ?config(data_dir, Config),
SSLOpts = #{
password => ?SERVER_KEY_PASSWORD,
@ -207,7 +220,7 @@ t_quic_conn(Config) ->
emqx_listeners:stop_listener(quic, ?FUNCTION_NAME, #{bind => Port}).
t_ssl_password_cert(Config) ->
Port = 24568,
Port = emqx_common_test_helpers:select_free_port(ssl),
DataDir = ?config(data_dir, Config),
SSLOptsPWD = #{
password => ?SERVER_KEY_PASSWORD,
@ -266,8 +279,7 @@ render_config_file() ->
mustache_vars() ->
[
{platform_data_dir, local_path(["data"])},
{platform_etc_dir, local_path(["etc"])},
{platform_log_dir, local_path(["log"])}
{platform_etc_dir, local_path(["etc"])}
].
generate_config() ->

View File

@ -22,7 +22,6 @@
-include_lib("eunit/include/eunit.hrl").
-define(LOGGER, emqx_logger).
-define(a, "a").
-define(SUPPORTED_LEVELS, [emergency, alert, critical, error, warning, notice, info, debug]).
all() -> emqx_common_test_helpers:all(?MODULE).

View File

@ -254,10 +254,15 @@ does_module_exist(Mod) ->
end.
assert_no_http_get() ->
Timeout = 0,
Error = should_be_cached,
assert_no_http_get(Timeout, Error).
assert_no_http_get(Timeout, Error) ->
receive
{http_get, _URL} ->
error(should_be_cached)
after 0 ->
error(Error)
after Timeout ->
ok
end.
@ -702,7 +707,9 @@ do_t_update_listener(Config) ->
%% the API converts that to an internally
%% managed file
<<"issuer_pem">> => IssuerPem,
<<"responder_url">> => <<"http://localhost:9877">>
<<"responder_url">> => <<"http://localhost:9877">>,
%% for quicker testing; min refresh in tests is 5 s.
<<"refresh_interval">> => <<"5s">>
}
}
},
@ -739,6 +746,70 @@ do_t_update_listener(Config) ->
)
),
assert_http_get(1, 5_000),
%% Disable OCSP Stapling; the periodic refreshes should stop
RefreshInterval = emqx_config:get([listeners, ssl, default, ssl_options, ocsp, refresh_interval]),
OCSPConfig1 =
#{
<<"ssl_options">> =>
#{
<<"ocsp">> =>
#{
<<"enable_ocsp_stapling">> => false
}
}
},
ListenerData3 = emqx_utils_maps:deep_merge(ListenerData2, OCSPConfig1),
{ok, {_, _, ListenerData4}} = update_listener_via_api(ListenerId, ListenerData3),
?assertMatch(
#{
<<"ssl_options">> :=
#{
<<"ocsp">> :=
#{
<<"enable_ocsp_stapling">> := false
}
}
},
ListenerData4
),
assert_no_http_get(2 * RefreshInterval, should_stop_refreshing),
ok.
t_double_unregister(_Config) ->
ListenerID = <<"ssl:test_ocsp">>,
Conf = emqx_config:get_listener_conf(ssl, test_ocsp, []),
?check_trace(
begin
{ok, {ok, _}} =
?wait_async_action(
emqx_ocsp_cache:register_listener(ListenerID, Conf),
#{?snk_kind := ocsp_http_fetch_and_cache, listener_id := ListenerID},
5_000
),
assert_http_get(1),
{ok, {ok, _}} =
?wait_async_action(
emqx_ocsp_cache:unregister_listener(ListenerID),
#{?snk_kind := ocsp_cache_listener_unregistered, listener_id := ListenerID},
5_000
),
%% Should be idempotent and not crash
{ok, {ok, _}} =
?wait_async_action(
emqx_ocsp_cache:unregister_listener(ListenerID),
#{?snk_kind := ocsp_cache_listener_unregistered, listener_id := ListenerID},
5_000
),
ok
end,
[]
),
ok.
t_ocsp_responder_error_responses(_Config) ->

View File

@ -2026,18 +2026,7 @@ stop_emqx() ->
%% select a random port picked by OS
-spec select_port() -> inet:port_number().
select_port() ->
{ok, S} = gen_udp:open(0, [{reuseaddr, true}]),
{ok, {_, Port}} = inet:sockname(S),
gen_udp:close(S),
case os:type() of
{unix, darwin} ->
%% in MacOS, still get address_in_use after close port
timer:sleep(500);
_ ->
skip
end,
ct:pal("select port: ~p", [Port]),
Port.
emqx_common_test_helpers:select_free_port(quic).
-spec via_stream({quic, quicer:connection_handle(), quicer:stream_handle()}) ->
quicer:stream_handle().

View File

@ -38,6 +38,7 @@
-define(LOGT(Format, Args), ct:pal("TEST_SUITE: " ++ Format, Args)).
-define(RATE(Rate), to_rate(Rate)).
-define(NOW, erlang:system_time(millisecond)).
-define(ROOT_COUNTER_IDX, 1).
%%--------------------------------------------------------------------
%% Setups
@ -211,11 +212,11 @@ t_infinity_client(_) ->
end,
with_per_client(Fun, Case).
t_try_restore_agg(_) ->
t_try_restore_with_bucket(_) ->
Fun = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{
rate := 1,
burst := 199,
rate := 100,
burst := 100,
initial := 50
},
Cli2 = Cli#{
@ -394,38 +395,6 @@ t_burst(_) ->
Case
).
t_limit_global_with_unlimit_other(_) ->
GlobalMod = fun(#{message_routing := MR} = Cfg) ->
Cfg#{message_routing := MR#{rate := ?RATE("600/1s")}}
end,
Bucket = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{
rate := infinity,
initial := 0,
burst := 0
},
Cli2 = Cli#{
rate := infinity,
burst := 0,
initial := 0
},
Bucket2#{client := Cli2}
end,
Case = fun() ->
C1 = counters:new(1, []),
start_client({b1, Bucket}, ?NOW + 2000, C1, 20),
timer:sleep(2200),
check_average_rate(C1, 2, 600)
end,
with_global(
GlobalMod,
[{b1, Bucket}],
Case
).
%%--------------------------------------------------------------------
%% Test Cases container
%%--------------------------------------------------------------------
@ -454,38 +423,6 @@ t_check_container(_) ->
end,
with_per_client(Cfg, Case).
%%--------------------------------------------------------------------
%% Test Override
%%--------------------------------------------------------------------
t_bucket_no_client(_) ->
Rate = ?RATE("1/s"),
GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) ->
Cfg#{client := Client#{message_routing := MR#{rate := Rate}}}
end,
BucketMod = fun(Bucket) ->
maps:remove(client, Bucket)
end,
Case = fun() ->
Limiter = connect(BucketMod(make_limiter_cfg())),
?assertMatch(#{rate := Rate}, Limiter)
end,
with_global(GlobalMod, [BucketMod], Case).
t_bucket_client(_) ->
GlobalRate = ?RATE("1/s"),
BucketRate = ?RATE("10/s"),
GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) ->
Cfg#{client := Client#{message_routing := MR#{rate := GlobalRate}}}
end,
BucketMod = fun(#{client := Client} = Bucket) ->
Bucket#{client := Client#{rate := BucketRate}}
end,
Case = fun() ->
Limiter = connect(BucketMod(make_limiter_cfg())),
?assertMatch(#{rate := BucketRate}, Limiter)
end,
with_global(GlobalMod, [BucketMod], Case).
%%--------------------------------------------------------------------
%% Test Cases misc
%%--------------------------------------------------------------------
@ -574,7 +511,7 @@ t_schema_unit(_) ->
?assertEqual({ok, 100 * 1024 * 1024 * 1024}, M:to_capacity("100GB")),
ok.
compatibility_for_capacity(_) ->
t_compatibility_for_capacity(_) ->
CfgStr = <<
""
"\n"
@ -594,7 +531,7 @@ compatibility_for_capacity(_) ->
parse_and_check(CfgStr)
).
compatibility_for_message_in(_) ->
t_compatibility_for_message_in(_) ->
CfgStr = <<
""
"\n"
@ -614,7 +551,7 @@ compatibility_for_message_in(_) ->
parse_and_check(CfgStr)
).
compatibility_for_bytes_in(_) ->
t_compatibility_for_bytes_in(_) ->
CfgStr = <<
""
"\n"
@ -634,6 +571,174 @@ compatibility_for_bytes_in(_) ->
parse_and_check(CfgStr)
).
t_extract_with_type(_) ->
IsOnly = fun
(_Key, Cfg) when map_size(Cfg) =/= 1 ->
false;
(Key, Cfg) ->
maps:is_key(Key, Cfg)
end,
Checker = fun
(Type, #{client := Client} = Cfg) ->
Cfg2 = maps:remove(client, Cfg),
IsOnly(Type, Client) andalso
(IsOnly(Type, Cfg2) orelse
map_size(Cfg2) =:= 0);
(Type, Cfg) ->
IsOnly(Type, Cfg)
end,
?assertEqual(undefined, emqx_limiter_schema:extract_with_type(messages, undefined)),
?assert(
Checker(
messages,
emqx_limiter_schema:extract_with_type(messages, #{
messages => #{rate => 1}, bytes => #{rate => 1}
})
)
),
?assert(
Checker(
messages,
emqx_limiter_schema:extract_with_type(messages, #{
messages => #{rate => 1},
bytes => #{rate => 1},
client => #{messages => #{rate => 2}}
})
)
),
?assert(
Checker(
messages,
emqx_limiter_schema:extract_with_type(messages, #{
client => #{messages => #{rate => 2}, bytes => #{rate => 1}}
})
)
).
%%--------------------------------------------------------------------
%% Test Cases Create Instance
%%--------------------------------------------------------------------
t_create_instance_with_infinity_node(_) ->
emqx_limiter_manager:insert_bucket(?FUNCTION_NAME, bytes, ?FUNCTION_NAME),
Cases = make_create_test_data_with_infinity_node(?FUNCTION_NAME),
lists:foreach(
fun({Cfg, Expected}) ->
{ok, Result} = emqx_limiter_server:connect(?FUNCTION_NAME, bytes, Cfg),
IsMatched =
case is_atom(Expected) of
true ->
Result =:= Expected;
_ ->
Expected(Result)
end,
?assert(
IsMatched,
lists:flatten(
io_lib:format("Got unexpected:~p~n, Cfg:~p~n", [
Result, Cfg
])
)
)
end,
Cases
),
emqx_limiter_manager:delete_bucket(?FUNCTION_NAME, bytes),
ok.
t_not_exists_instance(_) ->
Cfg = #{bytes => #{rate => 100, burst => 0, initial => 0}},
?assertEqual(
{error, invalid_bucket},
emqx_limiter_server:connect(?FUNCTION_NAME, bytes, Cfg)
),
?assertEqual(
{error, invalid_bucket},
emqx_limiter_server:connect(?FUNCTION_NAME, not_exists, Cfg)
),
ok.
t_create_instance_with_node(_) ->
GlobalMod = fun(#{message_routing := MR} = Cfg) ->
Cfg#{
message_routing := MR#{rate := ?RATE("200/1s")},
messages := MR#{rate := ?RATE("200/1s")}
}
end,
B1 = fun(Bucket) ->
Bucket#{rate := ?RATE("400/1s")}
end,
B2 = fun(Bucket) ->
Bucket#{rate := infinity}
end,
IsRefLimiter = fun
({ok, #{tokens := _}}, _IsRoot) ->
false;
({ok, #{bucket := #{index := ?ROOT_COUNTER_IDX}}}, true) ->
true;
({ok, #{bucket := #{index := Index}}}, false) when Index =/= ?ROOT_COUNTER_IDX ->
true;
(Result, _IsRoot) ->
ct:pal("The result is:~p~n", [Result]),
false
end,
Case = fun() ->
BucketCfg = make_limiter_cfg(),
?assert(
IsRefLimiter(emqx_limiter_server:connect(b1, message_routing, B1(BucketCfg)), false)
),
?assert(
IsRefLimiter(emqx_limiter_server:connect(b2, message_routing, B2(BucketCfg)), true)
),
?assert(IsRefLimiter(emqx_limiter_server:connect(x, messages, undefined), true)),
?assertNot(IsRefLimiter(emqx_limiter_server:connect(x, bytes, undefined), false))
end,
with_global(
GlobalMod,
[{b1, B1}, {b2, B2}],
Case
),
ok.
%%--------------------------------------------------------------------
%% Test Cases emqx_esockd_htb_limiter
%%--------------------------------------------------------------------
t_create_esockd_htb_limiter(_) ->
Opts = emqx_esockd_htb_limiter:new_create_options(?FUNCTION_NAME, bytes, undefined),
?assertMatch(
#{module := _, id := ?FUNCTION_NAME, type := bytes, bucket := undefined},
Opts
),
Limiter = emqx_esockd_htb_limiter:create(Opts),
?assertMatch(
#{module := _, name := bytes, limiter := infinity},
Limiter
),
?assertEqual(ok, emqx_esockd_htb_limiter:delete(Limiter)),
ok.
t_esockd_htb_consume(_) ->
ClientCfg = emqx_limiter_schema:default_client_config(),
Cfg = #{client => #{bytes => ClientCfg#{rate := 50, max_retry_time := 0}}},
Opts = emqx_esockd_htb_limiter:new_create_options(?FUNCTION_NAME, bytes, Cfg),
Limiter = emqx_esockd_htb_limiter:create(Opts),
C1R = emqx_esockd_htb_limiter:consume(51, Limiter),
?assertMatch({pause, _Ms, _Limiter2}, C1R),
timer:sleep(300),
C2R = emqx_esockd_htb_limiter:consume(50, Limiter),
?assertMatch({ok, _}, C2R),
ok.
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
@ -877,3 +982,64 @@ apply_modifier(Pairs, #{default := Template}) ->
parse_and_check(ConfigString) ->
ok = emqx_common_test_helpers:load_config(emqx_schema, ConfigString),
emqx:get_config([listeners, tcp, default, limiter]).
make_create_test_data_with_infinity_node(FakeInstnace) ->
Infinity = emqx_htb_limiter:make_infinity_limiter(),
ClientCfg = emqx_limiter_schema:default_client_config(),
InfinityRef = emqx_limiter_bucket_ref:infinity_bucket(),
MkC = fun(Rate) ->
#{client => #{bytes => ClientCfg#{rate := Rate}}}
end,
MkB = fun(Rate) ->
#{bytes => #{rate => Rate, burst => 0, initial => 0}}
end,
MkA = fun(Client, Bucket) ->
maps:merge(MkC(Client), MkB(Bucket))
end,
IsRefLimiter = fun(Expected) ->
fun
(#{tokens := _}) -> false;
(#{bucket := Bucket}) -> Bucket =:= Expected;
(_) -> false
end
end,
IsTokenLimiter = fun(Expected) ->
fun
(#{tokens := _, bucket := Bucket}) -> Bucket =:= Expected;
(_) -> false
end
end,
[
%% default situation, no limiter setting
{undefined, Infinity},
%% client = undefined bucket = undefined
{#{}, Infinity},
%% client = undefined bucket = infinity
{MkB(infinity), Infinity},
%% client = undefined bucket = other
{MkB(100), IsRefLimiter(FakeInstnace)},
%% client = infinity bucket = undefined
{MkC(infinity), Infinity},
%% client = infinity bucket = infinity
{MkA(infinity, infinity), Infinity},
%% client = infinity bucket = other
{MkA(infinity, 100), IsRefLimiter(FakeInstnace)},
%% client = other bucket = undefined
{MkC(100), IsTokenLimiter(InfinityRef)},
%% client = other bucket = infinity
{MkC(100), IsTokenLimiter(InfinityRef)},
%% client = C bucket = B C < B
{MkA(100, 1000), IsTokenLimiter(FakeInstnace)},
%% client = C bucket = B C > B
{MkA(1000, 100), IsRefLimiter(FakeInstnace)}
].

View File

@ -219,112 +219,124 @@ parse_server_test_() ->
?T(
"single server, binary, no port",
?assertEqual(
[{"localhost", DefaultPort}],
[#{hostname => "localhost", port => DefaultPort}],
Parse(<<"localhost">>)
)
),
?T(
"single server, string, no port",
?assertEqual(
[{"localhost", DefaultPort}],
[#{hostname => "localhost", port => DefaultPort}],
Parse("localhost")
)
),
?T(
"single server, list(string), no port",
?assertEqual(
[{"localhost", DefaultPort}],
[#{hostname => "localhost", port => DefaultPort}],
Parse(["localhost"])
)
),
?T(
"single server, list(binary), no port",
?assertEqual(
[{"localhost", DefaultPort}],
[#{hostname => "localhost", port => DefaultPort}],
Parse([<<"localhost">>])
)
),
?T(
"single server, binary, with port",
?assertEqual(
[{"localhost", 9999}],
[#{hostname => "localhost", port => 9999}],
Parse(<<"localhost:9999">>)
)
),
?T(
"single server, list(string), with port",
?assertEqual(
[{"localhost", 9999}],
[#{hostname => "localhost", port => 9999}],
Parse(["localhost:9999"])
)
),
?T(
"single server, string, with port",
?assertEqual(
[{"localhost", 9999}],
[#{hostname => "localhost", port => 9999}],
Parse("localhost:9999")
)
),
?T(
"single server, list(binary), with port",
?assertEqual(
[{"localhost", 9999}],
[#{hostname => "localhost", port => 9999}],
Parse([<<"localhost:9999">>])
)
),
?T(
"multiple servers, string, no port",
?assertEqual(
[{"host1", DefaultPort}, {"host2", DefaultPort}],
[
#{hostname => "host1", port => DefaultPort},
#{hostname => "host2", port => DefaultPort}
],
Parse("host1, host2")
)
),
?T(
"multiple servers, binary, no port",
?assertEqual(
[{"host1", DefaultPort}, {"host2", DefaultPort}],
[
#{hostname => "host1", port => DefaultPort},
#{hostname => "host2", port => DefaultPort}
],
Parse(<<"host1, host2,,,">>)
)
),
?T(
"multiple servers, list(string), no port",
?assertEqual(
[{"host1", DefaultPort}, {"host2", DefaultPort}],
[
#{hostname => "host1", port => DefaultPort},
#{hostname => "host2", port => DefaultPort}
],
Parse(["host1", "host2"])
)
),
?T(
"multiple servers, list(binary), no port",
?assertEqual(
[{"host1", DefaultPort}, {"host2", DefaultPort}],
[
#{hostname => "host1", port => DefaultPort},
#{hostname => "host2", port => DefaultPort}
],
Parse([<<"host1">>, <<"host2">>])
)
),
?T(
"multiple servers, string, with port",
?assertEqual(
[{"host1", 1234}, {"host2", 2345}],
[#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
Parse("host1:1234, host2:2345")
)
),
?T(
"multiple servers, binary, with port",
?assertEqual(
[{"host1", 1234}, {"host2", 2345}],
[#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
Parse(<<"host1:1234, host2:2345, ">>)
)
),
?T(
"multiple servers, list(string), with port",
?assertEqual(
[{"host1", 1234}, {"host2", 2345}],
[#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
Parse([" host1:1234 ", "host2:2345"])
)
),
?T(
"multiple servers, list(binary), with port",
?assertEqual(
[{"host1", 1234}, {"host2", 2345}],
[#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
Parse([<<"host1:1234">>, <<"host2:2345">>])
)
),
@ -350,9 +362,9 @@ parse_server_test_() ->
)
),
?T(
"multiple servers wihtout port, mixed list(binary|string)",
"multiple servers without port, mixed list(binary|string)",
?assertEqual(
["host1", "host2"],
[#{hostname => "host1"}, #{hostname => "host2"}],
Parse2([<<"host1">>, "host2"], #{no_port => true})
)
),
@ -394,14 +406,18 @@ parse_server_test_() ->
?T(
"single server map",
?assertEqual(
[{"host1.domain", 1234}],
[#{hostname => "host1.domain", port => 1234}],
HoconParse("host1.domain:1234")
)
),
?T(
"multiple servers map",
?assertEqual(
[{"host1.domain", 1234}, {"host2.domain", 2345}, {"host3.domain", 3456}],
[
#{hostname => "host1.domain", port => 1234},
#{hostname => "host2.domain", port => 2345},
#{hostname => "host3.domain", port => 3456}
],
HoconParse("host1.domain:1234,host2.domain:2345,host3.domain:3456")
)
),
@ -447,6 +463,171 @@ parse_server_test_() ->
"bad_schema",
emqx_schema:parse_server("whatever", #{default_port => 10, no_port => true})
)
),
?T(
"scheme, hostname and port",
?assertEqual(
#{scheme => "pulsar+ssl", hostname => "host", port => 6651},
emqx_schema:parse_server(
"pulsar+ssl://host:6651",
#{
default_port => 6650,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"scheme and hostname, default port",
?assertEqual(
#{scheme => "pulsar", hostname => "host", port => 6650},
emqx_schema:parse_server(
"pulsar://host",
#{
default_port => 6650,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"scheme and hostname, no port",
?assertEqual(
#{scheme => "pulsar", hostname => "host"},
emqx_schema:parse_server(
"pulsar://host",
#{
no_port => true,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"scheme and hostname, missing port",
?assertThrow(
"missing_port_number",
emqx_schema:parse_server(
"pulsar://host",
#{
no_port => false,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"hostname, default scheme, no default port",
?assertEqual(
#{scheme => "pulsar", hostname => "host"},
emqx_schema:parse_server(
"host",
#{
default_scheme => "pulsar",
no_port => true,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"hostname, default scheme, default port",
?assertEqual(
#{scheme => "pulsar", hostname => "host", port => 6650},
emqx_schema:parse_server(
"host",
#{
default_port => 6650,
default_scheme => "pulsar",
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"just hostname, expecting missing scheme",
?assertThrow(
"missing_scheme",
emqx_schema:parse_server(
"host",
#{
no_port => true,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"hostname, default scheme, defined port",
?assertEqual(
#{scheme => "pulsar", hostname => "host", port => 6651},
emqx_schema:parse_server(
"host:6651",
#{
default_port => 6650,
default_scheme => "pulsar",
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"inconsistent scheme opts",
?assertError(
"bad_schema",
emqx_schema:parse_server(
"pulsar+ssl://host:6651",
#{
default_port => 6650,
default_scheme => "something",
supported_schemes => ["not", "supported"]
}
)
)
),
?T(
"hostname, default scheme, defined port",
?assertEqual(
#{scheme => "pulsar", hostname => "host", port => 6651},
emqx_schema:parse_server(
"host:6651",
#{
default_port => 6650,
default_scheme => "pulsar",
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"unsupported scheme",
?assertThrow(
"unsupported_scheme",
emqx_schema:parse_server(
"pulsar+quic://host:6651",
#{
default_port => 6650,
supported_schemes => ["pulsar"]
}
)
)
),
?T(
"multiple hostnames with schemes (1)",
?assertEqual(
[
#{scheme => "pulsar", hostname => "host", port => 6649},
#{scheme => "pulsar+ssl", hostname => "other.host", port => 6651},
#{scheme => "pulsar", hostname => "yet.another", port => 6650}
],
emqx_schema:parse_servers(
"pulsar://host:6649, pulsar+ssl://other.host:6651,pulsar://yet.another",
#{
default_port => 6650,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
)
].
@ -513,3 +694,81 @@ url_type_test_() ->
typerefl:from_string(emqx_schema:url(), <<"">>)
)
].
env_test_() ->
Do = fun emqx_schema:naive_env_interpolation/1,
[
{"undefined", fun() -> ?assertEqual(undefined, Do(undefined)) end},
{"full env abs path",
with_env_fn(
"MY_FILE",
"/path/to/my/file",
fun() -> ?assertEqual("/path/to/my/file", Do("$MY_FILE")) end
)},
{"full env relative path",
with_env_fn(
"MY_FILE",
"path/to/my/file",
fun() -> ?assertEqual("path/to/my/file", Do("${MY_FILE}")) end
)},
%% we can not test windows style file join though
{"windows style",
with_env_fn(
"MY_FILE",
"path\\to\\my\\file",
fun() -> ?assertEqual("path\\to\\my\\file", Do("$MY_FILE")) end
)},
{"dir no {}",
with_env_fn(
"MY_DIR",
"/mydir",
fun() -> ?assertEqual("/mydir/foobar", Do(<<"$MY_DIR/foobar">>)) end
)},
{"dir with {}",
with_env_fn(
"MY_DIR",
"/mydir",
fun() -> ?assertEqual("/mydir/foobar", Do(<<"${MY_DIR}/foobar">>)) end
)},
%% a trailing / should not cause the sub path to become absolute
{"env dir with trailing /",
with_env_fn(
"MY_DIR",
"/mydir//",
fun() -> ?assertEqual("/mydir/foobar", Do(<<"${MY_DIR}/foobar">>)) end
)},
{"string dir with doulbe /",
with_env_fn(
"MY_DIR",
"/mydir/",
fun() -> ?assertEqual("/mydir/foobar", Do(<<"${MY_DIR}//foobar">>)) end
)},
{"env not found",
with_env_fn(
"MY_DIR",
"/mydir/",
fun() -> ?assertEqual("${MY_DIR2}//foobar", Do(<<"${MY_DIR2}//foobar">>)) end
)}
].
with_env_fn(Name, Value, F) ->
fun() ->
with_envs(F, [{Name, Value}])
end.
with_envs(Fun, Envs) ->
with_envs(Fun, [], Envs).
with_envs(Fun, Args, [{_Name, _Value} | _] = Envs) ->
set_envs(Envs),
try
apply(Fun, Args)
after
unset_envs(Envs)
end.
set_envs([{_Name, _Value} | _] = Envs) ->
lists:map(fun({Name, Value}) -> os:putenv(Name, Value) end, Envs).
unset_envs([{_Name, _Value} | _] = Envs) ->
lists:map(fun({Name, _}) -> os:unsetenv(Name) end, Envs).

View File

@ -60,12 +60,12 @@ init(Parent) ->
{ok, #{callbacks => [], owner => Parent}}.
terminate(_Reason, #{callbacks := Callbacks}) ->
lists:foreach(fun(Fun) -> catch Fun() end, Callbacks).
do_terminate(Callbacks).
handle_call({push, Callback}, _From, State = #{callbacks := Callbacks}) ->
{reply, ok, State#{callbacks := [Callback | Callbacks]}};
handle_call(terminate, _From, State = #{callbacks := Callbacks}) ->
lists:foreach(fun(Fun) -> catch Fun() end, Callbacks),
do_terminate(Callbacks),
{stop, normal, ok, State};
handle_call(_Req, _From, State) ->
{reply, error, State}.
@ -77,3 +77,23 @@ handle_info({'EXIT', Parent, _Reason}, State = #{owner := Parent}) ->
{stop, normal, State};
handle_info(_Msg, State) ->
{noreply, State}.
%%----------------------------------------------------------------------------------
%% Internal fns
%%----------------------------------------------------------------------------------
do_terminate(Callbacks) ->
lists:foreach(
fun(Fun) ->
try
Fun()
catch
K:E:S ->
ct:pal("error executing callback ~p: ~p", [Fun, {K, E}]),
ct:pal("stacktrace: ~p", [S]),
ok
end
end,
Callbacks
),
ok.

View File

@ -138,13 +138,13 @@ end_per_testcase(t_ws_non_check_origin, Config) ->
del_bucket(),
PrevConfig = ?config(prev_config, Config),
emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig),
emqx_common_test_helpers:stop_apps([]),
stop_apps(),
ok;
end_per_testcase(_, Config) ->
del_bucket(),
PrevConfig = ?config(prev_config, Config),
emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig),
emqx_common_test_helpers:stop_apps([]),
stop_apps(),
Config.
init_per_suite(Config) ->
@ -156,6 +156,10 @@ end_per_suite(_) ->
emqx_common_test_helpers:stop_apps([]),
ok.
%% FIXME: this is a temp fix to tests share configs.
stop_apps() ->
emqx_common_test_helpers:stop_apps([], #{erase_all_configs => false}).
%%--------------------------------------------------------------------
%% Test Cases
%%--------------------------------------------------------------------
@ -443,7 +447,12 @@ t_websocket_info_deliver(_) ->
t_websocket_info_timeout_limiter(_) ->
Ref = make_ref(),
LimiterT = init_limiter(),
{ok, Rate} = emqx_limiter_schema:to_rate("50MB"),
LimiterT = init_limiter(#{
bytes => bucket_cfg(),
messages => bucket_cfg(),
client => #{bytes => client_cfg(Rate)}
}),
Next = fun emqx_ws_connection:when_msg_in/3,
Limiter = emqx_limiter_container:set_retry_context({retry, [], [], Next}, LimiterT),
Event = {timeout, Ref, limit_timeout},

View File

@ -67,7 +67,7 @@ init_per_suite(Config) ->
emqx_config:erase(?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY),
_ = application:load(emqx_conf),
ok = emqx_mgmt_api_test_util:init_suite(
[emqx_authn]
[emqx_conf, emqx_authn]
),
?AUTHN:delete_chain(?GLOBAL),

View File

@ -42,15 +42,16 @@ init_per_testcase(_Case, Config) ->
<<"backend">> => <<"built_in_database">>,
<<"user_id_type">> => <<"clientid">>
},
emqx:update_config(
{ok, _} = emqx:update_config(
?PATH,
{create_authenticator, ?GLOBAL, AuthnConfig}
),
emqx_conf:update(
[listeners, tcp, listener_authn_enabled], {create, listener_mqtt_tcp_conf(18830, true)}, #{}
{ok, _} = emqx_conf:update(
[listeners, tcp, listener_authn_enabled],
{create, listener_mqtt_tcp_conf(18830, true)},
#{}
),
emqx_conf:update(
{ok, _} = emqx_conf:update(
[listeners, tcp, listener_authn_disabled],
{create, listener_mqtt_tcp_conf(18831, false)},
#{}

View File

@ -37,7 +37,7 @@ init_per_testcase(_, Config) ->
init_per_suite(Config) ->
_ = application:load(emqx_conf),
emqx_common_test_helpers:start_apps([emqx_authn]),
emqx_common_test_helpers:start_apps([emqx_conf, emqx_authn]),
application:ensure_all_started(emqx_resource),
application:ensure_all_started(emqx_connector),
Config.

View File

@ -78,7 +78,8 @@ t_check_schema(_Config) ->
).
t_union_member_selector(_) ->
?assertMatch(#{authentication := undefined}, check(undefined)),
%% default value for authentication
?assertMatch(#{authentication := []}, check(undefined)),
C1 = #{<<"backend">> => <<"built_in_database">>},
?assertThrow(
#{

View File

@ -2,14 +2,4 @@ authorization {
deny_action = ignore
no_match = allow
cache = { enable = true }
sources = [
{
type = file
enable = true
# This file is immutable to EMQX.
# Once new rules are created from dashboard UI or HTTP API,
# the file 'data/authz/acl.conf' is used instead of this one
path = "{{ platform_etc_dir }}/acl.conf"
}
]
}

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_authz, [
{description, "An OTP application"},
{vsn, "0.1.18"},
{vsn, "0.1.19"},
{registered, []},
{mod, {emqx_authz_app, []}},
{applications, [

View File

@ -205,7 +205,7 @@ sources(get, _) ->
},
AccIn
) ->
case file:read_file(Path) of
case emqx_authz_file:read_file(Path) of
{ok, Rules} ->
lists:append(AccIn, [
#{
@ -242,7 +242,7 @@ source(get, #{bindings := #{type := Type}}) ->
Type,
fun
(#{<<"type">> := <<"file">>, <<"enable">> := Enable, <<"path">> := Path}) ->
case file:read_file(Path) of
case emqx_authz_file:read_file(Path) of
{ok, Rules} ->
{200, #{
type => file,

View File

@ -32,13 +32,15 @@
create/1,
update/1,
destroy/1,
authorize/4
authorize/4,
read_file/1
]).
description() ->
"AuthZ with static rules".
create(#{path := Path} = Source) ->
create(#{path := Path0} = Source) ->
Path = filename(Path0),
Rules =
case file:consult(Path) of
{ok, Terms} ->
@ -63,3 +65,9 @@ destroy(_Source) -> ok.
authorize(Client, PubSub, Topic, #{annotations := #{rules := Rules}}) ->
emqx_authz_rule:matches(Client, PubSub, Topic, Rules).
read_file(Path) ->
file:read_file(filename(Path)).
filename(PathMaybeTemplate) ->
emqx_schema:naive_env_interpolation(PathMaybeTemplate).

View File

@ -491,7 +491,7 @@ authz_fields() ->
?HOCON(
?ARRAY(?UNION(UnionMemberSelector)),
#{
default => [],
default => [default_authz()],
desc => ?DESC(sources),
%% doc_lift is force a root level reference instead of nesting sub-structs
extra => #{doc_lift => true},
@ -501,3 +501,10 @@ authz_fields() ->
}
)}
].
default_authz() ->
#{
<<"type">> => <<"file">>,
<<"enable">> => true,
<<"path">> => <<"${EMQX_ETC_DIR}/acl.conf">>
}.

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_bridge, [
{description, "EMQX bridges"},
{vsn, "0.1.17"},
{vsn, "0.1.18"},
{registered, [emqx_bridge_sup]},
{mod, {emqx_bridge_app, []}},
{applications, [

View File

@ -70,7 +70,9 @@
T == dynamo;
T == rocketmq;
T == cassandra;
T == sqlserver
T == sqlserver;
T == pulsar_producer;
T == oracle
).
load() ->

View File

@ -340,6 +340,8 @@ parse_confs(Type, Name, Conf) when ?IS_INGRESS_BRIDGE(Type) ->
%% to hocon; keeping this as just `kafka' for backwards compatibility.
parse_confs(<<"kafka">> = _Type, Name, Conf) ->
Conf#{bridge_name => Name};
parse_confs(<<"pulsar_producer">> = _Type, Name, Conf) ->
Conf#{bridge_name => Name};
parse_confs(_Type, _Name, Conf) ->
Conf.

View File

@ -230,7 +230,12 @@ webhook_bridge_converter(Conf0, _HoconOpts) ->
undefined ->
undefined;
_ ->
do_convert_webhook_config(Conf1)
maps:map(
fun(_Name, Conf) ->
do_convert_webhook_config(Conf)
end,
Conf1
)
end.
do_convert_webhook_config(

View File

@ -141,8 +141,7 @@ setup_fake_telemetry_data() ->
}
}
},
Opts = #{raw_with_default => true},
ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf, Opts),
ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf),
ok = snabbkaffe:start_trace(),
Predicate = fun(#{?snk_kind := K}) -> K =:= emqx_bridge_loaded end,

View File

@ -11,6 +11,7 @@ The application is used to connect EMQX and Cassandra. User can create a rule
and easily ingest IoT data into Cassandra by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
<!---
# Documentation
@ -19,6 +20,7 @@ and easily ingest IoT data into Cassandra by leveraging
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
--->
# HTTP APIs

View File

@ -1,6 +1,6 @@
{application, emqx_bridge_cassandra, [
{description, "EMQX Enterprise Cassandra Bridge"},
{vsn, "0.1.0"},
{vsn, "0.1.1"},
{registered, []},
{applications, [kernel, stdlib, ecql]},
{env, []},

View File

@ -92,7 +92,7 @@ callback_mode() -> async_if_possible.
on_start(
InstId,
#{
servers := Servers,
servers := Servers0,
keyspace := Keyspace,
username := Username,
pool_size := PoolSize,
@ -104,9 +104,16 @@ on_start(
connector => InstId,
config => emqx_utils:redact(Config)
}),
Servers =
lists:map(
fun(#{hostname := Host, port := Port}) ->
{Host, Port}
end,
emqx_schema:parse_servers(Servers0, ?DEFAULT_SERVER_OPTION)
),
Options = [
{nodes, emqx_schema:parse_servers(Servers, ?DEFAULT_SERVER_OPTION)},
{nodes, Servers},
{username, Username},
{password, emqx_secret:wrap(maps:get(password, Config, ""))},
{keyspace, Keyspace},
@ -274,7 +281,7 @@ proc_cql_params(query, SQL, Params, _State) ->
exec_cql_query(InstId, PoolName, Type, Async, PreparedKey, Data) when
Type == query; Type == prepared_query
->
case ecpool:pick_and_do(PoolName, {?MODULE, Type, [Async, PreparedKey, Data]}, no_handover) of
case exec(PoolName, {?MODULE, Type, [Async, PreparedKey, Data]}) of
{error, Reason} = Result ->
?tp(
error,
@ -288,7 +295,7 @@ exec_cql_query(InstId, PoolName, Type, Async, PreparedKey, Data) when
end.
exec_cql_batch_query(InstId, PoolName, Async, CQLs) ->
case ecpool:pick_and_do(PoolName, {?MODULE, batch_query, [Async, CQLs]}, no_handover) of
case exec(PoolName, {?MODULE, batch_query, [Async, CQLs]}) of
{error, Reason} = Result ->
?tp(
error,
@ -301,6 +308,13 @@ exec_cql_batch_query(InstId, PoolName, Async, CQLs) ->
Result
end.
%% Pick one of the pool members to do the query.
%% Using 'no_handoever' strategy,
%% meaning the buffer worker does the gen_server call or gen_server cast
%% towards the connection process.
exec(PoolName, Query) ->
ecpool:pick_and_do(PoolName, Query, no_handover).
on_get_status(_InstId, #{pool_name := PoolName} = State) ->
case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of
true ->
@ -339,17 +353,23 @@ do_check_prepares(State = #{pool_name := PoolName, prepare_cql := {error, Prepar
query(Conn, sync, CQL, Params) ->
ecql:query(Conn, CQL, Params);
query(Conn, {async, Callback}, CQL, Params) ->
ecql:async_query(Conn, CQL, Params, one, Callback).
ok = ecql:async_query(Conn, CQL, Params, one, Callback),
%% return the connection pid for buffer worker to monitor
{ok, Conn}.
prepared_query(Conn, sync, PreparedKey, Params) ->
ecql:execute(Conn, PreparedKey, Params);
prepared_query(Conn, {async, Callback}, PreparedKey, Params) ->
ecql:async_execute(Conn, PreparedKey, Params, Callback).
ok = ecql:async_execute(Conn, PreparedKey, Params, Callback),
%% return the connection pid for buffer worker to monitor
{ok, Conn}.
batch_query(Conn, sync, Rows) ->
ecql:batch(Conn, Rows);
batch_query(Conn, {async, Callback}, Rows) ->
ecql:async_batch(Conn, Rows, Callback).
ok = ecql:async_batch(Conn, Rows, Callback),
%% return the connection pid for buffer worker to monitor
{ok, Conn}.
%%--------------------------------------------------------------------
%% callbacks for ecpool

View File

@ -404,7 +404,7 @@ t_setup_via_config_and_publish(Config) ->
end,
fun(Trace0) ->
Trace = ?of_kind(cassandra_connector_query_return, Trace0),
?assertMatch([#{result := ok}], Trace),
?assertMatch([#{result := {ok, _Pid}}], Trace),
ok
end
),
@ -443,7 +443,7 @@ t_setup_via_http_api_and_publish(Config) ->
end,
fun(Trace0) ->
Trace = ?of_kind(cassandra_connector_query_return, Trace0),
?assertMatch([#{result := ok}], Trace),
?assertMatch([#{result := {ok, _Pid}}], Trace),
ok
end
),
@ -604,7 +604,7 @@ t_missing_data(Config) ->
fun(Trace0) ->
%% 1. ecql driver will return `ok` first in async query
Trace = ?of_kind(cassandra_connector_query_return, Trace0),
?assertMatch([#{result := ok}], Trace),
?assertMatch([#{result := {ok, _Pid}}], Trace),
%% 2. then it will return an error in callback function
Trace1 = ?of_kind(handle_async_reply, Trace0),
?assertMatch([#{result := {error, {8704, _}}}], Trace1),

View File

@ -38,9 +38,14 @@ groups() ->
[].
cassandra_servers() ->
lists:map(
fun(#{hostname := Host, port := Port}) ->
{Host, Port}
end,
emqx_schema:parse_servers(
iolist_to_binary([?CASSANDRA_HOST, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]),
#{default_port => ?CASSANDRA_DEFAULT_PORT}
)
).
init_per_suite(Config) ->

View File

@ -1,6 +1,6 @@
{application, emqx_bridge_gcp_pubsub, [
{description, "EMQX Enterprise GCP Pub/Sub Bridge"},
{vsn, "0.1.0"},
{vsn, "0.1.1"},
{registered, []},
{applications, [
kernel,

View File

@ -38,7 +38,6 @@
}.
-type state() :: #{
connect_timeout := timer:time(),
instance_id := manager_id(),
jwt_worker_id := jwt_worker(),
max_retries := non_neg_integer(),
payload_template := emqx_plugin_libs_rule:tmpl_token(),
@ -61,9 +60,9 @@ is_buffer_supported() -> false.
callback_mode() -> async_if_possible.
-spec on_start(manager_id(), config()) -> {ok, state()} | {error, term()}.
-spec on_start(resource_id(), config()) -> {ok, state()} | {error, term()}.
on_start(
InstanceId,
ResourceId,
#{
connect_timeout := ConnectTimeout,
max_retries := MaxRetries,
@ -75,13 +74,13 @@ on_start(
) ->
?SLOG(info, #{
msg => "starting_gcp_pubsub_bridge",
connector => InstanceId,
connector => ResourceId,
config => Config
}),
%% emulating the emulator behavior
%% https://cloud.google.com/pubsub/docs/emulator
HostPort = os:getenv("PUBSUB_EMULATOR_HOST", "pubsub.googleapis.com:443"),
{Host, Port} = emqx_schema:parse_server(HostPort, #{default_port => 443}),
#{hostname := Host, port := Port} = emqx_schema:parse_server(HostPort, #{default_port => 443}),
PoolType = random,
Transport = tls,
TransportOpts = emqx_tls_lib:to_client_opts(#{enable => true, verify => verify_none}),
@ -100,14 +99,13 @@ on_start(
#{
jwt_worker_id := JWTWorkerId,
project_id := ProjectId
} = ensure_jwt_worker(InstanceId, Config),
} = ensure_jwt_worker(ResourceId, Config),
State = #{
connect_timeout => ConnectTimeout,
instance_id => InstanceId,
jwt_worker_id => JWTWorkerId,
max_retries => MaxRetries,
payload_template => emqx_plugin_libs_rule:preproc_tmpl(PayloadTemplate),
pool_name => InstanceId,
pool_name => ResourceId,
project_id => ProjectId,
pubsub_topic => PubSubTopic,
request_timeout => RequestTimeout
@ -115,39 +113,39 @@ on_start(
?tp(
gcp_pubsub_on_start_before_starting_pool,
#{
instance_id => InstanceId,
pool_name => InstanceId,
resource_id => ResourceId,
pool_name => ResourceId,
pool_opts => PoolOpts
}
),
?tp(gcp_pubsub_starting_ehttpc_pool, #{pool_name => InstanceId}),
case ehttpc_sup:start_pool(InstanceId, PoolOpts) of
?tp(gcp_pubsub_starting_ehttpc_pool, #{pool_name => ResourceId}),
case ehttpc_sup:start_pool(ResourceId, PoolOpts) of
{ok, _} ->
{ok, State};
{error, {already_started, _}} ->
?tp(gcp_pubsub_ehttpc_pool_already_started, #{pool_name => InstanceId}),
?tp(gcp_pubsub_ehttpc_pool_already_started, #{pool_name => ResourceId}),
{ok, State};
{error, Reason} ->
?tp(gcp_pubsub_ehttpc_pool_start_failure, #{
pool_name => InstanceId,
pool_name => ResourceId,
reason => Reason
}),
{error, Reason}
end.
-spec on_stop(manager_id(), state()) -> ok | {error, term()}.
-spec on_stop(resource_id(), state()) -> ok | {error, term()}.
on_stop(
InstanceId,
_State = #{jwt_worker_id := JWTWorkerId, pool_name := PoolName}
ResourceId,
_State = #{jwt_worker_id := JWTWorkerId}
) ->
?tp(gcp_pubsub_stop, #{instance_id => InstanceId, jwt_worker_id => JWTWorkerId}),
?tp(gcp_pubsub_stop, #{resource_id => ResourceId, jwt_worker_id => JWTWorkerId}),
?SLOG(info, #{
msg => "stopping_gcp_pubsub_bridge",
connector => InstanceId
connector => ResourceId
}),
emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId),
emqx_connector_jwt:delete_jwt(?JWT_TABLE, InstanceId),
ehttpc_sup:stop_pool(PoolName).
emqx_connector_jwt:delete_jwt(?JWT_TABLE, ResourceId),
ehttpc_sup:stop_pool(ResourceId).
-spec on_query(
resource_id(),
@ -213,9 +211,9 @@ on_batch_query_async(ResourceId, Requests, ReplyFunAndArgs, State) ->
),
do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId).
-spec on_get_status(manager_id(), state()) -> connected | disconnected.
on_get_status(InstanceId, #{connect_timeout := Timeout, pool_name := PoolName} = State) ->
case do_get_status(InstanceId, PoolName, Timeout) of
-spec on_get_status(resource_id(), state()) -> connected | disconnected.
on_get_status(ResourceId, #{connect_timeout := Timeout} = State) ->
case do_get_status(ResourceId, Timeout) of
true ->
connected;
false ->
@ -230,12 +228,12 @@ on_get_status(InstanceId, #{connect_timeout := Timeout, pool_name := PoolName} =
%% Helper fns
%%-------------------------------------------------------------------------------------------------
-spec ensure_jwt_worker(manager_id(), config()) ->
-spec ensure_jwt_worker(resource_id(), config()) ->
#{
jwt_worker_id := jwt_worker(),
project_id := binary()
}.
ensure_jwt_worker(InstanceId, #{
ensure_jwt_worker(ResourceId, #{
service_account_json := ServiceAccountJSON
}) ->
#{
@ -250,7 +248,7 @@ ensure_jwt_worker(InstanceId, #{
Alg = <<"RS256">>,
Config = #{
private_key => PrivateKeyPEM,
resource_id => InstanceId,
resource_id => ResourceId,
expiration => ExpirationMS,
table => ?JWT_TABLE,
iss => ServiceAccountEmail,
@ -260,14 +258,14 @@ ensure_jwt_worker(InstanceId, #{
alg => Alg
},
JWTWorkerId = <<"gcp_pubsub_jwt_worker:", InstanceId/binary>>,
JWTWorkerId = <<"gcp_pubsub_jwt_worker:", ResourceId/binary>>,
Worker =
case emqx_connector_jwt_sup:ensure_worker_present(JWTWorkerId, Config) of
{ok, Worker0} ->
Worker0;
Error ->
?tp(error, "gcp_pubsub_bridge_jwt_worker_failed_to_start", #{
connector => InstanceId,
connector => ResourceId,
reason => Error
}),
_ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId),
@ -281,18 +279,18 @@ ensure_jwt_worker(InstanceId, #{
%% produced by the worker.
receive
{Ref, token_created} ->
?tp(gcp_pubsub_bridge_jwt_created, #{resource_id => InstanceId}),
?tp(gcp_pubsub_bridge_jwt_created, #{resource_id => ResourceId}),
demonitor(MRef, [flush]),
ok;
{'DOWN', MRef, process, Worker, Reason} ->
?tp(error, "gcp_pubsub_bridge_jwt_worker_failed_to_start", #{
connector => InstanceId,
connector => ResourceId,
reason => Reason
}),
_ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId),
throw(failed_to_start_jwt_worker)
after 10_000 ->
?tp(warning, "gcp_pubsub_bridge_jwt_timeout", #{connector => InstanceId}),
?tp(warning, "gcp_pubsub_bridge_jwt_timeout", #{connector => ResourceId}),
demonitor(MRef, [flush]),
_ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId),
throw(timeout_creating_jwt)
@ -325,8 +323,8 @@ publish_path(
<<"/v1/projects/", ProjectId/binary, "/topics/", PubSubTopic/binary, ":publish">>.
-spec get_jwt_authorization_header(resource_id()) -> [{binary(), binary()}].
get_jwt_authorization_header(InstanceId) ->
case emqx_connector_jwt:lookup_jwt(?JWT_TABLE, InstanceId) of
get_jwt_authorization_header(ResourceId) ->
case emqx_connector_jwt:lookup_jwt(?JWT_TABLE, ResourceId) of
%% Since we synchronize the JWT creation during resource start
%% (see `on_start/2'), this will be always be populated.
{ok, JWT} ->
@ -345,7 +343,6 @@ get_jwt_authorization_header(InstanceId) ->
do_send_requests_sync(State, Requests, ResourceId) ->
#{
pool_name := PoolName,
instance_id := InstanceId,
max_retries := MaxRetries,
request_timeout := RequestTimeout
} = State,
@ -353,12 +350,11 @@ do_send_requests_sync(State, Requests, ResourceId) ->
gcp_pubsub_bridge_do_send_requests,
#{
query_mode => sync,
instance_id => InstanceId,
resource_id => ResourceId,
requests => Requests
}
),
Headers = get_jwt_authorization_header(InstanceId),
Headers = get_jwt_authorization_header(ResourceId),
Payloads =
lists:map(
fun({send_message, Selected}) ->
@ -471,19 +467,17 @@ do_send_requests_sync(State, Requests, ResourceId) ->
do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId) ->
#{
pool_name := PoolName,
instance_id := InstanceId,
request_timeout := RequestTimeout
} = State,
?tp(
gcp_pubsub_bridge_do_send_requests,
#{
query_mode => async,
instance_id => InstanceId,
resource_id => ResourceId,
requests => Requests
}
),
Headers = get_jwt_authorization_header(InstanceId),
Headers = get_jwt_authorization_header(ResourceId),
Payloads =
lists:map(
fun({send_message, Selected}) ->
@ -541,9 +535,9 @@ reply_delegator(_ResourceId, ReplyFunAndArgs, Result) ->
emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result)
end.
-spec do_get_status(manager_id(), binary(), timer:time()) -> boolean().
do_get_status(InstanceId, PoolName, Timeout) ->
Workers = [Worker || {_WorkerName, Worker} <- ehttpc:workers(PoolName)],
-spec do_get_status(resource_id(), timer:time()) -> boolean().
do_get_status(ResourceId, Timeout) ->
Workers = [Worker || {_WorkerName, Worker} <- ehttpc:workers(ResourceId)],
DoPerWorker =
fun(Worker) ->
case ehttpc:health_check(Worker, Timeout) of
@ -552,7 +546,7 @@ do_get_status(InstanceId, PoolName, Timeout) ->
{error, Reason} ->
?SLOG(error, #{
msg => "ehttpc_health_check_failed",
instance_id => InstanceId,
connector => ResourceId,
reason => Reason,
worker => Worker
}),

View File

@ -9,6 +9,7 @@ The application is used to connect EMQX and HStreamDB.
User can create a rule and easily ingest IoT data into HStreamDB by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
<!---
# Documentation
@ -18,6 +19,7 @@ User can create a rule and easily ingest IoT data into HStreamDB by leveraging
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
--->
# HTTP APIs

View File

@ -10,10 +10,21 @@ workers from `emqx_resource`. It implements the connection management
and interaction without need for a separate connector app, since it's
not used by authentication and authorization applications.
## Contributing
# Documentation links
For more information on Apache Kafka, please see its [official
site](https://kafka.apache.org/).
# Configurations
Please see [our official
documentation](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-kafka.html)
for more detailed info.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
## License
# License
See [BSL](./BSL.txt).
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -1,6 +1,6 @@
{application, emqx_bridge_kafka, [
{description, "EMQX Enterprise Kafka Bridge"},
{vsn, "0.1.0"},
{vsn, "0.1.2"},
{registered, [emqx_bridge_kafka_consumer_sup]},
{applications, [
kernel,

View File

@ -114,8 +114,8 @@ callback_mode() ->
is_buffer_supported() ->
true.
-spec on_start(manager_id(), config()) -> {ok, state()}.
on_start(InstanceId, Config) ->
-spec on_start(resource_id(), config()) -> {ok, state()}.
on_start(ResourceId, Config) ->
#{
authentication := Auth,
bootstrap_hosts := BootstrapHosts0,
@ -133,7 +133,7 @@ on_start(InstanceId, Config) ->
BootstrapHosts = emqx_bridge_kafka_impl:hosts(BootstrapHosts0),
KafkaType = kafka_consumer,
%% Note: this is distinct per node.
ClientID = make_client_id(InstanceId, KafkaType, BridgeName),
ClientID = make_client_id(ResourceId, KafkaType, BridgeName),
ClientOpts0 =
case Auth of
none -> [];
@ -144,26 +144,26 @@ on_start(InstanceId, Config) ->
ok ->
?tp(
kafka_consumer_client_started,
#{client_id => ClientID, instance_id => InstanceId}
#{client_id => ClientID, resource_id => ResourceId}
),
?SLOG(info, #{
msg => "kafka_consumer_client_started",
instance_id => InstanceId,
resource_id => ResourceId,
kafka_hosts => BootstrapHosts
});
{error, Reason} ->
?SLOG(error, #{
msg => "failed_to_start_kafka_consumer_client",
instance_id => InstanceId,
resource_id => ResourceId,
kafka_hosts => BootstrapHosts,
reason => emqx_utils:redact(Reason)
}),
throw(?CLIENT_DOWN_MESSAGE)
end,
start_consumer(Config, InstanceId, ClientID).
start_consumer(Config, ResourceId, ClientID).
-spec on_stop(manager_id(), state()) -> ok.
on_stop(_InstanceID, State) ->
-spec on_stop(resource_id(), state()) -> ok.
on_stop(_ResourceID, State) ->
#{
subscriber_id := SubscriberId,
kafka_client_id := ClientID
@ -172,14 +172,19 @@ on_stop(_InstanceID, State) ->
stop_client(ClientID),
ok.
-spec on_get_status(manager_id(), state()) -> connected | disconnected.
on_get_status(_InstanceID, State) ->
-spec on_get_status(resource_id(), state()) -> connected | disconnected.
on_get_status(_ResourceID, State) ->
#{
subscriber_id := SubscriberId,
kafka_client_id := ClientID,
kafka_topics := KafkaTopics
} = State,
do_get_status(State, ClientID, KafkaTopics, SubscriberId).
case do_get_status(ClientID, KafkaTopics, SubscriberId) of
{disconnected, Message} ->
{disconnected, State, Message};
Res ->
Res
end.
%%-------------------------------------------------------------------------------------
%% `brod_group_subscriber' API
@ -266,8 +271,8 @@ ensure_consumer_supervisor_started() ->
ok
end.
-spec start_consumer(config(), manager_id(), brod:client_id()) -> {ok, state()}.
start_consumer(Config, InstanceId, ClientID) ->
-spec start_consumer(config(), resource_id(), brod:client_id()) -> {ok, state()}.
start_consumer(Config, ResourceId, ClientID) ->
#{
bootstrap_hosts := BootstrapHosts0,
bridge_name := BridgeName,
@ -287,7 +292,7 @@ start_consumer(Config, InstanceId, ClientID) ->
InitialState = #{
key_encoding_mode => KeyEncodingMode,
hookpoint => Hookpoint,
resource_id => emqx_bridge_resource:resource_id(kafka_consumer, BridgeName),
resource_id => ResourceId,
topic_mapping => TopicMapping,
value_encoding_mode => ValueEncodingMode
},
@ -332,7 +337,7 @@ start_consumer(Config, InstanceId, ClientID) ->
{ok, _ConsumerPid} ->
?tp(
kafka_consumer_subscriber_started,
#{instance_id => InstanceId, subscriber_id => SubscriberId}
#{resource_id => ResourceId, subscriber_id => SubscriberId}
),
{ok, #{
subscriber_id => SubscriberId,
@ -342,7 +347,7 @@ start_consumer(Config, InstanceId, ClientID) ->
{error, Reason2} ->
?SLOG(error, #{
msg => "failed_to_start_kafka_consumer",
instance_id => InstanceId,
resource_id => ResourceId,
kafka_hosts => emqx_bridge_kafka_impl:hosts(BootstrapHosts0),
reason => emqx_utils:redact(Reason2)
}),
@ -376,41 +381,41 @@ stop_client(ClientID) ->
),
ok.
do_get_status(State, ClientID, [KafkaTopic | RestTopics], SubscriberId) ->
do_get_status(ClientID, [KafkaTopic | RestTopics], SubscriberId) ->
case brod:get_partitions_count(ClientID, KafkaTopic) of
{ok, NPartitions} ->
case do_get_status1(ClientID, KafkaTopic, SubscriberId, NPartitions) of
connected -> do_get_status(State, ClientID, RestTopics, SubscriberId);
case do_get_topic_status(ClientID, KafkaTopic, SubscriberId, NPartitions) of
connected -> do_get_status(ClientID, RestTopics, SubscriberId);
disconnected -> disconnected
end;
{error, {client_down, Context}} ->
case infer_client_error(Context) of
auth_error ->
Message = "Authentication error. " ++ ?CLIENT_DOWN_MESSAGE,
{disconnected, State, Message};
{disconnected, Message};
{auth_error, Message0} ->
Message = binary_to_list(Message0) ++ "; " ++ ?CLIENT_DOWN_MESSAGE,
{disconnected, State, Message};
{disconnected, Message};
connection_refused ->
Message = "Connection refused. " ++ ?CLIENT_DOWN_MESSAGE,
{disconnected, State, Message};
{disconnected, Message};
_ ->
{disconnected, State, ?CLIENT_DOWN_MESSAGE}
{disconnected, ?CLIENT_DOWN_MESSAGE}
end;
{error, leader_not_available} ->
Message =
"Leader connection not available. Please check the Kafka topic used,"
" the connection parameters and Kafka cluster health",
{disconnected, State, Message};
{disconnected, Message};
_ ->
disconnected
end;
do_get_status(_State, _ClientID, _KafkaTopics = [], _SubscriberId) ->
do_get_status(_ClientID, _KafkaTopics = [], _SubscriberId) ->
connected.
-spec do_get_status1(brod:client_id(), binary(), subscriber_id(), pos_integer()) ->
-spec do_get_topic_status(brod:client_id(), binary(), subscriber_id(), pos_integer()) ->
connected | disconnected.
do_get_status1(ClientID, KafkaTopic, SubscriberId, NPartitions) ->
do_get_topic_status(ClientID, KafkaTopic, SubscriberId, NPartitions) ->
Results =
lists:map(
fun(N) ->
@ -466,19 +471,19 @@ consumer_group_id(BridgeName0) ->
BridgeName = to_bin(BridgeName0),
<<"emqx-kafka-consumer-", BridgeName/binary>>.
-spec is_dry_run(manager_id()) -> boolean().
is_dry_run(InstanceId) ->
TestIdStart = string:find(InstanceId, ?TEST_ID_PREFIX),
-spec is_dry_run(resource_id()) -> boolean().
is_dry_run(ResourceId) ->
TestIdStart = string:find(ResourceId, ?TEST_ID_PREFIX),
case TestIdStart of
nomatch ->
false;
_ ->
string:equal(TestIdStart, InstanceId)
string:equal(TestIdStart, ResourceId)
end.
-spec make_client_id(manager_id(), kafka_consumer, atom() | binary()) -> atom().
make_client_id(InstanceId, KafkaType, KafkaName) ->
case is_dry_run(InstanceId) of
-spec make_client_id(resource_id(), kafka_consumer, atom() | binary()) -> atom().
make_client_id(ResourceId, KafkaType, KafkaName) ->
case is_dry_run(ResourceId) of
false ->
ClientID0 = emqx_bridge_kafka_impl:make_client_id(KafkaType, KafkaName),
binary_to_atom(ClientID0);

View File

@ -1156,11 +1156,12 @@ t_start_and_consume_ok(Config) ->
),
%% Check that the bridge probe API doesn't leak atoms.
ProbeRes = probe_bridge_api(Config),
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes),
ProbeRes0 = probe_bridge_api(Config),
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
AtomsBefore = erlang:system_info(atom_count),
%% Probe again; shouldn't have created more atoms.
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes),
ProbeRes1 = probe_bridge_api(Config),
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
AtomsAfter = erlang:system_info(atom_count),
?assertEqual(AtomsBefore, AtomsAfter),
@ -1259,11 +1260,12 @@ t_multiple_topic_mappings(Config) ->
{ok, _} = snabbkaffe:receive_events(SRef0),
%% Check that the bridge probe API doesn't leak atoms.
ProbeRes = probe_bridge_api(Config),
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes),
ProbeRes0 = probe_bridge_api(Config),
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
AtomsBefore = erlang:system_info(atom_count),
%% Probe again; shouldn't have created more atoms.
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes),
ProbeRes1 = probe_bridge_api(Config),
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
AtomsAfter = erlang:system_info(atom_count),
?assertEqual(AtomsBefore, AtomsAfter),
@ -1473,7 +1475,10 @@ do_t_receive_after_recovery(Config) ->
ResourceId = resource_id(Config),
?check_trace(
begin
{ok, _} = create_bridge(Config),
{ok, _} = create_bridge(
Config,
#{<<"kafka">> => #{<<"offset_reset_policy">> => <<"earliest">>}}
),
ping_until_healthy(Config, _Period = 1_500, _Timeout0 = 24_000),
{ok, connected} = emqx_resource_manager:health_check(ResourceId),
%% 0) ensure each partition commits its offset so it can

View File

@ -1,12 +1,12 @@
# EMQX MatrixDB Bridge
[MatrixDB](http://matrixdb.univ-lyon1.fr/) is a biological database focused on
molecular interactions between extracellular proteins and polysaccharides.
[YMatrix](https://www.ymatrix.cn/) is a hyper-converged database product developed by YMatrix based on the PostgreSQL / Greenplum classic open source database. In addition to being able to handle time series scenarios with ease, it also supports classic scenarios such as online transaction processing (OLTP) and online analytical processing (OLAP).
The application is used to connect EMQX and MatrixDB.
User can create a rule and easily ingest IoT data into MatrixDB by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
<!---
# Documentation
@ -16,6 +16,7 @@ User can create a rule and easily ingest IoT data into MatrixDB by leveraging
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
--->
# HTTP APIs

View File

@ -1,6 +1,6 @@
# EMQX MySQL Bridge
[MySQL](https://github.com/MySQL/MySQL) is a popular open-source relational database
[MySQL](https://github.com/mysql/mysql-server) is a popular open-source relational database
management system.
The application is used to connect EMQX and MySQL.

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this License’s text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this License’s text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,36 @@
# EMQX OpenTSDB Bridge
[OpenTSDB](http://opentsdb.net) is a distributed, scalable Time Series Database (TSDB) written on top of HBase.
OpenTSDB was written to address a common need: store, index and serve metrics collected from computer systems (network gear, operating systems, applications) at a large scale, and make this data easily accessible and graphable.
OpenTSDB allows you to collect thousands of metrics from tens of thousands of hosts and applications, at a high rate (every few seconds).
OpenTSDB will never delete or downsample data and can easily store hundreds of billions of data points.
The application is used to connect EMQX and OpenTSDB. User can create a rule and easily ingest IoT data into OpenTSDB by leveraging the
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
# Documentation
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
# HTTP APIs
- Several APIs are provided for bridge management, which includes create bridge,
update bridge, get bridge, stop or restart bridge and list bridges etc.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,2 @@
toxiproxy
opents

View File

@ -0,0 +1,8 @@
%% rebar3 build configuration for the OpenTSDB bridge application.
{erl_opts, [debug_info]}.
{deps, [
    %% OpenTSDB HTTP client library; used as the ecpool worker in the connector
    {opentsdb, {git, "https://github.com/emqx/opentsdb-client-erl", {tag, "v0.5.1"}}},
    %% in-repo EMQX applications this bridge builds against
    {emqx_connector, {path, "../../apps/emqx_connector"}},
    {emqx_resource, {path, "../../apps/emqx_resource"}},
    {emqx_bridge, {path, "../../apps/emqx_bridge"}}
]}.

View File

@ -0,0 +1,15 @@
%% OTP application resource file for the OpenTSDB bridge.
{application, emqx_bridge_opents, [
    {description, "EMQX Enterprise OpenTSDB Bridge"},
    {vsn, "0.1.0"},
    {registered, []},
    {applications, [
        kernel,
        stdlib,
        %% the opentsdb client application must be started before this bridge
        opentsdb
    ]},
    {env, []},
    {modules, []},
    {licenses, ["BSL"]},
    {links, []}
]}.

View File

@ -0,0 +1,85 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------

%% @doc Hocon schema and HTTP-API example definitions for the OpenTSDB
%% bridge. This module only describes the configuration structure and the
%% documentation examples; the runtime connection logic lives in
%% emqx_bridge_opents_connector.
-module(emqx_bridge_opents).

-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").

-import(hoconsc, [mk/2, enum/1, ref/2]).

%% Examples rendered in the bridges HTTP API documentation.
-export([
    conn_bridge_examples/1
]).

%% Hocon schema callbacks.
-export([
    namespace/0,
    roots/0,
    fields/1,
    desc/1
]).

%% -------------------------------------------------------------------------------------------------
%% api

%% Returns the example payload shown for this bridge type in the API docs.
conn_bridge_examples(Method) ->
    [
        #{
            <<"opents">> => #{
                summary => <<"OpenTSDB Bridge">>,
                value => values(Method)
            }
        }
    ].

%% Example configuration values; the same example is used for every method.
values(_Method) ->
    #{
        enable => true,
        type => opents,
        name => <<"foo">>,
        server => <<"http://127.0.0.1:4242">>,
        pool_size => 8,
        resource_opts => #{
            worker_pool_size => 1,
            health_check_interval => ?HEALTHCHECK_INTERVAL_RAW,
            auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW,
            batch_size => ?DEFAULT_BATCH_SIZE,
            batch_time => ?DEFAULT_BATCH_TIME,
            query_mode => async,
            max_buffer_bytes => ?DEFAULT_BUFFER_BYTES
        }
    }.

%% -------------------------------------------------------------------------------------------------
%% Hocon Schema Definitions

namespace() -> "bridge_opents".

%% No standalone roots: this schema is only reachable via the bridges schema.
roots() -> [].

%% "config" is the base; "post"/"put"/"get" are the per-HTTP-method variants.
fields("config") ->
    [
        {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}
    ] ++ emqx_resource_schema:fields("resource_opts") ++
        emqx_bridge_opents_connector:fields(config);
fields("post") ->
    %% POST carries type and name in the body in addition to the config
    [type_field(), name_field() | fields("config")];
fields("put") ->
    fields("config");
fields("get") ->
    %% GET responses additionally include the runtime status fields
    emqx_bridge_schema:status_fields() ++ fields("post").

desc("config") ->
    ?DESC("desc_config");
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
    ["Configuration for OpenTSDB using `", string:to_upper(Method), "` method."];
desc(_) ->
    undefined.

%% -------------------------------------------------------------------------------------------------
%% internal

type_field() ->
    {type, mk(enum([opents]), #{required => true, desc => ?DESC("desc_type")})}.

name_field() ->
    {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.

View File

@ -0,0 +1,184 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------

%% @doc emqx_resource implementation for the OpenTSDB bridge: manages an
%% ecpool of opentsdb client workers and forwards (batches of) messages
%% to OpenTSDB via the client's `put' operation.
-module(emqx_bridge_opents_connector).

-behaviour(emqx_resource).

-include_lib("emqx_resource/include/emqx_resource.hrl").
-include_lib("typerefl/include/types.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("hocon/include/hoconsc.hrl").

-export([roots/0, fields/1]).

%% `emqx_resource' API
-export([
    callback_mode/0,
    is_buffer_supported/0,
    on_start/2,
    on_stop/2,
    on_query/3,
    on_batch_query/3,
    on_get_status/2
]).

%% ecpool worker start callback.
-export([connect/1]).

-import(hoconsc, [mk/2, enum/1, ref/2]).

%%=====================================================================
%% Hocon schema

roots() ->
    [{config, #{type => hoconsc:ref(?MODULE, config)}}].

fields(config) ->
    [
        {server, mk(binary(), #{required => true, desc => ?DESC("server")})},
        {pool_size, fun emqx_connector_schema_lib:pool_size/1},
        {summary, mk(boolean(), #{default => true, desc => ?DESC("summary")})},
        {details, mk(boolean(), #{default => false, desc => ?DESC("details")})},
        {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1}
    ].

%%========================================================================================
%% `emqx_resource' API
%%========================================================================================

callback_mode() -> always_sync.

is_buffer_supported() -> false.

%% Start the resource: first check the OpenTSDB endpoint is reachable,
%% then start the ecpool of opentsdb clients named after the instance id.
on_start(
    InstanceId,
    #{
        server := Server,
        pool_size := PoolSize,
        summary := Summary,
        details := Details,
        resource_opts := #{batch_size := BatchSize}
    } = Config
) ->
    ?SLOG(info, #{
        msg => "starting_opents_connector",
        connector => InstanceId,
        %% redact credentials before logging the raw config
        config => emqx_utils:redact(Config)
    }),
    Options = [
        {server, to_str(Server)},
        {summary, Summary},
        {details, Details},
        %% cap the client-side batch at the buffer worker's batch size
        {max_batch_size, BatchSize},
        {pool_size, PoolSize}
    ],
    State = #{pool_name => InstanceId, server => Server},
    %% fail fast when the server cannot be reached at all
    case opentsdb_connectivity(Server) of
        ok ->
            case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of
                ok ->
                    {ok, State};
                Error ->
                    Error
            end;
        {error, Reason} = Error ->
            ?SLOG(error, #{msg => "Initiate resource failed", reason => Reason}),
            Error
    end.

on_stop(InstanceId, #{pool_name := PoolName} = _State) ->
    ?SLOG(info, #{
        msg => "stopping_opents_connector",
        connector => InstanceId
    }),
    emqx_resource_pool:stop(PoolName).

%% A single query is handled as a batch of one.
on_query(InstanceId, Request, State) ->
    on_batch_query(InstanceId, [Request], State).

on_batch_query(
    InstanceId,
    BatchReq,
    State
) ->
    %% strip the buffer-worker key and keep only OpenTSDB datapoint fields
    Datas = [format_opentsdb_msg(Msg) || {_Key, Msg} <- BatchReq],
    do_query(InstanceId, Datas, State).

%% Health check probes the HTTP endpoint directly (not the pool workers);
%% failures report `connecting' so the resource manager keeps retrying.
on_get_status(_InstanceId, #{server := Server}) ->
    Result =
        case opentsdb_connectivity(Server) of
            ok ->
                connected;
            {error, Reason} ->
                ?SLOG(error, #{msg => "OpenTSDB lost connection", reason => Reason}),
                connecting
        end,
    Result.

%%========================================================================================
%% Helper fns
%%========================================================================================

%% Pick a pool worker and push the formatted datapoints via opentsdb:put/2.
do_query(InstanceId, Query, #{pool_name := PoolName} = State) ->
    ?TRACE(
        "QUERY",
        "opents_connector_received",
        #{connector => InstanceId, query => Query, state => State}
    ),
    Result = ecpool:pick_and_do(PoolName, {opentsdb, put, [Query]}, no_handover),
    case Result of
        {error, Reason} ->
            ?tp(
                opents_connector_query_return,
                #{error => Reason}
            ),
            ?SLOG(error, #{
                msg => "opents_connector_do_query_failed",
                connector => InstanceId,
                query => Query,
                reason => Reason
            }),
            Result;
        _ ->
            ?tp(
                opents_connector_query_return,
                #{result => Result}
            ),
            Result
    end.

%% ecpool callback: each pool worker is an opentsdb client process.
connect(Opts) ->
    opentsdb:start_link(Opts).

%% The opentsdb client expects a string server option.
to_str(List) when is_list(List) ->
    List;
to_str(Bin) when is_binary(Bin) ->
    erlang:binary_to_list(Bin).

%% Cheap reachability probe; prepends "http://" when no scheme is given.
opentsdb_connectivity(Server) ->
    SvrUrl =
        case Server of
            <<"http://", _/binary>> -> Server;
            <<"https://", _/binary>> -> Server;
            _ -> "http://" ++ Server
        end,
    emqx_plugin_libs_rule:http_connectivity(SvrUrl).

%% Keep only the OpenTSDB datapoint fields, whether atom- or binary-keyed.
format_opentsdb_msg(Msg) ->
    maps:with(
        [
            timestamp,
            metric,
            tags,
            value,
            <<"timestamp">>,
            <<"metric">>,
            <<"tags">>,
            <<"value">>
        ],
        Msg
    ).

View File

@ -0,0 +1,363 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_opents_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
% DB defaults
-define(BATCH_SIZE, 10).
%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------
%% @doc CT entry point: every test case runs once per group.
all() ->
    [
        {group, with_batch},
        {group, without_batch}
    ].
%% @doc Both groups contain all test cases of this module; they differ
%% only in the batch size configured in init_per_group/2.
groups() ->
    TCs = emqx_common_test_helpers:all(?MODULE),
    [
        {with_batch, TCs},
        {without_batch, TCs}
    ].
%% @doc Group setup: inject the batch size (?BATCH_SIZE for with_batch,
%% 1 — effectively no batching — for without_batch), then do the common
%% environment bring-up.
init_per_group(with_batch, Config0) ->
    Config = [{batch_size, ?BATCH_SIZE} | Config0],
    common_init(Config);
init_per_group(without_batch, Config0) ->
    Config = [{batch_size, 1} | Config0],
    common_init(Config);
init_per_group(_Group, Config) ->
    Config.
%% @doc Group teardown: reset toxiproxy so later groups start from a
%% clean network state.
end_per_group(Group, Config) when Group =:= with_batch; Group =:= without_batch ->
    ProxyHost = ?config(proxy_host, Config),
    ProxyPort = ?config(proxy_port, Config),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    ok;
end_per_group(_Group, _Config) ->
    ok.
%% @doc Suite setup is a no-op; the real bring-up happens per group.
init_per_suite(Config) ->
    Config.
%% @doc Suite teardown: stop the management API and the apps started in
%% common_init/1.
end_per_suite(_Config) ->
    emqx_mgmt_api_test_util:end_suite(),
    ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]),
    ok.
%% @doc Per-testcase setup: make sure no stale bridge is left from a
%% previous case, then start snabbkaffe tracing.
init_per_testcase(_Testcase, Config) ->
    delete_bridge(Config),
    snabbkaffe:start_trace(),
    Config.
%% @doc Per-testcase teardown: reset the proxy, stop tracing and remove
%% the bridge created by the case.
end_per_testcase(_Testcase, Config) ->
    ProxyHost = ?config(proxy_host, Config),
    ProxyPort = ?config(proxy_port, Config),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    ok = snabbkaffe:stop(),
    delete_bridge(Config),
    ok.
%%------------------------------------------------------------------------------
%% Helper fns
%%------------------------------------------------------------------------------
%% @doc Shared group bring-up: locate the OpenTSDB server (via env vars,
%% defaulting to the toxiproxy endpoint), start the needed apps and the
%% management API, and stash the generated bridge config in CT config.
%% Skips (or fails hard in CI) when the server is unreachable.
common_init(ConfigT) ->
    Host = os:getenv("OPENTS_HOST", "toxiproxy"),
    Port = list_to_integer(os:getenv("OPENTS_PORT", "4242")),
    Config0 = [
        {opents_host, Host},
        {opents_port, Port},
        {proxy_name, "opents"}
        | ConfigT
    ],
    BridgeType = proplists:get_value(bridge_type, Config0, <<"opents">>),
    case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of
        true ->
            % Setup toxiproxy
            ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
            ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
            emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
            % Ensure EE bridge module is loaded
            _ = application:load(emqx_ee_bridge),
            _ = emqx_ee_bridge:module_info(),
            ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]),
            emqx_mgmt_api_test_util:init_suite(),
            {Name, OpenTSConf} = opents_config(BridgeType, Config0),
            Config =
                [
                    {opents_config, OpenTSConf},
                    {opents_bridge_type, BridgeType},
                    {opents_name, Name},
                    {proxy_host, ProxyHost},
                    {proxy_port, ProxyPort}
                    | Config0
                ],
            Config;
        false ->
            case os:getenv("IS_CI") of
                "yes" ->
                    throw(no_opents);
                _ ->
                    {skip, no_opents}
            end
    end.
%% @doc Render the hocon bridge configuration for the test bridge and
%% return {BridgeName, CheckedConfigMap}. Batch size comes from the
%% group-level CT config.
opents_config(BridgeType, Config) ->
    Port = integer_to_list(?config(opents_port, Config)),
    Server = "http://" ++ ?config(opents_host, Config) ++ ":" ++ Port,
    Name = atom_to_binary(?MODULE),
    BatchSize = ?config(batch_size, Config),
    ConfigString =
        io_lib:format(
            "bridges.~s.~s {\n"
            "  enable = true\n"
            "  server = ~p\n"
            "  resource_opts = {\n"
            "    request_timeout = 500ms\n"
            "    batch_size = ~b\n"
            "    query_mode = sync\n"
            "  }\n"
            "}",
            [
                BridgeType,
                Name,
                Server,
                BatchSize
            ]
        ),
    {Name, parse_and_check(ConfigString, BridgeType, Name)}.
%% @doc Parse the hocon string, validate it against the bridge schema,
%% and extract the config map for this particular bridge.
parse_and_check(ConfigString, BridgeType, Name) ->
    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
    hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
    #{<<"bridges">> := #{BridgeType := #{Name := Config}}} = RawConf,
    Config.
%% @doc Create the test bridge with the stored config.
create_bridge(Config) ->
    create_bridge(Config, _Overrides = #{}).
%% @doc Create the test bridge with the stored config deep-merged with
%% the given overrides.
create_bridge(Config, Overrides) ->
    BridgeType = ?config(opents_bridge_type, Config),
    Name = ?config(opents_name, Config),
    Config0 = ?config(opents_config, Config),
    Config1 = emqx_utils_maps:deep_merge(Config0, Overrides),
    emqx_bridge:create(BridgeType, Name, Config1).
%% @doc Remove the test bridge (idempotent; used in setup and teardown).
delete_bridge(Config) ->
    BridgeType = ?config(opents_bridge_type, Config),
    Name = ?config(opents_name, Config),
    emqx_bridge:remove(BridgeType, Name).
%% @doc Create a bridge through the management HTTP API; returns the
%% decoded JSON response on success.
create_bridge_http(Params) ->
    Path = emqx_mgmt_api_test_util:api_path(["bridges"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of
        {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])};
        Error -> Error
    end.
%% @doc Route a message through the bridge by its bridge id.
send_message(Config, Payload) ->
    Name = ?config(opents_name, Config),
    BridgeType = ?config(opents_bridge_type, Config),
    BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name),
    emqx_bridge:send_message(BridgeID, Payload).
%% @doc Query the underlying resource directly (default 1s timeout).
query_resource(Config, Request) ->
    query_resource(Config, Request, 1_000).
query_resource(Config, Request, Timeout) ->
    Name = ?config(opents_name, Config),
    BridgeType = ?config(opents_bridge_type, Config),
    ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
    emqx_resource:query(ResourceID, Request, #{timeout => Timeout}).
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
%% @doc Happy path via direct config: create the bridge, publish one
%% message, and check both the buffer-worker ack and the connector
%% tracepoint report a fully successful OpenTSDB write.
t_setup_via_config_and_publish(Config) ->
    ?assertMatch(
        {ok, _},
        create_bridge(Config)
    ),
    SentData = make_data(),
    ?check_trace(
        begin
            {_, {ok, #{result := Result}}} =
                ?wait_async_action(
                    send_message(Config, SentData),
                    #{?snk_kind := buffer_worker_flush_ack},
                    2_000
                ),
            ?assertMatch(
                {ok, 200, #{failed := 0, success := 1}}, Result
            ),
            ok
        end,
        fun(Trace0) ->
            Trace = ?of_kind(opents_connector_query_return, Trace0),
            ?assertMatch([#{result := {ok, 200, #{failed := 0, success := 1}}}], Trace),
            ok
        end
    ),
    ok.
%% @doc Happy path via the management HTTP API: create the bridge over
%% HTTP, then query the resource synchronously and expect one success.
t_setup_via_http_api_and_publish(Config) ->
    BridgeType = ?config(opents_bridge_type, Config),
    Name = ?config(opents_name, Config),
    OpentsConfig0 = ?config(opents_config, Config),
    OpentsConfig = OpentsConfig0#{
        <<"name">> => Name,
        <<"type">> => BridgeType
    },
    ?assertMatch(
        {ok, _},
        create_bridge_http(OpentsConfig)
    ),
    SentData = make_data(),
    ?check_trace(
        begin
            Request = {send_message, SentData},
            Res0 = query_resource(Config, Request, 2_500),
            ?assertMatch(
                {ok, 200, #{failed := 0, success := 1}}, Res0
            ),
            ok
        end,
        fun(Trace0) ->
            Trace = ?of_kind(opents_connector_query_return, Trace0),
            ?assertMatch([#{result := {ok, 200, #{failed := 0, success := 1}}}], Trace),
            ok
        end
    ),
    ok.
%% @doc A freshly created bridge against a reachable server must report
%% `connected' from the health check.
t_get_status(Config) ->
    ?assertMatch(
        {ok, _},
        create_bridge(Config)
    ),
    Name = ?config(opents_name, Config),
    BridgeType = ?config(opents_bridge_type, Config),
    ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
    ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)),
    ok.
%% @doc Creating a bridge against a dead port must succeed (creation is
%% async) but the health check must report `disconnected'.
t_create_disconnected(Config) ->
    BridgeType = proplists:get_value(bridge_type, Config, <<"opents">>),
    Config1 = lists:keyreplace(opents_port, 1, Config, {opents_port, 61234}),
    {_Name, OpenTSConf} = opents_config(BridgeType, Config1),
    Config2 = lists:keyreplace(opents_config, 1, Config1, {opents_config, OpenTSConf}),
    ?assertMatch({ok, _}, create_bridge(Config2)),
    Name = ?config(opents_name, Config),
    ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
    ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceID)),
    ok.
%% @doc While the proxy link is down, a write must surface an error
%% through the buffer-worker flush ack.
t_write_failure(Config) ->
    ProxyName = ?config(proxy_name, Config),
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    {ok, _} = create_bridge(Config),
    SentData = make_data(),
    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
        {_, {ok, #{result := Result}}} =
            ?wait_async_action(
                send_message(Config, SentData),
                #{?snk_kind := buffer_worker_flush_ack},
                2_000
            ),
        ?assertMatch({error, _}, Result),
        ok
    end),
    ok.
%% @doc With the proxy injecting timeouts and a short request_timeout,
%% a synchronous query must fail with a resource timeout error.
t_write_timeout(Config) ->
    ProxyName = ?config(proxy_name, Config),
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    {ok, _} = create_bridge(
        Config,
        #{
            <<"resource_opts">> => #{
                <<"request_timeout">> => 500,
                <<"resume_interval">> => 100,
                <<"health_check_interval">> => 100
            }
        }
    ),
    SentData = make_data(),
    emqx_common_test_helpers:with_failure(
        timeout, ProxyName, ProxyHost, ProxyPort, fun() ->
            ?assertMatch(
                {error, {resource_error, #{reason := timeout}}},
                query_resource(Config, {send_message, SentData})
            )
        end
    ),
    ok.
%% @doc An empty message is rejected by OpenTSDB with a 400 and counted
%% as one failed, zero successful.
t_missing_data(Config) ->
    ?assertMatch(
        {ok, _},
        create_bridge(Config)
    ),
    {_, {ok, #{result := Result}}} =
        ?wait_async_action(
            send_message(Config, #{}),
            #{?snk_kind := buffer_worker_flush_ack},
            2_000
        ),
    ?assertMatch(
        {error, {400, #{failed := 1, success := 0}}},
        Result
    ),
    ok.
%% @doc A message missing the required `metric' field fails the same
%% way as an empty one.
t_bad_data(Config) ->
    ?assertMatch(
        {ok, _},
        create_bridge(Config)
    ),
    Data = maps:without([metric], make_data()),
    {_, {ok, #{result := Result}}} =
        ?wait_async_action(
            send_message(Config, Data),
            #{?snk_kind := buffer_worker_flush_ack},
            2_000
        ),
    ?assertMatch(
        {error, {400, #{failed := 1, success := 0}}}, Result
    ),
    ok.
%% @doc Build a minimal well-formed OpenTSDB data point with default
%% metric name and value.
make_data() ->
    make_data(<<"cpu">>, 12).
%% @doc Build a data point for the given metric and value, tagged with
%% a fixed host tag.
make_data(Metric, Value) ->
    Tags = #{<<"host">> => <<"serverA">>},
    #{
        metric => Metric,
        tags => Tags,
        value => Value
    }.

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this License's text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this License's text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,28 @@
# EMQX Oracle Database Bridge
This application houses the Oracle Database bridge for EMQX Enterprise Edition.
It implements the data bridge APIs for interacting with an Oracle Database.
# Documentation
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
  for an introduction to the EMQX rules engine.
# HTTP APIs
- Several APIs are provided for bridge management, which includes create bridge,
update bridge, get bridge, stop or restart bridge and list bridges etc.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information.
## Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
## License
See [BSL](./BSL.txt).

View File

@ -0,0 +1,2 @@
toxiproxy
oracle

View File

@ -0,0 +1,13 @@
%% -*- mode: erlang; -*-
{erl_opts, [debug_info]}.
{deps, [ {emqx_oracle, {path, "../../apps/emqx_oracle"}}
, {emqx_connector, {path, "../../apps/emqx_connector"}}
, {emqx_resource, {path, "../../apps/emqx_resource"}}
, {emqx_bridge, {path, "../../apps/emqx_bridge"}}
]}.
{shell, [
% {config, "config/sys.config"},
{apps, [emqx_bridge_oracle]}
]}.

View File

@ -0,0 +1,14 @@
{application, emqx_bridge_oracle, [
{description, "EMQX Enterprise Oracle Database Bridge"},
{vsn, "0.1.0"},
{registered, []},
{applications, [
kernel,
stdlib,
emqx_oracle
]},
{env, []},
{modules, []},
{links, []}
]}.

View File

@ -0,0 +1,109 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_oracle).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx_bridge/include/emqx_bridge.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").
-export([
conn_bridge_examples/1
]).
-export([
namespace/0,
roots/0,
fields/1,
desc/1
]).
-define(DEFAULT_SQL, <<
"insert into t_mqtt_msg(msgid, topic, qos, payload)"
"values (${id}, ${topic}, ${qos}, ${payload})"
>>).
%% @doc API-docs callback: example payloads for the Oracle bridge,
%% keyed by bridge type name.
conn_bridge_examples(Method) ->
    [
        #{
            <<"oracle">> => #{
                summary => <<"Oracle Database Bridge">>,
                value => values(Method)
            }
        }
    ].
%% @doc Example bridge configuration shown in the API docs; the same
%% example is used for every HTTP method.
values(_Method) ->
    #{
        enable => true,
        type => oracle,
        name => <<"foo">>,
        server => <<"127.0.0.1:1521">>,
        pool_size => 8,
        database => <<"ORCL">>,
        sid => <<"ORCL">>,
        username => <<"root">>,
        password => <<"******">>,
        sql => ?DEFAULT_SQL,
        local_topic => <<"local/topic/#">>,
        resource_opts => #{
            worker_pool_size => 8,
            health_check_interval => ?HEALTHCHECK_INTERVAL_RAW,
            auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW,
            batch_size => ?DEFAULT_BATCH_SIZE,
            batch_time => ?DEFAULT_BATCH_TIME,
            query_mode => async,
            max_buffer_bytes => ?DEFAULT_BUFFER_BYTES
        }
    }.
%% -------------------------------------------------------------------------------------------------
%% Hocon Schema Definitions
%% @doc Hocon schema namespace for this bridge's descriptions.
namespace() -> "bridge_oracle".
%% @doc No standalone root keys; fields are reached via the bridge schema.
roots() -> [].
%% @doc Schema fields. "config" combines the bridge-local fields with
%% the shared resource options and the Oracle connector fields (minus
%% the prepared-statement fields, which this bridge manages itself).
fields("config") ->
    [
        {enable,
            hoconsc:mk(
                boolean(),
                #{desc => ?DESC("config_enable"), default => true}
            )},
        {sql,
            hoconsc:mk(
                binary(),
                #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>}
            )},
        {local_topic,
            hoconsc:mk(
                binary(),
                #{desc => ?DESC("local_topic"), default => undefined}
            )}
    ] ++ emqx_resource_schema:fields("resource_opts") ++
        (emqx_oracle_schema:fields(config) --
            emqx_connector_schema_lib:prepare_statement_fields());
fields("post") ->
    fields("post", oracle);
fields("put") ->
    fields("config");
fields("get") ->
    emqx_bridge_schema:status_fields() ++ fields("post").
%% @doc "post" payloads additionally carry the bridge type and name.
fields("post", Type) ->
    [type_field(Type), name_field() | fields("config")].
%% @doc Description lookup for schema structs; only "config" has one.
desc("config") ->
    ?DESC("desc_config");
desc(_) ->
    undefined.
%% -------------------------------------------------------------------------------------------------
%% Field helper: the bridge `type' enum field (required).
type_field(Type) ->
    {type, hoconsc:mk(hoconsc:enum([Type]), #{required => true, desc => ?DESC("desc_type")})}.
%% Field helper: the bridge `name' field (required).
name_field() ->
    {name, hoconsc:mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.

View File

@ -0,0 +1,514 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_oracle_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-import(emqx_common_test_helpers, [on_exit/1]).
-define(BRIDGE_TYPE_BIN, <<"oracle">>).
-define(APPS, [emqx_bridge, emqx_resource, emqx_rule_engine, emqx_oracle, emqx_bridge_oracle]).
-define(DATABASE, "XE").
-define(RULE_TOPIC, "mqtt/rule").
% -define(RULE_TOPIC_BIN, <<?RULE_TOPIC>>).
%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------
%% @doc CT entry point: all cases run in the `plain' (non-TLS) group.
all() ->
    [
        {group, plain}
    ].
groups() ->
    AllTCs = emqx_common_test_helpers:all(?MODULE),
    [
        {plain, AllTCs}
    ].
%% @doc Cases that need not be repeated across connection types.
only_once_tests() ->
    [t_create_via_http].
%% @doc Suite setup is a no-op; bring-up happens per group.
init_per_suite(Config) ->
    Config.
%% @doc Suite teardown: stop management API, test apps and connector.
end_per_suite(_Config) ->
    emqx_mgmt_api_test_util:end_suite(),
    ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?APPS)),
    _ = application:stop(emqx_connector),
    ok.
%% @doc Group setup: locate the Oracle server (env-overridable, defaults
%% to the toxiproxy endpoint); skip (or fail hard in CI) if unreachable.
init_per_group(plain = Type, Config) ->
    OracleHost = os:getenv("ORACLE_PLAIN_HOST", "toxiproxy.emqx.net"),
    OraclePort = list_to_integer(os:getenv("ORACLE_PLAIN_PORT", "1521")),
    ProxyName = "oracle",
    case emqx_common_test_helpers:is_tcp_server_available(OracleHost, OraclePort) of
        true ->
            Config1 = common_init_per_group(),
            [
                {proxy_name, ProxyName},
                {oracle_host, OracleHost},
                {oracle_port, OraclePort},
                {connection_type, Type}
                | Config1 ++ Config
            ];
        false ->
            case os:getenv("IS_CI") of
                "yes" ->
                    throw(no_oracle);
                _ ->
                    {skip, no_oracle}
            end
    end;
init_per_group(_Group, Config) ->
    Config.
end_per_group(Group, Config) when
    Group =:= plain
->
    common_end_per_group(Config),
    ok;
end_per_group(_Group, _Config) ->
    ok.
%% @doc Shared group bring-up: reset toxiproxy, start the test apps and
%% the management API, and generate a unique MQTT topic for the group.
common_init_per_group() ->
    ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
    ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    application:load(emqx_bridge),
    ok = emqx_common_test_helpers:start_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:start_apps(?APPS),
    {ok, _} = application:ensure_all_started(emqx_connector),
    emqx_mgmt_api_test_util:init_suite(),
    UniqueNum = integer_to_binary(erlang:unique_integer()),
    MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>,
    [
        {proxy_host, ProxyHost},
        {proxy_port, ProxyPort},
        {mqtt_topic, MQTTTopic}
    ].
%% @doc Shared group teardown: reset proxy and drop any bridges left.
common_end_per_group(Config) ->
    ProxyHost = ?config(proxy_host, Config),
    ProxyPort = ?config(proxy_port, Config),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    delete_all_bridges(),
    ok.
init_per_testcase(TestCase, Config) ->
    common_init_per_testcase(TestCase, Config).
end_per_testcase(_Testcase, Config) ->
    common_end_per_testcase(_Testcase, Config).
%% @doc Per-testcase setup: clean slate, unique topic per case, render
%% the bridge config and start snabbkaffe tracing.
common_init_per_testcase(TestCase, Config0) ->
    ct:timetrap(timer:seconds(60)),
    delete_all_bridges(),
    UniqueNum = integer_to_binary(erlang:unique_integer()),
    OracleTopic =
        <<
            (atom_to_binary(TestCase))/binary,
            UniqueNum/binary
        >>,
    ConnectionType = ?config(connection_type, Config0),
    Config = [{oracle_topic, OracleTopic} | Config0],
    {Name, ConfigString, OracleConfig} = oracle_config(
        TestCase, ConnectionType, Config
    ),
    ok = snabbkaffe:start_trace(),
    [
        {oracle_name, Name},
        {oracle_config_string, ConfigString},
        {oracle_config, OracleConfig}
        | Config
    ].
%% @doc Per-testcase teardown (skipped when the case marked itself as
%% not applicable): reset proxy, drop bridges, run the janitor, stop
%% tracing.
common_end_per_testcase(_Testcase, Config) ->
    case proplists:get_bool(skip_does_not_apply, Config) of
        true ->
            ok;
        false ->
            ProxyHost = ?config(proxy_host, Config),
            ProxyPort = ?config(proxy_port, Config),
            emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
            delete_all_bridges(),
            %% in CI, apparently this needs more time since the
            %% machines struggle with all the containers running...
            emqx_common_test_helpers:call_janitor(60_000),
            ok = snabbkaffe:stop(),
            ok
    end.
%% @doc Remove every configured bridge, whatever its type.
delete_all_bridges() ->
    lists:foreach(
        fun(#{name := Name, type := Type}) ->
            emqx_bridge:remove(Type, Name)
        end,
        emqx_bridge:list()
    ).
%%------------------------------------------------------------------------------
%% Helper fns
%%------------------------------------------------------------------------------
%% SQL used by the bridge's insert template (placeholders are message
%% fields substituted by the rule engine).
sql_insert_template_for_bridge() ->
    "INSERT INTO mqtt_test(topic, msgid, payload, retain) VALUES (${topic}, ${id}, ${payload}, ${retain})".
sql_create_table() ->
    "CREATE TABLE mqtt_test (topic VARCHAR2(255), msgid VARCHAR2(64), payload NCLOB, retain NUMBER(1))".
sql_drop_table() ->
    "DROP TABLE mqtt_test".
%% @doc Drop (best-effort) and recreate the test table.
reset_table(Config) ->
    ResourceId = resource_id(Config),
    _ = emqx_resource:simple_sync_query(ResourceId, {sql, sql_drop_table()}),
    {ok, [{proc_result, 0, _}]} = emqx_resource:simple_sync_query(
        ResourceId, {sql, sql_create_table()}
    ),
    ok.
%% @doc Drop the test table, ignoring the result.
%% NOTE(review): uses the `{query, ...}' request tag while reset_table/1
%% uses `{sql, ...}' — presumably the connector accepts both; confirm.
drop_table(Config) ->
    ResourceId = resource_id(Config),
    emqx_resource:simple_sync_query(ResourceId, {query, sql_drop_table()}),
    ok.
%% @doc Render the hocon config for the test Oracle bridge (async query
%% mode, small batches) and return {Name, ConfigString, CheckedMap}.
oracle_config(TestCase, _ConnectionType, Config) ->
    UniqueNum = integer_to_binary(erlang:unique_integer()),
    OracleHost = ?config(oracle_host, Config),
    OraclePort = ?config(oracle_port, Config),
    Name = <<
        (atom_to_binary(TestCase))/binary, UniqueNum/binary
    >>,
    ServerURL = iolist_to_binary([
        OracleHost,
        ":",
        integer_to_binary(OraclePort)
    ]),
    ConfigString =
        io_lib:format(
            "bridges.oracle.~s {\n"
            "  enable = true\n"
            "  database = \"~s\"\n"
            "  sid = \"~s\"\n"
            "  server = \"~s\"\n"
            "  username = \"system\"\n"
            "  password = \"oracle\"\n"
            "  pool_size = 1\n"
            "  sql = \"~s\"\n"
            "  resource_opts = {\n"
            "    auto_restart_interval = 5000\n"
            "    request_timeout = 30000\n"
            "    query_mode = \"async\"\n"
            "    enable_batch = true\n"
            "    batch_size = 3\n"
            "    batch_time = \"3s\"\n"
            "    worker_pool_size = 1\n"
            "  }\n"
            "}\n",
            [
                Name,
                ?DATABASE,
                ?DATABASE,
                ServerURL,
                sql_insert_template_for_bridge()
            ]
        ),
    {Name, ConfigString, parse_and_check(ConfigString, Name)}.
%% @doc Parse + schema-check the hocon string and extract this bridge's
%% config map.
parse_and_check(ConfigString, Name) ->
    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
    TypeBin = ?BRIDGE_TYPE_BIN,
    hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
    #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf,
    Config.
%% @doc Resource id of the test bridge, derived from CT config.
resource_id(Config) ->
    Type = ?BRIDGE_TYPE_BIN,
    Name = ?config(oracle_name, Config),
    emqx_bridge_resource:resource_id(Type, Name).
%% @doc Create the bridge directly through emqx_bridge, with optional
%% deep-merged overrides.
create_bridge(Config) ->
    create_bridge(Config, _Overrides = #{}).
create_bridge(Config, Overrides) ->
    Type = ?BRIDGE_TYPE_BIN,
    Name = ?config(oracle_name, Config),
    OracleConfig0 = ?config(oracle_config, Config),
    OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides),
    emqx_bridge:create(Type, Name, OracleConfig).
%% @doc Create the bridge through the management HTTP API; returns the
%% full decoded response for assertions on status codes.
create_bridge_api(Config) ->
    create_bridge_api(Config, _Overrides = #{}).
create_bridge_api(Config, Overrides) ->
    TypeBin = ?BRIDGE_TYPE_BIN,
    Name = ?config(oracle_name, Config),
    OracleConfig0 = ?config(oracle_config, Config),
    OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides),
    Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name},
    Path = emqx_mgmt_api_test_util:api_path(["bridges"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("creating bridge (via http): ~p", [Params]),
    Res =
        case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of
            {ok, {Status, Headers, Body0}} ->
                {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}};
            Error ->
                Error
        end,
    ct:pal("bridge create result: ~p", [Res]),
    Res.
%% @doc Update the existing bridge through the management HTTP API with
%% optional deep-merged overrides.
update_bridge_api(Config) ->
    update_bridge_api(Config, _Overrides = #{}).
update_bridge_api(Config, Overrides) ->
    TypeBin = ?BRIDGE_TYPE_BIN,
    Name = ?config(oracle_name, Config),
    OracleConfig0 = ?config(oracle_config, Config),
    OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides),
    BridgeId = emqx_bridge_resource:bridge_id(TypeBin, Name),
    Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name},
    Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("updating bridge (via http): ~p", [Params]),
    Res =
        case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, Params, Opts) of
            {ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])};
            Error -> Error
        end,
    ct:pal("bridge update result: ~p", [Res]),
    Res.
%% @doc Dry-run probe of the bridge config via the bridges_probe API;
%% success is a 204 with no body.
%% NOTE(review): the Overrides argument is currently ignored.
probe_bridge_api(Config) ->
    probe_bridge_api(Config, _Overrides = #{}).
probe_bridge_api(Config, _Overrides) ->
    TypeBin = ?BRIDGE_TYPE_BIN,
    Name = ?config(oracle_name, Config),
    OracleConfig = ?config(oracle_config, Config),
    Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name},
    Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("probing bridge (via http): ~p", [Params]),
    Res =
        case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of
            {ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0};
            Error -> Error
        end,
    ct:pal("bridge probe result: ~p", [Res]),
    Res.
%% @doc Create a rule-engine rule whose action is this bridge, via the
%% management HTTP API.
create_rule_and_action_http(Config) ->
    OracleName = ?config(oracle_name, Config),
    BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, OracleName),
    Params = #{
        enable => true,
        sql => <<"SELECT * FROM \"", ?RULE_TOPIC, "\"">>,
        actions => [BridgeId]
    },
    Path = emqx_mgmt_api_test_util:api_path(["rules"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    ct:pal("rule action params: ~p", [Params]),
    case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of
        {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])};
        Error -> Error
    end.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
%% @doc One synchronous insert through the bridge succeeds and reports
%% exactly one affected row.
t_sync_query(Config) ->
    ResourceId = resource_id(Config),
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge_api(Config)),
            ?retry(
                _Sleep = 1_000,
                _Attempts = 20,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),
            reset_table(Config),
            MsgId = erlang:unique_integer(),
            Params = #{
                topic => ?config(mqtt_topic, Config),
                id => MsgId,
                payload => ?config(oracle_name, Config),
                retain => true
            },
            Message = {send_message, Params},
            ?assertEqual(
                {ok, [{affected_rows, 1}]}, emqx_resource:simple_sync_query(ResourceId, Message)
            ),
            ok
        end,
        []
    ),
    ok.
%% @doc Three messages queued while the proxy is down are flushed as a
%% batch once connectivity returns; the table must end up with 3 rows.
t_batch_sync_query(Config) ->
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    ProxyName = ?config(proxy_name, Config),
    ResourceId = resource_id(Config),
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge_api(Config)),
            ?retry(
                _Sleep = 1_000,
                _Attempts = 30,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),
            reset_table(Config),
            MsgId = erlang:unique_integer(),
            Params = #{
                topic => ?config(mqtt_topic, Config),
                id => MsgId,
                payload => ?config(oracle_name, Config),
                retain => false
            },
            % Send 3 async messages while resource is down. When it comes back, these messages
            % will be delivered in sync way. If we try to send sync messages directly, it will
            % be sent async as callback_mode is set to async_if_possible.
            Message = {send_message, Params},
            emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
                ct:sleep(1000),
                emqx_resource:query(ResourceId, Message),
                emqx_resource:query(ResourceId, Message),
                emqx_resource:query(ResourceId, Message)
            end),
            ?retry(
                _Sleep = 1_000,
                _Attempts = 30,
                ?assertMatch(
                    {ok, [{result_set, _, _, [[{3}]]}]},
                    emqx_resource:simple_sync_query(
                        ResourceId, {query, "SELECT COUNT(*) FROM mqtt_test"}
                    )
                )
            ),
            ok
        end,
        []
    ),
    ok.
%% @doc Create via HTTP API, then exercise a couple of config updates
%% (batch_size, batch_time) through the update endpoint.
t_create_via_http(Config) ->
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge_api(Config)),
            %% lightweight matrix testing some configs
            ?assertMatch(
                {ok, _},
                update_bridge_api(
                    Config,
                    #{
                        <<"resource_opts">> =>
                            #{<<"batch_size">> => 4}
                    }
                )
            ),
            ?assertMatch(
                {ok, _},
                update_bridge_api(
                    Config,
                    #{
                        <<"resource_opts">> =>
                            #{<<"batch_time">> => <<"4s">>}
                    }
                )
            ),
            ok
        end,
        []
    ),
    ok.
%% @doc Lifecycle: bridge becomes connected, probing does not leak
%% atoms, and disabling the bridge emits `oracle_bridge_stopped' (once
%% per probe plus once for the real stop — three in total).
t_start_stop(Config) ->
    OracleName = ?config(oracle_name, Config),
    ResourceId = resource_id(Config),
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge(Config)),
            %% Since the connection process is async, we give it some time to
            %% stabilize and avoid flakiness.
            ?retry(
                _Sleep = 1_000,
                _Attempts = 20,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),
            %% Check that the bridge probe API doesn't leak atoms.
            ProbeRes0 = probe_bridge_api(
                Config,
                #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}}
            ),
            ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
            AtomsBefore = erlang:system_info(atom_count),
            %% Probe again; shouldn't have created more atoms.
            ProbeRes1 = probe_bridge_api(
                Config,
                #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}}
            ),
            ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
            AtomsAfter = erlang:system_info(atom_count),
            ?assertEqual(AtomsBefore, AtomsAfter),
            %% Now stop the bridge.
            ?assertMatch(
                {{ok, _}, {ok, _}},
                ?wait_async_action(
                    emqx_bridge:disable_enable(disable, ?BRIDGE_TYPE_BIN, OracleName),
                    #{?snk_kind := oracle_bridge_stopped},
                    5_000
                )
            ),
            ok
        end,
        fun(Trace) ->
            %% one for each probe, one for real
            ?assertMatch([_, _, _], ?of_kind(oracle_bridge_stopped, Trace)),
            ok
        end
    ),
    ok.
%% @doc Health check transitions: connected while reachable,
%% disconnected while the proxy link is down, then connected again once
%% the link recovers.
t_on_get_status(Config) ->
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    ProxyName = ?config(proxy_name, Config),
    ResourceId = resource_id(Config),
    ?assertMatch({ok, _}, create_bridge(Config)),
    %% Since the connection process is async, we give it some time to
    %% stabilize and avoid flakiness.
    ?retry(
        _Sleep = 1_000,
        _Attempts = 20,
        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
        ct:sleep(500),
        ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId))
    end),
    %% Check that it recovers itself.
    ?retry(
        _Sleep = 1_000,
        _Attempts = 20,
        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    ok.

19
apps/emqx_bridge_pulsar/.gitignore vendored Normal file
View File

@ -0,0 +1,19 @@
.rebar3
_*
.eunit
*.o
*.beam
*.plt
*.swp
*.swo
.erlang.cookie
ebin
log
erl_crash.dump
.rebar
logs
_build
.idea
*.iml
rebar3.crashdump
*~

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this License's text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this License's text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,30 @@
# Pulsar Data Integration Bridge
This application houses the Pulsar Producer data integration bridge
for EMQX Enterprise Edition. It provides the means to connect to
Pulsar and publish messages to it.
Currently, our Pulsar Producer library has its own `replayq` buffering
implementation, so this bridge does not require buffer workers from
`emqx_resource`. It implements the connection management and
interaction without the need for a separate connector app, since it's not
used by authentication and authorization applications.
# Documentation links
For more information on Apache Pulsar, please see its [official
site](https://pulsar.apache.org/).
# Configurations
Please see [our official
documentation](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-pulsar.html)
for more detailed info.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,2 @@
toxiproxy
pulsar

Some files were not shown because too many files have changed in this diff Show More