Merge remote-tracking branch 'upstream/master' into release-50

This commit is contained in:
Zhongwen Deng 2023-05-08 14:58:03 +08:00
commit 4f396a36a9
584 changed files with 28295 additions and 17254 deletions

View File

@ -7,6 +7,7 @@ INFLUXDB_TAG=2.5.0
TDENGINE_TAG=3.0.2.4
DYNAMO_TAG=1.21.0
CASSANDRA_TAG=3.11.6
OPENTS_TAG=9aa7f88
MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server
SQLSERVER_TAG=2019-CU19-ubuntu-20.04

View File

@ -0,0 +1,9 @@
# docker-compose service definition for an OpenTSDB instance used in
# EMQX data-bridge integration tests.
version: '3.9'

services:
  opents_server:
    # Fixed container name so test scripts and toxiproxy can reach it as "opents".
    container_name: opents
    # OPENTS_TAG is supplied by the compose project's .env file.
    image: petergrace/opentsdb-docker:${OPENTS_TAG}
    restart: always
    networks:
      - emqx_bridge

View File

@ -0,0 +1,11 @@
# docker-compose service definition for an Oracle XE instance used in
# EMQX data-bridge integration tests.
version: '3.9'

services:
  oracle_server:
    # Fixed container name so test scripts and toxiproxy can reach it as "oracle".
    container_name: oracle
    image: oracleinanutshell/oracle-xe-11g:1.0.0
    restart: always
    environment:
      # Quoted deliberately: compose `environment` values must be strings.
      # A bare `true` is parsed as a YAML boolean and is rejected by some
      # docker-compose versions ("contains true, which is an invalid type").
      ORACLE_DISABLE_ASYNCH_IO: 'true'
    networks:
      - emqx_bridge

View File

@ -0,0 +1,32 @@
# docker-compose service definition for a standalone Apache Pulsar broker
# used in EMQX data-bridge integration tests. It exposes four listeners:
# plain (6650), TLS (6651), and toxiproxy-routed plain/TLS (6652/6653).
version: '3'

services:
  pulsar:
    container_name: pulsar
    image: apachepulsar/pulsar:2.11.0
    # ports:
    #   - 6650:6650
    #   - 8080:8080
    networks:
      emqx_bridge:
    volumes:
      # Reuse the EMQX test certificates for the broker's TLS listener.
      - ../../apps/emqx/etc/certs/cert.pem:/etc/certs/server.pem
      - ../../apps/emqx/etc/certs/key.pem:/etc/certs/key.pem
      - ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.pem
    restart: always
    command:
      - bash
      - "-c"
      - |
        sed -i 's/^advertisedAddress=/#advertisedAddress=/' conf/standalone.conf
        sed -i 's/^brokerServicePort=.*/brokerServicePort=6649/' conf/standalone.conf
        sed -i 's/^bindAddress=/#bindAddress=/' conf/standalone.conf
        sed -i 's#^bindAddresses=#bindAddresses=plain:pulsar://0.0.0.0:6650,ssl:pulsar+ssl://0.0.0.0:6651,toxiproxy:pulsar://0.0.0.0:6652,toxiproxy_ssl:pulsar+ssl://0.0.0.0:6653#' conf/standalone.conf
        sed -i 's#^advertisedAddress=#advertisedAddress=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653#' conf/standalone.conf
        sed -i 's#^tlsCertificateFilePath=#tlsCertificateFilePath=/etc/certs/server.pem#' conf/standalone.conf
        sed -i 's#^tlsTrustCertsFilePath=#tlsTrustCertsFilePath=/etc/certs/ca.pem#' conf/standalone.conf
        sed -i 's#^tlsKeyFilePath=#tlsKeyFilePath=/etc/certs/key.pem#' conf/standalone.conf
        sed -i 's#^tlsProtocols=#tlsProtocols=TLSv1.3,TLSv1.2#' conf/standalone.conf
        sed -i 's#^tlsCiphers=#tlsCiphers=TLS_AES_256_GCM_SHA384#' conf/standalone.conf
        echo 'advertisedListeners=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653' >> conf/standalone.conf
        bin/pulsar standalone -nfw -nss

View File

@ -26,6 +26,7 @@ services:
- 19876:9876
- 19042:9042
- 19142:9142
- 14242:4242
command:
- "-host=0.0.0.0"
- "-config=/config/toxiproxy.json"

View File

@ -20,8 +20,8 @@ esac
{
echo "HOCON_ENV_OVERRIDE_PREFIX=EMQX_"
echo "EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s"
echo "EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10"
echo "EMQX_MQTT__RETRY_INTERVAL=2s"
echo "EMQX_MQTT__MAX_TOPIC_ALIAS=10"
echo "EMQX_AUTHORIZATION__SOURCES=[]"
echo "EMQX_AUTHORIZATION__NO_MATCH=allow"
} >> .ci/docker-compose-file/conf.cluster.env

View File

@ -101,5 +101,29 @@
"listen": "0.0.0.0:1433",
"upstream": "sqlserver:1433",
"enabled": true
},
{
"name": "opents",
"listen": "0.0.0.0:4242",
"upstream": "opents:4242",
"enabled": true
},
{
"name": "pulsar_plain",
"listen": "0.0.0.0:6652",
"upstream": "pulsar:6652",
"enabled": true
},
{
"name": "pulsar_tls",
"listen": "0.0.0.0:6653",
"upstream": "pulsar:6653",
"enabled": true
},
{
"name": "oracle",
"listen": "0.0.0.0:1521",
"upstream": "oracle:1521",
"enabled": true
}
]

View File

@ -0,0 +1,125 @@
name: Scheduled build packages
concurrency:
group: build-${{ github.event_name }}-${{ github.ref }}
cancel-in-progress: true
on:
schedule:
- cron: '0 */6 * * *'
workflow_dispatch:
jobs:
linux:
needs: prepare
runs-on: aws-${{ matrix.arch }}
# always run in builder container because the host might have the wrong OTP version etc.
# otherwise buildx.sh does not run docker if arch and os matches the target arch and os.
container:
image: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"
strategy:
fail-fast: false
matrix:
profile:
- emqx
- emqx-enterprise
branch:
- master
- release-50
otp:
- 24.3.4.2-3
arch:
- amd64
os:
- debian10
- amzn2
builder:
- 5.0-34
elixir:
- 1.13.4
defaults:
run:
shell: bash
steps:
- uses: emqx/self-hosted-cleanup-action@v1.0.3
- uses: actions/checkout@v3
with:
ref: ${{ matrix.branch }}
fetch-depth: 0
- name: build emqx packages
env:
ELIXIR: ${{ matrix.elixir }}
PROFILE: ${{ matrix.profile }}
ARCH: ${{ matrix.arch }}
run: |
set -eu
PKGTYPES="tgz pkg"
IS_ELIXIR="no"
for PKGTYPE in ${PKGTYPES};
do
./scripts/buildx.sh \
--profile "${PROFILE}" \
--pkgtype "${PKGTYPE}" \
--arch "${ARCH}" \
--elixir "${IS_ELIXIR}" \
--builder "force_host"
done
- uses: actions/upload-artifact@v3
if: success()
with:
name: ${{ matrix.profile }}
path: _packages/${{ matrix.profile }}/
- name: Send notification to Slack
uses: slackapi/slack-github-action@v1.23.0
if: failure()
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
with:
payload: |
{"text": "Scheduled build of ${{ matrix.profile }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
mac:
needs: prepare
strategy:
fail-fast: false
matrix:
profile:
- emqx
otp:
- 24.3.4.2-3
os:
- macos-12
- macos-12-arm64
runs-on: ${{ matrix.os }}
steps:
- uses: emqx/self-hosted-cleanup-action@v1.0.3
- uses: actions/checkout@v3
with:
ref: ${{ matrix.branch }}
fetch-depth: 0
- uses: ./.github/actions/package-macos
with:
profile: ${{ matrix.profile }}
otp: ${{ matrix.otp }}
os: ${{ matrix.os }}
apple_id_password: ${{ secrets.APPLE_ID_PASSWORD }}
apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
- uses: actions/upload-artifact@v3
if: success()
with:
name: ${{ matrix.profile }}
path: _packages/${{ matrix.profile }}/
- name: Send notification to Slack
uses: slackapi/slack-github-action@v1.23.0
if: failure()
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
with:
payload: |
{"text": "Scheduled build of ${{ matrix.profile }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}

View File

@ -194,15 +194,12 @@ jobs:
run: |
CID=$(docker run -d --rm -P $EMQX_IMAGE_TAG)
HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' $CID)
export EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS='yes'
./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT
docker stop $CID
- name: test two nodes cluster with proto_dist=inet_tls in docker
run: |
./scripts/test/start-two-nodes-in-docker.sh -P $EMQX_IMAGE_TAG $EMQX_IMAGE_OLD_VERSION_TAG
HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' haproxy)
# versions before 5.0.22 have hidden fields included in the API spec
export EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS='no'
./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT
# cleanup
./scripts/test/start-two-nodes-in-docker.sh -c

127
.github/workflows/performance_test.yaml vendored Normal file
View File

@ -0,0 +1,127 @@
name: Performance Test Suite
on:
push:
branches:
- 'perf/**'
schedule:
- cron: '0 1 * * *'
workflow_dispatch:
inputs:
ref:
required: false
jobs:
prepare:
runs-on: ubuntu-latest
container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu20.04
outputs:
BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }}
PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
ref: ${{ github.event.inputs.ref }}
- name: Work around https://github.com/actions/checkout/issues/766
run: |
git config --global --add safe.directory "$GITHUB_WORKSPACE"
- id: prepare
run: |
echo "EMQX_NAME=emqx" >> $GITHUB_ENV
echo "CODE_PATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV
echo "BENCH_ID=$(date --utc +%F)/emqx-$(./pkg-vsn.sh emqx)" >> $GITHUB_OUTPUT
- name: Build deb package
run: |
make ${EMQX_NAME}-pkg
./scripts/pkg-tests.sh ${EMQX_NAME}-pkg
- name: Get package file name
id: package_file
run: |
echo "PACKAGE_FILE=$(find _packages/emqx -name 'emqx-*.deb' | head -n 1 | xargs basename)" >> $GITHUB_OUTPUT
- uses: actions/upload-artifact@v3
with:
name: emqx-ubuntu20.04
path: _packages/emqx/${{ steps.package_file.outputs.PACKAGE_FILE }}
tf_emqx_perf_test:
runs-on: ubuntu-latest
needs:
- prepare
env:
TF_VAR_bench_id: ${{ needs.prepare.outputs.BENCH_ID }}
TF_VAR_package_file: ${{ needs.prepare.outputs.PACKAGE_FILE }}
TF_VAR_test_duration: 300
TF_VAR_grafana_api_key: ${{ secrets.TF_EMQX_PERF_TEST_GRAFANA_API_KEY }}
TF_AWS_REGION: eu-north-1
steps:
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PERF_TEST }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-north-1
- name: Checkout tf-emqx-performance-test
uses: actions/checkout@v3
with:
repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test
- uses: actions/download-artifact@v3
with:
name: emqx-ubuntu20.04
path: tf-emqx-performance-test/
- name: Setup Terraform
uses: hashicorp/setup-terraform@v2
with:
terraform_wrapper: false
- name: terraform init
working-directory: ./tf-emqx-performance-test
run: |
terraform init
- name: terraform apply
working-directory: ./tf-emqx-performance-test
run: |
terraform apply -auto-approve
- name: Wait for test results
timeout-minutes: 30
working-directory: ./tf-emqx-performance-test
id: test-results
run: |
sleep $TF_VAR_test_duration
until aws s3api head-object --bucket tf-emqx-performance-test --key "$TF_VAR_bench_id/DONE" > /dev/null 2>&1
do
printf '.'
sleep 10
done
echo
aws s3 cp "s3://tf-emqx-performance-test/$TF_VAR_bench_id/metrics.json" ./
aws s3 cp "s3://tf-emqx-performance-test/$TF_VAR_bench_id/stats.json" ./
echo MESSAGES_DELIVERED=$(cat metrics.json | jq '[.[]."messages.delivered"] | add') >> $GITHUB_OUTPUT
echo MESSAGES_DROPPED=$(cat metrics.json | jq '[.[]."messages.dropped"] | add') >> $GITHUB_OUTPUT
- name: Send notification to Slack
if: success()
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
with:
payload: |
{"text": "EMQX performance test completed.\nMessages delivered: ${{ steps.test-results.outputs.MESSAGES_DELIVERED }}.\nMessages dropped: ${{ steps.test-results.outputs.MESSAGES_DROPPED }}.\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
- name: terraform destroy
if: always()
working-directory: ./tf-emqx-performance-test
run: |
terraform destroy -auto-approve
- uses: actions/upload-artifact@v3
if: success()
with:
name: test-results
path: "./tf-emqx-performance-test/*.json"
- uses: actions/upload-artifact@v3
if: always()
with:
name: terraform
path: |
./tf-emqx-performance-test/.terraform
./tf-emqx-performance-test/*.tfstate

View File

@ -167,8 +167,8 @@ jobs:
--set image.pullPolicy=Never \
--set image.tag=$EMQX_TAG \
--set emqxAclConfig="" \
--set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \
--set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \
--set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \
--set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \
--set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \
--set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \
deploy/charts/${{ matrix.profile }} \
@ -185,8 +185,8 @@ jobs:
--set image.pullPolicy=Never \
--set image.tag=$EMQX_TAG \
--set emqxAclConfig="" \
--set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \
--set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \
--set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \
--set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \
--set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \
--set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \
deploy/charts/${{ matrix.profile }} \

View File

@ -14,6 +14,9 @@ on:
- e*
pull_request:
env:
IS_CI: "yes"
jobs:
build-matrix:
runs-on: ubuntu-22.04
@ -69,21 +72,14 @@ jobs:
- uses: actions/checkout@v3
with:
path: source
- uses: actions/cache@v3
id: cache
with:
path: "$HOME/.cache/rebar3/rebar3_${{ matrix.otp }}_plt"
key: rebar3-dialyzer-plt-${{ matrix.otp }}
- name: get_all_deps
working-directory: source
env:
PROFILE: ${{ matrix.profile }}
#DIAGNOSTIC: 1
run: |
make ensure-rebar3
# fetch all deps and compile
make ${{ matrix.profile }}
make static_checks
make ${{ matrix.profile }}-compile
make test-compile
cd ..
zip -ryq source.zip source/* source/.[^.]*
@ -92,6 +88,34 @@ jobs:
name: source-${{ matrix.profile }}-${{ matrix.otp }}
path: source.zip
static_checks:
needs:
- build-matrix
- prepare
runs-on: ${{ needs.build-matrix.outputs.runs-on }}
strategy:
fail-fast: false
matrix:
include: ${{ fromJson(needs.build-matrix.outputs.prepare) }}
container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04"
steps:
- uses: AutoModality/action-clean@v1
- uses: actions/download-artifact@v3
with:
name: source-${{ matrix.profile }}-${{ matrix.otp }}
path: .
- name: unzip source code
run: unzip -o -q source.zip
- uses: actions/cache@v3
with:
path: "source/emqx_dialyzer_${{ matrix.otp }}_plt"
key: rebar3-dialyzer-plt-${{ matrix.profile }}-${{ matrix.otp }}
- name: run static checks
env:
PROFILE: ${{ matrix.profile }}
working-directory: source
run: make static_checks
eunit_and_proper:
needs:
- build-matrix
@ -168,6 +192,7 @@ jobs:
REDIS_TAG: "7.0"
INFLUXDB_TAG: "2.5.0"
TDENGINE_TAG: "3.0.2.4"
OPENTS_TAG: "9aa7f88"
PROFILE: ${{ matrix.profile }}
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
run: ./scripts/ct/run.sh --ci --app ${{ matrix.app }}

View File

@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Copyright (c) 2016-2023 EMQ Technologies Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,7 +1,7 @@
Source code in this repository is variously licensed under below licenses.
For EMQX: Apache License 2.0, see APL.txt,
which applies to all source files except for lib-ee sub-directory.
For Default: Apache License 2.0, see APL.txt,
which applies to all source files except for folders applied with Business Source License.
For EMQX Enterprise (since version 5.0): Business Source License 1.1,
see lib-ee/BSL.txt, which applies to source code in lib-ee sub-directory.
see apps/emqx_bridge_kafka/BSL.txt as an example, please check license files under sub directory of apps.

View File

@ -6,8 +6,10 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2
export EMQX_DEFAULT_RUNNER = debian:11-slim
export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
export EMQX_DASHBOARD_VERSION ?= v1.2.1
export EMQX_DASHBOARD_VERSION ?= v1.2.4
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6
export EMQX_REL_FORM ?= tgz
export QUICER_DOWNLOAD_FROM_RELEASE = 1
ifeq ($(OS),Windows_NT)
@ -73,6 +75,10 @@ proper: $(REBAR)
test-compile: $(REBAR) merge-config
$(REBAR) as test compile
.PHONY: $(REL_PROFILES:%=%-compile)
$(REL_PROFILES:%=%-compile): $(REBAR) merge-config
$(REBAR) as $(@:%-compile=%) compile
.PHONY: ct
ct: $(REBAR) merge-config
@ENABLE_COVER_COMPILE=1 $(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(CT_COVER_EXPORT_PREFIX)-ct
@ -88,13 +94,17 @@ APPS=$(shell $(SCRIPTS)/find-apps.sh)
.PHONY: $(APPS:%=%-ct)
define gen-app-ct-target
$1-ct: $(REBAR)
@$(SCRIPTS)/pre-compile.sh $(PROFILE)
@ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \
--readable=$(CT_READABLE) \
--name $(CT_NODE_NAME) \
--cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1) \
--suite $(shell $(SCRIPTS)/find-suites.sh $1)
$1-ct: $(REBAR) merge-config
$(eval SUITES := $(shell $(SCRIPTS)/find-suites.sh $1))
ifneq ($(SUITES),)
@ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \
--readable=$(CT_READABLE) \
--name $(CT_NODE_NAME) \
--cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1) \
--suite $(SUITES)
else
@echo 'No suites found for $1'
endif
endef
$(foreach app,$(APPS),$(eval $(call gen-app-ct-target,$(app))))
@ -134,6 +144,11 @@ COMMON_DEPS := $(REBAR)
$(REL_PROFILES:%=%): $(COMMON_DEPS)
@$(BUILD) $(@) rel
.PHONY: compile $(PROFILES:%=compile-%)
compile: $(PROFILES:%=compile-%)
$(PROFILES:%=compile-%):
@$(BUILD) $(@:compile-%=%) apps
## Not calling rebar3 clean because
## 1. rebar3 clean relies on rebar3, meaning it reads config, fetches dependencies etc.
## 2. it's slow
@ -217,11 +232,11 @@ endef
$(foreach pt,$(PKG_PROFILES),$(eval $(call gen-pkg-target,$(pt))))
.PHONY: run
run: $(PROFILE) quickrun
run: compile-$(PROFILE) quickrun
.PHONY: quickrun
quickrun:
./_build/$(PROFILE)/rel/emqx/bin/emqx console
./dev -p $(PROFILE)
## Take the currently set PROFILE
docker:
@ -239,7 +254,6 @@ $(foreach zt,$(ALL_DOCKERS),$(eval $(call gen-docker-target,$(zt))))
.PHONY:
merge-config:
@$(SCRIPTS)/merge-config.escript
@$(SCRIPTS)/merge-i18n.escript
## elixir target is to create release packages using Elixir's Mix
.PHONY: $(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir)

View File

@ -1,4 +1,4 @@
%% This additional config file is used when the config 'cluster.proto_dis' in emqx.conf is set to 'inet_tls'.
%% This additional config file is used when the config 'cluster.proto_dist' in emqx.conf is set to 'inet_tls'.
%% Which means the EMQX nodes will connect to each other over TLS.
%% For more information about inter-broker security, see: https://docs.emqx.com/en/enterprise/v5.0/deploy/cluster/security.html

View File

@ -32,7 +32,7 @@
%% `apps/emqx/src/bpapi/README.md'
%% Community edition
-define(EMQX_RELEASE_CE, "5.0.22").
-define(EMQX_RELEASE_CE, "5.0.24").
%% Enterprise edition
-define(EMQX_RELEASE_EE, "5.0.3-rc.1").

View File

@ -57,16 +57,16 @@
-define(ERROR_CODES, [
{?BAD_USERNAME_OR_PWD, <<"Bad username or password">>},
{?BAD_API_KEY_OR_SECRET, <<"Bad API key or secret">>},
{'BAD_REQUEST', <<"Request parameters are not legal">>},
{'BAD_REQUEST', <<"Request parameters are invalid">>},
{'NOT_MATCH', <<"Conditions are not matched">>},
{'ALREADY_EXISTS', <<"Resource already existed">>},
{'BAD_CONFIG_SCHEMA', <<"Configuration data is not legal">>},
{'BAD_CONFIG_SCHEMA', <<"Configuration data is invalid">>},
{'BAD_LISTENER_ID', <<"Bad listener ID">>},
{'BAD_NODE_NAME', <<"Bad Node Name">>},
{'BAD_RPC', <<"RPC Failed. Check the cluster status and the requested node status">>},
{'BAD_TOPIC', <<"Topic syntax error, Topic needs to comply with the MQTT protocol standard">>},
{'EXCEED_LIMIT', <<"Create resources that exceed the maximum limit or minimum limit">>},
{'INVALID_PARAMETER', <<"Request parameters is not legal and exceeds the boundary value">>},
{'INVALID_PARAMETER', <<"Request parameters is invalid and exceeds the boundary value">>},
{'CONFLICT', <<"Conflicting request resources">>},
{'NO_DEFAULT_VALUE', <<"Request parameters do not use default values">>},
{'DEPENDENCY_EXISTS', <<"Resource is dependent by another resource">>},

View File

@ -27,9 +27,9 @@
{gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}},
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.6"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.1"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.38.2"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.4"}}},
{emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}},
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},

View File

@ -3,7 +3,7 @@
{id, "emqx"},
{description, "EMQX Core"},
% strict semver, bump manually!
{vsn, "5.0.23"},
{vsn, "5.0.25"},
{modules, []},
{registered, []},
{applications, [

View File

@ -30,6 +30,12 @@
stop/0
]).
%% Cluster API
-export([
cluster_nodes/1,
running_nodes/0
]).
%% PubSub API
-export([
subscribe/1,
@ -102,6 +108,18 @@ is_running() ->
_ -> true
end.
%%--------------------------------------------------------------------
%% Cluster API
%%--------------------------------------------------------------------
-spec running_nodes() -> [node()].
running_nodes() ->
mria:running_nodes().
-spec cluster_nodes(all | running | cores | stopped) -> [node()].
cluster_nodes(Type) ->
mria:cluster_nodes(Type).
%%--------------------------------------------------------------------
%% PubSub API
%%--------------------------------------------------------------------

View File

@ -42,7 +42,9 @@
get_alarms/0,
get_alarms/1,
format/1,
format/2
format/2,
safe_activate/3,
safe_deactivate/1
]).
%% gen_server callbacks
@ -57,7 +59,6 @@
%% Internal exports (RPC)
-export([
create_activate_alarm/3,
do_get_alarms/0
]).
@ -123,6 +124,9 @@ activate(Name, Details) ->
activate(Name, Details, Message) ->
gen_server:call(?MODULE, {activate_alarm, Name, Details, Message}).
safe_activate(Name, Details, Message) ->
safe_call({activate_alarm, Name, Details, Message}).
-spec ensure_deactivated(binary() | atom()) -> ok.
ensure_deactivated(Name) ->
ensure_deactivated(Name, no_details).
@ -155,6 +159,9 @@ deactivate(Name, Details) ->
deactivate(Name, Details, Message) ->
gen_server:call(?MODULE, {deactivate_alarm, Name, Details, Message}).
safe_deactivate(Name) ->
safe_call({deactivate_alarm, Name, no_details, <<"">>}).
-spec delete_all_deactivated_alarms() -> ok.
delete_all_deactivated_alarms() ->
gen_server:call(?MODULE, delete_all_deactivated_alarms).
@ -218,17 +225,12 @@ init([]) ->
{ok, #{}, get_validity_period()}.
handle_call({activate_alarm, Name, Details, Message}, _From, State) ->
Res = mria:transaction(
mria:local_content_shard(),
fun ?MODULE:create_activate_alarm/3,
[Name, Details, Message]
),
case Res of
{atomic, Alarm} ->
case create_activate_alarm(Name, Details, Message) of
{ok, Alarm} ->
do_actions(activate, Alarm, emqx:get_config([alarm, actions])),
{reply, ok, State, get_validity_period()};
{aborted, Reason} ->
{reply, Reason, State, get_validity_period()}
Err ->
{reply, Err, State, get_validity_period()}
end;
handle_call({deactivate_alarm, Name, Details, Message}, _From, State) ->
case mnesia:dirty_read(?ACTIVATED_ALARM, Name) of
@ -283,9 +285,9 @@ get_validity_period() ->
emqx:get_config([alarm, validity_period]).
create_activate_alarm(Name, Details, Message) ->
case mnesia:read(?ACTIVATED_ALARM, Name) of
case mnesia:dirty_read(?ACTIVATED_ALARM, Name) of
[#activated_alarm{name = Name}] ->
mnesia:abort({error, already_existed});
{error, already_existed};
[] ->
Alarm = #activated_alarm{
name = Name,
@ -293,8 +295,8 @@ create_activate_alarm(Name, Details, Message) ->
message = normalize_message(Name, iolist_to_binary(Message)),
activate_at = erlang:system_time(microsecond)
},
ok = mnesia:write(?ACTIVATED_ALARM, Alarm, write),
Alarm
ok = mria:dirty_write(?ACTIVATED_ALARM, Alarm),
{ok, Alarm}
end.
do_get_alarms() ->
@ -474,3 +476,19 @@ normalize_message(Name, <<"">>) ->
list_to_binary(io_lib:format("~p", [Name]));
normalize_message(_Name, Message) ->
Message.
safe_call(Req) ->
try
gen_server:call(?MODULE, Req)
catch
_:{timeout, _} = Reason ->
?SLOG(warning, #{msg => "emqx_alarm_safe_call_timeout", reason => Reason}),
{error, timeout};
_:Reason:St ->
?SLOG(error, #{
msg => "emqx_alarm_safe_call_exception",
reason => Reason,
stacktrace => St
}),
{error, Reason}
end.

View File

@ -89,7 +89,7 @@
%% Authentication Data Cache
auth_cache :: maybe(map()),
%% Quota checkers
quota :: maybe(emqx_limiter_container:limiter()),
quota :: emqx_limiter_container:limiter(),
%% Timers
timers :: #{atom() => disabled | maybe(reference())},
%% Conn State
@ -760,7 +760,7 @@ do_publish(
handle_out(disconnect, RC, Channel)
end.
ensure_quota(_, Channel = #channel{quota = undefined}) ->
ensure_quota(_, Channel = #channel{quota = infinity}) ->
Channel;
ensure_quota(PubRes, Channel = #channel{quota = Limiter}) ->
Cnt = lists:foldl(

View File

@ -18,11 +18,11 @@
-compile({no_auto_import, [get/0, get/1, put/2, erase/1]}).
-elvis([{elvis_style, god_modules, disable}]).
-include("logger.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-export([
init_load/1,
init_load/2,
init_load/3,
read_override_conf/1,
has_deprecated_file/0,
delete_override_conf_files/0,
@ -102,6 +102,8 @@
-define(ZONE_CONF_PATH(ZONE, PATH), [zones, ZONE | PATH]).
-define(LISTENER_CONF_PATH(TYPE, LISTENER, PATH), [listeners, TYPE, LISTENER | PATH]).
-define(CONFIG_NOT_FOUND_MAGIC, '$0tFound').
-export_type([
update_request/0,
raw_config/0,
@ -163,9 +165,8 @@ get(KeyPath, Default) -> do_get(?CONF, KeyPath, Default).
-spec find(emqx_utils_maps:config_key_path()) ->
{ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}.
find([]) ->
Ref = make_ref(),
case do_get(?CONF, [], Ref) of
Ref -> {not_found, []};
case do_get(?CONF, [], ?CONFIG_NOT_FOUND_MAGIC) of
?CONFIG_NOT_FOUND_MAGIC -> {not_found, []};
Res -> {ok, Res}
end;
find(KeyPath) ->
@ -178,9 +179,8 @@ find(KeyPath) ->
-spec find_raw(emqx_utils_maps:config_key_path()) ->
{ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}.
find_raw([]) ->
Ref = make_ref(),
case do_get_raw([], Ref) of
Ref -> {not_found, []};
case do_get_raw([], ?CONFIG_NOT_FOUND_MAGIC) of
?CONFIG_NOT_FOUND_MAGIC -> {not_found, []};
Res -> {ok, Res}
end;
find_raw(KeyPath) ->
@ -314,44 +314,39 @@ put_raw(KeyPath, Config) ->
%%============================================================================
init_load(SchemaMod) ->
ConfFiles = application:get_env(emqx, config_files, []),
init_load(SchemaMod, ConfFiles, #{raw_with_default => true}).
init_load(SchemaMod, Opts) when is_map(Opts) ->
ConfFiles = application:get_env(emqx, config_files, []),
init_load(SchemaMod, ConfFiles, Opts);
init_load(SchemaMod, ConfFiles) ->
init_load(SchemaMod, ConfFiles, #{raw_with_default => false}).
init_load(SchemaMod, ConfFiles).
%% @doc Initial load of the given config files.
%% NOTE: The order of the files is significant, configs from files ordered
%% in the rear of the list overrides prior values.
-spec init_load(module(), [string()] | binary() | hocon:config()) -> ok.
init_load(SchemaMod, Conf, Opts) when is_list(Conf) orelse is_binary(Conf) ->
HasDeprecatedFile = has_deprecated_file(),
RawConf = load_config_files(HasDeprecatedFile, Conf),
init_load(HasDeprecatedFile, SchemaMod, RawConf, Opts).
init_load(true, SchemaMod, RawConf, Opts) when is_map(RawConf) ->
init_load(SchemaMod, Conf) when is_list(Conf) orelse is_binary(Conf) ->
ok = save_schema_mod_and_names(SchemaMod),
%% deprecated conf will be removed in 5.1
%% Merge environment variable overrides on top
HasDeprecatedFile = has_deprecated_file(),
RawConf0 = load_config_files(HasDeprecatedFile, Conf),
warning_deprecated_root_key(RawConf0),
RawConf1 =
case HasDeprecatedFile of
true ->
overlay_v0(SchemaMod, RawConf0);
false ->
overlay_v1(SchemaMod, RawConf0)
end,
RawConf = fill_defaults_for_all_roots(SchemaMod, RawConf1),
%% check configs against the schema
{AppEnvs, CheckedConf} = check_config(SchemaMod, RawConf, #{}),
save_to_app_env(AppEnvs),
ok = save_to_config_map(CheckedConf, RawConf).
%% Merge environment variable overrides on top, then merge with overrides.
overlay_v0(SchemaMod, RawConf) when is_map(RawConf) ->
RawConfWithEnvs = merge_envs(SchemaMod, RawConf),
Overrides = read_override_confs(),
RawConfWithOverrides = hocon:deep_merge(RawConfWithEnvs, Overrides),
RawConfAll = maybe_fill_defaults(SchemaMod, RawConfWithOverrides, Opts),
%% check configs against the schema
{AppEnvs, CheckedConf} = check_config(SchemaMod, RawConfAll, #{}),
save_to_app_env(AppEnvs),
ok = save_to_config_map(CheckedConf, RawConfAll);
init_load(false, SchemaMod, RawConf, Opts) when is_map(RawConf) ->
ok = save_schema_mod_and_names(SchemaMod),
%% Merge environment variable overrides on top
RawConfWithEnvs = merge_envs(SchemaMod, RawConf),
RawConfAll = maybe_fill_defaults(SchemaMod, RawConfWithEnvs, Opts),
%% check configs against the schema
{AppEnvs, CheckedConf} = check_config(SchemaMod, RawConfAll, #{}),
save_to_app_env(AppEnvs),
ok = save_to_config_map(CheckedConf, RawConfAll).
hocon:deep_merge(RawConfWithEnvs, Overrides).
%% Merge environment variable overrides on top.
overlay_v1(SchemaMod, RawConf) when is_map(RawConf) ->
merge_envs(SchemaMod, RawConf).
%% @doc Read merged cluster + local overrides.
read_override_confs() ->
@ -360,8 +355,7 @@ read_override_confs() ->
hocon:deep_merge(ClusterOverrides, LocalOverrides).
%% keep the raw and non-raw conf has the same keys to make update raw conf easier.
%% TODO: remove raw_with_default as it's now always true.
maybe_fill_defaults(SchemaMod, RawConf0, #{raw_with_default := true}) ->
fill_defaults_for_all_roots(SchemaMod, RawConf0) ->
RootSchemas = hocon_schema:roots(SchemaMod),
%% the roots which are missing from the loaded configs
MissingRoots = lists:filtermap(
@ -380,9 +374,7 @@ maybe_fill_defaults(SchemaMod, RawConf0, #{raw_with_default := true}) ->
RawConf0,
MissingRoots
),
fill_defaults(RawConf);
maybe_fill_defaults(_SchemaMod, RawConf, _Opts) ->
RawConf.
fill_defaults(RawConf).
%% So far, this can only return true when testing.
%% e.g. when testing an app, we need to load its config first
@ -679,11 +671,9 @@ do_get_raw(Path, Default) ->
do_get(?RAW_CONF, Path, Default).
do_get(Type, KeyPath) ->
Ref = make_ref(),
Res = do_get(Type, KeyPath, Ref),
case Res =:= Ref of
true -> error({config_not_found, KeyPath});
false -> Res
case do_get(Type, KeyPath, ?CONFIG_NOT_FOUND_MAGIC) of
?CONFIG_NOT_FOUND_MAGIC -> error({config_not_found, KeyPath});
Res -> Res
end.
do_get(Type, [], Default) ->
@ -760,6 +750,22 @@ bin(Bin) when is_binary(Bin) -> Bin;
bin(Str) when is_list(Str) -> list_to_binary(Str);
bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8).
warning_deprecated_root_key(RawConf) ->
case maps:keys(RawConf) -- get_root_names() of
[] ->
ok;
Keys ->
Unknowns = string:join([binary_to_list(K) || K <- Keys], ","),
?tp(unknown_config_keys, #{unknown_config_keys => Unknowns}),
?SLOG(
warning,
#{
msg => "config_key_not_recognized",
unknown_config_keys => Unknowns
}
)
end.
conf_key(?CONF, RootName) ->
atom(RootName);
conf_key(?RAW_CONF, RootName) ->

View File

@ -111,7 +111,7 @@
listener :: {Type :: atom(), Name :: atom()},
%% Limiter
limiter :: maybe(limiter()),
limiter :: limiter(),
%% limiter buffer for overload use
limiter_buffer :: queue:queue(pending_req()),
@ -182,10 +182,8 @@
-define(ALARM_SOCK_STATS_KEYS, [send_pend, recv_cnt, recv_oct, send_cnt, send_oct]).
-define(ALARM_SOCK_OPTS_KEYS, [high_watermark, high_msgq_watermark, sndbuf, recbuf, buffer]).
%% use macro to do compile time limiter's type check
-define(LIMITER_BYTES_IN, bytes_in).
-define(LIMITER_MESSAGE_IN, message_in).
-define(EMPTY_QUEUE, {[], []}).
-define(LIMITER_BYTES_IN, bytes).
-define(LIMITER_MESSAGE_IN, messages).
-dialyzer({no_match, [info/2]}).
-dialyzer(
@ -976,55 +974,61 @@ handle_cast(Req, State) ->
list(any()),
state()
) -> _.
check_limiter(
_Needs,
Data,
WhenOk,
Msgs,
#state{limiter = infinity} = State
) ->
WhenOk(Data, Msgs, State);
check_limiter(
Needs,
Data,
WhenOk,
Msgs,
#state{
limiter = Limiter,
limiter_timer = LimiterTimer,
limiter_buffer = Cache
} = State
) when Limiter =/= undefined ->
case LimiterTimer of
undefined ->
case emqx_limiter_container:check_list(Needs, Limiter) of
{ok, Limiter2} ->
WhenOk(Data, Msgs, State#state{limiter = Limiter2});
{pause, Time, Limiter2} ->
?SLOG(debug, #{
msg => "pause_time_dueto_rate_limit",
needs => Needs,
time_in_ms => Time
}),
#state{limiter_timer = undefined, limiter = Limiter} = State
) ->
case emqx_limiter_container:check_list(Needs, Limiter) of
{ok, Limiter2} ->
WhenOk(Data, Msgs, State#state{limiter = Limiter2});
{pause, Time, Limiter2} ->
?SLOG(debug, #{
msg => "pause_time_dueto_rate_limit",
needs => Needs,
time_in_ms => Time
}),
Retry = #retry{
types = [Type || {_, Type} <- Needs],
data = Data,
next = WhenOk
},
Retry = #retry{
types = [Type || {_, Type} <- Needs],
data = Data,
next = WhenOk
},
Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2),
Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2),
TRef = start_timer(Time, limit_timeout),
TRef = start_timer(Time, limit_timeout),
{ok, State#state{
limiter = Limiter3,
limiter_timer = TRef
}};
{drop, Limiter2} ->
{ok, State#state{limiter = Limiter2}}
end;
_ ->
%% if there has a retry timer,
%% cache the operation and execute it after the retry is over
%% the maximum length of the cache queue is equal to the active_n
New = #pending_req{need = Needs, data = Data, next = WhenOk},
{ok, State#state{limiter_buffer = queue:in(New, Cache)}}
{ok, State#state{
limiter = Limiter3,
limiter_timer = TRef
}};
{drop, Limiter2} ->
{ok, State#state{limiter = Limiter2}}
end;
check_limiter(_, Data, WhenOk, Msgs, State) ->
WhenOk(Data, Msgs, State).
check_limiter(
Needs,
Data,
WhenOk,
_Msgs,
#state{limiter_buffer = Cache} = State
) ->
%% if there has a retry timer,
%% cache the operation and execute it after the retry is over
%% the maximum length of the cache queue is equal to the active_n
New = #pending_req{need = Needs, data = Data, next = WhenOk},
{ok, State#state{limiter_buffer = queue:in(New, Cache)}}.
%% try to perform a retry
-spec retry_limiter(state()) -> _.

View File

@ -22,7 +22,7 @@
%% API
-export([
make_token_bucket_limiter/2,
make_local_limiter/2,
make_ref_limiter/2,
check/2,
consume/2,
@ -32,12 +32,11 @@
make_future/1,
available/1
]).
-export_type([token_bucket_limiter/0]).
-export_type([local_limiter/0]).
%% a token bucket limiter with a limiter server's bucket reference
%% the number of tokens currently available
-type token_bucket_limiter() :: #{
%% a token bucket limiter which may or not contains a reference to another limiter,
%% and can be used in a client alone
-type local_limiter() :: #{
tokens := non_neg_integer(),
rate := decimal(),
capacity := decimal(),
@ -58,12 +57,12 @@
retry_ctx =>
undefined
%% the retry context
| retry_context(token_bucket_limiter()),
| retry_context(local_limiter()),
%% allow to add other keys
atom => any()
}.
%% a limiter server's bucket reference
%% a limiter instance which only contains a reference to another limiter(bucket)
-type ref_limiter() :: #{
max_retry_time := non_neg_integer(),
failure_strategy := failure_strategy(),
@ -88,7 +87,7 @@
}.
-type bucket() :: emqx_limiter_bucket_ref:bucket_ref().
-type limiter() :: token_bucket_limiter() | ref_limiter() | infinity.
-type limiter() :: local_limiter() | ref_limiter() | infinity.
-type millisecond() :: non_neg_integer().
-type pause_type() :: pause | partial.
@ -116,7 +115,7 @@
rate := decimal(),
initial := non_neg_integer(),
low_watermark := non_neg_integer(),
capacity := decimal(),
burst := decimal(),
divisible := boolean(),
max_retry_time := non_neg_integer(),
failure_strategy := failure_strategy()
@ -134,12 +133,13 @@
%% API
%%--------------------------------------------------------------------
%%@doc create a limiter
-spec make_token_bucket_limiter(limiter_bucket_cfg(), bucket()) -> _.
make_token_bucket_limiter(Cfg, Bucket) ->
-spec make_local_limiter(limiter_bucket_cfg(), bucket()) -> _.
make_local_limiter(Cfg, Bucket) ->
Cfg#{
tokens => emqx_limiter_server:get_initial_val(Cfg),
lasttime => ?NOW,
bucket => Bucket
bucket => Bucket,
capacity => emqx_limiter_schema:calc_capacity(Cfg)
}.
%%@doc create a limiter server's reference
@ -311,8 +311,8 @@ on_failure(throw, Limiter) ->
Message = io_lib:format("limiter consume failed, limiter:~p~n", [Limiter]),
erlang:throw({rate_check_fail, Message}).
-spec do_check_with_parent_limiter(pos_integer(), token_bucket_limiter()) ->
inner_check_result(token_bucket_limiter()).
-spec do_check_with_parent_limiter(pos_integer(), local_limiter()) ->
inner_check_result(local_limiter()).
do_check_with_parent_limiter(
Need,
#{
@ -335,7 +335,7 @@ do_check_with_parent_limiter(
)
end.
-spec do_reset(pos_integer(), token_bucket_limiter()) -> inner_check_result(token_bucket_limiter()).
-spec do_reset(pos_integer(), local_limiter()) -> inner_check_result(local_limiter()).
do_reset(
Need,
#{

View File

@ -23,6 +23,7 @@
%% API
-export([
new/3,
infinity_bucket/0,
check/3,
try_restore/2,
available/1
@ -58,6 +59,10 @@ new(Counter, Index, Rate) ->
rate => Rate
}.
-spec infinity_bucket() -> bucket_ref().
infinity_bucket() ->
infinity.
%% @doc check tokens
-spec check(pos_integer(), bucket_ref(), Disivisble :: boolean()) ->
HasToken ::

View File

@ -34,16 +34,18 @@
-export_type([container/0, check_result/0]).
-type container() :: #{
limiter_type() => undefined | limiter(),
%% the retry context of the limiter
retry_key() =>
undefined
| retry_context()
| future(),
%% the retry context of the container
retry_ctx := undefined | any()
}.
-type container() ::
infinity
| #{
limiter_type() => undefined | limiter(),
%% the retry context of the limiter
retry_key() =>
undefined
| retry_context()
| future(),
%% the retry context of the container
retry_ctx := undefined | any()
}.
-type future() :: pos_integer().
-type limiter_id() :: emqx_limiter_schema:limiter_id().
@ -78,7 +80,20 @@ get_limiter_by_types(Id, Types, BucketCfgs) ->
{ok, Limiter} = emqx_limiter_server:connect(Id, Type, BucketCfgs),
add_new(Type, Limiter, Acc)
end,
lists:foldl(Init, #{retry_ctx => undefined}, Types).
Container = lists:foldl(Init, #{retry_ctx => undefined}, Types),
case
lists:all(
fun(Type) ->
maps:get(Type, Container) =:= infinity
end,
Types
)
of
true ->
infinity;
_ ->
Container
end.
-spec add_new(limiter_type(), limiter(), container()) -> container().
add_new(Type, Limiter, Container) ->
@ -89,11 +104,15 @@ add_new(Type, Limiter, Container) ->
%% @doc check the specified limiter
-spec check(pos_integer(), limiter_type(), container()) -> check_result().
check(_Need, _Type, infinity) ->
{ok, infinity};
check(Need, Type, Container) ->
check_list([{Need, Type}], Container).
%% @doc check multiple limiters
-spec check_list(list({pos_integer(), limiter_type()}), container()) -> check_result().
check_list(_Need, infinity) ->
{ok, infinity};
check_list([{Need, Type} | T], Container) ->
Limiter = maps:get(Type, Container),
case emqx_htb_limiter:check(Need, Limiter) of
@ -121,11 +140,15 @@ check_list([], Container) ->
%% @doc retry the specified limiter
-spec retry(limiter_type(), container()) -> check_result().
retry(_Type, infinity) ->
{ok, infinity};
retry(Type, Container) ->
retry_list([Type], Container).
%% @doc retry multiple limiters
-spec retry_list(list(limiter_type()), container()) -> check_result().
retry_list(_Types, infinity) ->
{ok, infinity};
retry_list([Type | T], Container) ->
Key = ?RETRY_KEY(Type),
case Container of

View File

@ -30,6 +30,12 @@
post_config_update/5
]).
-export([
find_root/1,
insert_root/2,
delete_root/1
]).
-export([
start_server/1,
start_server/2,
@ -62,6 +68,7 @@
-define(UID(Id, Type), {Id, Type}).
-define(TAB, emqx_limiter_counters).
-define(ROOT_ID, root).
%%--------------------------------------------------------------------
%% API
@ -104,9 +111,25 @@ insert_bucket(Id, Type, Bucket) ->
).
-spec delete_bucket(limiter_id(), limiter_type()) -> true.
delete_bucket(Type, Id) ->
delete_bucket(Id, Type) ->
ets:delete(?TAB, ?UID(Id, Type)).
-spec find_root(limiter_type()) ->
{ok, bucket_ref()} | undefined.
find_root(Type) ->
find_bucket(?ROOT_ID, Type).
-spec insert_root(
limiter_type(),
bucket_ref()
) -> boolean().
insert_root(Type, Bucket) ->
insert_bucket(?ROOT_ID, Type, Bucket).
-spec delete_root(limiter_type()) -> true.
delete_root(Type) ->
delete_bucket(?ROOT_ID, Type).
post_config_update([limiter], _Config, NewConf, _OldConf, _AppEnvs) ->
Types = lists:delete(client, maps:keys(NewConf)),
_ = [on_post_config_update(Type, NewConf) || Type <- Types],

View File

@ -24,6 +24,7 @@
fields/1,
to_rate/1,
to_capacity/1,
to_burst/1,
default_period/0,
to_burst_rate/1,
to_initial/1,
@ -31,20 +32,22 @@
get_bucket_cfg_path/2,
desc/1,
types/0,
infinity_value/0
calc_capacity/1,
extract_with_type/2,
default_client_config/0
]).
-define(KILOBYTE, 1024).
-define(BUCKET_KEYS, [
{bytes_in, bucket_infinity},
{message_in, bucket_infinity},
{connection, bucket_limit},
{message_routing, bucket_infinity}
-define(LISTENER_BUCKET_KEYS, [
bytes,
messages,
connection,
message_routing
]).
-type limiter_type() ::
bytes_in
| message_in
bytes
| messages
| connection
| message_routing
%% internal limiter for unclassified resources
@ -54,8 +57,10 @@
-type bucket_name() :: atom().
-type rate() :: infinity | float().
-type burst_rate() :: 0 | float().
%% this is a compatible type for the deprecated field and type `capacity`.
-type burst() :: burst_rate().
%% the capacity of the token bucket
-type capacity() :: non_neg_integer().
%%-type capacity() :: non_neg_integer().
%% initial capacity of the token bucket
-type initial() :: non_neg_integer().
-type bucket_path() :: list(atom()).
@ -72,13 +77,13 @@
-typerefl_from_string({rate/0, ?MODULE, to_rate}).
-typerefl_from_string({burst_rate/0, ?MODULE, to_burst_rate}).
-typerefl_from_string({capacity/0, ?MODULE, to_capacity}).
-typerefl_from_string({burst/0, ?MODULE, to_burst}).
-typerefl_from_string({initial/0, ?MODULE, to_initial}).
-reflect_type([
rate/0,
burst_rate/0,
capacity/0,
burst/0,
initial/0,
failure_strategy/0,
bucket_name/0
@ -90,27 +95,34 @@
namespace() -> limiter.
roots() -> [limiter].
roots() ->
[
{limiter,
hoconsc:mk(hoconsc:ref(?MODULE, limiter), #{
importance => ?IMPORTANCE_HIDDEN
})}
].
fields(limiter) ->
[
{Type,
?HOCON(?R_REF(node_opts), #{
desc => ?DESC(Type),
default => #{}
importance => ?IMPORTANCE_HIDDEN,
aliases => alias_of_type(Type)
})}
|| Type <- types()
] ++
[
%% This is an undocumented feature, and it won't be support anymore
{client,
?HOCON(
?R_REF(client_fields),
#{
desc => ?DESC(client),
default => maps:from_list([
{erlang:atom_to_binary(Type), #{}}
|| Type <- types()
])
importance => ?IMPORTANCE_HIDDEN,
required => {false, recursively},
deprecated => {since, "5.0.25"}
}
)}
];
@ -124,30 +136,18 @@ fields(node_opts) ->
})}
];
fields(client_fields) ->
[
{Type,
?HOCON(?R_REF(client_opts), #{
desc => ?DESC(Type),
default => #{}
})}
|| Type <- types()
];
fields(bucket_infinity) ->
[
{rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"infinity">>})},
{capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => <<"infinity">>})},
{initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})}
];
fields(bucket_limit) ->
[
{rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"1000/s">>})},
{capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => <<"1000">>})},
{initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})}
];
client_fields(types());
fields(bucket_opts) ->
fields_of_bucket(<<"infinity">>);
fields(client_opts) ->
[
{rate, ?HOCON(rate(), #{default => <<"infinity">>, desc => ?DESC(rate)})},
{initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})},
{initial,
?HOCON(initial(), #{
default => <<"0">>,
desc => ?DESC(initial),
importance => ?IMPORTANCE_HIDDEN
})},
%% low_watermark add for emqx_channel and emqx_session
%% both modules consume first and then check
%% so we need to use this value to prevent excessive consumption
@ -157,20 +157,24 @@ fields(client_opts) ->
initial(),
#{
desc => ?DESC(low_watermark),
default => <<"0">>
default => <<"0">>,
importance => ?IMPORTANCE_HIDDEN
}
)},
{capacity,
?HOCON(capacity(), #{
desc => ?DESC(client_bucket_capacity),
default => <<"infinity">>
{burst,
?HOCON(burst(), #{
desc => ?DESC(burst),
default => <<"0">>,
importance => ?IMPORTANCE_HIDDEN,
aliases => [capacity]
})},
{divisible,
?HOCON(
boolean(),
#{
desc => ?DESC(divisible),
default => false
default => false,
importance => ?IMPORTANCE_HIDDEN
}
)},
{max_retry_time,
@ -178,7 +182,8 @@ fields(client_opts) ->
emqx_schema:duration(),
#{
desc => ?DESC(max_retry_time),
default => <<"10s">>
default => <<"10s">>,
importance => ?IMPORTANCE_HIDDEN
}
)},
{failure_strategy,
@ -186,25 +191,24 @@ fields(client_opts) ->
failure_strategy(),
#{
desc => ?DESC(failure_strategy),
default => force
default => force,
importance => ?IMPORTANCE_HIDDEN
}
)}
];
fields(listener_fields) ->
bucket_fields(?BUCKET_KEYS, listener_client_fields);
composite_bucket_fields(?LISTENER_BUCKET_KEYS, listener_client_fields);
fields(listener_client_fields) ->
client_fields(?BUCKET_KEYS);
client_fields(?LISTENER_BUCKET_KEYS);
fields(Type) ->
bucket_field(Type).
simple_bucket_field(Type).
desc(limiter) ->
"Settings for the rate limiter.";
desc(node_opts) ->
"Settings for the limiter of the node level.";
desc(bucket_infinity) ->
desc(bucket_opts) ->
"Settings for the bucket.";
desc(bucket_limit) ->
desc(bucket_infinity);
desc(client_opts) ->
"Settings for the client in bucket level.";
desc(client_fields) ->
@ -230,19 +234,37 @@ get_bucket_cfg_path(Type, BucketName) ->
[limiter, Type, bucket, BucketName].
types() ->
[bytes_in, message_in, connection, message_routing, internal].
[bytes, messages, connection, message_routing, internal].
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
calc_capacity(#{rate := infinity}) ->
infinity;
calc_capacity(#{rate := Rate, burst := Burst}) ->
erlang:floor(1000 * Rate / default_period()) + Burst.
%% `infinity` to `infinity_value` rules:
%% 1. all infinity capacity will change to infinity_value
%% 2. if the rate of global and bucket both are `infinity`,
%% use `infinity_value` as bucket rate. see `emqx_limiter_server:get_counter_rate/2`
infinity_value() ->
%% 1 TB
1099511627776.
extract_with_type(_Type, undefined) ->
undefined;
extract_with_type(Type, #{client := ClientCfg} = BucketCfg) ->
BucketVal = maps:find(Type, BucketCfg),
ClientVal = maps:find(Type, ClientCfg),
merge_client_bucket(Type, ClientVal, BucketVal);
extract_with_type(Type, BucketCfg) ->
BucketVal = maps:find(Type, BucketCfg),
merge_client_bucket(Type, undefined, BucketVal).
%% Since the client configuration can be absent and be a undefined value,
%% but we must need some basic settings to control the behaviour of the limiter,
%% so here add this helper function to generate a default setting.
%% This is a temporary workaround until we found a better way to simplify.
default_client_config() ->
#{
rate => infinity,
initial => 0,
low_watermark => 0,
burst => 0,
divisible => false,
max_retry_time => timer:seconds(10),
failure_strategy => force
}.
%%--------------------------------------------------------------------
%% Internal functions
@ -251,6 +273,17 @@ infinity_value() ->
to_burst_rate(Str) ->
to_rate(Str, false, true).
%% The default value of `capacity` is `infinity`,
%% but we have changed `capacity` to `burst` which should not be `infinity`
%% and its default value is 0, so we should convert `infinity` to 0
to_burst(Str) ->
case to_rate(Str, true, true) of
{ok, infinity} ->
{ok, 0};
Any ->
Any
end.
%% rate can be: 10 10MB 10MB/s 10MB/2s infinity
%% e.g. the bytes_in regex tree is:
%%
@ -335,7 +368,7 @@ to_quota(Str, Regex) ->
{match, [Quota, ""]} ->
{ok, erlang:list_to_integer(Quota)};
{match, ""} ->
{ok, infinity_value()};
{ok, infinity};
_ ->
{error, Str}
end
@ -350,26 +383,33 @@ apply_unit("mb", Val) -> Val * ?KILOBYTE * ?KILOBYTE;
apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE;
apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit).
bucket_field(Type) when is_atom(Type) ->
fields(bucket_infinity) ++
%% A bucket with only one type
simple_bucket_field(Type) when is_atom(Type) ->
fields(bucket_opts) ++
[
{client,
?HOCON(
?R_REF(?MODULE, client_opts),
#{
desc => ?DESC(client),
required => false
required => {false, recursively},
importance => importance_of_type(Type),
aliases => alias_of_type(Type)
}
)}
].
bucket_fields(Types, ClientRef) ->
%% A bucket with multi types
composite_bucket_fields(Types, ClientRef) ->
[
{Type,
?HOCON(?R_REF(?MODULE, Opts), #{
?HOCON(?R_REF(?MODULE, bucket_opts), #{
desc => ?DESC(?MODULE, Type),
required => false
required => {false, recursively},
importance => importance_of_type(Type),
aliases => alias_of_type(Type)
})}
|| {Type, Opts} <- Types
|| Type <- Types
] ++
[
{client,
@ -377,17 +417,62 @@ bucket_fields(Types, ClientRef) ->
?R_REF(?MODULE, ClientRef),
#{
desc => ?DESC(client),
required => false
required => {false, recursively}
}
)}
].
fields_of_bucket(Default) ->
[
{rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => Default})},
{burst,
?HOCON(burst(), #{
desc => ?DESC(burst),
default => <<"0">>,
importance => ?IMPORTANCE_HIDDEN,
aliases => [capacity]
})},
{initial,
?HOCON(initial(), #{
default => <<"0">>,
desc => ?DESC(initial),
importance => ?IMPORTANCE_HIDDEN
})}
].
client_fields(Types) ->
[
{Type,
?HOCON(?R_REF(client_opts), #{
desc => ?DESC(Type),
required => false
required => false,
importance => importance_of_type(Type),
aliases => alias_of_type(Type)
})}
|| {Type, _} <- Types
|| Type <- Types
].
importance_of_type(interval) ->
?IMPORTANCE_HIDDEN;
importance_of_type(message_routing) ->
?IMPORTANCE_HIDDEN;
importance_of_type(connection) ->
?IMPORTANCE_HIDDEN;
importance_of_type(_) ->
?DEFAULT_IMPORTANCE.
alias_of_type(messages) ->
[message_in];
alias_of_type(bytes) ->
[bytes_in];
alias_of_type(_) ->
[].
merge_client_bucket(Type, {ok, ClientVal}, {ok, BucketVal}) ->
#{Type => BucketVal, client => #{Type => ClientVal}};
merge_client_bucket(Type, {ok, ClientVal}, _) ->
#{client => #{Type => ClientVal}};
merge_client_bucket(Type, _, {ok, BucketVal}) ->
#{Type => BucketVal};
merge_client_bucket(_, _, _) ->
undefined.

View File

@ -59,7 +59,8 @@
burst := rate(),
%% token generation interval(second)
period := pos_integer(),
produced := float()
produced := float(),
correction := emqx_limiter_decimal:zero_or_float()
}.
-type bucket() :: #{
@ -98,6 +99,7 @@
%% minimum coefficient for overloaded limiter
-define(OVERLOAD_MIN_ALLOC, 0.3).
-define(COUNTER_SIZE, 8).
-define(ROOT_COUNTER_IDX, 1).
-export_type([index/0]).
-import(emqx_limiter_decimal, [add/2, sub/2, mul/2, put_to_counter/3]).
@ -110,40 +112,24 @@
-spec connect(
limiter_id(),
limiter_type(),
bucket_name() | #{limiter_type() => bucket_name() | undefined}
hocons:config() | undefined
) ->
{ok, emqx_htb_limiter:limiter()} | {error, _}.
%% If no bucket path is set in config, there will be no limit
connect(_Id, _Type, undefined) ->
{ok, emqx_htb_limiter:make_infinity_limiter()};
%% undefined is the default situation, no limiter setting by default
connect(Id, Type, undefined) ->
create_limiter(Id, Type, undefined, undefined);
connect(Id, Type, #{rate := _} = Cfg) ->
create_limiter(Id, Type, maps:get(client, Cfg, undefined), Cfg);
connect(Id, Type, Cfg) ->
case find_limiter_cfg(Type, Cfg) of
{undefined, _} ->
{ok, emqx_htb_limiter:make_infinity_limiter()};
{
#{
rate := BucketRate,
capacity := BucketSize
},
#{rate := CliRate, capacity := CliSize} = ClientCfg
} ->
case emqx_limiter_manager:find_bucket(Id, Type) of
{ok, Bucket} ->
{ok,
if
CliRate < BucketRate orelse CliSize < BucketSize ->
emqx_htb_limiter:make_token_bucket_limiter(ClientCfg, Bucket);
true ->
emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket)
end};
undefined ->
?SLOG(error, #{msg => "bucket_not_found", type => Type, id => Id}),
{error, invalid_bucket}
end
end.
create_limiter(
Id,
Type,
emqx_utils_maps:deep_get([client, Type], Cfg, undefined),
maps:get(Type, Cfg, undefined)
).
-spec add_bucket(limiter_id(), limiter_type(), hocons:config() | undefined) -> ok.
add_bucket(_Id, _Type, undefine) ->
add_bucket(_Id, _Type, undefined) ->
ok;
add_bucket(Id, Type, Cfg) ->
?CALL(Type, {add_bucket, Id, Cfg}).
@ -281,7 +267,8 @@ handle_info(Info, State) ->
Reason :: normal | shutdown | {shutdown, term()} | term(),
State :: term()
) -> any().
terminate(_Reason, _State) ->
terminate(_Reason, #{type := Type}) ->
emqx_limiter_manager:delete_root(Type),
ok.
%%--------------------------------------------------------------------
@ -336,10 +323,14 @@ oscillation(
oscillate(Interval),
Ordereds = get_ordered_buckets(Buckets),
{Alloced, Buckets2} = transverse(Ordereds, Flow, 0.0, Buckets),
maybe_burst(State#{
buckets := Buckets2,
root := Root#{produced := Produced + Alloced}
}).
State2 = maybe_adjust_root_tokens(
State#{
buckets := Buckets2,
root := Root#{produced := Produced + Alloced}
},
Alloced
),
maybe_burst(State2).
%% @doc horizontal spread
-spec transverse(
@ -412,6 +403,24 @@ get_ordered_buckets(Buckets) ->
Buckets
).
-spec maybe_adjust_root_tokens(state(), float()) -> state().
maybe_adjust_root_tokens(#{root := #{rate := infinity}} = State, _Alloced) ->
State;
maybe_adjust_root_tokens(#{root := #{rate := Rate}} = State, Alloced) when Alloced >= Rate ->
State;
maybe_adjust_root_tokens(#{root := #{rate := Rate} = Root, counter := Counter} = State, Alloced) ->
InFlow = Rate - Alloced,
Token = counters:get(Counter, ?ROOT_COUNTER_IDX),
case Token >= Rate of
true ->
State;
_ ->
Available = erlang:min(Rate - Token, InFlow),
{Inc, Root2} = emqx_limiter_correction:add(Available, Root),
counters:add(Counter, ?ROOT_COUNTER_IDX, Inc),
State#{root := Root2}
end.
-spec maybe_burst(state()) -> state().
maybe_burst(
#{
@ -475,12 +484,16 @@ init_tree(Type) when is_atom(Type) ->
Cfg = emqx:get_config([limiter, Type]),
init_tree(Type, Cfg).
init_tree(Type, Cfg) ->
init_tree(Type, #{rate := Rate} = Cfg) ->
Counter = counters:new(?COUNTER_SIZE, [write_concurrency]),
RootBucket = emqx_limiter_bucket_ref:new(Counter, ?ROOT_COUNTER_IDX, Rate),
emqx_limiter_manager:insert_root(Type, RootBucket),
#{
type => Type,
root => make_root(Cfg),
counter => counters:new(?COUNTER_SIZE, [write_concurrency]),
index => 0,
counter => Counter,
%% The first slot is reserved for the root
index => ?ROOT_COUNTER_IDX,
buckets => #{}
}.
@ -490,15 +503,18 @@ make_root(#{rate := Rate, burst := Burst}) ->
rate => Rate,
burst => Burst,
period => emqx_limiter_schema:default_period(),
produced => 0.0
produced => 0.0,
correction => 0
}.
do_add_bucket(Id, #{rate := Rate, capacity := Capacity} = Cfg, #{buckets := Buckets} = State) ->
do_add_bucket(_Id, #{rate := infinity}, #{root := #{rate := infinity}} = State) ->
State;
do_add_bucket(Id, #{rate := Rate} = Cfg, #{buckets := Buckets} = State) ->
case maps:get(Id, Buckets, undefined) of
undefined ->
make_bucket(Id, Cfg, State);
Bucket ->
Bucket2 = Bucket#{rate := Rate, capacity := Capacity},
Bucket2 = Bucket#{rate := Rate, capacity := emqx_limiter_schema:calc_capacity(Cfg)},
State#{buckets := Buckets#{Id := Bucket2}}
end.
@ -509,7 +525,7 @@ make_bucket(Id, Cfg, #{index := ?COUNTER_SIZE} = State) ->
});
make_bucket(
Id,
#{rate := Rate, capacity := Capacity} = Cfg,
#{rate := Rate} = Cfg,
#{type := Type, counter := Counter, index := Index, buckets := Buckets} = State
) ->
NewIndex = Index + 1,
@ -519,7 +535,7 @@ make_bucket(
rate => Rate,
obtained => Initial,
correction => 0,
capacity => Capacity,
capacity => emqx_limiter_schema:calc_capacity(Cfg),
counter => Counter,
index => NewIndex
},
@ -541,19 +557,14 @@ do_del_bucket(Id, #{type := Type, buckets := Buckets} = State) ->
get_initial_val(
#{
initial := Initial,
rate := Rate,
capacity := Capacity
rate := Rate
}
) ->
%% initial will nevner be infinity(see the emqx_limiter_schema)
InfVal = emqx_limiter_schema:infinity_value(),
if
Initial > 0 ->
Initial;
Rate =/= infinity ->
erlang:min(Rate, Capacity);
Capacity =/= infinity andalso Capacity =/= InfVal ->
Capacity;
Rate;
true ->
0
end.
@ -567,21 +578,61 @@ call(Type, Msg) ->
gen_server:call(Pid, Msg)
end.
find_limiter_cfg(Type, #{rate := _} = Cfg) ->
{Cfg, find_client_cfg(Type, maps:get(client, Cfg, undefined))};
find_limiter_cfg(Type, Cfg) ->
{
maps:get(Type, Cfg, undefined),
find_client_cfg(Type, emqx_utils_maps:deep_get([client, Type], Cfg, undefined))
}.
create_limiter(Id, Type, #{rate := Rate} = ClientCfg, BucketCfg) when Rate =/= infinity ->
create_limiter_with_client(Id, Type, ClientCfg, BucketCfg);
create_limiter(Id, Type, _, BucketCfg) ->
create_limiter_without_client(Id, Type, BucketCfg).
find_client_cfg(Type, BucketCfg) ->
NodeCfg = emqx:get_config([limiter, client, Type], undefined),
merge_client_cfg(NodeCfg, BucketCfg).
%% create a limiter with the client-level configuration
create_limiter_with_client(Id, Type, ClientCfg, BucketCfg) ->
case find_referenced_bucket(Id, Type, BucketCfg) of
false ->
{ok, emqx_htb_limiter:make_local_limiter(ClientCfg, infinity)};
{ok, Bucket, RefCfg} ->
create_limiter_with_ref(Bucket, ClientCfg, RefCfg);
Error ->
Error
end.
merge_client_cfg(undefined, BucketCfg) ->
BucketCfg;
merge_client_cfg(NodeCfg, undefined) ->
NodeCfg;
merge_client_cfg(NodeCfg, BucketCfg) ->
maps:merge(NodeCfg, BucketCfg).
%% create a limiter only with the referenced configuration
create_limiter_without_client(Id, Type, BucketCfg) ->
case find_referenced_bucket(Id, Type, BucketCfg) of
false ->
{ok, emqx_htb_limiter:make_infinity_limiter()};
{ok, Bucket, RefCfg} ->
ClientCfg = emqx_limiter_schema:default_client_config(),
create_limiter_with_ref(Bucket, ClientCfg, RefCfg);
Error ->
Error
end.
create_limiter_with_ref(
Bucket,
#{rate := CliRate} = ClientCfg,
#{rate := RefRate}
) when CliRate < RefRate ->
{ok, emqx_htb_limiter:make_local_limiter(ClientCfg, Bucket)};
create_limiter_with_ref(Bucket, ClientCfg, _) ->
{ok, emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket)}.
%% this is a listener(server)-level reference
find_referenced_bucket(Id, Type, #{rate := Rate} = Cfg) when Rate =/= infinity ->
case emqx_limiter_manager:find_bucket(Id, Type) of
{ok, Bucket} ->
{ok, Bucket, Cfg};
_ ->
?SLOG(error, #{msg => "bucket not found", type => Type, id => Id}),
{error, invalid_bucket}
end;
%% this is a node-level reference
find_referenced_bucket(Id, Type, _) ->
case emqx:get_config([limiter, Type], undefined) of
#{rate := infinity} ->
false;
undefined ->
?SLOG(error, #{msg => "invalid limiter type", type => Type, id => Id}),
{error, invalid_bucket};
NodeCfg ->
{ok, Bucket} = emqx_limiter_manager:find_root(Type),
{ok, Bucket, NodeCfg}
end.

View File

@ -494,7 +494,7 @@ esockd_opts(ListenerId, Type, Opts0) ->
Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0),
Limiter = limiter(Opts0),
Opts2 =
case maps:get(connection, Limiter, undefined) of
case emqx_limiter_schema:extract_with_type(connection, Limiter) of
undefined ->
Opts1;
BucketCfg ->
@ -639,7 +639,7 @@ zone(Opts) ->
maps:get(zone, Opts, undefined).
limiter(Opts) ->
maps:get(limiter, Opts, #{}).
maps:get(limiter, Opts, undefined).
add_limiter_bucket(Id, #{limiter := Limiter}) ->
maps:fold(

View File

@ -37,7 +37,6 @@
max_qos_allowed => emqx_types:qos(),
retain_available => boolean(),
wildcard_subscription => boolean(),
subscription_identifiers => boolean(),
shared_subscription => boolean(),
exclusive_subscription => boolean()
}.
@ -58,18 +57,17 @@
exclusive_subscription
]).
-define(DEFAULT_CAPS, #{
max_packet_size => ?MAX_PACKET_SIZE,
max_clientid_len => ?MAX_CLIENTID_LEN,
max_topic_alias => ?MAX_TOPIC_AlIAS,
max_topic_levels => ?MAX_TOPIC_LEVELS,
max_qos_allowed => ?QOS_2,
retain_available => true,
wildcard_subscription => true,
subscription_identifiers => true,
shared_subscription => true,
exclusive_subscription => false
}).
-define(DEFAULT_CAPS_KEYS, [
max_packet_size,
max_clientid_len,
max_topic_alias,
max_topic_levels,
max_qos_allowed,
retain_available,
wildcard_subscription,
shared_subscription,
exclusive_subscription
]).
-spec check_pub(
emqx_types:zone(),
@ -88,7 +86,7 @@ check_pub(Zone, Flags) when is_map(Flags) ->
error ->
Flags
end,
maps:with(?PUBCAP_KEYS, get_caps(Zone))
get_caps(?PUBCAP_KEYS, Zone)
).
do_check_pub(#{topic_levels := Levels}, #{max_topic_levels := Limit}) when
@ -111,7 +109,7 @@ do_check_pub(_Flags, _Caps) ->
) ->
ok_or_error(emqx_types:reason_code()).
check_sub(ClientInfo = #{zone := Zone}, Topic, SubOpts) ->
Caps = maps:with(?SUBCAP_KEYS, get_caps(Zone)),
Caps = get_caps(?SUBCAP_KEYS, Zone),
Flags = lists:foldl(
fun
(max_topic_levels, Map) ->
@ -152,10 +150,12 @@ do_check_sub(_Flags, _Caps, _, _) ->
ok.
get_caps(Zone) ->
lists:foldl(
fun({K, V}, Acc) ->
Acc#{K => emqx_config:get_zone_conf(Zone, [mqtt, K], V)}
end,
#{},
maps:to_list(?DEFAULT_CAPS)
get_caps(?DEFAULT_CAPS_KEYS, Zone).
get_caps(Keys, Zone) ->
maps:with(
Keys,
maps:merge(
emqx_config:get([mqtt]),
emqx_config:get_zone_conf(Zone, [mqtt])
)
).

View File

@ -43,7 +43,12 @@
-type ip_port() :: tuple() | integer().
-type cipher() :: map().
-type port_number() :: 1..65536.
-type server_parse_option() :: #{default_port => port_number(), no_port => boolean()}.
-type server_parse_option() :: #{
default_port => port_number(),
no_port => boolean(),
supported_schemes => [string()],
default_scheme => string()
}.
-type url() :: binary().
-type json_binary() :: binary().
@ -62,6 +67,12 @@
-typerefl_from_string({url/0, emqx_schema, to_url}).
-typerefl_from_string({json_binary/0, emqx_schema, to_json_binary}).
-type parsed_server() :: #{
hostname := string(),
port => port_number(),
scheme => string()
}.
-export([
validate_heap_size/1,
user_lookup_fun_tr/2,
@ -172,7 +183,7 @@ roots(high) ->
}
)},
{?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, authentication(global)},
%% NOTE: authorization schema here is only to keep emqx app prue
%% NOTE: authorization schema here is only to keep emqx app pure
%% the full schema for EMQX node is injected in emqx_conf_schema.
{?EMQX_AUTHORIZATION_CONFIG_ROOT_NAME,
sc(
@ -924,15 +935,17 @@ fields("mqtt_quic_listener") ->
string(),
#{
%% TODO: deprecated => {since, "5.1.0"}
desc => ?DESC(fields_mqtt_quic_listener_certfile)
desc => ?DESC(fields_mqtt_quic_listener_certfile),
importance => ?IMPORTANCE_HIDDEN
}
)},
{"keyfile",
sc(
string(),
%% TODO: deprecated => {since, "5.1.0"}
#{
desc => ?DESC(fields_mqtt_quic_listener_keyfile)
%% TODO: deprecated => {since, "5.1.0"}
desc => ?DESC(fields_mqtt_quic_listener_keyfile),
importance => ?IMPORTANCE_HIDDEN
}
)},
{"ciphers", ciphers_schema(quic)},
@ -1008,7 +1021,10 @@ fields("mqtt_quic_listener") ->
duration_ms(),
#{
default => 0,
desc => ?DESC(fields_mqtt_quic_listener_idle_timeout)
desc => ?DESC(fields_mqtt_quic_listener_idle_timeout),
%% TODO: deprecated => {since, "5.1.0"}
%% deprecated, use idle_timeout_ms instead
importance => ?IMPORTANCE_HIDDEN
}
)},
{"idle_timeout_ms",
@ -1022,7 +1038,10 @@ fields("mqtt_quic_listener") ->
duration_ms(),
#{
default => <<"10s">>,
desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout)
desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout),
%% TODO: deprecated => {since, "5.1.0"}
%% use handshake_idle_timeout_ms
importance => ?IMPORTANCE_HIDDEN
}
)},
{"handshake_idle_timeout_ms",
@ -1036,7 +1055,10 @@ fields("mqtt_quic_listener") ->
duration_ms(),
#{
default => 0,
desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval)
desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval),
%% TODO: deprecated => {since, "5.1.0"}
%% use keep_alive_interval_ms instead
importance => ?IMPORTANCE_HIDDEN
}
)},
{"keep_alive_interval_ms",
@ -1504,10 +1526,8 @@ fields("broker") ->
sc(
boolean(),
#{
%% TODO: deprecated => {since, "5.1.0"}
%% in favor of session message re-dispatch at termination
%% we will stop supporting dispatch acks for shared
%% subscriptions.
deprecated => {since, "5.1.0"},
importance => ?IMPORTANCE_HIDDEN,
default => false,
desc => ?DESC(broker_shared_dispatch_ack_enabled)
}
@ -2171,7 +2191,7 @@ common_ssl_opts_schema(Defaults) ->
D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end,
Df = fun(Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end,
Collection = maps:get(versions, Defaults, tls_all_available),
AvailableVersions = default_tls_vsns(Collection),
DefaultVersions = default_tls_vsns(Collection),
[
{"cacertfile",
sc(
@ -2233,6 +2253,7 @@ common_ssl_opts_schema(Defaults) ->
example => <<"">>,
format => <<"password">>,
desc => ?DESC(common_ssl_opts_schema_password),
importance => ?IMPORTANCE_LOW,
converter => fun password_converter/2
}
)},
@ -2240,9 +2261,10 @@ common_ssl_opts_schema(Defaults) ->
sc(
hoconsc:array(typerefl:atom()),
#{
default => AvailableVersions,
default => DefaultVersions,
desc => ?DESC(common_ssl_opts_schema_versions),
validator => fun(Inputs) -> validate_tls_versions(AvailableVersions, Inputs) end
importance => ?IMPORTANCE_HIGH,
validator => fun(Input) -> validate_tls_versions(Collection, Input) end
}
)},
{"ciphers", ciphers_schema(D("ciphers"))},
@ -2428,10 +2450,14 @@ client_ssl_opts_schema(Defaults) ->
)}
].
%% @doc All TLS/DTLS protocol versions available on this OTP release for the
%% given collection key.
%% NOTE(review): a filtering arity-1 `default_tls_vsns/1` also appears later in
%% this file; this clause set returns the unfiltered list — confirm which one
%% is current before relying on it.
default_tls_vsns(dtls_all_available) ->
    emqx_tls_lib:available_versions(dtls);
default_tls_vsns(tls_all_available) ->
    emqx_tls_lib:available_versions(tls).
%% @doc Every TLS/DTLS protocol version this OTP release can negotiate,
%% keyed by the schema collection atom.
available_tls_vsns(dtls_all_available) ->
    emqx_tls_lib:available_versions(dtls);
available_tls_vsns(tls_all_available) ->
    emqx_tls_lib:available_versions(tls).
%% @doc Protocol versions considered outdated; these are subtracted from the
%% available versions when computing schema defaults.
outdated_tls_vsn(tls_all_available) -> ['tlsv1.1', tlsv1];
outdated_tls_vsn(dtls_all_available) -> [dtlsv1].
%% @doc Default TLS/DTLS versions for the schema: everything available on this
%% OTP release minus the versions flagged as outdated.
default_tls_vsns(Key) ->
    Available = available_tls_vsns(Key),
    Available -- outdated_tls_vsn(Key).
-spec ciphers_schema(quic | dtls_all_available | tls_all_available | undefined) ->
hocon_schema:field_schema().
@ -2740,7 +2766,8 @@ validate_ciphers(Ciphers) ->
Bad -> {error, {bad_ciphers, Bad}}
end.
validate_tls_versions(AvailableVersions, Versions) ->
validate_tls_versions(Collection, Versions) ->
AvailableVersions = available_tls_vsns(Collection),
case lists:filter(fun(V) -> not lists:member(V, AvailableVersions) end, Versions) of
[] -> ok;
Vs -> {error, {unsupported_tls_versions, Vs}}
@ -2913,7 +2940,7 @@ servers_validator(Opts, Required) ->
%% `no_port': by default it's `false', when set to `true',
%% a `throw' exception is raised if the port is found.
-spec parse_server(undefined | string() | binary(), server_parse_option()) ->
{string(), port_number()}.
undefined | parsed_server().
parse_server(Str, Opts) ->
case parse_servers(Str, Opts) of
undefined ->
@ -2927,7 +2954,7 @@ parse_server(Str, Opts) ->
%% @doc Parse comma separated `host[:port][,host[:port]]' endpoints
%% into a list of `{Host, Port}' tuples or just `Host' string.
-spec parse_servers(undefined | string() | binary(), server_parse_option()) ->
[{string(), port_number()}].
undefined | [parsed_server()].
parse_servers(undefined, _Opts) ->
%% should not parse 'undefined' as string,
%% not to throw exception either,
@ -2973,6 +3000,9 @@ split_host_port(Str) ->
do_parse_server(Str, Opts) ->
DefaultPort = maps:get(default_port, Opts, undefined),
NotExpectingPort = maps:get(no_port, Opts, false),
DefaultScheme = maps:get(default_scheme, Opts, undefined),
SupportedSchemes = maps:get(supported_schemes, Opts, []),
NotExpectingScheme = (not is_list(DefaultScheme)) andalso length(SupportedSchemes) =:= 0,
case is_integer(DefaultPort) andalso NotExpectingPort of
true ->
%% either provide a default port from schema,
@ -2981,22 +3011,129 @@ do_parse_server(Str, Opts) ->
false ->
ok
end,
case is_list(DefaultScheme) andalso (not lists:member(DefaultScheme, SupportedSchemes)) of
true ->
%% inconsistent schema
error("bad_schema");
false ->
ok
end,
%% do not split with space, there should be no space allowed between host and port
case string:tokens(Str, ":") of
[Hostname, Port] ->
NotExpectingPort andalso throw("not_expecting_port_number"),
{check_hostname(Hostname), parse_port(Port)};
[Hostname] ->
case is_integer(DefaultPort) of
true ->
{check_hostname(Hostname), DefaultPort};
false when NotExpectingPort ->
check_hostname(Hostname);
false ->
throw("missing_port_number")
end;
_ ->
throw("bad_host_port")
Tokens = string:tokens(Str, ":"),
Context = #{
not_expecting_port => NotExpectingPort,
not_expecting_scheme => NotExpectingScheme,
default_port => DefaultPort,
default_scheme => DefaultScheme,
opts => Opts
},
check_server_parts(Tokens, Context).
%% @doc Validate the ":"-split tokens of one server endpoint against the
%% parsing context built by `do_parse_server/2', and return a parsed-server
%% map containing `scheme' / `hostname' / `port' keys as applicable.
%% Throws a string reason (caught by the schema layer) on any violation.
%%
%% Clause: "scheme://host:port" — three tokens; the "//" prefix rides on the
%% hostname token because the input was split on ":".
check_server_parts([Scheme, "//" ++ Hostname, Port], Context) ->
    #{
        not_expecting_scheme := NotExpectingScheme,
        not_expecting_port := NotExpectingPort,
        opts := Opts
    } = Context,
    %% Both a scheme and a port are present in the input, so both must be allowed.
    NotExpectingPort andalso throw("not_expecting_port_number"),
    NotExpectingScheme andalso throw("not_expecting_scheme"),
    #{
        scheme => check_scheme(Scheme, Opts),
        hostname => check_hostname(Hostname),
        port => parse_port(Port)
    };
%% Clause: "scheme://host" — the port falls back to the schema default, if any.
check_server_parts([Scheme, "//" ++ Hostname], Context) ->
    #{
        not_expecting_scheme := NotExpectingScheme,
        not_expecting_port := NotExpectingPort,
        default_port := DefaultPort,
        opts := Opts
    } = Context,
    NotExpectingScheme andalso throw("not_expecting_scheme"),
    case is_integer(DefaultPort) of
        true ->
            #{
                scheme => check_scheme(Scheme, Opts),
                hostname => check_hostname(Hostname),
                port => DefaultPort
            };
        false when NotExpectingPort ->
            %% Ports are explicitly not expected: omit the `port' key entirely.
            #{
                scheme => check_scheme(Scheme, Opts),
                hostname => check_hostname(Hostname)
            };
        false ->
            throw("missing_port_number")
    end;
%% Clause: "host:port" — the scheme falls back to the schema default, if any.
check_server_parts([Hostname, Port], Context) ->
    #{
        not_expecting_port := NotExpectingPort,
        default_scheme := DefaultScheme
    } = Context,
    NotExpectingPort andalso throw("not_expecting_port_number"),
    case is_list(DefaultScheme) of
        false ->
            #{
                hostname => check_hostname(Hostname),
                port => parse_port(Port)
            };
        true ->
            #{
                scheme => DefaultScheme,
                hostname => check_hostname(Hostname),
                port => parse_port(Port)
            }
    end;
%% Clause: bare "host" — both the scheme and the port must either have a
%% schema default or be explicitly not expected.
check_server_parts([Hostname], Context) ->
    #{
        not_expecting_scheme := NotExpectingScheme,
        not_expecting_port := NotExpectingPort,
        default_port := DefaultPort,
        default_scheme := DefaultScheme
    } = Context,
    case is_integer(DefaultPort) orelse NotExpectingPort of
        true ->
            ok;
        false ->
            throw("missing_port_number")
    end,
    case is_list(DefaultScheme) orelse NotExpectingScheme of
        true ->
            ok;
        false ->
            throw("missing_scheme")
    end,
    %% Only the keys that have defaults to fill in are included in the result.
    case {is_integer(DefaultPort), is_list(DefaultScheme)} of
        {true, true} ->
            #{
                scheme => DefaultScheme,
                hostname => check_hostname(Hostname),
                port => DefaultPort
            };
        {true, false} ->
            #{
                hostname => check_hostname(Hostname),
                port => DefaultPort
            };
        {false, true} ->
            #{
                scheme => DefaultScheme,
                hostname => check_hostname(Hostname)
            };
        {false, false} ->
            #{hostname => check_hostname(Hostname)}
    end;
%% Any other token count is a malformed endpoint.
check_server_parts(_Tokens, _Context) ->
    throw("bad_host_port").
%% @doc Return `Str' when it is one of the schemes listed under
%% `supported_schemes' in `Opts' (default: none); otherwise throw
%% "unsupported_scheme".
check_scheme(Str, Opts) ->
    Supported = maps:get(supported_schemes, Opts, []),
    case lists:member(Str, Supported) of
        false ->
            throw("unsupported_scheme");
        true ->
            Str
    end.
check_hostname(Str) ->

View File

@ -165,7 +165,7 @@ strategy(Group) ->
-spec ack_enabled() -> boolean().
ack_enabled() ->
emqx:get_config([broker, shared_dispatch_ack_enabled]).
emqx:get_config([broker, shared_dispatch_ack_enabled], false).
do_dispatch(SubPid, _Group, Topic, Msg, _Type) when SubPid =:= self() ->
%% Deadlock otherwise
@ -181,7 +181,7 @@ do_dispatch(SubPid, _Group, Topic, Msg, retry) ->
do_dispatch(SubPid, Group, Topic, Msg, fresh) ->
case ack_enabled() of
true ->
%% FIXME: replace with `emqx_shared_sub_proto:dispatch_with_ack' in 5.2
%% TODO: delete this clase after 5.1.0
do_dispatch_with_ack(SubPid, Group, Topic, Msg);
false ->
send(SubPid, Topic, {deliver, Topic, Msg})

View File

@ -27,7 +27,7 @@ format(
#{level := debug, meta := Meta = #{trace_tag := Tag}, msg := Msg},
#{payload_encode := PEncode}
) ->
Time = calendar:system_time_to_rfc3339(erlang:system_time(second)),
Time = calendar:system_time_to_rfc3339(erlang:system_time(microsecond), [{unit, microsecond}]),
ClientId = to_iolist(maps:get(clientid, Meta, "")),
Peername = maps:get(peername, Meta, ""),
MetaBin = format_meta(Meta, PEncode),

View File

@ -90,7 +90,7 @@
listener :: {Type :: atom(), Name :: atom()},
%% Limiter
limiter :: maybe(container()),
limiter :: container(),
%% cache operation when overload
limiter_cache :: queue:queue(cache()),
@ -121,8 +121,8 @@
-define(SOCK_STATS, [recv_oct, recv_cnt, send_oct, send_cnt]).
-define(ENABLED(X), (X =/= undefined)).
-define(LIMITER_BYTES_IN, bytes_in).
-define(LIMITER_MESSAGE_IN, message_in).
-define(LIMITER_BYTES_IN, bytes).
-define(LIMITER_MESSAGE_IN, messages).
-dialyzer({no_match, [info/2]}).
-dialyzer({nowarn_function, [websocket_init/1]}).
@ -579,54 +579,61 @@ handle_timeout(TRef, TMsg, State) ->
list(any()),
state()
) -> state().
check_limiter(
_Needs,
Data,
WhenOk,
Msgs,
#state{limiter = infinity} = State
) ->
WhenOk(Data, Msgs, State);
check_limiter(
Needs,
Data,
WhenOk,
Msgs,
#state{
limiter = Limiter,
limiter_timer = LimiterTimer,
limiter_cache = Cache
} = State
#state{limiter_timer = undefined, limiter = Limiter} = State
) ->
case LimiterTimer of
undefined ->
case emqx_limiter_container:check_list(Needs, Limiter) of
{ok, Limiter2} ->
WhenOk(Data, Msgs, State#state{limiter = Limiter2});
{pause, Time, Limiter2} ->
?SLOG(debug, #{
msg => "pause_time_due_to_rate_limit",
needs => Needs,
time_in_ms => Time
}),
case emqx_limiter_container:check_list(Needs, Limiter) of
{ok, Limiter2} ->
WhenOk(Data, Msgs, State#state{limiter = Limiter2});
{pause, Time, Limiter2} ->
?SLOG(debug, #{
msg => "pause_time_due_to_rate_limit",
needs => Needs,
time_in_ms => Time
}),
Retry = #retry{
types = [Type || {_, Type} <- Needs],
data = Data,
next = WhenOk
},
Retry = #retry{
types = [Type || {_, Type} <- Needs],
data = Data,
next = WhenOk
},
Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2),
Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2),
TRef = start_timer(Time, limit_timeout),
TRef = start_timer(Time, limit_timeout),
enqueue(
{active, false},
State#state{
sockstate = blocked,
limiter = Limiter3,
limiter_timer = TRef
}
);
{drop, Limiter2} ->
{ok, State#state{limiter = Limiter2}}
end;
_ ->
New = #cache{need = Needs, data = Data, next = WhenOk},
State#state{limiter_cache = queue:in(New, Cache)}
end.
enqueue(
{active, false},
State#state{
sockstate = blocked,
limiter = Limiter3,
limiter_timer = TRef
}
);
{drop, Limiter2} ->
{ok, State#state{limiter = Limiter2}}
end;
check_limiter(
Needs,
Data,
WhenOk,
_Msgs,
#state{limiter_cache = Cache} = State
) ->
New = #cache{need = Needs, data = Data, next = WhenOk},
State#state{limiter_cache = queue:in(New, Cache)}.
-spec retry_limiter(state()) -> state().
retry_limiter(#state{limiter = Limiter} = State) ->

View File

@ -148,6 +148,14 @@ t_run_hook(_) ->
?assertEqual(3, emqx:run_fold_hook(foldl_filter2_hook, [arg], 1)),
?assertEqual(2, emqx:run_fold_hook(foldl_filter2_hook, [arg1], 1)).
%% Verify the cluster node listing APIs on a single-node cluster: every
%% "alive" flavour returns just this node, and no node is reported stopped.
t_cluster_nodes(_) ->
    Self = [node()],
    ?assertEqual(Self, emqx:running_nodes()),
    ?assertEqual(Self, emqx:cluster_nodes(running)),
    ?assertEqual(Self, emqx:cluster_nodes(all)),
    ?assertEqual(Self, emqx:cluster_nodes(cores)),
    ?assertEqual([], emqx:cluster_nodes(stopped)).
%%--------------------------------------------------------------------
%% Hook fun
%%--------------------------------------------------------------------

View File

@ -186,7 +186,7 @@ t_session_taken(_) ->
false
end
end,
6000
15_000
),
Publish(),

View File

@ -162,8 +162,7 @@ limiter_conf() ->
Make = fun() ->
#{
burst => 0,
rate => infinity,
capacity => infinity
rate => infinity
}
end,
@ -172,7 +171,7 @@ limiter_conf() ->
Acc#{Name => Make()}
end,
#{},
[bytes_in, message_in, message_routing, connection, internal]
[bytes, messages, message_routing, connection, internal]
).
stats_conf() ->
@ -268,13 +267,14 @@ t_chan_info(_) ->
t_chan_caps(_) ->
?assertMatch(
#{
exclusive_subscription := false,
max_packet_size := 1048576,
max_clientid_len := 65535,
max_qos_allowed := 2,
max_topic_alias := 65535,
max_topic_levels := Level,
retain_available := true,
shared_subscription := true,
subscription_identifiers := true,
wildcard_subscription := true
} when is_integer(Level),
emqx_channel:caps(channel())
@ -1258,7 +1258,7 @@ limiter_cfg() ->
Client = #{
rate => 5,
initial => 0,
capacity => 5,
burst => 0,
low_watermark => 1,
divisible => false,
max_retry_time => timer:seconds(5),
@ -1270,7 +1270,7 @@ limiter_cfg() ->
}.
bucket_cfg() ->
#{rate => 10, initial => 0, capacity => 10}.
#{rate => 10, initial => 0, burst => 0}.
add_bucket() ->
emqx_limiter_server:add_bucket(?MODULE, message_routing, bucket_cfg()).

View File

@ -55,12 +55,12 @@
is_tcp_server_available/2,
is_tcp_server_available/3,
load_config/2,
load_config/3,
not_wait_mqtt_payload/1,
read_schema_configs/2,
render_config_file/2,
wait_for/4,
wait_mqtt_payload/1
wait_mqtt_payload/1,
select_free_port/1
]).
-export([
@ -280,6 +280,7 @@ app_schema(App) ->
mustache_vars(App, Opts) ->
ExtraMustacheVars = maps:get(extra_mustache_vars, Opts, #{}),
Defaults = #{
node_cookie => atom_to_list(erlang:get_cookie()),
platform_data_dir => app_path(App, "data"),
platform_etc_dir => app_path(App, "etc")
},
@ -497,18 +498,14 @@ copy_certs(emqx_conf, Dest0) ->
copy_certs(_, _) ->
ok.
load_config(SchemaModule, Config, Opts) ->
load_config(SchemaModule, Config) ->
ConfigBin =
case is_map(Config) of
true -> emqx_utils_json:encode(Config);
false -> Config
end,
ok = emqx_config:delete_override_conf_files(),
ok = emqx_config:init_load(SchemaModule, ConfigBin, Opts),
ok.
load_config(SchemaModule, Config) ->
load_config(SchemaModule, Config, #{raw_with_default => true}).
ok = emqx_config:init_load(SchemaModule, ConfigBin).
-spec is_all_tcp_servers_available(Servers) -> Result when
Servers :: [{Host, Port}],
@ -684,6 +681,7 @@ start_slave(Name, Opts) when is_map(Opts) ->
SlaveMod = maps:get(peer_mod, Opts, ct_slave),
Node = node_name(Name),
put_peer_mod(Node, SlaveMod),
Cookie = atom_to_list(erlang:get_cookie()),
DoStart =
fun() ->
case SlaveMod of
@ -695,7 +693,11 @@ start_slave(Name, Opts) when is_map(Opts) ->
{monitor_master, true},
{init_timeout, 20_000},
{startup_timeout, 20_000},
{erl_flags, erl_flags()}
{erl_flags, erl_flags()},
{env, [
{"HOCON_ENV_OVERRIDE_PREFIX", "EMQX_"},
{"EMQX_NODE__COOKIE", Cookie}
]}
]
);
slave ->
@ -782,6 +784,7 @@ setup_node(Node, Opts) when is_map(Opts) ->
load_apps => LoadApps,
apps => Apps,
env => Env,
join_to => JoinTo,
start_apps => StartApps
}
]
@ -1259,3 +1262,34 @@ get_or_spawn_janitor() ->
%% @doc Register `Fun' to be run at test teardown: the callback is pushed onto
%% the janitor process (spawned on first use by `get_or_spawn_janitor/0').
on_exit(Fun) ->
    Janitor = get_or_spawn_janitor(),
    ok = emqx_test_janitor:push_on_exit_callback(Janitor, Fun).
%%-------------------------------------------------------------------------------
%% Select a free transport port from the OS
%%-------------------------------------------------------------------------------
%% @doc get unused port from OS
%% @doc Ask the OS for a currently unused port for the given transport.
%% ssl reuses the tcp probe; quic (UDP-based) reuses the udp probe.
-spec select_free_port(tcp | udp | ssl | quic) -> inets:port_number().
select_free_port(tcp) ->
    select_free_port(gen_tcp, listen);
select_free_port(udp) ->
    select_free_port(gen_udp, open);
select_free_port(ssl) ->
    select_free_port(tcp);
select_free_port(quic) ->
    select_free_port(udp).
%% Open an ephemeral (port-0) socket, read back the port the OS assigned,
%% then close it so the caller can bind it. Note: the port may be re-taken
%% between close and the caller's bind — callers treat this as best-effort.
select_free_port(GenModule, Fun) when
    GenModule == gen_tcp orelse
        GenModule == gen_udp
->
    {ok, S} = GenModule:Fun(0, [{reuseaddr, true}]),
    {ok, Port} = inet:port(S),
    ok = GenModule:close(S),
    case os:type() of
        {unix, darwin} ->
            %% in MacOS, still get address_in_use after close port
            timer:sleep(500);
        _ ->
            skip
    end,
    ct:pal("Select free OS port: ~p", [Port]),
    Port.

View File

@ -19,6 +19,7 @@
-compile(export_all).
-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
all() -> emqx_common_test_helpers:all(?MODULE).
@ -50,7 +51,6 @@ t_fill_default_values(_) ->
},
<<"route_batch_clean">> := false,
<<"session_locking_strategy">> := quorum,
<<"shared_dispatch_ack_enabled">> := false,
<<"shared_subscription_strategy">> := round_robin
}
},
@ -78,3 +78,21 @@ t_init_load(_Config) ->
?assertEqual(ExpectRootNames, lists:sort(emqx_config:get_root_names())),
?assertMatch({ok, #{raw_config := 128}}, emqx:update_config([mqtt, max_topic_levels], 128)),
ok = file:delete(DeprecatedFile).
%% Loading a config with unrecognized top-level keys must not fail, but must
%% emit an `unknown_config_keys' trace event naming the offending keys.
%% NOTE(review): "rook" looks like a typo for "root"; kept as-is since CT
%% discovers test cases by function name.
t_unknown_rook_keys(_) ->
    ?check_trace(
        #{timetrap => 1000},
        begin
            ok = emqx_config:init_load(
                emqx_schema, <<"test_1 {}\n test_2 {sub = 100}\n listeners {}">>
            ),
            %% wait for the asynchronous warning about the unknown keys
            ?block_until(#{?snk_kind := unknown_config_keys})
        end,
        fun(Trace) ->
            ?assertMatch(
                [#{unknown_config_keys := "test_1,test_2"}],
                ?of_kind(unknown_config_keys, Trace)
            )
        end
    ),
    ok.

View File

@ -38,8 +38,6 @@ init_per_suite(Config) ->
ok = meck:new(emqx_cm, [passthrough, no_history, no_link]),
ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end),
ok = meck:expect(emqx_cm, mark_channel_disconnected, fun(_) -> ok end),
%% Meck Limiter
ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
%% Meck Pd
ok = meck:new(emqx_pd, [passthrough, no_history, no_link]),
%% Meck Metrics
@ -67,7 +65,6 @@ end_per_suite(_Config) ->
ok = meck:unload(emqx_transport),
catch meck:unload(emqx_channel),
ok = meck:unload(emqx_cm),
ok = meck:unload(emqx_htb_limiter),
ok = meck:unload(emqx_pd),
ok = meck:unload(emqx_metrics),
ok = meck:unload(emqx_hooks),
@ -421,20 +418,28 @@ t_ensure_rate_limit(_) ->
{ok, [], State1} = emqx_connection:check_limiter([], [], WhenOk, [], st(#{limiter => Limiter})),
?assertEqual(Limiter, emqx_connection:info(limiter, State1)),
ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
ok = meck:expect(
emqx_htb_limiter,
make_infinity_limiter,
fun() -> non_infinity end
),
ok = meck:expect(
emqx_htb_limiter,
check,
fun(_, Client) -> {pause, 3000, undefined, Client} end
),
{ok, State2} = emqx_connection:check_limiter(
[{1000, bytes_in}],
[{1000, bytes}],
[],
WhenOk,
[],
st(#{limiter => Limiter})
st(#{limiter => init_limiter()})
),
meck:unload(emqx_htb_limiter),
ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
?assertNotEqual(undefined, emqx_connection:info(limiter_timer, State2)).
t_activate_socket(_) ->
@ -495,6 +500,7 @@ t_get_conn_info(_) ->
end).
t_oom_shutdown(init, Config) ->
ok = snabbkaffe:stop(),
ok = snabbkaffe:start_trace(),
ok = meck:new(emqx_utils, [non_strict, passthrough, no_history, no_link]),
meck:expect(
@ -703,31 +709,32 @@ handle_call(Pid, Call, St) -> emqx_connection:handle_call(Pid, Call, St).
-define(LIMITER_ID, 'tcp:default').
init_limiter() ->
emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], limiter_cfg()).
emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes, messages], limiter_cfg()).
limiter_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
Cfg = bucket_cfg(),
Client = #{
rate => Infinity,
Client = client_cfg(),
#{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}.
%% Bucket-level limiter settings for the suite: unlimited rate, empty initial
%% token count, and no burst allowance.
bucket_cfg() ->
    #{
        rate => infinity,
        initial => 0,
        burst => 0
    }.
client_cfg() ->
#{
rate => infinity,
initial => 0,
capacity => Infinity,
burst => 0,
low_watermark => 1,
divisible => false,
max_retry_time => timer:seconds(5),
failure_strategy => force
},
#{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}.
bucket_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
#{rate => Infinity, initial => 0, capacity => Infinity}.
}.
add_bucket() ->
Cfg = bucket_cfg(),
emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg),
emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg).
emqx_limiter_server:add_bucket(?LIMITER_ID, bytes, Cfg),
emqx_limiter_server:add_bucket(?LIMITER_ID, messages, Cfg).
del_bucket() ->
emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in),
emqx_limiter_server:del_bucket(?LIMITER_ID, message_in).
emqx_limiter_server:del_bucket(?LIMITER_ID, bytes),
emqx_limiter_server:del_bucket(?LIMITER_ID, messages).

View File

@ -35,6 +35,7 @@ all() ->
init_per_suite(Config) ->
application:load(emqx),
{ok, _} = application:ensure_all_started(ssl),
emqx_config:save_schema_mod_and_names(emqx_schema),
emqx_common_test_helpers:boot_modules(all),
Config.
@ -328,7 +329,15 @@ drain_msgs() ->
%% @doc Reset the OTP ssl application's CRL cache by killing the registered
%% `ssl_manager' process, waiting for its 'DOWN', and then waiting for its
%% supervised restart before returning.
clear_crl_cache() ->
    %% reset the CRL cache
    Ref = monitor(process, whereis(ssl_manager)),
    exit(whereis(ssl_manager), kill),
    receive
        {'DOWN', Ref, process, _, _} ->
            ok
    after 1_000 ->
        ct:fail("ssl_manager didn't die")
    end,
    %% the manager is restarted by its supervisor; block until it is back
    ensure_ssl_manager_alive(),
    ok.
force_cacertfile(Cacertfile) ->
@ -382,7 +391,6 @@ setup_crl_options(Config, #{is_cached := IsCached} = Opts) ->
false ->
%% ensure cache is empty
clear_crl_cache(),
ct:sleep(200),
ok
end,
drain_msgs(),
@ -459,6 +467,13 @@ of_kinds(Trace0, Kinds0) ->
Trace0
).
%% Poll (200 ms x 50 attempts) until a process is registered as `ssl_manager'
%% again; crashes the test if the retries are exhausted.
ensure_ssl_manager_alive() ->
    ?retry(
        _Sleep0 = 200,
        _Attempts0 = 50,
        true = is_pid(whereis(ssl_manager))
    ).
%%--------------------------------------------------------------------
%% Test cases
%%--------------------------------------------------------------------

View File

@ -47,13 +47,14 @@ init_per_testcase(Case, Config) when
Case =:= t_max_conns_tcp; Case =:= t_current_conns_tcp
->
catch emqx_config_handler:stop(),
Port = emqx_common_test_helpers:select_free_port(tcp),
{ok, _} = emqx_config_handler:start_link(),
PrevListeners = emqx_config:get([listeners], #{}),
PureListeners = remove_default_limiter(PrevListeners),
PureListeners2 = PureListeners#{
tcp => #{
listener_test => #{
bind => {"127.0.0.1", 9999},
bind => {"127.0.0.1", Port},
max_connections => 4321,
limiter => #{}
}
@ -63,19 +64,20 @@ init_per_testcase(Case, Config) when
ok = emqx_listeners:start(),
[
{prev_listener_conf, PrevListeners}
{prev_listener_conf, PrevListeners},
{tcp_port, Port}
| Config
];
init_per_testcase(t_wss_conn, Config) ->
catch emqx_config_handler:stop(),
Port = emqx_common_test_helpers:select_free_port(ssl),
{ok, _} = emqx_config_handler:start_link(),
PrevListeners = emqx_config:get([listeners], #{}),
PureListeners = remove_default_limiter(PrevListeners),
PureListeners2 = PureListeners#{
wss => #{
listener_test => #{
bind => {{127, 0, 0, 1}, 9998},
bind => {{127, 0, 0, 1}, Port},
limiter => #{},
ssl_options => #{
cacertfile => ?CERTS_PATH("cacert.pem"),
@ -89,7 +91,8 @@ init_per_testcase(t_wss_conn, Config) ->
ok = emqx_listeners:start(),
[
{prev_listener_conf, PrevListeners}
{prev_listener_conf, PrevListeners},
{wss_port, Port}
| Config
];
init_per_testcase(_, Config) ->
@ -171,20 +174,30 @@ t_restart_listeners_with_hibernate_after_disabled(_Config) ->
ok = emqx_listeners:stop(),
emqx_config:put([listeners], OldLConf).
t_max_conns_tcp(_) ->
t_max_conns_tcp(Config) ->
%% Note: Using a string representation for the bind address like
%% "127.0.0.1" does not work
?assertEqual(4321, emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})).
?assertEqual(
4321,
emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, ?config(tcp_port, Config)})
).
t_current_conns_tcp(_) ->
?assertEqual(0, emqx_listeners:current_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})).
t_current_conns_tcp(Config) ->
?assertEqual(
0,
emqx_listeners:current_conns('tcp:listener_test', {
{127, 0, 0, 1}, ?config(tcp_port, Config)
})
).
t_wss_conn(_) ->
{ok, Socket} = ssl:connect({127, 0, 0, 1}, 9998, [{verify, verify_none}], 1000),
%% Smoke test: a TLS client can connect to the wss listener on the free port
%% chosen in init_per_testcase (read back from the CT `Config').
t_wss_conn(Config) ->
    {ok, Socket} = ssl:connect(
        {127, 0, 0, 1}, ?config(wss_port, Config), [{verify, verify_none}], 1000
    ),
    ok = ssl:close(Socket).
t_quic_conn(Config) ->
Port = 24568,
Port = emqx_common_test_helpers:select_free_port(quic),
DataDir = ?config(data_dir, Config),
SSLOpts = #{
password => ?SERVER_KEY_PASSWORD,
@ -207,7 +220,7 @@ t_quic_conn(Config) ->
emqx_listeners:stop_listener(quic, ?FUNCTION_NAME, #{bind => Port}).
t_ssl_password_cert(Config) ->
Port = 24568,
Port = emqx_common_test_helpers:select_free_port(ssl),
DataDir = ?config(data_dir, Config),
SSLOptsPWD = #{
password => ?SERVER_KEY_PASSWORD,

View File

@ -22,7 +22,16 @@
-include_lib("emqx/include/emqx_mqtt.hrl").
-include_lib("eunit/include/eunit.hrl").
all() -> emqx_common_test_helpers:all(?MODULE).
%% CT callback: run every t_* case discovered in this module.
all() ->
    emqx_common_test_helpers:all(?MODULE).
%% CT callback: boot the default application set before the suite runs.
init_per_suite(Config) ->
    emqx_common_test_helpers:start_apps([]),
    Config.
%% CT callback: stop the applications started in init_per_suite.
end_per_suite(_Config) ->
    emqx_common_test_helpers:stop_apps([]),
    ok.
t_check_pub(_) ->
OldConf = emqx:get_config([zones], #{}),

View File

@ -2026,18 +2026,7 @@ stop_emqx() ->
%% select a random port picked by OS
-spec select_port() -> inet:port_number().
select_port() ->
{ok, S} = gen_udp:open(0, [{reuseaddr, true}]),
{ok, {_, Port}} = inet:sockname(S),
gen_udp:close(S),
case os:type() of
{unix, darwin} ->
%% in MacOS, still get address_in_use after close port
timer:sleep(500);
_ ->
skip
end,
ct:pal("select port: ~p", [Port]),
Port.
emqx_common_test_helpers:select_free_port(quic).
-spec via_stream({quic, quicer:connection_handle(), quicer:stream_handle()}) ->
quicer:stream_handle().

View File

@ -38,6 +38,7 @@
-define(LOGT(Format, Args), ct:pal("TEST_SUITE: " ++ Format, Args)).
-define(RATE(Rate), to_rate(Rate)).
-define(NOW, erlang:system_time(millisecond)).
-define(ROOT_COUNTER_IDX, 1).
%%--------------------------------------------------------------------
%% Setups
@ -72,7 +73,7 @@ t_consume(_) ->
Cfg = fun(Cfg) ->
Cfg#{
rate := 100,
capacity := 100,
burst := 0,
initial := 100,
max_retry_time := 1000,
failure_strategy := force
@ -89,7 +90,7 @@ t_retry(_) ->
Cfg = fun(Cfg) ->
Cfg#{
rate := 50,
capacity := 200,
burst := 150,
initial := 0,
max_retry_time := 1000,
failure_strategy := force
@ -109,7 +110,7 @@ t_restore(_) ->
Cfg = fun(Cfg) ->
Cfg#{
rate := 1,
capacity := 200,
burst := 199,
initial := 50,
max_retry_time := 100,
failure_strategy := force
@ -129,7 +130,7 @@ t_max_retry_time(_) ->
Cfg = fun(Cfg) ->
Cfg#{
rate := 1,
capacity := 1,
burst := 0,
max_retry_time := 500,
failure_strategy := drop
}
@ -139,8 +140,12 @@ t_max_retry_time(_) ->
Begin = ?NOW,
Result = emqx_htb_limiter:consume(101, Client),
?assertMatch({drop, _}, Result),
Time = ?NOW - Begin,
?assert(Time >= 500 andalso Time < 550)
End = ?NOW,
Time = End - Begin,
?assert(
Time >= 500 andalso Time < 550,
lists:flatten(io_lib:format("Begin:~p, End:~p, Time:~p~n", [Begin, End, Time]))
)
end,
with_per_client(Cfg, Case).
@ -150,7 +155,7 @@ t_divisible(_) ->
divisible := true,
rate := ?RATE("1000/1s"),
initial := 600,
capacity := 600
burst := 0
}
end,
Case = fun(BucketCfg) ->
@ -176,7 +181,7 @@ t_low_watermark(_) ->
low_watermark := 400,
rate := ?RATE("1000/1s"),
initial := 1000,
capacity := 1000
burst := 0
}
end,
Case = fun(BucketCfg) ->
@ -201,23 +206,22 @@ t_infinity_client(_) ->
Fun = fun(Cfg) -> Cfg end,
Case = fun(Cfg) ->
Client = connect(Cfg),
InfVal = emqx_limiter_schema:infinity_value(),
?assertMatch(#{bucket := #{rate := InfVal}}, Client),
?assertMatch(infinity, Client),
Result = emqx_htb_limiter:check(100000, Client),
?assertEqual({ok, Client}, Result)
end,
with_per_client(Fun, Case).
t_try_restore_agg(_) ->
t_try_restore_with_bucket(_) ->
Fun = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{
rate := 1,
capacity := 200,
rate := 100,
burst := 100,
initial := 50
},
Cli2 = Cli#{
rate := infinity,
capacity := infinity,
burst := 0,
divisible := true,
max_retry_time := 100,
failure_strategy := force
@ -239,11 +243,11 @@ t_short_board(_) ->
Bucket2 = Bucket#{
rate := ?RATE("100/1s"),
initial := 0,
capacity := 100
burst := 0
},
Cli2 = Cli#{
rate := ?RATE("600/1s"),
capacity := 600,
burst := 0,
initial := 600
},
Bucket2#{client := Cli2}
@ -261,46 +265,45 @@ t_rate(_) ->
Bucket2 = Bucket#{
rate := ?RATE("100/100ms"),
initial := 0,
capacity := infinity
burst := 0
},
Cli2 = Cli#{
rate := infinity,
capacity := infinity,
burst := 0,
initial := 0
},
Bucket2#{client := Cli2}
end,
Case = fun(Cfg) ->
Time = 1000,
Client = connect(Cfg),
Ts1 = erlang:system_time(millisecond),
C1 = emqx_htb_limiter:available(Client),
timer:sleep(1000),
Ts2 = erlang:system_time(millisecond),
timer:sleep(1100),
C2 = emqx_htb_limiter:available(Client),
ShouldInc = floor((Ts2 - Ts1) / 100) * 100,
ShouldInc = floor(Time / 100) * 100,
Inc = C2 - C1,
?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate")
end,
with_bucket(Fun, Case).
t_capacity(_) ->
Capacity = 600,
Capacity = 1200,
Fun = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{
rate := ?RATE("100/100ms"),
initial := 0,
capacity := 600
burst := 200
},
Cli2 = Cli#{
rate := infinity,
capacity := infinity,
burst := 0,
initial := 0
},
Bucket2#{client := Cli2}
end,
Case = fun(Cfg) ->
Client = connect(Cfg),
timer:sleep(1000),
timer:sleep(1500),
C1 = emqx_htb_limiter:available(Client),
?assertEqual(Capacity, C1, "test bucket capacity")
end,
@ -318,11 +321,11 @@ t_collaborative_alloc(_) ->
Bucket2 = Bucket#{
rate := ?RATE("400/1s"),
initial := 0,
capacity := 600
burst := 200
},
Cli2 = Cli#{
rate := ?RATE("50"),
capacity := 100,
burst := 50,
initial := 100
},
Bucket2#{client := Cli2}
@ -363,11 +366,11 @@ t_burst(_) ->
Bucket2 = Bucket#{
rate := ?RATE("200/1s"),
initial := 0,
capacity := 200
burst := 0
},
Cli2 = Cli#{
rate := ?RATE("50/1s"),
capacity := 200,
burst := 150,
divisible := true
},
Bucket2#{client := Cli2}
@ -392,38 +395,6 @@ t_burst(_) ->
Case
).
t_limit_global_with_unlimit_other(_) ->
GlobalMod = fun(#{message_routing := MR} = Cfg) ->
Cfg#{message_routing := MR#{rate := ?RATE("600/1s")}}
end,
Bucket = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{
rate := infinity,
initial := 0,
capacity := infinity
},
Cli2 = Cli#{
rate := infinity,
capacity := infinity,
initial := 0
},
Bucket2#{client := Cli2}
end,
Case = fun() ->
C1 = counters:new(1, []),
start_client({b1, Bucket}, ?NOW + 2000, C1, 20),
timer:sleep(2100),
check_average_rate(C1, 2, 600)
end,
with_global(
GlobalMod,
[{b1, Bucket}],
Case
).
%%--------------------------------------------------------------------
%% Test Cases container
%%--------------------------------------------------------------------
@ -432,7 +403,7 @@ t_check_container(_) ->
Cfg#{
rate := ?RATE("1000/1s"),
initial := 1000,
capacity := 1000
burst := 0
}
end,
Case = fun(#{client := Client} = BucketCfg) ->
@ -452,38 +423,6 @@ t_check_container(_) ->
end,
with_per_client(Cfg, Case).
%%--------------------------------------------------------------------
%% Test Override
%%--------------------------------------------------------------------
t_bucket_no_client(_) ->
Rate = ?RATE("1/s"),
GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) ->
Cfg#{client := Client#{message_routing := MR#{rate := Rate}}}
end,
BucketMod = fun(Bucket) ->
maps:remove(client, Bucket)
end,
Case = fun() ->
Limiter = connect(BucketMod(make_limiter_cfg())),
?assertMatch(#{rate := Rate}, Limiter)
end,
with_global(GlobalMod, [BucketMod], Case).
t_bucket_client(_) ->
GlobalRate = ?RATE("1/s"),
BucketRate = ?RATE("10/s"),
GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) ->
Cfg#{client := Client#{message_routing := MR#{rate := GlobalRate}}}
end,
BucketMod = fun(#{client := Client} = Bucket) ->
Bucket#{client := Client#{rate := BucketRate}}
end,
Case = fun() ->
Limiter = connect(BucketMod(make_limiter_cfg())),
?assertMatch(#{rate := BucketRate}, Limiter)
end,
with_global(GlobalMod, [BucketMod], Case).
%%--------------------------------------------------------------------
%% Test Cases misc
%%--------------------------------------------------------------------
@ -565,13 +504,241 @@ t_schema_unit(_) ->
?assertMatch({error, _}, M:to_rate("100MB/1")),
?assertMatch({error, _}, M:to_rate("100/10x")),
?assertEqual({ok, emqx_limiter_schema:infinity_value()}, M:to_capacity("infinity")),
?assertEqual({ok, infinity}, M:to_capacity("infinity")),
?assertEqual({ok, 100}, M:to_capacity("100")),
?assertEqual({ok, 100 * 1024}, M:to_capacity("100KB")),
?assertEqual({ok, 100 * 1024 * 1024}, M:to_capacity("100MB")),
?assertEqual({ok, 100 * 1024 * 1024 * 1024}, M:to_capacity("100GB")),
ok.
%% Backward compatibility: the deprecated `capacity` setting must still
%% be accepted and be mapped onto the new `burst` field
%% (capacity = infinity -> burst = 0).
t_compatibility_for_capacity(_) ->
    ConfString = <<
        "\n"
        "listeners.tcp.default {\n"
        " bind = \"0.0.0.0:1883\"\n"
        " max_connections = 1024000\n"
        " limiter.messages.capacity = infinity\n"
        " limiter.client.messages.capacity = infinity\n"
        "}\n"
    >>,
    ?assertMatch(
        #{
            messages := #{burst := 0},
            client := #{messages := #{burst := 0}}
        },
        parse_and_check(ConfString)
    ).
%% Backward compatibility: the deprecated `message_in` limiter name must
%% still be accepted and be mapped onto the new `messages` field.
t_compatibility_for_message_in(_) ->
    ConfString = <<
        "\n"
        "listeners.tcp.default {\n"
        " bind = \"0.0.0.0:1883\"\n"
        " max_connections = 1024000\n"
        " limiter.message_in.rate = infinity\n"
        " limiter.client.message_in.rate = infinity\n"
        "}\n"
    >>,
    ?assertMatch(
        #{
            messages := #{rate := infinity},
            client := #{messages := #{rate := infinity}}
        },
        parse_and_check(ConfString)
    ).
%% Backward compatibility: the deprecated `bytes_in` limiter name must
%% still be accepted and be mapped onto the new `bytes` field.
t_compatibility_for_bytes_in(_) ->
    ConfString = <<
        "\n"
        "listeners.tcp.default {\n"
        " bind = \"0.0.0.0:1883\"\n"
        " max_connections = 1024000\n"
        " limiter.bytes_in.rate = infinity\n"
        " limiter.client.bytes_in.rate = infinity\n"
        "}\n"
    >>,
    ?assertMatch(
        #{
            bytes := #{rate := infinity},
            client := #{bytes := #{rate := infinity}}
        },
        parse_and_check(ConfString)
    ).
%% `emqx_limiter_schema:extract_with_type/2` should return a config that
%% contains only the requested limiter type, both at the top level and
%% inside the `client` section.
t_extract_with_type(_) ->
    %% True iff `Cfg` is a one-key map holding exactly `Key`.
    IsOnly = fun
        (_Key, Cfg) when map_size(Cfg) =/= 1 ->
            false;
        (Key, Cfg) ->
            maps:is_key(Key, Cfg)
    end,
    %% With a `client` section: the client map must contain only `Type`,
    %% and the remaining top level must be empty or contain only `Type`.
    %% Without one: the whole map must contain only `Type`.
    Checker = fun
        (Type, #{client := Client} = Cfg) ->
            Cfg2 = maps:remove(client, Cfg),
            IsOnly(Type, Client) andalso
                (IsOnly(Type, Cfg2) orelse
                    map_size(Cfg2) =:= 0);
        (Type, Cfg) ->
            IsOnly(Type, Cfg)
    end,
    %% `undefined` passes through unchanged.
    ?assertEqual(undefined, emqx_limiter_schema:extract_with_type(messages, undefined)),
    %% top-level only: the `bytes` entry must be stripped
    ?assert(
        Checker(
            messages,
            emqx_limiter_schema:extract_with_type(messages, #{
                messages => #{rate => 1}, bytes => #{rate => 1}
            })
        )
    ),
    %% top level plus a `client` section holding only `messages`
    ?assert(
        Checker(
            messages,
            emqx_limiter_schema:extract_with_type(messages, #{
                messages => #{rate => 1},
                bytes => #{rate => 1},
                client => #{messages => #{rate => 2}}
            })
        )
    ),
    %% `client`-only config: other types inside `client` must be stripped
    ?assert(
        Checker(
            messages,
            emqx_limiter_schema:extract_with_type(messages, #{
                client => #{messages => #{rate => 2}, bytes => #{rate => 1}}
            })
        )
    ).
%%--------------------------------------------------------------------
%% Test Cases Create Instance
%%--------------------------------------------------------------------
%% Register a fake bucket under this test's name, then run
%% `emqx_limiter_server:connect/3` against every {Cfg, Expected} pair
%% produced by make_create_test_data_with_infinity_node/1.
t_create_instance_with_infinity_node(_) ->
    emqx_limiter_manager:insert_bucket(?FUNCTION_NAME, bytes, ?FUNCTION_NAME),
    Cases = make_create_test_data_with_infinity_node(?FUNCTION_NAME),
    lists:foreach(
        fun({Cfg, Expected}) ->
            {ok, Result} = emqx_limiter_server:connect(?FUNCTION_NAME, bytes, Cfg),
            %% Expected is either an atom (exact result) or a predicate fun
            IsMatched =
                case is_atom(Expected) of
                    true ->
                        Result =:= Expected;
                    _ ->
                        Expected(Result)
                end,
            ?assert(
                IsMatched,
                lists:flatten(
                    io_lib:format("Got unexpected:~p~n, Cfg:~p~n", [
                        Result, Cfg
                    ])
                )
            )
        end,
        Cases
    ),
    %% clean up the fake bucket registration
    emqx_limiter_manager:delete_bucket(?FUNCTION_NAME, bytes),
    ok.
%% Connecting to a bucket id (or limiter type) that was never registered
%% must fail with `{error, invalid_bucket}`.
t_not_exists_instance(_) ->
    Cfg = #{bytes => #{rate => 100, burst => 0, initial => 0}},
    lists:foreach(
        fun(Type) ->
            ?assertEqual(
                {error, invalid_bucket},
                emqx_limiter_server:connect(?FUNCTION_NAME, Type, Cfg)
            )
        end,
        [bytes, not_exists]
    ),
    ok.
%% Connecting on a node with configured buckets: a bucket with a finite
%% rate yields a limiter referencing its own (non-root) counter, while
%% an infinity bucket falls back to a reference to the root counter.
t_create_instance_with_node(_) ->
    GlobalMod = fun(#{message_routing := MR} = Cfg) ->
        %% NOTE(review): `messages` is overwritten with the
        %% `message_routing` sub-map (`MR`) -- confirm this is intentional.
        Cfg#{
            message_routing := MR#{rate := ?RATE("200/1s")},
            messages := MR#{rate := ?RATE("200/1s")}
        }
    end,
    %% b1: finite rate -> gets its own counter
    B1 = fun(Bucket) ->
        Bucket#{rate := ?RATE("400/1s")}
    end,
    %% b2: infinity rate -> shares the root counter
    B2 = fun(Bucket) ->
        Bucket#{rate := infinity}
    end,
    %% Predicate: result is a reference limiter pointing at the root
    %% counter (IsRoot = true) or at a non-root counter (IsRoot = false).
    IsRefLimiter = fun
        ({ok, #{tokens := _}}, _IsRoot) ->
            %% a token-bucket limiter, not a reference limiter
            false;
        ({ok, #{bucket := #{index := ?ROOT_COUNTER_IDX}}}, true) ->
            true;
        ({ok, #{bucket := #{index := Index}}}, false) when Index =/= ?ROOT_COUNTER_IDX ->
            true;
        (Result, _IsRoot) ->
            ct:pal("The result is:~p~n", [Result]),
            false
    end,
    Case = fun() ->
        BucketCfg = make_limiter_cfg(),
        ?assert(
            IsRefLimiter(emqx_limiter_server:connect(b1, message_routing, B1(BucketCfg)), false)
        ),
        ?assert(
            IsRefLimiter(emqx_limiter_server:connect(b2, message_routing, B2(BucketCfg)), true)
        ),
        %% undefined config on a known type -> root reference
        ?assert(IsRefLimiter(emqx_limiter_server:connect(x, messages, undefined), true)),
        ?assertNot(IsRefLimiter(emqx_limiter_server:connect(x, bytes, undefined), false))
    end,
    with_global(
        GlobalMod,
        [{b1, B1}, {b2, B2}],
        Case
    ),
    ok.
%%--------------------------------------------------------------------
%% Test Cases emqx_esockd_htb_limiter
%%--------------------------------------------------------------------
%% Creating an esockd limiter with an undefined bucket must yield an
%% infinity (unlimited) limiter that can be deleted cleanly.
t_create_esockd_htb_limiter(_) ->
    CreateOpts = emqx_esockd_htb_limiter:new_create_options(?FUNCTION_NAME, bytes, undefined),
    ?assertMatch(
        #{module := _, id := ?FUNCTION_NAME, type := bytes, bucket := undefined},
        CreateOpts
    ),
    HtbLimiter = emqx_esockd_htb_limiter:create(CreateOpts),
    ?assertMatch(
        #{module := _, name := bytes, limiter := infinity},
        HtbLimiter
    ),
    ?assertEqual(ok, emqx_esockd_htb_limiter:delete(HtbLimiter)),
    ok.
%% Consuming more than the client rate must pause; after waiting for
%% tokens to refill, consuming within the rate succeeds.
t_esockd_htb_consume(_) ->
    ClientCfg = emqx_limiter_schema:default_client_config(),
    %% client limited to 50 bytes, no retry window
    Cfg = #{client => #{bytes => ClientCfg#{rate := 50, max_retry_time := 0}}},
    Opts = emqx_esockd_htb_limiter:new_create_options(?FUNCTION_NAME, bytes, Cfg),
    Limiter = emqx_esockd_htb_limiter:create(Opts),
    %% 51 > rate of 50 -> must pause
    C1R = emqx_esockd_htb_limiter:consume(51, Limiter),
    ?assertMatch({pause, _Ms, _Limiter2}, C1R),
    timer:sleep(300),
    %% NOTE(review): this reuses the original `Limiter`, not the paused
    %% `_Limiter2` returned above -- presumably intentional so the second
    %% consume starts from the pre-pause state; confirm.
    C2R = emqx_esockd_htb_limiter:consume(50, Limiter),
    ?assertMatch({ok, _}, C2R),
    ok.
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
@ -748,17 +915,16 @@ connect(Name, Cfg) ->
Limiter.
make_limiter_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
Client = #{
rate => Infinity,
rate => infinity,
initial => 0,
capacity => Infinity,
burst => 0,
low_watermark => 0,
divisible => false,
max_retry_time => timer:seconds(5),
failure_strategy => force
},
#{client => Client, rate => Infinity, initial => 0, capacity => Infinity}.
#{client => Client, rate => infinity, initial => 0, burst => 0}.
add_bucket(Cfg) ->
add_bucket(?MODULE, Cfg).
@ -812,3 +978,68 @@ apply_modifier(Pairs, #{default := Template}) ->
Acc#{N => M(Template)}
end,
lists:foldl(Fun, #{}, Pairs).
%% Load the given HOCON config string through the emqx schema and return
%% the parsed limiter config of the default TCP listener.
parse_and_check(ConfigString) ->
    ok = emqx_common_test_helpers:load_config(emqx_schema, ConfigString),
    emqx:get_config([listeners, tcp, default, limiter]).
%% Build {Config, Expected} pairs for exercising
%% `emqx_limiter_server:connect/3` on a node whose root limiter is
%% infinity. `FakeInstance` is the bucket reference registered for this
%% test in `emqx_limiter_manager`.
%%
%% `Expected` is either an atom (exact result) or a predicate fun
%% applied to the connect result.
make_create_test_data_with_infinity_node(FakeInstance) ->
    Infinity = emqx_htb_limiter:make_infinity_limiter(),
    ClientCfg = emqx_limiter_schema:default_client_config(),
    InfinityRef = emqx_limiter_bucket_ref:infinity_bucket(),
    %% client-only config with the given client rate
    MkC = fun(Rate) ->
        #{client => #{bytes => ClientCfg#{rate := Rate}}}
    end,
    %% bucket-only config with the given bucket rate
    MkB = fun(Rate) ->
        #{bytes => #{rate => Rate, burst => 0, initial => 0}}
    end,
    %% combined client + bucket config
    MkA = fun(Client, Bucket) ->
        maps:merge(MkC(Client), MkB(Bucket))
    end,
    %% matches a reference limiter pointing at the expected bucket
    IsRefLimiter = fun(Expected) ->
        fun
            (#{tokens := _}) -> false;
            (#{bucket := Bucket}) -> Bucket =:= Expected;
            (_) -> false
        end
    end,
    %% matches a token-bucket limiter backed by the expected bucket ref
    IsTokenLimiter = fun(Expected) ->
        fun
            (#{tokens := _, bucket := Bucket}) -> Bucket =:= Expected;
            (_) -> false
        end
    end,
    [
        %% default situation, no limiter setting
        {undefined, Infinity},
        %% client = undefined bucket = undefined
        {#{}, Infinity},
        %% client = undefined bucket = infinity
        {MkB(infinity), Infinity},
        %% client = undefined bucket = other
        {MkB(100), IsRefLimiter(FakeInstance)},
        %% client = infinity bucket = undefined
        {MkC(infinity), Infinity},
        %% client = infinity bucket = infinity
        {MkA(infinity, infinity), Infinity},
        %% client = infinity bucket = other
        {MkA(infinity, 100), IsRefLimiter(FakeInstance)},
        %% client = other bucket = undefined
        {MkC(100), IsTokenLimiter(InfinityRef)},
        %% client = other bucket = infinity
        %% (was a copy-paste duplicate of the bucket-undefined case,
        %% `MkC(100)`, which never exercised an infinity bucket)
        {MkA(100, infinity), IsTokenLimiter(InfinityRef)},
        %% client = C bucket = B C < B
        {MkA(100, 1000), IsTokenLimiter(FakeInstance)},
        %% client = C bucket = B C > B
        {MkA(1000, 100), IsRefLimiter(FakeInstance)}
    ].

View File

@ -219,112 +219,124 @@ parse_server_test_() ->
?T(
"single server, binary, no port",
?assertEqual(
[{"localhost", DefaultPort}],
[#{hostname => "localhost", port => DefaultPort}],
Parse(<<"localhost">>)
)
),
?T(
"single server, string, no port",
?assertEqual(
[{"localhost", DefaultPort}],
[#{hostname => "localhost", port => DefaultPort}],
Parse("localhost")
)
),
?T(
"single server, list(string), no port",
?assertEqual(
[{"localhost", DefaultPort}],
[#{hostname => "localhost", port => DefaultPort}],
Parse(["localhost"])
)
),
?T(
"single server, list(binary), no port",
?assertEqual(
[{"localhost", DefaultPort}],
[#{hostname => "localhost", port => DefaultPort}],
Parse([<<"localhost">>])
)
),
?T(
"single server, binary, with port",
?assertEqual(
[{"localhost", 9999}],
[#{hostname => "localhost", port => 9999}],
Parse(<<"localhost:9999">>)
)
),
?T(
"single server, list(string), with port",
?assertEqual(
[{"localhost", 9999}],
[#{hostname => "localhost", port => 9999}],
Parse(["localhost:9999"])
)
),
?T(
"single server, string, with port",
?assertEqual(
[{"localhost", 9999}],
[#{hostname => "localhost", port => 9999}],
Parse("localhost:9999")
)
),
?T(
"single server, list(binary), with port",
?assertEqual(
[{"localhost", 9999}],
[#{hostname => "localhost", port => 9999}],
Parse([<<"localhost:9999">>])
)
),
?T(
"multiple servers, string, no port",
?assertEqual(
[{"host1", DefaultPort}, {"host2", DefaultPort}],
[
#{hostname => "host1", port => DefaultPort},
#{hostname => "host2", port => DefaultPort}
],
Parse("host1, host2")
)
),
?T(
"multiple servers, binary, no port",
?assertEqual(
[{"host1", DefaultPort}, {"host2", DefaultPort}],
[
#{hostname => "host1", port => DefaultPort},
#{hostname => "host2", port => DefaultPort}
],
Parse(<<"host1, host2,,,">>)
)
),
?T(
"multiple servers, list(string), no port",
?assertEqual(
[{"host1", DefaultPort}, {"host2", DefaultPort}],
[
#{hostname => "host1", port => DefaultPort},
#{hostname => "host2", port => DefaultPort}
],
Parse(["host1", "host2"])
)
),
?T(
"multiple servers, list(binary), no port",
?assertEqual(
[{"host1", DefaultPort}, {"host2", DefaultPort}],
[
#{hostname => "host1", port => DefaultPort},
#{hostname => "host2", port => DefaultPort}
],
Parse([<<"host1">>, <<"host2">>])
)
),
?T(
"multiple servers, string, with port",
?assertEqual(
[{"host1", 1234}, {"host2", 2345}],
[#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
Parse("host1:1234, host2:2345")
)
),
?T(
"multiple servers, binary, with port",
?assertEqual(
[{"host1", 1234}, {"host2", 2345}],
[#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
Parse(<<"host1:1234, host2:2345, ">>)
)
),
?T(
"multiple servers, list(string), with port",
?assertEqual(
[{"host1", 1234}, {"host2", 2345}],
[#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
Parse([" host1:1234 ", "host2:2345"])
)
),
?T(
"multiple servers, list(binary), with port",
?assertEqual(
[{"host1", 1234}, {"host2", 2345}],
[#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}],
Parse([<<"host1:1234">>, <<"host2:2345">>])
)
),
@ -350,9 +362,9 @@ parse_server_test_() ->
)
),
?T(
"multiple servers wihtout port, mixed list(binary|string)",
"multiple servers without port, mixed list(binary|string)",
?assertEqual(
["host1", "host2"],
[#{hostname => "host1"}, #{hostname => "host2"}],
Parse2([<<"host1">>, "host2"], #{no_port => true})
)
),
@ -394,14 +406,18 @@ parse_server_test_() ->
?T(
"single server map",
?assertEqual(
[{"host1.domain", 1234}],
[#{hostname => "host1.domain", port => 1234}],
HoconParse("host1.domain:1234")
)
),
?T(
"multiple servers map",
?assertEqual(
[{"host1.domain", 1234}, {"host2.domain", 2345}, {"host3.domain", 3456}],
[
#{hostname => "host1.domain", port => 1234},
#{hostname => "host2.domain", port => 2345},
#{hostname => "host3.domain", port => 3456}
],
HoconParse("host1.domain:1234,host2.domain:2345,host3.domain:3456")
)
),
@ -447,6 +463,171 @@ parse_server_test_() ->
"bad_schema",
emqx_schema:parse_server("whatever", #{default_port => 10, no_port => true})
)
),
?T(
"scheme, hostname and port",
?assertEqual(
#{scheme => "pulsar+ssl", hostname => "host", port => 6651},
emqx_schema:parse_server(
"pulsar+ssl://host:6651",
#{
default_port => 6650,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"scheme and hostname, default port",
?assertEqual(
#{scheme => "pulsar", hostname => "host", port => 6650},
emqx_schema:parse_server(
"pulsar://host",
#{
default_port => 6650,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"scheme and hostname, no port",
?assertEqual(
#{scheme => "pulsar", hostname => "host"},
emqx_schema:parse_server(
"pulsar://host",
#{
no_port => true,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"scheme and hostname, missing port",
?assertThrow(
"missing_port_number",
emqx_schema:parse_server(
"pulsar://host",
#{
no_port => false,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"hostname, default scheme, no default port",
?assertEqual(
#{scheme => "pulsar", hostname => "host"},
emqx_schema:parse_server(
"host",
#{
default_scheme => "pulsar",
no_port => true,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"hostname, default scheme, default port",
?assertEqual(
#{scheme => "pulsar", hostname => "host", port => 6650},
emqx_schema:parse_server(
"host",
#{
default_port => 6650,
default_scheme => "pulsar",
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"just hostname, expecting missing scheme",
?assertThrow(
"missing_scheme",
emqx_schema:parse_server(
"host",
#{
no_port => true,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"hostname, default scheme, defined port",
?assertEqual(
#{scheme => "pulsar", hostname => "host", port => 6651},
emqx_schema:parse_server(
"host:6651",
#{
default_port => 6650,
default_scheme => "pulsar",
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"inconsistent scheme opts",
?assertError(
"bad_schema",
emqx_schema:parse_server(
"pulsar+ssl://host:6651",
#{
default_port => 6650,
default_scheme => "something",
supported_schemes => ["not", "supported"]
}
)
)
),
?T(
"hostname, default scheme, defined port",
?assertEqual(
#{scheme => "pulsar", hostname => "host", port => 6651},
emqx_schema:parse_server(
"host:6651",
#{
default_port => 6650,
default_scheme => "pulsar",
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
),
?T(
"unsupported scheme",
?assertThrow(
"unsupported_scheme",
emqx_schema:parse_server(
"pulsar+quic://host:6651",
#{
default_port => 6650,
supported_schemes => ["pulsar"]
}
)
)
),
?T(
"multiple hostnames with schemes (1)",
?assertEqual(
[
#{scheme => "pulsar", hostname => "host", port => 6649},
#{scheme => "pulsar+ssl", hostname => "other.host", port => 6651},
#{scheme => "pulsar", hostname => "yet.another", port => 6650}
],
emqx_schema:parse_servers(
"pulsar://host:6649, pulsar+ssl://other.host:6651,pulsar://yet.another",
#{
default_port => 6650,
supported_schemes => ["pulsar", "pulsar+ssl"]
}
)
)
)
].

View File

@ -60,12 +60,12 @@ init(Parent) ->
{ok, #{callbacks => [], owner => Parent}}.
terminate(_Reason, #{callbacks := Callbacks}) ->
lists:foreach(fun(Fun) -> catch Fun() end, Callbacks).
do_terminate(Callbacks).
handle_call({push, Callback}, _From, State = #{callbacks := Callbacks}) ->
{reply, ok, State#{callbacks := [Callback | Callbacks]}};
handle_call(terminate, _From, State = #{callbacks := Callbacks}) ->
lists:foreach(fun(Fun) -> catch Fun() end, Callbacks),
do_terminate(Callbacks),
{stop, normal, ok, State};
handle_call(_Req, _From, State) ->
{reply, error, State}.
@ -77,3 +77,23 @@ handle_info({'EXIT', Parent, _Reason}, State = #{owner := Parent}) ->
{stop, normal, State};
handle_info(_Msg, State) ->
{noreply, State}.
%%----------------------------------------------------------------------------------
%% Internal fns
%%----------------------------------------------------------------------------------
%% Run every registered callback in order, logging and swallowing any
%% crash so one failing callback cannot prevent the rest from running.
%% Always returns `ok`.
do_terminate(Callbacks) ->
    RunSafely = fun(Callback) ->
        try
            Callback()
        catch
            Kind:Error:Stacktrace ->
                ct:pal("error executing callback ~p: ~p", [Callback, {Kind, Error}]),
                ct:pal("stacktrace: ~p", [Stacktrace]),
                ok
        end
    end,
    ok = lists:foreach(RunSafely, Callbacks),
    ok.

View File

@ -229,7 +229,8 @@ ssl_files_handle_non_generated_file_test() ->
ok = emqx_tls_lib:delete_ssl_files(Dir, undefined, SSL2),
%% verify the file is not delete and not changed, because it is not generated by
%% emqx_tls_lib
?assertEqual({ok, KeyFileContent}, file:read_file(TmpKeyFile)).
?assertEqual({ok, KeyFileContent}, file:read_file(TmpKeyFile)),
ok = file:delete(TmpKeyFile).
ssl_file_replace_test() ->
Key1 = bin(test_key()),

View File

@ -447,7 +447,12 @@ t_websocket_info_deliver(_) ->
t_websocket_info_timeout_limiter(_) ->
Ref = make_ref(),
LimiterT = init_limiter(),
{ok, Rate} = emqx_limiter_schema:to_rate("50MB"),
LimiterT = init_limiter(#{
bytes => bucket_cfg(),
messages => bucket_cfg(),
client => #{bytes => client_cfg(Rate)}
}),
Next = fun emqx_ws_connection:when_msg_in/3,
Limiter = emqx_limiter_container:set_retry_context({retry, [], [], Next}, LimiterT),
Event = {timeout, Ref, limit_timeout},
@ -513,16 +518,16 @@ t_handle_timeout_emit_stats(_) ->
t_ensure_rate_limit(_) ->
{ok, Rate} = emqx_limiter_schema:to_rate("50MB"),
Limiter = init_limiter(#{
bytes_in => bucket_cfg(),
message_in => bucket_cfg(),
client => #{bytes_in => client_cfg(Rate)}
bytes => bucket_cfg(),
messages => bucket_cfg(),
client => #{bytes => client_cfg(Rate)}
}),
St = st(#{limiter => Limiter}),
%% must bigger than value in emqx_ratelimit_SUITE
{ok, Need} = emqx_limiter_schema:to_capacity("1GB"),
St1 = ?ws_conn:check_limiter(
[{Need, bytes_in}],
[{Need, bytes}],
[],
fun(_, _, S) -> S end,
[],
@ -703,23 +708,21 @@ init_limiter() ->
init_limiter(limiter_cfg()).
init_limiter(LimiterCfg) ->
emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], LimiterCfg).
emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes, messages], LimiterCfg).
limiter_cfg() ->
Cfg = bucket_cfg(),
Client = client_cfg(),
#{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}.
#{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}.
client_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
client_cfg(Infinity).
client_cfg(infinity).
client_cfg(Rate) ->
Infinity = emqx_limiter_schema:infinity_value(),
#{
rate => Rate,
initial => 0,
capacity => Infinity,
burst => 0,
low_watermark => 1,
divisible => false,
max_retry_time => timer:seconds(5),
@ -727,14 +730,13 @@ client_cfg(Rate) ->
}.
bucket_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
#{rate => Infinity, initial => 0, capacity => Infinity}.
#{rate => infinity, initial => 0, burst => 0}.
add_bucket() ->
Cfg = bucket_cfg(),
emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg),
emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg).
emqx_limiter_server:add_bucket(?LIMITER_ID, bytes, Cfg),
emqx_limiter_server:add_bucket(?LIMITER_ID, messages, Cfg).
del_bucket() ->
emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in),
emqx_limiter_server:del_bucket(?LIMITER_ID, message_in).
emqx_limiter_server:del_bucket(?LIMITER_ID, bytes),
emqx_limiter_server:del_bucket(?LIMITER_ID, messages).

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_authn, [
{description, "EMQX Authentication"},
{vsn, "0.1.17"},
{vsn, "0.1.18"},
{modules, []},
{registered, [emqx_authn_sup, emqx_authn_registry]},
{applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]},

View File

@ -28,6 +28,7 @@
parse_sql/2,
render_deep/2,
render_str/2,
render_urlencoded_str/2,
render_sql_params/2,
is_superuser/1,
bin/1,
@ -129,6 +130,13 @@ render_str(Template, Credential) ->
#{return => full_binary, var_trans => fun handle_var/2}
).
render_urlencoded_str(Template, Credential) ->
emqx_placeholder:proc_tmpl(
Template,
mapping_credential(Credential),
#{return => full_binary, var_trans => fun urlencode_var/2}
).
render_sql_params(ParamList, Credential) ->
emqx_placeholder:proc_tmpl(
ParamList,
@ -217,6 +225,11 @@ without_password(Credential, [Name | Rest]) ->
without_password(Credential, Rest)
end.
urlencode_var({var, _} = Var, Value) ->
emqx_http_lib:uri_encode(handle_var(Var, Value));
urlencode_var(Var, Value) ->
handle_var(Var, Value).
handle_var({var, _Name}, undefined) ->
<<>>;
handle_var({var, <<"peerhost">>}, PeerHost) ->

View File

@ -105,14 +105,16 @@ mnesia(boot) ->
%% Hocon Schema
%%------------------------------------------------------------------------------
namespace() -> "authn-scram-builtin_db".
namespace() -> "authn".
tags() ->
[<<"Authentication">>].
roots() -> [?CONF_NS].
%% used for config check when the schema module is resolved
roots() ->
[{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, scram))}].
fields(?CONF_NS) ->
fields(scram) ->
[
{mechanism, emqx_authn_schema:mechanism(scram)},
{backend, emqx_authn_schema:backend(built_in_database)},
@ -120,7 +122,7 @@ fields(?CONF_NS) ->
{iteration_count, fun iteration_count/1}
] ++ emqx_authn_schema:common_fields().
desc(?CONF_NS) ->
desc(scram) ->
"Settings for Salted Challenge Response Authentication Mechanism\n"
"(SCRAM) authentication.";
desc(_) ->
@ -141,7 +143,7 @@ iteration_count(_) -> undefined.
%%------------------------------------------------------------------------------
refs() ->
[hoconsc:ref(?MODULE, ?CONF_NS)].
[hoconsc:ref(?MODULE, scram)].
create(
AuthenticatorID,

View File

@ -53,34 +53,35 @@
%% Hocon Schema
%%------------------------------------------------------------------------------
namespace() -> "authn-http".
namespace() -> "authn".
tags() ->
[<<"Authentication">>].
%% used for config check when the schema module is resolved
roots() ->
[
{?CONF_NS,
hoconsc:mk(
hoconsc:union(fun union_member_selector/1),
hoconsc:union(fun ?MODULE:union_member_selector/1),
#{}
)}
].
fields(get) ->
fields(http_get) ->
[
{method, #{type => get, required => true, desc => ?DESC(method)}},
{headers, fun headers_no_content_type/1}
] ++ common_fields();
fields(post) ->
fields(http_post) ->
[
{method, #{type => post, required => true, desc => ?DESC(method)}},
{headers, fun headers/1}
] ++ common_fields().
desc(get) ->
desc(http_get) ->
?DESC(get);
desc(post) ->
desc(http_post) ->
?DESC(post);
desc(_) ->
undefined.
@ -158,8 +159,8 @@ request_timeout(_) -> undefined.
refs() ->
[
hoconsc:ref(?MODULE, get),
hoconsc:ref(?MODULE, post)
hoconsc:ref(?MODULE, http_get),
hoconsc:ref(?MODULE, http_post)
].
union_member_selector(all_union_members) ->
@ -168,9 +169,9 @@ union_member_selector({value, Value}) ->
refs(Value).
refs(#{<<"method">> := <<"get">>}) ->
[hoconsc:ref(?MODULE, get)];
[hoconsc:ref(?MODULE, http_get)];
refs(#{<<"method">> := <<"post">>}) ->
[hoconsc:ref(?MODULE, post)];
[hoconsc:ref(?MODULE, http_post)];
refs(_) ->
throw(#{
field_name => method,
@ -313,9 +314,9 @@ parse_url(Url) ->
BaseUrl = iolist_to_binary([Scheme, "//", HostPort]),
case string:split(Remaining, "?", leading) of
[Path, QueryString] ->
{BaseUrl, Path, QueryString};
{BaseUrl, <<"/", Path/binary>>, QueryString};
[Path] ->
{BaseUrl, Path, <<>>}
{BaseUrl, <<"/", Path/binary>>, <<>>}
end;
[HostPort] ->
{iolist_to_binary([Scheme, "//", HostPort]), <<>>, <<>>}
@ -356,7 +357,7 @@ generate_request(Credential, #{
body_template := BodyTemplate
}) ->
Headers = maps:to_list(Headers0),
Path = emqx_authn_utils:render_str(BasePathTemplate, Credential),
Path = emqx_authn_utils:render_urlencoded_str(BasePathTemplate, Credential),
Query = emqx_authn_utils:render_deep(BaseQueryTemplate, Credential),
Body = emqx_authn_utils:render_deep(BodyTemplate, Credential),
case Method of
@ -371,9 +372,9 @@ generate_request(Credential, #{
end.
append_query(Path, []) ->
encode_path(Path);
Path;
append_query(Path, Query) ->
encode_path(Path) ++ "?" ++ binary_to_list(qs(Query)).
Path ++ "?" ++ binary_to_list(qs(Query)).
qs(KVs) ->
qs(KVs, []).
@ -435,10 +436,6 @@ parse_body(ContentType, _) ->
uri_encode(T) ->
emqx_http_lib:uri_encode(to_list(T)).
encode_path(Path) ->
Parts = string:split(Path, "/", all),
lists:flatten(["/" ++ Part || Part <- lists:map(fun uri_encode/1, Parts)]).
request_for_log(Credential, #{url := Url} = State) ->
SafeCredential = emqx_authn_utils:without_password(Credential),
case generate_request(SafeCredential, State) of

View File

@ -35,18 +35,17 @@
callback_mode() -> always_sync.
on_start(InstId, Opts) ->
PoolName = emqx_plugin_libs_pool:pool_name(InstId),
PoolOpts = [
{pool_size, maps:get(pool_size, Opts, ?DEFAULT_POOL_SIZE)},
{connector_opts, Opts}
],
case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, PoolOpts) of
ok -> {ok, #{pool_name => PoolName}};
case emqx_resource_pool:start(InstId, ?MODULE, PoolOpts) of
ok -> {ok, #{pool_name => InstId}};
{error, Reason} -> {error, Reason}
end.
on_stop(_InstId, #{pool_name := PoolName}) ->
emqx_plugin_libs_pool:stop_pool(PoolName).
emqx_resource_pool:stop(PoolName).
on_query(InstId, get_jwks, #{pool_name := PoolName}) ->
Result = ecpool:pick_and_do(PoolName, {emqx_authn_jwks_client, get_jwks, []}, no_handover),
@ -72,18 +71,17 @@ on_query(_InstId, {update, Opts}, #{pool_name := PoolName}) ->
ok.
on_get_status(_InstId, #{pool_name := PoolName}) ->
Func =
fun(Conn) ->
case emqx_authn_jwks_client:get_jwks(Conn) of
{ok, _} -> true;
_ -> false
end
end,
case emqx_plugin_libs_pool:health_check_ecpool_workers(PoolName, Func) of
case emqx_resource_pool:health_check_workers(PoolName, fun health_check/1) of
true -> connected;
false -> disconnected
end.
health_check(Conn) ->
case emqx_authn_jwks_client:get_jwks(Conn) of
{ok, _} -> true;
_ -> false
end.
connect(Opts) ->
ConnectorOpts = proplists:get_value(connector_opts, Opts),
emqx_authn_jwks_client:start_link(ConnectorOpts).

View File

@ -43,36 +43,57 @@
%% Hocon Schema
%%------------------------------------------------------------------------------
namespace() -> "authn-jwt".
namespace() -> "authn".
tags() ->
[<<"Authentication">>].
%% used for config check when the schema module is resolved
roots() ->
[
{?CONF_NS,
hoconsc:mk(
hoconsc:union(fun union_member_selector/1),
hoconsc:union(fun ?MODULE:union_member_selector/1),
#{}
)}
].
fields('hmac-based') ->
fields(jwt_hmac) ->
[
{use_jwks, sc(hoconsc:enum([false]), #{required => true, desc => ?DESC(use_jwks)})},
%% for hmac, it's the 'algorithm' field which selects this type
%% use_jwks field can be ignored (kept for backward compatibility)
{use_jwks,
sc(
hoconsc:enum([false]),
#{
required => false,
desc => ?DESC(use_jwks),
importance => ?IMPORTANCE_HIDDEN
}
)},
{algorithm,
sc(hoconsc:enum(['hmac-based']), #{required => true, desc => ?DESC(algorithm)})},
{secret, fun secret/1},
{secret_base64_encoded, fun secret_base64_encoded/1}
] ++ common_fields();
fields('public-key') ->
fields(jwt_public_key) ->
[
{use_jwks, sc(hoconsc:enum([false]), #{required => true, desc => ?DESC(use_jwks)})},
%% for public-key, it's the 'algorithm' field which selects this type
%% use_jwks field can be ignored (kept for backward compatibility)
{use_jwks,
sc(
hoconsc:enum([false]),
#{
required => false,
desc => ?DESC(use_jwks),
importance => ?IMPORTANCE_HIDDEN
}
)},
{algorithm,
sc(hoconsc:enum(['public-key']), #{required => true, desc => ?DESC(algorithm)})},
{public_key, fun public_key/1}
] ++ common_fields();
fields('jwks') ->
fields(jwt_jwks) ->
[
{use_jwks, sc(hoconsc:enum([true]), #{required => true, desc => ?DESC(use_jwks)})},
{endpoint, fun endpoint/1},
@ -85,12 +106,12 @@ fields('jwks') ->
}}
] ++ common_fields().
desc('hmac-based') ->
?DESC('hmac-based');
desc('public-key') ->
?DESC('public-key');
desc('jwks') ->
?DESC('jwks');
desc(jwt_hmac) ->
?DESC(jwt_hmac);
desc(jwt_public_key) ->
?DESC(jwt_public_key);
desc(jwt_jwks) ->
?DESC(jwt_jwks);
desc(undefined) ->
undefined.
@ -160,9 +181,9 @@ from(_) -> undefined.
refs() ->
[
hoconsc:ref(?MODULE, 'hmac-based'),
hoconsc:ref(?MODULE, 'public-key'),
hoconsc:ref(?MODULE, 'jwks')
hoconsc:ref(?MODULE, jwt_hmac),
hoconsc:ref(?MODULE, jwt_public_key),
hoconsc:ref(?MODULE, jwt_jwks)
].
union_member_selector(all_union_members) ->
@ -179,11 +200,11 @@ boolean(<<"false">>) -> false;
boolean(Other) -> Other.
select_ref(true, _) ->
[hoconsc:ref(?MODULE, 'jwks')];
[hoconsc:ref(?MODULE, 'jwt_jwks')];
select_ref(false, #{<<"public_key">> := _}) ->
[hoconsc:ref(?MODULE, 'public-key')];
[hoconsc:ref(?MODULE, jwt_public_key)];
select_ref(false, _) ->
[hoconsc:ref(?MODULE, 'hmac-based')];
[hoconsc:ref(?MODULE, jwt_hmac)];
select_ref(_, _) ->
throw(#{
field_name => use_jwks,

View File

@ -107,14 +107,16 @@ mnesia(boot) ->
%% Hocon Schema
%%------------------------------------------------------------------------------
namespace() -> "authn-builtin_db".
namespace() -> "authn".
tags() ->
[<<"Authentication">>].
roots() -> [?CONF_NS].
%% used for config check when the schema module is resolved
roots() ->
[{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, builtin_db))}].
fields(?CONF_NS) ->
fields(builtin_db) ->
[
{mechanism, emqx_authn_schema:mechanism(password_based)},
{backend, emqx_authn_schema:backend(built_in_database)},
@ -122,8 +124,8 @@ fields(?CONF_NS) ->
{password_hash_algorithm, fun emqx_authn_password_hashing:type_rw/1}
] ++ emqx_authn_schema:common_fields().
desc(?CONF_NS) ->
?DESC(?CONF_NS);
desc(builtin_db) ->
?DESC(builtin_db);
desc(_) ->
undefined.
@ -138,7 +140,7 @@ user_id_type(_) -> undefined.
%%------------------------------------------------------------------------------
refs() ->
[hoconsc:ref(?MODULE, ?CONF_NS)].
[hoconsc:ref(?MODULE, builtin_db)].
create(_AuthenticatorID, Config) ->
create(Config).

View File

@ -44,32 +44,33 @@
%% Hocon Schema
%%------------------------------------------------------------------------------
namespace() -> "authn-mongodb".
namespace() -> "authn".
tags() ->
[<<"Authentication">>].
%% used for config check when the schema module is resolved
roots() ->
[
{?CONF_NS,
hoconsc:mk(
hoconsc:union(fun union_member_selector/1),
hoconsc:union(fun ?MODULE:union_member_selector/1),
#{}
)}
].
fields(standalone) ->
fields(mongo_single) ->
common_fields() ++ emqx_connector_mongo:fields(single);
fields('replica-set') ->
fields(mongo_rs) ->
common_fields() ++ emqx_connector_mongo:fields(rs);
fields('sharded-cluster') ->
fields(mongo_sharded) ->
common_fields() ++ emqx_connector_mongo:fields(sharded).
desc(standalone) ->
?DESC(standalone);
desc('replica-set') ->
desc(mongo_single) ->
?DESC(single);
desc(mongo_rs) ->
?DESC('replica-set');
desc('sharded-cluster') ->
desc(mongo_sharded) ->
?DESC('sharded-cluster');
desc(_) ->
undefined.
@ -126,9 +127,9 @@ is_superuser_field(_) -> undefined.
refs() ->
[
hoconsc:ref(?MODULE, standalone),
hoconsc:ref(?MODULE, 'replica-set'),
hoconsc:ref(?MODULE, 'sharded-cluster')
hoconsc:ref(?MODULE, mongo_single),
hoconsc:ref(?MODULE, mongo_rs),
hoconsc:ref(?MODULE, mongo_sharded)
].
create(_AuthenticatorID, Config) ->
@ -254,11 +255,11 @@ union_member_selector({value, Value}) ->
refs(Value).
refs(#{<<"mongo_type">> := <<"single">>}) ->
[hoconsc:ref(?MODULE, standalone)];
[hoconsc:ref(?MODULE, mongo_single)];
refs(#{<<"mongo_type">> := <<"rs">>}) ->
[hoconsc:ref(?MODULE, 'replica-set')];
[hoconsc:ref(?MODULE, mongo_rs)];
refs(#{<<"mongo_type">> := <<"sharded">>}) ->
[hoconsc:ref(?MODULE, 'sharded-cluster')];
[hoconsc:ref(?MODULE, mongo_sharded)];
refs(_) ->
throw(#{
field_name => mongo_type,

View File

@ -45,14 +45,16 @@
%% Hocon Schema
%%------------------------------------------------------------------------------
namespace() -> "authn-mysql".
namespace() -> "authn".
tags() ->
[<<"Authentication">>].
roots() -> [?CONF_NS].
%% used for config check when the schema module is resolved
roots() ->
[{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, mysql))}].
fields(?CONF_NS) ->
fields(mysql) ->
[
{mechanism, emqx_authn_schema:mechanism(password_based)},
{backend, emqx_authn_schema:backend(mysql)},
@ -62,8 +64,8 @@ fields(?CONF_NS) ->
] ++ emqx_authn_schema:common_fields() ++
proplists:delete(prepare_statement, emqx_connector_mysql:fields(config)).
desc(?CONF_NS) ->
?DESC(?CONF_NS);
desc(mysql) ->
?DESC(mysql);
desc(_) ->
undefined.
@ -82,7 +84,7 @@ query_timeout(_) -> undefined.
%%------------------------------------------------------------------------------
refs() ->
[hoconsc:ref(?MODULE, ?CONF_NS)].
[hoconsc:ref(?MODULE, mysql)].
create(_AuthenticatorID, Config) ->
create(Config).

View File

@ -49,14 +49,16 @@
%% Hocon Schema
%%------------------------------------------------------------------------------
namespace() -> "authn-postgresql".
namespace() -> "authn".
tags() ->
[<<"Authentication">>].
roots() -> [?CONF_NS].
%% used for config check when the schema module is resolved
roots() ->
[{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, postgresql))}].
fields(?CONF_NS) ->
fields(postgresql) ->
[
{mechanism, emqx_authn_schema:mechanism(password_based)},
{backend, emqx_authn_schema:backend(postgresql)},
@ -66,8 +68,8 @@ fields(?CONF_NS) ->
emqx_authn_schema:common_fields() ++
proplists:delete(prepare_statement, emqx_connector_pgsql:fields(config)).
desc(?CONF_NS) ->
?DESC(?CONF_NS);
desc(postgresql) ->
?DESC(postgresql);
desc(_) ->
undefined.
@ -81,7 +83,7 @@ query(_) -> undefined.
%%------------------------------------------------------------------------------
refs() ->
[hoconsc:ref(?MODULE, ?CONF_NS)].
[hoconsc:ref(?MODULE, postgresql)].
create(_AuthenticatorID, Config) ->
create(Config).

View File

@ -44,32 +44,33 @@
%% Hocon Schema
%%------------------------------------------------------------------------------
namespace() -> "authn-redis".
namespace() -> "authn".
tags() ->
[<<"Authentication">>].
%% used for config check when the schema module is resolved
roots() ->
[
{?CONF_NS,
hoconsc:mk(
hoconsc:union(fun union_member_selector/1),
hoconsc:union(fun ?MODULE:union_member_selector/1),
#{}
)}
].
fields(standalone) ->
fields(redis_single) ->
common_fields() ++ emqx_connector_redis:fields(single);
fields(cluster) ->
fields(redis_cluster) ->
common_fields() ++ emqx_connector_redis:fields(cluster);
fields(sentinel) ->
fields(redis_sentinel) ->
common_fields() ++ emqx_connector_redis:fields(sentinel).
desc(standalone) ->
?DESC(standalone);
desc(cluster) ->
desc(redis_single) ->
?DESC(single);
desc(redis_cluster) ->
?DESC(cluster);
desc(sentinel) ->
desc(redis_sentinel) ->
?DESC(sentinel);
desc(_) ->
"".
@ -93,9 +94,9 @@ cmd(_) -> undefined.
refs() ->
[
hoconsc:ref(?MODULE, standalone),
hoconsc:ref(?MODULE, cluster),
hoconsc:ref(?MODULE, sentinel)
hoconsc:ref(?MODULE, redis_single),
hoconsc:ref(?MODULE, redis_cluster),
hoconsc:ref(?MODULE, redis_sentinel)
].
union_member_selector(all_union_members) ->
@ -104,11 +105,11 @@ union_member_selector({value, Value}) ->
refs(Value).
refs(#{<<"redis_type">> := <<"single">>}) ->
[hoconsc:ref(?MODULE, standalone)];
[hoconsc:ref(?MODULE, redis_single)];
refs(#{<<"redis_type">> := <<"cluster">>}) ->
[hoconsc:ref(?MODULE, cluster)];
[hoconsc:ref(?MODULE, redis_cluster)];
refs(#{<<"redis_type">> := <<"sentinel">>}) ->
[hoconsc:ref(?MODULE, sentinel)];
[hoconsc:ref(?MODULE, redis_sentinel)];
refs(_) ->
throw(#{
field_name => redis_type,

View File

@ -47,7 +47,6 @@
})
).
-define(SERVER_RESPONSE_URLENCODE(Result), ?SERVER_RESPONSE_URLENCODE(Result, false)).
-define(SERVER_RESPONSE_URLENCODE(Result, IsSuperuser),
list_to_binary(
"result=" ++
@ -166,6 +165,54 @@ test_user_auth(#{
?GLOBAL
).
t_authenticate_path_placeholders(_Config) ->
ok = emqx_authn_http_test_server:stop(),
{ok, _} = emqx_authn_http_test_server:start_link(?HTTP_PORT, <<"/[...]">>),
ok = emqx_authn_http_test_server:set_handler(
fun(Req0, State) ->
Req =
case cowboy_req:path(Req0) of
<<"/my/p%20ath//us%20er/auth//">> ->
cowboy_req:reply(
200,
#{<<"content-type">> => <<"application/json">>},
emqx_utils_json:encode(#{result => allow, is_superuser => false}),
Req0
);
Path ->
ct:pal("Unexpected path: ~p", [Path]),
cowboy_req:reply(403, Req0)
end,
{ok, Req, State}
end
),
Credentials = ?CREDENTIALS#{
username => <<"us er">>
},
AuthConfig = maps:merge(
raw_http_auth_config(),
#{
<<"url">> => <<"http://127.0.0.1:32333/my/p%20ath//${username}/auth//">>,
<<"body">> => #{}
}
),
{ok, _} = emqx:update_config(
?PATH,
{create_authenticator, ?GLOBAL, AuthConfig}
),
?assertMatch(
{ok, #{is_superuser := false}},
emqx_access_control:authenticate(Credentials)
),
_ = emqx_authn_test_lib:delete_authenticators(
[authentication],
?GLOBAL
).
t_no_value_for_placeholder(_Config) ->
Handler = fun(Req0, State) ->
{ok, RawBody, Req1} = cowboy_req:read_body(Req0),

View File

@ -107,7 +107,7 @@ t_update_with_invalid_config(_Config) ->
?assertMatch(
{error, #{
kind := validation_error,
matched_type := "authn-postgresql:authentication",
matched_type := "authn:postgresql",
path := "authentication.1.server",
reason := required_field
}},

View File

@ -162,7 +162,7 @@ t_create_invalid_config(_Config) ->
?assertMatch(
{error, #{
kind := validation_error,
matched_type := "authn-redis:standalone",
matched_type := "authn:redis_single",
path := "authentication.1.server",
reason := required_field
}},

View File

@ -53,7 +53,7 @@ t_check_schema(_Config) ->
?assertThrow(
#{
path := "authentication.1.password_hash_algorithm.name",
matched_type := "authn-builtin_db:authentication/authn-hash:simple",
matched_type := "authn:builtin_db/authn-hash:simple",
reason := unable_to_convert_to_enum_symbol
},
Check(ConfigNotOk)
@ -72,7 +72,7 @@ t_check_schema(_Config) ->
#{
path := "authentication.1.password_hash_algorithm",
reason := "algorithm_name_missing",
matched_type := "authn-builtin_db:authentication"
matched_type := "authn:builtin_db"
},
Check(ConfigMissingAlgoName)
).

View File

@ -32,19 +32,19 @@ union_member_selector_mongo_test_() ->
end},
{"single", fun() ->
?assertMatch(
?ERR(#{matched_type := "authn-mongodb:standalone"}),
?ERR(#{matched_type := "authn:mongo_single"}),
Check("{mongo_type: single}")
)
end},
{"replica-set", fun() ->
?assertMatch(
?ERR(#{matched_type := "authn-mongodb:replica-set"}),
?ERR(#{matched_type := "authn:mongo_rs"}),
Check("{mongo_type: rs}")
)
end},
{"sharded", fun() ->
?assertMatch(
?ERR(#{matched_type := "authn-mongodb:sharded-cluster"}),
?ERR(#{matched_type := "authn:mongo_sharded"}),
Check("{mongo_type: sharded}")
)
end}
@ -61,19 +61,19 @@ union_member_selector_jwt_test_() ->
end},
{"jwks", fun() ->
?assertMatch(
?ERR(#{matched_type := "authn-jwt:jwks"}),
?ERR(#{matched_type := "authn:jwt_jwks"}),
Check("{use_jwks = true}")
)
end},
{"publick-key", fun() ->
?assertMatch(
?ERR(#{matched_type := "authn-jwt:public-key"}),
?ERR(#{matched_type := "authn:jwt_public_key"}),
Check("{use_jwks = false, public_key = 1}")
)
end},
{"hmac-based", fun() ->
?assertMatch(
?ERR(#{matched_type := "authn-jwt:hmac-based"}),
?ERR(#{matched_type := "authn:jwt_hmac"}),
Check("{use_jwks = false}")
)
end}
@ -90,19 +90,19 @@ union_member_selector_redis_test_() ->
end},
{"single", fun() ->
?assertMatch(
?ERR(#{matched_type := "authn-redis:standalone"}),
?ERR(#{matched_type := "authn:redis_single"}),
Check("{redis_type = single}")
)
end},
{"cluster", fun() ->
?assertMatch(
?ERR(#{matched_type := "authn-redis:cluster"}),
?ERR(#{matched_type := "authn:redis_cluster"}),
Check("{redis_type = cluster}")
)
end},
{"sentinel", fun() ->
?assertMatch(
?ERR(#{matched_type := "authn-redis:sentinel"}),
?ERR(#{matched_type := "authn:redis_sentinel"}),
Check("{redis_type = sentinel}")
)
end}
@ -119,13 +119,13 @@ union_member_selector_http_test_() ->
end},
{"get", fun() ->
?assertMatch(
?ERR(#{matched_type := "authn-http:get"}),
?ERR(#{matched_type := "authn:http_get"}),
Check("{method = get}")
)
end},
{"post", fun() ->
?assertMatch(
?ERR(#{matched_type := "authn-http:post"}),
?ERR(#{matched_type := "authn:http_post"}),
Check("{method = post}")
)
end}

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_authz, [
{description, "An OTP application"},
{vsn, "0.1.17"},
{vsn, "0.1.19"},
{registered, []},
{mod, {emqx_authz_app, []}},
{applications, [

View File

@ -161,9 +161,9 @@ parse_url(Url) ->
BaseUrl = iolist_to_binary([Scheme, "//", HostPort]),
case string:split(Remaining, "?", leading) of
[Path, QueryString] ->
{BaseUrl, Path, QueryString};
{BaseUrl, <<"/", Path/binary>>, QueryString};
[Path] ->
{BaseUrl, Path, <<>>}
{BaseUrl, <<"/", Path/binary>>, <<>>}
end;
[HostPort] ->
{iolist_to_binary([Scheme, "//", HostPort]), <<>>, <<>>}
@ -185,7 +185,7 @@ generate_request(
}
) ->
Values = client_vars(Client, PubSub, Topic),
Path = emqx_authz_utils:render_str(BasePathTemplate, Values),
Path = emqx_authz_utils:render_urlencoded_str(BasePathTemplate, Values),
Query = emqx_authz_utils:render_deep(BaseQueryTemplate, Values),
Body = emqx_authz_utils:render_deep(BodyTemplate, Values),
case Method of
@ -202,9 +202,9 @@ generate_request(
end.
append_query(Path, []) ->
encode_path(Path);
to_list(Path);
append_query(Path, Query) ->
encode_path(Path) ++ "?" ++ to_list(query_string(Query)).
to_list(Path) ++ "?" ++ to_list(query_string(Query)).
query_string(Body) ->
query_string(Body, []).
@ -222,10 +222,6 @@ query_string([{K, V} | More], Acc) ->
uri_encode(T) ->
emqx_http_lib:uri_encode(to_list(T)).
encode_path(Path) ->
Parts = string:split(Path, "/", all),
lists:flatten(["/" ++ Part || Part <- lists:map(fun uri_encode/1, Parts)]).
serialize_body(<<"application/json">>, Body) ->
emqx_utils_json:encode(Body);
serialize_body(<<"application/x-www-form-urlencoded">>, Body) ->

View File

@ -185,7 +185,7 @@ match_who(#{peerhost := IpAddress}, {ipaddrs, CIDRs}) ->
match_who(ClientInfo, {'and', Principals}) when is_list(Principals) ->
lists:foldl(
fun(Principal, Permission) ->
match_who(ClientInfo, Principal) andalso Permission
Permission andalso match_who(ClientInfo, Principal)
end,
true,
Principals
@ -193,7 +193,7 @@ match_who(ClientInfo, {'and', Principals}) when is_list(Principals) ->
match_who(ClientInfo, {'or', Principals}) when is_list(Principals) ->
lists:foldl(
fun(Principal, Permission) ->
match_who(ClientInfo, Principal) orelse Permission
Permission orelse match_who(ClientInfo, Principal)
end,
false,
Principals

View File

@ -54,7 +54,7 @@ type_names() ->
file,
http_get,
http_post,
mnesia,
builtin_db,
mongo_single,
mongo_rs,
mongo_sharded,
@ -93,7 +93,7 @@ fields(http_post) ->
{method, method(post)},
{headers, fun headers/1}
];
fields(mnesia) ->
fields(builtin_db) ->
authz_common_fields(built_in_database);
fields(mongo_single) ->
authz_common_fields(mongodb) ++
@ -191,8 +191,8 @@ desc(http_get) ->
?DESC(http_get);
desc(http_post) ->
?DESC(http_post);
desc(mnesia) ->
?DESC(mnesia);
desc(builtin_db) ->
?DESC(builtin_db);
desc(mongo_single) ->
?DESC(mongo_single);
desc(mongo_rs) ->
@ -459,7 +459,7 @@ select_union_member(#{<<"type">> := <<"http">>} = Value) ->
})
end;
select_union_member(#{<<"type">> := <<"built_in_database">>}) ->
?R_REF(mnesia);
?R_REF(builtin_db);
select_union_member(#{<<"type">> := Type}) ->
select_union_member_loop(Type, type_names());
select_union_member(_) ->

View File

@ -16,7 +16,6 @@
-module(emqx_authz_utils).
-include_lib("emqx/include/emqx_placeholder.hrl").
-include_lib("emqx_authz.hrl").
-export([
@ -28,6 +27,7 @@
update_config/2,
parse_deep/2,
parse_str/2,
render_urlencoded_str/2,
parse_sql/3,
render_deep/2,
render_str/2,
@ -128,6 +128,13 @@ render_str(Template, Values) ->
#{return => full_binary, var_trans => fun handle_var/2}
).
render_urlencoded_str(Template, Values) ->
emqx_placeholder:proc_tmpl(
Template,
client_vars(Values),
#{return => full_binary, var_trans => fun urlencode_var/2}
).
render_sql_params(ParamList, Values) ->
emqx_placeholder:proc_tmpl(
ParamList,
@ -181,6 +188,11 @@ convert_client_var({dn, DN}) -> {cert_subject, DN};
convert_client_var({protocol, Proto}) -> {proto_name, Proto};
convert_client_var(Other) -> Other.
urlencode_var({var, _} = Var, Value) ->
emqx_http_lib:uri_encode(handle_var(Var, Value));
urlencode_var(Var, Value) ->
handle_var(Var, Value).
handle_var({var, _Name}, undefined) ->
<<>>;
handle_var({var, <<"peerhost">>}, IpAddr) ->

View File

@ -199,7 +199,7 @@ t_query_params(_Config) ->
peerhost := <<"127.0.0.1">>,
proto_name := <<"MQTT">>,
mountpoint := <<"MOUNTPOINT">>,
topic := <<"t">>,
topic := <<"t/1">>,
action := <<"publish">>
} = cowboy_req:match_qs(
[
@ -241,7 +241,7 @@ t_query_params(_Config) ->
?assertEqual(
allow,
emqx_access_control:authorize(ClientInfo, publish, <<"t">>)
emqx_access_control:authorize(ClientInfo, publish, <<"t/1">>)
).
t_path(_Config) ->
@ -249,13 +249,13 @@ t_path(_Config) ->
fun(Req0, State) ->
?assertEqual(
<<
"/authz/users/"
"/authz/use%20rs/"
"user%20name/"
"client%20id/"
"127.0.0.1/"
"MQTT/"
"MOUNTPOINT/"
"t/1/"
"t%2F1/"
"publish"
>>,
cowboy_req:path(Req0)
@ -264,7 +264,7 @@ t_path(_Config) ->
end,
#{
<<"url">> => <<
"http://127.0.0.1:33333/authz/users/"
"http://127.0.0.1:33333/authz/use%20rs/"
"${username}/"
"${clientid}/"
"${peerhost}/"

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_bridge, [
{description, "EMQX bridges"},
{vsn, "0.1.16"},
{vsn, "0.1.18"},
{registered, [emqx_bridge_sup]},
{mod, {emqx_bridge_app, []}},
{applications, [

View File

@ -70,7 +70,9 @@
T == dynamo;
T == rocketmq;
T == cassandra;
T == sqlserver
T == sqlserver;
T == pulsar_producer;
T == oracle
).
load() ->

View File

@ -64,7 +64,7 @@
{BridgeType, BridgeName} ->
EXPR
catch
throw:{invalid_bridge_id, Reason} ->
throw:#{reason := Reason} ->
?NOT_FOUND(<<"Invalid bridge ID, ", Reason/binary>>)
end
).
@ -546,6 +546,8 @@ schema("/bridges_probe") ->
case emqx_bridge_resource:create_dry_run(ConnType, maps:remove(<<"type">>, Params1)) of
ok ->
?NO_CONTENT;
{error, #{kind := validation_error} = Reason} ->
?BAD_REQUEST('TEST_FAILED', map_to_json(Reason));
{error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' ->
?BAD_REQUEST('TEST_FAILED', Reason)
end;

View File

@ -87,7 +87,7 @@ parse_bridge_id(BridgeId) ->
[Type, Name] ->
{to_type_atom(Type), validate_name(Name)};
_ ->
invalid_bridge_id(
invalid_data(
<<"should be of pattern {type}:{name}, but got ", BridgeId/binary>>
)
end.
@ -108,14 +108,14 @@ validate_name(Name0) ->
true ->
Name0;
false ->
invalid_bridge_id(<<"bad name: ", Name0/binary>>)
invalid_data(<<"bad name: ", Name0/binary>>)
end;
false ->
invalid_bridge_id(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>)
invalid_data(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>)
end.
-spec invalid_bridge_id(binary()) -> no_return().
invalid_bridge_id(Reason) -> throw({?FUNCTION_NAME, Reason}).
-spec invalid_data(binary()) -> no_return().
invalid_data(Reason) -> throw(#{kind => validation_error, reason => Reason}).
is_id_char(C) when C >= $0 andalso C =< $9 -> true;
is_id_char(C) when C >= $a andalso C =< $z -> true;
@ -130,7 +130,7 @@ to_type_atom(Type) ->
erlang:binary_to_existing_atom(Type, utf8)
catch
_:_ ->
invalid_bridge_id(<<"unknown type: ", Type/binary>>)
invalid_data(<<"unknown bridge type: ", Type/binary>>)
end.
reset_metrics(ResourceId) ->
@ -243,12 +243,19 @@ create_dry_run(Type, Conf0) ->
{error, Reason} ->
{error, Reason};
{ok, ConfNew} ->
ParseConf = parse_confs(bin(Type), TmpPath, ConfNew),
Res = emqx_resource:create_dry_run_local(
bridge_to_resource_type(Type), ParseConf
),
_ = maybe_clear_certs(TmpPath, ConfNew),
Res
try
ParseConf = parse_confs(bin(Type), TmpPath, ConfNew),
Res = emqx_resource:create_dry_run_local(
bridge_to_resource_type(Type), ParseConf
),
Res
catch
%% validation errors
throw:Reason ->
{error, Reason}
after
_ = maybe_clear_certs(TmpPath, ConfNew)
end
end.
remove(BridgeId) ->
@ -300,10 +307,18 @@ parse_confs(
max_retries := Retry
} = Conf
) ->
{BaseUrl, Path} = parse_url(Url),
{ok, BaseUrl2} = emqx_http_lib:uri_parse(BaseUrl),
Url1 = bin(Url),
{BaseUrl, Path} = parse_url(Url1),
BaseUrl1 =
case emqx_http_lib:uri_parse(BaseUrl) of
{ok, BUrl} ->
BUrl;
{error, Reason} ->
Reason1 = emqx_utils:readable_error_msg(Reason),
invalid_data(<<"Invalid URL: ", Url1/binary, ", details: ", Reason1/binary>>)
end,
Conf#{
base_url => BaseUrl2,
base_url => BaseUrl1,
request =>
#{
path => Path,
@ -325,6 +340,8 @@ parse_confs(Type, Name, Conf) when ?IS_INGRESS_BRIDGE(Type) ->
%% to hocon; keeping this as just `kafka' for backwards compatibility.
parse_confs(<<"kafka">> = _Type, Name, Conf) ->
Conf#{bridge_name => Name};
parse_confs(<<"pulsar_producer">> = _Type, Name, Conf) ->
Conf#{bridge_name => Name};
parse_confs(_Type, _Name, Conf) ->
Conf.
@ -338,7 +355,7 @@ parse_url(Url) ->
{iolist_to_binary([Scheme, "//", HostPort]), <<>>}
end;
[Url] ->
error({invalid_url, Url})
invalid_data(<<"Missing scheme in URL: ", Url/binary>>)
end.
str(Bin) when is_binary(Bin) -> binary_to_list(Bin);

View File

@ -141,8 +141,7 @@ setup_fake_telemetry_data() ->
}
}
},
Opts = #{raw_with_default => true},
ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf, Opts),
ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf),
ok = snabbkaffe:start_trace(),
Predicate = fun(#{?snk_kind := K}) -> K =:= emqx_bridge_loaded end,

View File

@ -414,6 +414,18 @@ t_http_crud_apis(Config) ->
},
json(maps:get(<<"message">>, PutFail2))
),
{ok, 400, _} = request_json(
put,
uri(["bridges", BridgeID]),
?HTTP_BRIDGE(<<"localhost:1234/foo">>, Name),
Config
),
{ok, 400, _} = request_json(
put,
uri(["bridges", BridgeID]),
?HTTP_BRIDGE(<<"htpp://localhost:12341234/foo">>, Name),
Config
),
%% delete the bridge
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config),
@ -498,6 +510,22 @@ t_http_crud_apis(Config) ->
%% Try create bridge with bad characters as name
{ok, 400, _} = request(post, uri(["bridges"]), ?HTTP_BRIDGE(URL1, <<"隋达"/utf8>>), Config),
%% Missing scheme in URL
{ok, 400, _} = request(
post,
uri(["bridges"]),
?HTTP_BRIDGE(<<"localhost:1234/foo">>, <<"missing_url_scheme">>),
Config
),
%% Invalid port
{ok, 400, _} = request(
post,
uri(["bridges"]),
?HTTP_BRIDGE(<<"http://localhost:12341234/foo">>, <<"invalid_port">>),
Config
),
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config).
t_http_bridges_local_topic(Config) ->
@ -1016,6 +1044,34 @@ t_bridges_probe(Config) ->
)
),
%% Missing scheme in URL
?assertMatch(
{ok, 400, #{
<<"code">> := <<"TEST_FAILED">>,
<<"message">> := _
}},
request_json(
post,
uri(["bridges_probe"]),
?HTTP_BRIDGE(<<"203.0.113.3:1234/foo">>),
Config
)
),
%% Invalid port
?assertMatch(
{ok, 400, #{
<<"code">> := <<"TEST_FAILED">>,
<<"message">> := _
}},
request_json(
post,
uri(["bridges_probe"]),
?HTTP_BRIDGE(<<"http://203.0.113.3:12341234/foo">>),
Config
)
),
{ok, 204, _} = request(
post,
uri(["bridges_probe"]),

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this Licenses text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this Licenses text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,41 @@
# EMQX Cassandra Bridge
[Apache Cassandra](https://github.com/apache/cassandra) is an open-source, distributed
NoSQL database management system that is designed to manage large amounts of structured
and semi-structured data across many commodity servers, providing high availability
with no single point of failure.
It is commonly used in web and mobile applications, IoT, and other systems that
require storing, querying, and analyzing large amounts of data.
The application is used to connect EMQX and Cassandra. User can create a rule
and easily ingest IoT data into Cassandra by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
<!---
# Documentation
- Refer to [Ingest data into Cassandra](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-cassa.html)
for how to use EMQX dashboard to ingest IoT data into Cassandra.
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
--->
# HTTP APIs
- Several APIs are provided for bridge management, which includes create bridge,
update bridge, get bridge, stop or restart bridge and list bridges etc.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,2 @@
toxiproxy
cassandra

View File

@ -0,0 +1,5 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-define(CASSANDRA_DEFAULT_PORT, 9042).

View File

@ -0,0 +1,11 @@
%% -*- mode: erlang; -*-
{erl_opts, [debug_info]}.
{deps, [ {ecql, {git, "https://github.com/emqx/ecql.git", {tag, "v0.5.1"}}}
, {emqx_connector, {path, "../../apps/emqx_connector"}}
, {emqx_resource, {path, "../../apps/emqx_resource"}}
, {emqx_bridge, {path, "../../apps/emqx_bridge"}}
]}.
{shell, [
{apps, [emqx_bridge_cassandra]}
]}.

View File

@ -0,0 +1,9 @@
{application, emqx_bridge_cassandra, [
{description, "EMQX Enterprise Cassandra Bridge"},
{vsn, "0.1.1"},
{registered, []},
{applications, [kernel, stdlib, ecql]},
{env, []},
{modules, []},
{links, []}
]}.

View File

@ -1,7 +1,7 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_bridge_cassa).
-module(emqx_bridge_cassandra).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
@ -88,7 +88,7 @@ fields("config") ->
#{desc => ?DESC("local_topic"), default => undefined}
)}
] ++ emqx_resource_schema:fields("resource_opts") ++
(emqx_ee_connector_cassa:fields(config) --
(emqx_bridge_cassandra_connector:fields(config) --
emqx_connector_schema_lib:prepare_statement_fields());
fields("post") ->
fields("post", cassandra);

View File

@ -2,12 +2,12 @@
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_connector_cassa).
-module(emqx_bridge_cassandra_connector).
-behaviour(emqx_resource).
-include_lib("emqx_connector/include/emqx_connector.hrl").
-include_lib("emqx_ee_connector/include/emqx_ee_connector.hrl").
-include("emqx_bridge_cassandra.hrl").
-include_lib("typerefl/include/types.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("hocon/include/hoconsc.hrl").
@ -44,7 +44,7 @@
-type state() ::
#{
poolname := atom(),
pool_name := binary(),
prepare_cql := prepares(),
params_tokens := params_tokens(),
%% returned by ecql:prepare/2
@ -92,7 +92,7 @@ callback_mode() -> async_if_possible.
on_start(
InstId,
#{
servers := Servers,
servers := Servers0,
keyspace := Keyspace,
username := Username,
pool_size := PoolSize,
@ -104,9 +104,16 @@ on_start(
connector => InstId,
config => emqx_utils:redact(Config)
}),
Servers =
lists:map(
fun(#{hostname := Host, port := Port}) ->
{Host, Port}
end,
emqx_schema:parse_servers(Servers0, ?DEFAULT_SERVER_OPTION)
),
Options = [
{nodes, emqx_schema:parse_servers(Servers, ?DEFAULT_SERVER_OPTION)},
{nodes, Servers},
{username, Username},
{password, emqx_secret:wrap(maps:get(password, Config, ""))},
{keyspace, Keyspace},
@ -124,14 +131,10 @@ on_start(
false ->
[]
end,
%% use InstaId of binary type as Pool name, which is supported in ecpool.
PoolName = InstId,
Prepares = parse_prepare_cql(Config),
InitState = #{poolname => PoolName, prepare_statement => #{}},
State = maps:merge(InitState, Prepares),
case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of
State = parse_prepare_cql(Config),
case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of
ok ->
{ok, init_prepare(State)};
{ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})};
{error, Reason} ->
?tp(
cassandra_connector_start_failed,
@ -140,12 +143,12 @@ on_start(
{error, Reason}
end.
on_stop(InstId, #{poolname := PoolName}) ->
on_stop(InstId, #{pool_name := PoolName}) ->
?SLOG(info, #{
msg => "stopping_cassandra_connector",
connector => InstId
}),
emqx_plugin_libs_pool:stop_pool(PoolName).
emqx_resource_pool:stop(PoolName).
-type request() ::
% emqx_bridge.erl
@ -184,7 +187,7 @@ do_single_query(
InstId,
Request,
Async,
#{poolname := PoolName} = State
#{pool_name := PoolName} = State
) ->
{Type, PreparedKeyOrSQL, Params} = parse_request_to_cql(Request),
?tp(
@ -232,7 +235,7 @@ do_batch_query(
InstId,
Requests,
Async,
#{poolname := PoolName} = State
#{pool_name := PoolName} = State
) ->
CQLs =
lists:map(
@ -312,8 +315,8 @@ exec_cql_batch_query(InstId, PoolName, Async, CQLs) ->
exec(PoolName, Query) ->
ecpool:pick_and_do(PoolName, Query, no_handover).
on_get_status(_InstId, #{poolname := Pool} = State) ->
case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of
on_get_status(_InstId, #{pool_name := PoolName} = State) ->
case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of
true ->
case do_check_prepares(State) of
ok ->
@ -334,7 +337,7 @@ do_get_status(Conn) ->
do_check_prepares(#{prepare_cql := Prepares}) when is_map(Prepares) ->
ok;
do_check_prepares(State = #{poolname := PoolName, prepare_cql := {error, Prepares}}) ->
do_check_prepares(State = #{pool_name := PoolName, prepare_cql := {error, Prepares}}) ->
%% retry to prepare
case prepare_cql(Prepares, PoolName) of
{ok, Sts} ->
@ -410,7 +413,7 @@ parse_prepare_cql([], Prepares, Tokens) ->
params_tokens => Tokens
}.
init_prepare(State = #{prepare_cql := Prepares, poolname := PoolName}) ->
init_prepare(State = #{prepare_cql := Prepares, pool_name := PoolName}) ->
case maps:size(Prepares) of
0 ->
State;
@ -442,17 +445,17 @@ prepare_cql(Prepares, PoolName) ->
end.
do_prepare_cql(Prepares, PoolName) ->
do_prepare_cql(ecpool:workers(PoolName), Prepares, PoolName, #{}).
do_prepare_cql(ecpool:workers(PoolName), Prepares, #{}).
do_prepare_cql([{_Name, Worker} | T], Prepares, PoolName, _LastSts) ->
do_prepare_cql([{_Name, Worker} | T], Prepares, _LastSts) ->
{ok, Conn} = ecpool_worker:client(Worker),
case prepare_cql_to_conn(Conn, Prepares) of
{ok, Sts} ->
do_prepare_cql(T, Prepares, PoolName, Sts);
do_prepare_cql(T, Prepares, Sts);
Error ->
Error
end;
do_prepare_cql([], _Prepares, _PoolName, LastSts) ->
do_prepare_cql([], _Prepares, LastSts) ->
{ok, LastSts}.
prepare_cql_to_conn(Conn, Prepares) ->

View File

@ -2,7 +2,7 @@
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_bridge_cassa_SUITE).
-module(emqx_bridge_cassandra_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
@ -57,7 +57,7 @@
%% CASSA_TCP_HOST=127.0.0.1 CASSA_TCP_PORT=19042 \
%% CASSA_TLS_HOST=127.0.0.1 CASSA_TLS_PORT=19142 \
%% PROXY_HOST=127.0.0.1 ./rebar3 as test ct -c -v --name ct@127.0.0.1 \
%% --suite lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl
%% --suite apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl
%%
%%------------------------------------------------------------------------------
@ -530,15 +530,16 @@ t_write_failure(Config) ->
fun(Trace0) ->
ct:pal("trace: ~p", [Trace0]),
Trace = ?of_kind(buffer_worker_flush_nack, Trace0),
?assertMatch([#{result := {async_return, {error, _}}} | _], Trace),
[#{result := {async_return, {error, Error}}} | _] = Trace,
case Error of
{resource_error, _} ->
[#{result := Result} | _] = Trace,
case Result of
{async_return, {error, {resource_error, _}}} ->
ok;
{recoverable_error, disconnected} ->
{async_return, {error, {recoverable_error, disconnected}}} ->
ok;
{error, {resource_error, _}} ->
ok;
_ ->
ct:fail("unexpected error: ~p", [Error])
ct:fail("unexpected error: ~p", [Result])
end
end
),
@ -589,7 +590,7 @@ t_missing_data(Config) ->
{ok, _},
create_bridge(Config)
),
%% emqx_ee_connector_cassa will send missed data as a `null` atom
%% emqx_bridge_cassandra_connector will send missed data as a `null` atom
%% to ecql driver
?check_trace(
begin

View File

@ -2,13 +2,13 @@
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_connector_cassa_SUITE).
-module(emqx_bridge_cassandra_connector_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include("emqx_connector.hrl").
-include("emqx_ee_connector.hrl").
-include("emqx_bridge_cassandra.hrl").
-include("emqx_connector/include/emqx_connector.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("emqx/include/emqx.hrl").
-include_lib("stdlib/include/assert.hrl").
@ -16,7 +16,7 @@
%% Cassandra server defined at `.ci/docker-compose-file/docker-compose-cassandra-tcp.yaml`
%% You can change it to `127.0.0.1`, if you run this SUITE locally
-define(CASSANDRA_HOST, "cassandra").
-define(CASSANDRA_RESOURCE_MOD, emqx_ee_connector_cassa).
-define(CASSANDRA_RESOURCE_MOD, emqx_bridge_cassandra_connector).
%% This test SUITE requires a running cassandra instance. If you don't want to
%% bring up the whole CI infrastuctucture with the `scripts/ct/run.sh` script
@ -38,9 +38,14 @@ groups() ->
[].
cassandra_servers() ->
emqx_schema:parse_servers(
iolist_to_binary([?CASSANDRA_HOST, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]),
#{default_port => ?CASSANDRA_DEFAULT_PORT}
lists:map(
fun(#{hostname := Host, port := Port}) ->
{Host, Port}
end,
emqx_schema:parse_servers(
iolist_to_binary([?CASSANDRA_HOST, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]),
#{default_port => ?CASSANDRA_DEFAULT_PORT}
)
).
init_per_suite(Config) ->
@ -101,15 +106,15 @@ show(Label, What) ->
erlang:display({Label, What}),
What.
perform_lifecycle_check(PoolName, InitialConfig) ->
perform_lifecycle_check(ResourceId, InitialConfig) ->
{ok, #{config := CheckedConfig}} =
emqx_resource:check_config(?CASSANDRA_RESOURCE_MOD, InitialConfig),
{ok, #{
state := #{poolname := ReturnedPoolName} = State,
state := #{pool_name := PoolName} = State,
status := InitialStatus
}} =
emqx_resource:create_local(
PoolName,
ResourceId,
?CONNECTOR_RESOURCE_GROUP,
?CASSANDRA_RESOURCE_MOD,
CheckedConfig,
@ -121,45 +126,45 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
state := State,
status := InitialStatus
}} =
emqx_resource:get_instance(PoolName),
?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
emqx_resource:get_instance(ResourceId),
?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
% % Perform query as further check that the resource is working as expected
(fun() ->
erlang:display({pool_name, PoolName}),
QueryNoParamsResWrapper = emqx_resource:query(PoolName, test_query_no_params()),
erlang:display({pool_name, ResourceId}),
QueryNoParamsResWrapper = emqx_resource:query(ResourceId, test_query_no_params()),
?assertMatch({ok, _}, QueryNoParamsResWrapper)
end)(),
?assertEqual(ok, emqx_resource:stop(PoolName)),
?assertEqual(ok, emqx_resource:stop(ResourceId)),
% Resource will be listed still, but state will be changed and healthcheck will fail
% as the worker no longer exists.
{ok, ?CONNECTOR_RESOURCE_GROUP, #{
state := State,
status := StoppedStatus
}} =
emqx_resource:get_instance(PoolName),
emqx_resource:get_instance(ResourceId),
?assertEqual(stopped, StoppedStatus),
?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)),
?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)),
% Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
% Can call stop/1 again on an already stopped instance
?assertEqual(ok, emqx_resource:stop(PoolName)),
?assertEqual(ok, emqx_resource:stop(ResourceId)),
% Make sure it can be restarted and the healthchecks and queries work properly
?assertEqual(ok, emqx_resource:restart(PoolName)),
?assertEqual(ok, emqx_resource:restart(ResourceId)),
% async restart, need to wait resource
timer:sleep(500),
{ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
emqx_resource:get_instance(PoolName),
?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
emqx_resource:get_instance(ResourceId),
?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
(fun() ->
QueryNoParamsResWrapper =
emqx_resource:query(PoolName, test_query_no_params()),
emqx_resource:query(ResourceId, test_query_no_params()),
?assertMatch({ok, _}, QueryNoParamsResWrapper)
end)(),
% Stop and remove the resource in one go.
?assertEqual(ok, emqx_resource:remove_local(PoolName)),
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
?assertEqual(ok, emqx_resource:remove_local(ResourceId)),
?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
% Should not even be able to get the resource data out of ets now unlike just stopping.
?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)).
?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)).
%%--------------------------------------------------------------------
%% utils

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this Licenses text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this Licenses text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,37 @@
# EMQX ClickHouse Bridge
[ClickHouse](https://github.com/ClickHouse/ClickHouse) is an open-source, column-based
database management system. It is designed for real-time processing of large volumes of
data and is known for its high performance and scalability.
The application is used to connect EMQX and ClickHouse.
User can create a rule and easily ingest IoT data into ClickHouse by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
# Documentation
- Refer to [Ingest data into ClickHouse](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-clickhouse.html)
for how to use EMQX dashboard to ingest IoT data into ClickHouse.
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
# HTTP APIs
- Several APIs are provided for bridge management, which includes create bridge,
update bridge, get bridge, stop or restart bridge and list bridges etc.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
for more detailed information.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,9 @@
{application, emqx_bridge_clickhouse, [
{description, "EMQX Enterprise ClickHouse Bridge"},
{vsn, "0.1.0"},
{registered, []},
{applications, [kernel, stdlib]},
{env, []},
{modules, []},
{links, []}
]}.

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this Licenses text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this Licenses text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,40 @@
# EMQX DynamoDB Bridge
[Dynamodb](https://aws.amazon.com/dynamodb/) is a high-performance NoSQL database
service provided by Amazon that's designed for scalability and low-latency access
to structured data.
It's often used in applications that require fast and reliable access to data,
such as mobile, ad tech, and IoT.
The application is used to connect EMQX and DynamoDB.
User can create a rule and easily ingest IoT data into DynamoDB by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
# Documentation
- Refer to [Ingest data into DynamoDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-dynamo.html)
for how to use EMQX dashboard to ingest IoT data into DynamoDB.
- Refer to [Rules engine](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
# HTTP APIs
- Several APIs are provided for bridge management, which includes create bridge,
update bridge, get bridge, stop or restart bridge and list bridges etc.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
for more detailed information.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,9 @@
{application, emqx_bridge_dynamo, [
{description, "EMQX Enterprise Dynamo Bridge"},
{vsn, "0.1.0"},
{registered, []},
{applications, [kernel, stdlib]},
{env, []},
{modules, []},
{links, []}
]}.

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this Licenses text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this Licenses text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,36 @@
# EMQX GCP Pub/Sub Bridge
[Google Cloud Pub/Sub](https://cloud.google.com/pubsub) is a messaging service
provided by Google Cloud Platform (GCP).
The application is used to connect EMQX and GCP Pub/Sub.
User can create a rule and easily ingest IoT data into GCP Pub/Sub by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
# Documentation
- Refer to [Ingest data into GCP Pub/Sub](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-gcp-pubsub.html)
for how to use EMQX dashboard to ingest IoT data into GCP Pub/Sub.
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for the EMQX rules engine introduction.
# HTTP APIs
- Several APIs are provided for bridge management, which includes create bridge,
update bridge, get bridge, stop or restart bridge and list bridges etc.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
for more detailed information.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,10 @@
%% -*- mode: erlang; -*-
{erl_opts, [debug_info]}.
{deps, [ {emqx_connector, {path, "../../apps/emqx_connector"}}
, {emqx_resource, {path, "../../apps/emqx_resource"}}
, {emqx_bridge, {path, "../../apps/emqx_bridge"}}
]}.
{shell, [
{apps, [emqx_bridge_gcp_pubsub]}
]}.

View File

@ -0,0 +1,13 @@
{application, emqx_bridge_gcp_pubsub, [
{description, "EMQX Enterprise GCP Pub/Sub Bridge"},
{vsn, "0.1.1"},
{registered, []},
{applications, [
kernel,
stdlib,
ehttpc
]},
{env, []},
{modules, []},
{links, []}
]}.

Some files were not shown because too many files have changed in this diff Show More