Merge branch 'release-50' into file-transfer

* release-50: (73 commits)
  feat: add RabbitMQ bridge
  docs: improve rule engine labels and descriptions
  chore: bump version && update changes
  refactor(rocketmq): move rocketmq bridge into its own app
  test: dashboard_listener_test crash
  chore: bump chart versions
  chore: bump ee version to e5.0.4-alpha.1
  test: fix inter-suite flakiness
  build: compatibility to make 4.4+
  feat: add IotDB bridge
  ci: ensure git safe dir in build_packages
  ci: ensure git safe dir
  test: check_oom's max_mailbox_size
  feat: rename max_message_queue_len to max_mailbox_size
  fix(buffer_worker): fix inflight count when updating inflight item
  chore: prepare for v5.0.25-rc.1 release
  docs: add change log entry
  fix: non_neg_integer() translated to minimum = 1 in bridge-api-en.json
  chore: `MQTT X` -> `MQTTX`
  chore: make sure brod_gssapi app is included in release package
  ...
commit 7fa166f034 by Ilya Averyanov, 2023-05-09 23:30:22 +05:00
144 changed files with 4470 additions and 352 deletions

View File

@ -0,0 +1,31 @@
version: '3.9'
services:
iotdb:
container_name: iotdb
hostname: iotdb
image: apache/iotdb:1.1.0-standalone
restart: always
environment:
- enable_rest_service=true
- cn_internal_address=iotdb
- cn_internal_port=10710
- cn_consensus_port=10720
- cn_target_config_node_list=iotdb:10710
- dn_rpc_address=iotdb
- dn_internal_address=iotdb
- dn_rpc_port=6667
- dn_mpp_data_exchange_port=10740
- dn_schema_region_consensus_port=10750
- dn_data_region_consensus_port=10760
- dn_target_config_node_list=iotdb:10710
# volumes:
# - ./data:/iotdb/data
# - ./logs:/iotdb/logs
expose:
- "18080"
# IoTDB's REST interface, uncomment for local testing
# ports:
# - "18080:18080"
networks:
- emqx_bridge

View File

@ -0,0 +1,17 @@
version: '3.9'
services:
rabbitmq:
container_name: rabbitmq
image: rabbitmq:3.11-management
restart: always
expose:
- "15672"
- "5672"
# We don't want to take ports from the host
# ports:
# - "15672:15672"
# - "5672:5672"
networks:
- emqx_bridge

View File

@ -25,8 +25,8 @@ services:
- ./rocketmq/conf/broker.conf:/etc/rocketmq/broker.conf
environment:
NAMESRV_ADDR: "rocketmq_namesrv:9876"
JAVA_OPTS: " -Duser.home=/opt"
JAVA_OPT_EXT: "-server -Xms1024m -Xmx1024m -Xmn1024m"
JAVA_OPTS: " -Duser.home=/opt -Drocketmq.broker.diskSpaceWarningLevelRatio=0.99"
JAVA_OPT_EXT: "-server -Xms512m -Xmx512m -Xmn512m"
command: ./mqbroker -c /etc/rocketmq/broker.conf
depends_on:
- mqnamesrv

View File

@ -45,6 +45,7 @@ services:
- 19100:19100
# IOTDB
- 14242:4242
- 28080:18080
command:
- "-host=0.0.0.0"
- "-config=/config/toxiproxy.json"

View File

@ -126,6 +126,12 @@
"upstream": "oracle:1521",
"enabled": true
},
{
"name": "iotdb",
"listen": "0.0.0.0:18080",
"upstream": "iotdb:18080",
"enabled": true
},
{
"name": "minio_tcp",
"listen": "0.0.0.0:19000",

View File

@ -26,19 +26,16 @@ jobs:
BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }}
IS_EXACT_TAG: ${{ steps.get_profile.outputs.IS_EXACT_TAG }}
VERSION: ${{ steps.get_profile.outputs.VERSION }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.inputs.branch_or_tag }} # when input is not given, the event tag is used
path: source
fetch-depth: 0
- name: Get profile to build
id: get_profile
run: |
cd source
git config --global --add safe.directory "$(pwd)"
git config --global --add safe.directory "$GITHUB_WORKSPACE"
tag=${{ github.ref }}
if git describe --tags --match "[v|e]*" --exact; then
echo "WARN: This is an exact git tag, will publish release"
@ -75,31 +72,21 @@ jobs:
esac
echo "BUILD_PROFILE=$PROFILE" >> $GITHUB_OUTPUT
echo "VERSION=$(./pkg-vsn.sh $PROFILE)" >> $GITHUB_OUTPUT
- name: get_all_deps
run: |
make -C source deps-all
zip -ryq source.zip source/* source/.[^.]*
- uses: actions/upload-artifact@v3
with:
name: source
path: source.zip
windows:
runs-on: windows-2019
if: startsWith(github.ref_name, 'v')
needs: prepare
strategy:
fail-fast: false
matrix:
profile: # for now only CE for windows
- emqx
steps:
- uses: actions/download-artifact@v3
- uses: actions/checkout@v3
with:
name: source
path: .
- name: unzip source code
run: Expand-Archive -Path source.zip -DestinationPath ./
ref: ${{ github.event.inputs.branch_or_tag }}
fetch-depth: 0
- uses: ilammy/msvc-dev-cmd@v1.12.0
- uses: erlef/setup-beam@v1.15.2
with:
@ -108,14 +95,12 @@ jobs:
env:
PYTHON: python
DIAGNOSTIC: 1
working-directory: source
run: |
# ensure crypto app (openssl)
erl -eval "erlang:display(crypto:info_lib())" -s init stop
make ${{ matrix.profile }}-tgz
- name: run emqx
timeout-minutes: 5
working-directory: source
run: |
./_build/${{ matrix.profile }}/rel/emqx/bin/emqx start
Start-Sleep -s 5
@ -130,7 +115,7 @@ jobs:
if: success()
with:
name: ${{ matrix.profile }}
path: source/_packages/${{ matrix.profile }}/
path: _packages/${{ matrix.profile }}/
mac:
needs: prepare
@ -148,15 +133,10 @@ jobs:
runs-on: ${{ matrix.os }}
steps:
- uses: emqx/self-hosted-cleanup-action@v1.0.3
- uses: actions/download-artifact@v3
- uses: actions/checkout@v3
with:
name: source
path: .
- name: unzip source code
run: |
ln -s . source
unzip -o -q source.zip
rm source source.zip
ref: ${{ github.event.inputs.branch_or_tag }}
fetch-depth: 0
- uses: ./.github/actions/package-macos
with:
profile: ${{ matrix.profile }}
@ -175,6 +155,8 @@ jobs:
linux:
needs: prepare
runs-on: ${{ matrix.build_machine }}
    # always run in the builder container because the host might have the wrong OTP version etc.
    # otherwise buildx.sh does not run docker when the host arch and os already match the target arch and os.
container:
image: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"
@ -235,29 +217,20 @@ jobs:
steps:
- uses: AutoModality/action-clean@v1
if: matrix.build_machine == 'aws-arm64'
- uses: actions/download-artifact@v3
- uses: actions/checkout@v3
with:
name: source
path: .
- name: unzip source code
run: unzip -q source.zip
- name: tmp fix for el9
if: matrix.os == 'el9'
run: |
set -eu
dnf install -y krb5-devel
ref: ${{ github.event.inputs.branch_or_tag }}
fetch-depth: 0
- name: build emqx packages
working-directory: source
env:
BUILDER: ${{ matrix.builder }}
ELIXIR: ${{ matrix.elixir }}
OTP: ${{ matrix.otp }}
PROFILE: ${{ matrix.profile }}
ARCH: ${{ matrix.arch }}
SYSTEM: ${{ matrix.os }}
run: |
set -eu
git config --global --add safe.directory "/__w/emqx/emqx"
git config --global --add safe.directory "$GITHUB_WORKSPACE"
# Align path for CMake caches
if [ ! "$PWD" = "/emqx" ]; then
ln -s $PWD /emqx
@ -266,7 +239,8 @@ jobs:
echo "pwd is $PWD"
PKGTYPES="tgz pkg"
IS_ELIXIR="no"
if [ ${{ matrix.release_with }} == 'elixir' ]; then
WITH_ELIXIR=${{ matrix.release_with }}
if [ "${WITH_ELIXIR:-}" == 'elixir' ]; then
PKGTYPES="tgz"
# set Elixir build flag
IS_ELIXIR="yes"
@ -278,18 +252,18 @@ jobs:
--pkgtype "${PKGTYPE}" \
--arch "${ARCH}" \
--elixir "${IS_ELIXIR}" \
--builder "ghcr.io/emqx/emqx-builder/${BUILDER}:${ELIXIR}-${OTP}-${SYSTEM}"
--builder "force_host"
done
- uses: actions/upload-artifact@v3
if: success()
with:
name: ${{ matrix.profile }}
path: source/_packages/${{ matrix.profile }}/
path: _packages/${{ matrix.profile }}/
publish_artifacts:
runs-on: ubuntu-22.04
needs: [prepare, mac, linux]
if: needs.prepare.outputs.IS_EXACT_TAG
if: needs.prepare.outputs.IS_EXACT_TAG == 'true'
strategy:
fail-fast: false
matrix:

View File

@ -7,44 +7,26 @@ concurrency:
on:
schedule:
- cron: '0 */6 * * *'
workflow_dispatch:
jobs:
prepare:
runs-on: aws-amd64
linux:
if: github.repository_owner == 'emqx'
container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04
runs-on: aws-${{ matrix.arch }}
    # always run in the builder container because the host might have the wrong OTP version etc.
    # otherwise buildx.sh does not run docker when the host arch and os already match the target arch and os.
container:
image: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"
strategy:
fail-fast: false
matrix:
profile:
- ['emqx', 'master']
- ['emqx-enterprise', 'release-50']
steps:
- uses: actions/checkout@v3
with:
ref: ${{ matrix.profile[1] }}
path: source
fetch-depth: 0
- name: get_all_deps
run: |
make -C source deps-all
zip -ryq source.zip source/* source/.[^.]*
- uses: actions/upload-artifact@v3
with:
name: source-${{ matrix.profile[0] }}
path: source.zip
linux:
needs: prepare
runs-on: aws-${{ matrix.arch }}
strategy:
fail-fast: false
matrix:
profile:
- emqx
- emqx-enterprise
branch:
- master
- release-50
otp:
- 24.3.4.2-3
arch:
@ -62,24 +44,20 @@ jobs:
shell: bash
steps:
- uses: AutoModality/action-clean@v1
- uses: actions/download-artifact@v3
- uses: emqx/self-hosted-cleanup-action@v1.0.3
- uses: actions/checkout@v3
with:
name: source-${{ matrix.profile }}
path: .
- name: unzip source code
run: unzip -q source.zip
ref: ${{ matrix.profile[1] }}
fetch-depth: 0
- name: build emqx packages
working-directory: source
env:
BUILDER: ${{ matrix.builder }}
ELIXIR: ${{ matrix.elixir }}
OTP: ${{ matrix.otp }}
PROFILE: ${{ matrix.profile[0] }}
ARCH: ${{ matrix.arch }}
OS: ${{ matrix.os }}
run: |
set -eu
git config --global --add safe.directory "$GITHUB_WORKSPACE"
PKGTYPES="tgz pkg"
IS_ELIXIR="no"
for PKGTYPE in ${PKGTYPES};
@ -89,13 +67,13 @@ jobs:
--pkgtype "${PKGTYPE}" \
--arch "${ARCH}" \
--elixir "${IS_ELIXIR}" \
--builder "ghcr.io/emqx/emqx-builder/${BUILDER}:${ELIXIR}-${OTP}-${OS}
--builder "force_host"
done
- uses: actions/upload-artifact@v3
if: success()
with:
name: ${{ matrix.profile }}
path: source/_packages/${{ matrix.profile }}/
name: ${{ matrix.profile[0] }}
path: _packages/${{ matrix.profile[0] }}/
- name: Send notification to Slack
uses: slackapi/slack-github-action@v1.23.0
if: failure()
@ -103,32 +81,31 @@ jobs:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
with:
payload: |
{"text": "Scheduled build of ${{ matrix.profile }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
{"text": "Scheduled build of ${{ matrix.profile[0] }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
mac:
needs: prepare
runs-on: ${{ matrix.os }}
if: github.repository_owner == 'emqx'
strategy:
fail-fast: false
matrix:
profile:
- emqx
branch:
- master
otp:
- 24.3.4.2-3
os:
- macos-12
- macos-12-arm64
runs-on: ${{ matrix.os }}
steps:
- uses: emqx/self-hosted-cleanup-action@v1.0.3
- uses: actions/download-artifact@v3
- uses: actions/checkout@v3
with:
name: source-${{ matrix.profile }}
path: .
- name: unzip source code
run: |
ln -s . source
unzip -o -q source.zip
rm source source.zip
ref: ${{ matrix.branch }}
fetch-depth: 0
- uses: ./.github/actions/package-macos
with:
profile: ${{ matrix.profile }}

View File

@ -14,6 +14,7 @@ on:
jobs:
prepare:
runs-on: ubuntu-latest
if: github.repository_owner == 'emqx'
container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu20.04
outputs:
BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }}

View File

@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Copyright (c) 2016-2023 EMQ Technologies Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -4,11 +4,6 @@ SCRIPTS = $(CURDIR)/scripts
export EMQX_RELUP ?= true
export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-debian11
export EMQX_DEFAULT_RUNNER = debian:11-slim
export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
export EMQX_DASHBOARD_VERSION ?= v1.2.3
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6-beta.2
export EMQX_REL_FORM ?= tgz
export QUICER_DOWNLOAD_FROM_RELEASE = 1
ifeq ($(OS),Windows_NT)
@ -18,6 +13,22 @@ else
FIND=find
endif
# Dashboard version
# from https://github.com/emqx/emqx-dashboard5
export EMQX_DASHBOARD_VERSION ?= v1.2.4
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6
# `:=` should be used here, otherwise the `$(shell ...)` will be executed every time the variable is used.
# In make 4.4+, for backward compatibility, the value from the original environment is used,
# so the shell script will be executed many times.
# https://github.com/emqx/emqx/pull/10627
ifeq ($(strip $(OTP_VSN)),)
export OTP_VSN := $(shell $(SCRIPTS)/get-otp-vsn.sh)
endif
ifeq ($(strip $(ELIXIR_VSN)),)
export ELIXIR_VSN := $(shell $(SCRIPTS)/get-elixir-vsn.sh)
endif
PROFILE ?= emqx
REL_PROFILES := emqx emqx-enterprise
PKG_PROFILES := emqx-pkg emqx-enterprise-pkg

View File

@ -32,10 +32,10 @@
%% `apps/emqx/src/bpapi/README.md'
%% Community edition
-define(EMQX_RELEASE_CE, "5.0.24").
-define(EMQX_RELEASE_CE, "5.0.25-rc.1").
%% Enterprise edition
-define(EMQX_RELEASE_EE, "5.0.3-alpha.5").
-define(EMQX_RELEASE_EE, "5.0.4-alpha.1").
%% the HTTP API version
-define(EMQX_API_VERSION, "5.0").

View File

@ -18,6 +18,7 @@
-compile({no_auto_import, [get/0, get/1, put/2, erase/1]}).
-elvis([{elvis_style, god_modules, disable}]).
-include("logger.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-export([
init_load/1,
@ -151,7 +152,7 @@ get_root([RootName | _]) ->
%% @doc For the given path, get raw root value enclosed in a single-key map.
%% key is ensured to be binary.
get_root_raw([RootName | _]) ->
#{bin(RootName) => do_get_raw([RootName], #{})}.
#{bin(RootName) => get_raw([RootName], #{})}.
%% @doc Get a config value for the given path.
%% The path should at least include root config name.
@ -230,14 +231,14 @@ find_listener_conf(Type, Listener, KeyPath) ->
put(Config) ->
maps:fold(
fun(RootName, RootValue, _) ->
?MODULE:put([RootName], RootValue)
?MODULE:put([atom(RootName)], RootValue)
end,
ok,
Config
).
erase(RootName) ->
persistent_term:erase(?PERSIS_KEY(?CONF, bin(RootName))),
persistent_term:erase(?PERSIS_KEY(?CONF, atom(RootName))),
persistent_term:erase(?PERSIS_KEY(?RAW_CONF, bin(RootName))),
ok.
@ -286,9 +287,11 @@ get_default_value([RootName | _] = KeyPath) ->
end.
-spec get_raw(emqx_utils_maps:config_key_path()) -> term().
get_raw([Root | T]) when is_atom(Root) -> get_raw([bin(Root) | T]);
get_raw(KeyPath) -> do_get_raw(KeyPath).
-spec get_raw(emqx_utils_maps:config_key_path(), term()) -> term().
get_raw([Root | T], Default) when is_atom(Root) -> get_raw([bin(Root) | T], Default);
get_raw(KeyPath, Default) -> do_get_raw(KeyPath, Default).
-spec put_raw(map()) -> ok.
@ -323,6 +326,7 @@ init_load(SchemaMod, Conf) when is_list(Conf) orelse is_binary(Conf) ->
ok = save_schema_mod_and_names(SchemaMod),
HasDeprecatedFile = has_deprecated_file(),
RawConf0 = load_config_files(HasDeprecatedFile, Conf),
warning_deprecated_root_key(RawConf0),
RawConf1 =
case HasDeprecatedFile of
true ->
@ -690,9 +694,9 @@ do_get(Type, [], Default) ->
false -> AllConf
end;
do_get(Type, [RootName], Default) ->
persistent_term:get(?PERSIS_KEY(Type, bin(RootName)), Default);
persistent_term:get(?PERSIS_KEY(Type, RootName), Default);
do_get(Type, [RootName | KeyPath], Default) ->
RootV = persistent_term:get(?PERSIS_KEY(Type, bin(RootName)), #{}),
RootV = persistent_term:get(?PERSIS_KEY(Type, RootName), #{}),
do_deep_get(Type, KeyPath, RootV, Default).
do_put(Type, Putter, [], DeepValue) ->
@ -706,7 +710,7 @@ do_put(Type, Putter, [], DeepValue) ->
do_put(Type, Putter, [RootName | KeyPath], DeepValue) ->
OldValue = do_get(Type, [RootName], #{}),
NewValue = do_deep_put(Type, Putter, KeyPath, OldValue, DeepValue),
persistent_term:put(?PERSIS_KEY(Type, bin(RootName)), NewValue).
persistent_term:put(?PERSIS_KEY(Type, RootName), NewValue).
do_deep_get(?CONF, KeyPath, Map, Default) ->
atom_conf_path(
@ -748,6 +752,22 @@ bin(Bin) when is_binary(Bin) -> Bin;
bin(Str) when is_list(Str) -> list_to_binary(Str);
bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8).
warning_deprecated_root_key(RawConf) ->
case maps:keys(RawConf) -- get_root_names() of
[] ->
ok;
Keys ->
Unknowns = string:join([binary_to_list(K) || K <- Keys], ","),
?tp(unknown_config_keys, #{unknown_config_keys => Unknowns}),
?SLOG(
warning,
#{
msg => "config_key_not_recognized",
unknown_config_keys => Unknowns
}
)
end.
conf_key(?CONF, RootName) ->
atom(RootName);
conf_key(?RAW_CONF, RootName) ->

View File

@ -32,9 +32,13 @@
get_bucket_cfg_path/2,
desc/1,
types/0,
short_paths/0,
calc_capacity/1,
extract_with_type/2,
default_client_config/0
default_client_config/0,
short_paths_fields/1,
get_listener_opts/1,
get_node_opts/1
]).
-define(KILOBYTE, 1024).
@ -104,11 +108,13 @@ roots() ->
].
fields(limiter) ->
short_paths_fields(?MODULE) ++
[
{Type,
?HOCON(?R_REF(node_opts), #{
desc => ?DESC(Type),
importance => ?IMPORTANCE_HIDDEN,
required => {false, recursively},
aliases => alias_of_type(Type)
})}
|| Type <- types()
@ -203,6 +209,14 @@ fields(listener_client_fields) ->
fields(Type) ->
simple_bucket_field(Type).
short_paths_fields(DesModule) ->
[
{Name,
?HOCON(rate(), #{desc => ?DESC(DesModule, Name), required => false, example => Example})}
|| {Name, Example} <-
lists:zip(short_paths(), [<<"1000/s">>, <<"1000/s">>, <<"100MB/s">>])
].
desc(limiter) ->
"Settings for the rate limiter.";
desc(node_opts) ->
@ -236,6 +250,9 @@ get_bucket_cfg_path(Type, BucketName) ->
types() ->
[bytes, messages, connection, message_routing, internal].
short_paths() ->
[max_conn_rate, messages_rate, bytes_rate].
calc_capacity(#{rate := infinity}) ->
infinity;
calc_capacity(#{rate := Rate, burst := Burst}) ->
@ -266,6 +283,31 @@ default_client_config() ->
failure_strategy => force
}.
default_bucket_config() ->
#{
rate => infinity,
burst => 0
}.
get_listener_opts(Conf) ->
Limiter = maps:get(limiter, Conf, undefined),
ShortPaths = maps:with(short_paths(), Conf),
get_listener_opts(Limiter, ShortPaths).
get_node_opts(Type) ->
Opts = emqx:get_config([limiter, Type], default_bucket_config()),
case type_to_short_path_name(Type) of
undefined ->
Opts;
Name ->
case emqx:get_config([limiter, Name], undefined) of
undefined ->
Opts;
Rate ->
Opts#{rate := Rate}
end
end.
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
@ -476,3 +518,42 @@ merge_client_bucket(Type, _, {ok, BucketVal}) ->
#{Type => BucketVal};
merge_client_bucket(_, _, _) ->
undefined.
short_path_name_to_type(max_conn_rate) ->
connection;
short_path_name_to_type(messages_rate) ->
messages;
short_path_name_to_type(bytes_rate) ->
bytes.
type_to_short_path_name(connection) ->
max_conn_rate;
type_to_short_path_name(messages) ->
messages_rate;
type_to_short_path_name(bytes) ->
bytes_rate;
type_to_short_path_name(_) ->
undefined.
get_listener_opts(Limiter, ShortPaths) when map_size(ShortPaths) =:= 0 ->
Limiter;
get_listener_opts(undefined, ShortPaths) ->
convert_listener_short_paths(ShortPaths);
get_listener_opts(Limiter, ShortPaths) ->
Shorts = convert_listener_short_paths(ShortPaths),
emqx_utils_maps:deep_merge(Limiter, Shorts).
convert_listener_short_paths(ShortPaths) ->
DefBucket = default_bucket_config(),
DefClient = default_client_config(),
Fun = fun(Name, Rate, Acc) ->
Type = short_path_name_to_type(Name),
case Name of
max_conn_rate ->
Acc#{Type => DefBucket#{rate => Rate}};
_ ->
Client = maps:get(client, Acc, #{}),
Acc#{client => Client#{Type => DefClient#{rate => Rate}}}
end
end,
maps:fold(Fun, #{}, ShortPaths).

View File

@ -481,7 +481,7 @@ dispatch_burst_to_buckets([], _, Alloced, Buckets) ->
-spec init_tree(emqx_limiter_schema:limiter_type()) -> state().
init_tree(Type) when is_atom(Type) ->
Cfg = emqx:get_config([limiter, Type]),
Cfg = emqx_limiter_schema:get_node_opts(Type),
init_tree(Type, Cfg).
init_tree(Type, #{rate := Rate} = Cfg) ->
@ -625,13 +625,10 @@ find_referenced_bucket(Id, Type, #{rate := Rate} = Cfg) when Rate =/= infinity -
{error, invalid_bucket}
end;
%% this is a node-level reference
find_referenced_bucket(Id, Type, _) ->
case emqx:get_config([limiter, Type], undefined) of
find_referenced_bucket(_Id, Type, _) ->
case emqx_limiter_schema:get_node_opts(Type) of
#{rate := infinity} ->
false;
undefined ->
?SLOG(error, #{msg => "invalid limiter type", type => Type, id => Id}),
{error, invalid_bucket};
NodeCfg ->
{ok, Bucket} = emqx_limiter_manager:find_root(Type),
{ok, Bucket, NodeCfg}

View File

@ -86,7 +86,7 @@ init([]) ->
%% Internal functions
%%--==================================================================
make_child(Type) ->
Cfg = emqx:get_config([limiter, Type]),
Cfg = emqx_limiter_schema:get_node_opts(Type),
make_child(Type, Cfg).
make_child(Type, Cfg) ->

View File

@ -35,7 +35,8 @@
current_conns/2,
max_conns/2,
id_example/0,
default_max_conn/0
default_max_conn/0,
shutdown_count/2
]).
-export([
@ -195,6 +196,17 @@ max_conns(Type, Name, _ListenOn) when Type =:= ws; Type =:= wss ->
max_conns(_, _, _) ->
{error, not_support}.
shutdown_count(ID, ListenOn) ->
{ok, #{type := Type, name := Name}} = parse_listener_id(ID),
shutdown_count(Type, Name, ListenOn).
shutdown_count(Type, Name, ListenOn) when Type == tcp; Type == ssl ->
esockd:get_shutdown_count({listener_id(Type, Name), ListenOn});
shutdown_count(Type, _Name, _ListenOn) when Type =:= ws; Type =:= wss ->
[];
shutdown_count(_, _, _) ->
{error, not_support}.
%% @doc Start all listeners.
-spec start() -> ok.
start() ->
@ -639,7 +651,7 @@ zone(Opts) ->
maps:get(zone, Opts, undefined).
limiter(Opts) ->
maps:get(limiter, Opts, undefined).
emqx_limiter_schema:get_listener_opts(Opts).
add_limiter_bucket(Id, #{limiter := Limiter}) ->
maps:fold(

View File

@ -237,7 +237,7 @@ set_log_handler_level(HandlerId, Level) ->
end.
%% @doc Set both the primary and all handlers level in one command
-spec set_log_level(logger:handler_id()) -> ok | {error, term()}.
-spec set_log_level(logger:level()) -> ok | {error, term()}.
set_log_level(Level) ->
case set_primary_log_level(Level) of
ok -> set_all_log_handlers_level(Level);

View File

@ -42,7 +42,7 @@
-type bar_separated_list() :: list().
-type ip_port() :: tuple() | integer().
-type cipher() :: map().
-type port_number() :: 1..65536.
-type port_number() :: 1..65535.
-type server_parse_option() :: #{
default_port => port_number(),
no_port => boolean(),
@ -135,7 +135,8 @@
cipher/0,
comma_separated_atoms/0,
url/0,
json_binary/0
json_binary/0,
port_number/0
]).
-export([namespace/0, roots/0, roots/1, fields/1, desc/1, tags/0]).
@ -687,12 +688,13 @@ fields("force_shutdown") ->
desc => ?DESC(force_shutdown_enable)
}
)},
{"max_message_queue_len",
{"max_mailbox_size",
sc(
range(0, inf),
#{
default => 1000,
desc => ?DESC(force_shutdown_max_message_queue_len)
aliases => [max_message_queue_len],
desc => ?DESC(force_shutdown_max_mailbox_size)
}
)},
{"max_heap_size",
@ -2000,7 +2002,8 @@ base_listener(Bind) ->
listener_fields
),
#{
desc => ?DESC(base_listener_limiter)
desc => ?DESC(base_listener_limiter),
importance => ?IMPORTANCE_HIDDEN
}
)},
{"enable_authn",
@ -2011,7 +2014,7 @@ base_listener(Bind) ->
default => true
}
)}
].
] ++ emqx_limiter_schema:short_paths_fields(?MODULE).
desc("persistent_session_store") ->
"Settings for message persistence.";
@ -2191,7 +2194,7 @@ common_ssl_opts_schema(Defaults) ->
D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end,
Df = fun(Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end,
Collection = maps:get(versions, Defaults, tls_all_available),
AvailableVersions = default_tls_vsns(Collection),
DefaultVersions = default_tls_vsns(Collection),
[
{"cacertfile",
sc(
@ -2253,6 +2256,7 @@ common_ssl_opts_schema(Defaults) ->
example => <<"">>,
format => <<"password">>,
desc => ?DESC(common_ssl_opts_schema_password),
importance => ?IMPORTANCE_LOW,
converter => fun password_converter/2
}
)},
@ -2260,10 +2264,10 @@ common_ssl_opts_schema(Defaults) ->
sc(
hoconsc:array(typerefl:atom()),
#{
default => AvailableVersions,
default => DefaultVersions,
desc => ?DESC(common_ssl_opts_schema_versions),
importance => ?IMPORTANCE_HIGH,
validator => fun(Inputs) -> validate_tls_versions(AvailableVersions, Inputs) end
validator => fun(Input) -> validate_tls_versions(Collection, Input) end
}
)},
{"ciphers", ciphers_schema(D("ciphers"))},
@ -2449,10 +2453,14 @@ client_ssl_opts_schema(Defaults) ->
)}
].
default_tls_vsns(dtls_all_available) ->
emqx_tls_lib:available_versions(dtls);
default_tls_vsns(tls_all_available) ->
emqx_tls_lib:available_versions(tls).
available_tls_vsns(dtls_all_available) -> emqx_tls_lib:available_versions(dtls);
available_tls_vsns(tls_all_available) -> emqx_tls_lib:available_versions(tls).
outdated_tls_vsn(dtls_all_available) -> [dtlsv1];
outdated_tls_vsn(tls_all_available) -> ['tlsv1.1', tlsv1].
default_tls_vsns(Key) ->
available_tls_vsns(Key) -- outdated_tls_vsn(Key).
-spec ciphers_schema(quic | dtls_all_available | tls_all_available | undefined) ->
hocon_schema:field_schema().
@ -2761,7 +2769,8 @@ validate_ciphers(Ciphers) ->
Bad -> {error, {bad_ciphers, Bad}}
end.
validate_tls_versions(AvailableVersions, Versions) ->
validate_tls_versions(Collection, Versions) ->
AvailableVersions = available_tls_vsns(Collection),
case lists:filter(fun(V) -> not lists:member(V, AvailableVersions) end, Versions) of
[] -> ok;
Vs -> {error, {unsupported_tls_versions, Vs}}

View File

@ -240,7 +240,7 @@
-type stats() :: [{atom(), term()}].
-type oom_policy() :: #{
max_message_queue_len => non_neg_integer(),
max_mailbox_size => non_neg_integer(),
max_heap_size => non_neg_integer(),
enable => boolean()
}.

View File

@ -47,7 +47,9 @@
-type param_types() :: #{emqx_bpapi:var_name() => _Type}.
%% Applications and modules we wish to ignore in the analysis:
-define(IGNORED_APPS, "gen_rpc, recon, redbug, observer_cli, snabbkaffe, ekka, mria").
-define(IGNORED_APPS,
"gen_rpc, recon, redbug, observer_cli, snabbkaffe, ekka, mria, amqp_client, rabbit_common"
).
-define(IGNORED_MODULES, "emqx_rpc").
%% List of known RPC backend modules:
-define(RPC_MODULES, "gen_rpc, erpc, rpc, emqx_rpc").

View File

@ -31,7 +31,7 @@ force_gc_conf() ->
#{bytes => 16777216, count => 16000, enable => true}.
force_shutdown_conf() ->
#{enable => true, max_heap_size => 4194304, max_message_queue_len => 1000}.
#{enable => true, max_heap_size => 4194304, max_mailbox_size => 1000}.
rpc_conf() ->
#{

View File

@ -67,7 +67,8 @@ groups() ->
%% t_keepalive,
%% t_redelivery_on_reconnect,
%% subscribe_failure_test,
t_dollar_topics
t_dollar_topics,
t_sub_non_utf8_topic
]},
{mqttv5, [non_parallel_tests], [t_basic_with_props_v5]},
{others, [non_parallel_tests], [
@ -297,6 +298,36 @@ t_dollar_topics(_) ->
ok = emqtt:disconnect(C),
ct:pal("$ topics test succeeded").
t_sub_non_utf8_topic(_) ->
{ok, Socket} = gen_tcp:connect({127, 0, 0, 1}, 1883, [{active, true}, binary]),
ConnPacket = emqx_frame:serialize(#mqtt_packet{
header = #mqtt_packet_header{type = 1},
variable = #mqtt_packet_connect{
clientid = <<"abcdefg">>
}
}),
ok = gen_tcp:send(Socket, ConnPacket),
receive
{tcp, _, _ConnAck = <<32, 2, 0, 0>>} -> ok
after 3000 -> ct:fail({connect_ack_not_recv, process_info(self(), messages)})
end,
SubHeader = <<130, 18, 25, 178>>,
SubTopicLen = <<0, 13>>,
%% this is not a valid utf8 topic
SubTopic = <<128, 10, 10, 12, 178, 159, 162, 47, 115, 1, 1, 1, 1>>,
SubQoS = <<1>>,
SubPacket = <<SubHeader/binary, SubTopicLen/binary, SubTopic/binary, SubQoS/binary>>,
ok = gen_tcp:send(Socket, SubPacket),
receive
{tcp_closed, _} -> ok
after 3000 -> ct:fail({should_get_disconnected, process_info(self(), messages)})
end,
timer:sleep(1000),
ListenerCounts = emqx_listeners:shutdown_count('tcp:default', {{0, 0, 0, 0}, 1883}),
TopicInvalidCount = proplists:get_value(topic_filter_invalid, ListenerCounts),
?assert(is_integer(TopicInvalidCount) andalso TopicInvalidCount > 0),
ok.
%%--------------------------------------------------------------------
%% Test cases for MQTT v5
%%--------------------------------------------------------------------

View File

@ -19,6 +19,7 @@
-compile(export_all).
-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
all() -> emqx_common_test_helpers:all(?MODULE).
@ -77,3 +78,21 @@ t_init_load(_Config) ->
?assertEqual(ExpectRootNames, lists:sort(emqx_config:get_root_names())),
?assertMatch({ok, #{raw_config := 128}}, emqx:update_config([mqtt, max_topic_levels], 128)),
ok = file:delete(DeprecatedFile).
t_unknown_rook_keys(_) ->
?check_trace(
#{timetrap => 1000},
begin
ok = emqx_config:init_load(
emqx_schema, <<"test_1 {}\n test_2 {sub = 100}\n listeners {}">>
),
?block_until(#{?snk_kind := unknown_config_keys})
end,
fun(Trace) ->
?assertMatch(
[#{unknown_config_keys := "test_1,test_2"}],
?of_kind(unknown_config_keys, Trace)
)
end
),
ok.

View File

@ -177,7 +177,9 @@ t_sub_key_update_remove(_Config) ->
{ok, #{post_config_update => #{emqx_config_handler_SUITE => ok}}},
emqx:remove_config(KeyPath)
),
?assertError({config_not_found, KeyPath}, emqx:get_raw_config(KeyPath)),
?assertError(
{config_not_found, [<<"sysmon">>, os, cpu_check_interval]}, emqx:get_raw_config(KeyPath)
),
OSKey = maps:keys(emqx:get_raw_config([sysmon, os])),
?assertEqual(false, lists:member(<<"cpu_check_interval">>, OSKey)),
?assert(length(OSKey) > 0),

View File

@ -22,7 +22,16 @@
-include_lib("emqx/include/emqx_mqtt.hrl").
-include_lib("eunit/include/eunit.hrl").
all() -> emqx_common_test_helpers:all(?MODULE).
all() ->
emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
emqx_common_test_helpers:start_apps([]),
Config.
end_per_suite(_Config) ->
emqx_common_test_helpers:stop_apps([]),
ok.
t_check_pub(_) ->
OldConf = emqx:get_config([zones], #{}),

View File

@ -47,7 +47,7 @@ all() ->
emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
ok = emqx_common_test_helpers:load_config(emqx_limiter_schema, ?BASE_CONF),
load_conf(),
emqx_common_test_helpers:start_apps([?APP]),
Config.
@ -55,13 +55,15 @@ end_per_suite(_Config) ->
emqx_common_test_helpers:stop_apps([?APP]).
init_per_testcase(_TestCase, Config) ->
emqx_config:erase(limiter),
load_conf(),
Config.
end_per_testcase(_TestCase, Config) ->
Config.
load_conf() ->
emqx_common_test_helpers:load_config(emqx_limiter_schema, ?BASE_CONF).
ok = emqx_common_test_helpers:load_config(emqx_limiter_schema, ?BASE_CONF).
init_config() ->
emqx_config:init_load(emqx_limiter_schema, ?BASE_CONF).
@ -313,8 +315,8 @@ t_capacity(_) ->
%% Test Cases Global Level
%%--------------------------------------------------------------------
t_collaborative_alloc(_) ->
GlobalMod = fun(#{message_routing := MR} = Cfg) ->
Cfg#{message_routing := MR#{rate := ?RATE("600/1s")}}
GlobalMod = fun(Cfg) ->
Cfg#{message_routing => #{rate => ?RATE("600/1s"), burst => 0}}
end,
Bucket1 = fun(#{client := Cli} = Bucket) ->
@ -353,11 +355,11 @@ t_collaborative_alloc(_) ->
).
t_burst(_) ->
GlobalMod = fun(#{message_routing := MR} = Cfg) ->
GlobalMod = fun(Cfg) ->
Cfg#{
message_routing := MR#{
rate := ?RATE("200/1s"),
burst := ?RATE("400/1s")
message_routing => #{
rate => ?RATE("200/1s"),
burst => ?RATE("400/1s")
}
}
end,
@ -653,16 +655,16 @@ t_not_exists_instance(_) ->
),
?assertEqual(
{error, invalid_bucket},
{ok, infinity},
emqx_limiter_server:connect(?FUNCTION_NAME, not_exists, Cfg)
),
ok.
t_create_instance_with_node(_) ->
GlobalMod = fun(#{message_routing := MR} = Cfg) ->
GlobalMod = fun(Cfg) ->
Cfg#{
message_routing := MR#{rate := ?RATE("200/1s")},
messages := MR#{rate := ?RATE("200/1s")}
message_routing => #{rate => ?RATE("200/1s"), burst => 0},
messages => #{rate => ?RATE("200/1s"), burst => 0}
}
end,
@ -739,6 +741,68 @@ t_esockd_htb_consume(_) ->
?assertMatch({ok, _}, C2R),
ok.
%%--------------------------------------------------------------------
%% Test Cases short paths
%%--------------------------------------------------------------------
t_node_short_paths(_) ->
CfgStr = <<"limiter {max_conn_rate = \"1000\", messages_rate = \"100\", bytes_rate = \"10\"}">>,
ok = emqx_common_test_helpers:load_config(emqx_limiter_schema, CfgStr),
Accessor = fun emqx_limiter_schema:get_node_opts/1,
?assertMatch(#{rate := 100.0}, Accessor(connection)),
?assertMatch(#{rate := 10.0}, Accessor(messages)),
?assertMatch(#{rate := 1.0}, Accessor(bytes)),
?assertMatch(#{rate := infinity}, Accessor(message_routing)),
?assertEqual(undefined, emqx:get_config([limiter, connection], undefined)).
t_compatibility_for_node_short_paths(_) ->
CfgStr =
<<"limiter {max_conn_rate = \"1000\", connection.rate = \"500\", bytes.rate = \"200\"}">>,
ok = emqx_common_test_helpers:load_config(emqx_limiter_schema, CfgStr),
Accessor = fun emqx_limiter_schema:get_node_opts/1,
?assertMatch(#{rate := 100.0}, Accessor(connection)),
?assertMatch(#{rate := 20.0}, Accessor(bytes)).
t_listener_short_paths(_) ->
CfgStr = <<
""
"listeners.tcp.default {max_conn_rate = \"1000\", messages_rate = \"100\", bytes_rate = \"10\"}"
""
>>,
ok = emqx_common_test_helpers:load_config(emqx_schema, CfgStr),
ListenerOpt = emqx:get_config([listeners, tcp, default]),
?assertMatch(
#{
client := #{
messages := #{rate := 10.0},
bytes := #{rate := 1.0}
},
connection := #{rate := 100.0}
},
emqx_limiter_schema:get_listener_opts(ListenerOpt)
).
t_compatibility_for_listener_short_paths(_) ->
CfgStr = <<
"" "listeners.tcp.default {max_conn_rate = \"1000\", limiter.connection.rate = \"500\"}" ""
>>,
ok = emqx_common_test_helpers:load_config(emqx_schema, CfgStr),
ListenerOpt = emqx:get_config([listeners, tcp, default]),
?assertMatch(
#{
connection := #{rate := 100.0}
},
emqx_limiter_schema:get_listener_opts(ListenerOpt)
).
t_no_limiter_for_listener(_) ->
CfgStr = <<>>,
ok = emqx_common_test_helpers:load_config(emqx_schema, CfgStr),
ListenerOpt = emqx:get_config([listeners, tcp, default]),
?assertEqual(
undefined,
emqx_limiter_schema:get_listener_opts(ListenerOpt)
).
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
@ -1043,3 +1107,11 @@ make_create_test_data_with_infinity_node(FakeInstnace) ->
%% client = C bucket = B C > B
{MkA(1000, 100), IsRefLimiter(FakeInstnace)}
].
parse_schema(ConfigString) ->
{ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
hocon_tconf:check_plain(
emqx_limiter_schema,
RawConf,
#{required => false, atom_key => false}
).

View File

@ -229,7 +229,8 @@ ssl_files_handle_non_generated_file_test() ->
ok = emqx_tls_lib:delete_ssl_files(Dir, undefined, SSL2),
%% verify the file is not delete and not changed, because it is not generated by
%% emqx_tls_lib
?assertEqual({ok, KeyFileContent}, file:read_file(TmpKeyFile)).
?assertEqual({ok, KeyFileContent}, file:read_file(TmpKeyFile)),
ok = file:delete(TmpKeyFile).
ssl_file_replace_test() ->
Key1 = bin(test_key()),

View File

@ -72,7 +72,8 @@
T == cassandra;
T == sqlserver;
T == pulsar_producer;
T == oracle
T == oracle;
T == iotdb
).
load() ->

View File

@ -56,6 +56,11 @@
(TYPE) =:= <<"kafka_consumer">> orelse ?IS_BI_DIR_BRIDGE(TYPE)
).
%% [FIXME] this has no place here, it's used in parse_confs/3, which should
%% rather delegate to a behavior callback than implementing domain knowledge
%% here (reversed dependency)
-define(INSERT_TABLET_PATH, "/rest/v2/insertTablet").
-if(?EMQX_RELEASE_EDITION == ee).
bridge_to_resource_type(<<"mqtt">>) -> emqx_connector_mqtt;
bridge_to_resource_type(mqtt) -> emqx_connector_mqtt;
@ -329,6 +334,30 @@ parse_confs(
max_retries => Retry
}
};
parse_confs(<<"iotdb">>, Name, Conf) ->
#{
base_url := BaseURL,
authentication :=
#{
username := Username,
password := Password
}
} = Conf,
BasicToken = base64:encode(<<Username/binary, ":", Password/binary>>),
WebhookConfig =
Conf#{
method => <<"post">>,
url => <<BaseURL/binary, ?INSERT_TABLET_PATH>>,
headers => [
{<<"Content-type">>, <<"application/json">>},
{<<"Authorization">>, BasicToken}
]
},
parse_confs(
<<"webhook">>,
Name,
WebhookConfig
);
parse_confs(Type, Name, Conf) when ?IS_INGRESS_BRIDGE(Type) ->
%% For some drivers that can be used as data-sources, we need to provide a
%% hookpoint. The underlying driver will run `emqx_hooks:run/3` when it

View File

@ -0,0 +1,350 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_testlib).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
%% ct setup helpers
init_per_suite(Config, Apps) ->
[{start_apps, Apps} | Config].
end_per_suite(Config) ->
emqx_mgmt_api_test_util:end_suite(),
ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?config(start_apps, Config))),
_ = application:stop(emqx_connector),
ok.
init_per_group(TestGroup, BridgeType, Config) ->
ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
application:load(emqx_bridge),
ok = emqx_common_test_helpers:start_apps([emqx_conf]),
ok = emqx_connector_test_helpers:start_apps(?config(start_apps, Config)),
{ok, _} = application:ensure_all_started(emqx_connector),
emqx_mgmt_api_test_util:init_suite(),
UniqueNum = integer_to_binary(erlang:unique_integer([positive])),
MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>,
[
{proxy_host, ProxyHost},
{proxy_port, ProxyPort},
{mqtt_topic, MQTTTopic},
{test_group, TestGroup},
{bridge_type, BridgeType}
| Config
].
end_per_group(Config) ->
ProxyHost = ?config(proxy_host, Config),
ProxyPort = ?config(proxy_port, Config),
emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
delete_all_bridges(),
ok.
init_per_testcase(TestCase, Config0, BridgeConfigCb) ->
ct:timetrap(timer:seconds(60)),
delete_all_bridges(),
UniqueNum = integer_to_binary(erlang:unique_integer()),
BridgeTopic =
<<
(atom_to_binary(TestCase))/binary,
UniqueNum/binary
>>,
TestGroup = ?config(test_group, Config0),
Config = [{bridge_topic, BridgeTopic} | Config0],
{Name, ConfigString, BridgeConfig} = BridgeConfigCb(
TestCase, TestGroup, Config
),
ok = snabbkaffe:start_trace(),
[
{bridge_name, Name},
{bridge_config_string, ConfigString},
{bridge_config, BridgeConfig}
| Config
].
end_per_testcase(_Testcase, Config) ->
case proplists:get_bool(skip_does_not_apply, Config) of
true ->
ok;
false ->
ProxyHost = ?config(proxy_host, Config),
ProxyPort = ?config(proxy_port, Config),
emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
delete_all_bridges(),
%% in CI, apparently this needs more time since the
%% machines struggle with all the containers running...
emqx_common_test_helpers:call_janitor(60_000),
ok = snabbkaffe:stop(),
ok
end.
delete_all_bridges() ->
lists:foreach(
fun(#{name := Name, type := Type}) ->
emqx_bridge:remove(Type, Name)
end,
emqx_bridge:list()
).
%% test helpers
parse_and_check(Config, ConfigString, Name) ->
BridgeType = ?config(bridge_type, Config),
{ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
#{<<"bridges">> := #{BridgeType := #{Name := BridgeConfig}}} = RawConf,
BridgeConfig.
resource_id(Config) ->
BridgeType = ?config(bridge_type, Config),
Name = ?config(bridge_name, Config),
emqx_bridge_resource:resource_id(BridgeType, Name).
create_bridge(Config) ->
create_bridge(Config, _Overrides = #{}).
create_bridge(Config, Overrides) ->
BridgeType = ?config(bridge_type, Config),
Name = ?config(bridge_name, Config),
BridgeConfig0 = ?config(bridge_config, Config),
BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
emqx_bridge:create(BridgeType, Name, BridgeConfig).
create_bridge_api(Config) ->
create_bridge_api(Config, _Overrides = #{}).
create_bridge_api(Config, Overrides) ->
BridgeType = ?config(bridge_type, Config),
Name = ?config(bridge_name, Config),
BridgeConfig0 = ?config(bridge_config, Config),
BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => Name},
Path = emqx_mgmt_api_test_util:api_path(["bridges"]),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
Opts = #{return_all => true},
ct:pal("creating bridge (via http): ~p", [Params]),
Res =
case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of
{ok, {Status, Headers, Body0}} ->
{ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}};
Error ->
Error
end,
ct:pal("bridge create result: ~p", [Res]),
Res.
update_bridge_api(Config) ->
update_bridge_api(Config, _Overrides = #{}).
update_bridge_api(Config, Overrides) ->
BridgeType = ?config(bridge_type, Config),
Name = ?config(bridge_name, Config),
BridgeConfig0 = ?config(bridge_config, Config),
BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
BridgeId = emqx_bridge_resource:bridge_id(BridgeType, Name),
Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => Name},
Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
Opts = #{return_all => true},
ct:pal("updating bridge (via http): ~p", [Params]),
Res =
case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, Params, Opts) of
{ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])};
Error -> Error
end,
ct:pal("bridge update result: ~p", [Res]),
Res.
probe_bridge_api(Config) ->
probe_bridge_api(Config, _Overrides = #{}).
probe_bridge_api(Config, _Overrides) ->
BridgeType = ?config(bridge_type, Config),
Name = ?config(bridge_name, Config),
BridgeConfig = ?config(bridge_config, Config),
Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => Name},
Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
Opts = #{return_all => true},
ct:pal("probing bridge (via http): ~p", [Params]),
Res =
case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of
{ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0};
Error -> Error
end,
ct:pal("bridge probe result: ~p", [Res]),
Res.
create_rule_and_action_http(BridgeType, RuleTopic, Config) ->
BridgeName = ?config(bridge_name, Config),
BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
Params = #{
enable => true,
sql => <<"SELECT * FROM \"", RuleTopic/binary, "\"">>,
actions => [BridgeId]
},
Path = emqx_mgmt_api_test_util:api_path(["rules"]),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
ct:pal("rule action params: ~p", [Params]),
case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of
{ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])};
Error -> Error
end.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
t_sync_query(Config, MakeMessageFun, IsSuccessCheck) ->
ResourceId = resource_id(Config),
?check_trace(
begin
?assertMatch({ok, _}, create_bridge_api(Config)),
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
Message = {send_message, MakeMessageFun()},
IsSuccessCheck(emqx_resource:simple_sync_query(ResourceId, Message)),
ok
end,
[]
),
ok.
t_async_query(Config, MakeMessageFun, IsSuccessCheck) ->
ResourceId = resource_id(Config),
ReplyFun =
fun(Pid, Result) ->
Pid ! {result, Result}
end,
?check_trace(
begin
?assertMatch({ok, _}, create_bridge_api(Config)),
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
Message = {send_message, MakeMessageFun()},
emqx_resource:query(ResourceId, Message, #{async_reply_fun => {ReplyFun, [self()]}}),
ok
end,
[]
),
receive
{result, Result} -> IsSuccessCheck(Result)
after 5_000 ->
throw(timeout)
end,
ok.
t_create_via_http(Config) ->
?check_trace(
begin
?assertMatch({ok, _}, create_bridge_api(Config)),
%% lightweight matrix testing some configs
?assertMatch(
{ok, _},
update_bridge_api(
Config
)
),
?assertMatch(
{ok, _},
update_bridge_api(
Config
)
),
ok
end,
[]
),
ok.
t_start_stop(Config, StopTracePoint) ->
BridgeType = ?config(bridge_type, Config),
BridgeName = ?config(bridge_name, Config),
ResourceId = resource_id(Config),
?check_trace(
begin
?assertMatch({ok, _}, create_bridge(Config)),
%% Since the connection process is async, we give it some time to
%% stabilize and avoid flakiness.
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
%% Check that the bridge probe API doesn't leak atoms.
ProbeRes0 = probe_bridge_api(
Config,
#{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}}
),
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
AtomsBefore = erlang:system_info(atom_count),
%% Probe again; shouldn't have created more atoms.
ProbeRes1 = probe_bridge_api(
Config,
#{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}}
),
?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
AtomsAfter = erlang:system_info(atom_count),
?assertEqual(AtomsBefore, AtomsAfter),
%% Now stop the bridge.
?assertMatch(
{{ok, _}, {ok, _}},
?wait_async_action(
emqx_bridge:disable_enable(disable, BridgeType, BridgeName),
#{?snk_kind := StopTracePoint},
5_000
)
),
ok
end,
fun(Trace) ->
%% one for each probe, one for real
?assertMatch([_, _, _], ?of_kind(StopTracePoint, Trace)),
ok
end
),
ok.
t_on_get_status(Config) ->
ProxyPort = ?config(proxy_port, Config),
ProxyHost = ?config(proxy_host, Config),
ProxyName = ?config(proxy_name, Config),
ResourceId = resource_id(Config),
?assertMatch({ok, _}, create_bridge(Config)),
%% Since the connection process is async, we give it some time to
%% stabilize and avoid flakiness.
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
ct:sleep(500),
?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId))
end),
%% Check that it recovers itself.
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
ok.

View File

@ -524,7 +524,7 @@ t_write_failure(Config) ->
send_message(Config, SentData)
end,
#{?snk_kind := buffer_worker_flush_nack},
1_000
10_000
)
end),
fun(Trace0) ->

apps/emqx_bridge_iotdb/.gitignore (vendored, new file, 19 lines)
View File

@ -0,0 +1,19 @@
.rebar3
_*
.eunit
*.o
*.beam
*.plt
*.swp
*.swo
.erlang.cookie
ebin
log
erl_crash.dump
.rebar
logs
_build
.idea
*.iml
rebar3.crashdump
*~

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this License's text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this License's text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,26 @@
# Apache IoTDB Data Integration Bridge
This application houses the IoTDB data integration bridge for EMQX Enterprise
Edition. It provides the means to connect to IoTDB and publish messages to it.
It implements the connection management and interaction itself, without the
need for a separate connector app, since it is not used by the authentication
and authorization applications.
# Documentation links
For more information on Apache IoTDB, please see its [official
site](https://iotdb.apache.org/).
# Configurations
Please see [our official
documentation](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-iotdb.html)
for more detailed info.
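
As a quick orientation, below is a minimal, hypothetical HOCON sketch of what a bridge entry could look like. The field names and example values are taken from the `emqx_bridge_iotdb` schema added in this commit; the exact key set and its placement in `emqx.conf` are assumptions, so treat the linked documentation as authoritative.

```
# hypothetical sketch; field names come from the emqx_bridge_iotdb schema
bridges.iotdb.my_iotdb_bridge {
  enable = true
  base_url = "http://iotdb.local:18080/"
  iotdb_version = "v1.0.x"
  device_id = "my_device"
  is_aligned = false
  authentication {
    username = "root"
    password = "*****"
  }
}
```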
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
See [BSL](./BSL.txt).

View File

@ -0,0 +1,2 @@
toxiproxy
iotdb

View File

@ -0,0 +1,11 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-ifndef(EMQX_BRIDGE_IOTDB_HRL).
-define(EMQX_BRIDGE_IOTDB_HRL, true).
-define(VSN_1_0_X, 'v1.0.x').
-define(VSN_0_13_X, 'v0.13.x').
-endif.

View File

@ -0,0 +1,14 @@
%% -*- mode: erlang -*-
{erl_opts, [
debug_info
]}.
{deps, [
{emqx, {path, "../../apps/emqx"}},
{emqx_connector, {path, "../../apps/emqx_connector"}},
{emqx_resource, {path, "../../apps/emqx_resource"}},
{emqx_bridge, {path, "../../apps/emqx_bridge"}}
]}.
{plugins, [rebar3_path_deps]}.
{project_plugins, [erlfmt]}.

View File

@ -0,0 +1,22 @@
%% -*- mode: erlang -*-
{application, emqx_bridge_iotdb, [
{description, "EMQX Enterprise Apache IoTDB Bridge"},
{vsn, "0.1.0"},
{modules, [
emqx_bridge_iotdb,
emqx_bridge_iotdb_impl
]},
{registered, []},
{applications, [
kernel,
stdlib,
emqx_connector
]},
{env, []},
{licenses, ["Business Source License 1.1"]},
{maintainers, ["EMQX Team <contact@emqx.io>"]},
{links, [
{"Homepage", "https://emqx.io/"},
{"Github", "https://github.com/emqx/emqx"}
]}
]}.

View File

@ -0,0 +1,232 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_iotdb).
-include("emqx_bridge_iotdb.hrl").
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").
-import(hoconsc, [mk/2, enum/1, ref/2]).
%% hocon_schema API
-export([
namespace/0,
roots/0,
fields/1,
desc/1
]).
%% emqx_ee_bridge "unofficial" API
-export([conn_bridge_examples/1]).
%%-------------------------------------------------------------------------------------------------
%% `hocon_schema' API
%%-------------------------------------------------------------------------------------------------
namespace() -> "bridge_iotdb".
roots() -> [].
fields("config") ->
basic_config() ++ request_config();
fields("post") ->
[
type_field(),
name_field()
] ++ fields("config");
fields("put") ->
fields("config");
fields("get") ->
emqx_bridge_schema:status_fields() ++ fields("post");
fields("creation_opts") ->
lists:filter(
fun({K, _V}) ->
not lists:member(K, unsupported_opts())
end,
emqx_resource_schema:fields("creation_opts")
);
fields(auth_basic) ->
[
{username, mk(binary(), #{required => true, desc => ?DESC("config_auth_basic_username")})},
{password,
mk(binary(), #{
required => true,
desc => ?DESC("config_auth_basic_password"),
sensitive => true,
converter => fun emqx_schema:password_converter/2
})}
].
desc("config") ->
?DESC("desc_config");
desc("creation_opts") ->
?DESC(emqx_resource_schema, "creation_opts");
desc("post") ->
["Configuration for IoTDB using `POST` method."];
desc(Name) ->
lists:member(Name, struct_names()) orelse throw({missing_desc, Name}),
?DESC(Name).
struct_names() ->
[
auth_basic
].
basic_config() ->
[
{enable,
mk(
boolean(),
#{
desc => ?DESC("config_enable"),
default => true
}
)},
{authentication,
mk(
hoconsc:union([ref(?MODULE, auth_basic)]),
#{
default => auth_basic, desc => ?DESC("config_authentication")
}
)},
{is_aligned,
mk(
boolean(),
#{
desc => ?DESC("config_is_aligned"),
default => false
}
)},
{device_id,
mk(
binary(),
#{
desc => ?DESC("config_device_id")
}
)},
{iotdb_version,
mk(
hoconsc:enum([?VSN_1_0_X, ?VSN_0_13_X]),
#{
desc => ?DESC("config_iotdb_version"),
default => ?VSN_1_0_X
}
)}
] ++ resource_creation_opts() ++
proplists_without(
[max_retries, base_url, request],
emqx_connector_http:fields(config)
).
proplists_without(Keys, List) ->
[El || El = {K, _} <- List, not lists:member(K, Keys)].
request_config() ->
[
{base_url,
mk(
emqx_schema:url(),
#{
desc => ?DESC("config_base_url")
}
)},
{max_retries,
mk(
non_neg_integer(),
#{
default => 2,
desc => ?DESC("config_max_retries")
}
)},
{request_timeout,
mk(
emqx_schema:duration_ms(),
#{
default => <<"15s">>,
desc => ?DESC("config_request_timeout")
}
)}
].
resource_creation_opts() ->
[
{resource_opts,
mk(
ref(?MODULE, "creation_opts"),
#{
required => false,
default => #{},
desc => ?DESC(emqx_resource_schema, <<"resource_opts">>)
}
)}
].
unsupported_opts() ->
[
batch_size,
batch_time
].
%%======================================================================================
type_field() ->
{type,
mk(
hoconsc:enum([iotdb]),
#{
required => true,
desc => ?DESC("desc_type")
}
)}.
name_field() ->
{name,
mk(
binary(),
#{
required => true,
desc => ?DESC("desc_name")
}
)}.
%%======================================================================================
conn_bridge_examples(Method) ->
[
#{
<<"iotdb">> =>
#{
summary => <<"Apache IoTDB Bridge">>,
value => conn_bridge_example(Method, iotdb)
}
}
].
conn_bridge_example(_Method, Type) ->
#{
name => <<"My IoTDB Bridge">>,
type => Type,
enable => true,
authentication => #{
<<"username">> => <<"root">>,
<<"password">> => <<"*****">>
},
is_aligned => false,
device_id => <<"my_device">>,
base_url => <<"http://iotdb.local:18080/">>,
iotdb_version => ?VSN_1_0_X,
connect_timeout => <<"15s">>,
pool_type => <<"random">>,
pool_size => 8,
enable_pipelining => 100,
ssl => #{enable => false},
resource_opts => #{
worker_pool_size => 8,
health_check_interval => ?HEALTHCHECK_INTERVAL_RAW,
auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW,
query_mode => async,
max_buffer_bytes => ?DEFAULT_BUFFER_BYTES
}
}.

View File

@ -0,0 +1,382 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_iotdb_impl).
-include("emqx_bridge_iotdb.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
%% `emqx_resource' API
-export([
callback_mode/0,
on_start/2,
on_stop/2,
on_get_status/2,
on_query/3,
on_query_async/4
]).
-type config() ::
#{
base_url := #{
scheme := http | https,
host := iolist(),
port := inet:port_number(),
path := '_'
},
connect_timeout := pos_integer(),
pool_type := random | hash,
pool_size := pos_integer(),
request := undefined | map(),
is_aligned := boolean(),
iotdb_version := binary(),
device_id := binary() | undefined,
atom() => '_'
}.
-type state() ::
#{
base_path := '_',
base_url := #{
scheme := http | https,
host := iolist(),
port := inet:port_number(),
path := '_'
},
connect_timeout := pos_integer(),
pool_type := random | hash,
pool_size := pos_integer(),
request := undefined | map(),
is_aligned := boolean(),
iotdb_version := binary(),
device_id := binary() | undefined,
atom() => '_'
}.
-type manager_id() :: binary().
%%-------------------------------------------------------------------------------------
%% `emqx_resource' API
%%-------------------------------------------------------------------------------------
callback_mode() -> async_if_possible.
-spec on_start(manager_id(), config()) -> {ok, state()} | no_return().
on_start(InstanceId, Config) ->
%% [FIXME] The configuration passed in here is pre-processed and transformed
%% in emqx_bridge_resource:parse_confs/2.
case emqx_connector_http:on_start(InstanceId, Config) of
{ok, State} ->
?SLOG(info, #{
msg => "iotdb_bridge_started",
instance_id => InstanceId,
request => maps:get(request, State, <<>>)
}),
?tp(iotdb_bridge_started, #{}),
{ok, maps:merge(Config, State)};
{error, Reason} ->
?SLOG(error, #{
msg => "failed_to_start_iotdb_bridge",
instance_id => InstanceId,
base_url => maps:get(base_url, Config, <<>>),
reason => Reason
}),
throw(failed_to_start_iotdb_bridge)
end.
-spec on_stop(manager_id(), state()) -> ok | {error, term()}.
on_stop(InstanceId, State) ->
?SLOG(info, #{
msg => "stopping_iotdb_bridge",
connector => InstanceId
}),
Res = emqx_connector_http:on_stop(InstanceId, State),
?tp(iotdb_bridge_stopped, #{instance_id => InstanceId}),
Res.
-spec on_get_status(manager_id(), state()) ->
{connected, state()} | {disconnected, state(), term()}.
on_get_status(InstanceId, State) ->
emqx_connector_http:on_get_status(InstanceId, State).
-spec on_query(manager_id(), {send_message, map()}, state()) ->
{ok, pos_integer(), [term()], term()}
| {ok, pos_integer(), [term()]}
| {error, term()}.
on_query(InstanceId, {send_message, Message}, State) ->
?SLOG(debug, #{
msg => "iotdb_bridge_on_query_called",
instance_id => InstanceId,
send_message => Message,
state => emqx_utils:redact(State)
}),
IoTDBPayload = make_iotdb_insert_request(Message, State),
handle_response(
emqx_connector_http:on_query(
InstanceId, {send_message, IoTDBPayload}, State
)
).
-spec on_query_async(manager_id(), {send_message, map()}, {function(), [term()]}, state()) ->
{ok, pid()}.
on_query_async(InstanceId, {send_message, Message}, ReplyFunAndArgs0, State) ->
?SLOG(debug, #{
msg => "iotdb_bridge_on_query_async_called",
instance_id => InstanceId,
send_message => Message,
state => emqx_utils:redact(State)
}),
IoTDBPayload = make_iotdb_insert_request(Message, State),
ReplyFunAndArgs =
{
fun(Result) ->
Response = handle_response(Result),
emqx_resource:apply_reply_fun(ReplyFunAndArgs0, Response)
end,
[]
},
emqx_connector_http:on_query_async(
InstanceId, {send_message, IoTDBPayload}, ReplyFunAndArgs, State
).
%%--------------------------------------------------------------------
%% Internal Functions
%%--------------------------------------------------------------------
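%% A message's payload is expected to be a map (or a list of maps) with the
%% fields `measurement', `data_type' and `value', plus an optional
%% `timestamp' (defaulting to <<"now">>). preproc_data/1 pre-compiles the
%% templated fields so proc_data/2 can render them against each message.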
preproc_data(DataList) ->
lists:map(
fun(
#{
measurement := Measurement,
data_type := DataType,
value := Value
} = Data
) ->
#{
timestamp => emqx_plugin_libs_rule:preproc_tmpl(
maps:get(<<"timestamp">>, Data, <<"now">>)
),
measurement => emqx_plugin_libs_rule:preproc_tmpl(Measurement),
data_type => DataType,
value => emqx_plugin_libs_rule:preproc_tmpl(Value)
}
end,
DataList
).
proc_data(PreProcessedData, Msg) ->
NowNS = erlang:system_time(nanosecond),
Nows = #{
now_ms => erlang:convert_time_unit(NowNS, nanosecond, millisecond),
now_us => erlang:convert_time_unit(NowNS, nanosecond, microsecond),
now_ns => NowNS
},
lists:map(
fun(
#{
timestamp := TimestampTkn,
measurement := Measurement,
data_type := DataType,
value := ValueTkn
}
) ->
#{
timestamp => iot_timestamp(
emqx_plugin_libs_rule:proc_tmpl(TimestampTkn, Msg), Nows
),
measurement => emqx_plugin_libs_rule:proc_tmpl(Measurement, Msg),
data_type => DataType,
value => proc_value(DataType, ValueTkn, Msg)
}
end,
PreProcessedData
).
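%% A timestamp may be one of the literals <<"now">>, <<"now_ms">>, <<"now_us">>
%% or <<"now_ns">> (resolved against the current time in the corresponding
%% unit), or an explicit integer encoded as a binary.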
iot_timestamp(Timestamp, #{now_ms := NowMs}) when
Timestamp =:= <<"now">>; Timestamp =:= <<"now_ms">>; Timestamp =:= <<>>
->
NowMs;
iot_timestamp(Timestamp, #{now_us := NowUs}) when Timestamp =:= <<"now_us">> ->
NowUs;
iot_timestamp(Timestamp, #{now_ns := NowNs}) when Timestamp =:= <<"now_ns">> ->
NowNs;
iot_timestamp(Timestamp, _) when is_binary(Timestamp) ->
binary_to_integer(Timestamp).
proc_value(<<"TEXT">>, ValueTkn, Msg) ->
case emqx_plugin_libs_rule:proc_tmpl(ValueTkn, Msg) of
<<"undefined">> -> null;
Val -> Val
end;
proc_value(<<"BOOLEAN">>, ValueTkn, Msg) ->
convert_bool(replace_var(ValueTkn, Msg));
proc_value(Int, ValueTkn, Msg) when Int =:= <<"INT32">>; Int =:= <<"INT64">> ->
convert_int(replace_var(ValueTkn, Msg));
proc_value(Int, ValueTkn, Msg) when Int =:= <<"FLOAT">>; Int =:= <<"DOUBLE">> ->
convert_float(replace_var(ValueTkn, Msg)).
replace_var(Tokens, Data) when is_list(Tokens) ->
[Val] = emqx_plugin_libs_rule:proc_tmpl(Tokens, Data, #{return => rawlist}),
Val;
replace_var(Val, _Data) ->
Val.
convert_bool(B) when is_boolean(B) -> B;
convert_bool(1) -> true;
convert_bool(0) -> false;
convert_bool(<<"1">>) -> true;
convert_bool(<<"0">>) -> false;
convert_bool(<<"true">>) -> true;
convert_bool(<<"True">>) -> true;
convert_bool(<<"TRUE">>) -> true;
convert_bool(<<"false">>) -> false;
convert_bool(<<"False">>) -> false;
convert_bool(<<"FALSE">>) -> false;
convert_bool(undefined) -> null.
convert_int(Int) when is_integer(Int) -> Int;
convert_int(Float) when is_float(Float) -> floor(Float);
convert_int(Str) when is_binary(Str) ->
try
binary_to_integer(Str)
catch
_:_ ->
convert_int(binary_to_float(Str))
end;
convert_int(undefined) ->
null.
convert_float(Float) when is_float(Float) -> Float;
convert_float(Int) when is_integer(Int) -> Int * 1.0;
convert_float(Str) when is_binary(Str) ->
try
binary_to_float(Str)
catch
_:_ ->
convert_float(binary_to_integer(Str))
end;
convert_float(undefined) ->
null.
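%% Builds the body of the IoTDB REST insert request: data points are
%% aggregated into column-oriented rows and field names are selected per the
%% configured IoTDB version (e.g. `device'/`is_aligned' for 1.0.x vs.
%% `deviceId'/`isAligned' for 0.13.x).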
make_iotdb_insert_request(Message, State) ->
IsAligned = maps:get(is_aligned, State, false),
DeviceId = device_id(Message, State),
IotDBVsn = maps:get(iotdb_version, State, ?VSN_1_0_X),
Payload = make_list(maps:get(payload, Message)),
PreProcessedData = preproc_data(Payload),
DataList = proc_data(PreProcessedData, Message),
InitAcc = #{timestamps => [], measurements => [], dtypes => [], values => []},
Rows = replace_dtypes(aggregate_rows(DataList, InitAcc), IotDBVsn),
maps:merge(Rows, #{
iotdb_field_key(is_aligned, IotDBVsn) => IsAligned,
iotdb_field_key(device_id, IotDBVsn) => DeviceId
}).
replace_dtypes(Rows, IotDBVsn) ->
{Types, Map} = maps:take(dtypes, Rows),
Map#{iotdb_field_key(data_types, IotDBVsn) => Types}.
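%% Folds the data points into row format: one timestamp per point and one
%% column per distinct measurement, padding slots where a measurement has no
%% value for a given timestamp with `null'.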
aggregate_rows(DataList, InitAcc) ->
lists:foldr(
fun(
#{
timestamp := Timestamp,
measurement := Measurement,
data_type := DataType,
value := Data
},
#{
timestamps := AccTs,
measurements := AccM,
dtypes := AccDt,
values := AccV
} = Acc
) ->
Timestamps = [Timestamp | AccTs],
case index_of(Measurement, AccM) of
0 ->
Acc#{
timestamps => Timestamps,
values => [pad_value(Data, length(AccTs)) | pad_existing_values(AccV)],
measurements => [Measurement | AccM],
dtypes => [DataType | AccDt]
};
Index ->
Acc#{
timestamps => Timestamps,
values => insert_value(Index, Data, AccV),
measurements => AccM,
dtypes => AccDt
}
end
end,
InitAcc,
DataList
).
pad_value(Data, N) ->
[Data | lists:duplicate(N, null)].
pad_existing_values(Values) ->
[[null | Value] || Value <- Values].
index_of(E, List) ->
string:str(List, [E]).
insert_value(_Index, _Data, []) ->
[];
insert_value(1, Data, [Value | Values]) ->
[[Data | Value] | insert_value(0, Data, Values)];
insert_value(Index, Data, [Value | Values]) ->
[[null | Value] | insert_value(Index - 1, Data, Values)].
iotdb_field_key(is_aligned, ?VSN_1_0_X) ->
<<"is_aligned">>;
iotdb_field_key(is_aligned, ?VSN_0_13_X) ->
<<"isAligned">>;
iotdb_field_key(device_id, ?VSN_1_0_X) ->
<<"device">>;
iotdb_field_key(device_id, ?VSN_0_13_X) ->
<<"deviceId">>;
iotdb_field_key(data_types, ?VSN_1_0_X) ->
<<"data_types">>;
iotdb_field_key(data_types, ?VSN_0_13_X) ->
<<"dataTypes">>.
make_list(List) when is_list(List) -> List;
make_list(Data) -> [Data].
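%% Device id resolution order: the (possibly templated) `device_id' from the
%% bridge config, then a `device_id' field in the payload, and finally a name
%% derived from the MQTT topic by replacing "/" with "." and prefixing
%% "root." when missing.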
device_id(Message, State) ->
case maps:get(device_id, State, undefined) of
undefined ->
case maps:get(payload, Message) of
#{device_id := DeviceId} ->
DeviceId;
_NotFound ->
Topic = maps:get(topic, Message),
case re:replace(Topic, "/", ".", [global, {return, binary}]) of
<<"root.", _/binary>> = Device -> Device;
Device -> <<"root.", Device/binary>>
end
end;
DeviceId ->
DeviceIdTkn = emqx_plugin_libs_rule:preproc_tmpl(DeviceId),
emqx_plugin_libs_rule:proc_tmpl(DeviceIdTkn, Message)
end.
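%% An HTTP 200 alone is not sufficient: the IoTDB REST response body carries
%% its own "code" field, which must also be 200 for the request to count as
%% successful.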
handle_response({ok, 200, _Headers, Body} = Resp) ->
eval_response_body(Body, Resp);
handle_response({ok, 200, Body} = Resp) ->
eval_response_body(Body, Resp);
handle_response({ok, Code, _Headers, Body}) ->
{error, #{code => Code, body => Body}};
handle_response({ok, Code, Body}) ->
{error, #{code => Code, body => Body}};
handle_response({error, _} = Error) ->
Error.
eval_response_body(Body, Resp) ->
case emqx_utils_json:decode(Body) of
#{<<"code">> := 200} -> Resp;
Reason -> {error, Reason}
end.

View File

@ -0,0 +1,229 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_iotdb_impl_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-define(BRIDGE_TYPE_BIN, <<"iotdb">>).
-define(APPS, [emqx_bridge, emqx_resource, emqx_rule_engine, emqx_bridge_iotdb]).
%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------
all() ->
[
{group, plain}
].
groups() ->
AllTCs = emqx_common_test_helpers:all(?MODULE),
[
{plain, AllTCs}
].
init_per_suite(Config) ->
emqx_bridge_testlib:init_per_suite(Config, ?APPS).
end_per_suite(Config) ->
emqx_bridge_testlib:end_per_suite(Config).
init_per_group(plain = Type, Config0) ->
Host = os:getenv("IOTDB_PLAIN_HOST", "toxiproxy.emqx.net"),
Port = list_to_integer(os:getenv("IOTDB_PLAIN_PORT", "18080")),
ProxyName = "iotdb",
case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of
true ->
Config = emqx_bridge_testlib:init_per_group(Type, ?BRIDGE_TYPE_BIN, Config0),
[
{bridge_host, Host},
{bridge_port, Port},
{proxy_name, ProxyName}
| Config
];
false ->
case os:getenv("IS_CI") of
"yes" ->
throw(no_iotdb);
_ ->
{skip, no_iotdb}
end
end;
init_per_group(_Group, Config) ->
Config.
end_per_group(Group, Config) when
Group =:= plain
->
emqx_bridge_testlib:end_per_group(Config),
ok;
end_per_group(_Group, _Config) ->
ok.
init_per_testcase(TestCase, Config0) ->
Config = emqx_bridge_testlib:init_per_testcase(TestCase, Config0, fun bridge_config/3),
reset_service(Config),
Config.
end_per_testcase(TestCase, Config) ->
emqx_bridge_testlib:end_per_testcase(TestCase, Config).
%%------------------------------------------------------------------------------
%% Helper fns
%%------------------------------------------------------------------------------
bridge_config(TestCase, _TestGroup, Config) ->
UniqueNum = integer_to_binary(erlang:unique_integer()),
Host = ?config(bridge_host, Config),
Port = ?config(bridge_port, Config),
Name = <<
(atom_to_binary(TestCase))/binary, UniqueNum/binary
>>,
ServerURL = iolist_to_binary([
"http://",
Host,
":",
integer_to_binary(Port)
]),
ConfigString =
io_lib:format(
"bridges.iotdb.~s {\n"
" enable = true\n"
" base_url = \"~s\"\n"
" authentication = {\n"
" username = \"root\"\n"
" password = \"root\"\n"
" }\n"
" pool_size = 1\n"
" resource_opts = {\n"
" auto_restart_interval = 5000\n"
" request_timeout = 30000\n"
" query_mode = \"async\"\n"
" worker_pool_size = 1\n"
" }\n"
"}\n",
[
Name,
ServerURL
]
),
{Name, ConfigString, emqx_bridge_testlib:parse_and_check(Config, ConfigString, Name)}.
reset_service(Config) ->
_BridgeConfig =
#{
<<"base_url">> := BaseURL,
<<"authentication">> := #{
<<"username">> := Username,
<<"password">> := Password
}
} =
?config(bridge_config, Config),
ct:pal("bridge config: ~p", [_BridgeConfig]),
Path = <<BaseURL/binary, "/rest/v2/nonQuery">>,
BasicToken = base64:encode(<<Username/binary, ":", Password/binary>>),
Headers = [
{"Content-type", "application/json"},
{"Authorization", binary_to_list(BasicToken)}
],
Device = iotdb_device(Config),
Body = #{sql => <<"delete from ", Device/binary, ".*">>},
{ok, _} = emqx_mgmt_api_test_util:request_api(post, Path, "", Headers, Body, #{}).
make_iotdb_payload(DeviceId) ->
make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "36").
make_iotdb_payload(DeviceId, Measurement, Type, Value) ->
#{
measurement => Measurement,
data_type => Type,
value => Value,
device_id => DeviceId,
is_aligned => false
}.
make_message_fun(Topic, Payload) ->
fun() ->
MsgId = erlang:unique_integer([positive]),
#{
topic => Topic,
id => MsgId,
payload => Payload,
retain => true
}
end.
iotdb_device(Config) ->
MQTTTopic = ?config(mqtt_topic, Config),
Device = re:replace(MQTTTopic, "/", ".dev", [global, {return, binary}]),
<<"root.", Device/binary>>.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
t_sync_query_simple(Config) ->
DeviceId = iotdb_device(Config),
Payload = make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "36"),
MakeMessageFun = make_message_fun(DeviceId, Payload),
IsSuccessCheck =
fun(Result) ->
?assertEqual(ok, element(1, Result))
end,
emqx_bridge_testlib:t_sync_query(Config, MakeMessageFun, IsSuccessCheck).
t_async_query(Config) ->
DeviceId = iotdb_device(Config),
Payload = make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "36"),
MakeMessageFun = make_message_fun(DeviceId, Payload),
IsSuccessCheck =
fun(Result) ->
?assertEqual(ok, element(1, Result))
end,
emqx_bridge_testlib:t_async_query(Config, MakeMessageFun, IsSuccessCheck).
t_sync_query_aggregated(Config) ->
DeviceId = iotdb_device(Config),
Payload = [
make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "36"),
(make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "37"))#{timestamp => <<"now_us">>},
(make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "38"))#{timestamp => <<"now_ns">>},
make_iotdb_payload(DeviceId, "charged", <<"BOOLEAN">>, "1"),
make_iotdb_payload(DeviceId, "stoked", <<"BOOLEAN">>, "true"),
make_iotdb_payload(DeviceId, "enriched", <<"BOOLEAN">>, <<"TRUE">>),
make_iotdb_payload(DeviceId, "drained", <<"BOOLEAN">>, "0"),
make_iotdb_payload(DeviceId, "dazzled", <<"BOOLEAN">>, "false"),
make_iotdb_payload(DeviceId, "unplugged", <<"BOOLEAN">>, <<"FALSE">>),
make_iotdb_payload(DeviceId, "weight", <<"FLOAT">>, "87.3"),
make_iotdb_payload(DeviceId, "foo", <<"TEXT">>, <<"bar">>)
],
MakeMessageFun = make_message_fun(DeviceId, Payload),
IsSuccessCheck =
fun(Result) ->
?assertEqual(ok, element(1, Result))
end,
emqx_bridge_testlib:t_sync_query(Config, MakeMessageFun, IsSuccessCheck).
t_sync_query_fail(Config) ->
DeviceId = iotdb_device(Config),
Payload = make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "Anton"),
MakeMessageFun = make_message_fun(DeviceId, Payload),
IsSuccessCheck =
fun(Result) ->
?assertEqual(error, element(1, Result))
end,
emqx_bridge_testlib:t_sync_query(Config, MakeMessageFun, IsSuccessCheck).
t_create_via_http(Config) ->
emqx_bridge_testlib:t_create_via_http(Config).
t_start_stop(Config) ->
emqx_bridge_testlib:t_start_stop(Config, iotdb_bridge_stopped).
t_on_get_status(Config) ->
emqx_bridge_testlib:t_on_get_status(Config).

View File

@ -12,14 +12,11 @@ not used by authentication and authorization applications.
# Documentation links
For more information on Apache Kafka, please see its [official
site](https://kafka.apache.org/).
For more information about Apache Kafka, please see its [official site](https://kafka.apache.org/).
# Configurations
Please see [our official
documentation](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-kafka.html)
for more detailed info.
Please see [Ingest data into Kafka](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-kafka.html) for more detailed info.
# Contributing

View File

@ -2,7 +2,7 @@
{erl_opts, [debug_info]}.
{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.7.5"}}}
, {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.2"}}}
, {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0-rc1"}}}
, {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0"}}}
, {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}}
, {emqx_connector, {path, "../../apps/emqx_connector"}}
, {emqx_resource, {path, "../../apps/emqx_resource"}}

View File

@ -7,7 +7,8 @@
stdlib,
telemetry,
wolff,
brod
brod,
brod_gssapi
]},
{env, []},
{modules, []},

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this License's text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this License's text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,46 @@
# EMQX RabbitMQ Bridge
[RabbitMQ](https://www.rabbitmq.com/) is a powerful, open-source message broker
that facilitates asynchronous communication between different components of an
application. Built on the Advanced Message Queuing Protocol (AMQP), RabbitMQ
enables the reliable transmission of messages by decoupling the sender and
receiver components. This separation allows for increased scalability,
robustness, and flexibility in application architecture.
RabbitMQ is commonly used for a wide range of purposes, such as distributing
tasks among multiple workers, enabling event-driven architectures, and
implementing publish-subscribe patterns. It is a popular choice for
microservices, distributed systems, and real-time applications, providing an
efficient way to handle varying workloads and ensuring message delivery in
complex environments.
This application is used to connect EMQX and RabbitMQ. Users can create a rule
to easily ingest IoT data into RabbitMQ by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
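As an illustration, a minimal bridge configuration could look roughly like the sketch below (field names follow this application's schema; the host, credentials, exchange and routing key are placeholders):
```
bridges.rabbitmq.my_rabbitmq_bridge {
  enable = true
  server = "localhost"
  port = 5672
  username = "guest"
  password = "guest"
  exchange = "messages"
  routing_key = "my_routing_key"
}
```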
# Documentation
- Refer to the [RabbitMQ bridge documentation](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-rabbitmq.html)
for how to use EMQX dashboard to ingest IoT data into RabbitMQ.
- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
for an introduction to the EMQX rules engine.
# HTTP APIs
- Several APIs are provided for bridge management: creating, updating, fetching,
stopping or restarting, and listing bridges, among others.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information.
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1 @@
rabbitmq

View File

@ -0,0 +1,33 @@
%% -*- mode: erlang; -*-
{erl_opts, [debug_info]}.
{deps, [
%% The following two are dependencies of rabbit_common
{thoas, {git, "https://github.com/emqx/thoas.git", {tag, "v1.0.0"}}}
, {credentials_obfuscation, {git, "https://github.com/emqx/credentials-obfuscation.git", {tag, "v3.2.0"}}}
%% The v3.11.13_with_app_src tag, employed in the next two dependencies,
%% represents a fork of the official RabbitMQ v3.11.13 tag. This fork diverges
%% from the official version as it includes app and hrl files
%% generated by make files in subdirectories deps/rabbit_common and
%% deps/amqp_client (app files are also relocated from the ebin to the src
%% directory). This modification ensures compatibility with rebar3, as
%% rabbit_common and amqp_client utilize the erlang.mk build tool.
%% Similar changes are probably needed when upgrading to newer versions
%% of rabbit_common and amqp_client. There are hex packages for rabbit_common and
%% amqp_client, but they are not used here as we don't want to depend on
%% packages that we don't have control over.
, {rabbit_common, {git_subdir,
"https://github.com/emqx/rabbitmq-server.git",
{tag, "v3.11.13-emqx"},
"deps/rabbit_common"}}
, {amqp_client, {git_subdir,
"https://github.com/emqx/rabbitmq-server.git",
{tag, "v3.11.13-emqx"},
"deps/amqp_client"}}
, {emqx_connector, {path, "../../apps/emqx_connector"}}
, {emqx_resource, {path, "../../apps/emqx_resource"}}
, {emqx_bridge, {path, "../../apps/emqx_bridge"}}
]}.
{shell, [
{apps, [emqx_bridge_rabbitmq]}
]}.

View File

@ -0,0 +1,9 @@
{application, emqx_bridge_rabbitmq, [
{description, "EMQX Enterprise RabbitMQ Bridge"},
{vsn, "0.1.0"},
{registered, []},
{applications, [kernel, stdlib, rabbit_common, amqp_client]},
{env, []},
{modules, []},
{links, []}
]}.

View File

@ -0,0 +1,124 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_rabbitmq).
-include_lib("emqx_bridge/include/emqx_bridge.hrl").
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").
-import(hoconsc, [mk/2, enum/1, ref/2]).
-export([
conn_bridge_examples/1
]).
-export([
namespace/0,
roots/0,
fields/1,
desc/1
]).
%% -------------------------------------------------------------------------------------------------
%% Callback used by HTTP API
%% -------------------------------------------------------------------------------------------------
conn_bridge_examples(Method) ->
[
#{
<<"rabbitmq">> => #{
summary => <<"RabbitMQ Bridge">>,
value => values(Method, "rabbitmq")
}
}
].
values(_Method, Type) ->
#{
enable => true,
type => Type,
name => <<"foo">>,
server => <<"localhost">>,
port => 5672,
username => <<"guest">>,
password => <<"******">>,
pool_size => 8,
timeout => 5,
virtual_host => <<"/">>,
heartbeat => <<"30s">>,
auto_reconnect => <<"2s">>,
exchange => <<"messages">>,
exchange_type => <<"topic">>,
routing_key => <<"my_routing_key">>,
durable => false,
payload_template => <<"">>,
resource_opts => #{
worker_pool_size => 8,
health_check_interval => ?HEALTHCHECK_INTERVAL_RAW,
auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW,
batch_size => ?DEFAULT_BATCH_SIZE,
batch_time => ?DEFAULT_BATCH_TIME,
query_mode => async,
max_buffer_bytes => ?DEFAULT_BUFFER_BYTES
}
}.
%% -------------------------------------------------------------------------------------------------
%% Hocon Schema Definitions
%% -------------------------------------------------------------------------------------------------
namespace() -> "bridge_rabbitmq".
roots() -> [].
fields("config") ->
[
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
{local_topic,
mk(
binary(),
#{desc => ?DESC("local_topic"), default => undefined}
)},
{resource_opts,
mk(
ref(?MODULE, "creation_opts"),
#{
required => false,
default => #{},
desc => ?DESC(emqx_resource_schema, <<"resource_opts">>)
}
)}
] ++
emqx_bridge_rabbitmq_connector:fields(config);
fields("creation_opts") ->
emqx_resource_schema:fields("creation_opts");
fields("post") ->
fields("post", clickhouse);
fields("put") ->
fields("config");
fields("get") ->
emqx_bridge_schema:status_fields() ++ fields("post").
fields("post", Type) ->
[type_field(Type), name_field() | fields("config")].
desc("config") ->
?DESC("desc_config");
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
["Configuration for RabbitMQ using `", string:to_upper(Method), "` method."];
desc("creation_opts" = Name) ->
emqx_resource_schema:desc(Name);
desc(_) ->
undefined.
%% -------------------------------------------------------------------------------------------------
%% internal
%% -------------------------------------------------------------------------------------------------
type_field(Type) ->
{type, mk(enum([Type]), #{required => true, desc => ?DESC("desc_type")})}.
name_field() ->
{name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.

View File

@ -0,0 +1,548 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_rabbitmq_connector).
-include_lib("emqx_connector/include/emqx_connector.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").
-include_lib("typerefl/include/types.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
%% Needed to create RabbitMQ connection
-include_lib("amqp_client/include/amqp_client.hrl").
-behaviour(emqx_resource).
-behaviour(hocon_schema).
-behaviour(ecpool_worker).
%% hocon_schema callbacks
-export([roots/0, fields/1]).
%% HTTP API callbacks
-export([values/1]).
%% emqx_resource callbacks
-export([
%% Required callbacks
on_start/2,
on_stop/2,
callback_mode/0,
%% Optional callbacks
on_get_status/2,
on_query/3,
is_buffer_supported/0,
on_batch_query/3
]).
%% callbacks for ecpool_worker
-export([connect/1]).
%% Internal callbacks
-export([publish_messages/3]).
roots() ->
[{config, #{type => hoconsc:ref(?MODULE, config)}}].
fields(config) ->
[
{server,
hoconsc:mk(
typerefl:binary(),
#{
default => <<"localhost">>,
desc => ?DESC("server")
}
)},
{port,
hoconsc:mk(
emqx_schema:port_number(),
#{
default => 5672,
desc => ?DESC("server")
}
)},
{username,
hoconsc:mk(
typerefl:binary(),
#{
required => true,
desc => ?DESC("username")
}
)},
{password,
hoconsc:mk(
typerefl:binary(),
#{
required => true,
desc => ?DESC("password")
}
)},
{pool_size,
hoconsc:mk(
typerefl:pos_integer(),
#{
default => 8,
desc => ?DESC("pool_size")
}
)},
{timeout,
hoconsc:mk(
emqx_schema:duration_ms(),
#{
default => <<"5s">>,
desc => ?DESC("timeout")
}
)},
{wait_for_publish_confirmations,
hoconsc:mk(
boolean(),
#{
default => true,
desc => ?DESC("wait_for_publish_confirmations")
}
)},
{publish_confirmation_timeout,
hoconsc:mk(
emqx_schema:duration_ms(),
#{
default => <<"30s">>,
desc => ?DESC("timeout")
}
)},
{virtual_host,
hoconsc:mk(
typerefl:binary(),
#{
default => <<"/">>,
desc => ?DESC("virtual_host")
}
)},
{heartbeat,
hoconsc:mk(
emqx_schema:duration_ms(),
#{
default => <<"30s">>,
desc => ?DESC("heartbeat")
}
)},
{auto_reconnect,
hoconsc:mk(
emqx_schema:duration_ms(),
#{
default => <<"2s">>,
desc => ?DESC("auto_reconnect")
}
)},
%% Things related to sending messages to RabbitMQ
{exchange,
hoconsc:mk(
typerefl:binary(),
#{
required => true,
desc => ?DESC("exchange")
}
)},
{routing_key,
hoconsc:mk(
typerefl:binary(),
#{
required => true,
desc => ?DESC("routing_key")
}
)},
{delivery_mode,
hoconsc:mk(
hoconsc:enum([non_persistent, persistent]),
#{
default => non_persistent,
desc => ?DESC("delivery_mode")
}
)},
{payload_template,
hoconsc:mk(
binary(),
#{
default => <<"${.}">>,
desc => ?DESC("payload_template")
}
)}
].
values(post) ->
maps:merge(values(put), #{name => <<"connector">>});
values(get) ->
values(post);
values(put) ->
#{
server => <<"localhost">>,
port => 5672,
enable => true,
pool_size => 8,
type => rabbitmq,
username => <<"guest">>,
password => <<"******">>,
routing_key => <<"my_routing_key">>,
payload_template => <<"">>
};
values(_) ->
#{}.
%% ===================================================================
%% Callbacks defined in emqx_resource
%% ===================================================================
%% emqx_resource callback
callback_mode() -> always_sync.
%% emqx_resource callback
-spec is_buffer_supported() -> boolean().
is_buffer_supported() ->
%% We want to make use of EMQX's buffer mechanism
false.
%% emqx_resource callback called when the resource is started
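%% Note: AMQP delivery_mode is 1 (non-persistent) or 2 (persistent). The
%% password is wrapped with emqx_secret:wrap/1 so the plain value is not kept
%% in the stored state or written to logs.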
-spec on_start(resource_id(), term()) -> {ok, resource_state()} | {error, _}.
on_start(
InstanceID,
#{
pool_size := PoolSize,
payload_template := PayloadTemplate,
password := Password,
delivery_mode := InitialDeliveryMode
} = InitialConfig
) ->
DeliveryMode =
case InitialDeliveryMode of
non_persistent -> 1;
persistent -> 2
end,
Config = InitialConfig#{
password => emqx_secret:wrap(Password),
delivery_mode => DeliveryMode
},
?SLOG(info, #{
msg => "starting_rabbitmq_connector",
connector => InstanceID,
config => emqx_utils:redact(Config)
}),
Options = [
{config, Config},
%% The pool_size is read by ecpool and decides the number of workers in
%% the pool
{pool_size, PoolSize},
{pool, InstanceID}
],
ProcessedTemplate = emqx_plugin_libs_rule:preproc_tmpl(PayloadTemplate),
State = #{
poolname => InstanceID,
processed_payload_template => ProcessedTemplate,
config => Config
},
case emqx_resource_pool:start(InstanceID, ?MODULE, Options) of
ok ->
{ok, State};
{error, Reason} ->
LogMessage =
#{
msg => "rabbitmq_connector_start_failed",
error_reason => Reason,
config => emqx_utils:redact(Config)
},
?SLOG(info, LogMessage),
{error, Reason}
end.
%% emqx_resource callback called when the resource is stopped
-spec on_stop(resource_id(), resource_state()) -> term().
on_stop(
ResourceID,
#{poolname := PoolName} = _State
) ->
?SLOG(info, #{
msg => "stopping RabbitMQ connector",
connector => ResourceID
}),
Workers = [Worker || {_WorkerName, Worker} <- ecpool:workers(PoolName)],
Clients = [
begin
{ok, Client} = ecpool_worker:client(Worker),
Client
end
|| Worker <- Workers
],
%% We need to stop the pool before stopping the workers as the pool monitors the workers
StopResult = emqx_resource_pool:stop(PoolName),
lists:foreach(fun stop_worker/1, Clients),
StopResult.
stop_worker({Channel, Connection}) ->
amqp_channel:close(Channel),
amqp_connection:close(Connection).
%% This is the callback function that is called by ecpool when the pool is
%% started
-spec connect(term()) -> {ok, {pid(), pid()}, map()} | {error, term()}.
connect(Options) ->
Config = proplists:get_value(config, Options),
try
create_rabbitmq_connection_and_channel(Config)
catch
_:{error, Reason} ->
?SLOG(error, #{
msg => "rabbitmq_connector_connection_failed",
error_type => error,
error_reason => Reason,
config => emqx_utils:redact(Config)
}),
{error, Reason};
Type:Reason ->
?SLOG(error, #{
msg => "rabbitmq_connector_connection_failed",
error_type => Type,
error_reason => Reason,
config => emqx_utils:redact(Config)
}),
{error, Reason}
end.
create_rabbitmq_connection_and_channel(Config) ->
#{
server := Host,
port := Port,
username := Username,
password := WrappedPassword,
timeout := Timeout,
virtual_host := VirtualHost,
heartbeat := Heartbeat,
wait_for_publish_confirmations := WaitForPublishConfirmations
} = Config,
Password = emqx_secret:unwrap(WrappedPassword),
RabbitMQConnectionOptions =
#amqp_params_network{
host = erlang:binary_to_list(Host),
port = Port,
username = Username,
password = Password,
connection_timeout = Timeout,
virtual_host = VirtualHost,
heartbeat = Heartbeat
},
{ok, RabbitMQConnection} =
case amqp_connection:start(RabbitMQConnectionOptions) of
{ok, Connection} ->
{ok, Connection};
{error, Reason} ->
erlang:error({error, Reason})
end,
{ok, RabbitMQChannel} =
case amqp_connection:open_channel(RabbitMQConnection) of
{ok, Channel} ->
{ok, Channel};
{error, OpenChannelErrorReason} ->
erlang:error({error, OpenChannelErrorReason})
end,
%% We need to enable confirmations if we want to wait for them
case WaitForPublishConfirmations of
true ->
case amqp_channel:call(RabbitMQChannel, #'confirm.select'{}) of
#'confirm.select_ok'{} ->
ok;
Error ->
ConfirmModeErrorReason =
erlang:iolist_to_binary(
io_lib:format(
"Could not enable RabbitMQ confirmation mode ~p",
[Error]
)
),
erlang:error({error, ConfirmModeErrorReason})
end;
false ->
ok
end,
{ok, {RabbitMQConnection, RabbitMQChannel}, #{
supervisees => [RabbitMQConnection, RabbitMQChannel]
}}.
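%% The bridge is reported as `connected' only when every pooled worker's AMQP
%% connection and channel processes are alive.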
%% emqx_resource callback called to check the status of the resource
-spec on_get_status(resource_id(), term()) ->
{connected, resource_state()} | {disconnected, resource_state(), binary()}.
on_get_status(
_InstId,
#{
poolname := PoolName
} = State
) ->
Workers = [Worker || {_WorkerName, Worker} <- ecpool:workers(PoolName)],
Clients = [
begin
{ok, Client} = ecpool_worker:client(Worker),
Client
end
|| Worker <- Workers
],
CheckResults = [
check_worker(Client)
|| Client <- Clients
],
Connected = length(CheckResults) > 0 andalso lists:all(fun(R) -> R end, CheckResults),
case Connected of
true ->
{connected, State};
false ->
{disconnected, State, <<"not_connected">>}
end;
on_get_status(
_InstId,
State
) ->
{disconnected, State, <<"not_connected: no connection pool in state">>}.
check_worker({Channel, Connection}) ->
erlang:is_process_alive(Channel) andalso erlang:is_process_alive(Connection).
%% emqx_resource callback that is called when a non-batch query is received
-spec on_query(resource_id(), Request, resource_state()) -> query_result() when
Request :: {RequestType, Data},
RequestType :: send_message,
Data :: map().
on_query(
ResourceID,
{RequestType, Data},
#{
poolname := PoolName,
processed_payload_template := PayloadTemplate,
config := Config
} = State
) ->
?SLOG(debug, #{
msg => "RabbitMQ connector received query",
connector => ResourceID,
type => RequestType,
data => Data,
state => emqx_utils:redact(State)
}),
MessageData = format_data(PayloadTemplate, Data),
ecpool:pick_and_do(
PoolName,
{?MODULE, publish_messages, [Config, [MessageData]]},
no_handover
).
%% emqx_resource callback that is called when a batch query is received
-spec on_batch_query(resource_id(), BatchReq, resource_state()) -> query_result() when
BatchReq :: nonempty_list({'send_message', map()}).
on_batch_query(
ResourceID,
BatchReq,
State
) ->
?SLOG(debug, #{
msg => "RabbitMQ connector received batch query",
connector => ResourceID,
data => BatchReq,
state => emqx_utils:redact(State)
}),
%% Currently we only support batch requests with the send_message key
{Keys, MessagesToInsert} = lists:unzip(BatchReq),
ensure_keys_are_of_type_send_message(Keys),
%% Pick out the payload template
#{
processed_payload_template := PayloadTemplate,
poolname := PoolName,
config := Config
} = State,
%% Create batch payload
FormattedMessages = [
format_data(PayloadTemplate, Data)
|| Data <- MessagesToInsert
],
%% Publish the messages
ecpool:pick_and_do(
PoolName,
{?MODULE, publish_messages, [Config, FormattedMessages]},
no_handover
).
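%% Publishes each message on the pooled channel with `basic.publish' (a cast).
%% When `wait_for_publish_confirmations' is enabled, the call then blocks in
%% wait_for_confirms/2; a NACK or a timeout is raised as a `recoverable_error'
%% so the query can be retried.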
publish_messages(
{_Connection, Channel},
#{
delivery_mode := DeliveryMode,
routing_key := RoutingKey,
exchange := Exchange,
wait_for_publish_confirmations := WaitForPublishConfirmations,
publish_confirmation_timeout := PublishConfirmationTimeout
} = _Config,
Messages
) ->
MessageProperties = #'P_basic'{
headers = [],
delivery_mode = DeliveryMode
},
Method = #'basic.publish'{
exchange = Exchange,
routing_key = RoutingKey
},
_ = [
amqp_channel:cast(
Channel,
Method,
#amqp_msg{
payload = Message,
props = MessageProperties
}
)
|| Message <- Messages
],
case WaitForPublishConfirmations of
true ->
case amqp_channel:wait_for_confirms(Channel, PublishConfirmationTimeout) of
true ->
ok;
false ->
erlang:error(
{recoverable_error,
<<"RabbitMQ: Got NACK when waiting for message acknowledgment.">>}
);
timeout ->
erlang:error(
{recoverable_error,
<<"RabbitMQ: Timeout when waiting for message acknowledgment.">>}
)
end;
false ->
ok
end.
ensure_keys_are_of_type_send_message(Keys) ->
case lists:all(fun is_send_message_atom/1, Keys) of
true ->
ok;
false ->
erlang:error(
{unrecoverable_error,
<<"Unexpected type for batch message (Expected send_message)">>}
)
end.
is_send_message_atom(send_message) ->
true;
is_send_message_atom(_) ->
false.
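%% An empty payload template means the whole message map is JSON-encoded;
%% otherwise the pre-processed template tokens are rendered against the
%% message.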
format_data([], Msg) ->
emqx_utils_json:encode(Msg);
format_data(Tokens, Msg) ->
emqx_plugin_libs_rule:proc_tmpl(Tokens, Msg).

View File

@ -0,0 +1,371 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_rabbitmq_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("emqx_connector/include/emqx_connector.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("stdlib/include/assert.hrl").
-include_lib("amqp_client/include/amqp_client.hrl").
%% See comment in
%% lib-ee/emqx_ee_connector/test/ee_connector_rabbitmq_SUITE.erl for how to
%% run this without bringing up the whole CI infrastructure
rabbit_mq_host() ->
<<"rabbitmq">>.
rabbit_mq_port() ->
5672.
rabbit_mq_exchange() ->
<<"messages">>.
rabbit_mq_queue() ->
<<"test_queue">>.
rabbit_mq_routing_key() ->
<<"test_routing_key">>.
get_channel_connection(Config) ->
proplists:get_value(channel_connection, Config).
%%------------------------------------------------------------------------------
%% Common Test Setup, Teardown and Testcase List
%%------------------------------------------------------------------------------
init_per_suite(Config) ->
% snabbkaffe:fix_ct_logging(),
case
emqx_common_test_helpers:is_tcp_server_available(
erlang:binary_to_list(rabbit_mq_host()), rabbit_mq_port()
)
of
true ->
emqx_common_test_helpers:render_and_load_app_config(emqx_conf),
ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]),
ok = emqx_connector_test_helpers:start_apps([emqx_resource]),
{ok, _} = application:ensure_all_started(emqx_connector),
{ok, _} = application:ensure_all_started(emqx_ee_connector),
{ok, _} = application:ensure_all_started(emqx_ee_bridge),
{ok, _} = application:ensure_all_started(amqp_client),
emqx_mgmt_api_test_util:init_suite(),
ChannelConnection = setup_rabbit_mq_exchange_and_queue(),
[{channel_connection, ChannelConnection} | Config];
false ->
case os:getenv("IS_CI") of
"yes" ->
throw(no_rabbitmq);
_ ->
{skip, no_rabbitmq}
end
end.
setup_rabbit_mq_exchange_and_queue() ->
%% Create an exchange and a queue
{ok, Connection} =
amqp_connection:start(#amqp_params_network{
host = erlang:binary_to_list(rabbit_mq_host()),
port = rabbit_mq_port()
}),
{ok, Channel} = amqp_connection:open_channel(Connection),
%% Create an exchange
#'exchange.declare_ok'{} =
amqp_channel:call(
Channel,
#'exchange.declare'{
exchange = rabbit_mq_exchange(),
type = <<"topic">>
}
),
%% Create a queue
#'queue.declare_ok'{} =
amqp_channel:call(
Channel,
#'queue.declare'{queue = rabbit_mq_queue()}
),
%% Bind the queue to the exchange
#'queue.bind_ok'{} =
amqp_channel:call(
Channel,
#'queue.bind'{
queue = rabbit_mq_queue(),
exchange = rabbit_mq_exchange(),
routing_key = rabbit_mq_routing_key()
}
),
#{
connection => Connection,
channel => Channel
}.
end_per_suite(Config) ->
#{
connection := Connection,
channel := Channel
} = get_channel_connection(Config),
emqx_mgmt_api_test_util:end_suite(),
ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
ok = emqx_connector_test_helpers:stop_apps([emqx_resource]),
_ = application:stop(emqx_connector),
_ = application:stop(emqx_ee_connector),
_ = application:stop(emqx_bridge),
%% Close the channel
ok = amqp_channel:close(Channel),
%% Close the connection
ok = amqp_connection:close(Connection).
init_per_testcase(_, Config) ->
Config.
end_per_testcase(_, _Config) ->
ok.
all() ->
emqx_common_test_helpers:all(?MODULE).
rabbitmq_config(Config) ->
BatchSize = maps:get(batch_size, Config, 1),
BatchTime = maps:get(batch_time_ms, Config, 0),
Name = atom_to_binary(?MODULE),
Server = maps:get(server, Config, rabbit_mq_host()),
Port = maps:get(port, Config, rabbit_mq_port()),
Template = maps:get(payload_template, Config, <<"">>),
ConfigString =
io_lib:format(
"bridges.rabbitmq.~s {\n"
" enable = true\n"
" server = \"~s\"\n"
" port = ~p\n"
" username = \"guest\"\n"
" password = \"guest\"\n"
" routing_key = \"~s\"\n"
" exchange = \"~s\"\n"
" payload_template = \"~s\"\n"
" resource_opts = {\n"
" batch_size = ~b\n"
" batch_time = ~bms\n"
" }\n"
"}\n",
[
Name,
Server,
Port,
rabbit_mq_routing_key(),
rabbit_mq_exchange(),
Template,
BatchSize,
BatchTime
]
),
ct:pal(ConfigString),
parse_and_check(ConfigString, <<"rabbitmq">>, Name).
parse_and_check(ConfigString, BridgeType, Name) ->
{ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
#{<<"bridges">> := #{BridgeType := #{Name := RetConfig}}} = RawConf,
RetConfig.
make_bridge(Config) ->
Type = <<"rabbitmq">>,
Name = atom_to_binary(?MODULE),
BridgeConfig = rabbitmq_config(Config),
{ok, _} = emqx_bridge:create(
Type,
Name,
BridgeConfig
),
emqx_bridge_resource:bridge_id(Type, Name).
delete_bridge() ->
Type = <<"rabbitmq">>,
Name = atom_to_binary(?MODULE),
{ok, _} = emqx_bridge:remove(Type, Name),
ok.
%%------------------------------------------------------------------------------
%% Test Cases
%%------------------------------------------------------------------------------
t_make_delete_bridge(_Config) ->
make_bridge(#{}),
%% Check that the new bridge is in the list of bridges
Bridges = emqx_bridge:list(),
Name = atom_to_binary(?MODULE),
IsRightName =
fun
(#{name := BName}) when BName =:= Name ->
true;
(_) ->
false
end,
?assert(lists:any(IsRightName, Bridges)),
delete_bridge(),
BridgesAfterDelete = emqx_bridge:list(),
?assertNot(lists:any(IsRightName, BridgesAfterDelete)),
ok.
t_make_delete_bridge_non_existing_server(_Config) ->
make_bridge(#{server => <<"non_existing_server">>, port => 3174}),
%% Check that the new bridge is in the list of bridges
Bridges = emqx_bridge:list(),
Name = atom_to_binary(?MODULE),
IsRightName =
fun
(#{name := BName}) when BName =:= Name ->
true;
(_) ->
false
end,
?assert(lists:any(IsRightName, Bridges)),
delete_bridge(),
BridgesAfterDelete = emqx_bridge:list(),
?assertNot(lists:any(IsRightName, BridgesAfterDelete)),
ok.
t_send_message_query(Config) ->
BridgeID = make_bridge(#{batch_size => 1}),
Payload = #{<<"key">> => 42, <<"data">> => <<"RabbitMQ">>, <<"timestamp">> => 10000},
%% This uses the bridge's payload template (empty here, so the whole message is JSON-encoded)
emqx_bridge:send_message(BridgeID, Payload),
%% Check that the message reached the queue
?assertEqual(Payload, receive_simple_test_message(Config)),
delete_bridge(),
ok.
t_send_message_query_with_template(Config) ->
BridgeID = make_bridge(#{
batch_size => 1,
payload_template =>
<<
"{"
" \\\"key\\\": ${key},"
" \\\"data\\\": \\\"${data}\\\","
" \\\"timestamp\\\": ${timestamp},"
" \\\"secret\\\": 42"
"}"
>>
}),
Payload = #{
<<"key">> => 7,
<<"data">> => <<"RabbitMQ">>,
<<"timestamp">> => 10000
},
emqx_bridge:send_message(BridgeID, Payload),
%% Check that the message reached the queue
ExpectedResult = Payload#{
<<"secret">> => 42
},
?assertEqual(ExpectedResult, receive_simple_test_message(Config)),
delete_bridge(),
ok.
t_send_simple_batch(Config) ->
BridgeConf =
#{
batch_size => 100
},
BridgeID = make_bridge(BridgeConf),
Payload = #{<<"key">> => 42, <<"data">> => <<"RabbitMQ">>, <<"timestamp">> => 10000},
emqx_bridge:send_message(BridgeID, Payload),
?assertEqual(Payload, receive_simple_test_message(Config)),
delete_bridge(),
ok.
t_send_simple_batch_with_template(Config) ->
BridgeConf =
#{
batch_size => 100,
payload_template =>
<<
"{"
" \\\"key\\\": ${key},"
" \\\"data\\\": \\\"${data}\\\","
" \\\"timestamp\\\": ${timestamp},"
" \\\"secret\\\": 42"
"}"
>>
},
BridgeID = make_bridge(BridgeConf),
Payload = #{
<<"key">> => 7,
<<"data">> => <<"RabbitMQ">>,
<<"timestamp">> => 10000
},
emqx_bridge:send_message(BridgeID, Payload),
ExpectedResult = Payload#{
<<"secret">> => 42
},
?assertEqual(ExpectedResult, receive_simple_test_message(Config)),
delete_bridge(),
ok.
t_heavy_batching(Config) ->
NumberOfMessages = 20000,
BridgeConf = #{
batch_size => 10173,
batch_time_ms => 50
},
BridgeID = make_bridge(BridgeConf),
SendMessage = fun(Key) ->
Payload = #{
<<"key">> => Key
},
emqx_bridge:send_message(BridgeID, Payload)
end,
[SendMessage(Key) || Key <- lists:seq(1, NumberOfMessages)],
AllMessages = lists:foldl(
fun(_, Acc) ->
Message = receive_simple_test_message(Config),
#{<<"key">> := Key} = Message,
Acc#{Key => true}
end,
#{},
lists:seq(1, NumberOfMessages)
),
?assertEqual(NumberOfMessages, maps:size(AllMessages)),
delete_bridge(),
ok.
receive_simple_test_message(Config) ->
#{channel := Channel} = get_channel_connection(Config),
#'basic.consume_ok'{consumer_tag = ConsumerTag} =
amqp_channel:call(
Channel,
#'basic.consume'{
queue = rabbit_mq_queue()
}
),
receive
%% This is the first message received
#'basic.consume_ok'{} ->
ok
end,
receive
{#'basic.deliver'{delivery_tag = DeliveryTag}, Content} ->
%% Ack the message
amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = DeliveryTag}),
%% Cancel the consumer
#'basic.cancel_ok'{consumer_tag = ConsumerTag} =
amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = ConsumerTag}),
emqx_utils_json:decode(Content#amqp_msg.payload)
end.
rabbitmq_config() ->
Config =
#{
server => rabbit_mq_host(),
port => 5672,
exchange => rabbit_mq_exchange(),
routing_key => rabbit_mq_routing_key()
},
#{<<"config">> => Config}.
test_data() ->
#{<<"msg_field">> => <<"Hello">>}.

View File

@ -0,0 +1,232 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_rabbitmq_connector_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include("emqx_connector.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("stdlib/include/assert.hrl").
-include_lib("amqp_client/include/amqp_client.hrl").
%% This test SUITE requires a running RabbitMQ instance. If you don't want to
%% bring up the whole CI infrastructure with the `scripts/ct/run.sh` script
%% you can create a RabbitMQ instance with the following command.
%% 5672 is the default port for AMQP 0-9-1 and 15672 is the default port for
%% the HTTP management interface.
%%
%% docker run -it --rm --name rabbitmq -p 127.0.0.1:5672:5672 -p 127.0.0.1:15672:15672 rabbitmq:3.11-management
rabbit_mq_host() ->
<<"rabbitmq">>.
rabbit_mq_port() ->
5672.
rabbit_mq_exchange() ->
<<"test_exchange">>.
rabbit_mq_queue() ->
<<"test_queue">>.
rabbit_mq_routing_key() ->
<<"test_routing_key">>.
all() ->
emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
case
emqx_common_test_helpers:is_tcp_server_available(
erlang:binary_to_list(rabbit_mq_host()), rabbit_mq_port()
)
of
true ->
ok = emqx_common_test_helpers:start_apps([emqx_conf]),
ok = emqx_connector_test_helpers:start_apps([emqx_resource]),
{ok, _} = application:ensure_all_started(emqx_connector),
{ok, _} = application:ensure_all_started(emqx_ee_connector),
{ok, _} = application:ensure_all_started(amqp_client),
ChannelConnection = setup_rabbit_mq_exchange_and_queue(),
[{channel_connection, ChannelConnection} | Config];
false ->
case os:getenv("IS_CI") of
"yes" ->
throw(no_rabbitmq);
_ ->
{skip, no_rabbitmq}
end
end.
setup_rabbit_mq_exchange_and_queue() ->
%% Create an exchange and a queue
{ok, Connection} =
amqp_connection:start(#amqp_params_network{
host = erlang:binary_to_list(rabbit_mq_host()),
port = rabbit_mq_port()
}),
{ok, Channel} = amqp_connection:open_channel(Connection),
%% Create an exchange
#'exchange.declare_ok'{} =
amqp_channel:call(
Channel,
#'exchange.declare'{
exchange = rabbit_mq_exchange(),
type = <<"topic">>
}
),
%% Create a queue
#'queue.declare_ok'{} =
amqp_channel:call(
Channel,
#'queue.declare'{queue = rabbit_mq_queue()}
),
%% Bind the queue to the exchange
#'queue.bind_ok'{} =
amqp_channel:call(
Channel,
#'queue.bind'{
queue = rabbit_mq_queue(),
exchange = rabbit_mq_exchange(),
routing_key = rabbit_mq_routing_key()
}
),
#{
connection => Connection,
channel => Channel
}.
get_channel_connection(Config) ->
proplists:get_value(channel_connection, Config).
end_per_suite(Config) ->
#{
connection := Connection,
channel := Channel
} = get_channel_connection(Config),
ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
ok = emqx_connector_test_helpers:stop_apps([emqx_resource]),
_ = application:stop(emqx_connector),
%% Close the channel
ok = amqp_channel:close(Channel),
%% Close the connection
ok = amqp_connection:close(Connection).
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
t_lifecycle(Config) ->
perform_lifecycle_check(
erlang:atom_to_binary(?MODULE),
rabbitmq_config(),
Config
).
perform_lifecycle_check(ResourceID, InitialConfig, TestConfig) ->
#{
channel := Channel
} = get_channel_connection(TestConfig),
{ok, #{config := CheckedConfig}} =
emqx_resource:check_config(emqx_bridge_rabbitmq_connector, InitialConfig),
{ok, #{
state := #{poolname := PoolName} = State,
status := InitialStatus
}} =
emqx_resource:create_local(
ResourceID,
?CONNECTOR_RESOURCE_GROUP,
emqx_bridge_rabbitmq_connector,
CheckedConfig,
#{}
),
?assertEqual(InitialStatus, connected),
%% Instance should match the state and status of the just started resource
{ok, ?CONNECTOR_RESOURCE_GROUP, #{
state := State,
status := InitialStatus
}} =
emqx_resource:get_instance(ResourceID),
?assertEqual({ok, connected}, emqx_resource:health_check(ResourceID)),
%% Perform query as further check that the resource is working as expected
perform_query(ResourceID, Channel),
?assertEqual(ok, emqx_resource:stop(ResourceID)),
%% Resource will be listed still, but state will be changed and healthcheck will fail
%% as the worker no longer exists.
{ok, ?CONNECTOR_RESOURCE_GROUP, #{
state := State,
status := StoppedStatus
}} = emqx_resource:get_instance(ResourceID),
?assertEqual(stopped, StoppedStatus),
?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceID)),
% Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
% Can call stop/1 again on an already stopped instance
?assertEqual(ok, emqx_resource:stop(ResourceID)),
% Make sure it can be restarted and the healthchecks and queries work properly
?assertEqual(ok, emqx_resource:restart(ResourceID)),
% async restart, need to wait for the resource
timer:sleep(500),
{ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
emqx_resource:get_instance(ResourceID),
?assertEqual({ok, connected}, emqx_resource:health_check(ResourceID)),
%% Check that everything is working again by performing a query
perform_query(ResourceID, Channel),
% Stop and remove the resource in one go.
?assertEqual(ok, emqx_resource:remove_local(ResourceID)),
?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
% Should not even be able to get the resource data out of ets now unlike just stopping.
?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceID)).
%%------------------------------------------------------------------------------
%% Helpers
%%------------------------------------------------------------------------------
perform_query(PoolName, Channel) ->
%% Send message to queue:
ok = emqx_resource:query(PoolName, {query, test_data()}),
%% Get the message from queue:
ok = receive_simple_test_message(Channel).
receive_simple_test_message(Channel) ->
#'basic.consume_ok'{consumer_tag = ConsumerTag} =
amqp_channel:call(
Channel,
#'basic.consume'{
queue = rabbit_mq_queue()
}
),
receive
%% This is the first message received
#'basic.consume_ok'{} ->
ok
end,
receive
{#'basic.deliver'{delivery_tag = DeliveryTag}, Content} ->
Expected = test_data(),
?assertEqual(Expected, emqx_utils_json:decode(Content#amqp_msg.payload)),
%% Ack the message
amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = DeliveryTag}),
%% Cancel the consumer
#'basic.cancel_ok'{consumer_tag = ConsumerTag} =
amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = ConsumerTag}),
ok
end.
rabbitmq_config() ->
Config =
#{
server => rabbit_mq_host(),
port => 5672,
username => <<"guest">>,
password => <<"guest">>,
exchange => rabbit_mq_exchange(),
routing_key => rabbit_mq_routing_key()
},
#{<<"config">> => Config}.
test_data() ->
#{<<"msg_field">> => <<"Hello">>}.

View File

@ -0,0 +1,2 @@
toxiproxy
rocketmq

View File

@ -0,0 +1,8 @@
{erl_opts, [debug_info]}.
{deps, [
{rocketmq, {git, "https://github.com/emqx/rocketmq-client-erl.git", {tag, "v0.5.1"}}},
{emqx_connector, {path, "../../apps/emqx_connector"}},
{emqx_resource, {path, "../../apps/emqx_resource"}},
{emqx_bridge, {path, "../../apps/emqx_bridge"}}
]}.

View File

@ -1,8 +1,8 @@
{application, emqx_bridge_rocketmq, [
{description, "EMQX Enterprise RocketMQ Bridge"},
{vsn, "0.1.0"},
{vsn, "0.1.1"},
{registered, []},
{applications, [kernel, stdlib]},
{applications, [kernel, stdlib, rocketmq]},
{env, []},
{modules, []},
{links, []}

View File

@ -1,7 +1,7 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_bridge_rocketmq).
-module(emqx_bridge_rocketmq).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
@ -82,7 +82,7 @@ fields("config") ->
#{desc => ?DESC("local_topic"), required => false}
)}
] ++ emqx_resource_schema:fields("resource_opts") ++
(emqx_ee_connector_rocketmq:fields(config) --
(emqx_bridge_rocketmq_connector:fields(config) --
emqx_connector_schema_lib:prepare_statement_fields());
fields("post") ->
[type_field(), name_field() | fields("config")];

View File

@ -1,8 +1,8 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_connector_rocketmq).
-module(emqx_bridge_rocketmq_connector).
-behaviour(emqx_resource).
@ -52,9 +52,10 @@ fields(config) ->
{secret_key,
mk(
binary(),
#{default => <<>>, desc => ?DESC("secret_key")}
#{default => <<>>, desc => ?DESC("secret_key"), sensitive => true}
)},
{security_token, mk(binary(), #{default => <<>>, desc => ?DESC(security_token)})},
{security_token,
mk(binary(), #{default => <<>>, desc => ?DESC(security_token), sensitive => true})},
{sync_timeout,
mk(
emqx_schema:duration(),

View File

@ -2,7 +2,7 @@
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_bridge_rocketmq_SUITE).
-module(emqx_bridge_rocketmq_SUITE).
-compile(nowarn_export_all).
-compile(export_all).

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this License's text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this License's text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,36 @@
# EMQX SQL Server Bridge
[Microsoft SQL Server](https://www.microsoft.com/en-us/sql-server) is a relational database management system (RDBMS) that is developed and owned by Microsoft.
Microsoft SQL Server offers a wide range of features, including support for high availability and disaster recovery,
integration with other Microsoft products and services, and advanced security and encryption options.
It also provides tools for data warehousing, business intelligence, and analytics, making it a versatile and powerful database platform.
The application is used to connect EMQX and Microsoft SQL Server.
Users can create a rule and easily ingest IoT data into Microsoft SQL Server by leveraging
[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
## Documentation links
For more information about Microsoft SQL Server, please see the [official site](https://learn.microsoft.com/en-us/sql/sql-server/?view=sql-server-ver16)
## Configurations
Please see [Ingest data into SQL Server](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-sqlserver.html) for more detailed information.
## HTTP APIs
- Several APIs are provided for bridge management, including creating, updating, getting, stopping or restarting, and listing bridges.
Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information.
## Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
## License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,2 @@
toxiproxy
sqlserver

View File

@ -0,0 +1,5 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-define(SQLSERVER_DEFAULT_PORT, 1433).

View File

@ -0,0 +1,10 @@
%% -*- mode: erlang; -*-
{erl_opts, [debug_info]}.
{deps, [ {emqx_connector, {path, "../../apps/emqx_connector"}}
, {emqx_resource, {path, "../../apps/emqx_resource"}}
, {emqx_bridge, {path, "../../apps/emqx_bridge"}}
]}.
{shell, [
{apps, [emqx_bridge_sqlserver]}
]}.

View File

@ -0,0 +1,9 @@
{application, emqx_bridge_sqlserver, [
{description, "EMQX Enterprise SQL Server Bridge"},
{vsn, "0.1.0"},
{registered, []},
{applications, [kernel, stdlib, odbc]},
{env, []},
{modules, []},
{links, []}
]}.

View File

@ -1,7 +1,7 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_bridge_sqlserver).
-module(emqx_bridge_sqlserver).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
@ -96,7 +96,7 @@ fields("config") ->
}
)}
] ++
(emqx_ee_connector_sqlserver:fields(config) --
(emqx_bridge_sqlserver_connector:fields(config) --
emqx_connector_schema_lib:prepare_statement_fields());
fields("creation_opts") ->
emqx_resource_schema:fields("creation_opts");

View File

@ -2,14 +2,15 @@
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_connector_sqlserver).
-module(emqx_bridge_sqlserver_connector).
-behaviour(emqx_resource).
-include("emqx_bridge_sqlserver.hrl").
-include_lib("kernel/include/file.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").
-include_lib("emqx_ee_connector/include/emqx_ee_connector.hrl").
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
@ -51,7 +52,7 @@
-define(SYNC_QUERY_MODE, handover).
-define(SQLSERVER_HOST_OPTIONS, #{
default_port => 1433
default_port => ?SQLSERVER_DEFAULT_PORT
}).
-define(REQUEST_TIMEOUT(RESOURCE_OPTS),

View File

@ -2,11 +2,12 @@
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_bridge_sqlserver_SUITE).
-module(emqx_bridge_sqlserver_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include("emqx_bridge_sqlserver/include/emqx_bridge_sqlserver.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
@ -59,24 +60,30 @@
%% How to run it locally (all commands are run in $PROJ_ROOT dir):
%% A: run ct on host
%% 1. Start all deps services
%% ```bash
%% sudo docker compose -f .ci/docker-compose-file/docker-compose.yaml \
%% -f .ci/docker-compose-file/docker-compose-sqlserver.yaml \
%% -f .ci/docker-compose-file/docker-compose-toxiproxy.yaml \
%% up --build
%% ```
%%
%% 2. Run use cases with special environment variables
%% 11433 is toxiproxy exported port.
%% Local:
%% ```
%% ```bash
%% SQLSERVER_HOST=toxiproxy SQLSERVER_PORT=11433 \
%% PROXY_HOST=toxiproxy PROXY_PORT=1433 \
%% ./rebar3 as test ct -c -v --readable true --name ct@127.0.0.1 --suite lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_sqlserver_SUITE.erl
%% ./rebar3 as test ct -c -v --readable true --name ct@127.0.0.1 \
%% --suite apps/emqx_bridge_sqlserver/test/emqx_bridge_sqlserver_SUITE.erl
%% ```
%%
%% B: run ct in docker container
%% run script:
%% ./scripts/ct/run.sh --ci --app lib-ee/emqx_ee_bridge/ \
%% -- --name 'test@127.0.0.1' -c -v --readable true --suite lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_sqlserver_SUITE.erl
%% ```bash
%% ./scripts/ct/run.sh --ci --app apps/emqx_bridge_sqlserver/ -- \
%% --name 'test@127.0.0.1' -c -v --readable true \
%% --suite apps/emqx_bridge_sqlserver/test/emqx_bridge_sqlserver_SUITE.erl
%% ```
%%------------------------------------------------------------------------------
%% CT boilerplate
@ -391,7 +398,7 @@ t_bad_parameter(Config) ->
common_init(ConfigT) ->
Host = os:getenv("SQLSERVER_HOST", "toxiproxy"),
Port = list_to_integer(os:getenv("SQLSERVER_PORT", "1433")),
Port = list_to_integer(os:getenv("SQLSERVER_PORT", str(?SQLSERVER_DEFAULT_PORT))),
Config0 = [
{sqlserver_host, Host},
@ -631,7 +638,7 @@ conn_str([], Acc) ->
conn_str([{driver, Driver} | Opts], Acc) ->
conn_str(Opts, ["Driver=" ++ str(Driver) | Acc]);
conn_str([{host, Host} | Opts], Acc) ->
Port = proplists:get_value(port, Opts, "1433"),
Port = proplists:get_value(port, Opts, str(?SQLSERVER_DEFAULT_PORT)),
NOpts = proplists:delete(port, Opts),
conn_str(NOpts, ["Server=" ++ str(Host) ++ "," ++ str(Port) | Acc]);
conn_str([{port, Port} | Opts], Acc) ->

View File

@ -311,7 +311,7 @@ typename_to_spec("float()", _Mod) ->
typename_to_spec("integer()", _Mod) ->
#{type => number};
typename_to_spec("non_neg_integer()", _Mod) ->
#{type => number, minimum => 1};
#{type => number, minimum => 0};
typename_to_spec("number()", _Mod) ->
#{type => number};
typename_to_spec("string()", _Mod) ->

View File

@ -343,7 +343,7 @@ fields(cluster_etcd) ->
?R_REF(emqx_schema, "ssl_client_opts"),
#{
desc => ?DESC(cluster_etcd_ssl),
alias => [ssl],
aliases => [ssl],
'readOnly' => true
}
)}
@ -1286,7 +1286,7 @@ cluster_options(dns, Conf) ->
{type, conf_get("cluster.dns.record_type", Conf)}
];
cluster_options(etcd, Conf) ->
Namespace = "cluster.etcd.ssl",
Namespace = "cluster.etcd.ssl_options",
SslOpts = fun(C) ->
Options = keys(Namespace, C),
lists:map(fun(Key) -> {to_atom(Key), conf_get([Namespace, Key], Conf)} end, Options)

View File

@ -473,7 +473,7 @@ preprocess_request(
method => emqx_plugin_libs_rule:preproc_tmpl(to_bin(Method)),
path => emqx_plugin_libs_rule:preproc_tmpl(Path),
body => maybe_preproc_tmpl(body, Req),
headers => preproc_headers(Headers),
headers => wrap_auth_header(preproc_headers(Headers)),
request_timeout => maps:get(request_timeout, Req, 30000),
max_retries => maps:get(max_retries, Req, 2)
}.
@ -503,6 +503,36 @@ preproc_headers(Headers) when is_list(Headers) ->
Headers
).
wrap_auth_header(Headers) ->
lists:map(fun maybe_wrap_auth_header/1, Headers).
maybe_wrap_auth_header({[{str, Key}] = StrKey, Val}) ->
{_, MaybeWrapped} = maybe_wrap_auth_header({Key, Val}),
{StrKey, MaybeWrapped};
maybe_wrap_auth_header({Key, Val} = Header) when
is_binary(Key), (size(Key) =:= 19 orelse size(Key) =:= 13)
->
%% We check the size of potential keys in the guard above and consider only
%% those that match the number of characters of either "Authorization" or
%% "Proxy-Authorization".
case try_bin_to_lower(Key) of
<<"authorization">> ->
{Key, emqx_secret:wrap(Val)};
<<"proxy-authorization">> ->
{Key, emqx_secret:wrap(Val)};
_Other ->
Header
end;
maybe_wrap_auth_header(Header) ->
Header.
try_bin_to_lower(Bin) ->
try iolist_to_binary(string:lowercase(Bin)) of
LowercaseBin -> LowercaseBin
catch
_:_ -> Bin
end.
maybe_preproc_tmpl(Key, Conf) ->
case maps:get(Key, Conf, undefined) of
undefined -> undefined;
@ -537,7 +567,7 @@ proc_headers(HeaderTks, Msg) ->
fun({K, V}) ->
{
emqx_plugin_libs_rule:proc_tmpl(K, Msg),
emqx_plugin_libs_rule:proc_tmpl(V, Msg)
emqx_plugin_libs_rule:proc_tmpl(emqx_secret:unwrap(V), Msg)
}
end,
HeaderTks
@ -610,7 +640,8 @@ reply_delegator(ReplyFunAndArgs, Result) ->
Reason =:= econnrefused;
Reason =:= timeout;
Reason =:= normal;
Reason =:= {shutdown, normal}
Reason =:= {shutdown, normal};
Reason =:= {shutdown, closed}
->
Result1 = {error, {recoverable_error, Reason}},
emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result1);
@ -628,21 +659,13 @@ is_sensitive_key([{str, StringKey}]) ->
is_sensitive_key(Atom) when is_atom(Atom) ->
is_sensitive_key(erlang:atom_to_binary(Atom));
is_sensitive_key(Bin) when is_binary(Bin), (size(Bin) =:= 19 orelse size(Bin) =:= 13) ->
try
%% This is wrapped in a try-catch since we don't know that Bin is a
%% valid string so string:lowercase/1 might throw an exception.
%%
%% We want to convert this to lowercase since the http header fields
%% are case insensitive, which means that a user of the Webhook bridge
%% can write this field name in many different ways.
LowercaseBin = iolist_to_binary(string:lowercase(Bin)),
case LowercaseBin of
case try_bin_to_lower(Bin) of
<<"authorization">> -> true;
<<"proxy-authorization">> -> true;
_ -> false
end
catch
_:_ -> false
end;
is_sensitive_key(_) ->
false.
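
The wrapping above relies on `emqx_secret` closures; below is a minimal sketch of the round trip this code depends on, assuming `emqx_secret:wrap/1` returns a zero-arity fun and `emqx_secret:unwrap/1` evaluates it while passing non-wrapped terms through unchanged:

```erlang
%% Sketch only: illustrates the behaviour wrap_auth_header/1 and
%% proc_headers/2 rely on; not part of the diff above.
Secret = <<"Bearer s3cr3t">>,
Wrapped = emqx_secret:wrap(Secret),
%% The wrapped value is an opaque fun, so the raw credential is not
%% exposed when the request config is logged or introspected.
true = is_function(Wrapped, 0),
%% It is only unwrapped right before the outgoing request is rendered.
Secret = emqx_secret:unwrap(Wrapped),
%% Non-wrapped header values pass through unwrap/1 unchanged.
<<"plain">> = emqx_secret:unwrap(<<"plain">>).
```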

View File

@ -0,0 +1,90 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_connector_http_tests).
-include_lib("eunit/include/eunit.hrl").
-define(MY_SECRET, <<"my_precious">>).
wrap_auth_headers_test_() ->
{setup,
fun() ->
meck:expect(ehttpc_sup, start_pool, 2, {ok, foo}),
meck:expect(ehttpc, request, fun(_, _, Req, _, _) -> {ok, 200, Req} end),
meck:expect(ehttpc_pool, pick_worker, 1, self()),
[ehttpc_sup, ehttpc, ehttpc_pool]
end,
fun meck:unload/1, fun(_) ->
Config = #{
base_url => #{
scheme => http,
host => "localhost",
port => 18083,
path => "/status"
},
connect_timeout => 1000,
pool_type => random,
pool_size => 1,
request => #{
method => get,
path => "/status",
headers => auth_headers()
}
},
{ok, #{request := #{headers := Headers}} = State} = emqx_connector_http:on_start(
<<"test">>, Config
),
{ok, 200, Req} = emqx_connector_http:on_query(foo, {send_message, #{}}, State),
Tests =
[
?_assert(is_wrapped(V))
|| H <- Headers, is_tuple({K, V} = H), is_auth_header(untmpl(K))
],
[
?_assertEqual(4, length(Tests)),
?_assert(is_unwrapped_headers(element(2, Req)))
| Tests
]
end}.
auth_headers() ->
[
{<<"Authorization">>, ?MY_SECRET},
{<<"authorization">>, ?MY_SECRET},
{<<"Proxy-Authorization">>, ?MY_SECRET},
{<<"proxy-authorization">>, ?MY_SECRET},
{<<"X-Custom-Header">>, <<"foobar">>}
].
is_auth_header(<<"Authorization">>) -> true;
is_auth_header(<<"Proxy-Authorization">>) -> true;
is_auth_header(<<"authorization">>) -> true;
is_auth_header(<<"proxy-authorization">>) -> true;
is_auth_header(_Other) -> false.
is_wrapped(Secret) when is_function(Secret) ->
untmpl(emqx_secret:unwrap(Secret)) =:= ?MY_SECRET;
is_wrapped(_Other) ->
false.
untmpl([{_, V} | _]) -> V.
is_unwrapped_headers(Headers) ->
lists:all(fun is_unwrapped_header/1, Headers).
is_unwrapped_header({_, V}) when is_function(V) -> false;
is_unwrapped_header({_, [{str, _V}]}) -> throw(unexpected_tmpl_token);
is_unwrapped_header(_) -> true.

View File

@ -102,9 +102,7 @@ fields("https") ->
server_ssl_opts() ->
Opts0 = emqx_schema:server_ssl_opts_schema(#{}, true),
Opts1 = exclude_fields(["fail_if_no_peer_cert"], Opts0),
{value, {_, Meta}, Opts2} = lists:keytake("password", 1, Opts1),
[{"password", Meta#{importance => ?IMPORTANCE_HIDDEN}} | Opts2].
exclude_fields(["fail_if_no_peer_cert"], Opts0).
exclude_fields([], Fields) ->
Fields;

View File

@ -898,6 +898,8 @@ typename_to_spec("bucket_name()", _Mod) ->
#{type => string, example => <<"retainer">>};
typename_to_spec("json_binary()", _Mod) ->
#{type => string, example => <<"{\"a\": [1,true]}">>};
typename_to_spec("port_number()", _Mod) ->
range("1..65535");
typename_to_spec(Name, Mod) ->
Spec = range(Name),
Spec1 = remote_module_type(Spec, Name, Mod),

View File

@ -25,6 +25,7 @@ all() ->
emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
emqx_common_test_helpers:load_config(emqx_dashboard_schema, <<"dashboard {}">>),
emqx_mgmt_api_test_util:init_suite([emqx_conf]),
ok = change_i18n_lang(en),
Config.

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_gateway, [
{description, "The Gateway management application"},
{vsn, "0.1.15"},
{vsn, "0.1.16"},
{registered, []},
{mod, {emqx_gateway_app, []}},
{applications, [kernel, stdlib, emqx, emqx_authn, emqx_ctl]},

View File

@ -78,7 +78,7 @@
-define(DEFAULT_GC_OPTS, #{count => 1000, bytes => 1024 * 1024}).
-define(DEFAULT_OOM_POLICY, #{
max_heap_size => 4194304,
max_message_queue_len => 32000
max_mailbox_size => 32000
}).
-elvis([{elvis_style, god_modules, disable}]).

View File

@ -274,7 +274,7 @@ t_load_unload_gateway(_) ->
?assertException(
error,
{config_not_found, [gateway, stomp]},
{config_not_found, [<<"gateway">>, stomp]},
emqx:get_raw_config([gateway, stomp])
),
ok.
@ -307,7 +307,7 @@ t_load_remove_authn(_) ->
?assertException(
error,
{config_not_found, [gateway, stomp, authentication]},
{config_not_found, [<<"gateway">>, stomp, authentication]},
emqx:get_raw_config([gateway, stomp, authentication])
),
ok.
@ -352,7 +352,7 @@ t_load_remove_listeners(_) ->
?assertException(
error,
{config_not_found, [gateway, stomp, listeners, tcp, default]},
{config_not_found, [<<"gateway">>, stomp, listeners, tcp, default]},
emqx:get_raw_config([gateway, stomp, listeners, tcp, default])
),
ok.
@ -401,7 +401,7 @@ t_load_remove_listener_authn(_) ->
Path = [gateway, stomp, listeners, tcp, default, authentication],
?assertException(
error,
{config_not_found, Path},
{config_not_found, [<<"gateway">>, stomp, listeners, tcp, default, authentication]},
emqx:get_raw_config(Path)
),
ok.
@ -421,7 +421,7 @@ t_load_gateway_with_certs_content(_) ->
assert_ssl_confs_files_deleted(SslConf),
?assertException(
error,
{config_not_found, [gateway, stomp]},
{config_not_found, [<<"gateway">>, stomp]},
emqx:get_raw_config([gateway, stomp])
),
ok.
@ -489,7 +489,7 @@ t_add_listener_with_certs_content(_) ->
?assertException(
error,
{config_not_found, [gateway, stomp, listeners, ssl, default]},
{config_not_found, [<<"gateway">>, stomp, listeners, ssl, default]},
emqx:get_raw_config([gateway, stomp, listeners, ssl, default])
),
ok.

View File

@ -41,7 +41,7 @@
-define(DO_IT, graceful_shutdown).
%% @doc This API is called to shutdown the Erlang VM by RPC call from remote shell node.
%% The shutown of apps is delegated to a to a process instead of doing it in the RPC spawned
%% The shutdown of apps is delegated to a process instead of doing it in the RPC spawned
%% process which has a remote group leader.
start_link() ->
{ok, _} = gen_server:start_link({local, ?TERMINATOR}, ?MODULE, [], []).
@ -87,8 +87,9 @@ handle_cast(_Cast, State) ->
handle_call(?DO_IT, _From, State) ->
try
emqx_machine_boot:stop_apps(),
emqx_machine_boot:stop_port_apps()
%% stop port apps before stopping other apps.
emqx_machine_boot:stop_port_apps(),
emqx_machine_boot:stop_apps()
catch
C:E:St ->
Apps = [element(1, A) || A <- application:which_applications()],

View File

@ -28,7 +28,8 @@
config_reset/3,
configs/3,
get_full_config/0,
global_zone_configs/3
global_zone_configs/3,
limiter/3
]).
-define(PREFIX, "/configs/").
@ -42,7 +43,6 @@
<<"alarm">>,
<<"sys_topics">>,
<<"sysmon">>,
<<"limiter">>,
<<"log">>,
<<"persistent_session_store">>,
<<"zones">>
@ -57,7 +57,8 @@ paths() ->
[
"/configs",
"/configs_reset/:rootname",
"/configs/global_zone"
"/configs/global_zone",
"/configs/limiter"
] ++
lists:map(fun({Name, _Type}) -> ?PREFIX ++ binary_to_list(Name) end, config_list()).
@ -147,6 +148,28 @@ schema("/configs/global_zone") ->
}
}
};
schema("/configs/limiter") ->
#{
'operationId' => limiter,
get => #{
tags => ?TAGS,
description => <<"Get the node-level limiter configs">>,
responses => #{
200 => hoconsc:mk(hoconsc:ref(emqx_limiter_schema, limiter)),
404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"config not found">>)
}
},
put => #{
tags => ?TAGS,
description => <<"Update the node-level limiter configs">>,
'requestBody' => hoconsc:mk(hoconsc:ref(emqx_limiter_schema, limiter)),
responses => #{
200 => hoconsc:mk(hoconsc:ref(emqx_limiter_schema, limiter)),
400 => emqx_dashboard_swagger:error_codes(['UPDATE_FAILED']),
403 => emqx_dashboard_swagger:error_codes(['UPDATE_FAILED'])
}
}
};
schema(Path) ->
{RootKey, {_Root, Schema}} = find_schema(Path),
#{
@ -272,6 +295,22 @@ configs(get, Params, _Req) ->
{200, Res}
end.
limiter(get, _Params, _Req) ->
{200, format_limiter_config(get_raw_config(limiter))};
limiter(put, #{body := NewConf}, _Req) ->
case emqx_conf:update([limiter], NewConf, ?OPTS) of
{ok, #{raw_config := RawConf}} ->
{200, format_limiter_config(RawConf)};
{error, {permission_denied, Reason}} ->
{403, #{code => 'UPDATE_FAILED', message => Reason}};
{error, Reason} ->
{400, #{code => 'UPDATE_FAILED', message => ?ERR_MSG(Reason)}}
end.
format_limiter_config(RawConf) ->
Shorts = lists:map(fun erlang:atom_to_binary/1, emqx_limiter_schema:short_paths()),
maps:with(Shorts, RawConf).
conf_path_reset(Req) ->
<<"/api/v5", ?PREFIX_RESET, Path/binary>> = cowboy_req:path(Req),
string:lexemes(Path, "/ ").
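
For illustration, a minimal sketch of what `format_limiter_config/1` is expected to produce, assuming `emqx_limiter_schema:short_paths/0` returns short-path keys such as `max_conn_rate`, `messages_rate` and `bytes_rate` (the concrete list comes from the schema and may differ):

```erlang
%% Sketch: only the node-level short-path keys survive the maps:with/2
%% filter, so GET /configs/limiter returns a small, flat object.
RawConf = #{
    <<"messages_rate">> => <<"1000/s">>,
    <<"bytes_rate">> => <<"10MB/s">>,
    <<"client">> => #{<<"messages_rate">> => <<"100/s">>}
},
%% Assumed short paths; the real values come from emqx_limiter_schema.
Shorts = [<<"max_conn_rate">>, <<"messages_rate">>, <<"bytes_rate">>],
#{<<"messages_rate">> := <<"1000/s">>, <<"bytes_rate">> := <<"10MB/s">>} =
    maps:with(Shorts, RawConf).
%% The nested <<"client">> section is dropped from the API response.
```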

View File

@ -615,13 +615,18 @@ listeners([]) ->
{error, _} -> [];
MC -> [{max_conns, MC}]
end,
ShutdownCount =
case emqx_listeners:shutdown_count(ID, Bind) of
{error, _} -> [];
SC -> [{shutdown_count, SC}]
end,
Info =
[
{listen_on, {string, emqx_listeners:format_bind(Bind)}},
{acceptors, Acceptors},
{proxy_protocol, ProxyProtocol},
{running, Running}
] ++ CurrentConns ++ MaxConn,
] ++ CurrentConns ++ MaxConn ++ ShutdownCount,
emqx_ctl:print("~ts~n", [ID]),
lists:foreach(fun indent_print/1, Info)
end,

View File

@ -134,6 +134,9 @@
%% when calling emqx_resource:stop/1
-callback on_stop(resource_id(), resource_state()) -> term().
%% when calling emqx_resource:get_callback_mode/1
-callback callback_mode() -> callback_mode().
%% when calling emqx_resource:query/3
-callback on_query(resource_id(), Request :: term(), resource_state()) -> query_result().

View File

@ -482,14 +482,16 @@ flush(Data0) ->
Data1 = cancel_flush_timer(Data0),
CurrentCount = queue_count(Q0),
IsFull = is_inflight_full(InflightTID),
?tp(buffer_worker_flush, #{
?tp_ignore_side_effects_in_prod(buffer_worker_flush, #{
queued => CurrentCount,
is_inflight_full => IsFull,
inflight => inflight_count(InflightTID)
}),
case {CurrentCount, IsFull} of
{0, _} ->
?tp(buffer_worker_queue_drained, #{inflight => inflight_count(InflightTID)}),
?tp_ignore_side_effects_in_prod(buffer_worker_queue_drained, #{
inflight => inflight_count(InflightTID)
}),
{keep_state, Data1};
{_, true} ->
?tp(buffer_worker_flush_but_inflight_full, #{}),
@ -620,7 +622,7 @@ do_flush(
}),
flush_worker(self());
false ->
?tp(buffer_worker_queue_drained, #{
?tp_ignore_side_effects_in_prod(buffer_worker_queue_drained, #{
inflight => inflight_count(InflightTID)
}),
ok
@ -701,7 +703,7 @@ do_flush(#{queue := Q1} = Data0, #{
Data2 =
case {CurrentCount > 0, CurrentCount >= BatchSize} of
{false, _} ->
?tp(buffer_worker_queue_drained, #{
?tp_ignore_side_effects_in_prod(buffer_worker_queue_drained, #{
inflight => inflight_count(InflightTID)
}),
Data1;
@ -1279,13 +1281,10 @@ append_queue(Id, Index, Q, Queries) ->
%% the inflight queue for async query
-define(MAX_SIZE_REF, max_size).
-define(SIZE_REF, size).
-define(BATCH_COUNT_REF, batch_count).
-define(INITIAL_TIME_REF, initial_time).
-define(INITIAL_MONOTONIC_TIME_REF, initial_monotonic_time).
%% NOTE
%% There are 4 metadata rows in an inflight table, keyed by atoms declared above.
-define(INFLIGHT_META_ROWS, 4).
inflight_new(InfltWinSZ, Id, Index) ->
TableId = ets:new(
emqx_resource_buffer_worker_inflight_tab,
@ -1295,6 +1294,7 @@ inflight_new(InfltWinSZ, Id, Index) ->
%% we use this counter because we might deal with batches as
%% elements.
inflight_append(TableId, {?SIZE_REF, 0}, Id, Index),
inflight_append(TableId, {?BATCH_COUNT_REF, 0}, Id, Index),
inflight_append(TableId, {?INITIAL_TIME_REF, erlang:system_time()}, Id, Index),
inflight_append(
TableId, {?INITIAL_MONOTONIC_TIME_REF, make_request_ref()}, Id, Index
@ -1344,10 +1344,7 @@ is_inflight_full(InflightTID) ->
Size >= MaxSize.
inflight_count(InflightTID) ->
case ets:info(InflightTID, size) of
undefined -> 0;
Size -> max(0, Size - ?INFLIGHT_META_ROWS)
end.
emqx_utils_ets:lookup_value(InflightTID, ?BATCH_COUNT_REF, 0).
inflight_num_msgs(InflightTID) ->
[{_, Size}] = ets:lookup(InflightTID, ?SIZE_REF),
@ -1435,16 +1432,16 @@ store_async_worker_reference(InflightTID, Ref, WorkerMRef) when
ack_inflight(undefined, _Ref, _Id, _Index) ->
false;
ack_inflight(InflightTID, Ref, Id, Index) ->
Count =
{Count, Removed} =
case ets:take(InflightTID, Ref) of
[?INFLIGHT_ITEM(Ref, ?QUERY(_, _, _, _), _IsRetriable, _WorkerMRef)] ->
1;
{1, true};
[?INFLIGHT_ITEM(Ref, [?QUERY(_, _, _, _) | _] = Batch, _IsRetriable, _WorkerMRef)] ->
length(Batch);
{length(Batch), true};
[] ->
0
{0, false}
end,
ok = dec_inflight(InflightTID, Count),
ok = dec_inflight_remove(InflightTID, Count, Removed),
IsKnownRef = (Count > 0),
case IsKnownRef of
true ->
@ -1472,15 +1469,27 @@ mark_inflight_items_as_retriable(Data, WorkerMRef) ->
%% used to update a batch after dropping expired individual queries.
update_inflight_item(InflightTID, Ref, NewBatch, NumExpired) ->
_ = ets:update_element(InflightTID, Ref, {?ITEM_IDX, NewBatch}),
ok = dec_inflight(InflightTID, NumExpired).
ok = dec_inflight_update(InflightTID, NumExpired).
inc_inflight(InflightTID, Count) ->
_ = ets:update_counter(InflightTID, ?SIZE_REF, {2, Count}),
_ = ets:update_counter(InflightTID, ?BATCH_COUNT_REF, {2, 1}),
ok.
dec_inflight(_InflightTID, 0) ->
dec_inflight_remove(_InflightTID, _Count = 0, _Removed = false) ->
ok;
dec_inflight(InflightTID, Count) when Count > 0 ->
dec_inflight_remove(InflightTID, _Count = 0, _Removed = true) ->
_ = ets:update_counter(InflightTID, ?BATCH_COUNT_REF, {2, -1, 0, 0}),
ok;
dec_inflight_remove(InflightTID, Count, _Removed = true) when Count > 0 ->
%% If Count > 0, it must have been removed
_ = ets:update_counter(InflightTID, ?BATCH_COUNT_REF, {2, -1, 0, 0}),
_ = ets:update_counter(InflightTID, ?SIZE_REF, {2, -Count, 0, 0}),
ok.
dec_inflight_update(_InflightTID, _Count = 0) ->
ok;
dec_inflight_update(InflightTID, Count) when Count > 0 ->
_ = ets:update_counter(InflightTID, ?SIZE_REF, {2, -Count, 0, 0}),
ok.
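
The `{2, -1, 0, 0}` update operations above use the threshold form of `ets:update_counter/3`; here is a standalone sketch of that behaviour (plain OTP, unrelated to the buffer worker state):

```erlang
%% Sketch: {Pos, Incr, Threshold, SetValue} clamps the counter, so a
%% duplicate ack can never drive the inflight batch count below zero.
Tab = ets:new(inflight_sketch, [set, public]),
true = ets:insert(Tab, {batch_count, 1}),
0 = ets:update_counter(Tab, batch_count, {2, -1, 0, 0}),
%% A second decrement would fall below the threshold (0), so the value
%% is reset to SetValue (also 0) instead of going negative.
0 = ets:update_counter(Tab, batch_count, {2, -1, 0, 0}),
true = ets:delete(Tab).
```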

View File

@ -2337,7 +2337,7 @@ t_expiration_retry(_Config) ->
resume_interval => 300
}
),
do_t_expiration_retry(single).
do_t_expiration_retry().
t_expiration_retry_batch(_Config) ->
emqx_connector_demo:set_callback_mode(always_sync),
@ -2354,9 +2354,9 @@ t_expiration_retry_batch(_Config) ->
resume_interval => 300
}
),
do_t_expiration_retry(batch).
do_t_expiration_retry().
do_t_expiration_retry(IsBatch) ->
do_t_expiration_retry() ->
ResumeInterval = 300,
?check_trace(
begin
@ -2409,15 +2409,10 @@ do_t_expiration_retry(IsBatch) ->
ResumeInterval * 10
),
SuccessEventKind =
case IsBatch of
batch -> buffer_worker_retry_inflight_succeeded;
single -> buffer_worker_flush_ack
end,
{ok, {ok, _}} =
?wait_async_action(
emqx_resource:simple_sync_query(?ID, resume),
#{?snk_kind := SuccessEventKind},
#{?snk_kind := buffer_worker_retry_inflight_succeeded},
ResumeInterval * 5
),

View File

@ -230,7 +230,7 @@ check_oom(Policy) ->
check_oom(_Pid, #{enable := false}) ->
ok;
check_oom(Pid, #{
max_message_queue_len := MaxQLen,
max_mailbox_size := MaxQLen,
max_heap_size := MaxHeapSize
}) ->
case process_info(Pid, [message_queue_len, total_heap_size]) of
@ -581,6 +581,15 @@ is_sensitive_key(<<"password">>) -> true;
is_sensitive_key(secret) -> true;
is_sensitive_key("secret") -> true;
is_sensitive_key(<<"secret">>) -> true;
is_sensitive_key(secret_key) -> true;
is_sensitive_key("secret_key") -> true;
is_sensitive_key(<<"secret_key">>) -> true;
is_sensitive_key(security_token) -> true;
is_sensitive_key("security_token") -> true;
is_sensitive_key(<<"security_token">>) -> true;
is_sensitive_key(aws_secret_access_key) -> true;
is_sensitive_key("aws_secret_access_key") -> true;
is_sensitive_key(<<"aws_secret_access_key">>) -> true;
is_sensitive_key(_) -> false.
redact(Term) ->

View File

@ -140,7 +140,7 @@ t_index_of(_) ->
t_check(_) ->
Policy = #{
max_message_queue_len => 10,
max_mailbox_size => 10,
max_heap_size => 1024 * 1024 * 8,
enable => true
},

View File

@ -451,7 +451,7 @@ find_emqx_process() {
if [ -n "${EMQX_NODE__NAME:-}" ]; then
# if node name is provided, filter by node name
# shellcheck disable=SC2009
ps -ef | $GREP '[e]mqx' | $GREP -v -E '(remsh|nodetool)' | $GREP -E "\s\-s?name\s${EMQX_NODE__NAME}" | $GREP -oE "\-[r]oot ${RUNNER_ROOT_DIR}.*" || true
ps -ef | $GREP '[e]mqx' | $GREP -v -E '(remsh|nodetool)' | $GREP -E "\s-s?name\s${EMQX_NODE__NAME}" | $GREP -oE "\-[r]oot ${RUNNER_ROOT_DIR}.*" || true
else
# shellcheck disable=SC2009
ps -ef | $GREP '[e]mqx' | $GREP -v -E '(remsh|nodetool)' | $GREP -oE "\-[r]oot ${RUNNER_ROOT_DIR}.*" || true
@ -482,7 +482,7 @@ RUNNING_NODES_COUNT="$(echo -e "$PS_LINE" | sed '/^\s*$/d' | wc -l)"
if [ "$IS_BOOT_COMMAND" = 'yes' ]; then
if [ "$RUNNING_NODES_COUNT" -gt 0 ] && [ "$COMMAND" != 'check_config' ]; then
running_node_name=$(echo -e "$PS_LINE" | $GREP -oE "\s\-s?name.*" | awk '{print $2}' || true)
running_node_name=$(echo -e "$PS_LINE" | $GREP -oE "\s-s?name.*" | awk '{print $2}' || true)
if [ -n "$running_node_name" ] && [ "$running_node_name" = "${EMQX_NODE__NAME:-}" ]; then
echo "Node ${running_node_name} is already running!"
exit 1
@ -520,10 +520,10 @@ else
# would try to stop the new node instead.
if [ "$RUNNING_NODES_COUNT" -eq 1 ]; then
## only one emqx node is running, get running args from 'ps -ef' output
tmp_nodename=$(echo -e "$PS_LINE" | $GREP -oE "\s\-s?name.*" | awk '{print $2}' || true)
tmp_cookie=$(echo -e "$PS_LINE" | $GREP -oE "\s\-setcookie.*" | awk '{print $2}' || true)
tmp_nodename=$(echo -e "$PS_LINE" | $GREP -oE "\s-s?name.*" | awk '{print $2}' || true)
tmp_cookie=$(echo -e "$PS_LINE" | $GREP -oE "\s-setcookie.*" | awk '{print $2}' || true)
SSL_DIST_OPTFILE="$(echo -e "$PS_LINE" | $GREP -oE '\-ssl_dist_optfile\s.+\s' | awk '{print $2}' || true)"
tmp_ticktime="$(echo -e "$PS_LINE" | $GREP -oE '\s\-kernel\snet_ticktime\s.+\s' | awk '{print $3}' || true)"
tmp_ticktime="$(echo -e "$PS_LINE" | $GREP -oE '\s-kernel\snet_ticktime\s.+\s' | awk '{print $3}' || true)"
# data_dir is actually not needed, but kept anyway
tmp_datadir="$(echo -e "$PS_LINE" | $GREP -oE "\-emqx_data_dir.*" | sed -E 's#.+emqx_data_dir[[:blank:]]##g' | sed -E 's#[[:blank:]]--$##g' || true)"
if [ -z "$SSL_DIST_OPTFILE" ]; then
@ -536,7 +536,7 @@ else
else
if [ "$RUNNING_NODES_COUNT" -gt 1 ]; then
if [ -z "${EMQX_NODE__NAME:-}" ]; then
tmp_nodenames=$(echo -e "$PS_LINE" | $GREP -oE "\s\-s?name.*" | awk '{print $2}' | tr '\n' ' ')
tmp_nodenames=$(echo -e "$PS_LINE" | $GREP -oE "\s-s?name.*" | awk '{print $2}' | tr '\n' ' ')
logerr "More than one EMQX node found running (root dir: ${RUNNER_ROOT_DIR})"
logerr "Running nodes: $tmp_nodenames"
logerr "Make sure environment variable EMQX_NODE__NAME is set to indicate for which node this command is intended."
@ -806,6 +806,7 @@ generate_config() {
}
# check if a PID is down
# shellcheck disable=SC2317 # call in func `nodetool_shutdown()`
is_down() {
PID="$1"
if ps -p "$PID" >/dev/null; then
@ -937,7 +938,7 @@ case "$NAME" in
esac
SHORT_NAME="$(echo "$NAME" | awk -F'@' '{print $1}')"
HOST_NAME="$(echo "$NAME" | awk -F'@' '{print $2}')"
if ! (echo "$SHORT_NAME" | grep -q '^[0-9A-Za-z_\-]\+$'); then
if ! (echo "$SHORT_NAME" | $GREP -q '^[0-9A-Za-z_\-]\+$'); then
logerr "Invalid node name, should be of format '^[0-9A-Za-z_-]+$'."
exit 1
fi
@ -972,7 +973,7 @@ maybe_warn_default_cookie() {
## check if OTP version has mnesia_hook feature; if not, fallback to
## using Mnesia DB backend.
if [[ "$IS_BOOT_COMMAND" == 'yes' && "$(get_boot_config 'node.db_backend')" == "rlog" ]]; then
if ! (echo -e "$COMPATIBILITY_INFO" | grep -q 'MNESIA_OK'); then
if ! (echo -e "$COMPATIBILITY_INFO" | $GREP -q 'MNESIA_OK'); then
logerr "DB Backend is RLOG, but an incompatible OTP version has been detected. Falling back to using Mnesia DB backend."
export EMQX_NODE__DB_BACKEND=mnesia
export EMQX_NODE__DB_ROLE=core

build
View File

@ -339,13 +339,18 @@ make_docker() {
fi
local default_tag="emqx/${PROFILE%%-elixir}:${PKG_VSN}"
EMQX_IMAGE_TAG="${EMQX_IMAGE_TAG:-$default_tag}"
## extra_deps is a comma-separated list of Debian 11 package names
local extra_deps=''
if [[ "$PROFILE" = *enterprise* ]]; then
extra_deps='libsasl2-2'
fi
echo '_build' >> ./.dockerignore
set -x
docker build --no-cache --pull \
--build-arg BUILD_FROM="${EMQX_BUILDER}" \
--build-arg RUN_FROM="${EMQX_RUNNER}" \
--build-arg EMQX_NAME="$PROFILE" \
--build-arg EMQX_NAME="${PROFILE}" \
--build-arg EXTRA_DEPS="${extra_deps}" \
--tag "${EMQX_IMAGE_TAG}" \
-f "${EMQX_DOCKERFILE}" .
[[ "${DEBUG:-}" -eq 1 ]] || set +x

View File

@ -0,0 +1 @@
Add shutdown counter information to the `emqx ctl listeners` command.

View File

@ -0,0 +1 @@
Renamed `max_message_queue_len` to `max_mailbox_size` in the `force_shutdown` configuration. The old name is kept as an alias, so this change is backward compatible.

View File

@ -0,0 +1 @@
Wrap potentially sensitive data in `emqx_connector_http` if `Authorization` headers are being passed at initialization.

changes/ce/fix-10636.md
View File

@ -0,0 +1 @@
An issue with the MongoDB connector's "Max Overflow" parameter has been fixed. Previously, the minimum limit for the parameter was incorrectly set to 1; it now correctly supports a minimum value of 0.

View File

@ -0,0 +1 @@
Reduce memory footprint in hot code path.

View File

@ -0,0 +1,2 @@
Improved performance of the Webhook bridge when using synchronous query mode.
This should also improve the performance of other bridges when they are configured without batching.

View File

@ -0,0 +1,4 @@
Simplify the limiter configuration.
- Reduce the complexity of the limiter's configuration.
For example, users can now use `limiter.messages_rate = 1000/s` to quickly set the node-level limit for message publishing.
- Update the `configs/limiter` API to accommodate this refactoring.

changes/e5.0.3.en.md
View File

@ -0,0 +1,146 @@
# e5.0.3
## Enhancements
- [#10128](https://github.com/emqx/emqx/pull/10128) Add support for OCSP stapling for SSL MQTT listeners.
- [#10156](https://github.com/emqx/emqx/pull/10156) Change the configuration overlay order:
If it is a new installation of EMQX, `emqx.conf` + environment variables overlay on top of the API-updated configs (`cluster.hocon`).
If EMQX is upgraded from an older version (i.e., the `cluster-override.conf` file still exists in EMQX's `data` directory), then it is the same as before, that is, `cluster-override.conf` overlays on top of `emqx.conf` + environment variables.
Please note that `data/configs/cluster-override.conf` is considered deprecated. After upgrade, you are encouraged to update `emqx.conf` to delete configs which are overridden by `cluster-override.conf` and move the configs in `cluster-override.conf` to `cluster.hocon`.
After upgrade, EMQX will continue to read `local-override.conf` (if it exists) as before, but you are encouraged to merge the configs to `emqx.conf`.
- [#10164](https://github.com/emqx/emqx/pull/10164) Add CRL check support for TLS MQTT listeners.
- [#10207](https://github.com/emqx/emqx/pull/10207) Improve OpenAPI (swagger) document readability. Prior to this change, a few `summary` docs were lengthy and lacked translation; now the more concise `label` field from the schema i18n database is used instead.
- [#10210](https://github.com/emqx/emqx/pull/10210) Eliminated a few harmless error level logs.
Prior to this change, there might be some Mnesia callback (hook) failures occasionally occurring when stopping/restarting Mria.
Now the callbacks (hooks) are unregistered prior to stop. See also [Mria PR](https://github.com/emqx/mria/pull/133).
- [#10224](https://github.com/emqx/emqx/pull/10224) Add the option to customize `clusterIP` in Helm chart, so that a user may set it to a fixed IP.
- [#10263](https://github.com/emqx/emqx/pull/10263) Add command `eval-ex` for Elixir expression evaluation.
- [#10278](https://github.com/emqx/emqx/pull/10278) Refactor the directory structure of all gateways.
- [#10206](https://github.com/emqx/emqx/pull/10206) Support async query mode for all data bridges.
Prior to this change, setting the query mode of a resource such as a bridge to sync would force the buffer to call the underlying connector in a synchronous way, even if it supports async calls.
- [#10306](https://github.com/emqx/emqx/pull/10306) Add support for async query mode for most bridges.
This is a follow-up change after [#10206](https://github.com/emqx/emqx/pull/10206). Before this change, some bridges (Cassandra, MongoDB, MySQL, Postgres, Redis, RocketMQ, TDengine) were only allowed to be created with a sync query mode. Now async mode is also supported.
- [#10318](https://github.com/emqx/emqx/pull/10318) Prior to this enhancement, only double quotes (") were allowed in rule engine SQL language's FROM clause. Now it also supports single quotes (').
- [#10336](https://github.com/emqx/emqx/pull/10336) Add `/rule_engine` API endpoint to manage configuration of rule engine.
- [#10354](https://github.com/emqx/emqx/pull/10354) Provide more specific error messages when configured with a bad `max_heap_size` value. Log the current value and the maximum value when the `message_queue_too_long` error is thrown.
- [#10358](https://github.com/emqx/emqx/pull/10358) Hide `flapping_detect/conn_congestion/stats` configuration. Deprecate `flapping_detect.enable`.
- [#10359](https://github.com/emqx/emqx/pull/10359) Metrics now are not implicitly collected in places where API handlers don't make any use of them. Instead, a separate backplane RPC gathers cluster-wide metrics.
- [#10373](https://github.com/emqx/emqx/pull/10373) Deprecate the `trace.payload_encode` configuration. Add `payload_encode=[text,hidden,hex]` option when creating a trace via HTTP API.
- [#10381](https://github.com/emqx/emqx/pull/10381) Hide the `auto_subscribe` configuration items so that they can be modified later only through the HTTP API.
- [#10391](https://github.com/emqx/emqx/pull/10391) Hide a large number of advanced options to simplify the configuration file.
That includes `rewrite`, `topic_metric`, `persistent_session_store`, `overload_protection`,
`flapping_detect`, `conn_congestion`, `stats`, `auto_subscribe`, `broker_perf`,
`shared_subscription_group`, `slow_subs`, `ssl_options.user_lookup_fun` and some advance items
in `node` and `dashboard` section, [#10358](https://github.com/emqx/emqx/pull/10358),
[#10381](https://github.com/emqx/emqx/pull/10381), [#10385](https://github.com/emqx/emqx/pull/10385).
- [#10404](https://github.com/emqx/emqx/pull/10404) Change the default queue mode for buffer workers to `memory_only`. Before this change, the default queue mode was `volatile_offload`. When under high message rate pressure and when the resource is not keeping up with such rate, the buffer performance degraded a lot due to the constant disk operations.
- [#10140](https://github.com/emqx/emqx/pull/10140) Integrate Cassandra into bridges as a new backend. At the current stage, only Cassandra version 3.x is supported, not yet 4.x.
- [#10143](https://github.com/emqx/emqx/pull/10143) Add RocketMQ data integration bridge.
- [#10165](https://github.com/emqx/emqx/pull/10165) Support escaped special characters in InfluxDB data bridge `write_syntax`. This update allows to use escaped special characters in string elements in accordance with InfluxDB line protocol.
- [#10211](https://github.com/emqx/emqx/pull/10211) Hide `broker.broker_perf` config and API documents. The two configs `route_lock_type` and `trie_compaction` are rarely used and require a full cluster restart to take effect. They are not suitable for being exposed to users. Detailed changes can be found here: https://gist.github.com/zmstone/01ad5754b9beaeaf3f5b86d14d49a0b7/revisions.
- [#10294](https://github.com/emqx/emqx/pull/10294) When configuring a MongoDB bridge, you can now use the `${field}` syntax to reference fields in the message. This enables you to select the collection to insert data into dynamically.
- [#10363](https://github.com/emqx/emqx/pull/10363) Implement Microsoft SQL Server bridge.
- [#10573](https://github.com/emqx/emqx/pull/10573) Improved performance of Webhook bridge when using synchronous query mode. This also should improve the performance of other bridges when they are configured with no batching.
## Bug Fixes
- [#10145](https://github.com/emqx/emqx/pull/10145) Add field `status_reason` to `GET /bridges/:id` response in case this bridge is in status `disconnected` if internal health-check reports an error condition. Include this same error condition in message when creating an alarm for a failing bridge.
- [#10172](https://github.com/emqx/emqx/pull/10172) Fix the incorrect regular expression in the default ACL rule to allow the specified username (dashboard) to subscribe to `$SYS/#`.
- [#10174](https://github.com/emqx/emqx/pull/10174) Upgrade the `esockd` library from 5.9.4 to 5.9.6. Fix unnecessary error-level logging when a connection is closed before the proxy protocol header is sent by the proxy.
- [#10195](https://github.com/emqx/emqx/pull/10195) Add labels to API schemas where description contains raw HTML, which would break formatting of generated documentation otherwise.
- [#10196](https://github.com/emqx/emqx/pull/10196) Use lower-case for schema summaries and descriptions to be used in menu of generated online documentation.
- [#10209](https://github.com/emqx/emqx/pull/10209) Fix bug where a last will testament (LWT) message could be published when kicking out a banned client.
- [#10225](https://github.com/emqx/emqx/pull/10225) Allow installing a plugin if its name matches the beginning of another (already installed) plugin name. For example: if plugin `emqx_plugin_template_a` is installed, it must not block installing plugin `emqx_plugin_template`.
- [#10226](https://github.com/emqx/emqx/pull/10226) Handle validation error in `/bridges` API and return `400` instead of `500`.
- [#10242](https://github.com/emqx/emqx/pull/10242) Fixed a log data field name clash. Prior to this fix, some debug logs may report a wrong Erlang PID which may affect troubleshooting session takeover issues.
- [#10257](https://github.com/emqx/emqx/pull/10257) Fixed the issue where `auto_observe` was not working in LwM2M Gateway.
Before the fix, `OBSERVE` requests were sent without a token, causing failures that LwM2M clients could not handle.
After the fix, the LwM2M Gateway can correctly observe the resource list carried by the client; furthermore, unknown resources will be ignored and the following warning log will be printed:
```
2023-03-28T18:50:27.771123+08:00 [warning] msg: ignore_observer_resource, mfa: emqx_lwm2m_session:observe_object_list/3, line: 522, peername: 127.0.0.1:56830, clientid: testlwm2mclient, object_id: 31024, reason: no_xml_definition
```
- [#10286](https://github.com/emqx/emqx/pull/10286) Enhance logging behaviour during boot failure. When EMQX fails to start due to corrupted configuration files, excessive logging is eliminated and no crash dump file is generated.
- [#10297](https://github.com/emqx/emqx/pull/10297) Keeps `eval` command backward compatible with v4 by evaluating only Erlang expressions, even on Elixir node. For Elixir expressions, use `eval-ex` command.
- [#10300](https://github.com/emqx/emqx/pull/10300) Fixed issue with Elixir builds that prevented plugins from being configured via environment variables.
- [#10315](https://github.com/emqx/emqx/pull/10315) Fix a crash when checking the `limit` and `page` parameters in the `/mqtt/delayed/messages` API call.
- [#10317](https://github.com/emqx/emqx/pull/10317) Do not expose listener level authentications before extensive verification.
- [#10323](https://github.com/emqx/emqx/pull/10323) For security reasons, the value of the password field in the API examples is replaced with `******`.
- [#10410](https://github.com/emqx/emqx/pull/10410) Fix config check failed when gateways are configured in emqx.conf.
This issue was first introduced in v5.0.22 via [#10278](https://github.com/emqx/emqx/pull/10278), where the boot-time config check was missing.
- [#10533](https://github.com/emqx/emqx/pull/10533) Fixed an issue that could cause (otherwise harmless) noise in the logs.
During some particularly slow synchronous calls to bridges, late replies could be sent to connection processes that were no longer expecting a reply, which would then emit an error log like:
```
2023-04-19T18:24:35.350233+00:00 [error] msg: unexpected_info, mfa: emqx_channel:handle_info/2, line: 1278, peername: 172.22.0.1:36384, clientid: caribdis_bench_sub_1137967633_4788, info: {#Ref<0.408802983.1941504010.189402>,{ok,200,[{<<"cache-control">>,<<"max-age=0, ...">>}}
```
Those logs are harmless, but they could flood the log and worry users needlessly.
- [#10449](https://github.com/emqx/emqx/pull/10449) Validate the `ssl_options` and `header` configurations when creating authentication http (`authn_http`). Prior to this, incorrect `ssl` configuration could result in successful creation but the entire authn being unusable.
- [#10548](https://github.com/emqx/emqx/pull/10548) Fixed a race condition in the HTTP driver that would result in an error rather than a retry of the request.
Related fix in the driver: https://github.com/emqx/ehttpc/pull/45
- [#10201](https://github.com/emqx/emqx/pull/10201) In TDengine data bridge, removed the redundant database name from the SQL template.
- [#10270](https://github.com/emqx/emqx/pull/10270) The ClickHouse data bridge has received a fix that improves the error message shown when users click the test button in the settings dialog.
- [#10324](https://github.com/emqx/emqx/pull/10324) Previously, when attempting to reconnect to a misconfigured ClickHouse bridge through the dashboard, users would not receive an error message. This issue is now resolved, and error messages will now be displayed.
- [#10438](https://github.com/emqx/emqx/pull/10438) Fix some configuration item terminology errors in the DynamoDB data bridge:
- Changed `database` to `table`
- Changed `username` to `aws_access_key_id`
- Changed `password` to `aws_secret_access_key`

changes/e5.0.3.zh.md
View File

@ -0,0 +1,138 @@
# e5.0.3
## 优化
- [#10128](https://github.com/emqx/emqx/pull/10128) SSL MQTT监听器增加对 OCSP Stapling 的支持。
- [#10156](https://github.com/emqx/emqx/pull/10156) 调整配置文件覆盖顺序机制
对于新安装的 EMQX,`emqx.conf` 和环境变量中的配置会覆盖 API 传入的配置(即 `cluster.hocon` 中的配置);
对于从旧版本升级的 EMQX(`data` 文件夹中包含 `cluster-override.conf` 文件),保留之前的覆盖规则,即 `cluster-override.conf` 中的配置会覆盖 `emqx.conf` 和环境变量的配置。
注意:`data/configs/cluster-override.conf` 已弃用。升级后,建议您在 `emqx.conf` 中重新配置之前被 `cluster-override.conf` 覆盖的配置项,并将 `cluster-override.conf` 中的配置迁移到 `cluster.hocon` 中。
升级后,EMQX 将像以前一样继续读取 `local-override.conf`(如果存在的话),但建议您将配置合并到 `emqx.conf` 中。
- [#10164](https://github.com/emqx/emqx/pull/10164) TLS MQTT 监听器增加对 CRL 检查的支持。
- [#10207](https://github.com/emqx/emqx/pull/10207) 提高 OpenAPI (swagger) 文档的可读性。 在此更改之前,文档中有一些 `Summary` 字段冗长且缺乏翻译,现在使用了 i18n 数据库中更简洁的 `label` 字段。
- [#10210](https://github.com/emqx/emqx/pull/10210) 解决停止/重启 Mria 时 Mnesia callback 可能出现的问题。优化后,当 Mria 被停止前会取消 Mnesia callback 的注册。详情见 Mria PR
- [#10224](https://github.com/emqx/emqx/pull/10224) 在 Helm 图表中增加自定义 `clusterIP` 选项,用户可以将其设置为固定 IP。
- [#10263](https://github.com/emqx/emqx/pull/10263) 添加用于评估 Elixir 表达式的命令 `eval-ex`
- [#10278](https://github.com/emqx/emqx/pull/10278) 重构所有网关的目录结构。
- [#10206](https://github.com/emqx/emqx/pull/10206) 所有数据桥接支持异步查询模式。
优化前,如将某项资源(如数据桥接)的查询模式设为 sync(同步模式),缓存将以同步的模式调用底层连接器,即使它支持异步调用。
- [#10306](https://github.com/emqx/emqx/pull/10306) 大多数数据桥接支持 async 查询模式。
这是 [#10206](https://github.com/emqx/emqx/pull/10206) 的后续优化。优化前,Cassandra、MongoDB、MySQL、Postgres、Redis、RocketMQ、TDengine 等数据桥接只支持同步查询模式。
- [#10318](https://github.com/emqx/emqx/pull/10318) 规则引擎中的 FROM 语句新增支持由引号(')包裹的字符串。
- [#10336](https://github.com/emqx/emqx/pull/10336) 添加 API Endpoint `/rule_engine`,用以管理规则引擎的配置。
- [#10354](https://github.com/emqx/emqx/pull/10354) 优化 `max_heap_size` 配置错误时的报错信息。当发生 `message_queue_too_long` 报错时,会在日志文件中记录当前值和最大值。
- [#10358](https://github.com/emqx/emqx/pull/10358) 隐藏 `flapping_detect/conn_congestion/stats` 配置。弃用 `flapping_detect.enable` 配置项。
- [#10359](https://github.com/emqx/emqx/pull/10359) 通过独立的 RPC 收集针对集群级别的指标,不再隐式收集不被 API 调用的指标。
- [#10373](https://github.com/emqx/emqx/pull/10373) 废弃 `trace.payload_encode` 配置项。可以在通过 HTTP API 创建的日志追踪时使用 `trace.payload_encode = [text, hidden, hex]` 字段替代。
- [#10381](https://github.com/emqx/emqx/pull/10381) 隐藏 `auto_subscribe` 配置项,后续将只能通过 HTTP API 来修改自动订阅规则。
- [#10391](https://github.com/emqx/emqx/pull/10391) 简化配置文件并隐藏大量的配置项,包括 `rewrite`、`topic_metric`、`persistent_session_store`、`overload_protection`、`flapping_detect`、`conn_congestion`、`stats`、`auto_subscribe`、`broker_perf`、`shared_subscription_group`、`slow_subs`、`ssl_options.user_lookup_fun`,以及 `node` 和 `dashboard` 相关的部分高级配置项,[#10358](https://github.com/emqx/emqx/pull/10358)、[#10381](https://github.com/emqx/emqx/pull/10381)、[#10385](https://github.com/emqx/emqx/pull/10385)。
- [#10404](https://github.com/emqx/emqx/pull/10404) 将缓冲区工作线程的默认队列模式更改为 `memory_only`。在此优化前,默认队列模式为 `volatile_offload`,当消息速率很高,资源无法满足该需求时,缓冲区性能会由于频繁的磁盘操作而受影响。
- [#10140](https://github.com/emqx/emqx/pull/10140) 新增 Cassandra 数据桥接,目前仅支持 Cassandra 3.x 版本,暂不支持 4.x 版本。
- [#10143](https://github.com/emqx/emqx/pull/10143) 新增 RocketMQ 数据桥接。
- [#10165](https://github.com/emqx/emqx/pull/10165) InfluxDB 数据桥接中的 `write_syntax` 中支持转义特殊字符。优化后,用户可根据 InfluxDB Line Protocol 在字符串中使用经转义的特殊字符。
- [#10211](https://github.com/emqx/emqx/pull/10211) 隐藏 `broker.broker_perf` 配置和相关 API 文档。其中的两个配置项 `route_lock_type` 和 `trie_compaction` 很少使用,而且需要重新启动整个集群才能生效,不必要暴露给用户。更多信息可阅读:https://gist.github.com/zmstone/01ad5754b9beaeaf3f5b86d14d49a0b7/revisions 。
- [#10294](https://github.com/emqx/emqx/pull/10294) 配置 MongoDB 数据桥接时,现支持通过占位符 `${field}` 语法来引用消息中的字段,从而动态地选择要插入的数据集合。
- [#10363](https://github.com/emqx/emqx/pull/10363) 新增 Microsoft SQL Server 数据桥接。
- [#10573](https://github.com/emqx/emqx/pull/10573) 提升了 WebHook 在同步请求模式下的性能表现,以及其他数据桥接在未配置批处理时的性能表现。
## 修复
- [#10145](https://github.com/emqx/emqx/pull/10145) 在针对 `GET /bridges/:id` 的API 调用响应中,如桥接的状态为断开,且内部健康检查返回错误,添加 `status_reason` 字段说明错误原因。在相应的告警信息中,同样增加 `status_reason` 字段说明断开原因。
- [#10172](https://github.com/emqx/emqx/pull/10172) 修复默认 ACL 规则中不正确的正则表达式,从而允许 dashboard 用户名订阅 `$SYS/#` 主题。
- [#10174](https://github.com/emqx/emqx/pull/10174) 将库 `esockd` 从 5.9.4 升级至 5.9.6。如连接在代理发送代理协议头之前关闭,将不再产生一条错误级别日志。
- [#10195](https://github.com/emqx/emqx/pull/10195) 对包含 HTML 的 API Schema 添加标签,解决之前会破坏生成文档格式的问题。
- [#10196](https://github.com/emqx/emqx/pull/10196) 针对用于生成在线文档菜单中的模式摘要和描述,使用小写字母。
- [#10209](https://github.com/emqx/emqx/pull/10209) 修复在断开禁用客户端时,此类客户端仍可发布遗嘱消息的错误。
- [#10225](https://github.com/emqx/emqx/pull/10225) 对于名称与已安装的插件开头相同的插件,用户仍可继续安装。例如:如果插件 `emqx_plugin_template_a` 已安装,用户仍可安装名为 `emqx_plugin_template` 的插件。
- [#10226](https://github.com/emqx/emqx/pull/10226) 在 `/bridges` API 验证错误时,返回 `400` 而非 `500`
- [#10242](https://github.com/emqx/emqx/pull/10242) 修复日志中数据字段名称冲突。修复前,一些调试日志可能会报告错误的 Erlang PID影响解决会话接管类问题。
- [#10257](https://github.com/emqx/emqx/pull/10257) 修复 LwM2M 网关中 `auto_observe` 无法正常工作的问题。
修复前,在发送 `OBSERVE` 请求时没有发送 token,导致 LwM2M 网关无法处理客户端请求。
修复后,LwM2M 网关可以正确观察到客户端携带的资源列表;此外,未知资源将被忽略并打印以下警告日志:
```
2023-03-28T18:50:27.771123+08:00 [warning] msg: ignore_observer_resource, mfa: emqx_lwm2m_session:observe_object_list/3, line: 522, peername: 127.0.0.1:56830, clientid: testlwm2mclient, object_id: 31024, reason: no_xml_definition
```
- [#10286](https://github.com/emqx/emqx/pull/10286) 优化 EMQX 启动失败时的日志记录行为。当 EMQX 由于配置文件破坏无法启动时,不会再产生过多的日志记录,也不会再产生崩溃转储文件。
- [#10297](https://github.com/emqx/emqx/pull/10297) 通过仅评估 Erlang 表达式实现 `eval` 命令与 v4 版本的向后兼容,该更新同样适用于 Elixir 节点。对于 Elixir 表达式,请使用 `eval-ex` 命令。
- [#10300](https://github.com/emqx/emqx/pull/10300) 针对通过 Elixir 构建的项目,修复无法通过环境变量配置插件的问题。
- [#10315](https://github.com/emqx/emqx/pull/10315) 修复在 `/mqtt/delayed/messages` API 调用中,在检查 `limit``page` 参数时的崩溃问题。
- [#10317](https://github.com/emqx/emqx/pull/10317) 在经过充分验证前,隐藏监听器级别的认证信息。
- [#10323](https://github.com/emqx/emqx/pull/10323) 出于安全原因,API 示例中 `password` 字段的值被替换为 `******`。
- [#10410](https://github.com/emqx/emqx/pull/10410) 针对由`emqx.conf` 配置的网关,修复配置检查失败问题。
此问题最早在 v5.0.22版本中由 [#10278](https://github.com/emqx/emqx/pull/10278)引入,启动时缺少配置检查。
- [#10533](https://github.com/emqx/emqx/pull/10533) 修复可能导致日志中的出现无害的噪音的问题。
在对数据桥接进行同步调用时,一些迟到的回复可能会发送到不再期待回复的连接进程,导致产生错误日志,如:
```
2023-04-19T18:24:35.350233+00:00 [error] msg: unexpected_info, mfa: emqx_channel:handle_info/2, line: 1278, peername: 172.22.0.1:36384, clientid: caribdis_bench_sub_1137967633_4788, info: {#Ref<0.408802983.1941504010.189402>,{ok,200,[{<<"cache-control">>,<<"max-age=0, ...">>}}
```
这些日志是无害的,但它们可能会泛滥成灾,引起用户不必要的担心。
- [#10449](https://github.com/emqx/emqx/pull/10449) 在通过 HTTP 服务(`authn_http`)创建身份验证时,将进行 `ssl_options` 和 `header` 配置验证。在此修复前,用户通过错误的 ssl 配置也可成功创建身份验证,但该验证整体不生效。
- [#10548](https://github.com/emqx/emqx/pull/10548) 修复了 HTTP 驱动程序在竞争条件下会导致错误而不去重试的问题。
相关的驱动程序修复:[emqx/ehttpc#45](https://github.com/emqx/ehttpc/pull/45)
- [#10201](https://github.com/emqx/emqx/pull/10201) 在 TDengine 数据桥接中,移除 SQL 模板中冗余的数据库名称。
- [#10270](https://github.com/emqx/emqx/pull/10270) 在创建 ClickHouse 数据桥接时,优化用户点击测试按钮时的错误信息
- [#10324](https://github.com/emqx/emqx/pull/10324) 针对配置有误的 ClickHouse 数据桥接,当用户尝试通过 Dashboard 重连时,将收到报错。修复前,用户不会收到报错。
- [#10438](https://github.com/emqx/emqx/pull/10438) 修复 DynamoDB 数据桥接中的部分术语使用错误:
- 将 `database` 修改为 `table`
- 将 `username` 修改为 `aws_access_key_id`
- 将 `password` 修改为 `aws_secret_access_key`

changes/ee/feat-10534.md
View File

@ -0,0 +1 @@
A RabbitMQ bridge has been added. This bridge makes it possible to forward messages from EMQX to RabbitMQ.

Some files were not shown because too many files have changed in this diff.