chore: merge 'upstream/release-53' into 1031-sync-r53

This commit is contained in:
Ivan Dyachkov 2023-10-31 11:06:25 +01:00
commit b1ab213081
112 changed files with 11764 additions and 1181 deletions

View File

@ -18,7 +18,7 @@ services:
- /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
kdc:
hostname: kdc.emqx.net
image: ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu20.04
image: ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu22.04
container_name: kdc.emqx.net
expose:
- 88 # kdc

View File

@ -3,17 +3,17 @@ version: '3.9'
services:
erlang:
container_name: erlang
image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu20.04}
image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu22.04}
env_file:
- conf.env
environment:
GITHUB_ACTIONS: ${GITHUB_ACTIONS}
GITHUB_TOKEN: ${GITHUB_TOKEN}
GITHUB_RUN_ID: ${GITHUB_RUN_ID}
GITHUB_SHA: ${GITHUB_SHA}
GITHUB_RUN_NUMBER: ${GITHUB_RUN_NUMBER}
GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}
GITHUB_REF: ${GITHUB_REF}
GITHUB_ACTIONS: ${GITHUB_ACTIONS:-}
GITHUB_TOKEN: ${GITHUB_TOKEN:-}
GITHUB_RUN_ID: ${GITHUB_RUN_ID:-}
GITHUB_SHA: ${GITHUB_SHA:-}
GITHUB_RUN_NUMBER: ${GITHUB_RUN_NUMBER:-}
GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME:-}
GITHUB_REF: ${GITHUB_REF:-}
networks:
- emqx_bridge
ports:

View File

@ -11,7 +11,7 @@ Please convert it to a draft if any of the following conditions are not met. Rev
- [ ] Added tests for the changes
- [ ] Added property-based tests for code which performs user input validation
- [ ] Changed lines covered in coverage report
- [ ] Change log has been added to `changes/(ce|ee)/(feat|perf|fix)-<PR-id>.en.md` files
- [ ] Change log has been added to `changes/(ce|ee)/(feat|perf|fix|breaking)-<PR-id>.en.md` files
- [ ] For internal contributor: there is a jira ticket to track this change
- [ ] Created PR to [emqx-docs](https://github.com/emqx/emqx-docs) if documentation update is required, or link to a follow-up jira ticket
- [ ] Schema changes are backward compatible

View File

@ -25,6 +25,7 @@ jobs:
version-emqx: ${{ steps.matrix.outputs.version-emqx }}
version-emqx-enterprise: ${{ steps.matrix.outputs.version-emqx-enterprise }}
runner_labels: ${{ github.repository_owner == 'emqx' && '["self-hosted","ephemeral","linux","x64"]' || '["ubuntu-22.04"]' }}
xl_runner_labels: ${{ github.repository_owner == 'emqx' && '["self-hosted","ephemeral-xl","linux","x64"]' || '["ubuntu-22.04"]' }}
builder: "ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu22.04"
builder_vsn: "5.2-3"
otp_vsn: "25.3.2-2"
@ -115,7 +116,7 @@ jobs:
echo "version-emqx-enterprise=$(./pkg-vsn.sh emqx-enterprise)" | tee -a $GITHUB_OUTPUT
compile:
runs-on: ${{ fromJSON(needs.sanity-checks.outputs.runner_labels) }}
runs-on: ${{ fromJSON(needs.sanity-checks.outputs.xl_runner_labels) }}
container: ${{ needs.sanity-checks.outputs.builder }}
needs:
- sanity-checks
@ -154,7 +155,7 @@ jobs:
- compile
uses: ./.github/workflows/run_emqx_app_tests.yaml
with:
runner_labels: ${{ needs.sanity-checks.outputs.runner_labels }}
runner_labels: ${{ needs.sanity-checks.outputs.xl_runner_labels }}
builder: ${{ needs.sanity-checks.outputs.builder }}
before_ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.base.sha || github.event.before }}
after_ref: ${{ github.sha }}

View File

@ -40,7 +40,10 @@ jobs:
profile:
- emqx
- emqx-enterprise
rpc:
- tcp
- ssl1.3
- ssl1.2
steps:
- uses: actions/checkout@v3
with:
@ -56,6 +59,40 @@ jobs:
echo "${stderr}";
exit 1;
fi
- name: Prepare emqxConfig.EMQX_RPC using TCP
working-directory: source
if: matrix.rpc == 'tcp'
run: |
cat > rpc-overrides.yaml <<EOL
emqxConfig:
EMQX_RPC__PROTOCOL: tcp
EOL
- name: Prepare emqxConfig.EMQX_RPC using ssl1.3
working-directory: source
if: matrix.rpc == 'ssl1.3'
run: |
cat > rpc-overrides.yaml <<EOL
emqxConfig:
EMQX_RPC__PROTOCOL: ssl
EMQX_RPC__CERTFILE: /opt/emqx/etc/certs/cert.pem
EMQX_RPC__KEYFILE: /opt/emqx/etc/certs/key.pem
EMQX_RPC__CACERTFILE: /opt/emqx/etc/certs/cacert.pem
EMQX_RPC__CIPHERS: TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256
EMQX_RPC__TLS_VERSIONS: "[tlsv1.3]"
EOL
- name: Prepare emqxConfig.EMQX_RPC using ssl1.2
working-directory: source
if: matrix.rpc == 'ssl1.2'
run: |
cat > rpc-overrides.yaml <<EOL
emqxConfig:
EMQX_RPC__PROTOCOL: ssl
EMQX_RPC__CERTFILE: /opt/emqx/etc/certs/cert.pem
EMQX_RPC__KEYFILE: /opt/emqx/etc/certs/key.pem
EMQX_RPC__CACERTFILE: /opt/emqx/etc/certs/cacert.pem
EMQX_RPC__CIPHERS: TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256
EMQX_RPC__TLS_VERSIONS: "[tlsv1.2]"
EOL
- name: run emqx on chart (k8s)
if: matrix.discovery == 'k8s'
working-directory: source
@ -72,7 +109,9 @@ jobs:
--set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \
--set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \
--set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \
--set emqxConfig.EMQX_LOG__CONSOLE__LEVEL=debug \
--set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \
--values rpc-overrides.yaml \
deploy/charts/${EMQX_NAME} \
--debug
- name: run emqx on chart (dns)
@ -90,8 +129,11 @@ jobs:
--set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \
--set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \
--set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \
--set emqxConfig.EMQX_LOG__CONSOLE__LEVEL=debug \
--set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \
--values rpc-overrides.yaml \
deploy/charts/${EMQX_NAME} \
--wait \
--debug
- name: waiting emqx started
timeout-minutes: 5
@ -104,12 +146,13 @@ jobs:
echo "waiting emqx started";
sleep 10;
done
- name: Get Token
- name: Setup 18083 port forwarding
run: |
nohup kubectl port-forward service/${EMQX_NAME} 18083:18083 > /dev/null &
- name: Get auth token
run: |
kubectl port-forward service/${EMQX_NAME} 18083:18083 > /dev/null &
curl --head -X GET --retry 10 --retry-connrefused --retry-delay 6 http://localhost:18083/status
echo "TOKEN=$(curl --silent -X 'POST' 'http://127.0.0.1:18083/api/v5/login' -H 'accept: application/json' -H 'Content-Type: application/json' -d '{"username": "admin","password": "public"}' | jq -r ".token")" >> $GITHUB_ENV
- name: Check cluster
timeout-minutes: 1
run: |
@ -117,8 +160,13 @@ jobs:
nodes_length="$(curl --silent -H "Authorization: Bearer $TOKEN" -X GET http://127.0.0.1:18083/api/v5/cluster| jq '.nodes|length')"
[ $nodes_length != "3" ]
do
echo "waiting ${EMQX_NAME} cluster scale. Current live nodes: $nodes_length."
sleep 1
if [ $nodes_length -eq 0 ]; then
echo "node len must >= 1, refresh Token... "
TOKEN=$(curl --silent -X 'POST' 'http://127.0.0.1:18083/api/v5/login' -H 'accept: application/json' -H 'Content-Type: application/json' -d '{"username": "admin","password": "public"}' | jq -r ".token")
else
echo "waiting ${EMQX_NAME} cluster scale. Current live nodes: $nodes_length."
fi
sleep 1;
done
- uses: actions/checkout@v3
with:

View File

@ -52,6 +52,7 @@ jobs:
- name: eunit
env:
PROFILE: ${{ matrix.profile }}
ENABLE_COVER_COMPILE: 1
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
run: make eunit
@ -59,6 +60,7 @@ jobs:
- name: proper
env:
PROFILE: ${{ matrix.profile }}
ENABLE_COVER_COMPILE: 1
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
run: make proper
@ -69,7 +71,7 @@ jobs:
ct_docker:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
name: "ct_docker (${{ matrix.app }}-${{ matrix.suitegroup }})"
name: "${{ matrix.app }}-${{ matrix.suitegroup }} (${{ matrix.profile }})"
strategy:
fail-fast: false
matrix:
@ -102,6 +104,7 @@ jobs:
MINIO_TAG: "RELEASE.2023-03-20T20-16-18Z"
PROFILE: ${{ matrix.profile }}
SUITEGROUP: ${{ matrix.suitegroup }}
ENABLE_COVER_COMPILE: 1
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }}
run: ./scripts/ct/run.sh --ci --app ${{ matrix.app }}
- uses: actions/upload-artifact@v3
@ -119,7 +122,7 @@ jobs:
ct:
runs-on: ${{ fromJSON(inputs.runner_labels) }}
name: "ct (${{ matrix.app }}-${{ matrix.suitegroup }})"
name: "${{ matrix.app }}-${{ matrix.suitegroup }} (${{ matrix.profile }})"
strategy:
fail-fast: false
matrix:
@ -144,6 +147,7 @@ jobs:
env:
PROFILE: ${{ matrix.profile }}
SUITEGROUP: ${{ matrix.suitegroup }}
ENABLE_COVER_COMPILE: 1
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }}
run: |
make "${{ matrix.app }}-ct"

View File

@ -1,3 +1,8 @@
ifeq ($(DEBUG),1)
DEBUG_INFO = $(info $1)
else
DEBUG_INFO = @:
endif
REBAR = $(CURDIR)/rebar3
BUILD = $(CURDIR)/build
SCRIPTS = $(CURDIR)/scripts
@ -18,17 +23,6 @@ endif
export EMQX_DASHBOARD_VERSION ?= v1.5.0
export EMQX_EE_DASHBOARD_VERSION ?= e1.3.0
# `:=` should be used here, otherwise the `$(shell ...)` will be executed every time when the variable is used
# In make 4.4+, for backward-compatibility the value from the original environment is used.
# so the shell script will be executed tons of times.
# https://github.com/emqx/emqx/pull/10627
ifeq ($(strip $(OTP_VSN)),)
export OTP_VSN := $(shell $(SCRIPTS)/get-otp-vsn.sh)
endif
ifeq ($(strip $(ELIXIR_VSN)),)
export ELIXIR_VSN := $(shell $(SCRIPTS)/get-elixir-vsn.sh)
endif
PROFILE ?= emqx
REL_PROFILES := emqx emqx-enterprise
PKG_PROFILES := emqx-pkg emqx-enterprise-pkg
@ -75,11 +69,11 @@ mix-deps-get: $(ELIXIR_COMMON_DEPS)
.PHONY: eunit
eunit: $(REBAR) merge-config
@ENABLE_COVER_COMPILE=1 $(REBAR) eunit --name eunit@127.0.0.1 -v -c --cover_export_name $(CT_COVER_EXPORT_PREFIX)-eunit
@$(REBAR) eunit --name eunit@127.0.0.1 -c -v --cover_export_name $(CT_COVER_EXPORT_PREFIX)-eunit
.PHONY: proper
proper: $(REBAR)
@ENABLE_COVER_COMPILE=1 $(REBAR) proper -d test/props -c
@$(REBAR) proper -d test/props -c
.PHONY: test-compile
test-compile: $(REBAR) merge-config
@ -91,7 +85,7 @@ $(REL_PROFILES:%=%-compile): $(REBAR) merge-config
.PHONY: ct
ct: $(REBAR) merge-config
@ENABLE_COVER_COMPILE=1 $(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(CT_COVER_EXPORT_PREFIX)-ct
@$(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(CT_COVER_EXPORT_PREFIX)-ct
## only check bpapi for enterprise profile because it's a super-set.
.PHONY: static_checks
@ -101,31 +95,56 @@ static_checks:
./scripts/check-i18n-style.sh
./scripts/check_missing_reboot_apps.exs
APPS=$(shell $(SCRIPTS)/find-apps.sh)
# Allow user-set CASES environment variable
ifneq ($(CASES),)
CASES_ARG := --case $(CASES)
endif
.PHONY: $(APPS:%=%-ct)
# Allow user-set GROUPS environment variable
ifneq ($(GROUPS),)
GROUPS_ARG := --groups $(GROUPS)
endif
ifeq ($(ENABLE_COVER_COMPILE),1)
cover_args = --cover --cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1)
else
cover_args =
endif
## example:
## env SUITES=apps/appname/test/test_SUITE.erl CASES=t_foo make apps/appname-ct
define gen-app-ct-target
$1-ct: $(REBAR) merge-config clean-test-cluster-config
$(eval SUITES := $(shell $(SCRIPTS)/find-suites.sh $1))
ifneq ($(SUITES),)
ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \
--readable=$(CT_READABLE) \
--name $(CT_NODE_NAME) \
--cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1) \
--suite $(SUITES)
$(REBAR) ct -v \
--readable=$(CT_READABLE) \
--name $(CT_NODE_NAME) \
$(call cover_args,$1) \
--suite $(SUITES) \
$(GROUPS_ARG) \
$(CASES_ARG)
else
@echo 'No suites found for $1'
@echo 'No suites found for $1'
endif
endef
$(foreach app,$(APPS),$(eval $(call gen-app-ct-target,$(app))))
ifneq ($(filter %-ct,$(MAKECMDGOALS)),)
app_to_test := $(patsubst %-ct,%,$(filter %-ct,$(MAKECMDGOALS)))
$(call DEBUG_INFO,app_to_test $(app_to_test))
$(eval $(call gen-app-ct-target,$(app_to_test)))
endif
## apps/name-prop targets
.PHONY: $(APPS:%=%-prop)
define gen-app-prop-target
$1-prop:
$(REBAR) proper -d test/props -v -m $(shell $(SCRIPTS)/find-props.sh $1)
endef
$(foreach app,$(APPS),$(eval $(call gen-app-prop-target,$(app))))
ifneq ($(filter %-prop,$(MAKECMDGOALS)),)
app_to_test := $(patsubst %-prop,%,$(filter %-prop,$(MAKECMDGOALS)))
$(call DEBUG_INFO,app_to_test $(app_to_test))
$(eval $(call gen-app-prop-target,$(app_to_test)))
endif
.PHONY: ct-suite
ct-suite: $(REBAR) merge-config clean-test-cluster-config
@ -303,3 +322,11 @@ fmt: $(REBAR)
.PHONY: clean-test-cluster-config
clean-test-cluster-config:
@rm -f apps/emqx_conf/data/configs/cluster.hocon || true
.PHONY: spellcheck
spellcheck:
./scripts/spellcheck/spellcheck.sh _build/docgen/$(PROFILE)/schema-en.json
.PHONY: nothing
nothing:
@:

View File

@ -35,7 +35,7 @@
-define(EMQX_RELEASE_CE, "5.3.1-alpha.1").
%% Enterprise edition
-define(EMQX_RELEASE_EE, "5.3.1-alpha.1").
-define(EMQX_RELEASE_EE, "5.3.1-alpha.2").
%% The HTTP API version
-define(EMQX_API_VERSION, "5.0").

View File

@ -7,12 +7,14 @@
{emqx_bridge,2}.
{emqx_bridge,3}.
{emqx_bridge,4}.
{emqx_bridge,5}.
{emqx_broker,1}.
{emqx_cm,1}.
{emqx_cm,2}.
{emqx_conf,1}.
{emqx_conf,2}.
{emqx_conf,3}.
{emqx_connector, 1}.
{emqx_dashboard,1}.
{emqx_delayed,1}.
{emqx_delayed,2}.

View File

@ -29,7 +29,7 @@
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.7"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.16"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.2.0"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.2.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.16"}}},
{emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}},
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
@ -45,7 +45,7 @@
{meck, "0.9.2"},
{proper, "1.4.0"},
{bbmustache, "1.10.0"},
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.9.0"}}}
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.9.1"}}}
]},
{extra_src_dirs, [{"test", [recursive]},
{"integration_test", [recursive]}]}
@ -55,7 +55,7 @@
{meck, "0.9.2"},
{proper, "1.4.0"},
{bbmustache, "1.10.0"},
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.9.0"}}}
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.9.1"}}}
]},
{extra_src_dirs, [{"test", [recursive]}]}
]}

View File

@ -325,22 +325,32 @@ init_load(SchemaMod, Conf) when is_list(Conf) orelse is_binary(Conf) ->
ok = save_schema_mod_and_names(SchemaMod),
HasDeprecatedFile = has_deprecated_file(),
RawConf0 = load_config_files(HasDeprecatedFile, Conf),
warning_deprecated_root_key(RawConf0),
RawConf1 =
RawConf1 = upgrade_raw_conf(SchemaMod, RawConf0),
warning_deprecated_root_key(RawConf1),
RawConf2 =
case HasDeprecatedFile of
true ->
overlay_v0(SchemaMod, RawConf0);
overlay_v0(SchemaMod, RawConf1);
false ->
overlay_v1(SchemaMod, RawConf0)
overlay_v1(SchemaMod, RawConf1)
end,
RawConf = fill_defaults_for_all_roots(SchemaMod, RawConf1),
RawConf3 = fill_defaults_for_all_roots(SchemaMod, RawConf2),
%% check configs against the schema
{AppEnvs, CheckedConf} = check_config(SchemaMod, RawConf, #{}),
{AppEnvs, CheckedConf} = check_config(SchemaMod, RawConf3, #{}),
save_to_app_env(AppEnvs),
ok = save_to_config_map(CheckedConf, RawConf),
ok = save_to_config_map(CheckedConf, RawConf3),
maybe_init_default_zone(),
ok.
%% Give the schema module a chance to migrate the raw (unvalidated)
%% config map before it is overlaid and checked. Only invoked when the
%% module actually exports upgrade_raw_conf/1; otherwise the raw config
%% is returned unchanged.
upgrade_raw_conf(SchemaMod, RawConf) ->
case erlang:function_exported(SchemaMod, upgrade_raw_conf, 1) of
true ->
%% TODO make it a schema module behaviour in hocon_schema
apply(SchemaMod, upgrade_raw_conf, [RawConf]);
false ->
%% no upgrade hook exported: pass the raw config through as-is
RawConf
end.
%% Merge environment variable overrides on top, then merge with overrides.
overlay_v0(SchemaMod, RawConf) when is_map(RawConf) ->
RawConfWithEnvs = merge_envs(SchemaMod, RawConf),

View File

@ -19,7 +19,7 @@
-include("logger.hrl").
-include("emqx_schema.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("hocon/include/hocon_types.hrl").
-behaviour(gen_server).
@ -736,7 +736,7 @@ remove_empty_leaf(KeyPath, Handlers) ->
end.
assert_callback_function(Mod) ->
_ = Mod:module_info(),
_ = apply(Mod, module_info, []),
case
erlang:function_exported(Mod, pre_config_update, 3) orelse
erlang:function_exported(Mod, post_config_update, 5)

View File

@ -22,6 +22,8 @@
-export([
all/1,
matrix_to_groups/2,
group_path/1,
init_per_testcase/3,
end_per_testcase/3,
boot_modules/1,
@ -1375,3 +1377,83 @@ select_free_port(GenModule, Fun) when
end,
ct:pal("Select free OS port: ~p", [Port]),
Port.
%% Generate ct sub-groups from test-case's 'matrix' clause
%% NOTE: the test cases must have a root group name which
%% is unknown to this API.
%%
%% e.g.
%% all() -> [{group, g1}].
%%
%% groups() ->
%% emqx_common_test_helpers:matrix_to_groups(?MODULE, [case1, case2]).
%%
%% case1(matrix) ->
%% {g1, [[tcp, no_auth],
%% [ssl, no_auth],
%% [ssl, basic_auth]
%% ]};
%%
%% case2(matrix) ->
%% {g1, ...}
%% ...
%%
%% Return:
%%
%% [{g1, [],
%% [ {tcp, [], [{no_auth, [], [case1, case2]}
%% ]},
%% {ssl, [], [{no_auth, [], [case1, case2]},
%% {basic_auth, [], [case1, case2]}
%% ]}
%% ]
%% }
%% ]
%% Build the nested ct group tree for Cases by folding each case's
%% 'matrix' clause (Module:Case(matrix)) into an accumulator.
%% foldr keeps the resulting group order aligned with the order of Cases.
matrix_to_groups(Module, Cases) ->
lists:foldr(
fun(Case, Acc) ->
add_case_matrix(Module, Case, Acc)
end,
[],
Cases
).
%% Expand one test case's matrix into group entries.
%% Module:Case(matrix) returns {RootGroup, Matrix}; each row of Matrix is
%% a list of sub-group names, which is prefixed with RootGroup and
%% inserted (with the case as leaf) into the accumulated group tree.
add_case_matrix(Module, Case, Acc0) ->
{RootGroup, Matrix} = Module:Case(matrix),
lists:foldr(
fun(Row, Acc) ->
add_group([RootGroup | Row], Acc, Case)
end,
Acc0,
Matrix
).
%% Insert a test case into the group tree along the given path of group
%% names. At the end of the path ([]), add the case as a leaf unless it
%% is already present. Otherwise descend into (or create) the
%% {Name, [], SubGroup} node for the head of the path.
add_group([], Acc, Case) ->
case lists:member(Case, Acc) of
true ->
Acc;
false ->
[Case | Acc]
end;
add_group([Name | More], Acc, Cases) ->
case lists:keyfind(Name, 1, Acc) of
false ->
%% group not seen yet: create a fresh subtree for the rest of the path
[{Name, [], add_group(More, [], Cases)} | Acc];
{Name, [], SubGroup} ->
%% group exists: extend its subtree in place
New = {Name, [], add_group(More, SubGroup, Cases)},
lists:keystore(Name, 1, Acc, New)
end.
%% Return the ct group names on the path from the root group down to the
%% currently running group, derived from the tc_group_properties and
%% tc_group_path entries of the ct Config proplist. Any failure while
%% extracting names (e.g. missing keys) yields [].
group_path(Config) ->
    try
        CurrentProps = proplists:get_value(tc_group_properties, Config),
        PathStack = proplists:get_value(tc_group_path, Config),
        %% pick the 'name' property out of each group's property list;
        %% the stack is innermost-first, so reverse to get root-first order
        Names = [
            begin
                {name, N} = lists:keyfind(name, 1, Props),
                N
            end
         || Props <- [CurrentProps | PathStack]
        ],
        lists:reverse(Names)
    catch
        _:_ ->
            []
    end.

View File

@ -31,6 +31,7 @@
]).
-define(DEFAULT_APP_ID, <<"default_appid">>).
-define(DEFAULT_APP_KEY, <<"default_app_key">>).
-define(DEFAULT_APP_SECRET, <<"default_app_secret">>).
request_api(Method, Url, Auth) ->
@ -60,7 +61,7 @@ request_api(Method, Url, QueryParams, Auth, Body, HttpOpts) ->
do_request_api(Method, Request, HttpOpts).
do_request_api(Method, Request, HttpOpts) ->
ct:pal("Method: ~p, Request: ~p", [Method, Request]),
% ct:pal("Method: ~p, Request: ~p", [Method, Request]),
case httpc:request(Method, Request, HttpOpts, [{body_format, binary}]) of
{error, socket_closed_remotely} ->
{error, socket_closed_remotely};
@ -90,7 +91,12 @@ create_default_app() ->
Now = erlang:system_time(second),
ExpiredAt = Now + timer:minutes(10),
emqx_mgmt_auth:create(
?DEFAULT_APP_ID, ?DEFAULT_APP_SECRET, true, ExpiredAt, <<"default app key for test">>
?DEFAULT_APP_ID,
?DEFAULT_APP_KEY,
?DEFAULT_APP_SECRET,
true,
ExpiredAt,
<<"default app key for test">>
).
delete_default_app() ->

View File

@ -941,10 +941,13 @@ t_revoked(Config) ->
{port, 8883}
]),
unlink(C),
?assertMatch(
{error, {ssl_error, _Sock, {tls_alert, {certificate_revoked, _}}}}, emqtt:connect(C)
),
ok.
case emqtt:connect(C) of
{error, {ssl_error, _Sock, {tls_alert, {certificate_revoked, _}}}} ->
ok;
{error, closed} ->
%% this happens due to an unidentified race-condition
ok
end.
t_revoke_then_refresh(Config) ->
DataDir = ?config(data_dir, Config),

View File

@ -62,19 +62,20 @@
%% Data backup
-export([
import_config/1
import_config/1,
%% exported for emqx_bridge_v2
import_config/4
]).
-export([query_opts/1]).
-define(EGRESS_DIR_BRIDGES(T),
T == webhook;
T == mysql;
T == gcp_pubsub;
T == influxdb_api_v1;
T == influxdb_api_v2;
%% TODO: rename this to `kafka_producer' after alias support is
%% added to hocon; keeping this as just `kafka' for backwards
%% compatibility.
T == kafka;
T == kafka_producer;
T == redis_single;
T == redis_sentinel;
T == redis_cluster;
@ -211,13 +212,19 @@ send_to_matched_egress_bridges(Topic, Msg) ->
_ ->
ok
catch
throw:Reason ->
?SLOG(error, #{
msg => "send_message_to_bridge_exception",
bridge => Id,
reason => emqx_utils:redact(Reason)
});
Err:Reason:ST ->
?SLOG(error, #{
msg => "send_message_to_bridge_exception",
bridge => Id,
error => Err,
reason => Reason,
stacktrace => ST
reason => emqx_utils:redact(Reason),
stacktrace => emqx_utils:redact(ST)
})
end
end,
@ -277,30 +284,40 @@ post_config_update([?ROOT_KEY], _Req, NewConf, OldConf, _AppEnv) ->
Result.
list() ->
maps:fold(
fun(Type, NameAndConf, Bridges) ->
maps:fold(
fun(Name, RawConf, Acc) ->
case lookup(Type, Name, RawConf) of
{error, not_found} -> Acc;
{ok, Res} -> [Res | Acc]
end
end,
Bridges,
NameAndConf
)
end,
[],
emqx:get_raw_config([bridges], #{})
).
BridgeV1Bridges =
maps:fold(
fun(Type, NameAndConf, Bridges) ->
maps:fold(
fun(Name, RawConf, Acc) ->
case lookup(Type, Name, RawConf) of
{error, not_found} -> Acc;
{ok, Res} -> [Res | Acc]
end
end,
Bridges,
NameAndConf
)
end,
[],
emqx:get_raw_config([bridges], #{})
),
BridgeV2Bridges =
emqx_bridge_v2:list_and_transform_to_bridge_v1(),
BridgeV1Bridges ++ BridgeV2Bridges.
%%BridgeV2Bridges = emqx_bridge_v2:list().
lookup(Id) ->
{Type, Name} = emqx_bridge_resource:parse_bridge_id(Id),
lookup(Type, Name).
lookup(Type, Name) ->
RawConf = emqx:get_raw_config([bridges, Type, Name], #{}),
lookup(Type, Name, RawConf).
case emqx_bridge_v2:is_bridge_v2_type(Type) of
true ->
emqx_bridge_v2:lookup_and_transform_to_bridge_v1(Type, Name);
false ->
RawConf = emqx:get_raw_config([bridges, Type, Name], #{}),
lookup(Type, Name, RawConf)
end.
lookup(Type, Name, RawConf) ->
case emqx_resource:get_instance(emqx_bridge_resource:resource_id(Type, Name)) of
@ -316,7 +333,18 @@ lookup(Type, Name, RawConf) ->
end.
get_metrics(Type, Name) ->
emqx_resource:get_metrics(emqx_bridge_resource:resource_id(Type, Name)).
case emqx_bridge_v2:is_bridge_v2_type(Type) of
true ->
case emqx_bridge_v2:is_valid_bridge_v1(Type, Name) of
true ->
BridgeV2Type = emqx_bridge_v2:bridge_v2_type_to_connector_type(Type),
emqx_bridge_v2:get_metrics(BridgeV2Type, Name);
false ->
{error, not_bridge_v1_compatible}
end;
false ->
emqx_resource:get_metrics(emqx_bridge_resource:resource_id(Type, Name))
end.
maybe_upgrade(mqtt, Config) ->
emqx_bridge_compatible_config:maybe_upgrade(Config);
@ -325,55 +353,90 @@ maybe_upgrade(webhook, Config) ->
maybe_upgrade(_Other, Config) ->
Config.
disable_enable(Action, BridgeType, BridgeName) when
disable_enable(Action, BridgeType0, BridgeName) when
Action =:= disable; Action =:= enable
->
emqx_conf:update(
config_key_path() ++ [BridgeType, BridgeName],
{Action, BridgeType, BridgeName},
#{override_to => cluster}
).
BridgeType = upgrade_type(BridgeType0),
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true ->
emqx_bridge_v2:bridge_v1_enable_disable(Action, BridgeType, BridgeName);
false ->
emqx_conf:update(
config_key_path() ++ [BridgeType, BridgeName],
{Action, BridgeType, BridgeName},
#{override_to => cluster}
)
end.
create(BridgeType, BridgeName, RawConf) ->
create(BridgeType0, BridgeName, RawConf) ->
BridgeType = upgrade_type(BridgeType0),
?SLOG(debug, #{
bridge_action => create,
bridge_type => BridgeType,
bridge_name => BridgeName,
bridge_raw_config => emqx_utils:redact(RawConf)
}),
emqx_conf:update(
emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
RawConf,
#{override_to => cluster}
).
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true ->
emqx_bridge_v2:split_bridge_v1_config_and_create(BridgeType, BridgeName, RawConf);
false ->
emqx_conf:update(
emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
RawConf,
#{override_to => cluster}
)
end.
remove(BridgeType, BridgeName) ->
%% NOTE: This function can cause broken references but it is only called from
%% test cases.
-spec remove(atom() | binary(), binary()) -> ok | {error, any()}.
remove(BridgeType0, BridgeName) ->
BridgeType = upgrade_type(BridgeType0),
?SLOG(debug, #{
bridge_action => remove,
bridge_type => BridgeType,
bridge_name => BridgeName
}),
emqx_conf:remove(
emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
#{override_to => cluster}
).
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true ->
emqx_bridge_v2:remove(BridgeType, BridgeName);
false ->
remove_v1(BridgeType, BridgeName)
end.
check_deps_and_remove(BridgeType, BridgeName, RemoveDeps) ->
BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
%% NOTE: This violates the design: Rule depends on data-bridge but not vice versa.
case emqx_rule_engine:get_rule_ids_by_action(BridgeId) of
[] ->
%% Remove a (v1) bridge entry from the cluster configuration.
%% The type is first normalized via upgrade_type/1. The {ok, _} result of
%% emqx_conf:remove/2 is collapsed to plain ok; errors pass through.
remove_v1(BridgeType0, BridgeName) ->
BridgeType = upgrade_type(BridgeType0),
case
emqx_conf:remove(
emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
#{override_to => cluster}
)
of
{ok, _} ->
ok;
{error, Reason} ->
{error, Reason}
end.
%% Remove a bridge after checking its dependents (e.g. rule actions).
%% Normalizes the type, then dispatches: bridge-v2 types go through
%% emqx_bridge_v2:bridge_v1_check_deps_and_remove/3, v1 types through
%% the local do_check_deps_and_remove/3. RemoveDeps controls whether
%% dependents are withdrawn as part of removal.
check_deps_and_remove(BridgeType0, BridgeName, RemoveDeps) ->
BridgeType = upgrade_type(BridgeType0),
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true ->
emqx_bridge_v2:bridge_v1_check_deps_and_remove(
BridgeType,
BridgeName,
RemoveDeps
);
false ->
do_check_deps_and_remove(BridgeType, BridgeName, RemoveDeps)
end.
do_check_deps_and_remove(BridgeType, BridgeName, RemoveDeps) ->
case emqx_bridge_lib:maybe_withdraw_rule_action(BridgeType, BridgeName, RemoveDeps) of
ok ->
remove(BridgeType, BridgeName);
RuleIds when RemoveDeps =:= false ->
{error, {rules_deps_on_this_bridge, RuleIds}};
RuleIds when RemoveDeps =:= true ->
lists:foreach(
fun(R) ->
emqx_rule_engine:ensure_action_removed(R, BridgeId)
end,
RuleIds
),
remove(BridgeType, BridgeName)
{error, Reason} ->
{error, Reason}
end.
%%----------------------------------------------------------------------------------------
@ -381,15 +444,18 @@ check_deps_and_remove(BridgeType, BridgeName, RemoveDeps) ->
%%----------------------------------------------------------------------------------------
import_config(RawConf) ->
RootKeyPath = config_key_path(),
BridgesConf = maps:get(<<"bridges">>, RawConf, #{}),
import_config(RawConf, <<"bridges">>, ?ROOT_KEY, config_key_path()).
%% Used in emqx_bridge_v2
import_config(RawConf, RawConfKey, RootKey, RootKeyPath) ->
BridgesConf = maps:get(RawConfKey, RawConf, #{}),
OldBridgesConf = emqx:get_raw_config(RootKeyPath, #{}),
MergedConf = merge_confs(OldBridgesConf, BridgesConf),
case emqx_conf:update(RootKeyPath, MergedConf, #{override_to => cluster}) of
{ok, #{raw_config := NewRawConf}} ->
{ok, #{root_key => ?ROOT_KEY, changed => changed_paths(OldBridgesConf, NewRawConf)}};
{ok, #{root_key => RootKey, changed => changed_paths(OldBridgesConf, NewRawConf)}};
Error ->
{error, #{root_key => ?ROOT_KEY, reason => Error}}
{error, #{root_key => RootKey, reason => Error}}
end.
merge_confs(OldConf, NewConf) ->
@ -600,3 +666,6 @@ validate_bridge_name(BridgeName0) ->
to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8);
to_bin(B) when is_binary(B) -> B.
%% Thin delegation to emqx_bridge_lib:upgrade_type/1.
%% NOTE(review): presumably maps legacy/aliased bridge type names to
%% their current form — confirm against emqx_bridge_lib.
upgrade_type(Type) ->
emqx_bridge_lib:upgrade_type(Type).

View File

@ -456,10 +456,13 @@ schema("/bridges_probe") ->
}
}.
'/bridges'(post, #{body := #{<<"type">> := BridgeType, <<"name">> := BridgeName} = Conf0}) ->
'/bridges'(post, #{body := #{<<"type">> := BridgeType0, <<"name">> := BridgeName} = Conf0}) ->
BridgeType = upgrade_type(BridgeType0),
case emqx_bridge:lookup(BridgeType, BridgeName) of
{ok, _} ->
?BAD_REQUEST('ALREADY_EXISTS', <<"bridge already exists">>);
{error, not_bridge_v1_compatible} ->
?BAD_REQUEST('ALREADY_EXISTS', non_compat_bridge_msg());
{error, not_found} ->
Conf = filter_out_request_body(Conf0),
create_bridge(BridgeType, BridgeName, Conf)
@ -485,12 +488,14 @@ schema("/bridges_probe") ->
?TRY_PARSE_ID(
Id,
case emqx_bridge:lookup(BridgeType, BridgeName) of
{ok, _} ->
RawConf = emqx:get_raw_config([bridges, BridgeType, BridgeName], #{}),
{ok, #{raw_config := RawConf}} ->
%% TODO will the maybe_upgrade step done by emqx_bridge:lookup cause any problems
Conf = deobfuscate(Conf1, RawConf),
update_bridge(BridgeType, BridgeName, Conf);
{error, not_found} ->
?BRIDGE_NOT_FOUND(BridgeType, BridgeName)
?BRIDGE_NOT_FOUND(BridgeType, BridgeName);
{error, not_bridge_v1_compatible} ->
?BAD_REQUEST('ALREADY_EXISTS', non_compat_bridge_msg())
end
);
'/bridges/:id'(delete, #{bindings := #{id := Id}, query_string := Qs}) ->
@ -498,27 +503,33 @@ schema("/bridges_probe") ->
Id,
case emqx_bridge:lookup(BridgeType, BridgeName) of
{ok, _} ->
AlsoDeleteActs =
AlsoDelete =
case maps:get(<<"also_delete_dep_actions">>, Qs, <<"false">>) of
<<"true">> -> true;
true -> true;
_ -> false
<<"true">> -> [rule_actions, connector];
true -> [rule_actions, connector];
_ -> []
end,
case emqx_bridge:check_deps_and_remove(BridgeType, BridgeName, AlsoDeleteActs) of
{ok, _} ->
case emqx_bridge:check_deps_and_remove(BridgeType, BridgeName, AlsoDelete) of
ok ->
?NO_CONTENT;
{error, {rules_deps_on_this_bridge, RuleIds}} ->
?BAD_REQUEST(
{<<"Cannot delete bridge while active rules are defined for this bridge">>,
RuleIds}
);
{error, #{
reason := rules_depending_on_this_bridge,
rule_ids := RuleIds
}} ->
RulesStr = [[" ", I] || I <- RuleIds],
Msg = bin([
"Cannot delete bridge while active rules are depending on it:", RulesStr
]),
?BAD_REQUEST(Msg);
{error, timeout} ->
?SERVICE_UNAVAILABLE(<<"request timeout">>);
{error, Reason} ->
?INTERNAL_ERROR(Reason)
end;
{error, not_found} ->
?BRIDGE_NOT_FOUND(BridgeType, BridgeName)
?BRIDGE_NOT_FOUND(BridgeType, BridgeName);
{error, not_bridge_v1_compatible} ->
?BAD_REQUEST('ALREADY_EXISTS', non_compat_bridge_msg())
end
).
@ -528,20 +539,26 @@ schema("/bridges_probe") ->
'/bridges/:id/metrics/reset'(put, #{bindings := #{id := Id}}) ->
?TRY_PARSE_ID(
Id,
begin
ok = emqx_bridge_resource:reset_metrics(
emqx_bridge_resource:resource_id(BridgeType, BridgeName)
),
?NO_CONTENT
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true ->
BridgeV2Type = emqx_bridge_v2:bridge_v2_type_to_connector_type(BridgeType),
ok = emqx_bridge_v2:reset_metrics(BridgeV2Type, BridgeName),
?NO_CONTENT;
false ->
ok = emqx_bridge_resource:reset_metrics(
emqx_bridge_resource:resource_id(BridgeType, BridgeName)
),
?NO_CONTENT
end
).
'/bridges_probe'(post, Request) ->
RequestMeta = #{module => ?MODULE, method => post, path => "/bridges_probe"},
case emqx_dashboard_swagger:filter_check_request_and_translate_body(Request, RequestMeta) of
{ok, #{body := #{<<"type">> := ConnType} = Params}} ->
{ok, #{body := #{<<"type">> := BridgeType} = Params}} ->
Params1 = maybe_deobfuscate_bridge_probe(Params),
case emqx_bridge_resource:create_dry_run(ConnType, maps:remove(<<"type">>, Params1)) of
Params2 = maps:remove(<<"type">>, Params1),
case emqx_bridge_resource:create_dry_run(BridgeType, Params2) of
ok ->
?NO_CONTENT;
{error, #{kind := validation_error} = Reason0} ->
@ -560,10 +577,12 @@ schema("/bridges_probe") ->
redact(BadRequest)
end.
maybe_deobfuscate_bridge_probe(#{<<"type">> := BridgeType, <<"name">> := BridgeName} = Params) ->
maybe_deobfuscate_bridge_probe(#{<<"type">> := BridgeType0, <<"name">> := BridgeName} = Params) ->
BridgeType = upgrade_type(BridgeType0),
case emqx_bridge:lookup(BridgeType, BridgeName) of
{ok, _} ->
RawConf = emqx:get_raw_config([bridges, BridgeType, BridgeName], #{}),
{ok, #{raw_config := RawConf}} ->
%% TODO check if RawConf obtained above is compatible with the commented out code below
%% RawConf = emqx:get_raw_config([bridges, BridgeType, BridgeName], #{}),
deobfuscate(Params, RawConf);
_ ->
%% A bridge may be probed before it's created, so not finding it here is fine
@ -589,6 +608,8 @@ lookup_from_all_nodes(BridgeType, BridgeName, SuccCode) ->
{SuccCode, format_bridge_info([R || {ok, R} <- Results])};
{ok, [{error, not_found} | _]} ->
?BRIDGE_NOT_FOUND(BridgeType, BridgeName);
{ok, [{error, not_bridge_v1_compatible} | _]} ->
?NOT_FOUND(non_compat_bridge_msg());
{error, Reason} ->
?INTERNAL_ERROR(Reason)
end.
@ -603,9 +624,20 @@ create_bridge(BridgeType, BridgeName, Conf) ->
create_or_update_bridge(BridgeType, BridgeName, Conf, 201).
update_bridge(BridgeType, BridgeName, Conf) ->
create_or_update_bridge(BridgeType, BridgeName, Conf, 200).
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true ->
case emqx_bridge_v2:is_valid_bridge_v1(BridgeType, BridgeName) of
true ->
create_or_update_bridge(BridgeType, BridgeName, Conf, 200);
false ->
?NOT_FOUND(non_compat_bridge_msg())
end;
false ->
create_or_update_bridge(BridgeType, BridgeName, Conf, 200)
end.
create_or_update_bridge(BridgeType, BridgeName, Conf, HttpStatusCode) ->
create_or_update_bridge(BridgeType0, BridgeName, Conf, HttpStatusCode) ->
BridgeType = upgrade_type(BridgeType0),
case emqx_bridge:create(BridgeType, BridgeName, Conf) of
{ok, _} ->
lookup_from_all_nodes(BridgeType, BridgeName, HttpStatusCode);
@ -615,7 +647,8 @@ create_or_update_bridge(BridgeType, BridgeName, Conf, HttpStatusCode) ->
?BAD_REQUEST(map_to_json(redact(Reason)))
end.
get_metrics_from_local_node(BridgeType, BridgeName) ->
get_metrics_from_local_node(BridgeType0, BridgeName) ->
BridgeType = upgrade_type(BridgeType0),
format_metrics(emqx_bridge:get_metrics(BridgeType, BridgeName)).
'/bridges/:id/enable/:enable'(put, #{bindings := #{id := Id, enable := Enable}}) ->
@ -650,7 +683,7 @@ get_metrics_from_local_node(BridgeType, BridgeName) ->
invalid ->
?NOT_FOUND(<<"Invalid operation: ", Op/binary>>);
OperFunc ->
try is_enabled_bridge(BridgeType, BridgeName) of
try is_bridge_enabled(BridgeType, BridgeName) of
false ->
?BRIDGE_NOT_ENABLED;
true ->
@ -673,7 +706,7 @@ get_metrics_from_local_node(BridgeType, BridgeName) ->
invalid ->
?NOT_FOUND(<<"Invalid operation: ", Op/binary>>);
OperFunc ->
try is_enabled_bridge(BridgeType, BridgeName) of
try is_bridge_enabled(BridgeType, BridgeName) of
false ->
?BRIDGE_NOT_ENABLED;
true ->
@ -692,7 +725,14 @@ get_metrics_from_local_node(BridgeType, BridgeName) ->
end
).
is_enabled_bridge(BridgeType, BridgeName) ->
%% Check whether a bridge is enabled, dispatching to the v2 or v1
%% implementation based on the bridge type.
%% Throws `not_found' (from the helpers) when the bridge does not exist.
is_bridge_enabled(BridgeType, BridgeName) ->
    case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
        true -> is_bridge_enabled_v2(BridgeType, BridgeName);
        false -> is_bridge_enabled_v1(BridgeType, BridgeName)
    end.
is_bridge_enabled_v1(BridgeType, BridgeName) ->
%% we read from the translated config because the defaults are populated here.
try emqx:get_config([bridges, BridgeType, binary_to_existing_atom(BridgeName)]) of
ConfMap ->
maps:get(enable, ConfMap, false)
@ -705,6 +745,20 @@ is_enabled_bridge(BridgeType, BridgeName) ->
throw(not_found)
end.
%% Check the `enable' flag of a bridge-v2 in the translated config,
%% mapping the v1 type name to its v2 type first.
%% Note the default here is `true' (v1 defaults to `false').
%% Throws `not_found' when the config entry or the name atom does not exist.
is_bridge_enabled_v2(BridgeV1Type, BridgeName) ->
    BridgeV2Type = emqx_bridge_v2:bridge_v1_type_to_bridge_v2_type(BridgeV1Type),
    try emqx:get_config([bridges_v2, BridgeV2Type, binary_to_existing_atom(BridgeName)]) of
        ConfMap ->
            maps:get(enable, ConfMap, true)
    catch
        error:{config_not_found, _} ->
            throw(not_found);
        error:badarg ->
            %% catch non-existing atom,
            %% a non-existing atom means it is not available in config PT storage.
            throw(not_found)
    end.
node_operation_func(<<"restart">>) -> restart_bridge_to_node;
node_operation_func(<<"start">>) -> start_bridge_to_node;
node_operation_func(<<"stop">>) -> stop_bridge_to_node;
@ -837,11 +891,18 @@ format_resource(
},
Node
) ->
RawConfFull = fill_defaults(Type, RawConf),
RawConfFull =
case emqx_bridge_v2:is_bridge_v2_type(Type) of
true ->
%% The defaults are already filled in
RawConf;
false ->
fill_defaults(Type, RawConf)
end,
redact(
maps:merge(
RawConfFull#{
type => Type,
type => downgrade_type(Type),
name => maps:get(<<"name">>, RawConf, BridgeName),
node => Node
},
@ -1048,10 +1109,10 @@ maybe_unwrap({error, not_implemented}) ->
maybe_unwrap(RpcMulticallResult) ->
emqx_rpc:unwrap_erpc(RpcMulticallResult).
supported_versions(start_bridge_to_node) -> [2, 3, 4];
supported_versions(start_bridges_to_all_nodes) -> [2, 3, 4];
supported_versions(get_metrics_from_all_nodes) -> [4];
supported_versions(_Call) -> [1, 2, 3, 4].
supported_versions(start_bridge_to_node) -> [2, 3, 4, 5];
supported_versions(start_bridges_to_all_nodes) -> [2, 3, 4, 5];
supported_versions(get_metrics_from_all_nodes) -> [4, 5];
supported_versions(_Call) -> [1, 2, 3, 4, 5].
redact(Term) ->
emqx_utils:redact(Term).
@ -1089,3 +1150,12 @@ map_to_json(M0) ->
M2 = maps:without([value, <<"value">>], M1),
emqx_utils_json:encode(M2)
end.
non_compat_bridge_msg() ->
<<"bridge already exists as non Bridge V1 compatible Bridge V2 bridge">>.
upgrade_type(Type) ->
emqx_bridge_lib:upgrade_type(Type).
downgrade_type(Type) ->
emqx_bridge_lib:downgrade_type(Type).

View File

@ -18,7 +18,6 @@
-behaviour(application).
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-export([start/2, stop/1]).
-export([
@ -33,6 +32,7 @@ start(_StartType, _StartArgs) ->
{ok, Sup} = emqx_bridge_sup:start_link(),
ok = ensure_enterprise_schema_loaded(),
ok = emqx_bridge:load(),
ok = emqx_bridge_v2:load(),
ok = emqx_bridge:load_hook(),
ok = emqx_config_handler:add_handler(?LEAF_NODE_HDLR_PATH, ?MODULE),
ok = emqx_config_handler:add_handler(?TOP_LELVE_HDLR_PATH, emqx_bridge),
@ -43,6 +43,7 @@ stop(_State) ->
emqx_conf:remove_handler(?LEAF_NODE_HDLR_PATH),
emqx_conf:remove_handler(?TOP_LELVE_HDLR_PATH),
ok = emqx_bridge:unload(),
ok = emqx_bridge_v2:unload(),
ok.
-if(?EMQX_RELEASE_EDITION == ee).
@ -56,7 +57,7 @@ ensure_enterprise_schema_loaded() ->
%% NOTE: We depend on the `emqx_bridge:pre_config_update/3` to restart/stop the
%% underlying resources.
pre_config_update(_, {_Oper, _, _}, undefined) ->
pre_config_update(_, {_Oper, _Type, _Name}, undefined) ->
{error, bridge_not_found};
pre_config_update(_, {Oper, _Type, _Name}, OldConfig) ->
%% to save the 'enable' to the config files

View File

@ -0,0 +1,89 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_lib).
-export([
maybe_withdraw_rule_action/3,
upgrade_type/1,
downgrade_type/1
]).
%% @doc A bridge can be used as a rule action.
%% The bridge-ID in rule-engine's world is the action-ID.
%% This function is to remove a bridge (action) from all rules
%% using it if the `rule_actions' is included in `DeleteDeps' list.
%% Both the legacy and current type names are checked (see external_ids/2),
%% since existing rules may reference either.
maybe_withdraw_rule_action(BridgeType, BridgeName, DeleteDeps) ->
    BridgeIds = external_ids(BridgeType, BridgeName),
    DeleteActions = lists:member(rule_actions, DeleteDeps),
    maybe_withdraw_rule_action_loop(BridgeIds, DeleteActions).
%% Walk over each candidate bridge-ID; either detach it from every rule
%% referencing it (when DeleteActions is true) or return an error map
%% describing which rules still depend on the bridge.
maybe_withdraw_rule_action_loop([], _DeleteActions) ->
    ok;
maybe_withdraw_rule_action_loop([BridgeId | More], DeleteActions) ->
    case emqx_rule_engine:get_rule_ids_by_action(BridgeId) of
        [] ->
            %% no rule references this ID, move on
            maybe_withdraw_rule_action_loop(More, DeleteActions);
        RuleIds when DeleteActions ->
            %% remove the action from every rule that references it
            lists:foreach(
                fun(R) ->
                    emqx_rule_engine:ensure_action_removed(R, BridgeId)
                end,
                RuleIds
            ),
            maybe_withdraw_rule_action_loop(More, DeleteActions);
        RuleIds ->
            %% rules depend on this bridge and we were not asked to
            %% delete the actions: report the blocking rule IDs
            {error, #{
                reason => rules_depending_on_this_bridge,
                bridge_id => BridgeId,
                rule_ids => RuleIds
            }}
    end.
%% @doc Kafka producer bridge type renamed from 'kafka' to 'kafka_producer'
%% since 5.3.1. Map the old name (atom or binary) to the new one;
%% all other types pass through unchanged.
upgrade_type(kafka) ->
    kafka_producer;
upgrade_type(<<"kafka">>) ->
    <<"kafka_producer">>;
upgrade_type(Other) ->
    Other.
%% @doc Inverse of upgrade_type/1: map 'kafka_producer' (atom or binary)
%% back to the legacy 'kafka' type name (renamed since 5.3.1);
%% all other types pass through unchanged.
downgrade_type(kafka_producer) ->
    kafka;
downgrade_type(<<"kafka_producer">>) ->
    <<"kafka">>;
downgrade_type(Other) ->
    Other.
%% A rule might be referencing an old version bridge type name,
%% i.e. 'kafka' instead of 'kafka_producer', so both IDs are produced
%% when the type has a distinct legacy name.
external_ids(Type, Name) ->
    LegacyType = downgrade_type(Type),
    CurrentId = external_id(Type, Name),
    case LegacyType =:= Type of
        true -> [CurrentId];
        false -> [external_id(LegacyType, Name), CurrentId]
    end.
%% Creates the external id used by rule actions to refer to the
%% bridge_v2, in the form <<"Type:Name">>.
external_id(BridgeType, BridgeName) ->
    <<(bin(BridgeType))/binary, ":", (bin(BridgeName))/binary>>.
%% Coerce an atom or binary to binary form.
bin(B) when is_binary(B) -> B;
bin(A) when is_atom(A) -> atom_to_binary(A, utf8).

View File

@ -80,7 +80,17 @@ bridge_impl_module(_BridgeType) -> undefined.
-endif.
resource_id(BridgeId) when is_binary(BridgeId) ->
<<"bridge:", BridgeId/binary>>.
case binary:split(BridgeId, <<":">>) of
[Type, _Name] ->
case emqx_bridge_v2:is_bridge_v2_type(Type) of
true ->
emqx_bridge_v2:bridge_v1_id_to_connector_resource_id(BridgeId);
false ->
<<"bridge:", BridgeId/binary>>
end;
_ ->
invalid_data(<<"should be of pattern {type}:{name}, but got ", BridgeId/binary>>)
end.
resource_id(BridgeType, BridgeName) ->
BridgeId = bridge_id(BridgeType, BridgeName),
@ -100,6 +110,8 @@ parse_bridge_id(BridgeId, Opts) ->
case string:split(bin(BridgeId), ":", all) of
[Type, Name] ->
{to_type_atom(Type), validate_name(Name, Opts)};
[Bridge, Type, Name] when Bridge =:= <<"bridge">>; Bridge =:= "bridge" ->
{to_type_atom(Type), validate_name(Name, Opts)};
_ ->
invalid_data(
<<"should be of pattern {type}:{name}, but got ", BridgeId/binary>>
@ -145,6 +157,9 @@ is_id_char($-) -> true;
is_id_char($.) -> true;
is_id_char(_) -> false.
to_type_atom(<<"kafka">>) ->
%% backward compatible
kafka_producer;
to_type_atom(Type) ->
try
erlang:binary_to_existing_atom(Type, utf8)
@ -154,16 +169,44 @@ to_type_atom(Type) ->
end.
reset_metrics(ResourceId) ->
emqx_resource:reset_metrics(ResourceId).
%% TODO we should not create atoms here
{Type, Name} = parse_bridge_id(ResourceId),
case emqx_bridge_v2:is_bridge_v2_type(Type) of
false ->
emqx_resource:reset_metrics(ResourceId);
true ->
case emqx_bridge_v2:is_valid_bridge_v1(Type, Name) of
true ->
BridgeV2Type = emqx_bridge_v2:bridge_v2_type_to_connector_type(Type),
emqx_bridge_v2:reset_metrics(BridgeV2Type, Name);
false ->
{error, not_bridge_v1_compatible}
end
end.
restart(Type, Name) ->
emqx_resource:restart(resource_id(Type, Name)).
case emqx_bridge_v2:is_bridge_v2_type(Type) of
false ->
emqx_resource:restart(resource_id(Type, Name));
true ->
emqx_bridge_v2:bridge_v1_restart(Type, Name)
end.
stop(Type, Name) ->
emqx_resource:stop(resource_id(Type, Name)).
case emqx_bridge_v2:is_bridge_v2_type(Type) of
false ->
emqx_resource:stop(resource_id(Type, Name));
true ->
emqx_bridge_v2:bridge_v1_stop(Type, Name)
end.
start(Type, Name) ->
emqx_resource:start(resource_id(Type, Name)).
case emqx_bridge_v2:is_bridge_v2_type(Type) of
false ->
emqx_resource:start(resource_id(Type, Name));
true ->
emqx_bridge_v2:bridge_v1_start(Type, Name)
end.
create(BridgeId, Conf) ->
{BridgeType, BridgeName} = parse_bridge_id(BridgeId),
@ -257,7 +300,16 @@ recreate(Type, Name, Conf0, Opts) ->
parse_opts(Conf, Opts)
).
create_dry_run(Type, Conf0) ->
create_dry_run(Type0, Conf0) ->
Type = emqx_bridge_lib:upgrade_type(Type0),
case emqx_bridge_v2:is_bridge_v2_type(Type) of
false ->
create_dry_run_bridge_v1(Type, Conf0);
true ->
emqx_bridge_v2:bridge_v1_create_dry_run(Type, Conf0)
end.
create_dry_run_bridge_v1(Type, Conf0) ->
TmpName = iolist_to_binary([?TEST_ID_PREFIX, emqx_utils:gen_id(8)]),
TmpPath = emqx_utils:safe_filename(TmpName),
%% Already typechecked, no need to catch errors
@ -297,6 +349,7 @@ remove(Type, Name) ->
%% just for perform_bridge_changes/1
remove(Type, Name, _Conf, _Opts) ->
%% TODO we need to handle bridge_v2 here
?SLOG(info, #{msg => "remove_bridge", type => Type, name => Name}),
emqx_resource:remove_local(resource_id(Type, Name)).

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,760 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_v2_api).
-behaviour(minirest_api).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("emqx_utils/include/emqx_utils_api.hrl").
-import(hoconsc, [mk/2, array/1, enum/1]).
-import(emqx_utils, [redact/1]).
%% Swagger specs from hocon schema
-export([
api_spec/0,
paths/0,
schema/1,
namespace/0
]).
%% API callbacks
-export([
'/bridges_v2'/2,
'/bridges_v2/:id'/2,
'/bridges_v2/:id/enable/:enable'/2,
'/bridges_v2/:id/:operation'/2,
'/nodes/:node/bridges_v2/:id/:operation'/2,
'/bridges_v2_probe'/2
]).
%% BpAPI
-export([lookup_from_local_node/2]).
-define(BRIDGE_NOT_FOUND(BRIDGE_TYPE, BRIDGE_NAME),
?NOT_FOUND(
<<"Bridge lookup failed: bridge named '", (bin(BRIDGE_NAME))/binary, "' of type ",
(bin(BRIDGE_TYPE))/binary, " does not exist.">>
)
).
-define(BRIDGE_NOT_ENABLED,
?BAD_REQUEST(<<"Forbidden operation, bridge not enabled">>)
).
-define(TRY_PARSE_ID(ID, EXPR),
try emqx_bridge_resource:parse_bridge_id(Id, #{atom_name => false}) of
{BridgeType, BridgeName} ->
EXPR
catch
throw:#{reason := Reason} ->
?NOT_FOUND(<<"Invalid bridge ID, ", Reason/binary>>)
end
).
namespace() -> "bridge_v2".
api_spec() ->
emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}).
paths() ->
[
"/bridges_v2",
"/bridges_v2/:id",
"/bridges_v2/:id/enable/:enable",
"/bridges_v2/:id/:operation",
"/nodes/:node/bridges_v2/:id/:operation",
"/bridges_v2_probe"
].
error_schema(Code, Message) when is_atom(Code) ->
error_schema([Code], Message);
error_schema(Codes, Message) when is_list(Message) ->
error_schema(Codes, list_to_binary(Message));
error_schema(Codes, Message) when is_list(Codes) andalso is_binary(Message) ->
emqx_dashboard_swagger:error_codes(Codes, Message).
get_response_body_schema() ->
emqx_dashboard_swagger:schema_with_examples(
emqx_bridge_v2_schema:get_response(),
bridge_info_examples(get)
).
bridge_info_examples(Method) ->
maps:merge(
#{},
emqx_enterprise_bridge_examples(Method)
).
bridge_info_array_example(Method) ->
lists:map(fun(#{value := Config}) -> Config end, maps:values(bridge_info_examples(Method))).
-if(?EMQX_RELEASE_EDITION == ee).
emqx_enterprise_bridge_examples(Method) ->
emqx_bridge_v2_enterprise:examples(Method).
-else.
emqx_enterprise_bridge_examples(_Method) -> #{}.
-endif.
param_path_id() ->
{id,
mk(
binary(),
#{
in => path,
required => true,
example => <<"webhook:webhook_example">>,
desc => ?DESC("desc_param_path_id")
}
)}.
param_path_operation_cluster() ->
{operation,
mk(
enum([start]),
#{
in => path,
required => true,
example => <<"start">>,
desc => ?DESC("desc_param_path_operation_cluster")
}
)}.
param_path_operation_on_node() ->
{operation,
mk(
enum([start]),
#{
in => path,
required => true,
example => <<"start">>,
desc => ?DESC("desc_param_path_operation_on_node")
}
)}.
param_path_node() ->
{node,
mk(
binary(),
#{
in => path,
required => true,
example => <<"emqx@127.0.0.1">>,
desc => ?DESC("desc_param_path_node")
}
)}.
param_path_enable() ->
{enable,
mk(
boolean(),
#{
in => path,
required => true,
desc => ?DESC("desc_param_path_enable"),
example => true
}
)}.
schema("/bridges_v2") ->
#{
'operationId' => '/bridges_v2',
get => #{
tags => [<<"bridges_v2">>],
summary => <<"List bridges">>,
description => ?DESC("desc_api1"),
responses => #{
200 => emqx_dashboard_swagger:schema_with_example(
array(emqx_bridge_v2_schema:get_response()),
bridge_info_array_example(get)
)
}
},
post => #{
tags => [<<"bridges_v2">>],
summary => <<"Create bridge">>,
description => ?DESC("desc_api2"),
'requestBody' => emqx_dashboard_swagger:schema_with_examples(
emqx_bridge_v2_schema:post_request(),
bridge_info_examples(post)
),
responses => #{
201 => get_response_body_schema(),
400 => error_schema('ALREADY_EXISTS', "Bridge already exists")
}
}
};
schema("/bridges_v2/:id") ->
#{
'operationId' => '/bridges_v2/:id',
get => #{
tags => [<<"bridges_v2">>],
summary => <<"Get bridge">>,
description => ?DESC("desc_api3"),
parameters => [param_path_id()],
responses => #{
200 => get_response_body_schema(),
404 => error_schema('NOT_FOUND', "Bridge not found")
}
},
put => #{
tags => [<<"bridges_v2">>],
summary => <<"Update bridge">>,
description => ?DESC("desc_api4"),
parameters => [param_path_id()],
'requestBody' => emqx_dashboard_swagger:schema_with_examples(
emqx_bridge_v2_schema:put_request(),
bridge_info_examples(put)
),
responses => #{
200 => get_response_body_schema(),
404 => error_schema('NOT_FOUND', "Bridge not found"),
400 => error_schema('BAD_REQUEST', "Update bridge failed")
}
},
delete => #{
tags => [<<"bridges_v2">>],
summary => <<"Delete bridge">>,
description => ?DESC("desc_api5"),
parameters => [param_path_id()],
responses => #{
204 => <<"Bridge deleted">>,
400 => error_schema(
'BAD_REQUEST',
"Cannot delete bridge while active rules are defined for this bridge"
),
404 => error_schema('NOT_FOUND', "Bridge not found"),
503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable")
}
}
};
schema("/bridges_v2/:id/enable/:enable") ->
#{
'operationId' => '/bridges_v2/:id/enable/:enable',
put =>
#{
tags => [<<"bridges_v2">>],
summary => <<"Enable or disable bridge">>,
desc => ?DESC("desc_enable_bridge"),
parameters => [param_path_id(), param_path_enable()],
responses =>
#{
204 => <<"Success">>,
404 => error_schema(
'NOT_FOUND', "Bridge not found or invalid operation"
),
503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable")
}
}
};
schema("/bridges_v2/:id/:operation") ->
#{
'operationId' => '/bridges_v2/:id/:operation',
post => #{
tags => [<<"bridges_v2">>],
summary => <<"Manually start a bridge">>,
description => ?DESC("desc_api7"),
parameters => [
param_path_id(),
param_path_operation_cluster()
],
responses => #{
204 => <<"Operation success">>,
400 => error_schema(
'BAD_REQUEST', "Problem with configuration of external service"
),
404 => error_schema('NOT_FOUND', "Bridge not found or invalid operation"),
501 => error_schema('NOT_IMPLEMENTED', "Not Implemented"),
503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable")
}
}
};
schema("/nodes/:node/bridges_v2/:id/:operation") ->
#{
'operationId' => '/nodes/:node/bridges_v2/:id/:operation',
post => #{
tags => [<<"bridges_v2">>],
summary => <<"Manually start a bridge">>,
description => ?DESC("desc_api8"),
parameters => [
param_path_node(),
param_path_id(),
param_path_operation_on_node()
],
responses => #{
204 => <<"Operation success">>,
400 => error_schema(
'BAD_REQUEST',
"Problem with configuration of external service or bridge not enabled"
),
404 => error_schema(
'NOT_FOUND', "Bridge or node not found or invalid operation"
),
501 => error_schema('NOT_IMPLEMENTED', "Not Implemented"),
503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable")
}
}
};
schema("/bridges_v2_probe") ->
#{
'operationId' => '/bridges_v2_probe',
post => #{
tags => [<<"bridges_v2">>],
desc => ?DESC("desc_api9"),
summary => <<"Test creating bridge">>,
'requestBody' => emqx_dashboard_swagger:schema_with_examples(
emqx_bridge_v2_schema:post_request(),
bridge_info_examples(post)
),
responses => #{
204 => <<"Test bridge OK">>,
400 => error_schema(['TEST_FAILED'], "bridge test failed")
}
}
}.
'/bridges_v2'(post, #{body := #{<<"type">> := BridgeType, <<"name">> := BridgeName} = Conf0}) ->
case emqx_bridge_v2:lookup(BridgeType, BridgeName) of
{ok, _} ->
?BAD_REQUEST('ALREADY_EXISTS', <<"bridge already exists">>);
{error, not_found} ->
Conf = filter_out_request_body(Conf0),
create_bridge(BridgeType, BridgeName, Conf)
end;
'/bridges_v2'(get, _Params) ->
Nodes = mria:running_nodes(),
NodeReplies = emqx_bridge_proto_v5:v2_list_bridges_on_nodes(Nodes),
case is_ok(NodeReplies) of
{ok, NodeBridges} ->
AllBridges = [
[format_resource(Data, Node) || Data <- Bridges]
|| {Node, Bridges} <- lists:zip(Nodes, NodeBridges)
],
?OK(zip_bridges(AllBridges));
{error, Reason} ->
?INTERNAL_ERROR(Reason)
end.
'/bridges_v2/:id'(get, #{bindings := #{id := Id}}) ->
?TRY_PARSE_ID(Id, lookup_from_all_nodes(BridgeType, BridgeName, 200));
'/bridges_v2/:id'(put, #{bindings := #{id := Id}, body := Conf0}) ->
Conf1 = filter_out_request_body(Conf0),
?TRY_PARSE_ID(
Id,
case emqx_bridge_v2:lookup(BridgeType, BridgeName) of
{ok, _} ->
RawConf = emqx:get_raw_config([bridges, BridgeType, BridgeName], #{}),
Conf = deobfuscate(Conf1, RawConf),
update_bridge(BridgeType, BridgeName, Conf);
{error, not_found} ->
?BRIDGE_NOT_FOUND(BridgeType, BridgeName)
end
);
'/bridges_v2/:id'(delete, #{bindings := #{id := Id}}) ->
?TRY_PARSE_ID(
Id,
case emqx_bridge_v2:lookup(BridgeType, BridgeName) of
{ok, _} ->
case emqx_bridge_v2:remove(BridgeType, BridgeName) of
ok ->
?NO_CONTENT;
{error, {active_channels, Channels}} ->
?BAD_REQUEST(
{<<"Cannot delete bridge while there are active channels defined for this bridge">>,
Channels}
);
{error, timeout} ->
?SERVICE_UNAVAILABLE(<<"request timeout">>);
{error, Reason} ->
?INTERNAL_ERROR(Reason)
end;
{error, not_found} ->
?BRIDGE_NOT_FOUND(BridgeType, BridgeName)
end
).
'/bridges_v2/:id/enable/:enable'(put, #{bindings := #{id := Id, enable := Enable}}) ->
?TRY_PARSE_ID(
Id,
case emqx_bridge_v2:disable_enable(enable_func(Enable), BridgeType, BridgeName) of
{ok, _} ->
?NO_CONTENT;
{error, {pre_config_update, _, not_found}} ->
?BRIDGE_NOT_FOUND(BridgeType, BridgeName);
{error, {_, _, timeout}} ->
?SERVICE_UNAVAILABLE(<<"request timeout">>);
{error, timeout} ->
?SERVICE_UNAVAILABLE(<<"request timeout">>);
{error, Reason} ->
?INTERNAL_ERROR(Reason)
end
).
'/bridges_v2/:id/:operation'(post, #{
bindings :=
#{id := Id, operation := Op}
}) ->
?TRY_PARSE_ID(
Id,
begin
OperFunc = operation_func(all, Op),
Nodes = mria:running_nodes(),
call_operation_if_enabled(all, OperFunc, [Nodes, BridgeType, BridgeName])
end
).
'/nodes/:node/bridges_v2/:id/:operation'(post, #{
bindings :=
#{id := Id, operation := Op, node := Node}
}) ->
?TRY_PARSE_ID(
Id,
case emqx_utils:safe_to_existing_atom(Node, utf8) of
{ok, TargetNode} ->
OperFunc = operation_func(TargetNode, Op),
call_operation_if_enabled(TargetNode, OperFunc, [TargetNode, BridgeType, BridgeName]);
{error, _} ->
?NOT_FOUND(<<"Invalid node name: ", Node/binary>>)
end
).
'/bridges_v2_probe'(post, Request) ->
RequestMeta = #{module => ?MODULE, method => post, path => "/bridges_v2_probe"},
case emqx_dashboard_swagger:filter_check_request_and_translate_body(Request, RequestMeta) of
{ok, #{body := #{<<"type">> := ConnType} = Params}} ->
Params1 = maybe_deobfuscate_bridge_probe(Params),
Params2 = maps:remove(<<"type">>, Params1),
case emqx_bridge_v2:create_dry_run(ConnType, Params2) of
ok ->
?NO_CONTENT;
{error, #{kind := validation_error} = Reason0} ->
Reason = redact(Reason0),
?BAD_REQUEST('TEST_FAILED', map_to_json(Reason));
{error, Reason0} when not is_tuple(Reason0); element(1, Reason0) =/= 'exit' ->
Reason1 =
case Reason0 of
{unhealthy_target, Message} -> Message;
_ -> Reason0
end,
Reason = redact(Reason1),
?BAD_REQUEST('TEST_FAILED', Reason)
end;
BadRequest ->
redact(BadRequest)
end.
%% Restore redacted secret values in a probe request from the stored
%% bridge raw config, so probing an existing bridge with obfuscated
%% values still works. Params without type/name pass through unchanged.
maybe_deobfuscate_bridge_probe(#{<<"type">> := BridgeType, <<"name">> := BridgeName} = Params) ->
    case emqx_bridge:lookup(BridgeType, BridgeName) of
        {ok, #{raw_config := RawConf}} ->
            %% TODO check if RawConf obtained above is compatible with the commented out code below
            %% RawConf = emqx:get_raw_config([bridges, BridgeType, BridgeName], #{}),
            deobfuscate(Params, RawConf);
        _ ->
            %% A bridge may be probed before it's created, so not finding it here is fine
            Params
    end;
maybe_deobfuscate_bridge_probe(Params) ->
    Params.
%%% API helpers
%% Collapse a result (or a list of per-call results) into a single
%% ok/error term: for a list, the first non-ok element wins; otherwise
%% the ok-payloads are collected into {ok, List} (bare 'ok's are dropped).
is_ok(ok) ->
    ok;
is_ok({ok, _} = Ok) ->
    Ok;
is_ok({error, _} = Err) ->
    Err;
is_ok(Results) ->
    IsFailure = fun
        (ok) -> false;
        ({ok, _}) -> false;
        (_) -> true
    end,
    case lists:filter(IsFailure, Results) of
        [] -> {ok, [R || {ok, R} <- Results]};
        [FirstError | _] -> FirstError
    end.
%% Merge a newly submitted config (NewConf) with the previously stored
%% raw config (OldConf): any value emqx_utils:is_redacted/2 reports as
%% redacted is replaced with the stored original. Nested maps are
%% processed recursively; keys absent from OldConf are taken as-is.
deobfuscate(NewConf, OldConf) ->
    maps:fold(
        fun(K, V, Acc) ->
            case maps:find(K, OldConf) of
                error ->
                    %% new key, nothing stored to restore
                    Acc#{K => V};
                {ok, OldV} when is_map(V), is_map(OldV) ->
                    Acc#{K => deobfuscate(V, OldV)};
                {ok, OldV} ->
                    case emqx_utils:is_redacted(K, V) of
                        true ->
                            %% submitted value is redacted: keep the stored original
                            Acc#{K => OldV};
                        _ ->
                            Acc#{K => V}
                    end
            end
        end,
        #{},
        NewConf
    ).
%% bridge helpers
%% Look the bridge up on every running node via bpapi and build the
%% aggregated HTTP response; SuccCode is the status code on success.
lookup_from_all_nodes(BridgeType, BridgeName, SuccCode) ->
    Nodes = mria:running_nodes(),
    case is_ok(emqx_bridge_proto_v5:v2_lookup_from_all_nodes(Nodes, BridgeType, BridgeName)) of
        {ok, [{ok, _} | _] = Results} ->
            {SuccCode, format_bridge_info([R || {ok, R} <- Results])};
        {ok, [{error, not_found} | _]} ->
            ?BRIDGE_NOT_FOUND(BridgeType, BridgeName);
        {error, Reason} ->
            ?INTERNAL_ERROR(Reason)
    end.
%% Map the HTTP operation to the matching bpapi call name, scoped to
%% whether it targets the whole cluster ('all') or a single node.
operation_func(all, start) -> v2_start_bridge_to_all_nodes;
operation_func(_Node, start) -> v2_start_bridge_to_node.
%% Run the operation only when the bridge is enabled; a disabled bridge
%% yields 400 and a missing bridge (thrown not_found) yields 404.
call_operation_if_enabled(NodeOrAll, OperFunc, [Nodes, BridgeType, BridgeName]) ->
    try is_enabled_bridge(BridgeType, BridgeName) of
        false ->
            ?BRIDGE_NOT_ENABLED;
        true ->
            call_operation(NodeOrAll, OperFunc, [Nodes, BridgeType, BridgeName])
    catch
        throw:not_found ->
            ?BRIDGE_NOT_FOUND(BridgeType, BridgeName)
    end.
%% Read the <<"enable">> flag from the bridge's raw config (default false).
%% Throws `not_found' when the bridge, or its name as an existing atom,
%% does not exist.
is_enabled_bridge(BridgeType, BridgeName) ->
    try emqx_bridge_v2:lookup(BridgeType, binary_to_existing_atom(BridgeName)) of
        {ok, #{raw_config := ConfMap}} ->
            maps:get(<<"enable">>, ConfMap, false);
        {error, not_found} ->
            throw(not_found)
    catch
        error:badarg ->
            %% catch non-existing atom,
            %% a non-existing atom means it is not available in config PT storage.
            throw(not_found)
    end.
%% Dispatch the (possibly cluster-wide) bpapi operation and translate
%% its result into the appropriate HTTP response macro.
call_operation(NodeOrAll, OperFunc, Args = [_Nodes, BridgeType, BridgeName]) ->
    case is_ok(do_bpapi_call(NodeOrAll, OperFunc, Args)) of
        Ok when Ok =:= ok; is_tuple(Ok), element(1, Ok) =:= ok ->
            ?NO_CONTENT;
        {error, not_implemented} ->
            ?NOT_IMPLEMENTED;
        {error, timeout} ->
            ?BAD_REQUEST(<<"Request timeout">>);
        {error, {start_pool_failed, Name, Reason}} ->
            Msg = bin(
                io_lib:format("Failed to start ~p pool for reason ~p", [Name, redact(Reason)])
            ),
            ?BAD_REQUEST(Msg);
        {error, not_found} ->
            %% a remote node does not know this bridge: log and report
            %% the cluster inconsistency
            BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
            ?SLOG(warning, #{
                msg => "bridge_inconsistent_in_cluster_for_call_operation",
                reason => not_found,
                type => BridgeType,
                name => BridgeName,
                bridge => BridgeId
            }),
            ?SERVICE_UNAVAILABLE(<<"Bridge not found on remote node: ", BridgeId/binary>>);
        {error, {node_not_found, Node}} ->
            ?NOT_FOUND(<<"Node not found: ", (atom_to_binary(Node))/binary>>);
        {error, {unhealthy_target, Message}} ->
            ?BAD_REQUEST(Message);
        {error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' ->
            %% note: tuples tagged 'exit' are intentionally excluded here
            ?BAD_REQUEST(redact(Reason))
    end.
%% Issue the bpapi call either cluster-wide ('all', erpc multicall,
%% unwrapped) or to a single node; an unknown node yields
%% {error, {node_not_found, Node}}.
do_bpapi_call(all, Call, Args) ->
    maybe_unwrap(
        do_bpapi_call_vsn(emqx_bpapi:supported_version(emqx_bridge), Call, Args)
    );
do_bpapi_call(Node, Call, Args) ->
    case lists:member(Node, mria:running_nodes()) of
        true ->
            do_bpapi_call_vsn(emqx_bpapi:supported_version(Node, emqx_bridge), Call, Args);
        false ->
            {error, {node_not_found, Node}}
    end.
%% Only dispatch when the negotiated protocol version supports the call;
%% otherwise report {error, not_implemented}.
do_bpapi_call_vsn(Version, Call, Args) ->
    case is_supported_version(Version, Call) of
        true ->
            apply(emqx_bridge_proto_v5, Call, Args);
        false ->
            {error, not_implemented}
    end.
%% A call is supported iff the negotiated bpapi version is listed for it.
is_supported_version(Version, Call) ->
    lists:member(Version, supported_versions(Call)).

%% All bridge-v2 API calls require bpapi protocol version 5.
supported_versions(_Call) -> [5].
%% Pass the not_implemented error through unchanged; otherwise unwrap
%% the erpc multicall result list.
maybe_unwrap({error, not_implemented}) ->
    {error, not_implemented};
maybe_unwrap(RpcMulticallResult) ->
    emqx_rpc:unwrap_erpc(RpcMulticallResult).
%% For each bridge listed by the first node, gather its entries from all
%% nodes and format them into one aggregated info map.
%% Note: results are accumulated with [_ | Acc], so the output is in
%% reverse order of the first node's listing.
zip_bridges([BridgesFirstNode | _] = BridgesAllNodes) ->
    lists:foldl(
        fun(#{type := Type, name := Name}, Acc) ->
            Bridges = pick_bridges_by_id(Type, Name, BridgesAllNodes),
            [format_bridge_info(Bridges) | Acc]
        end,
        [],
        BridgesFirstNode
    ).
%% Collect the entries for one bridge (Type, Name) from every node's
%% listing; a node missing the bridge is logged as a cluster
%% inconsistency and skipped.
pick_bridges_by_id(Type, Name, BridgesAllNodes) ->
    lists:foldl(
        fun(BridgesOneNode, Acc) ->
            case
                [
                    Bridge
                 || Bridge = #{type := Type0, name := Name0} <- BridgesOneNode,
                    Type0 == Type,
                    Name0 == Name
                ]
            of
                [BridgeInfo] ->
                    [BridgeInfo | Acc];
                [] ->
                    ?SLOG(warning, #{
                        msg => "bridge_inconsistent_in_cluster",
                        reason => not_found,
                        type => Type,
                        name => Name,
                        bridge => emqx_bridge_resource:bridge_id(Type, Name)
                    }),
                    Acc
            end
        end,
        [],
        BridgesAllNodes
    ).
%% Merge per-node copies of one bridge into a single info map: take the
%% first node's data minus the node field, attach the per-node status
%% list and the aggregated status, then redact sensitive values.
format_bridge_info([FirstBridge | _] = Bridges) ->
    Res = maps:remove(node, FirstBridge),
    NodeStatus = node_status(Bridges),
    redact(Res#{
        status => aggregate_status(NodeStatus),
        node_status => NodeStatus
    }).
%% Project each per-node bridge map down to node/status/status_reason.
node_status(Bridges) ->
    lists:map(fun(B) -> maps:with([node, status, status_reason], B) end, Bridges).
%% Reduce the per-node status list to a single value: the shared status
%% when every node agrees (defaulting to 'connecting' when the first
%% entry carries no status), otherwise 'inconsistent'.
aggregate_status([First | _] = AllStatus) ->
    Expected = maps:get(status, First, connecting),
    SameStatus = fun(#{status := S}) -> S == Expected end,
    case lists:all(SameStatus, AllStatus) of
        true -> Expected;
        false -> inconsistent
    end.
%% bpapi target: look up a bridge on this node and format it for the API.
lookup_from_local_node(BridgeType, BridgeName) ->
    case emqx_bridge_v2:lookup(BridgeType, BridgeName) of
        {ok, Res} -> {ok, format_resource(Res, node())};
        Error -> Error
    end.
%% resource
%% Build the API representation of one bridge on one node: the raw
%% config extended with type/name/node, merged with the formatted
%% resource status, and with sensitive values redacted.
format_resource(
    #{
        type := Type,
        name := Name,
        raw_config := RawConf,
        resource_data := ResourceData
    },
    Node
) ->
    redact(
        maps:merge(
            RawConf#{
                type => Type,
                %% prefer the name stored in the raw config, if present
                name => maps:get(<<"name">>, RawConf, Name),
                node => Node
            },
            format_resource_data(ResourceData)
        )
    ).
%% Keep only status and error from the resource data; a non-undefined
%% error is rendered as a readable status_reason.
format_resource_data(ResData) ->
    maps:fold(fun format_resource_data/3, #{}, maps:with([status, error], ResData)).

format_resource_data(error, undefined, Result) ->
    %% no error: omit status_reason entirely
    Result;
format_resource_data(error, Error, Result) ->
    Result#{status_reason => emqx_utils:readable_error_msg(Error)};
format_resource_data(K, V, Result) ->
    Result#{K => V}.
%% POST handler helper: respond 201 on success.
create_bridge(BridgeType, BridgeName, Conf) ->
    create_or_update_bridge(BridgeType, BridgeName, Conf, 201).

%% PUT handler helper: respond 200 on success.
update_bridge(BridgeType, BridgeName, Conf) ->
    create_or_update_bridge(BridgeType, BridgeName, Conf, 200).
%% Persist the bridge config and return the cluster-wide lookup result
%% with the given success status code; map-shaped errors (e.g. hocon
%% validation failures) become 400 with a redacted JSON body.
create_or_update_bridge(BridgeType, BridgeName, Conf, HttpStatusCode) ->
    case emqx_bridge_v2:create(BridgeType, BridgeName, Conf) of
        {ok, _} ->
            lookup_from_all_nodes(BridgeType, BridgeName, HttpStatusCode);
        {error, Reason} when is_map(Reason) ->
            ?BAD_REQUEST(map_to_json(redact(Reason)))
    end.
%% Translate the boolean path parameter into the config operation atom.
enable_func(true) -> enable;
enable_func(false) -> disable.
%% Strip server-assigned / read-only fields from a client-submitted
%% bridge config before it is validated and persisted.
filter_out_request_body(Conf) ->
    ReadOnlyKeys = [
        <<"id">>,
        <<"type">>,
        <<"name">>,
        <<"status">>,
        <<"status_reason">>,
        <<"node_status">>,
        <<"node">>
    ],
    maps:without(ReadOnlyKeys, Conf).
%% general helpers
%% Coerce a list, atom, or binary into a binary string.
bin(V) when is_list(V) -> list_to_binary(V);
bin(V) when is_atom(V) -> atom_to_binary(V, utf8);
bin(V) when is_binary(V) -> V.
%% Encode a map (typically an error reason) as JSON.
%% When dealing with Hocon validation errors, `value' might contain non-serializable
%% values (e.g.: user_lookup_fun), so we try again without that key if serialization
%% fails as a best effort.
map_to_json(M0) ->
    M1 = emqx_utils_maps:jsonable_map(M0, fun(K, V) -> {K, emqx_utils_maps:binary_string(V)} end),
    try
        emqx_utils_json:encode(M1)
    catch
        error:_ ->
            M2 = maps:without([value, <<"value">>], M1),
            emqx_utils_json:encode(M2)
    end.

View File

@ -0,0 +1,179 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% BPAPI protocol module (v5) for cluster-wide bridge operations.
%% Thin RPC wrappers only: each function forwards to the target module
%% on one node (rpc:call) or all given nodes (erpc:multicall) with a
%% shared timeout. No logic lives here — BPAPI modules must stay
%% stable once released.
-module(emqx_bridge_proto_v5).

-behaviour(emqx_bpapi).

-export([
    introduced_in/0,

    list_bridges_on_nodes/1,
    restart_bridge_to_node/3,
    start_bridge_to_node/3,
    stop_bridge_to_node/3,
    lookup_from_all_nodes/3,
    get_metrics_from_all_nodes/3,
    restart_bridges_to_all_nodes/3,
    start_bridges_to_all_nodes/3,
    stop_bridges_to_all_nodes/3,

    v2_start_bridge_to_node/3,
    v2_start_bridge_to_all_nodes/3,
    v2_list_bridges_on_nodes/1,
    v2_lookup_from_all_nodes/3
]).

-include_lib("emqx/include/bpapi.hrl").

%% Timeout (ms) applied to every remote call in this protocol version.
-define(TIMEOUT, 15000).

%% First EMQX release in which this BPAPI version is available.
introduced_in() ->
    "5.3.1".

%% List bridge (v1) configurations on each of the given nodes.
-spec list_bridges_on_nodes([node()]) ->
    emqx_rpc:erpc_multicall([emqx_resource:resource_data()]).
list_bridges_on_nodes(Nodes) ->
    erpc:multicall(Nodes, emqx_bridge, list, [], ?TIMEOUT).

%% Bridge type/name as accepted by the remote side.
-type key() :: atom() | binary() | [byte()].

%% Restart a single bridge on one node.
-spec restart_bridge_to_node(node(), key(), key()) ->
    term().
restart_bridge_to_node(Node, BridgeType, BridgeName) ->
    rpc:call(
        Node,
        emqx_bridge_resource,
        restart,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).

%% Start a single bridge on one node.
-spec start_bridge_to_node(node(), key(), key()) ->
    term().
start_bridge_to_node(Node, BridgeType, BridgeName) ->
    rpc:call(
        Node,
        emqx_bridge_resource,
        start,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).

%% Stop a single bridge on one node.
-spec stop_bridge_to_node(node(), key(), key()) ->
    term().
stop_bridge_to_node(Node, BridgeType, BridgeName) ->
    rpc:call(
        Node,
        emqx_bridge_resource,
        stop,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).

%% Restart the bridge on all given nodes.
-spec restart_bridges_to_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
restart_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(
        Nodes,
        emqx_bridge_resource,
        restart,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).

%% Start the bridge on all given nodes.
-spec start_bridges_to_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
start_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(
        Nodes,
        emqx_bridge_resource,
        start,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).

%% Stop the bridge on all given nodes.
-spec stop_bridges_to_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
stop_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(
        Nodes,
        emqx_bridge_resource,
        stop,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).

%% Look up the bridge on all given nodes (API helper on each node).
-spec lookup_from_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
lookup_from_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(
        Nodes,
        emqx_bridge_api,
        lookup_from_local_node,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).

%% Collect bridge metrics from all given nodes.
-spec get_metrics_from_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall(emqx_metrics_worker:metrics()).
get_metrics_from_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(
        Nodes,
        emqx_bridge_api,
        get_metrics_from_local_node,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).

%% V2 Calls

%% List bridge v2 configurations on each of the given nodes.
-spec v2_list_bridges_on_nodes([node()]) ->
    emqx_rpc:erpc_multicall([emqx_resource:resource_data()]).
v2_list_bridges_on_nodes(Nodes) ->
    erpc:multicall(Nodes, emqx_bridge_v2, list, [], ?TIMEOUT).

%% Look up a bridge v2 on all given nodes (v2 API helper on each node).
-spec v2_lookup_from_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
v2_lookup_from_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(
        Nodes,
        emqx_bridge_v2_api,
        lookup_from_local_node,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).

%% Start a bridge v2 on all given nodes.
-spec v2_start_bridge_to_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
v2_start_bridge_to_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(
        Nodes,
        emqx_bridge_v2,
        start,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).

%% Start a bridge v2 on one node.
-spec v2_start_bridge_to_node(node(), key(), key()) ->
    term().
v2_start_bridge_to_node(Node, BridgeType, BridgeName) ->
    rpc:call(
        Node,
        emqx_bridge_v2,
        start,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).

View File

@ -23,8 +23,6 @@ api_schemas(Method) ->
api_ref(emqx_bridge_gcp_pubsub, <<"gcp_pubsub">>, Method ++ "_producer"),
api_ref(emqx_bridge_gcp_pubsub, <<"gcp_pubsub_consumer">>, Method ++ "_consumer"),
api_ref(emqx_bridge_kafka, <<"kafka_consumer">>, Method ++ "_consumer"),
%% TODO: rename this to `kafka_producer' after alias support is added
%% to hocon; keeping this as just `kafka' for backwards compatibility.
api_ref(emqx_bridge_kafka, <<"kafka">>, Method ++ "_producer"),
api_ref(emqx_bridge_cassandra, <<"cassandra">>, Method),
api_ref(emqx_bridge_mysql, <<"mysql">>, Method),
@ -95,11 +93,10 @@ examples(Method) ->
end,
lists:foldl(Fun, #{}, schema_modules()).
%% TODO: existing atom
resource_type(Type) when is_binary(Type) -> resource_type(binary_to_atom(Type, utf8));
resource_type(kafka_consumer) -> emqx_bridge_kafka_impl_consumer;
%% TODO: rename this to `kafka_producer' after alias support is added
%% to hocon; keeping this as just `kafka' for backwards compatibility.
resource_type(kafka) -> emqx_bridge_kafka_impl_producer;
resource_type(kafka_producer) -> emqx_bridge_kafka_impl_producer;
resource_type(cassandra) -> emqx_bridge_cassandra_connector;
resource_type(hstreamdb) -> emqx_bridge_hstreamdb_connector;
resource_type(gcp_pubsub) -> emqx_bridge_gcp_pubsub_impl_producer;
@ -235,13 +232,11 @@ mongodb_structs() ->
kafka_structs() ->
[
%% TODO: rename this to `kafka_producer' after alias support
%% is added to hocon; keeping this as just `kafka' for
%% backwards compatibility.
{kafka,
{kafka_producer,
mk(
hoconsc:map(name, ref(emqx_bridge_kafka, kafka_producer)),
#{
aliases => [kafka],
desc => <<"Kafka Producer Bridge Config">>,
required => false,
converter => fun kafka_producer_converter/2

View File

@ -0,0 +1,68 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
%% Enterprise-only bridge v2 schema aggregation: collects schema refs,
%% config fields and examples from the ee bridge implementation modules.
%% In a non-ee build the whole body is compiled out and the module is
%% empty.
-module(emqx_bridge_v2_enterprise).

%% ?EMQX_RELEASE_EDITION is expected to come from the build flags —
%% TODO(review): confirm it is always defined where this is compiled.
-if(?EMQX_RELEASE_EDITION == ee).

-import(hoconsc, [mk/2, enum/1, ref/2]).

-export([
    api_schemas/1,
    examples/1,
    fields/1
]).

%% Merge the bridge-v2 config examples of every schema module for the
%% given HTTP method ("get" | "put" | "post") into a single map.
examples(Method) ->
    MergeFun =
        fun(Example, Examples) ->
            maps:merge(Examples, Example)
        end,
    Fun =
        fun(Module, Examples) ->
            ConnectorExamples = erlang:apply(Module, bridge_v2_examples, [Method]),
            lists:foldl(MergeFun, Examples, ConnectorExamples)
        end,
    lists:foldl(Fun, #{}, schema_modules()).

%% Modules contributing bridge-v2 types in the ee edition.
schema_modules() ->
    [
        emqx_bridge_kafka,
        emqx_bridge_azure_event_hub
    ].

%% HOCON fields for the `bridges_v2' config root.
fields(bridges_v2) ->
    bridge_v2_structs().

%% One {type, schema} entry per supported bridge-v2 kind.
bridge_v2_structs() ->
    [
        {kafka_producer,
            mk(
                hoconsc:map(name, ref(emqx_bridge_kafka, kafka_producer_action)),
                #{
                    desc => <<"Kafka Producer Bridge V2 Config">>,
                    required => false
                }
            )},
        {azure_event_hub_producer,
            mk(
                hoconsc:map(name, ref(emqx_bridge_azure_event_hub, bridge_v2)),
                #{
                    desc => <<"Azure Event Hub Bridge V2 Config">>,
                    required => false
                }
            )}
    ].

%% REST API schema refs, one per bridge-v2 type, for the given method.
api_schemas(Method) ->
    [
        api_ref(emqx_bridge_kafka, <<"kafka_producer">>, Method ++ "_bridge_v2"),
        api_ref(emqx_bridge_azure_event_hub, <<"azure_event_hub_producer">>, Method ++ "_bridge_v2")
    ].

api_ref(Module, Type, Method) ->
    {Type, ref(Module, Method)}.

-else.

-endif.

View File

@ -0,0 +1,127 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% HOCON schema for the `bridges_v2' config root and the associated
%% HTTP API request/response schemas. Enterprise-only fields are pulled
%% in from emqx_bridge_v2_enterprise when compiled in the ee edition.
-module(emqx_bridge_v2_schema).

-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx/include/logger.hrl").

-import(hoconsc, [mk/2, ref/2]).

-export([roots/0, fields/1, desc/1, namespace/0, tags/0]).

-export([
    get_response/0,
    put_request/0,
    post_request/0
]).

-if(?EMQX_RELEASE_EDITION == ee).
%% API schema refs contributed by the ee edition (empty list otherwise).
enterprise_api_schemas(Method) ->
    %% We *must* do this to ensure the module is really loaded, especially when we use
    %% `call_hocon' from `nodetool' to generate initial configurations.
    _ = emqx_bridge_v2_enterprise:module_info(),
    case erlang:function_exported(emqx_bridge_v2_enterprise, api_schemas, 1) of
        true -> emqx_bridge_v2_enterprise:api_schemas(Method);
        false -> []
    end.

%% Config fields contributed by the ee edition (empty list otherwise).
enterprise_fields_actions() ->
    %% We *must* do this to ensure the module is really loaded, especially when we use
    %% `call_hocon' from `nodetool' to generate initial configurations.
    _ = emqx_bridge_v2_enterprise:module_info(),
    case erlang:function_exported(emqx_bridge_v2_enterprise, fields, 1) of
        true ->
            emqx_bridge_v2_enterprise:fields(bridges_v2);
        false ->
            []
    end.

-else.

enterprise_api_schemas(_Method) -> [].

enterprise_fields_actions() -> [].

-endif.

%%======================================================================================
%% For HTTP APIs
get_response() ->
    api_schema("get").

put_request() ->
    api_schema("put").

post_request() ->
    api_schema("post").

%% Union over all bridge-v2 API schemas for the given method.
api_schema(Method) ->
    EE = enterprise_api_schemas(Method),
    hoconsc:union(bridge_api_union(EE)).

%% Build a union-selector fun: given a value with a <<"type">> key,
%% select the matching schema ref; unknown types are rejected with a
%% structured throw; without a type, all members are candidates.
bridge_api_union(Refs) ->
    Index = maps:from_list(Refs),
    fun
        (all_union_members) ->
            maps:values(Index);
        ({value, V}) ->
            case V of
                #{<<"type">> := T} ->
                    case maps:get(T, Index, undefined) of
                        undefined ->
                            throw(#{
                                field_name => type,
                                value => T,
                                reason => <<"unknown bridge type">>
                            });
                        Ref ->
                            [Ref]
                    end;
                _ ->
                    maps:values(Index)
            end
    end.

%%======================================================================================
%% HOCON Schema Callbacks
%%======================================================================================

namespace() -> "bridges_v2".

tags() ->
    [<<"Bridge V2">>].

-dialyzer({nowarn_function, roots/0}).
roots() ->
    %% With no known field types (non-ee build), fall back to a free-form
    %% map so existing configs still load.
    case fields(bridges_v2) of
        [] ->
            [
                {bridges_v2,
                    ?HOCON(hoconsc:map(name, typerefl:map()), #{importance => ?IMPORTANCE_LOW})}
            ];
        _ ->
            [{bridges_v2, ?HOCON(?R_REF(bridges_v2), #{importance => ?IMPORTANCE_LOW})}]
    end.

%% `[] ++ ...': the empty list is a placeholder for future CE fields.
fields(bridges_v2) ->
    [] ++ enterprise_fields_actions().

desc(bridges_v2) ->
    ?DESC("desc_bridges_v2");
desc(_) ->
    undefined.

View File

@ -55,7 +55,7 @@ init_per_testcase(_TestCase, Config) ->
end_per_testcase(t_get_basic_usage_info_1, _Config) ->
lists:foreach(
fun({BridgeType, BridgeName}) ->
{ok, _} = emqx_bridge:remove(BridgeType, BridgeName)
ok = emqx_bridge:remove(BridgeType, BridgeName)
end,
[
{webhook, <<"basic_usage_info_webhook">>},

View File

@ -187,7 +187,7 @@ end_per_testcase(_, Config) ->
clear_resources() ->
lists:foreach(
fun(#{type := Type, name := Name}) ->
{ok, _} = emqx_bridge:remove(Type, Name)
ok = emqx_bridge:remove(Type, Name)
end,
emqx_bridge:list()
).

View File

@ -249,32 +249,42 @@ create_rule_and_action_http(BridgeType, RuleTopic, Config, Opts) ->
Error
end.
make_message(Config, MakeMessageFun) ->
BridgeType = ?config(bridge_type, Config),
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true ->
BridgeId = emqx_bridge_v2_testlib:bridge_id(Config),
{BridgeId, MakeMessageFun()};
false ->
{send_message, MakeMessageFun()}
end.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
t_sync_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint) ->
ResourceId = resource_id(Config),
?check_trace(
begin
?assertMatch({ok, _}, create_bridge_api(Config)),
ResourceId = resource_id(Config),
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
Message = {send_message, MakeMessageFun()},
Message = make_message(Config, MakeMessageFun),
IsSuccessCheck(emqx_resource:simple_sync_query(ResourceId, Message)),
ok
end,
fun(Trace) ->
ResourceId = resource_id(Config),
?assertMatch([#{instance_id := ResourceId}], ?of_kind(TracePoint, Trace))
end
),
ok.
t_async_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint) ->
ResourceId = resource_id(Config),
ReplyFun =
fun(Pid, Result) ->
Pid ! {result, Result}
@ -282,12 +292,13 @@ t_async_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint) ->
?check_trace(
begin
?assertMatch({ok, _}, create_bridge_api(Config)),
ResourceId = resource_id(Config),
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
Message = {send_message, MakeMessageFun()},
Message = make_message(Config, MakeMessageFun),
?assertMatch(
{ok, {ok, _}},
?wait_async_action(
@ -301,6 +312,7 @@ t_async_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint) ->
ok
end,
fun(Trace) ->
ResourceId = resource_id(Config),
?assertMatch([#{instance_id := ResourceId}], ?of_kind(TracePoint, Trace))
end
),
@ -342,7 +354,6 @@ t_start_stop(Config, StopTracePoint) ->
t_start_stop(BridgeType, BridgeName, BridgeConfig, StopTracePoint).
t_start_stop(BridgeType, BridgeName, BridgeConfig, StopTracePoint) ->
ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
?check_trace(
begin
%% Check that the bridge probe API doesn't leak atoms.
@ -365,6 +376,7 @@ t_start_stop(BridgeType, BridgeName, BridgeConfig, StopTracePoint) ->
?assertEqual(AtomsBefore, AtomsAfter),
?assertMatch({ok, _}, emqx_bridge:create(BridgeType, BridgeName, BridgeConfig)),
ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
%% Since the connection process is async, we give it some time to
%% stabilize and avoid flakiness.
@ -428,6 +440,7 @@ t_start_stop(BridgeType, BridgeName, BridgeConfig, StopTracePoint) ->
ok
end,
fun(Trace) ->
ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
%% one for each probe, two for real
?assertMatch(
[_, _, #{instance_id := ResourceId}, #{instance_id := ResourceId}],
@ -445,9 +458,9 @@ t_on_get_status(Config, Opts) ->
ProxyPort = ?config(proxy_port, Config),
ProxyHost = ?config(proxy_host, Config),
ProxyName = ?config(proxy_name, Config),
ResourceId = resource_id(Config),
FailureStatus = maps:get(failure_status, Opts, disconnected),
?assertMatch({ok, _}, create_bridge(Config)),
ResourceId = resource_id(Config),
%% Since the connection process is async, we give it some time to
%% stabilize and avoid flakiness.
?retry(

View File

@ -0,0 +1,722 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_v2_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-import(emqx_common_test_helpers, [on_exit/1]).
%% Module implementing the fake connector used throughout this suite.
con_mod() ->
    emqx_bridge_v2_test_connector.

%% The connector type mirrors the bridge type in this suite.
con_type() ->
    bridge_type().

con_name() ->
    my_connector.

connector_resource_id() ->
    emqx_connector_resource:resource_id(con_type(), con_name()).

bridge_type() ->
    test_bridge_type.

%% Minimal HOCON schema stub for the test connector type.
con_schema() ->
    [
        {
            con_type(),
            hoconsc:mk(
                hoconsc:map(name, typerefl:map()),
                #{
                    desc => <<"Test Connector Config">>,
                    required => false
                }
            )
        }
    ].

%% Default config for the fake connector.
con_config() ->
    #{
        <<"enable">> => true,
        <<"resource_opts">> => #{
            %% Set this to a low value to make the test run faster
            <<"health_check_interval">> => 100
        }
    }.
bridge_schema() ->
    bridge_schema(_Opts = #{}).

%% Minimal HOCON schema stub for the test bridge type; the type can be
%% overridden via Opts (used by t_create_wrong_connector_type).
bridge_schema(Opts) ->
    Type = maps:get(bridge_type, Opts, bridge_type()),
    [
        {
            Type,
            hoconsc:mk(
                hoconsc:map(name, typerefl:map()),
                #{
                    desc => <<"Test Bridge Config">>,
                    required => false
                }
            )
        }
    ].

%% Default bridge config: references the fake connector and tells the
%% test connector where to deliver messages (`send_to').
bridge_config() ->
    #{
        <<"connector">> => atom_to_binary(con_name()),
        <<"enable">> => true,
        <<"send_to">> => registered_process_name(),
        <<"resource_opts">> => #{
            <<"resume_interval">> => 100
        }
    }.

%% Named ETS table used to smuggle funs through HOCON configs
%% (see wrap_fun/1 / unwrap_fun/1).
fun_table_name() ->
    emqx_bridge_v2_SUITE_fun_table.

%% Process name the test connector sends received payloads to.
registered_process_name() ->
    my_registered_process.
all() ->
    emqx_common_test_helpers:all(?MODULE).

%% NOTE(review): appears unused — init_per_suite/1 starts apps via
%% app_specs/0 instead; confirm before removing.
start_apps() ->
    [
        emqx,
        emqx_conf,
        emqx_connector,
        emqx_bridge,
        emqx_rule_engine
    ].
%% Install meck mocks so schema/registry modules recognize the fake
%% connector and bridge types used by this suite. `catch meck:new' is
%% used because the module may already be mocked from a prior testcase.
setup_mocks() ->
    MeckOpts = [passthrough, no_link, no_history, non_strict],
    catch meck:new(emqx_connector_schema, MeckOpts),
    meck:expect(emqx_connector_schema, fields, 1, con_schema()),
    catch meck:new(emqx_connector_resource, MeckOpts),
    meck:expect(emqx_connector_resource, connector_to_resource_type, 1, con_mod()),
    catch meck:new(emqx_bridge_v2_schema, MeckOpts),
    meck:expect(emqx_bridge_v2_schema, fields, 1, bridge_schema()),
    catch meck:new(emqx_bridge_v2, MeckOpts),
    BridgeType = bridge_type(),
    BridgeTypeBin = atom_to_binary(BridgeType),
    %% the type may arrive as an atom or a binary depending on the caller
    meck:expect(
        emqx_bridge_v2,
        bridge_v2_type_to_connector_type,
        fun(Type) when Type =:= BridgeType; Type =:= BridgeTypeBin -> con_type() end
    ),
    meck:expect(emqx_bridge_v2, bridge_v1_type_to_bridge_v2_type, 1, bridge_type()),
    meck:expect(emqx_bridge_v2, is_bridge_v2_type, fun(Type) -> Type =:= BridgeType end),
    ok.
%% Start the required applications once for the whole suite.
init_per_suite(Config) ->
    Apps = emqx_cth_suite:start(
        app_specs(),
        #{work_dir => emqx_cth_suite:work_dir(Config)}
    ),
    [{apps, Apps} | Config].

end_per_suite(Config) ->
    Apps = ?config(apps, Config),
    emqx_cth_suite:stop(Apps),
    ok.

%% Applications started for the whole suite.
app_specs() ->
    [
        emqx,
        emqx_conf,
        emqx_connector,
        emqx_bridge,
        emqx_rule_engine
    ].
%% Per-testcase setup: install the mocks, create the fun table and one
%% fake connector for the bridges to reference.
init_per_testcase(_TestCase, Config) ->
    %% Setting up mocks for fake connector and bridge V2
    setup_mocks(),
    ets:new(fun_table_name(), [named_table, public]),
    %% Create a fake connector
    {ok, _} = emqx_connector:create(con_type(), con_name(), con_config()),
    [
        {mocked_mods, [
            emqx_connector_schema,
            emqx_connector_resource,
            emqx_bridge_v2
        ]}
        | Config
    ].

%% Per-testcase teardown: reverse of init_per_testcase/2.
end_per_testcase(_TestCase, _Config) ->
    ets:delete(fun_table_name()),
    delete_all_bridges_and_connectors(),
    meck:unload(),
    emqx_common_test_helpers:call_janitor(),
    ok.
%% Remove every bridge and connector created by a testcase (bridges
%% first, as connectors with active channels refuse removal), then
%% reset the `bridges_v2' config root.
delete_all_bridges_and_connectors() ->
    lists:foreach(
        fun(#{name := Name, type := Type}) ->
            ct:pal("removing bridge ~p", [{Type, Name}]),
            emqx_bridge_v2:remove(Type, Name)
        end,
        emqx_bridge_v2:list()
    ),
    lists:foreach(
        fun(#{name := Name, type := Type}) ->
            ct:pal("removing connector ~p", [{Type, Name}]),
            emqx_connector:remove(Type, Name)
        end,
        emqx_connector:list()
    ),
    update_root_config(#{}),
    ok.
%% Hocon does not support placing a fun in a config map so we replace it with a string
%% key into the suite's ETS fun table; the test connector resolves it
%% back via unwrap_fun/1.
wrap_fun(Fun) ->
    UniqRef = make_ref(),
    UniqRefBin = term_to_binary(UniqRef),
    UniqRefStr = iolist_to_binary(base64:encode(UniqRefBin)),
    ets:insert(fun_table_name(), {UniqRefStr, Fun}),
    UniqRefStr.

%% Inverse of wrap_fun/1: look the fun back up by its key.
unwrap_fun(UniqRefStr) ->
    ets:lookup_element(fun_table_name(), UniqRefStr, 2).

%% Replace the whole `bridges_v2' config root.
update_root_config(RootConf) ->
    emqx_conf:update([bridges_v2], RootConf, #{override_to => cluster}).

%% Replace the whole `connectors' config root.
update_root_connectors_config(RootConf) ->
    emqx_conf:update([connectors], RootConf, #{override_to => cluster}).
%% Basic create/remove round trip.
t_create_remove(_) ->
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, bridge_config()),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    ok.

%% list/0 tracks creations and removals.
t_list(_) ->
    [] = emqx_bridge_v2:list(),
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, bridge_config()),
    1 = length(emqx_bridge_v2:list()),
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge2, bridge_config()),
    2 = length(emqx_bridge_v2:list()),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    1 = length(emqx_bridge_v2:list()),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge2),
    0 = length(emqx_bridge_v2:list()),
    ok.

%% A dry run with a valid config succeeds without creating anything.
t_create_dry_run(_) ->
    ok = emqx_bridge_v2:create_dry_run(bridge_type(), bridge_config()).

%% Dry run surfaces both an error return and a throw from on_add_channel.
t_create_dry_run_fail_add_channel(_) ->
    Msg = <<"Failed to add channel">>,
    OnAddChannel1 = wrap_fun(fun() ->
        {error, Msg}
    end),
    Conf1 = (bridge_config())#{on_add_channel_fun => OnAddChannel1},
    {error, Msg} = emqx_bridge_v2:create_dry_run(bridge_type(), Conf1),
    OnAddChannel2 = wrap_fun(fun() ->
        throw(Msg)
    end),
    Conf2 = (bridge_config())#{on_add_channel_fun => OnAddChannel2},
    {error, Msg} = emqx_bridge_v2:create_dry_run(bridge_type(), Conf2),
    ok.

%% Dry run surfaces failures from on_get_channel_status as well.
t_create_dry_run_fail_get_channel_status(_) ->
    Msg = <<"Failed to add channel">>,
    Fun1 = wrap_fun(fun() ->
        {error, Msg}
    end),
    Conf1 = (bridge_config())#{on_get_channel_status_fun => Fun1},
    {error, Msg} = emqx_bridge_v2:create_dry_run(bridge_type(), Conf1),
    Fun2 = wrap_fun(fun() ->
        throw(Msg)
    end),
    Conf2 = (bridge_config())#{on_get_channel_status_fun => Fun2},
    {error, _} = emqx_bridge_v2:create_dry_run(bridge_type(), Conf2),
    ok.

%% Dry run fails when the referenced connector does not exist.
t_create_dry_run_connector_does_not_exist(_) ->
    BridgeConf = (bridge_config())#{<<"connector">> => <<"connector_does_not_exist">>},
    {error, _} = emqx_bridge_v2:create_dry_run(bridge_type(), BridgeConf).
%% A bridge is representable as v1 only while its connector has exactly
%% one channel; a non-existing bridge also counts as valid v1.
t_is_valid_bridge_v1(_) ->
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, bridge_config()),
    true = emqx_bridge_v2:is_valid_bridge_v1(bridge_v1_type, my_test_bridge),
    %% Add another channel/bridge to the connector
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge_2, bridge_config()),
    false = emqx_bridge_v2:is_valid_bridge_v1(bridge_v1_type, my_test_bridge),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    true = emqx_bridge_v2:is_valid_bridge_v1(bridge_v1_type, my_test_bridge_2),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge_2),
    %% Non existing bridge is a valid Bridge V1
    true = emqx_bridge_v2:is_valid_bridge_v1(bridge_v1_type, my_test_bridge),
    ok.

%% Healthy channel reports `connected'.
t_manual_health_check(_) ->
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, bridge_config()),
    %% Run a health check for the bridge
    connected = emqx_bridge_v2:health_check(bridge_type(), my_test_bridge),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    ok.

%% A throw inside the status callback is returned as an error.
t_manual_health_check_exception(_) ->
    Conf = (bridge_config())#{
        <<"on_get_channel_status_fun">> => wrap_fun(fun() -> throw(my_error) end)
    },
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, Conf),
    %% Run a health check for the bridge
    {error, _} = emqx_bridge_v2:health_check(bridge_type(), my_test_bridge),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    ok.

%% An `error/1' crash inside the status callback is returned as an error.
t_manual_health_check_exception_error(_) ->
    Conf = (bridge_config())#{
        <<"on_get_channel_status_fun">> => wrap_fun(fun() -> error(my_error) end)
    },
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, Conf),
    %% Run a health check for the bridge
    {error, _} = emqx_bridge_v2:health_check(bridge_type(), my_test_bridge),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    ok.

%% An explicit error return from the status callback is passed through.
t_manual_health_check_error(_) ->
    Conf = (bridge_config())#{
        <<"on_get_channel_status_fun">> => wrap_fun(fun() -> {error, my_error} end)
    },
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, Conf),
    %% Run a health check for the bridge
    {error, my_error} = emqx_bridge_v2:health_check(bridge_type(), my_test_bridge),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    ok.
%% Direct send_message/4 delivers the payload to the registered process.
t_send_message(_) ->
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, bridge_config()),
    %% Register name for this process
    register(registered_process_name(), self()),
    _ = emqx_bridge_v2:send_message(bridge_type(), my_test_bridge, <<"my_msg">>, #{}),
    receive
        <<"my_msg">> ->
            ok
    after 10000 ->
        ct:fail("Failed to receive message")
    end,
    unregister(registered_process_name()),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge).

%% A rule-engine action targeting the bridge forwards published messages.
t_send_message_through_rule(_) ->
    BridgeName = my_test_bridge,
    {ok, _} = emqx_bridge_v2:create(bridge_type(), BridgeName, bridge_config()),
    %% Create a rule to send message to the bridge
    {ok, _} = emqx_rule_engine:create_rule(
        #{
            sql => <<"select * from \"t/a\"">>,
            id => atom_to_binary(?FUNCTION_NAME),
            actions => [
                <<
                    (atom_to_binary(bridge_type()))/binary,
                    ":",
                    (atom_to_binary(BridgeName))/binary
                >>
            ],
            description => <<"bridge_v2 test rule">>
        }
    ),
    %% Register name for this process
    register(registered_process_name(), self()),
    %% Send message to the topic
    ClientId = atom_to_binary(?FUNCTION_NAME),
    Payload = <<"hello">>,
    Msg = emqx_message:make(ClientId, 0, <<"t/a">>, Payload),
    emqx:publish(Msg),
    receive
        #{payload := Payload} ->
            ok
    after 10000 ->
        ct:fail("Failed to receive message")
    end,
    unregister(registered_process_name()),
    ok = emqx_rule_engine:delete_rule(atom_to_binary(?FUNCTION_NAME)),
    ok = emqx_bridge_v2:remove(bridge_type(), BridgeName),
    ok.

%% A `local_topic' in the bridge config also routes published messages.
t_send_message_through_local_topic(_) ->
    %% Bridge configuration with local topic
    BridgeName = my_test_bridge,
    TopicName = <<"t/b">>,
    BridgeConfig = (bridge_config())#{
        <<"local_topic">> => TopicName
    },
    {ok, _} = emqx_bridge_v2:create(bridge_type(), BridgeName, BridgeConfig),
    %% Register name for this process
    register(registered_process_name(), self()),
    %% Send message to the topic
    ClientId = atom_to_binary(?FUNCTION_NAME),
    Payload = <<"hej">>,
    Msg = emqx_message:make(ClientId, 0, TopicName, Payload),
    emqx:publish(Msg),
    receive
        #{payload := Payload} ->
            ok
    after 10000 ->
        ct:fail("Failed to receive message")
    end,
    unregister(registered_process_name()),
    ok = emqx_bridge_v2:remove(bridge_type(), BridgeName),
    ok.
%% Messages sent while the channel is unhealthy are not delivered;
%% delivery resumes once the channel's status callback reports connected.
t_send_message_unhealthy_channel(_) ->
    OnGetStatusResponseETS = ets:new(on_get_status_response_ets, [public]),
    ets:insert(OnGetStatusResponseETS, {status_value, {error, my_error}}),
    OnGetStatusFun = wrap_fun(fun() ->
        ets:lookup_element(OnGetStatusResponseETS, status_value, 2)
    end),
    Conf = (bridge_config())#{<<"on_get_channel_status_fun">> => OnGetStatusFun},
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, Conf),
    %% Register name for this process
    register(registered_process_name(), self()),
    _ = emqx_bridge_v2:send_message(bridge_type(), my_test_bridge, <<"my_msg">>, #{timeout => 1}),
    receive
        Any ->
            ct:pal("Received message: ~p", [Any]),
            ct:fail("Should not get message here")
    after 1 ->
        ok
    end,
    %% Sending should work again after the channel is healthy
    ets:insert(OnGetStatusResponseETS, {status_value, connected}),
    _ = emqx_bridge_v2:send_message(
        bridge_type(),
        my_test_bridge,
        <<"my_msg">>,
        #{}
    ),
    receive
        <<"my_msg">> ->
            ok
    after 10000 ->
        ct:fail("Failed to receive message")
    end,
    unregister(registered_process_name()),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge).

%% Same idea at the connector level: a `connecting' connector blocks
%% delivery (raising an alarm); a `connected' one resumes it (clearing
%% the alarm).
t_send_message_unhealthy_connector(_) ->
    ResponseETS = ets:new(response_ets, [public]),
    ets:insert(ResponseETS, {on_start_value, conf}),
    ets:insert(ResponseETS, {on_get_status_value, connecting}),
    OnStartFun = wrap_fun(fun(Conf) ->
        case ets:lookup_element(ResponseETS, on_start_value, 2) of
            conf ->
                {ok, Conf};
            V ->
                V
        end
    end),
    OnGetStatusFun = wrap_fun(fun() ->
        ets:lookup_element(ResponseETS, on_get_status_value, 2)
    end),
    ConConfig = emqx_utils_maps:deep_merge(con_config(), #{
        <<"on_start_fun">> => OnStartFun,
        <<"on_get_status_fun">> => OnGetStatusFun,
        <<"resource_opts">> => #{<<"start_timeout">> => 100}
    }),
    ConName = ?FUNCTION_NAME,
    {ok, _} = emqx_connector:create(con_type(), ConName, ConConfig),
    BridgeConf = (bridge_config())#{
        <<"connector">> => atom_to_binary(ConName)
    },
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, BridgeConf),
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    %% Test that sending does not work when the connector is unhealthy (connecting)
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    register(registered_process_name(), self()),
    _ = emqx_bridge_v2:send_message(bridge_type(), my_test_bridge, <<"my_msg">>, #{timeout => 100}),
    receive
        Any ->
            ct:pal("Received message: ~p", [Any]),
            ct:fail("Should not get message here")
    after 10 ->
        ok
    end,
    %% We should have one alarm
    1 = get_bridge_v2_alarm_cnt(),
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    %% Test that sending works again when the connector is healthy (connected)
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    ets:insert(ResponseETS, {on_get_status_value, connected}),
    _ = emqx_bridge_v2:send_message(bridge_type(), my_test_bridge, <<"my_msg">>, #{timeout => 1000}),
    receive
        <<"my_msg">> ->
            ok
    after 1000 ->
        ct:fail("Failed to receive message")
    end,
    %% The alarm should be gone at this point
    0 = get_bridge_v2_alarm_cnt(),
    unregister(registered_process_name()),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    ok = emqx_connector:remove(con_type(), ConName),
    ets:delete(ResponseETS),
    ok.
%% Creating a bridge with a failing status callback raises exactly one
%% alarm, which is cleared when the bridge is removed.
t_unhealthy_channel_alarm(_) ->
    Conf = (bridge_config())#{
        <<"on_get_channel_status_fun">> =>
            wrap_fun(fun() -> {error, my_error} end)
    },
    0 = get_bridge_v2_alarm_cnt(),
    {ok, _} = emqx_bridge_v2:create(bridge_type(), my_test_bridge, Conf),
    1 = get_bridge_v2_alarm_cnt(),
    ok = emqx_bridge_v2:remove(bridge_type(), my_test_bridge),
    0 = get_bridge_v2_alarm_cnt(),
    ok.

%% Count currently-activated alarms whose name contains "bridge_v2".
get_bridge_v2_alarm_cnt() ->
    Alarms = emqx_alarm:get_alarms(activated),
    FilterFun = fun
        (#{name := S}) when is_binary(S) -> string:find(S, "bridge_v2") =/= nomatch;
        (_) -> false
    end,
    length(lists:filter(FilterFun, Alarms)).
%% Updating or creating a bridge whose `connector' field references an
%% unknown connector is rejected by the post_config_update hook.
t_load_no_matching_connector(_Config) ->
    Conf = bridge_config(),
    BridgeTypeBin = atom_to_binary(bridge_type()),
    BridgeNameBin0 = <<"my_test_bridge_update">>,
    ?assertMatch({ok, _}, emqx_bridge_v2:create(bridge_type(), BridgeNameBin0, Conf)),
    %% updating to invalid reference
    RootConf0 = #{
        BridgeTypeBin =>
            #{BridgeNameBin0 => Conf#{<<"connector">> := <<"unknown">>}}
    },
    ?assertMatch(
        {error,
            {post_config_update, _HandlerMod, #{
                bridge_name := my_test_bridge_update,
                connector_name := <<"unknown">>,
                bridge_type := _,
                reason := "connector_not_found_or_wrong_type"
            }}},
        update_root_config(RootConf0)
    ),
    %% creating new with invalid reference
    BridgeNameBin1 = <<"my_test_bridge_new">>,
    RootConf1 = #{
        BridgeTypeBin =>
            #{BridgeNameBin1 => Conf#{<<"connector">> := <<"unknown">>}}
    },
    ?assertMatch(
        {error,
            {post_config_update, _HandlerMod, #{
                bridge_name := my_test_bridge_new,
                connector_name := <<"unknown">>,
                bridge_type := _,
                reason := "connector_not_found_or_wrong_type"
            }}},
        update_root_config(RootConf1)
    ),
    ok.
%% tests root config handler post config update hook
%% Full create/update/delete cycle through the `bridges_v2' config root.
t_load_config_success(_Config) ->
    Conf = bridge_config(),
    BridgeType = bridge_type(),
    BridgeTypeBin = atom_to_binary(BridgeType),
    BridgeName = my_test_bridge_root,
    BridgeNameBin = atom_to_binary(BridgeName),
    %% pre-condition
    ?assertEqual(#{}, emqx_config:get([bridges_v2])),
    %% create
    RootConf0 = #{BridgeTypeBin => #{BridgeNameBin => Conf}},
    ?assertMatch(
        {ok, _},
        update_root_config(RootConf0)
    ),
    ?assertMatch(
        {ok, #{
            type := BridgeType,
            name := BridgeName,
            raw_config := #{},
            resource_data := #{}
        }},
        emqx_bridge_v2:lookup(BridgeType, BridgeName)
    ),
    %% update
    RootConf1 = #{BridgeTypeBin => #{BridgeNameBin => Conf#{<<"some_key">> => <<"new_value">>}}},
    ?assertMatch(
        {ok, _},
        update_root_config(RootConf1)
    ),
    ?assertMatch(
        {ok, #{
            type := BridgeType,
            name := BridgeName,
            raw_config := #{<<"some_key">> := <<"new_value">>},
            resource_data := #{}
        }},
        emqx_bridge_v2:lookup(BridgeType, BridgeName)
    ),
    %% delete
    RootConf2 = #{},
    ?assertMatch(
        {ok, _},
        update_root_config(RootConf2)
    ),
    ?assertMatch(
        {error, not_found},
        emqx_bridge_v2:lookup(BridgeType, BridgeName)
    ),
    ok.
%% Creating a bridge that references a non-existent connector must be
%% rejected by the post-config-update handler.
t_create_no_matching_connector(_Config) ->
    Conf = (bridge_config())#{<<"connector">> => <<"wrong_connector_name">>},
    ?assertMatch(
        {error,
            {post_config_update, _HandlerMod, #{
                bridge_name := _,
                connector_name := _,
                bridge_type := _,
                reason := "connector_not_found_or_wrong_type"
            }}},
        emqx_bridge_v2:create(bridge_type(), my_test_bridge, Conf)
    ),
    ok.

%% A bridge whose type does not match the connector's type must be
%% rejected the same way; the bridge schema is mocked to report
%% `wrong_type` fields.
t_create_wrong_connector_type(_Config) ->
    meck:expect(
        emqx_bridge_v2_schema,
        fields,
        1,
        bridge_schema(#{bridge_type => wrong_type})
    ),
    Conf = bridge_config(),
    ?assertMatch(
        {error,
            {post_config_update, _HandlerMod, #{
                bridge_name := _,
                connector_name := _,
                bridge_type := wrong_type,
                reason := "connector_not_found_or_wrong_type"
            }}},
        emqx_bridge_v2:create(wrong_type, my_test_bridge, Conf)
    ),
    ok.

%% Updating an existing bridge to point at an unknown connector must
%% fail with the same reason.
t_update_connector_not_found(_Config) ->
    Conf = bridge_config(),
    ?assertMatch({ok, _}, emqx_bridge_v2:create(bridge_type(), my_test_bridge, Conf)),
    BadConf = Conf#{<<"connector">> => <<"wrong_connector_name">>},
    ?assertMatch(
        {error,
            {post_config_update, _HandlerMod, #{
                bridge_name := _,
                connector_name := _,
                bridge_type := _,
                reason := "connector_not_found_or_wrong_type"
            }}},
        emqx_bridge_v2:create(bridge_type(), my_test_bridge, BadConf)
    ),
    ok.
%% Removing a connector that still has active channels (bridges) must be
%% rejected by the connector's post-config-update handler.
t_remove_single_connector_being_referenced_with_active_channels(_Config) ->
    %% we test the connector post config update here because we also need bridges.
    Conf = bridge_config(),
    ?assertMatch({ok, _}, emqx_bridge_v2:create(bridge_type(), my_test_bridge, Conf)),
    ?assertMatch(
        {error, {post_config_update, _HandlerMod, {active_channels, [_ | _]}}},
        emqx_connector:remove(con_type(), con_name())
    ),
    ok.

%% If the connector reports no active channels (mocked on_get_channels),
%% removal succeeds even though a bridge config still references it; the
%% bridge then has no connector resource data.
t_remove_single_connector_being_referenced_without_active_channels(_Config) ->
    %% we test the connector post config update here because we also need bridges.
    Conf = bridge_config(),
    BridgeName = my_test_bridge,
    ?assertMatch({ok, _}, emqx_bridge_v2:create(bridge_type(), BridgeName, Conf)),
    emqx_common_test_helpers:with_mock(
        emqx_bridge_v2_test_connector,
        on_get_channels,
        fun(_ResId) -> [] end,
        fun() ->
            ?assertMatch(ok, emqx_connector:remove(con_type(), con_name())),
            %% we no longer have connector data if this happens...
            ?assertMatch(
                {ok, #{resource_data := #{}}},
                emqx_bridge_v2:lookup(bridge_type(), BridgeName)
            ),
            ok
        end
    ),
    ok.
%% Wiping the whole connector root config while connectors still have
%% active channels must fail with "connector_has_active_channels".
t_remove_multiple_connectors_being_referenced_with_channels(_Config) ->
    Conf = bridge_config(),
    BridgeName = my_test_bridge,
    ?assertMatch({ok, _}, emqx_bridge_v2:create(bridge_type(), BridgeName, Conf)),
    ?assertMatch(
        {error,
            {post_config_update, _HandlerMod, #{
                reason := "connector_has_active_channels",
                type := _,
                connector_name := _,
                active_channels := [_ | _]
            }}},
        update_root_connectors_config(#{})
    ),
    ok.

%% With no active channels reported (mocked on_get_channels), wiping the
%% connector root config succeeds; the referencing bridge is left with
%% no connector resource data.
t_remove_multiple_connectors_being_referenced_without_channels(_Config) ->
    Conf = bridge_config(),
    BridgeName = my_test_bridge,
    ?assertMatch({ok, _}, emqx_bridge_v2:create(bridge_type(), BridgeName, Conf)),
    emqx_common_test_helpers:with_mock(
        emqx_bridge_v2_test_connector,
        on_get_channels,
        fun(_ResId) -> [] end,
        fun() ->
            ?assertMatch(
                {ok, _},
                update_root_connectors_config(#{})
            ),
            %% we no longer have connector data if this happens...
            ?assertMatch(
                {ok, #{resource_data := #{}}},
                emqx_bridge_v2:lookup(bridge_type(), BridgeName)
            ),
            ok
        end
    ),
    ok.

View File

@ -0,0 +1,747 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_v2_api_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-import(emqx_mgmt_api_test_util, [uri/1]).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/test_macros.hrl").
-define(ROOT, "bridges_v2").
-define(CONNECTOR_NAME, <<"my_connector">>).
-define(RESOURCE(NAME, TYPE), #{
<<"enable">> => true,
%<<"ssl">> => #{<<"enable">> => false},
<<"type">> => TYPE,
<<"name">> => NAME
}).
-define(CONNECTOR_TYPE_STR, "kafka_producer").
-define(CONNECTOR_TYPE, <<?CONNECTOR_TYPE_STR>>).
-define(KAFKA_BOOTSTRAP_HOST, <<"127.0.0.1:9092">>).
-define(KAFKA_CONNECTOR(Name, BootstrapHosts), ?RESOURCE(Name, ?CONNECTOR_TYPE)#{
<<"authentication">> => <<"none">>,
<<"bootstrap_hosts">> => BootstrapHosts,
<<"connect_timeout">> => <<"5s">>,
<<"metadata_request_timeout">> => <<"5s">>,
<<"min_metadata_refresh_interval">> => <<"3s">>,
<<"socket_opts">> =>
#{
<<"nodelay">> => true,
<<"recbuf">> => <<"1024KB">>,
<<"sndbuf">> => <<"1024KB">>,
<<"tcp_keepalive">> => <<"none">>
}
}).
-define(CONNECTOR(Name), ?KAFKA_CONNECTOR(Name, ?KAFKA_BOOTSTRAP_HOST)).
-define(CONNECTOR, ?CONNECTOR(?CONNECTOR_NAME)).
-define(BRIDGE_NAME, (atom_to_binary(?FUNCTION_NAME))).
-define(BRIDGE_TYPE_STR, "kafka_producer").
-define(BRIDGE_TYPE, <<?BRIDGE_TYPE_STR>>).
-define(KAFKA_BRIDGE(Name, Connector), ?RESOURCE(Name, ?BRIDGE_TYPE)#{
<<"connector">> => Connector,
<<"kafka">> => #{
<<"buffer">> => #{
<<"memory_overload_protection">> => true,
<<"mode">> => <<"hybrid">>,
<<"per_partition_limit">> => <<"2GB">>,
<<"segment_bytes">> => <<"100MB">>
},
<<"compression">> => <<"no_compression">>,
<<"kafka_ext_headers">> => [
#{
<<"kafka_ext_header_key">> => <<"clientid">>,
<<"kafka_ext_header_value">> => <<"${clientid}">>
},
#{
<<"kafka_ext_header_key">> => <<"topic">>,
<<"kafka_ext_header_value">> => <<"${topic}">>
}
],
<<"kafka_header_value_encode_mode">> => <<"none">>,
<<"kafka_headers">> => <<"${pub_props}">>,
<<"max_batch_bytes">> => <<"896KB">>,
<<"max_inflight">> => 10,
<<"message">> => #{
<<"key">> => <<"${.clientid}">>,
<<"timestamp">> => <<"${.timestamp}">>,
<<"value">> => <<"${.}">>
},
<<"partition_count_refresh_interval">> => <<"60s">>,
<<"partition_strategy">> => <<"random">>,
<<"required_acks">> => <<"all_isr">>,
<<"topic">> => <<"kafka-topic">>
},
<<"local_topic">> => <<"mqtt/local/topic">>,
<<"resource_opts">> => #{
<<"health_check_interval">> => <<"32s">>
}
}).
-define(KAFKA_BRIDGE(Name), ?KAFKA_BRIDGE(Name, ?CONNECTOR_NAME)).
%% -define(BRIDGE_TYPE_MQTT, <<"mqtt">>).
%% -define(MQTT_BRIDGE(SERVER, NAME), ?BRIDGE(NAME, ?BRIDGE_TYPE_MQTT)#{
%% <<"server">> => SERVER,
%% <<"username">> => <<"user1">>,
%% <<"password">> => <<"">>,
%% <<"proto_ver">> => <<"v5">>,
%% <<"egress">> => #{
%% <<"remote">> => #{
%% <<"topic">> => <<"emqx/${topic}">>,
%% <<"qos">> => <<"${qos}">>,
%% <<"retain">> => false
%% }
%% }
%% }).
%% -define(MQTT_BRIDGE(SERVER), ?MQTT_BRIDGE(SERVER, <<"mqtt_egress_test_bridge">>)).
%% -define(BRIDGE_TYPE_HTTP, <<"kafka">>).
%% -define(HTTP_BRIDGE(URL, NAME), ?BRIDGE(NAME, ?BRIDGE_TYPE_HTTP)#{
%% <<"url">> => URL,
%% <<"local_topic">> => <<"emqx_webhook/#">>,
%% <<"method">> => <<"post">>,
%% <<"body">> => <<"${payload}">>,
%% <<"headers">> => #{
%% % NOTE
%% % The Pascal-Case is important here.
%% % The reason is kinda ridiculous: `emqx_bridge_resource:create_dry_run/2` converts
%% % bridge config keys into atoms, and the atom 'Content-Type' exists in the ERTS
%% % when this happens (while the 'content-type' does not).
%% <<"Content-Type">> => <<"application/json">>
%% }
%% }).
%% -define(HTTP_BRIDGE(URL), ?HTTP_BRIDGE(URL, ?BRIDGE_NAME)).
%% -define(URL(PORT, PATH),
%% list_to_binary(
%% io_lib:format(
%% "http://localhost:~s/~s",
%% [integer_to_list(PORT), PATH]
%% )
%% )
%% ).
-define(APPSPECS, [
emqx_conf,
emqx,
emqx_auth,
emqx_management,
{emqx_bridge, "bridges_v2 {}"}
]).
-define(APPSPEC_DASHBOARD,
{emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
).
-if(?EMQX_RELEASE_EDITION == ee).
%% For now we got only kafka implementing `bridge_v2` and that is enterprise only.
all() ->
    [
        {group, single},
        %{group, cluster_later_join},
        {group, cluster}
    ].
-else.
%% Community edition: nothing to run.
all() ->
    [].
-endif.

%% Group matrix: `single` runs everything except the later-join cases;
%% `cluster` additionally excludes single-node-only cases (currently
%% just t_bridges_probe).
groups() ->
    AllTCs = emqx_common_test_helpers:all(?MODULE),
    SingleOnlyTests = [
        t_bridges_probe
    ],
    ClusterLaterJoinOnlyTCs = [
        % t_cluster_later_join_metrics
    ],
    [
        {single, [], AllTCs -- ClusterLaterJoinOnlyTCs},
        {cluster_later_join, [], ClusterLaterJoinOnlyTCs},
        {cluster, [], (AllTCs -- SingleOnlyTests) -- ClusterLaterJoinOnlyTCs}
    ].
%% Per-testcase timetrap: 60 seconds.
suite() ->
    [{timetrap, {seconds, 60}}].

init_per_suite(Config) ->
    Config.

end_per_suite(_Config) ->
    ok.

%% Applications are started per group: `cluster` spins up a two-node
%% cluster, everything else starts the apps on the local node.
init_per_group(cluster = Name, Config) ->
    Nodes = [NodePrimary | _] = mk_cluster(Name, Config),
    init_api([{group, Name}, {cluster_nodes, Nodes}, {node, NodePrimary} | Config]);
%% init_per_group(cluster_later_join = Name, Config) ->
%%     Nodes = [NodePrimary | _] = mk_cluster(Name, Config, #{join_to => undefined}),
%%     init_api([{group, Name}, {cluster_nodes, Nodes}, {node, NodePrimary} | Config]);
init_per_group(Name, Config) ->
    WorkDir = filename:join(?config(priv_dir, Config), Name),
    Apps = emqx_cth_suite:start(?APPSPECS ++ [?APPSPEC_DASHBOARD], #{work_dir => WorkDir}),
    init_api([{group, single}, {group_apps, Apps}, {node, node()} | Config]).

%% Create a default API key on the node under test so requests can
%% authenticate against the management API.
init_api(Config) ->
    Node = ?config(node, Config),
    {ok, ApiKey} = erpc:call(Node, emqx_common_test_http, create_default_app, []),
    [{api_key, ApiKey} | Config].
mk_cluster(Name, Config) ->
    mk_cluster(Name, Config, #{}).

%% Start a two-core-node cluster; only the first node also runs the
%% dashboard app.
mk_cluster(Name, Config, Opts) ->
    Node1Apps = ?APPSPECS ++ [?APPSPEC_DASHBOARD],
    Node2Apps = ?APPSPECS,
    emqx_cth_cluster:start(
        [
            {emqx_bridge_api_SUITE_1, Opts#{role => core, apps => Node1Apps}},
            {emqx_bridge_api_SUITE_2, Opts#{role => core, apps => Node2Apps}}
        ],
        #{work_dir => filename:join(?config(priv_dir, Config), Name)}
    ).

%% Cluster groups stop the whole cluster; single-node groups stop the
%% apps started by init_per_group/2.
end_per_group(Group, Config) when
    Group =:= cluster;
    Group =:= cluster_later_join
->
    ok = emqx_cth_cluster:stop(?config(cluster_nodes, Config));
end_per_group(_, Config) ->
    emqx_cth_suite:stop(?config(group_apps, Config)),
    ok.
%% Install the connector mocks (locally, or via erpc on every cluster
%% node) and pre-create the shared connector all test cases rely on.
init_per_testcase(_TestCase, Config) ->
    case ?config(cluster_nodes, Config) of
        undefined ->
            init_mocks();
        Nodes ->
            [erpc:call(Node, ?MODULE, init_mocks, []) || Node <- Nodes]
    end,
    {ok, 201, _} = request(post, uri(["connectors"]), ?CONNECTOR, Config),
    Config.

%% Remove all bridges/connectors on the node under test, unload the
%% meck mocks everywhere, and run janitor callbacks registered during
%% the test case.
end_per_testcase(_TestCase, Config) ->
    Node = ?config(node, Config),
    ok = erpc:call(Node, fun clear_resources/0),
    case ?config(cluster_nodes, Config) of
        undefined ->
            meck:unload();
        ClusterNodes ->
            [erpc:call(ClusterNode, meck, unload, []) || ClusterNode <- ClusterNodes]
    end,
    ok = emqx_common_test_helpers:call_janitor(),
    ok.
-define(CONNECTOR_IMPL, dummy_connector_impl).

%% Mock the connector resource implementation: connector ids whose name
%% part starts with "bad_" get a bad state that reports `connecting`;
%% everything else reports `connected`. Returns the list of mocked
%% modules (useful for later meck:unload/1).
init_mocks() ->
    meck:new(emqx_connector_ee_schema, [passthrough, no_link]),
    meck:expect(emqx_connector_ee_schema, resource_type, 1, ?CONNECTOR_IMPL),
    meck:new(?CONNECTOR_IMPL, [non_strict, no_link]),
    meck:expect(?CONNECTOR_IMPL, callback_mode, 0, async_if_possible),
    meck:expect(
        ?CONNECTOR_IMPL,
        on_start,
        fun
            (<<"connector:", ?CONNECTOR_TYPE_STR, ":bad_", _/binary>>, _C) ->
                {ok, bad_connector_state};
            (_I, _C) ->
                {ok, connector_state}
        end
    ),
    meck:expect(?CONNECTOR_IMPL, on_stop, 2, ok),
    meck:expect(
        ?CONNECTOR_IMPL,
        on_get_status,
        fun
            (_, bad_connector_state) -> connecting;
            (_, _) -> connected
        end
    ),
    meck:expect(?CONNECTOR_IMPL, on_add_channel, 4, {ok, connector_state}),
    meck:expect(?CONNECTOR_IMPL, on_remove_channel, 3, {ok, connector_state}),
    meck:expect(?CONNECTOR_IMPL, on_get_channel_status, 3, connected),
    [?CONNECTOR_IMPL, emqx_connector_ee_schema].
%% Delete all bridges first (they reference connectors), then all
%% connectors.
clear_resources() ->
    lists:foreach(
        fun(#{type := Type, name := Name}) ->
            ok = emqx_bridge_v2:remove(Type, Name)
        end,
        emqx_bridge_v2:list()
    ),
    lists:foreach(
        fun(#{type := Type, name := Name}) ->
            ok = emqx_connector:remove(Type, Name)
        end,
        emqx_connector:list()
    ).
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
%% We have to pretend testing a kafka bridge since at this point that's the
%% only one that's implemented.
%% Exercises the full CRUD lifecycle of a bridge-v2 via the HTTP API:
%% list (empty) -> create -> list/get -> invalid operation -> update
%% (switching connector) -> delete -> update/delete of a missing bridge
%% -> invalid bridge id -> invalid bridge name.
%% Fix: the "list all bridges" assertion was duplicated verbatim twice
%% in a row; the redundant copy has been removed.
t_bridges_lifecycle(Config) ->
    %% assert there are no bridges at first
    {ok, 200, []} = request_json(get, uri([?ROOT]), Config),
    {ok, 404, _} = request(get, uri([?ROOT, "foo"]), Config),
    {ok, 404, _} = request(get, uri([?ROOT, "kafka_producer:foo"]), Config),
    %% need a var for patterns below
    BridgeName = ?BRIDGE_NAME,
    ?assertMatch(
        {ok, 201, #{
            <<"type">> := ?BRIDGE_TYPE,
            <<"name">> := BridgeName,
            <<"enable">> := true,
            <<"status">> := <<"connected">>,
            <<"node_status">> := [_ | _],
            <<"connector">> := ?CONNECTOR_NAME,
            <<"kafka">> := #{},
            <<"local_topic">> := _,
            <<"resource_opts">> := _
        }},
        request_json(
            post,
            uri([?ROOT]),
            ?KAFKA_BRIDGE(?BRIDGE_NAME),
            Config
        )
    ),
    %% list all bridges, assert bridge is in it
    ?assertMatch(
        {ok, 200, [
            #{
                <<"type">> := ?BRIDGE_TYPE,
                <<"name">> := BridgeName,
                <<"enable">> := true,
                <<"status">> := _,
                <<"node_status">> := [_ | _]
            }
        ]},
        request_json(get, uri([?ROOT]), Config)
    ),
    %% get the bridge by id
    BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
    ?assertMatch(
        {ok, 200, #{
            <<"type">> := ?BRIDGE_TYPE,
            <<"name">> := BridgeName,
            <<"enable">> := true,
            <<"status">> := _,
            <<"node_status">> := [_ | _]
        }},
        request_json(get, uri([?ROOT, BridgeID]), Config)
    ),
    %% an unknown operation on an existing bridge is a 400
    ?assertMatch(
        {ok, 400, #{
            <<"code">> := <<"BAD_REQUEST">>,
            <<"message">> := _
        }},
        request_json(post, uri([?ROOT, BridgeID, "brababbel"]), Config)
    ),
    %% update bridge config, pointing it at a freshly created connector
    {ok, 201, _} = request(post, uri(["connectors"]), ?CONNECTOR(<<"foobla">>), Config),
    ?assertMatch(
        {ok, 200, #{
            <<"type">> := ?BRIDGE_TYPE,
            <<"name">> := BridgeName,
            <<"connector">> := <<"foobla">>,
            <<"enable">> := true,
            <<"status">> := _,
            <<"node_status">> := [_ | _]
        }},
        request_json(
            put,
            uri([?ROOT, BridgeID]),
            maps:without(
                [<<"type">>, <<"name">>],
                ?KAFKA_BRIDGE(?BRIDGE_NAME, <<"foobla">>)
            ),
            Config
        )
    ),
    %% delete the bridge
    {ok, 204, <<>>} = request(delete, uri([?ROOT, BridgeID]), Config),
    {ok, 200, []} = request_json(get, uri([?ROOT]), Config),
    %% update a deleted bridge returns an error
    ?assertMatch(
        {ok, 404, #{
            <<"code">> := <<"NOT_FOUND">>,
            <<"message">> := _
        }},
        request_json(
            put,
            uri([?ROOT, BridgeID]),
            maps:without(
                [<<"type">>, <<"name">>],
                ?KAFKA_BRIDGE(?BRIDGE_NAME)
            ),
            Config
        )
    ),
    %% Deleting a non-existing bridge should result in an error
    ?assertMatch(
        {ok, 404, #{
            <<"code">> := <<"NOT_FOUND">>,
            <<"message">> := _
        }},
        request_json(delete, uri([?ROOT, BridgeID]), Config)
    ),
    %% try delete unknown bridge id
    ?assertMatch(
        {ok, 404, #{
            <<"code">> := <<"NOT_FOUND">>,
            <<"message">> := <<"Invalid bridge ID", _/binary>>
        }},
        request_json(delete, uri([?ROOT, "foo"]), Config)
    ),
    %% Try create bridge with bad characters as name
    {ok, 400, _} = request(post, uri([?ROOT]), ?KAFKA_BRIDGE(<<"隋达"/utf8>>), Config),
    ok.
%% Starting a bridge on a node name that does not exist (whether it is
%% a known atom or not) must yield 404.
t_start_bridge_unknown_node(Config) ->
    {ok, 404, _} =
        request(
            post,
            uri(["nodes", "thisbetterbenotanatomyet", ?ROOT, "kafka_producer:foo", start]),
            Config
        ),
    {ok, 404, _} =
        request(
            post,
            uri(["nodes", "undefined", ?ROOT, "kafka_producer:foo", start]),
            Config
        ).

%% Node-scoped and cluster-scoped variants of the same start scenario.
t_start_bridge_node(Config) ->
    do_start_bridge(node, Config).

t_start_bridge_cluster(Config) ->
    do_start_bridge(cluster, Config).
%% Shared body for t_start_bridge_node/t_start_bridge_cluster: create a
%% bridge, start it twice (idempotent), reject an invalid operation,
%% delete it, then exercise the id-validation failure modes. TestType
%% (node | cluster) selects the operation URL via operation_path/4.
do_start_bridge(TestType, Config) ->
    %% assert there are no bridges at first
    {ok, 200, []} = request_json(get, uri([?ROOT]), Config),
    Name = atom_to_binary(TestType),
    ?assertMatch(
        {ok, 201, #{
            <<"type">> := ?BRIDGE_TYPE,
            <<"name">> := Name,
            <<"enable">> := true,
            <<"status">> := <<"connected">>,
            <<"node_status">> := [_ | _]
        }},
        request_json(
            post,
            uri([?ROOT]),
            ?KAFKA_BRIDGE(Name),
            Config
        )
    ),
    BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name),
    %% start again
    {ok, 204, <<>>} = request(post, {operation, TestType, start, BridgeID}, Config),
    ?assertMatch(
        {ok, 200, #{<<"status">> := <<"connected">>}},
        request_json(get, uri([?ROOT, BridgeID]), Config)
    ),
    %% start a started bridge
    {ok, 204, <<>>} = request(post, {operation, TestType, start, BridgeID}, Config),
    ?assertMatch(
        {ok, 200, #{<<"status">> := <<"connected">>}},
        request_json(get, uri([?ROOT, BridgeID]), Config)
    ),
    {ok, 400, _} = request(post, {operation, TestType, invalidop, BridgeID}, Config),
    %% delete the bridge
    {ok, 204, <<>>} = request(delete, uri([?ROOT, BridgeID]), Config),
    {ok, 200, []} = request_json(get, uri([?ROOT]), Config),
    %% Fail parse-id check
    {ok, 404, _} = request(post, {operation, TestType, start, <<"wreckbook_fugazi">>}, Config),
    %% Looks ok but doesn't exist
    {ok, 404, _} = request(post, {operation, TestType, start, <<"webhook:cptn_hook">>}, Config),
    ok.
%% t_start_stop_inconsistent_bridge_node(Config) ->
%% start_stop_inconsistent_bridge(node, Config).
%% t_start_stop_inconsistent_bridge_cluster(Config) ->
%% start_stop_inconsistent_bridge(cluster, Config).
%% start_stop_inconsistent_bridge(Type, Config) ->
%% Node = ?config(node, Config),
%% erpc:call(Node, fun() ->
%% meck:new(emqx_bridge_resource, [passthrough, no_link]),
%% meck:expect(
%% emqx_bridge_resource,
%% stop,
%% fun
%% (_, <<"bridge_not_found">>) -> {error, not_found};
%% (BridgeType, Name) -> meck:passthrough([BridgeType, Name])
%% end
%% )
%% end),
%% emqx_common_test_helpers:on_exit(fun() ->
%% erpc:call(Node, fun() ->
%% meck:unload([emqx_bridge_resource])
%% end)
%% end),
%% {ok, 201, _Bridge} = request(
%% post,
%% uri([?ROOT]),
%% ?KAFKA_BRIDGE(<<"bridge_not_found">>),
%% Config
%% ),
%% {ok, 503, _} = request(
%% post, {operation, Type, stop, <<"kafka:bridge_not_found">>}, Config
%% ).
%% [TODO] This is a mess, need to clarify what the actual behavior needs to be
%% like.
%% t_enable_disable_bridges(Config) ->
%% %% assert we there's no bridges at first
%% {ok, 200, []} = request_json(get, uri([?ROOT]), Config),
%% Name = ?BRIDGE_NAME,
%% ?assertMatch(
%% {ok, 201, #{
%% <<"type">> := ?BRIDGE_TYPE,
%% <<"name">> := Name,
%% <<"enable">> := true,
%% <<"status">> := <<"connected">>,
%% <<"node_status">> := [_ | _]
%% }},
%% request_json(
%% post,
%% uri([?ROOT]),
%% ?KAFKA_BRIDGE(Name),
%% Config
%% )
%% ),
%% BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name),
%% %% disable it
%% meck:expect(?CONNECTOR_IMPL, on_get_channel_status, 3, connecting),
%% {ok, 204, <<>>} = request(put, enable_path(false, BridgeID), Config),
%% ?assertMatch(
%% {ok, 200, #{<<"status">> := <<"stopped">>}},
%% request_json(get, uri([?ROOT, BridgeID]), Config)
%% ),
%% %% enable again
%% meck:expect(?CONNECTOR_IMPL, on_get_channel_status, 3, connected),
%% {ok, 204, <<>>} = request(put, enable_path(true, BridgeID), Config),
%% ?assertMatch(
%% {ok, 200, #{<<"status">> := <<"connected">>}},
%% request_json(get, uri([?ROOT, BridgeID]), Config)
%% ),
%% %% enable an already started bridge
%% {ok, 204, <<>>} = request(put, enable_path(true, BridgeID), Config),
%% ?assertMatch(
%% {ok, 200, #{<<"status">> := <<"connected">>}},
%% request_json(get, uri([?ROOT, BridgeID]), Config)
%% ),
%% %% disable it again
%% {ok, 204, <<>>} = request(put, enable_path(false, BridgeID), Config),
%% %% bad param
%% {ok, 404, _} = request(put, enable_path(foo, BridgeID), Config),
%% {ok, 404, _} = request(put, enable_path(true, "foo"), Config),
%% {ok, 404, _} = request(put, enable_path(true, "webhook:foo"), Config),
%% {ok, 400, Res} = request(post, {operation, node, start, BridgeID}, <<>>, fun json/1, Config),
%% ?assertEqual(
%% #{
%% <<"code">> => <<"BAD_REQUEST">>,
%% <<"message">> => <<"Forbidden operation, bridge not enabled">>
%% },
%% Res
%% ),
%% {ok, 400, Res} = request(
%% post, {operation, cluster, start, BridgeID}, <<>>, fun json/1, Config
%% ),
%% %% enable a stopped bridge
%% {ok, 204, <<>>} = request(put, enable_path(true, BridgeID), Config),
%% ?assertMatch(
%% {ok, 200, #{<<"status">> := <<"connected">>}},
%% request_json(get, uri([?ROOT, BridgeID]), Config)
%% ),
%% %% delete the bridge
%% {ok, 204, <<>>} = request(delete, uri([?ROOT, BridgeID]), Config),
%% {ok, 200, []} = request_json(get, uri([?ROOT]), Config).
%% Probe (dry-run) endpoint: succeeds repeatedly for the same name since
%% nothing is persisted; a failing on_start surfaces as TEST_FAILED and
%% an unknown bridge type as BAD_REQUEST.
t_bridges_probe(Config) ->
    {ok, 204, <<>>} = request(
        post,
        uri(["bridges_v2_probe"]),
        ?KAFKA_BRIDGE(?BRIDGE_NAME),
        Config
    ),
    %% second time with same name is ok since no real bridge created
    {ok, 204, <<>>} = request(
        post,
        uri(["bridges_v2_probe"]),
        ?KAFKA_BRIDGE(?BRIDGE_NAME),
        Config
    ),
    %% force the mocked connector to fail its start
    meck:expect(?CONNECTOR_IMPL, on_start, 2, {error, on_start_error}),
    ?assertMatch(
        {ok, 400, #{
            <<"code">> := <<"TEST_FAILED">>,
            <<"message">> := _
        }},
        request_json(
            post,
            uri(["bridges_v2_probe"]),
            ?KAFKA_BRIDGE(<<"broken_bridge">>, <<"brokenhost:1234">>),
            Config
        )
    ),
    meck:expect(?CONNECTOR_IMPL, on_start, 2, {ok, bridge_state}),
    ?assertMatch(
        {ok, 400, #{<<"code">> := <<"BAD_REQUEST">>}},
        request_json(
            post,
            uri(["bridges_v2_probe"]),
            ?RESOURCE(<<"broken_bridge">>, <<"unknown_type">>),
            Config
        )
    ),
    ok.
%%% helpers
%% Open a listening TCP socket on an OS-assigned free port and return
%% {Port, ListenSock}; other errors are returned as {error, Reason}.
%% Fix: the original `case` had no clause for {error, eaddrinuse} (the
%% guard `Reason /= eaddrinuse` explicitly excluded it), so a transient
%% eaddrinuse crashed with case_clause. We now retry briefly instead.
listen_on_random_port() ->
    SockOpts = [binary, {active, false}, {packet, raw}, {reuseaddr, true}, {backlog, 1000}],
    case gen_tcp:listen(0, SockOpts) of
        {ok, Sock} ->
            {ok, Port} = inet:port(Sock),
            {Port, Sock};
        {error, eaddrinuse} ->
            %% transient; back off and ask the OS for another free port
            timer:sleep(10),
            listen_on_random_port();
        {error, Reason} ->
            {error, Reason}
    end.
%% HTTP request helpers. request/3 defaults to an empty body; a URL of
%% the form {operation, Type, Op, BridgeID} is expanded by
%% operation_path/4 before the request is made.
request(Method, URL, Config) ->
    request(Method, URL, [], Config).

request(Method, {operation, Type, Op, BridgeID}, Body, Config) ->
    URL = operation_path(Type, Op, BridgeID, Config),
    request(Method, URL, Body, Config);
request(Method, URL, Body, Config) ->
    AuthHeader = emqx_common_test_http:auth_header(?config(api_key, Config)),
    Opts = #{compatible_mode => true, httpc_req_opts => [{body_format, binary}]},
    emqx_mgmt_api_test_util:request_api(Method, URL, [], AuthHeader, Body, Opts).

%% Like request/4 but pipes a successful response body through Decoder;
%% a decoder error is returned as-is (without the status code).
request(Method, URL, Body, Decoder, Config) ->
    case request(Method, URL, Body, Config) of
        {ok, Code, Response} ->
            case Decoder(Response) of
                {error, _} = Error -> Error;
                Decoded -> {ok, Code, Decoded}
            end;
        Otherwise ->
            Otherwise
    end.
%% Convenience wrappers decoding the response body as JSON.
request_json(Method, URLLike, Config) ->
    request(Method, URLLike, [], fun json/1, Config).

request_json(Method, URLLike, Body, Config) ->
    request(Method, URLLike, Body, fun json/1, Config).

%% Node-scoped operations go through /nodes/<node>/...; cluster-scoped
%% operations hit the root path directly.
operation_path(node, Oper, BridgeID, Config) ->
    uri(["nodes", ?config(node, Config), ?ROOT, BridgeID, Oper]);
operation_path(cluster, Oper, BridgeID, _Config) ->
    uri([?ROOT, BridgeID, Oper]).

enable_path(Enable, BridgeID) ->
    uri([?ROOT, BridgeID, "enable", Enable]).
%% The helpers below proxy common operations to the node under test via
%% erpc, so they work in both the single-node and cluster groups.
publish_message(Topic, Body, Config) ->
    Node = ?config(node, Config),
    erpc:call(Node, emqx, publish, [emqx_message:make(Topic, Body)]).

update_config(Path, Value, Config) ->
    Node = ?config(node, Config),
    erpc:call(Node, emqx, update_config, [Path, Value]).

get_raw_config(Path, Config) ->
    Node = ?config(node, Config),
    erpc:call(Node, emqx, get_raw_config, [Path]).

add_user_auth(Chain, AuthenticatorID, User, Config) ->
    Node = ?config(node, Config),
    erpc:call(Node, emqx_authentication, add_user, [Chain, AuthenticatorID, User]).

delete_user_auth(Chain, AuthenticatorID, User, Config) ->
    Node = ?config(node, Config),
    erpc:call(Node, emqx_authentication, delete_user, [Chain, AuthenticatorID, User]).
%% Coerce a byte-oriented binary or an Erlang string to a string().
str(Bin) when is_binary(Bin) -> binary_to_list(Bin);
str(List) when is_list(List) -> List.
%% Decode a JSON binary into maps; on malformed input, log via ct:pal
%% and return {error, Reason} instead of crashing.
json(B) when is_binary(B) ->
    case emqx_utils_json:safe_decode(B, [return_maps]) of
        {ok, Term} ->
            Term;
        {error, Reason} = Error ->
            ct:pal("Failed to decode json: ~p~n~p", [Reason, B]),
            Error
    end.

View File

@ -0,0 +1,129 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_v2_test_connector).
-behaviour(emqx_resource).
-export([
query_mode/1,
callback_mode/0,
on_start/2,
on_stop/2,
on_query/3,
on_query_async/4,
on_get_status/2,
on_add_channel/4,
on_remove_channel/3,
on_get_channels/1,
on_get_channel_status/3
]).
%% This test connector always operates synchronously.
query_mode(_Config) ->
    sync.

%% emqx_resource callback mode: all operations run synchronously.
callback_mode() ->
    always_sync.

%% Start the fake connector. When the config carries `on_start_fun`,
%% the suite-provided fun (unwrapped via the suite helper) decides the
%% result; otherwise start succeeds with an empty state map.
on_start(
    _InstId,
    #{on_start_fun := FunRef} = Conf
) ->
    Fun = emqx_bridge_v2_SUITE:unwrap_fun(FunRef),
    Fun(Conf);
on_start(_InstId, _Config) ->
    {ok, #{}}.
%% Register a channel in the connector state. A channel config carrying
%% `on_add_channel_fun` delegates the result to the suite-provided fun;
%% otherwise the config is stored under `channels` in the state map.
on_add_channel(_InstId, _State, _ChannelId, #{on_add_channel_fun := FunRef}) ->
    Hook = emqx_bridge_v2_SUITE:unwrap_fun(FunRef),
    Hook();
on_add_channel(_InstId, State, ChannelId, ChannelConfig) ->
    Existing = maps:get(channels, State, #{}),
    {ok, State#{channels => Existing#{ChannelId => ChannelConfig}}}.
%% Stopping the fake connector never fails.
on_stop(_InstanceId, _State) ->
    ok.

%% Drop a channel from the connector state; removing an unknown id is a
%% no-op.
on_remove_channel(_InstId, State, ChannelId) ->
    Remaining = maps:remove(ChannelId, maps:get(channels, State, #{})),
    {ok, State#{channels => Remaining}}.
%% Deliver Message to the pid registered as `send_to` in the channel's
%% stored config. Crashes (deliberately, for test visibility) when the
%% channel id is unknown.
on_query(_InstId, {ChannelId, Message}, ConnectorState) ->
    AllChannels = maps:get(channels, ConnectorState, #{}),
    %% Lookup the channel
    ChannelConf = maps:get(ChannelId, AllChannels, not_found),
    Receiver = maps:get(send_to, ChannelConf),
    Receiver ! Message,
    ok.
%% Active channels are tracked by emqx_bridge_v2, not in the local
%% connector state.
on_get_channels(ResId) ->
    emqx_bridge_v2:get_channels_for_connector(ResId).

%% Async queries are not supported by this test connector.
on_query_async(
    _InstId,
    {_MessageTag, _Message},
    _AsyncReplyFn,
    _ConnectorState
) ->
    throw(not_implemented).

%% Connector status: delegated to the suite-provided fun when the state
%% carries `on_get_status_fun`; otherwise always `connected`.
on_get_status(
    _InstId,
    #{on_get_status_fun := FunRef}
) ->
    Fun = emqx_bridge_v2_SUITE:unwrap_fun(FunRef),
    Fun();
on_get_status(
    _InstId,
    _State
) ->
    connected.
%% Channel status: delegated to the suite-provided fun when the channel
%% config carries `on_get_channel_status_fun`; otherwise `connected`.
%% Crashes (deliberately) if the channel id is unknown.
on_get_channel_status(
    _ResId,
    ChannelId,
    State
) ->
    Channels = maps:get(channels, State),
    ChannelState = maps:get(ChannelId, Channels),
    case ChannelState of
        #{on_get_channel_status_fun := FunRef} ->
            Fun = emqx_bridge_v2_SUITE:unwrap_fun(FunRef),
            Fun();
        _ ->
            connected
    end.

View File

@ -0,0 +1,514 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_v2_testlib).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-import(emqx_common_test_helpers, [on_exit/1]).
%% ct setup helpers

%% @doc Stash the list of applications to boot so that `init_per_group/3'
%% and `end_per_suite/1' can retrieve it from the CT config later.
init_per_suite(Config, Apps) ->
    [{start_apps, Apps} | Config].

%% @doc Suite teardown: drop every bridge/connector created by the tests,
%% then stop the test apps in reverse start order.
end_per_suite(Config) ->
    delete_all_bridges_and_connectors(),
    emqx_mgmt_api_test_util:end_suite(),
    ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?config(start_apps, Config))),
    %% best effort; ignore the result in case the app is not running
    _ = application:stop(emqx_connector),
    ok.

%% @doc Group setup: reset toxiproxy, boot the apps stashed by
%% `init_per_suite/2' and seed the CT config with proxy host/port, a
%% unique MQTT topic, the test group and the bridge type.
init_per_group(TestGroup, BridgeType, Config) ->
    ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
    ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    application:load(emqx_bridge),
    ok = emqx_common_test_helpers:start_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:start_apps(?config(start_apps, Config)),
    {ok, _} = application:ensure_all_started(emqx_connector),
    emqx_mgmt_api_test_util:init_suite(),
    %% unique suffix so concurrent/repeated runs don't share a topic
    UniqueNum = integer_to_binary(erlang:unique_integer([positive])),
    MQTTTopic = <<"mqtt/topic/abc", UniqueNum/binary>>,
    [
        {proxy_host, ProxyHost},
        {proxy_port, ProxyPort},
        {mqtt_topic, MQTTTopic},
        {test_group, TestGroup},
        {bridge_type, BridgeType}
        | Config
    ].

%% @doc Group teardown: heal any toxiproxy faults the tests left behind.
end_per_group(Config) ->
    ProxyHost = ?config(proxy_host, Config),
    ProxyPort = ?config(proxy_port, Config),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    % delete_all_bridges(),
    ok.
%% @doc Per-testcase setup shared by bridge-v2 suites.  Wipes leftover
%% bridges/connectors, derives a unique topic from the testcase name,
%% then calls `BridgeConfigCb(TestCase, TestGroup, Config)' which must
%% return the type-specific `{Name, ConfigString, BridgeConfig}' tuple.
%% Starts a snabbkaffe trace for the testcase.
init_per_testcase(TestCase, Config0, BridgeConfigCb) ->
    ct:timetrap(timer:seconds(60)),
    delete_all_bridges_and_connectors(),
    UniqueNum = integer_to_binary(erlang:unique_integer()),
    BridgeTopic =
        <<
            (atom_to_binary(TestCase))/binary,
            UniqueNum/binary
        >>,
    TestGroup = ?config(test_group, Config0),
    Config = [{bridge_topic, BridgeTopic} | Config0],
    {Name, ConfigString, BridgeConfig} = BridgeConfigCb(
        TestCase, TestGroup, Config
    ),
    ok = snabbkaffe:start_trace(),
    [
        {bridge_name, Name},
        {bridge_config_string, ConfigString},
        {bridge_config, BridgeConfig}
        | Config
    ].

%% @doc Per-testcase teardown: heal toxiproxy, run janitor callbacks and
%% stop tracing.  Skipped entirely when the testcase flagged itself with
%% `skip_does_not_apply'.
end_per_testcase(_Testcase, Config) ->
    case proplists:get_bool(skip_does_not_apply, Config) of
        true ->
            ok;
        false ->
            ProxyHost = ?config(proxy_host, Config),
            ProxyPort = ?config(proxy_port, Config),
            emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
            %% in CI, apparently this needs more time since the
            %% machines struggle with all the containers running...
            emqx_common_test_helpers:call_janitor(60_000),
            ok = snabbkaffe:stop(),
            ok
    end.
%% Remove every bridge and every connector known to the node; bridges go
%% first since they reference connectors.
delete_all_bridges_and_connectors() ->
    delete_all_bridges(),
    delete_all_connectors().

%% Remove each listed v2 bridge by type and name.
delete_all_bridges() ->
    RemoveBridge = fun(#{name := Name, type := Type}) ->
        emqx_bridge_v2:remove(Type, Name)
    end,
    lists:foreach(RemoveBridge, emqx_bridge_v2:list()).

%% Remove each listed connector by type and name.
delete_all_connectors() ->
    RemoveConnector = fun(#{name := Name, type := Type}) ->
        emqx_connector:remove(Type, Name)
    end,
    lists:foreach(RemoveConnector, emqx_connector:list()).
%% test helpers

%% @doc Parse a raw HOCON bridge config string and run it through the
%% bridge schema.  NOTE: `check_plain/3' is called only for its
%% validation side effect (it throws on invalid config); the *raw*
%% config for `BridgeType'/`BridgeName' is what gets returned.
parse_and_check(BridgeType, BridgeName, ConfigString) ->
    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
    hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
    #{<<"bridges">> := #{BridgeType := #{BridgeName := BridgeConfig}}} = RawConf,
    BridgeConfig.

%% @doc Compose the bridge-v2 channel id
%% (`bridge_v2:<bridge_id>:<resource_id>') for the bridge under test.
bridge_id(Config) ->
    BridgeType = ?config(bridge_type, Config),
    BridgeName = ?config(bridge_name, Config),
    BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
    ConnectorId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
    <<"bridge_v2:", BridgeId/binary, ":", ConnectorId/binary>>.

%% @doc Resource id of the bridge under test, from the CT config.
resource_id(Config) ->
    BridgeType = ?config(bridge_type, Config),
    BridgeName = ?config(bridge_name, Config),
    emqx_bridge_resource:resource_id(BridgeType, BridgeName).
%% @doc Create the connector and then the bridge under test, taking all
%% names and configs from the CT config.
create_bridge(Config) ->
    create_bridge(Config, _Overrides = #{}).

%% @doc Same as `create_bridge/1' but deep-merges `Overrides' into the
%% stored bridge config first.  The connector must be creatable or the
%% match on `{ok, _}' fails the test.
create_bridge(Config, Overrides) ->
    BridgeType = ?config(bridge_type, Config),
    BridgeName = ?config(bridge_name, Config),
    BridgeConfig0 = ?config(bridge_config, Config),
    BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
    ConnectorName = ?config(connector_name, Config),
    ConnectorType = ?config(connector_type, Config),
    ConnectorConfig = ?config(connector_config, Config),
    {ok, _} =
        emqx_connector:create(ConnectorType, ConnectorName, ConnectorConfig),
    ct:pal("creating bridge with config: ~p", [BridgeConfig]),
    emqx_bridge_v2:create(BridgeType, BridgeName, BridgeConfig).
%% @doc Like `create_bridge/1' but creates the bridge through the REST
%% API (`POST /bridges_v2'); the connector is still created directly.
create_bridge_api(Config) ->
    create_bridge_api(Config, _Overrides = #{}).

%% @doc REST-API bridge creation with `Overrides' deep-merged into the
%% stored bridge config.  Returns `{ok, {Status, Headers, DecodedBody}}'
%% on success, or the error tuple from the HTTP helper.
create_bridge_api(Config, Overrides) ->
    BridgeType = ?config(bridge_type, Config),
    BridgeName = ?config(bridge_name, Config),
    BridgeConfig0 = ?config(bridge_config, Config),
    BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
    ConnectorName = ?config(connector_name, Config),
    ConnectorType = ?config(connector_type, Config),
    ConnectorConfig = ?config(connector_config, Config),
    {ok, _Connector} =
        emqx_connector:create(ConnectorType, ConnectorName, ConnectorConfig),
    Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => BridgeName},
    Path = emqx_mgmt_api_test_util:api_path(["bridges_v2"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("creating bridge (via http): ~p", [Params]),
    Res =
        case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of
            {ok, {Status, Headers, Body0}} ->
                %% decode the JSON body for the caller's convenience
                {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}};
            Error ->
                Error
        end,
    ct:pal("bridge create result: ~p", [Res]),
    Res.
%% @doc Update the bridge under test via the REST API
%% (`PUT /bridges_v2/:id') using the stored bridge config.
update_bridge_api(Config) ->
    update_bridge_api(Config, _Overrides = #{}).

%% @doc Same as `update_bridge_api/1' with `Overrides' deep-merged in.
%% Returns `{ok, DecodedBody}' on success (status/headers dropped).
update_bridge_api(Config, Overrides) ->
    BridgeType = ?config(bridge_type, Config),
    Name = ?config(bridge_name, Config),
    BridgeConfig0 = ?config(bridge_config, Config),
    BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
    BridgeId = emqx_bridge_resource:bridge_id(BridgeType, Name),
    Path = emqx_mgmt_api_test_util:api_path(["bridges_v2", BridgeId]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("updating bridge (via http): ~p", [BridgeConfig]),
    Res =
        case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, BridgeConfig, Opts) of
            {ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])};
            Error -> Error
        end,
    ct:pal("bridge update result: ~p", [Res]),
    Res.
%% @doc Invoke a bridge operation (e.g. "start", "stop") via the REST
%% API (`POST /bridges_v2/:id/:op').  A 204 response keeps its raw body;
%% any other response body is JSON-decoded before being returned.
op_bridge_api(Op, BridgeType, BridgeName) ->
    BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
    Path = emqx_mgmt_api_test_util:api_path(["bridges_v2", BridgeId, Op]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("calling bridge ~p (via http): ~p", [BridgeId, Op]),
    Res =
        case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, "", Opts) of
            {ok, {Status = {_, 204, _}, Headers, Body}} ->
                %% 204 No Content: nothing to decode
                {ok, {Status, Headers, Body}};
            {ok, {Status, Headers, Body}} ->
                {ok, {Status, Headers, emqx_utils_json:decode(Body, [return_maps])}};
            {error, {Status, Headers, Body}} ->
                {error, {Status, Headers, emqx_utils_json:decode(Body, [return_maps])}};
            Error ->
                Error
        end,
    ct:pal("bridge op result: ~p", [Res]),
    Res.
%% @doc Dry-run ("probe") the bridge config via the REST API
%% (`POST /bridges_v2_probe') without creating anything.
probe_bridge_api(Config) ->
    probe_bridge_api(Config, _Overrides = #{}).

%% @doc Probe with `Overrides' deep-merged into the stored config.
probe_bridge_api(Config, Overrides) ->
    BridgeType = ?config(bridge_type, Config),
    BridgeName = ?config(bridge_name, Config),
    BridgeConfig0 = ?config(bridge_config, Config),
    BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides),
    probe_bridge_api(BridgeType, BridgeName, BridgeConfig).

%% @doc Probe an explicit type/name/config triple.  Only a 204 response
%% counts as success; anything else is passed through as-is.
probe_bridge_api(BridgeType, BridgeName, BridgeConfig) ->
    Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => BridgeName},
    Path = emqx_mgmt_api_test_util:api_path(["bridges_v2_probe"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    Opts = #{return_all => true},
    ct:pal("probing bridge (via http): ~p", [Params]),
    Res =
        case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of
            {ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0};
            Error -> Error
        end,
    ct:pal("bridge probe result: ~p", [Res]),
    Res.
%% Best-effort decode of an HTTP error body.  The body may be JSON, and
%% its "message" field may itself be JSON-encoded; each layer that
%% decodes cleanly is unwrapped, everything else is returned untouched.
try_decode_error(Body0) ->
    case emqx_utils_json:safe_decode(Body0, [return_maps]) of
        {ok, #{<<"message">> := RawMsg} = Decoded} ->
            case emqx_utils_json:safe_decode(RawMsg, [return_maps]) of
                {ok, InnerMsg} -> Decoded#{<<"message">> := InnerMsg};
                {error, _} -> Decoded
            end;
        {ok, Decoded} ->
            Decoded;
        {error, _} ->
            Body0
    end.
%% @doc Create a rule whose single action is the bridge under test,
%% selecting from `RuleTopic'.
create_rule_and_action_http(BridgeType, RuleTopic, Config) ->
    create_rule_and_action_http(BridgeType, RuleTopic, Config, _Opts = #{}).

%% @doc Same, with an optional custom `sql' in `Opts'.  Registers an
%% `on_exit' janitor callback that deletes the rule after the testcase.
create_rule_and_action_http(BridgeType, RuleTopic, Config, Opts) ->
    BridgeName = ?config(bridge_name, Config),
    BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
    SQL = maps:get(sql, Opts, <<"SELECT * FROM \"", RuleTopic/binary, "\"">>),
    Params = #{
        enable => true,
        sql => SQL,
        actions => [BridgeId]
    },
    Path = emqx_mgmt_api_test_util:api_path(["rules"]),
    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
    ct:pal("rule action params: ~p", [Params]),
    case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of
        {ok, Res0} ->
            Res = #{<<"id">> := RuleId} = emqx_utils_json:decode(Res0, [return_maps]),
            on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
            {ok, Res};
        Error ->
            Error
    end.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
%% @doc Generic sync-query testcase: create the bridge via the API, wait
%% for the resource to report `connected', run one synchronous query
%% built by `MakeMessageFun' and let `IsSuccessCheck' assert on the
%% result.  `TracePoint' is the snabbkaffe kind the connector emits for
%% each query; the trace check pins it to this resource instance.
t_sync_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint) ->
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge_api(Config)),
            ResourceId = resource_id(Config),
            %% connection establishment is async; poll until healthy
            ?retry(
                _Sleep = 1_000,
                _Attempts = 20,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),
            BridgeId = bridge_id(Config),
            Message = {BridgeId, MakeMessageFun()},
            IsSuccessCheck(emqx_resource:simple_sync_query(ResourceId, Message)),
            ok
        end,
        fun(Trace) ->
            ResourceId = resource_id(Config),
            ?assertMatch([#{instance_id := ResourceId}], ?of_kind(TracePoint, Trace))
        end
    ),
    ok.
%% @doc Generic async-query testcase: like `t_sync_query/4' but issues
%% the query with an async reply fun that posts `{result, Result}' back
%% to the test process, then lets `IsSuccessCheck' assert on it.
t_async_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint) ->
    ReplyFun =
        fun(Pid, Result) ->
            Pid ! {result, Result}
        end,
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge_api(Config)),
            ResourceId = resource_id(Config),
            %% connection establishment is async; poll until healthy
            ?retry(
                _Sleep = 1_000,
                _Attempts = 20,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),
            BridgeId = bridge_id(Config),
            Message = {BridgeId, MakeMessageFun()},
            %% wait until the connector actually emits the trace point
            ?assertMatch(
                {ok, {ok, _}},
                ?wait_async_action(
                    emqx_resource:query(ResourceId, Message, #{
                        async_reply_fun => {ReplyFun, [self()]}
                    }),
                    #{?snk_kind := TracePoint, instance_id := ResourceId},
                    5_000
                )
            ),
            ok
        end,
        fun(Trace) ->
            ResourceId = resource_id(Config),
            ?assertMatch([#{instance_id := ResourceId}], ?of_kind(TracePoint, Trace))
        end
    ),
    %% the reply fun delivers the final result out-of-band
    receive
        {result, Result} -> IsSuccessCheck(Result)
    after 5_000 ->
        throw(timeout)
    end,
    ok.
%% @doc Generic testcase: create the bridge via the REST API, then
%% update it twice with the same config to exercise the update path.
t_create_via_http(Config) ->
    ?check_trace(
        begin
            ?assertMatch({ok, _}, create_bridge_api(Config)),
            %% lightweight matrix testing some configs
            ?assertMatch(
                {ok, _},
                update_bridge_api(
                    Config
                )
            ),
            ?assertMatch(
                {ok, _},
                update_bridge_api(
                    Config
                )
            ),
            ok
        end,
        []
    ),
    ok.
%% @doc Generic start/stop testcase.  Probes the bridge config twice
%% (checking the probe endpoint does not leak atoms), creates the real
%% bridge, verifies it connects, exercises the "start" operation, and
%% finally disables the connector, expecting `StopTracePoint' to fire.
%% The trace check expects exactly three stop events: one per probe plus
%% one for the real bridge.
t_start_stop(Config, StopTracePoint) ->
    BridgeType = ?config(bridge_type, Config),
    BridgeName = ?config(bridge_name, Config),
    BridgeConfig = ?config(bridge_config, Config),
    ConnectorName = ?config(connector_name, Config),
    ConnectorType = ?config(connector_type, Config),
    ConnectorConfig = ?config(connector_config, Config),
    ?assertMatch(
        {ok, _},
        emqx_connector:create(ConnectorType, ConnectorName, ConnectorConfig)
    ),
    ?check_trace(
        begin
            ProbeRes0 = probe_bridge_api(
                BridgeType,
                BridgeName,
                BridgeConfig
            ),
            ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0),
            %% Check that the bridge probe API doesn't leak atoms.
            AtomsBefore = erlang:system_info(atom_count),
            %% Probe again; shouldn't have created more atoms.
            ProbeRes1 = probe_bridge_api(
                BridgeType,
                BridgeName,
                BridgeConfig
            ),
            ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1),
            AtomsAfter = erlang:system_info(atom_count),
            ?assertEqual(AtomsBefore, AtomsAfter),
            ?assertMatch({ok, _}, emqx_bridge_v2:create(BridgeType, BridgeName, BridgeConfig)),
            ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
            %% Since the connection process is async, we give it some time to
            %% stabilize and avoid flakiness.
            ?retry(
                _Sleep = 1_000,
                _Attempts = 20,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),
            %% `start` bridge to trigger `already_started`
            ?assertMatch(
                {ok, {{_, 204, _}, _Headers, []}},
                emqx_bridge_v2_testlib:op_bridge_api("start", BridgeType, BridgeName)
            ),
            ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)),
            %% Not supported anymore
            %% ?assertMatch(
            %%     {{ok, _}, {ok, _}},
            %%     ?wait_async_action(
            %%         emqx_bridge_v2_testlib:op_bridge_api("stop", BridgeType, BridgeName),
            %%         #{?snk_kind := StopTracePoint},
            %%         5_000
            %%     )
            %% ),
            %% ?assertEqual(
            %%     {error, resource_is_stopped}, emqx_resource_manager:health_check(ResourceId)
            %% ),
            %% ?assertMatch(
            %%     {ok, {{_, 204, _}, _Headers, []}},
            %%     emqx_bridge_v2_testlib:op_bridge_api("stop", BridgeType, BridgeName)
            %% ),
            %% ?assertEqual(
            %%     {error, resource_is_stopped}, emqx_resource_manager:health_check(ResourceId)
            %% ),
            %% ?assertMatch(
            %%     {ok, {{_, 204, _}, _Headers, []}},
            %%     emqx_bridge_v2_testlib:op_bridge_api("start", BridgeType, BridgeName)
            %% ),
            ?retry(
                _Sleep = 1_000,
                _Attempts = 20,
                ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
            ),
            %% Disable the connector, which will also stop it.
            ?assertMatch(
                {{ok, _}, {ok, _}},
                ?wait_async_action(
                    emqx_connector:disable_enable(disable, ConnectorType, ConnectorName),
                    #{?snk_kind := StopTracePoint},
                    5_000
                )
            ),
            ok
        end,
        fun(Trace) ->
            ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
            %% one for each probe, one for real
            ?assertMatch(
                [_, _, #{instance_id := ResourceId}],
                ?of_kind(StopTracePoint, Trace)
            ),
            ok
        end
    ),
    ok.
%% @doc Generic health-check testcase: create the bridge, wait for
%% `connected', cut the connection at the toxiproxy and expect the
%% configured failure status, then verify the bridge self-recovers.
t_on_get_status(Config) ->
    t_on_get_status(Config, _Opts = #{}).

%% @doc Same, with `Opts' allowing a custom `failure_status' (defaults
%% to `disconnected'; some connectors report `connecting' instead).
t_on_get_status(Config, Opts) ->
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    ProxyName = ?config(proxy_name, Config),
    FailureStatus = maps:get(failure_status, Opts, disconnected),
    ?assertMatch({ok, _}, create_bridge(Config)),
    ResourceId = resource_id(Config),
    %% Since the connection process is async, we give it some time to
    %% stabilize and avoid flakiness.
    ?retry(
        _Sleep = 1_000,
        _Attempts = 20,
        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
        ct:sleep(500),
        ?retry(
            _Interval0 = 200,
            _Attempts0 = 10,
            ?assertEqual({ok, FailureStatus}, emqx_resource_manager:health_check(ResourceId))
        )
    end),
    %% Check that it recovers itself.
    ?retry(
        _Sleep = 1_000,
        _Attempts = 20,
        ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    ok.

View File

@ -1,6 +1,6 @@
%% -*- mode: erlang; -*-
{erl_opts, [debug_info]}.
{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.7.7"}}}
{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.8.0"}}}
, {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.3"}}}
, {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0"}}}
, {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}}

View File

@ -1,6 +1,6 @@
{application, emqx_bridge_azure_event_hub, [
{description, "EMQX Enterprise Azure Event Hub Bridge"},
{vsn, "0.1.2"},
{vsn, "0.1.3"},
{registered, []},
{applications, [
kernel,

View File

@ -7,7 +7,7 @@
-include_lib("hocon/include/hoconsc.hrl").
-behaviour(hocon_schema).
-behaviour(emqx_bridge_resource).
-behaviour(emqx_connector_resource).
%% `hocon_schema' API
-export([
@ -18,14 +18,22 @@
]).
%% emqx_bridge_enterprise "unofficial" API
-export([conn_bridge_examples/1]).
-export([
bridge_v2_examples/1,
conn_bridge_examples/1,
connector_examples/1
]).
%% emqx_connector_resource behaviour callbacks
-export([connector_config/1]).
-export([producer_converter/2, host_opts/0]).
-import(hoconsc, [mk/2, enum/1, ref/2]).
-define(AEH_CONNECTOR_TYPE, azure_event_hub_producer).
-define(AEH_CONNECTOR_TYPE_BIN, <<"azure_event_hub_producer">>).
%%-------------------------------------------------------------------------------------------------
%% `hocon_schema' API
%%-------------------------------------------------------------------------------------------------
@ -34,12 +42,50 @@ namespace() -> "bridge_azure_event_hub".
roots() -> ["config_producer"].
fields("put_connector") ->
Fields = override(
emqx_bridge_kafka:fields("put_connector"),
connector_overrides()
),
override_documentations(Fields);
fields("get_connector") ->
emqx_bridge_schema:status_fields() ++
fields("post_connector");
fields("post_connector") ->
Fields = override(
emqx_bridge_kafka:fields("post_connector"),
connector_overrides()
),
override_documentations(Fields);
fields("put_bridge_v2") ->
Fields = override(
emqx_bridge_kafka:fields("put_bridge_v2"),
bridge_v2_overrides()
),
override_documentations(Fields);
fields("get_bridge_v2") ->
emqx_bridge_schema:status_fields() ++
fields("post_bridge_v2");
fields("post_bridge_v2") ->
Fields = override(
emqx_bridge_kafka:fields("post_bridge_v2"),
bridge_v2_overrides()
),
override_documentations(Fields);
fields("post_producer") ->
Fields = override(
emqx_bridge_kafka:fields("post_producer"),
producer_overrides()
),
override_documentations(Fields);
fields("config_bridge_v2") ->
fields(bridge_v2);
fields("config_connector") ->
Fields = override(
emqx_bridge_kafka:fields(kafka_connector),
connector_overrides()
),
override_documentations(Fields);
fields("config_producer") ->
Fields = override(
emqx_bridge_kafka:fields(kafka_producer),
@ -52,9 +98,9 @@ fields(auth_username_password) ->
auth_overrides()
),
override_documentations(Fields);
fields("ssl_client_opts") ->
fields(ssl_client_opts) ->
Fields = override(
emqx_schema:fields("ssl_client_opts"),
emqx_bridge_kafka:ssl_client_opts_fields(),
ssl_overrides()
),
override_documentations(Fields);
@ -68,19 +114,35 @@ fields(kafka_message) ->
Fields0 = emqx_bridge_kafka:fields(kafka_message),
Fields = proplists:delete(timestamp, Fields0),
override_documentations(Fields);
fields(bridge_v2) ->
Fields =
override(
emqx_bridge_kafka:fields(producer_opts),
bridge_v2_overrides()
) ++
[
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
{connector,
mk(binary(), #{
desc => ?DESC(emqx_connector_schema, "connector_field"), required => true
})}
],
override_documentations(Fields);
fields(Method) ->
Fields = emqx_bridge_kafka:fields(Method),
override_documentations(Fields).
desc("config") ->
?DESC("desc_config");
desc("config_connector") ->
?DESC("desc_config");
desc("config_producer") ->
?DESC("desc_config");
desc("ssl_client_opts") ->
emqx_schema:desc("ssl_client_opts");
desc("get_producer") ->
desc("get_" ++ Type) when Type == "producer"; Type == "connector"; Type == "bridge_v2" ->
["Configuration for Azure Event Hub using `GET` method."];
desc("put_producer") ->
desc("put_" ++ Type) when Type == "producer"; Type == "connector"; Type == "bridge_v2" ->
["Configuration for Azure Event Hub using `PUT` method."];
desc("post_producer") ->
desc("post_" ++ Type) when Type == "producer"; Type == "connector"; Type == "bridge_v2" ->
["Configuration for Azure Event Hub using `POST` method."];
desc(Name) ->
lists:member(Name, struct_names()) orelse throw({missing_desc, Name}),
@ -90,7 +152,29 @@ struct_names() ->
[
auth_username_password,
kafka_message,
producer_kafka_opts
producer_kafka_opts,
bridge_v2,
ssl_client_opts
].
bridge_v2_examples(Method) ->
[
#{
?AEH_CONNECTOR_TYPE_BIN => #{
summary => <<"Azure Event Hub Bridge v2">>,
value => values({Method, bridge_v2})
}
}
].
connector_examples(Method) ->
[
#{
?AEH_CONNECTOR_TYPE_BIN => #{
summary => <<"Azure Event Hub Connector">>,
value => values({Method, connector})
}
}
].
conn_bridge_examples(Method) ->
@ -104,11 +188,40 @@ conn_bridge_examples(Method) ->
].
values({get, AEHType}) ->
values({post, AEHType});
maps:merge(
#{
status => <<"connected">>,
node_status => [
#{
node => <<"emqx@localhost">>,
status => <<"connected">>
}
]
},
values({post, AEHType})
);
values({post, bridge_v2}) ->
maps:merge(
values(producer),
#{
enable => true,
connector => <<"my_azure_event_hub_connector">>,
name => <<"my_azure_event_hub_bridge">>,
type => ?AEH_CONNECTOR_TYPE_BIN
}
);
values({post, AEHType}) ->
maps:merge(values(common_config), values(AEHType));
values({put, AEHType}) ->
values({post, AEHType});
values(connector) ->
maps:merge(
values(common_config),
#{
name => <<"my_azure_event_hub_connector">>,
type => ?AEH_CONNECTOR_TYPE_BIN
}
);
values(common_config) ->
#{
authentication => #{
@ -119,12 +232,14 @@ values(common_config) ->
enable => true,
metadata_request_timeout => <<"4s">>,
min_metadata_refresh_interval => <<"3s">>,
name => <<"my_azure_event_hub_bridge">>,
socket_opts => #{
sndbuf => <<"1024KB">>,
recbuf => <<"1024KB">>,
nodelay => true,
tcp_keepalive => <<"none">>
}
},
type => <<"azure_event_hub_producer">>
};
values(producer) ->
#{
@ -163,7 +278,7 @@ values(producer) ->
}.
%%-------------------------------------------------------------------------------------------------
%% `emqx_bridge_resource' API
%% `emqx_connector_resource' API
%%-------------------------------------------------------------------------------------------------
connector_config(Config) ->
@ -182,6 +297,37 @@ connector_config(Config) ->
ref(Name) ->
hoconsc:ref(?MODULE, Name).
connector_overrides() ->
#{
authentication =>
mk(
ref(auth_username_password),
#{
default => #{},
required => true,
desc => ?DESC("authentication")
}
),
bootstrap_hosts =>
mk(
binary(),
#{
required => true,
validator => emqx_schema:servers_validator(
host_opts(), _Required = true
)
}
),
ssl => mk(ref(ssl_client_opts), #{default => #{<<"enable">> => true}}),
type => mk(
?AEH_CONNECTOR_TYPE,
#{
required => true,
desc => ?DESC("connector_type")
}
)
}.
producer_overrides() ->
#{
authentication =>
@ -208,10 +354,26 @@ producer_overrides() ->
required => true,
validator => fun emqx_bridge_kafka:producer_strategy_key_validator/1
}),
ssl => mk(ref("ssl_client_opts"), #{default => #{<<"enable">> => true}}),
ssl => mk(ref(ssl_client_opts), #{default => #{<<"enable">> => true}}),
type => mk(azure_event_hub_producer, #{required => true})
}.
bridge_v2_overrides() ->
#{
kafka =>
mk(ref(producer_kafka_opts), #{
required => true,
validator => fun emqx_bridge_kafka:producer_strategy_key_validator/1
}),
ssl => mk(ref(ssl_client_opts), #{default => #{<<"enable">> => true}}),
type => mk(
?AEH_CONNECTOR_TYPE,
#{
required => true,
desc => ?DESC("bridge_v2_type")
}
)
}.
auth_overrides() ->
#{
mechanism =>
@ -228,19 +390,11 @@ auth_overrides() ->
})
}.
%% Kafka has SSL disabled by default
%% Azure must use SSL
ssl_overrides() ->
#{
%% FIXME: change this once the config option is defined
%% "cacerts" => mk(boolean(), #{default => true}),
"enable" => mk(true, #{default => true}),
"server_name_indication" =>
mk(
hoconsc:union([disable, auto, string()]),
#{
example => auto,
default => <<"auto">>
}
)
"enable" => mk(true, #{default => true})
}.
kafka_producer_overrides() ->

View File

@ -13,7 +13,6 @@
-define(BRIDGE_TYPE, azure_event_hub_producer).
-define(BRIDGE_TYPE_BIN, <<"azure_event_hub_producer">>).
-define(KAFKA_BRIDGE_TYPE, kafka).
-define(APPS, [emqx_resource, emqx_bridge, emqx_rule_engine]).
-import(emqx_common_test_helpers, [on_exit/1]).
@ -41,6 +40,7 @@ init_per_suite(Config) ->
emqx_resource,
emqx_bridge_azure_event_hub,
emqx_bridge,
emqx_rule_engine,
{emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
],
#{work_dir => ?config(priv_dir, Config)}
@ -281,8 +281,6 @@ t_sync_query(Config) ->
t_same_name_azure_kafka_bridges(AehConfig) ->
ConfigKafka = lists:keyreplace(bridge_type, 1, AehConfig, {bridge_type, ?KAFKA_BRIDGE_TYPE}),
BridgeName = ?config(bridge_name, AehConfig),
AehResourceId = emqx_bridge_testlib:resource_id(AehConfig),
KafkaResourceId = emqx_bridge_testlib:resource_id(ConfigKafka),
TracePoint = emqx_bridge_kafka_impl_producer_sync_query,
%% creates the AEH bridge and check it's working
ok = emqx_bridge_testlib:t_sync_query(
@ -293,6 +291,8 @@ t_same_name_azure_kafka_bridges(AehConfig) ->
),
%% than creates a Kafka bridge with same name and delete it after creation
ok = emqx_bridge_testlib:t_create_via_http(ConfigKafka),
AehResourceId = emqx_bridge_testlib:resource_id(AehConfig),
KafkaResourceId = emqx_bridge_testlib:resource_id(ConfigKafka),
%% check that both bridges are healthy
?assertEqual({ok, connected}, emqx_resource_manager:health_check(AehResourceId)),
?assertEqual({ok, connected}, emqx_resource_manager:health_check(KafkaResourceId)),
@ -307,7 +307,8 @@ t_same_name_azure_kafka_bridges(AehConfig) ->
% check that AEH bridge is still working
?check_trace(
begin
Message = {send_message, make_message()},
BridgeId = emqx_bridge_v2_testlib:bridge_id(AehConfig),
Message = {BridgeId, make_message()},
?assertEqual(ok, emqx_resource:simple_sync_query(AehResourceId, Message)),
ok
end,

View File

@ -0,0 +1,343 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_azure_event_hub_v2_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-define(BRIDGE_TYPE, azure_event_hub_producer).
-define(BRIDGE_TYPE_BIN, <<"azure_event_hub_producer">>).
-define(CONNECTOR_TYPE, azure_event_hub_producer).
-define(CONNECTOR_TYPE_BIN, <<"azure_event_hub_producer">>).
-define(KAFKA_BRIDGE_TYPE, kafka_producer).
-import(emqx_common_test_helpers, [on_exit/1]).
%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------
%% @doc Run every `t_*' function in this module as a testcase.
all() ->
    emqx_common_test_helpers:all(?MODULE).

%% @doc Suite setup: requires a SASL/SSL Kafka broker reachable through
%% toxiproxy.  Boots the apps under a per-suite work dir via
%% `emqx_cth_suite' and creates a default API app for REST calls.
%% Skips the suite when Kafka is unavailable (hard failure in CI so the
%% missing container is noticed).
init_per_suite(Config) ->
    KafkaHost = os:getenv("KAFKA_SASL_SSL_HOST", "toxiproxy.emqx.net"),
    KafkaPort = list_to_integer(os:getenv("KAFKA_SASL_SSL_PORT", "9295")),
    ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
    ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
    ProxyName = "kafka_sasl_ssl",
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    case emqx_common_test_helpers:is_tcp_server_available(KafkaHost, KafkaPort) of
        true ->
            Apps = emqx_cth_suite:start(
                [
                    emqx_conf,
                    emqx,
                    emqx_management,
                    emqx_resource,
                    emqx_bridge_azure_event_hub,
                    emqx_bridge,
                    emqx_rule_engine,
                    {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
                ],
                #{work_dir => ?config(priv_dir, Config)}
            ),
            {ok, Api} = emqx_common_test_http:create_default_app(),
            [
                {tc_apps, Apps},
                {api, Api},
                {proxy_name, ProxyName},
                {proxy_host, ProxyHost},
                {proxy_port, ProxyPort},
                {kafka_host, KafkaHost},
                {kafka_port, KafkaPort}
                | Config
            ];
        false ->
            case os:getenv("IS_CI") of
                "yes" ->
                    throw(no_kafka);
                _ ->
                    {skip, no_kafka}
            end
    end.

%% @doc Suite teardown: stop everything `emqx_cth_suite' started.
end_per_suite(Config) ->
    Apps = ?config(tc_apps, Config),
    emqx_cth_suite:stop(Apps),
    ok.
init_per_testcase(TestCase, Config) ->
    common_init_per_testcase(TestCase, Config).

%% @doc Per-testcase setup: wipe bridges/connectors and override files,
%% derive a unique name used for the connector, bridge and Kafka topic,
%% build both configs, make sure the topic exists and start tracing.
common_init_per_testcase(TestCase, Config) ->
    ct:timetrap(timer:seconds(60)),
    emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(),
    emqx_config:delete_override_conf_files(),
    UniqueNum = integer_to_binary(erlang:unique_integer()),
    Name = iolist_to_binary([atom_to_binary(TestCase), UniqueNum]),
    KafkaHost = ?config(kafka_host, Config),
    KafkaPort = ?config(kafka_port, Config),
    KafkaTopic = Name,
    ConnectorConfig = connector_config(Name, KafkaHost, KafkaPort),
    {BridgeConfig, ExtraConfig} = bridge_config(Name, Name, KafkaTopic),
    ensure_topic(Config, KafkaTopic, _Opts = #{}),
    ok = snabbkaffe:start_trace(),
    ExtraConfig ++
        [
            {connector_type, ?CONNECTOR_TYPE},
            {connector_name, Name},
            {connector_config, ConnectorConfig},
            {bridge_type, ?BRIDGE_TYPE},
            {bridge_name, Name},
            {bridge_config, BridgeConfig}
            | Config
        ].

%% @doc Per-testcase teardown: heal toxiproxy, wipe created resources,
%% run janitor callbacks and stop tracing.  Skipped when the testcase
%% flagged itself with `skip_does_not_apply'.
end_per_testcase(_Testcase, Config) ->
    case proplists:get_bool(skip_does_not_apply, Config) of
        true ->
            ok;
        false ->
            ProxyHost = ?config(proxy_host, Config),
            ProxyPort = ?config(proxy_port, Config),
            emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
            emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(),
            emqx_common_test_helpers:call_janitor(60_000),
            ok = snabbkaffe:stop(),
            ok
    end.
%%------------------------------------------------------------------------------
%% Helper fns
%%------------------------------------------------------------------------------
%% @doc Build the Azure Event Hub connector config used by the tests:
%% SASL/PLAIN auth over TLS against the shared-secret client certs.
%% The map is round-tripped through HOCON to prove it serializes.
connector_config(Name, KafkaHost, KafkaPort) ->
    InnerConfigMap0 =
        #{
            <<"enable">> => true,
            <<"bootstrap_hosts">> => iolist_to_binary([KafkaHost, ":", integer_to_binary(KafkaPort)]),
            <<"authentication">> =>
                #{
                    <<"mechanism">> => <<"plain">>,
                    <<"username">> => <<"emqxuser">>,
                    <<"password">> => <<"password">>
                },
            <<"connect_timeout">> => <<"5s">>,
            <<"socket_opts">> =>
                #{
                    <<"nodelay">> => true,
                    <<"recbuf">> => <<"1024KB">>,
                    <<"sndbuf">> => <<"1024KB">>,
                    <<"tcp_keepalive">> => <<"none">>
                },
            <<"ssl">> =>
                #{
                    <<"cacertfile">> => shared_secret(client_cacertfile),
                    <<"certfile">> => shared_secret(client_certfile),
                    <<"keyfile">> => shared_secret(client_keyfile),
                    <<"ciphers">> => [],
                    <<"depth">> => 10,
                    <<"enable">> => true,
                    <<"hibernate_after">> => <<"5s">>,
                    <<"log_level">> => <<"notice">>,
                    <<"reuse_sessions">> => true,
                    <<"secure_renegotiate">> => true,
                    <<"server_name_indication">> => <<"disable">>,
                    %% currently, it seems our CI kafka certs fail peer verification
                    <<"verify">> => <<"verify_none">>,
                    <<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>]
                }
        },
    InnerConfigMap = serde_roundtrip(InnerConfigMap0),
    parse_and_check_connector_config(InnerConfigMap, Name).

%% @doc Validate the connector config against the connector schema.
%% NOTE: `check_plain/3' is run for validation only; the *raw*
%% `InnerConfigMap' is returned, not the checked result.
parse_and_check_connector_config(InnerConfigMap, Name) ->
    TypeBin = ?CONNECTOR_TYPE_BIN,
    RawConf = #{<<"connectors">> => #{TypeBin => #{Name => InnerConfigMap}}},
    #{<<"connectors">> := #{TypeBin := #{Name := Config}}} =
        hocon_tconf:check_plain(emqx_connector_schema, RawConf, #{
            required => false, atom_key => false
        }),
    ct:pal("parsed config: ~p", [Config]),
    InnerConfigMap.
%% @doc Build the bridge-v2 (action) config pointing at `ConnectorId'
%% and producing to `KafkaTopic'.  Returns the parsed config plus extra
%% CT config entries (currently just `kafka_topic').
bridge_config(Name, ConnectorId, KafkaTopic) ->
    InnerConfigMap0 =
        #{
            <<"enable">> => true,
            <<"connector">> => ConnectorId,
            <<"kafka">> =>
                #{
                    <<"buffer">> =>
                        #{
                            <<"memory_overload_protection">> => true,
                            <<"mode">> => <<"memory">>,
                            <<"per_partition_limit">> => <<"2GB">>,
                            <<"segment_bytes">> => <<"100MB">>
                        },
                    <<"compression">> => <<"no_compression">>,
                    <<"kafka_header_value_encode_mode">> => <<"none">>,
                    <<"max_batch_bytes">> => <<"896KB">>,
                    <<"max_inflight">> => <<"10">>,
                    <<"message">> =>
                        #{
                            <<"key">> => <<"${.clientid}">>,
                            <<"value">> => <<"${.}">>
                        },
                    <<"partition_count_refresh_interval">> => <<"60s">>,
                    <<"partition_strategy">> => <<"random">>,
                    <<"query_mode">> => <<"async">>,
                    <<"required_acks">> => <<"all_isr">>,
                    <<"sync_query_timeout">> => <<"5s">>,
                    <<"topic">> => KafkaTopic
                },
            <<"local_topic">> => <<"t/aeh">>
            %%,
        },
    InnerConfigMap = serde_roundtrip(InnerConfigMap0),
    ExtraConfig =
        [{kafka_topic, KafkaTopic}],
    {parse_and_check_bridge_config(InnerConfigMap, Name), ExtraConfig}.

%% check it serializes correctly
serde_roundtrip(InnerConfigMap0) ->
    IOList = hocon_pp:do(InnerConfigMap0, #{}),
    {ok, InnerConfigMap} = hocon:binary(IOList),
    InnerConfigMap.

%% @doc Validate the bridge config against the bridge-v2 schema; the
%% check is for validation only and the raw map is returned.
parse_and_check_bridge_config(InnerConfigMap, Name) ->
    TypeBin = ?BRIDGE_TYPE_BIN,
    RawConf = #{<<"bridges">> => #{TypeBin => #{Name => InnerConfigMap}}},
    hocon_tconf:check_plain(emqx_bridge_v2_schema, RawConf, #{required => false, atom_key => false}),
    InnerConfigMap.
%% Root directory holding the TLS/Kerberos materials shared with the
%% Kafka container; override with $CI_SHARED_SECRET_PATH.
shared_secret_path() ->
    os:getenv("CI_SHARED_SECRET_PATH", "/var/lib/secret").

%% Absolute path of one well-known secret file under the shared dir.
shared_secret(client_keyfile) ->
    filename:join(shared_secret_path(), "client.key");
shared_secret(client_certfile) ->
    filename:join(shared_secret_path(), "client.crt");
shared_secret(client_cacertfile) ->
    filename:join(shared_secret_path(), "ca.crt");
shared_secret(rig_keytab) ->
    filename:join(shared_secret_path(), "rig.keytab").
%% @doc Ensure `KafkaTopic' exists on the broker, creating it over
%% SASL/SSL if necessary.  `Opts' may carry `num_partitions'
%% (default 3); an already-existing topic is treated as success.
ensure_topic(Config, KafkaTopic, Opts) ->
    KafkaHost = ?config(kafka_host, Config),
    KafkaPort = ?config(kafka_port, Config),
    NumPartitions = maps:get(num_partitions, Opts, 3),
    Endpoints = [{KafkaHost, KafkaPort}],
    TopicConfigs = [
        #{
            name => KafkaTopic,
            num_partitions => NumPartitions,
            replication_factor => 1,
            assignments => [],
            configs => []
        }
    ],
    RequestConfig = #{timeout => 5_000},
    ConnConfig =
        #{
            ssl => emqx_tls_lib:to_client_opts(
                #{
                    keyfile => shared_secret(client_keyfile),
                    certfile => shared_secret(client_certfile),
                    cacertfile => shared_secret(client_cacertfile),
                    %% CI certs fail peer verification; see connector_config/3
                    verify => verify_none,
                    enable => true
                }
            ),
            sasl => {plain, <<"emqxuser">>, <<"password">>}
        },
    case brod:create_topics(Endpoints, TopicConfigs, RequestConfig, ConnConfig) of
        ok -> ok;
        {error, topic_already_exists} -> ok
    end.
%% Build a unique test message: the clientid is a unique integer (also
%% used as the timestamp) and the payload is a fresh GUID in hex.
make_message() ->
    UniqueInt = erlang:unique_integer(),
    Payload = emqx_guid:to_hexstr(emqx_guid:gen()),
    #{
        clientid => integer_to_binary(UniqueInt),
        payload => Payload,
        timestamp => UniqueInt
    }.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
%% @doc Delegate to the generic start/stop testcase; the Kafka producer
%% emits `kafka_producer_stopped' when torn down.
t_start_stop(Config) ->
    emqx_bridge_v2_testlib:t_start_stop(Config, kafka_producer_stopped),
    ok.

%% @doc Delegate to the generic create/update-via-REST testcase.
t_create_via_http(Config) ->
    emqx_bridge_v2_testlib:t_create_via_http(Config),
    ok.

%% @doc Delegate to the generic health-check testcase; this connector
%% reports `connecting' (not `disconnected') while the broker is down.
t_on_get_status(Config) ->
    emqx_bridge_v2_testlib:t_on_get_status(Config, #{failure_status => connecting}),
    ok.

%% @doc Delegate to the generic sync-query testcase with a fresh message
%% and the Kafka producer's sync-query trace point.
t_sync_query(Config) ->
    ok = emqx_bridge_v2_testlib:t_sync_query(
        Config,
        fun make_message/0,
        fun(Res) -> ?assertEqual(ok, Res) end,
        emqx_bridge_kafka_impl_producer_sync_query
    ),
    ok.
%% @doc Regression test: an Azure Event Hub bridge and a plain Kafka
%% bridge sharing the same name must not interfere.  Creates both,
%% verifies both are healthy, disables the Kafka one and checks the AEH
%% bridge still delivers queries.
t_same_name_azure_kafka_bridges(Config) ->
    BridgeName = ?config(bridge_name, Config),
    TracePoint = emqx_bridge_kafka_impl_producer_sync_query,
    %% creates the AEH bridge and check it's working
    ok = emqx_bridge_v2_testlib:t_sync_query(
        Config,
        fun make_message/0,
        fun(Res) -> ?assertEqual(ok, Res) end,
        TracePoint
    ),
    %% then create a Kafka bridge with same name and delete it after creation
    ConfigKafka0 = lists:keyreplace(bridge_type, 1, Config, {bridge_type, ?KAFKA_BRIDGE_TYPE}),
    ConfigKafka = lists:keyreplace(
        connector_type, 1, ConfigKafka0, {connector_type, ?KAFKA_BRIDGE_TYPE}
    ),
    ok = emqx_bridge_v2_testlib:t_create_via_http(ConfigKafka),
    AehResourceId = emqx_bridge_v2_testlib:resource_id(Config),
    KafkaResourceId = emqx_bridge_v2_testlib:resource_id(ConfigKafka),
    %% check that both bridges are healthy
    ?assertEqual({ok, connected}, emqx_resource_manager:health_check(AehResourceId)),
    ?assertEqual({ok, connected}, emqx_resource_manager:health_check(KafkaResourceId)),
    %% disabling the same-named Kafka connector must stop only that producer
    ?assertMatch(
        {{ok, _}, {ok, _}},
        ?wait_async_action(
            emqx_connector:disable_enable(disable, ?KAFKA_BRIDGE_TYPE, BridgeName),
            #{?snk_kind := kafka_producer_stopped},
            5_000
        )
    ),
    % check that AEH bridge is still working
    ?check_trace(
        begin
            BridgeId = emqx_bridge_v2_testlib:bridge_id(Config),
            Message = {BridgeId, make_message()},
            ?assertEqual(ok, emqx_resource:simple_sync_query(AehResourceId, Message)),
            ok
        end,
        fun(Trace) ->
            ?assertMatch([#{instance_id := AehResourceId}], ?of_kind(TracePoint, Trace))
        end
    ),
    ok.

View File

@ -177,8 +177,7 @@ make_bridge(Config) ->
delete_bridge() ->
Type = <<"clickhouse">>,
Name = atom_to_binary(?MODULE),
{ok, _} = emqx_bridge:remove(Type, Name),
ok.
ok = emqx_bridge:remove(Type, Name).
reset_table(Config) ->
ClickhouseConnection = proplists:get_value(clickhouse_connection, Config),

View File

@ -891,7 +891,7 @@ t_start_stop(Config) ->
{ok, _} = snabbkaffe:receive_events(SRef0),
?assertMatch({ok, connected}, emqx_resource_manager:health_check(ResourceId)),
?assertMatch({ok, _}, remove_bridge(Config)),
?assertMatch(ok, remove_bridge(Config)),
ok
end,
[

View File

@ -28,6 +28,7 @@
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("emqx/include/asserts.hrl").
-define(BRIDGE_TYPE, <<"webhook">>).
-define(BRIDGE_NAME, atom_to_binary(?MODULE)).
@ -58,9 +59,20 @@ suite() ->
init_per_testcase(t_bad_bridge_config, Config) ->
Config;
init_per_testcase(t_send_async_connection_timeout, Config) ->
HTTPPath = <<"/path">>,
ServerSSLOpts = false,
{ok, {HTTPPort, _Pid}} = emqx_bridge_http_connector_test_server:start_link(
_Port = random, HTTPPath, ServerSSLOpts
),
ResponseDelayMS = 500,
Server = start_http_server(#{response_delay_ms => ResponseDelayMS}),
[{http_server, Server}, {response_delay_ms, ResponseDelayMS} | Config];
ok = emqx_bridge_http_connector_test_server:set_handler(
success_http_handler(#{response_delay => ResponseDelayMS})
),
[
{http_server, #{port => HTTPPort, path => HTTPPath}},
{response_delay_ms, ResponseDelayMS}
| Config
];
init_per_testcase(t_path_not_found, Config) ->
HTTPPath = <<"/nonexisting/path">>,
ServerSSLOpts = false,
@ -98,7 +110,8 @@ end_per_testcase(TestCase, _Config) when
TestCase =:= t_path_not_found;
TestCase =:= t_too_many_requests;
TestCase =:= t_rule_action_expired;
TestCase =:= t_bridge_probes_header_atoms
TestCase =:= t_bridge_probes_header_atoms;
TestCase =:= t_send_async_connection_timeout
->
ok = emqx_bridge_http_connector_test_server:stop(),
persistent_term:erase({?MODULE, times_called}),
@ -302,11 +315,18 @@ make_bridge(Config) ->
emqx_bridge_resource:bridge_id(Type, Name).
success_http_handler() ->
success_http_handler(#{response_delay => 0}).
success_http_handler(Opts) ->
ResponseDelay = maps:get(response_delay, Opts, 0),
TestPid = self(),
fun(Req0, State) ->
{ok, Body, Req} = cowboy_req:read_body(Req0),
Headers = cowboy_req:headers(Req),
ct:pal("http request received: ~p", [#{body => Body, headers => Headers}]),
ct:pal("http request received: ~p", [
#{body => Body, headers => Headers, response_delay => ResponseDelay}
]),
ResponseDelay > 0 andalso timer:sleep(ResponseDelay),
TestPid ! {http, Headers, Body},
Rep = cowboy_req:reply(
200,
@ -380,9 +400,10 @@ wait_http_request() ->
%% When the connection time out all the queued requests where dropped in
t_send_async_connection_timeout(Config) ->
ResponseDelayMS = ?config(response_delay_ms, Config),
#{port := Port} = ?config(http_server, Config),
#{port := Port, path := Path} = ?config(http_server, Config),
BridgeID = make_bridge(#{
port => Port,
path => Path,
pool_size => 1,
query_mode => "async",
connect_timeout => integer_to_list(ResponseDelayMS * 2) ++ "ms",
@ -724,16 +745,17 @@ receive_request_notifications(MessageIDs, _ResponseDelay, _Acc) when map_size(Me
ok;
receive_request_notifications(MessageIDs, ResponseDelay, Acc) ->
receive
{http_server, received, Req} ->
RemainingMessageIDs = remove_message_id(MessageIDs, Req),
receive_request_notifications(RemainingMessageIDs, ResponseDelay, [Req | Acc])
{http, _Headers, Body} ->
RemainingMessageIDs = remove_message_id(MessageIDs, Body),
receive_request_notifications(RemainingMessageIDs, ResponseDelay, [Body | Acc])
after (30 * 1000) ->
ct:pal("Waited a long time but did not get any message"),
ct:pal("Messages received so far:\n ~p", [Acc]),
ct:pal("Mailbox:\n ~p", [?drainMailbox()]),
ct:fail("All requests did not reach server at least once")
end.
remove_message_id(MessageIDs, #{body := IDBin}) ->
remove_message_id(MessageIDs, IDBin) ->
ID = erlang:binary_to_integer(IDBin),
%% It is acceptable to get the same message more than once
maps:without([ID], MessageIDs).

View File

@ -1,6 +1,6 @@
%% -*- mode: erlang; -*-
{erl_opts, [debug_info]}.
{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.7.7"}}}
{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.8.0"}}}
, {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.3"}}}
, {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0"}}}
, {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}}

View File

@ -3,7 +3,6 @@
%%--------------------------------------------------------------------
-module(emqx_bridge_kafka).
-include_lib("emqx_connector/include/emqx_connector.hrl").
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
@ -18,7 +17,9 @@
-import(hoconsc, [mk/2, enum/1, ref/2]).
-export([
conn_bridge_examples/1
bridge_v2_examples/1,
conn_bridge_examples/1,
connector_examples/1
]).
-export([
@ -26,7 +27,8 @@
roots/0,
fields/1,
desc/1,
host_opts/0
host_opts/0,
ssl_client_opts_fields/0
]).
-export([kafka_producer_converter/2, producer_strategy_key_validator/1]).
@ -34,13 +36,33 @@
%% -------------------------------------------------------------------------------------------------
%% api
connector_examples(_Method) ->
[
#{
<<"kafka_producer">> => #{
summary => <<"Kafka Connector">>,
value => maps:merge(
#{name => <<"my_connector">>, type => <<"kafka_producer">>},
values(common_config)
)
}
}
].
bridge_v2_examples(Method) ->
[
#{
<<"kafka_producer">> => #{
summary => <<"Kafka Bridge v2">>,
value => values({Method, bridge_v2_producer})
}
}
].
conn_bridge_examples(Method) ->
[
#{
%% TODO: rename this to `kafka_producer' after alias
%% support is added to hocon; keeping this as just `kafka'
%% for backwards compatibility.
<<"kafka">> => #{
<<"kafka_producer">> => #{
summary => <<"Kafka Producer Bridge">>,
value => values({Method, producer})
}
@ -54,11 +76,41 @@ conn_bridge_examples(Method) ->
].
values({get, KafkaType}) ->
values({post, KafkaType});
maps:merge(
#{
status => <<"connected">>,
node_status => [
#{
node => <<"emqx@localhost">>,
status => <<"connected">>
}
]
},
values({post, KafkaType})
);
values({post, KafkaType}) ->
maps:merge(values(common_config), values(KafkaType));
maps:merge(
#{
name => <<"my_kafka_bridge">>,
type => <<"kafka_producer">>
},
values({put, KafkaType})
);
values({put, KafkaType}) when KafkaType =:= bridge_v2_producer ->
values(KafkaType);
values({put, KafkaType}) ->
values({post, KafkaType});
maps:merge(values(common_config), values(KafkaType));
values(bridge_v2_producer) ->
maps:merge(
#{
enable => true,
connector => <<"my_kafka_connector">>,
resource_opts => #{
health_check_interval => "32s"
}
},
values(producer)
);
values(common_config) ->
#{
authentication => #{
@ -142,25 +194,73 @@ values(consumer) ->
%% -------------------------------------------------------------------------------------------------
%% Hocon Schema Definitions
%% In addition to the common ssl client options defined in emqx_schema module
%% Kafka supports a special value 'auto' in order to support different bootstrap endpoints
%% as well as partition leaders.
%% A static SNI is quite unusual for Kafka, but it's kept anyway.
ssl_overrides() ->
#{
"server_name_indication" =>
mk(
hoconsc:union([auto, disable, string()]),
#{
example => auto,
default => <<"auto">>,
importance => ?IMPORTANCE_LOW,
desc => ?DESC("server_name_indication")
}
)
}.
override(Fields, Overrides) ->
lists:map(
fun({Name, Sc}) ->
case maps:find(Name, Overrides) of
{ok, Override} ->
{Name, hocon_schema:override(Sc, Override)};
error ->
{Name, Sc}
end
end,
Fields
).
ssl_client_opts_fields() ->
override(emqx_schema:client_ssl_opts_schema(#{}), ssl_overrides()).
host_opts() ->
#{default_port => 9092}.
namespace() -> "bridge_kafka".
roots() -> ["config_consumer", "config_producer"].
roots() -> ["config_consumer", "config_producer", "config_bridge_v2"].
fields("post_" ++ Type) ->
[type_field(), name_field() | fields("config_" ++ Type)];
[type_field(Type), name_field() | fields("config_" ++ Type)];
fields("put_" ++ Type) ->
fields("config_" ++ Type);
fields("get_" ++ Type) ->
emqx_bridge_schema:status_fields() ++ fields("post_" ++ Type);
fields("config_bridge_v2") ->
fields(kafka_producer_action);
fields("config_connector") ->
fields(kafka_connector);
fields("config_producer") ->
fields(kafka_producer);
fields("config_consumer") ->
fields(kafka_consumer);
fields(kafka_connector) ->
fields("config");
fields(kafka_producer) ->
fields("config") ++ fields(producer_opts);
fields(kafka_producer_action) ->
[
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
{connector,
mk(binary(), #{
desc => ?DESC(emqx_connector_schema, "connector_field"), required => true
})}
] ++ fields(producer_opts);
fields(kafka_consumer) ->
fields("config") ++ fields(consumer_opts);
fields("config") ->
@ -199,8 +299,11 @@ fields("config") ->
mk(hoconsc:union([none, ref(auth_username_password), ref(auth_gssapi_kerberos)]), #{
default => none, desc => ?DESC("authentication")
})},
{socket_opts, mk(ref(socket_opts), #{required => false, desc => ?DESC(socket_opts)})}
] ++ emqx_connector_schema_lib:ssl_fields();
{socket_opts, mk(ref(socket_opts), #{required => false, desc => ?DESC(socket_opts)})},
{ssl, mk(ref(ssl_client_opts), #{})}
];
fields(ssl_client_opts) ->
ssl_client_opts_fields();
fields(auth_username_password) ->
[
{mechanism,
@ -269,7 +372,7 @@ fields(producer_opts) ->
desc => ?DESC(producer_kafka_opts),
validator => fun producer_strategy_key_validator/1
})},
{resource_opts, mk(ref(resource_opts), #{default => #{}})}
{resource_opts, mk(ref(resource_opts), #{default => #{}, desc => ?DESC(resource_opts)})}
];
fields(producer_kafka_opts) ->
[
@ -444,7 +547,7 @@ fields(consumer_kafka_opts) ->
[
{max_batch_bytes,
mk(emqx_schema:bytesize(), #{
default => "896KB", desc => ?DESC(consumer_max_batch_bytes)
default => <<"896KB">>, desc => ?DESC(consumer_max_batch_bytes)
})},
{max_rejoin_attempts,
mk(non_neg_integer(), #{
@ -472,12 +575,20 @@ desc("config") ->
?DESC("desc_config");
desc(resource_opts) ->
?DESC(emqx_resource_schema, "resource_opts");
desc("get_" ++ Type) when Type =:= "consumer"; Type =:= "producer" ->
desc("get_" ++ Type) when
Type =:= "consumer"; Type =:= "producer"; Type =:= "connector"; Type =:= "bridge_v2"
->
["Configuration for Kafka using `GET` method."];
desc("put_" ++ Type) when Type =:= "consumer"; Type =:= "producer" ->
desc("put_" ++ Type) when
Type =:= "consumer"; Type =:= "producer"; Type =:= "connector"; Type =:= "bridge_v2"
->
["Configuration for Kafka using `PUT` method."];
desc("post_" ++ Type) when Type =:= "consumer"; Type =:= "producer" ->
desc("post_" ++ Type) when
Type =:= "consumer"; Type =:= "producer"; Type =:= "connector"; Type =:= "bridge_v2"
->
["Configuration for Kafka using `POST` method."];
desc(kafka_producer_action) ->
?DESC("kafka_producer_action");
desc(Name) ->
lists:member(Name, struct_names()) orelse throw({missing_desc, Name}),
?DESC(Name).
@ -496,17 +607,19 @@ struct_names() ->
consumer_opts,
consumer_kafka_opts,
consumer_topic_mapping,
producer_kafka_ext_headers
producer_kafka_ext_headers,
ssl_client_opts
].
%% -------------------------------------------------------------------------------------------------
%% internal
type_field() ->
type_field(BridgeV2Type) when BridgeV2Type =:= "connector"; BridgeV2Type =:= "bridge_v2" ->
{type, mk(enum([kafka_producer]), #{required => true, desc => ?DESC("desc_type")})};
type_field(_) ->
{type,
%% TODO: rename `kafka' to `kafka_producer' after alias
%% support is added to hocon; keeping this as just `kafka' for
%% backwards compatibility.
mk(enum([kafka_consumer, kafka]), #{required => true, desc => ?DESC("desc_type")})}.
mk(enum([kafka_consumer, kafka, kafka_producer]), #{
required => true, desc => ?DESC("desc_type")
})}.
name_field() ->
{name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.

View File

@ -16,7 +16,11 @@
on_stop/2,
on_query/3,
on_query_async/4,
on_get_status/2
on_get_status/2,
on_add_channel/4,
on_remove_channel/3,
on_get_channels/1,
on_get_channel_status/3
]).
-export([
@ -27,7 +31,7 @@
-include_lib("emqx/include/logger.hrl").
%% Allocatable resources
-define(kafka_resource_id, kafka_resource_id).
-define(kafka_telemetry_id, kafka_telemetry_id).
-define(kafka_client_id, kafka_client_id).
-define(kafka_producers, kafka_producers).
@ -38,50 +42,54 @@ query_mode(_) ->
callback_mode() -> async_if_possible.
check_config(Key, Config) when is_map_key(Key, Config) ->
tr_config(Key, maps:get(Key, Config));
check_config(Key, _Config) ->
throw(#{
reason => missing_required_config,
missing_config => Key
}).
tr_config(bootstrap_hosts, Hosts) ->
emqx_bridge_kafka_impl:hosts(Hosts);
tr_config(authentication, Auth) ->
emqx_bridge_kafka_impl:sasl(Auth);
tr_config(ssl, Ssl) ->
ssl(Ssl);
tr_config(socket_opts, Opts) ->
emqx_bridge_kafka_impl:socket_opts(Opts);
tr_config(_Key, Value) ->
Value.
%% @doc Config schema is defined in emqx_bridge_kafka.
on_start(InstId, Config) ->
#{
authentication := Auth,
bootstrap_hosts := Hosts0,
bridge_name := BridgeName,
bridge_type := BridgeType,
connect_timeout := ConnTimeout,
kafka := KafkaConfig = #{
message := MessageTemplate,
topic := KafkaTopic,
sync_query_timeout := SyncQueryTimeout
},
metadata_request_timeout := MetaReqTimeout,
min_metadata_refresh_interval := MinMetaRefreshInterval,
socket_opts := SocketOpts,
ssl := SSL
} = Config,
KafkaHeadersTokens = preproc_kafka_headers(maps:get(kafka_headers, KafkaConfig, undefined)),
KafkaExtHeadersTokens = preproc_ext_headers(maps:get(kafka_ext_headers, KafkaConfig, [])),
KafkaHeadersValEncodeMode = maps:get(kafka_header_value_encode_mode, KafkaConfig, none),
ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
ok = emqx_resource:allocate_resource(InstId, ?kafka_resource_id, ResourceId),
_ = maybe_install_wolff_telemetry_handlers(ResourceId),
Hosts = emqx_bridge_kafka_impl:hosts(Hosts0),
ClientId = emqx_bridge_kafka_impl:make_client_id(BridgeType, BridgeName),
ok = emqx_resource:allocate_resource(InstId, ?kafka_client_id, ClientId),
C = fun(Key) -> check_config(Key, Config) end,
Hosts = C(bootstrap_hosts),
ClientConfig = #{
min_metadata_refresh_interval => MinMetaRefreshInterval,
connect_timeout => ConnTimeout,
client_id => ClientId,
request_timeout => MetaReqTimeout,
extra_sock_opts => emqx_bridge_kafka_impl:socket_opts(SocketOpts),
sasl => emqx_bridge_kafka_impl:sasl(Auth),
ssl => ssl(SSL)
min_metadata_refresh_interval => C(min_metadata_refresh_interval),
connect_timeout => C(connect_timeout),
request_timeout => C(metadata_request_timeout),
extra_sock_opts => C(socket_opts),
sasl => C(authentication),
ssl => C(ssl)
},
case do_get_topic_status(Hosts, KafkaConfig, KafkaTopic) of
unhealthy_target ->
throw(unhealthy_target);
_ ->
ok
end,
ClientId = InstId,
ok = emqx_resource:allocate_resource(InstId, ?kafka_client_id, ClientId),
case wolff:ensure_supervised_client(ClientId, Hosts, ClientConfig) of
{ok, _} ->
case wolff_client_sup:find_client(ClientId) of
{ok, Pid} ->
case wolff_client:check_connectivity(Pid) of
ok ->
ok;
{error, Error} ->
deallocate_client(ClientId),
throw({failed_to_connect, Error})
end;
{error, Reason} ->
deallocate_client(ClientId),
throw({failed_to_find_created_client, Reason})
end,
?SLOG(info, #{
msg => "kafka_client_started",
instance_id => InstId,
@ -89,7 +97,7 @@ on_start(InstId, Config) ->
});
{error, Reason} ->
?SLOG(error, #{
msg => "failed_to_start_kafka_client",
msg => failed_to_start_kafka_client,
instance_id => InstId,
kafka_hosts => Hosts,
reason => Reason
@ -97,7 +105,48 @@ on_start(InstId, Config) ->
throw(failed_to_start_kafka_client)
end,
%% Check if this is a dry run
TestIdStart = string:find(InstId, ?TEST_ID_PREFIX),
{ok, #{
client_id => ClientId,
installed_bridge_v2s => #{}
}}.
on_add_channel(
InstId,
#{
client_id := ClientId,
installed_bridge_v2s := InstalledBridgeV2s
} = OldState,
BridgeV2Id,
BridgeV2Config
) ->
%% The following will throw an exception if the bridge producers fails to start
{ok, BridgeV2State} = create_producers_for_bridge_v2(
InstId, BridgeV2Id, ClientId, BridgeV2Config
),
NewInstalledBridgeV2s = maps:put(BridgeV2Id, BridgeV2State, InstalledBridgeV2s),
%% Update state
NewState = OldState#{installed_bridge_v2s => NewInstalledBridgeV2s},
{ok, NewState}.
create_producers_for_bridge_v2(
InstId,
BridgeV2Id,
ClientId,
#{
bridge_type := BridgeType,
kafka := KafkaConfig
}
) ->
#{
message := MessageTemplate,
topic := KafkaTopic,
sync_query_timeout := SyncQueryTimeout
} = KafkaConfig,
KafkaHeadersTokens = preproc_kafka_headers(maps:get(kafka_headers, KafkaConfig, undefined)),
KafkaExtHeadersTokens = preproc_ext_headers(maps:get(kafka_ext_headers, KafkaConfig, [])),
KafkaHeadersValEncodeMode = maps:get(kafka_header_value_encode_mode, KafkaConfig, none),
{_BridgeType, BridgeName} = emqx_bridge_v2:parse_id(BridgeV2Id),
TestIdStart = string:find(BridgeV2Id, ?TEST_ID_PREFIX),
IsDryRun =
case TestIdStart of
nomatch ->
@ -105,18 +154,25 @@ on_start(InstId, Config) ->
_ ->
string:equal(TestIdStart, InstId)
end,
WolffProducerConfig = producers_config(BridgeType, BridgeName, ClientId, KafkaConfig, IsDryRun),
ok = check_topic_and_leader_connections(ClientId, KafkaTopic),
WolffProducerConfig = producers_config(
BridgeType, BridgeName, ClientId, KafkaConfig, IsDryRun, BridgeV2Id
),
case wolff:ensure_supervised_producers(ClientId, KafkaTopic, WolffProducerConfig) of
{ok, Producers} ->
ok = emqx_resource:allocate_resource(InstId, ?kafka_producers, Producers),
ok = emqx_resource:allocate_resource(InstId, {?kafka_producers, BridgeV2Id}, Producers),
ok = emqx_resource:allocate_resource(
InstId, {?kafka_telemetry_id, BridgeV2Id}, BridgeV2Id
),
_ = maybe_install_wolff_telemetry_handlers(BridgeV2Id),
{ok, #{
message_template => compile_message_template(MessageTemplate),
client_id => ClientId,
kafka_client_id => ClientId,
kafka_topic => KafkaTopic,
producers => Producers,
resource_id => ResourceId,
resource_id => BridgeV2Id,
connector_resource_id => InstId,
sync_query_timeout => SyncQueryTimeout,
hosts => Hosts,
kafka_config => KafkaConfig,
headers_tokens => KafkaHeadersTokens,
ext_headers_tokens => KafkaExtHeadersTokens,
@ -126,24 +182,10 @@ on_start(InstId, Config) ->
?SLOG(error, #{
msg => "failed_to_start_kafka_producer",
instance_id => InstId,
kafka_hosts => Hosts,
kafka_client_id => ClientId,
kafka_topic => KafkaTopic,
reason => Reason2
}),
%% Need to stop the already running client; otherwise, the
%% next `on_start' call will try to ensure the client
%% exists and it will be already present and using the old
%% config. This is specially bad if the original crash
%% was due to misconfiguration and we are trying to fix
%% it...
_ = with_log_at_error(
fun() -> wolff:stop_and_delete_supervised_client(ClientId) end,
#{
msg => "failed_to_delete_kafka_client",
client_id => ClientId
}
),
throw(
"Failed to start Kafka client. Please check the logs for errors and check"
" the connection parameters."
@ -151,68 +193,95 @@ on_start(InstId, Config) ->
end.
on_stop(InstanceId, _State) ->
case emqx_resource:get_allocated_resources(InstanceId) of
#{
?kafka_client_id := ClientId,
?kafka_producers := Producers,
?kafka_resource_id := ResourceId
} ->
_ = with_log_at_error(
fun() -> wolff:stop_and_delete_supervised_producers(Producers) end,
#{
msg => "failed_to_delete_kafka_producer",
client_id => ClientId
}
),
_ = with_log_at_error(
fun() -> wolff:stop_and_delete_supervised_client(ClientId) end,
#{
msg => "failed_to_delete_kafka_client",
client_id => ClientId
}
),
_ = with_log_at_error(
fun() -> uninstall_telemetry_handlers(ResourceId) end,
#{
msg => "failed_to_uninstall_telemetry_handlers",
resource_id => ResourceId
}
),
AllocatedResources = emqx_resource:get_allocated_resources(InstanceId),
ClientId = maps:get(?kafka_client_id, AllocatedResources, undefined),
case ClientId of
undefined ->
ok;
#{?kafka_client_id := ClientId, ?kafka_resource_id := ResourceId} ->
_ = with_log_at_error(
fun() -> wolff:stop_and_delete_supervised_client(ClientId) end,
#{
msg => "failed_to_delete_kafka_client",
client_id => ClientId
}
),
_ = with_log_at_error(
fun() -> uninstall_telemetry_handlers(ResourceId) end,
#{
msg => "failed_to_uninstall_telemetry_handlers",
resource_id => ResourceId
}
),
ok;
#{?kafka_resource_id := ResourceId} ->
_ = with_log_at_error(
fun() -> uninstall_telemetry_handlers(ResourceId) end,
#{
msg => "failed_to_uninstall_telemetry_handlers",
resource_id => ResourceId
}
),
ok;
_ ->
ok
ClientId ->
deallocate_client(ClientId)
end,
maps:foreach(
fun
({?kafka_producers, _BridgeV2Id}, Producers) ->
deallocate_producers(ClientId, Producers);
({?kafka_telemetry_id, _BridgeV2Id}, TelemetryId) ->
deallocate_telemetry_handlers(TelemetryId);
(_, _) ->
ok
end,
AllocatedResources
),
?tp(kafka_producer_stopped, #{instance_id => InstanceId}),
ok.
deallocate_client(ClientId) ->
_ = with_log_at_error(
fun() -> wolff:stop_and_delete_supervised_client(ClientId) end,
#{
msg => "failed_to_delete_kafka_client",
client_id => ClientId
}
),
ok.
deallocate_producers(ClientId, Producers) ->
_ = with_log_at_error(
fun() -> wolff:stop_and_delete_supervised_producers(Producers) end,
#{
msg => "failed_to_delete_kafka_producer",
client_id => ClientId
}
).
deallocate_telemetry_handlers(TelemetryId) ->
_ = with_log_at_error(
fun() -> uninstall_telemetry_handlers(TelemetryId) end,
#{
msg => "failed_to_uninstall_telemetry_handlers",
resource_id => TelemetryId
}
).
remove_producers_for_bridge_v2(
InstId, BridgeV2Id
) ->
AllocatedResources = emqx_resource:get_allocated_resources(InstId),
ClientId = maps:get(?kafka_client_id, AllocatedResources, no_client_id),
maps:foreach(
fun
({?kafka_producers, BridgeV2IdCheck}, Producers) when BridgeV2IdCheck =:= BridgeV2Id ->
deallocate_producers(ClientId, Producers);
({?kafka_telemetry_id, BridgeV2IdCheck}, TelemetryId) when
BridgeV2IdCheck =:= BridgeV2Id
->
deallocate_telemetry_handlers(TelemetryId);
(_, _) ->
ok
end,
AllocatedResources
),
ok.
on_remove_channel(
InstId,
#{
client_id := _ClientId,
installed_bridge_v2s := InstalledBridgeV2s
} = OldState,
BridgeV2Id
) ->
ok = remove_producers_for_bridge_v2(InstId, BridgeV2Id),
NewInstalledBridgeV2s = maps:remove(BridgeV2Id, InstalledBridgeV2s),
%% Update state
NewState = OldState#{installed_bridge_v2s => NewInstalledBridgeV2s},
{ok, NewState}.
on_query(
InstId,
{send_message, Message},
{MessageTag, Message},
#{installed_bridge_v2s := BridgeV2Configs} = _ConnectorState
) ->
#{
message_template := Template,
producers := Producers,
@ -220,8 +289,7 @@ on_query(
headers_tokens := KafkaHeadersTokens,
ext_headers_tokens := KafkaExtHeadersTokens,
headers_val_encode_mode := KafkaHeadersValEncodeMode
}
) ->
} = maps:get(MessageTag, BridgeV2Configs),
KafkaHeaders = #{
headers_tokens => KafkaHeadersTokens,
ext_headers_tokens => KafkaExtHeadersTokens,
@ -257,6 +325,9 @@ on_query(
{error, {unrecoverable_error, Error}}
end.
on_get_channels(ResId) ->
emqx_bridge_v2:get_channels_for_connector(ResId).
%% @doc The callback API for rule-engine (or bridge without rules)
%% The input argument `Message' is an enriched format (as a map())
%% of the original #message{} record.
@ -265,16 +336,17 @@ on_query(
%% or the direct mapping from an MQTT message.
on_query_async(
InstId,
{send_message, Message},
{MessageTag, Message},
AsyncReplyFn,
#{installed_bridge_v2s := BridgeV2Configs} = _ConnectorState
) ->
#{
message_template := Template,
producers := Producers,
headers_tokens := KafkaHeadersTokens,
ext_headers_tokens := KafkaExtHeadersTokens,
headers_val_encode_mode := KafkaHeadersValEncodeMode
}
) ->
} = maps:get(MessageTag, BridgeV2Configs),
KafkaHeaders = #{
headers_tokens => KafkaHeadersTokens,
ext_headers_tokens => KafkaExtHeadersTokens,
@ -399,68 +471,109 @@ on_kafka_ack(_Partition, buffer_overflow_discarded, _Callback) ->
%% Note: since wolff client has its own replayq that is not managed by
%% `emqx_resource_buffer_worker', we must avoid returning `disconnected' here. Otherwise,
%% `emqx_resource_manager' will kill the wolff producers and messages might be lost.
on_get_status(_InstId, #{client_id := ClientId} = State) ->
on_get_status(
_InstId,
#{client_id := ClientId} = State
) ->
case wolff_client_sup:find_client(ClientId) of
{ok, Pid} ->
case do_get_status(Pid, State) of
case wolff_client:check_connectivity(Pid) of
ok -> connected;
unhealthy_target -> {disconnected, State, unhealthy_target};
error -> connecting
{error, Error} -> {connecting, State, Error}
end;
{error, _Reason} ->
connecting
end.
do_get_status(Client, #{kafka_topic := KafkaTopic, hosts := Hosts, kafka_config := KafkaConfig}) ->
case do_get_topic_status(Hosts, KafkaConfig, KafkaTopic) of
unhealthy_target ->
unhealthy_target;
_ ->
case do_get_healthy_leaders(Client, KafkaTopic) of
[] -> error;
_ -> ok
end
end.
do_get_healthy_leaders(Client, KafkaTopic) ->
case wolff_client:get_leader_connections(Client, KafkaTopic) of
{ok, Leaders} ->
%% Kafka is considered healthy as long as any of the partition leader is reachable.
lists:filtermap(
fun({_Partition, Pid}) ->
case is_pid(Pid) andalso erlang:is_process_alive(Pid) of
true -> {true, Pid};
_ -> false
end
end,
Leaders
);
{error, _} ->
[]
end.
do_get_topic_status(Hosts, KafkaConfig, KafkaTopic) ->
CheckTopicFun =
fun() ->
wolff_client:check_if_topic_exists(Hosts, KafkaConfig, KafkaTopic)
end,
on_get_channel_status(
_ResId,
ChannelId,
#{
client_id := ClientId,
installed_bridge_v2s := Channels
} = _State
) ->
#{kafka_topic := KafkaTopic} = maps:get(ChannelId, Channels),
try
case emqx_utils:nolink_apply(CheckTopicFun, 5_000) of
ok -> ok;
{error, unknown_topic_or_partition} -> unhealthy_target;
_ -> error
end
ok = check_topic_and_leader_connections(ClientId, KafkaTopic),
connected
catch
_:_ ->
error
throw:#{reason := restarting} ->
conneting
end.
check_topic_and_leader_connections(ClientId, KafkaTopic) ->
case wolff_client_sup:find_client(ClientId) of
{ok, Pid} ->
ok = check_topic_status(ClientId, Pid, KafkaTopic),
ok = check_if_healthy_leaders(ClientId, Pid, KafkaTopic);
{error, no_such_client} ->
throw(#{
reason => cannot_find_kafka_client,
kafka_client => ClientId,
kafka_topic => KafkaTopic
});
{error, restarting} ->
throw(#{
reason => restarting,
kafka_client => ClientId,
kafka_topic => KafkaTopic
})
end.
check_if_healthy_leaders(ClientId, ClientPid, KafkaTopic) when is_pid(ClientPid) ->
Leaders =
case wolff_client:get_leader_connections(ClientPid, KafkaTopic) of
{ok, LeadersToCheck} ->
%% Kafka is considered healthy as long as any of the partition leader is reachable.
lists:filtermap(
fun({_Partition, Pid}) ->
case is_pid(Pid) andalso erlang:is_process_alive(Pid) of
true -> {true, Pid};
_ -> false
end
end,
LeadersToCheck
);
{error, _} ->
[]
end,
case Leaders of
[] ->
throw(#{
error => no_connected_partition_leader,
kafka_client => ClientId,
kafka_topic => KafkaTopic
});
_ ->
ok
end.
check_topic_status(ClientId, WolffClientPid, KafkaTopic) ->
case wolff_client:check_topic_exists_with_client_pid(WolffClientPid, KafkaTopic) of
ok ->
ok;
{error, unknown_topic_or_partition} ->
throw(#{
error => unknown_kafka_topic,
kafka_client_id => ClientId,
kafka_topic => KafkaTopic
});
{error, Reason} ->
throw(#{
error => failed_to_check_topic_status,
kafka_client_id => ClientId,
reason => Reason,
kafka_topic => KafkaTopic
})
end.
ssl(#{enable := true} = SSL) ->
emqx_tls_lib:to_client_opts(SSL);
ssl(_) ->
[].
false.
producers_config(BridgeType, BridgeName, ClientId, Input, IsDryRun) ->
producers_config(BridgeType, BridgeName, ClientId, Input, IsDryRun, BridgeV2Id) ->
#{
max_batch_bytes := MaxBatchBytes,
compression := Compression,
@ -486,7 +599,6 @@ producers_config(BridgeType, BridgeName, ClientId, Input, IsDryRun) ->
disk -> {false, replayq_dir(ClientId)};
hybrid -> {true, replayq_dir(ClientId)}
end,
ResourceID = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
#{
name => make_producer_name(BridgeType, BridgeName, IsDryRun),
partitioner => partitioner(PartitionStrategy),
@ -500,7 +612,7 @@ producers_config(BridgeType, BridgeName, ClientId, Input, IsDryRun) ->
max_batch_bytes => MaxBatchBytes,
max_send_ahead => MaxInflight - 1,
compression => Compression,
telemetry_meta_data => #{bridge_id => ResourceID}
telemetry_meta_data => #{bridge_id => BridgeV2Id}
}.
%% Wolff API is a batch API.

View File

@ -2187,7 +2187,7 @@ t_resource_manager_crash_after_subscriber_started(Config) ->
_ ->
ct:fail("unexpected result: ~p", [Res])
end,
?assertMatch({ok, _}, delete_bridge(Config)),
?assertMatch(ok, delete_bridge(Config)),
?retry(
_Sleep = 50,
_Attempts = 50,
@ -2244,7 +2244,7 @@ t_resource_manager_crash_before_subscriber_started(Config) ->
_ ->
ct:fail("unexpected result: ~p", [Res])
end,
?assertMatch({ok, _}, delete_bridge(Config)),
?assertMatch(ok, delete_bridge(Config)),
?retry(
_Sleep = 50,
_Attempts = 50,

View File

@ -19,7 +19,7 @@ kafka_producer_test() ->
#{
<<"bridges">> :=
#{
<<"kafka">> :=
<<"kafka_producer">> :=
#{
<<"myproducer">> :=
#{<<"kafka">> := #{}}
@ -32,7 +32,7 @@ kafka_producer_test() ->
#{
<<"bridges">> :=
#{
<<"kafka">> :=
<<"kafka_producer">> :=
#{
<<"myproducer">> :=
#{<<"local_topic">> := _}
@ -45,7 +45,7 @@ kafka_producer_test() ->
#{
<<"bridges">> :=
#{
<<"kafka">> :=
<<"kafka_producer">> :=
#{
<<"myproducer">> :=
#{
@ -61,7 +61,7 @@ kafka_producer_test() ->
#{
<<"bridges">> :=
#{
<<"kafka">> :=
<<"kafka_producer">> :=
#{
<<"myproducer">> :=
#{
@ -161,7 +161,7 @@ message_key_dispatch_validations_test() ->
?assertThrow(
{_, [
#{
path := "bridges.kafka.myproducer.kafka",
path := "bridges.kafka_producer.myproducer.kafka",
reason := "Message key cannot be empty when `key_dispatch` strategy is used"
}
]},
@ -170,7 +170,7 @@ message_key_dispatch_validations_test() ->
?assertThrow(
{_, [
#{
path := "bridges.kafka.myproducer.kafka",
path := "bridges.kafka_producer.myproducer.kafka",
reason := "Message key cannot be empty when `key_dispatch` strategy is used"
}
]},

View File

@ -0,0 +1,245 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_v2_kafka_producer_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("brod/include/brod.hrl").
-define(TYPE, kafka_producer).
all() ->
emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
_ = application:load(emqx_conf),
ok = emqx_common_test_helpers:start_apps(apps_to_start_and_stop()),
application:ensure_all_started(telemetry),
application:ensure_all_started(wolff),
application:ensure_all_started(brod),
emqx_bridge_kafka_impl_producer_SUITE:wait_until_kafka_is_up(),
Config.
end_per_suite(_Config) ->
emqx_common_test_helpers:stop_apps(apps_to_start_and_stop()).
apps_to_start_and_stop() ->
[
emqx,
emqx_conf,
emqx_connector,
emqx_bridge,
emqx_rule_engine
].
%% Verify create/list/remove lifecycle of bridge-v2 entries: the list is
%% empty initially, grows/shrinks with create/remove, and each listed
%% entry carries name/type/raw_config.
t_create_remove_list(_) ->
    [] = emqx_bridge_v2:list(),
    ConnectorConfig = connector_config(),
    {ok, _} = emqx_connector:create(?TYPE, test_connector, ConnectorConfig),
    Config = bridge_v2_config(<<"test_connector">>),
    {ok, _Config} = emqx_bridge_v2:create(?TYPE, test_bridge_v2, Config),
    [BridgeV2Info] = emqx_bridge_v2:list(),
    %% The single listed entry must describe the bridge just created.
    #{
        name := <<"test_bridge_v2">>,
        type := <<"kafka_producer">>,
        raw_config := _RawConfig
    } = BridgeV2Info,
    {ok, _Config2} = emqx_bridge_v2:create(?TYPE, test_bridge_v2_2, Config),
    2 = length(emqx_bridge_v2:list()),
    ok = emqx_bridge_v2:remove(?TYPE, test_bridge_v2),
    1 = length(emqx_bridge_v2:list()),
    ok = emqx_bridge_v2:remove(?TYPE, test_bridge_v2_2),
    [] = emqx_bridge_v2:list(),
    %% Clean up the connector (result intentionally ignored).
    emqx_connector:remove(?TYPE, test_connector),
    ok.
%% Test sending a message to a bridge V2: one bridge first, then ten
%% bridges sharing the same connector, each of which must deliver to
%% Kafka (verified per message by check_send_message_with_bridge/1).
t_send_message(_) ->
    BridgeV2Config = bridge_v2_config(<<"test_connector2">>),
    ConnectorConfig = connector_config(),
    {ok, _} = emqx_connector:create(?TYPE, test_connector2, ConnectorConfig),
    {ok, _} = emqx_bridge_v2:create(?TYPE, test_bridge_v2_1, BridgeV2Config),
    %% Use the bridge to send a message
    check_send_message_with_bridge(test_bridge_v2_1),
    %% Create a few more bridges with the same connector and test them
    BridgeNames1 = [
        list_to_atom("test_bridge_v2_" ++ integer_to_list(I))
     || I <- lists:seq(2, 10)
    ],
    lists:foreach(
        fun(BridgeName) ->
            {ok, _} = emqx_bridge_v2:create(?TYPE, BridgeName, BridgeV2Config),
            check_send_message_with_bridge(BridgeName)
        end,
        BridgeNames1
    ),
    BridgeNames = [test_bridge_v2_1 | BridgeNames1],
    %% Send more messages to the bridges (10 per bridge)
    lists:foreach(
        fun(BridgeName) ->
            lists:foreach(
                fun(_) ->
                    check_send_message_with_bridge(BridgeName)
                end,
                lists:seq(1, 10)
            )
        end,
        BridgeNames
    ),
    %% Remove all the bridges
    lists:foreach(
        fun(BridgeName) ->
            ok = emqx_bridge_v2:remove(?TYPE, BridgeName)
        end,
        BridgeNames
    ),
    emqx_connector:remove(?TYPE, test_connector2),
    ok.
%% Test that we can get the status of the bridge V2, and that health
%% checking a removed bridge yields {error, bridge_not_found}.
t_health_check(_) ->
    BridgeV2Config = bridge_v2_config(<<"test_connector3">>),
    ConnectorConfig = connector_config(),
    {ok, _} = emqx_connector:create(?TYPE, test_connector3, ConnectorConfig),
    {ok, _} = emqx_bridge_v2:create(?TYPE, test_bridge_v2, BridgeV2Config),
    connected = emqx_bridge_v2:health_check(?TYPE, test_bridge_v2),
    ok = emqx_bridge_v2:remove(?TYPE, test_bridge_v2),
    %% Check behaviour when bridge does not exist
    {error, bridge_not_found} = emqx_bridge_v2:health_check(?TYPE, test_bridge_v2),
    ok = emqx_connector:remove(?TYPE, test_connector3),
    ok.
%% Publishing to the bridge's configured local_topic (kafka_t/# in
%% bridge_v2_config/1) must forward the payload to Kafka.
t_local_topic(_) ->
    BridgeV2Config = bridge_v2_config(<<"test_connector">>),
    ConnectorConfig = connector_config(),
    {ok, _} = emqx_connector:create(?TYPE, test_connector, ConnectorConfig),
    {ok, _} = emqx_bridge_v2:create(?TYPE, test_bridge, BridgeV2Config),
    %% Send a message to the local topic
    Payload = <<"local_topic_payload">>,
    %% Snapshot the partition offset first so we know where to read back.
    Offset = resolve_kafka_offset(),
    emqx:publish(emqx_message:make(<<"kafka_t/hej">>, Payload)),
    check_kafka_message_payload(Offset, Payload),
    ok = emqx_bridge_v2:remove(?TYPE, test_bridge),
    ok = emqx_connector:remove(?TYPE, test_connector),
    ok.
%% Send one uniquely-tagged message through the given bridge and verify
%% that exactly that payload lands in Kafka at the pre-send offset.
check_send_message_with_bridge(BridgeName) ->
    %% Build a message whose payload is unique per call.
    Ts = erlang:unique_integer(),
    UniquePayload = list_to_binary("payload" ++ integer_to_list(Ts)),
    Message = #{
        clientid => integer_to_binary(Ts),
        payload => UniquePayload,
        timestamp => Ts
    },
    %% Snapshot the partition offset before publishing so we know where
    %% the new message will be written.
    StartOffset = resolve_kafka_offset(),
    %% Send the message through the bridge, then read it back from Kafka.
    emqx_bridge_v2:send_message(?TYPE, BridgeName, Message, #{}),
    check_kafka_message_payload(StartOffset, UniquePayload).
%% Return the current offset of partition 0 of the shared test topic,
%% i.e. where the next produced message will be appended.
resolve_kafka_offset() ->
    Topic = emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition(),
    Brokers = emqx_bridge_kafka_impl_producer_SUITE:kafka_hosts(),
    {ok, Offset} = emqx_bridge_kafka_impl_producer_SUITE:resolve_kafka_offset(
        Brokers, Topic, _Partition = 0
    ),
    Offset.
%% Fetch the single message at Offset from partition 0 of the test topic
%% and assert its value.  ExpectedPayload is already bound, so the
%% ?assertMatch pattern acts as an equality check on the message value.
check_kafka_message_payload(Offset, ExpectedPayload) ->
    KafkaTopic = emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition(),
    Partition = 0,
    Hosts = emqx_bridge_kafka_impl_producer_SUITE:kafka_hosts(),
    {ok, {_, [KafkaMsg0]}} = brod:fetch(Hosts, KafkaTopic, Partition, Offset),
    ?assertMatch(#kafka_message{value = ExpectedPayload}, KafkaMsg0).
%% Raw (binary-keyed) bridge-v2 config used by the test cases, pointing
%% at the given connector and at the shared single-partition test topic.
%% local_topic kafka_t/# makes the bridge also consume local publishes.
bridge_v2_config(ConnectorName) ->
    #{
        <<"connector">> => ConnectorName,
        <<"enable">> => true,
        <<"kafka">> => #{
            <<"buffer">> => #{
                <<"memory_overload_protection">> => false,
                <<"mode">> => <<"memory">>,
                <<"per_partition_limit">> => <<"2GB">>,
                <<"segment_bytes">> => <<"100MB">>
            },
            <<"compression">> => <<"no_compression">>,
            <<"kafka_header_value_encode_mode">> => <<"none">>,
            <<"max_batch_bytes">> => <<"896KB">>,
            <<"max_inflight">> => 10,
            %% Message templates: fields are taken from the map passed to
            %% send_message/4 (clientid/timestamp/payload).
            <<"message">> => #{
                <<"key">> => <<"${.clientid}">>,
                <<"timestamp">> => <<"${.timestamp}">>,
                <<"value">> => <<"${.payload}">>
            },
            <<"partition_count_refresh_interval">> => <<"60s">>,
            <<"partition_strategy">> => <<"random">>,
            <<"query_mode">> => <<"sync">>,
            <<"required_acks">> => <<"all_isr">>,
            <<"sync_query_timeout">> => <<"5s">>,
            <<"topic">> => emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition()
        },
        <<"local_topic">> => <<"kafka_t/#">>,
        <<"resource_opts">> => #{
            <<"health_check_interval">> => <<"15s">>
        }
    }.
%% Raw Kafka-producer connector config for the tests: plaintext listener
%% (no auth, TLS disabled) pointing at the CI Kafka broker.
connector_config() ->
    #{
        <<"authentication">> => <<"none">>,
        <<"bootstrap_hosts">> => iolist_to_binary(kafka_hosts_string()),
        <<"connect_timeout">> => <<"5s">>,
        <<"enable">> => true,
        <<"metadata_request_timeout">> => <<"5s">>,
        <<"min_metadata_refresh_interval">> => <<"3s">>,
        <<"socket_opts">> =>
            #{
                <<"recbuf">> => <<"1024KB">>,
                <<"sndbuf">> => <<"1024KB">>,
                <<"tcp_keepalive">> => <<"none">>
            },
        %% TLS section is present but disabled (enable => false).
        <<"ssl">> =>
            #{
                <<"ciphers">> => [],
                <<"depth">> => 10,
                <<"enable">> => false,
                <<"hibernate_after">> => <<"5s">>,
                <<"log_level">> => <<"notice">>,
                <<"reuse_sessions">> => true,
                <<"secure_renegotiate">> => true,
                <<"verify">> => <<"verify_peer">>,
                <<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>]
            }
    }.
%% Build the "Host:Port" string for the plaintext Kafka listener,
%% honoring the KAFKA_PLAIN_HOST / KAFKA_PLAIN_PORT env overrides.
kafka_hosts_string() ->
    Host = os:getenv("KAFKA_PLAIN_HOST", "kafka-1.emqx.net"),
    Port = os:getenv("KAFKA_PLAIN_PORT", "9092"),
    lists:append([Host, ":", Port]).

View File

@ -530,7 +530,7 @@ t_use_legacy_protocol_option(Config) ->
Expected0 = maps:from_keys(WorkerPids0, true),
LegacyOptions0 = maps:from_list([{Pid, mc_utils:use_legacy_protocol(Pid)} || Pid <- WorkerPids0]),
?assertEqual(Expected0, LegacyOptions0),
{ok, _} = delete_bridge(Config),
ok = delete_bridge(Config),
{ok, _} = create_bridge(Config, #{<<"use_legacy_protocol">> => <<"false">>}),
?retry(

View File

@ -179,7 +179,7 @@ clear_resources() ->
),
lists:foreach(
fun(#{type := Type, name := Name}) ->
{ok, _} = emqx_bridge:remove(Type, Name)
ok = emqx_bridge:remove(Type, Name)
end,
emqx_bridge:list()
).

View File

@ -1040,7 +1040,7 @@ t_resource_manager_crash_after_producers_started(Config) ->
Producers =/= undefined,
10_000
),
?assertMatch({ok, _}, delete_bridge(Config)),
?assertMatch(ok, delete_bridge(Config)),
?assertEqual([], get_pulsar_producers()),
ok
end,
@ -1073,7 +1073,7 @@ t_resource_manager_crash_before_producers_started(Config) ->
#{?snk_kind := pulsar_bridge_stopped, pulsar_producers := undefined},
10_000
),
?assertMatch({ok, _}, delete_bridge(Config)),
?assertMatch(ok, delete_bridge(Config)),
?assertEqual([], get_pulsar_producers()),
ok
end,

View File

@ -242,8 +242,7 @@ make_bridge(Config) ->
delete_bridge() ->
Type = <<"rabbitmq">>,
Name = atom_to_binary(?MODULE),
{ok, _} = emqx_bridge:remove(Type, Name),
ok.
ok = emqx_bridge:remove(Type, Name).
%%------------------------------------------------------------------------------
%% Test Cases

View File

@ -214,7 +214,7 @@ t_create_delete_bridge(Config) ->
%% check export through local topic
_ = check_resource_queries(ResourceId, <<"local_topic/test">>, IsBatch),
{ok, _} = emqx_bridge:remove(Type, Name).
ok = emqx_bridge:remove(Type, Name).
% check that we provide correct examples
t_check_values(_Config) ->
@ -294,7 +294,7 @@ t_check_replay(Config) ->
)
end
),
{ok, _} = emqx_bridge:remove(Type, Name).
ok = emqx_bridge:remove(Type, Name).
t_permanent_error(_Config) ->
Name = <<"invalid_command_bridge">>,
@ -322,7 +322,7 @@ t_permanent_error(_Config) ->
)
end
),
{ok, _} = emqx_bridge:remove(Type, Name).
ok = emqx_bridge:remove(Type, Name).
t_auth_username_password(_Config) ->
Name = <<"mybridge">>,
@ -338,7 +338,7 @@ t_auth_username_password(_Config) ->
emqx_resource:health_check(ResourceId),
5
),
{ok, _} = emqx_bridge:remove(Type, Name).
ok = emqx_bridge:remove(Type, Name).
t_auth_error_username_password(_Config) ->
Name = <<"mybridge">>,
@ -359,7 +359,7 @@ t_auth_error_username_password(_Config) ->
{ok, _, #{error := {unhealthy_target, _Msg}}},
emqx_resource_manager:lookup(ResourceId)
),
{ok, _} = emqx_bridge:remove(Type, Name).
ok = emqx_bridge:remove(Type, Name).
t_auth_error_password_only(_Config) ->
Name = <<"mybridge">>,
@ -379,7 +379,7 @@ t_auth_error_password_only(_Config) ->
{ok, _, #{error := {unhealthy_target, _Msg}}},
emqx_resource_manager:lookup(ResourceId)
),
{ok, _} = emqx_bridge:remove(Type, Name).
ok = emqx_bridge:remove(Type, Name).
t_create_disconnected(Config) ->
Name = <<"toxic_bridge">>,
@ -399,7 +399,7 @@ t_create_disconnected(Config) ->
ok
end
),
{ok, _} = emqx_bridge:remove(Type, Name).
ok = emqx_bridge:remove(Type, Name).
%%------------------------------------------------------------------------------
%% Helper functions

View File

@ -44,6 +44,7 @@
namespace/0, roots/0, fields/1, translations/0, translation/1, validations/0, desc/1, tags/0
]).
-export([conf_get/2, conf_get/3, keys/2, filter/1]).
-export([upgrade_raw_conf/1]).
%% internal exports for `emqx_enterprise_schema' only.
-export([ensure_unicode_path/2, convert_rotation/2, log_handler_common_confs/2]).
@ -53,6 +54,8 @@
%% by nodetool to generate app.<time>.config before EMQX is started
-define(MERGED_CONFIGS, [
emqx_bridge_schema,
emqx_connector_schema,
emqx_bridge_v2_schema,
emqx_retainer_schema,
emqx_authn_schema,
emqx_authz_schema,
@ -79,6 +82,10 @@
%% 1 million default ports counter
-define(DEFAULT_MAX_PORTS, 1024 * 1024).
%% Callback to upgrade config after loaded from config file but before validation.
upgrade_raw_conf(RawConf) ->
emqx_connector_schema:transform_bridges_v1_to_connectors_and_bridges_v2(RawConf).
%% root config should not have a namespace
namespace() -> undefined.
@ -953,7 +960,7 @@ fields("rpc") ->
sc(
string(),
#{
default => "0.0.0.0",
default => <<"0.0.0.0">>,
desc => ?DESC(rpc_listen_address),
importance => ?IMPORTANCE_MEDIUM
}
@ -1150,7 +1157,9 @@ translation("emqx") ->
];
translation("gen_rpc") ->
[
{"default_client_driver", fun tr_default_config_driver/1},
{"default_client_driver", fun tr_gen_rpc_default_client_driver/1},
{"tcp_client_port", fun tr_gen_rpc_tcp_client_port/1},
{"ssl_client_port", fun tr_gen_rpc_ssl_client_port/1},
{"ssl_client_options", fun tr_gen_rpc_ssl_options/1},
{"ssl_server_options", fun tr_gen_rpc_ssl_options/1},
{"socket_ip", fun(Conf) ->
@ -1223,8 +1232,14 @@ tr_vm_msacc_collector(Conf) ->
collector_enabled(enabled, Collector) -> [Collector];
collector_enabled(disabled, _) -> [].
tr_default_config_driver(Conf) ->
conf_get("rpc.driver", Conf).
tr_gen_rpc_default_client_driver(Conf) ->
conf_get("rpc.protocol", Conf).
tr_gen_rpc_tcp_client_port(Conf) ->
conf_get("rpc.tcp_server_port", Conf).
tr_gen_rpc_ssl_client_port(Conf) ->
conf_get("rpc.ssl_server_port", Conf).
tr_gen_rpc_ssl_options(Conf) ->
Ciphers = conf_get("rpc.ciphers", Conf),

View File

@ -0,0 +1,460 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_connector).
-behaviour(emqx_config_handler).
-behaviour(emqx_config_backup).
-include_lib("emqx/include/logger.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-export([
pre_config_update/3,
post_config_update/5
]).
-export([
create/3,
disable_enable/3,
get_metrics/2,
list/0,
load/0,
lookup/1,
lookup/2,
remove/2,
unload/0,
update/3
]).
-export([config_key_path/0]).
%% exported for `emqx_telemetry'
-export([get_basic_usage_info/0]).
%% Data backup
-export([
import_config/1
]).
-define(ROOT_KEY, connectors).
%% Start a resource for every connector present in the current config.
%% Failures are logged, not raised (see safe_load_connector/3).
load() ->
    Connectors = emqx:get_config([?ROOT_KEY], #{}),
    %% Flatten the nested (type => name => conf) map up front, then load
    %% each connector in turn.
    Entries = [
        {Type, Name, Conf}
     || {Type, NamedConf} <- maps:to_list(Connectors),
        {Name, Conf} <- maps:to_list(NamedConf)
    ],
    lists:foreach(
        fun({Type, Name, Conf}) -> safe_load_connector(Type, Name, Conf) end,
        Entries
    ).
%% Stop the resource of every configured connector.
unload() ->
    Connectors = emqx:get_config([?ROOT_KEY], #{}),
    %% Flatten the nested (type => name => conf) map into {Type, Name}
    %% pairs, then stop each connector's resource.
    Ids = [
        {Type, Name}
     || {Type, NamedConf} <- maps:to_list(Connectors),
        Name <- maps:keys(NamedConf)
    ],
    lists:foreach(
        fun({Type, Name}) -> _ = emqx_connector_resource:stop(Type, Name) end,
        Ids
    ).
%% Create the connector resource, logging (rather than crashing) on any
%% exception so that one bad connector cannot prevent the rest from
%% loading at start-up.
safe_load_connector(Type, Name, Conf) ->
    try
        _Res = emqx_connector_resource:create(Type, Name, Conf),
        %% Trace point consumed by snabbkaffe-based tests.
        ?tp(
            emqx_connector_loaded,
            #{
                type => Type,
                name => Name,
                res => _Res
            }
        )
    catch
        Err:Reason:ST ->
            ?SLOG(error, #{
                msg => "load_connector_failed",
                type => Type,
                name => Name,
                error => Err,
                reason => Reason,
                stacktrace => ST
            })
    end.
%% Root config path ([connectors]) this module handles as an
%% emqx_config_handler.
config_key_path() ->
    [?ROOT_KEY].
%% emqx_config_handler callback: massage raw config before validation.
%% Clause order matters — do not reorder.
%% 1) whole-root update with no change: pass through.
pre_config_update([?ROOT_KEY], RawConf, RawConf) ->
    {ok, RawConf};
%% 2) whole-root update: import certificates referenced by any connector.
pre_config_update([?ROOT_KEY], NewConf, _RawConf) ->
    {ok, convert_certs(NewConf)};
%% 3) enable/disable request against a non-existent connector.
pre_config_update(_, {_Oper, _, _}, undefined) ->
    {error, connector_not_found};
%% 4) enable/disable request: store the flag on the existing config.
pre_config_update(_, {Oper, _Type, _Name}, OldConfig) ->
    %% to save the 'enable' to the config files
    {ok, OldConfig#{<<"enable">> => operation_to_enable(Oper)}};
%% 5) single-connector create/update: import its certificates.
pre_config_update(Path, Conf, _OldConfig) when is_map(Conf) ->
    case emqx_connector_ssl:convert_certs(filename:join(Path), Conf) of
        {error, Reason} ->
            {error, Reason};
        {ok, ConfNew} ->
            {ok, ConfNew}
    end.
%% Map an enable/disable operation atom to the boolean value stored
%% under the connector's <<"enable">> key.
operation_to_enable(Oper) ->
    case Oper of
        enable -> true;
        disable -> false
    end.
%% emqx_config_handler callback: apply the validated config change to
%% the running resources.  Clause order matters.
%% Whole-root update: diff old vs new, refuse removals of connectors
%% that still have active channels, then remove/create/update resources.
post_config_update([?ROOT_KEY], _Req, NewConf, OldConf, _AppEnv) ->
    #{added := Added, removed := Removed, changed := Updated} =
        diff_confs(NewConf, OldConf),
    case ensure_no_channels(Removed) of
        ok ->
            %% The config update will be failed if any task in `perform_connector_changes` failed.
            Result = perform_connector_changes([
                #{action => fun emqx_connector_resource:remove/4, data => Removed},
                #{
                    action => fun emqx_connector_resource:create/4,
                    data => Added,
                    %% Roll back a partially-created resource on exception.
                    on_exception_fn => fun emqx_connector_resource:remove/4
                },
                #{action => fun emqx_connector_resource:update/4, data => Updated}
            ]),
            ?tp(connector_post_config_update_done, #{}),
            Result;
        {error, Error} ->
            {error, Error}
    end;
%% Single connector removed: only allowed when it has no channels left.
post_config_update([?ROOT_KEY, Type, Name], '$remove', _, _OldConf, _AppEnvs) ->
    case emqx_connector_resource:get_channels(Type, Name) of
        {ok, []} ->
            ok = emqx_connector_resource:remove(Type, Name),
            ?tp(connector_post_config_update_done, #{}),
            ok;
        {ok, Channels} ->
            {error, {active_channels, Channels}}
    end;
%% Single connector created (no previous config).
post_config_update([?ROOT_KEY, Type, Name], _Req, NewConf, undefined, _AppEnvs) ->
    ResOpts = emqx_resource:fetch_creation_opts(NewConf),
    ok = emqx_connector_resource:create(Type, Name, NewConf, ResOpts),
    ?tp(connector_post_config_update_done, #{}),
    ok;
%% Single connector updated in place.
post_config_update([?ROOT_KEY, Type, Name], _Req, NewConf, OldConf, _AppEnvs) ->
    ResOpts = emqx_resource:fetch_creation_opts(NewConf),
    ok = emqx_connector_resource:update(Type, Name, {OldConf, NewConf}, ResOpts),
    ?tp(connector_post_config_update_done, #{}),
    ok.
%% Return an info map (type/name/resource_data/raw_config) for every
%% connector whose resource instance exists; connectors without a
%% resource instance are silently skipped.
list() ->
    maps:fold(
        fun(Type, NameAndConf, Connectors) ->
            maps:fold(
                fun(Name, RawConf, Acc) ->
                    case lookup(Type, Name, RawConf) of
                        {error, not_found} -> Acc;
                        {ok, Res} -> [Res | Acc]
                    end
                end,
                Connectors,
                NameAndConf
            )
        end,
        [],
        emqx:get_raw_config([connectors], #{})
    ).
%% Look up a connector by its "type:name" id binary.
lookup(Id) ->
    {Type, Name} = emqx_connector_resource:parse_connector_id(Id),
    lookup(Type, Name).
%% Look up by type and name, fetching the raw config from the app env.
lookup(Type, Name) ->
    RawConf = emqx:get_raw_config([connectors, Type, Name], #{}),
    lookup(Type, Name, RawConf).
%% Combine the resource runtime data with the given raw config.
%% Returns {error, not_found} when no resource instance exists.
lookup(Type, Name, RawConf) ->
    case emqx_resource:get_instance(emqx_connector_resource:resource_id(Type, Name)) of
        {error, not_found} ->
            {error, not_found};
        {ok, _, Data} ->
            {ok, #{
                type => Type,
                name => Name,
                resource_data => Data,
                raw_config => RawConf
            }}
    end.
%% Fetch the resource metrics for the given connector.
get_metrics(Type, Name) ->
    emqx_resource:get_metrics(emqx_connector_resource:resource_id(Type, Name)).
%% Enable or disable a connector cluster-wide.  The {Action, Type, Name}
%% request tuple is interpreted by pre_config_update/3.
disable_enable(Action, ConnectorType, ConnectorName) when
    Action =:= disable; Action =:= enable
->
    emqx_conf:update(
        config_key_path() ++ [ConnectorType, ConnectorName],
        {Action, ConnectorType, ConnectorName},
        #{override_to => cluster}
    ).
%% Create a connector cluster-wide from its raw config.  Returns the
%% result of emqx_conf:update/3.  Secrets are redacted before logging.
create(ConnectorType, ConnectorName, RawConf) ->
    ?SLOG(debug, #{
        connector_action => create,
        connector_type => ConnectorType,
        connector_name => ConnectorName,
        connector_raw_config => emqx_utils:redact(RawConf)
    }),
    emqx_conf:update(
        emqx_connector:config_key_path() ++ [ConnectorType, ConnectorName],
        RawConf,
        #{override_to => cluster}
    ).
%% Remove a connector from the cluster-wide config.  Returns ok or
%% {error, Reason} (e.g. {active_channels, _} raised by
%% post_config_update/5 when the connector is still in use).
remove(ConnectorType, ConnectorName) ->
    ?SLOG(debug, #{
        %% Fixed: was the misspelled key `brige_action`; renamed to match
        %% the `connector_action` key used by create/3 and update/3.
        connector_action => remove,
        connector_type => ConnectorType,
        connector_name => ConnectorName
    }),
    case
        emqx_conf:remove(
            emqx_connector:config_key_path() ++ [ConnectorType, ConnectorName],
            #{override_to => cluster}
        )
    of
        {ok, _} ->
            ok;
        {error, Reason} ->
            {error, Reason}
    end.
%% Update an existing connector cluster-wide.  Fails with the lookup
%% error (e.g. {error, not_found}) when the connector does not exist.
update(ConnectorType, ConnectorName, RawConf) ->
    ?SLOG(debug, #{
        connector_action => update,
        connector_type => ConnectorType,
        connector_name => ConnectorName,
        connector_raw_config => emqx_utils:redact(RawConf)
    }),
    case lookup(ConnectorType, ConnectorName) of
        {ok, _Conf} ->
            emqx_conf:update(
                emqx_connector:config_key_path() ++ [ConnectorType, ConnectorName],
                RawConf,
                #{override_to => cluster}
            );
        Error ->
            Error
    end.
%%----------------------------------------------------------------------------------------
%% Data backup
%%----------------------------------------------------------------------------------------
%% Data-backup callback: merge imported connector configs over the
%% current ones (imported entries win per name) and report which config
%% paths changed.
import_config(RawConf) ->
    RootKeyPath = config_key_path(),
    ConnectorsConf = maps:get(<<"connectors">>, RawConf, #{}),
    OldConnectorsConf = emqx:get_raw_config(RootKeyPath, #{}),
    MergedConf = merge_confs(OldConnectorsConf, ConnectorsConf),
    case emqx_conf:update(RootKeyPath, MergedConf, #{override_to => cluster}) of
        {ok, #{raw_config := NewRawConf}} ->
            {ok, #{root_key => ?ROOT_KEY, changed => changed_paths(OldConnectorsConf, NewRawConf)}};
        Error ->
            {error, #{root_key => ?ROOT_KEY, reason => Error}}
    end.
%% Merge two (type => name => conf) maps.  Per connector name, entries
%% from NewConf win; old entries not mentioned in NewConf are preserved.
merge_confs(OldConf, NewConf) ->
    Types = maps:keys(maps:merge(OldConf, NewConf)),
    maps:from_list(
        [
            {Type,
                maps:merge(
                    maps:get(Type, OldConf, #{}),
                    maps:get(Type, NewConf, #{})
                )}
         || Type <- Types
        ]
    ).
%% List the config paths ([connectors, Type, Name]) whose raw config
%% differs between the old and the new root config.
changed_paths(OldRawConf, NewRawConf) ->
    CollectType =
        fun(Type, Connectors, Acc) ->
            Old = maps:get(Type, OldRawConf, #{}),
            #{changed := Changed} = emqx_utils_maps:diff_maps(Connectors, Old),
            [[?ROOT_KEY, Type, Name] || Name <- maps:keys(Changed)] ++ Acc
        end,
    maps:fold(CollectType, [], NewRawConf).
%%========================================================================================
%% Helper functions
%%========================================================================================
%% Import the certificates referenced by every connector config into the
%% local cert store.  Throws {bad_ssl_config, Reason} (caught by the
%% config-update machinery) on the first conversion failure.
convert_certs(ConnectorsConf) ->
    maps:map(
        fun(Type, Connectors) ->
            maps:map(
                fun(Name, ConnectorConf) ->
                    Path = filename:join([?ROOT_KEY, Type, Name]),
                    case emqx_connector_ssl:convert_certs(Path, ConnectorConf) of
                        {error, Reason} ->
                            ?SLOG(error, #{
                                msg => "bad_ssl_config",
                                type => Type,
                                name => Name,
                                reason => Reason
                            }),
                            throw({bad_ssl_config, Reason});
                        {ok, ConnectorConf1} ->
                            ConnectorConf1
                    end
                end,
                Connectors
            )
        end,
        ConnectorsConf
    ).
%% Run a list of tasks (#{action, data, [on_exception_fn]}) over their
%% {Type, Name} => Conf maps.  Once any action returns {error, _}, all
%% remaining actions (in this task and later ones) are skipped and that
%% error is the final result.
perform_connector_changes(Tasks) ->
    perform_connector_changes(Tasks, ok).
%% Tail-recursive worker: threads the accumulated result through tasks.
perform_connector_changes([], Result) ->
    Result;
perform_connector_changes([#{action := Action, data := MapConfs} = Task | Tasks], Result0) ->
    %% Optional cleanup hook invoked when Action raises; defaults to no-op.
    OnException = maps:get(on_exception_fn, Task, fun(_Type, _Name, _Conf, _Opts) -> ok end),
    Result = maps:fold(
        fun
            %% An earlier error short-circuits: skip remaining entries.
            ({_Type, _Name}, _Conf, {error, Reason}) ->
                {error, Reason};
            %% for emqx_connector_resource:update/4
            ({Type, Name}, {OldConf, Conf}, _) ->
                ResOpts = emqx_resource:fetch_creation_opts(Conf),
                case Action(Type, Name, {OldConf, Conf}, ResOpts) of
                    {error, Reason} -> {error, Reason};
                    Return -> Return
                end;
            %% create/remove: single-config entries; exceptions are logged,
            %% the cleanup hook runs, and the exception is re-raised.
            ({Type, Name}, Conf, _) ->
                ResOpts = emqx_resource:fetch_creation_opts(Conf),
                try Action(Type, Name, Conf, ResOpts) of
                    {error, Reason} -> {error, Reason};
                    Return -> Return
                catch
                    Kind:Error:Stacktrace ->
                        ?SLOG(error, #{
                            msg => "connector_config_update_exception",
                            kind => Kind,
                            error => Error,
                            type => Type,
                            name => Name,
                            stacktrace => Stacktrace
                        }),
                        OnException(Type, Name, Conf, ResOpts),
                        erlang:raise(Kind, Error, Stacktrace)
                end
        end,
        Result0,
        MapConfs
    ),
    perform_connector_changes(Tasks, Result).
%% Diff two nested connector config maps after flattening them to
%% {Type, Name} keys; yields #{added, removed, changed, ...}.
diff_confs(NewConfs, OldConfs) ->
    emqx_utils_maps:diff_maps(
        flatten_confs(NewConfs),
        flatten_confs(OldConfs)
    ).
%% Flatten a nested (type => name => conf) map into a flat map keyed by
%% {Type, Name} pairs, as expected by emqx_utils_maps:diff_maps/2.
flatten_confs(Conf0) ->
    Pairs = lists:append(
        [do_flatten_confs(Type, NamedConf) || {Type, NamedConf} <- maps:to_list(Conf0)]
    ),
    maps:from_list(Pairs).
%% Expand one type's (name => conf) map into [{{Type, Name}, Conf}].
do_flatten_confs(Type, Conf0) ->
    [{{Type, Name}, Conf} || {Name, Conf} <- maps:to_list(Conf0)].
-spec get_basic_usage_info() ->
    #{
        num_connectors => non_neg_integer(),
        count_by_type =>
            #{ConnectorType => non_neg_integer()}
    }
when
    ConnectorType :: atom().
%% Telemetry helper: count enabled connectors, total and per type.
%% Disabled connectors are excluded; any failure (e.g. the app not being
%% up yet) yields the zeroed accumulator instead of crashing.
get_basic_usage_info() ->
    InitialAcc = #{num_connectors => 0, count_by_type => #{}},
    try
        lists:foldl(
            fun
                %% Skip disabled connectors.
                (#{resource_data := #{config := #{enable := false}}}, Acc) ->
                    Acc;
                (#{type := ConnectorType}, Acc) ->
                    NumConnectors = maps:get(num_connectors, Acc),
                    CountByType0 = maps:get(count_by_type, Acc),
                    CountByType = maps:update_with(
                        binary_to_atom(ConnectorType, utf8),
                        fun(X) -> X + 1 end,
                        1,
                        CountByType0
                    ),
                    Acc#{
                        num_connectors => NumConnectors + 1,
                        count_by_type => CountByType
                    }
            end,
            InitialAcc,
            list()
        )
    catch
        %% for instance, when the connector app is not ready yet.
        _:_ ->
            InitialAcc
    end.
%% Check that none of the given connectors (keyed by {Type, Name}) still
%% has active channels.  Returns ok, or an {error, #{...}} describing
%% the first connector found with active channels.
ensure_no_channels(Configs) ->
    Pipeline =
        lists:map(
            fun({Type, ConnectorName}) ->
                fun(_) ->
                    case emqx_connector_resource:get_channels(Type, ConnectorName) of
                        {ok, []} ->
                            ok;
                        {ok, Channels} ->
                            {error, #{
                                reason => "connector_has_active_channels",
                                type => Type,
                                connector_name => ConnectorName,
                                active_channels => Channels
                            }}
                    end
                end
            end,
            maps:keys(Configs)
        ),
    %% pipeline/3 stops at the first check that returns an error.
    case emqx_utils:pipeline(Pipeline, unused, unused) of
        {ok, _, _} ->
            ok;
        {error, Reason, _State} ->
            {error, Reason}
    end.

View File

@ -0,0 +1,768 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_connector_api).
-behaviour(minirest_api).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("emqx_utils/include/emqx_utils_api.hrl").
-import(hoconsc, [mk/2, array/1, enum/1]).
%% Swagger specs from hocon schema
-export([
api_spec/0,
paths/0,
schema/1,
namespace/0
]).
%% API callbacks
-export([
'/connectors'/2,
'/connectors/:id'/2,
'/connectors/:id/enable/:enable'/2,
'/connectors/:id/:operation'/2,
'/nodes/:node/connectors/:id/:operation'/2,
'/connectors_probe'/2
]).
-export([lookup_from_local_node/2]).
-define(CONNECTOR_NOT_ENABLED,
?BAD_REQUEST(<<"Forbidden operation, connector not enabled">>)
).
-define(CONNECTOR_NOT_FOUND(CONNECTOR_TYPE, CONNECTOR_NAME),
?NOT_FOUND(
<<"Connector lookup failed: connector named '", (bin(CONNECTOR_NAME))/binary, "' of type ",
(bin(CONNECTOR_TYPE))/binary, " does not exist.">>
)
).
%% Don't turn connector_name to atom, it's maybe not a existing atom.
-define(TRY_PARSE_ID(ID, EXPR),
try emqx_connector_resource:parse_connector_id(Id, #{atom_name => false}) of
{ConnectorType, ConnectorName} ->
EXPR
catch
throw:#{reason := Reason} ->
?NOT_FOUND(<<"Invalid connector ID, ", Reason/binary>>)
end
).
%% Swagger namespace for this API module.
namespace() -> "connector".
%% Generate the OpenAPI spec from this module's schema/1, with request
%% schema checking enabled.
api_spec() ->
    emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}).
%% All HTTP paths served by this module (one schema/1 clause each).
paths() ->
    [
        "/connectors",
        "/connectors/:id",
        "/connectors/:id/enable/:enable",
        "/connectors/:id/:operation",
        "/nodes/:node/connectors/:id/:operation",
        "/connectors_probe"
    ].
%% Normalize (Code(s), Message) into the dashboard error-codes schema:
%% a single atom code becomes a list, a string message becomes a binary.
error_schema(Code, Message) when is_atom(Code) ->
    error_schema([Code], Message);
error_schema(Codes, Message) when is_list(Message) ->
    error_schema(Codes, list_to_binary(Message));
error_schema(Codes, Message) when is_list(Codes) andalso is_binary(Message) ->
    emqx_dashboard_swagger:error_codes(Codes, Message).
%% Response-body schema for successful GET responses, with examples.
get_response_body_schema() ->
    emqx_dashboard_swagger:schema_with_examples(
        emqx_connector_schema:get_response(),
        connector_info_examples(get)
    ).
%% Path parameter: operation applied cluster-wide (start/stop/restart).
param_path_operation_cluster() ->
    {operation,
        mk(
            enum([start, stop, restart]),
            #{
                in => path,
                required => true,
                example => <<"start">>,
                desc => ?DESC("desc_param_path_operation_cluster")
            }
        )}.
%% Path parameter: operation applied on a single node.
param_path_operation_on_node() ->
    {operation,
        mk(
            enum([start, stop, restart]),
            #{
                in => path,
                required => true,
                example => <<"start">>,
                desc => ?DESC("desc_param_path_operation_on_node")
            }
        )}.
%% Path parameter: target node name.
param_path_node() ->
    {node,
        mk(
            binary(),
            #{
                in => path,
                required => true,
                example => <<"emqx@127.0.0.1">>,
                desc => ?DESC("desc_param_path_node")
            }
        )}.
%% Path parameter: connector id ("type:name").
param_path_id() ->
    {id,
        mk(
            binary(),
            #{
                in => path,
                required => true,
                example => <<"webhook:webhook_example">>,
                desc => ?DESC("desc_param_path_id")
            }
        )}.
%% Path parameter: enable flag (true/false).
param_path_enable() ->
    {enable,
        mk(
            boolean(),
            #{
                in => path,
                required => true,
                desc => ?DESC("desc_param_path_enable"),
                example => true
            }
        )}.
%% Flatten the example map into a bare list of example configs, used for
%% the list endpoint's response example.
connector_info_array_example(Method) ->
    lists:map(fun(#{value := Config}) -> Config end, maps:values(connector_info_examples(Method))).
%% Collect connector examples per HTTP method.  The empty map is a
%% placeholder for community-edition examples; currently only the
%% enterprise schema contributes any.
connector_info_examples(Method) ->
    maps:merge(
        #{},
        emqx_enterprise_connector_examples(Method)
    ).
-if(?EMQX_RELEASE_EDITION == ee).
%% Enterprise build: delegate to the EE schema module.
emqx_enterprise_connector_examples(Method) ->
    emqx_connector_ee_schema:examples(Method).
-else.
%% Community build: no extra examples.
emqx_enterprise_connector_examples(_Method) -> #{}.
-endif.
%% OpenAPI schema, one clause per path in paths/0.
%% GET/POST /connectors: list all connectors / create a connector.
schema("/connectors") ->
    #{
        'operationId' => '/connectors',
        get => #{
            tags => [<<"connectors">>],
            summary => <<"List connectors">>,
            description => ?DESC("desc_api1"),
            responses => #{
                200 => emqx_dashboard_swagger:schema_with_example(
                    array(emqx_connector_schema:get_response()),
                    connector_info_array_example(get)
                )
            }
        },
        post => #{
            tags => [<<"connectors">>],
            summary => <<"Create connector">>,
            description => ?DESC("desc_api2"),
            'requestBody' => emqx_dashboard_swagger:schema_with_examples(
                emqx_connector_schema:post_request(),
                connector_info_examples(post)
            ),
            responses => #{
                201 => get_response_body_schema(),
                400 => error_schema('ALREADY_EXISTS', "Connector already exists")
            }
        }
    };
%% GET/PUT/DELETE /connectors/:id: fetch, update or delete one connector.
schema("/connectors/:id") ->
    #{
        'operationId' => '/connectors/:id',
        get => #{
            tags => [<<"connectors">>],
            summary => <<"Get connector">>,
            description => ?DESC("desc_api3"),
            parameters => [param_path_id()],
            responses => #{
                200 => get_response_body_schema(),
                404 => error_schema('NOT_FOUND', "Connector not found")
            }
        },
        put => #{
            tags => [<<"connectors">>],
            summary => <<"Update connector">>,
            description => ?DESC("desc_api4"),
            parameters => [param_path_id()],
            'requestBody' => emqx_dashboard_swagger:schema_with_examples(
                emqx_connector_schema:put_request(),
                connector_info_examples(put)
            ),
            responses => #{
                200 => get_response_body_schema(),
                404 => error_schema('NOT_FOUND', "Connector not found"),
                400 => error_schema('BAD_REQUEST', "Update connector failed")
            }
        },
        delete => #{
            tags => [<<"connectors">>],
            summary => <<"Delete connector">>,
            description => ?DESC("desc_api5"),
            parameters => [param_path_id()],
            responses => #{
                204 => <<"Connector deleted">>,
                400 => error_schema(
                    'BAD_REQUEST',
                    "Cannot delete connector while active rules are defined for this connector"
                ),
                404 => error_schema('NOT_FOUND', "Connector not found"),
                503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable")
            }
        }
    };
%% PUT /connectors/:id/enable/:enable: toggle the enable flag.
schema("/connectors/:id/enable/:enable") ->
    #{
        'operationId' => '/connectors/:id/enable/:enable',
        put =>
            #{
                tags => [<<"connectors">>],
                summary => <<"Enable or disable connector">>,
                desc => ?DESC("desc_enable_connector"),
                parameters => [param_path_id(), param_path_enable()],
                responses =>
                    #{
                        204 => <<"Success">>,
                        404 => error_schema(
                            'NOT_FOUND', "Connector not found or invalid operation"
                        ),
                        503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable")
                    }
            }
    };
%% POST /connectors/:id/:operation: cluster-wide start/stop/restart.
schema("/connectors/:id/:operation") ->
    #{
        'operationId' => '/connectors/:id/:operation',
        post => #{
            tags => [<<"connectors">>],
            summary => <<"Stop, start or restart connector">>,
            description => ?DESC("desc_api7"),
            parameters => [
                param_path_id(),
                param_path_operation_cluster()
            ],
            responses => #{
                204 => <<"Operation success">>,
                400 => error_schema(
                    'BAD_REQUEST', "Problem with configuration of external service"
                ),
                404 => error_schema('NOT_FOUND', "Connector not found or invalid operation"),
                501 => error_schema('NOT_IMPLEMENTED', "Not Implemented"),
                503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable")
            }
        }
    };
%% POST /nodes/:node/connectors/:id/:operation: per-node operation.
schema("/nodes/:node/connectors/:id/:operation") ->
    #{
        'operationId' => '/nodes/:node/connectors/:id/:operation',
        post => #{
            tags => [<<"connectors">>],
            summary => <<"Stop, start or restart connector">>,
            description => ?DESC("desc_api8"),
            parameters => [
                param_path_node(),
                param_path_id(),
                param_path_operation_on_node()
            ],
            responses => #{
                204 => <<"Operation success">>,
                400 => error_schema(
                    'BAD_REQUEST',
                    "Problem with configuration of external service or connector not enabled"
                ),
                404 => error_schema(
                    'NOT_FOUND', "Connector or node not found or invalid operation"
                ),
                501 => error_schema('NOT_IMPLEMENTED', "Not Implemented"),
                503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable")
            }
        }
    };
%% POST /connectors_probe: dry-run a connector config without creating it.
schema("/connectors_probe") ->
    #{
        'operationId' => '/connectors_probe',
        post => #{
            tags => [<<"connectors">>],
            desc => ?DESC("desc_api9"),
            summary => <<"Test creating connector">>,
            'requestBody' => emqx_dashboard_swagger:schema_with_examples(
                emqx_connector_schema:post_request(),
                connector_info_examples(post)
            ),
            responses => #{
                204 => <<"Test connector OK">>,
                400 => error_schema(['TEST_FAILED'], "connector test failed")
            }
        }
    }.
%% Handler for /connectors.
%% POST: reject duplicates, then create the connector from the body.
'/connectors'(post, #{body := #{<<"type">> := ConnectorType, <<"name">> := ConnectorName} = Conf0}) ->
    case emqx_connector:lookup(ConnectorType, ConnectorName) of
        {ok, _} ->
            ?BAD_REQUEST('ALREADY_EXISTS', <<"connector already exists">>);
        {error, not_found} ->
            Conf = filter_out_request_body(Conf0),
            create_connector(ConnectorType, ConnectorName, Conf)
    end;
%% GET: collect connectors from every running node and zip per-node
%% results into one list.
'/connectors'(get, _Params) ->
    Nodes = mria:running_nodes(),
    NodeReplies = emqx_connector_proto_v1:list_connectors_on_nodes(Nodes),
    case is_ok(NodeReplies) of
        {ok, NodeConnectors} ->
            AllConnectors = [
                [format_resource(Data, Node) || Data <- Connectors]
             || {Node, Connectors} <- lists:zip(Nodes, NodeConnectors)
            ],
            ?OK(zip_connectors(AllConnectors));
        {error, Reason} ->
            ?INTERNAL_ERROR(Reason)
    end.
%% Handler for /connectors/:id.  ?TRY_PARSE_ID parses Id and binds
%% ConnectorType/ConnectorName inside its expression, returning 404 on
%% malformed ids.
%% GET: aggregate connector status from all nodes.
'/connectors/:id'(get, #{bindings := #{id := Id}}) ->
    ?TRY_PARSE_ID(Id, lookup_from_all_nodes(ConnectorType, ConnectorName, 200));
%% PUT: update an existing connector; obfuscated secrets in the request
%% body are restored from the stored raw config first.
'/connectors/:id'(put, #{bindings := #{id := Id}, body := Conf0}) ->
    Conf1 = filter_out_request_body(Conf0),
    ?TRY_PARSE_ID(
        Id,
        case emqx_connector:lookup(ConnectorType, ConnectorName) of
            {ok, _} ->
                RawConf = emqx:get_raw_config([connectors, ConnectorType, ConnectorName], #{}),
                Conf = deobfuscate(Conf1, RawConf),
                update_connector(ConnectorType, ConnectorName, Conf);
            {error, not_found} ->
                ?CONNECTOR_NOT_FOUND(ConnectorType, ConnectorName)
        end
    );
%% DELETE: remove the connector; refused (400) while channels are still
%% attached to it.
'/connectors/:id'(delete, #{bindings := #{id := Id}}) ->
    ?TRY_PARSE_ID(
        Id,
        case emqx_connector:lookup(ConnectorType, ConnectorName) of
            {ok, _} ->
                case emqx_connector:remove(ConnectorType, ConnectorName) of
                    ok ->
                        ?NO_CONTENT;
                    {error, {active_channels, Channels}} ->
                        ?BAD_REQUEST(
                            {<<"Cannot delete connector while there are active channels defined for this connector">>,
                                Channels}
                        );
                    {error, timeout} ->
                        ?SERVICE_UNAVAILABLE(<<"request timeout">>);
                    {error, Reason} ->
                        ?INTERNAL_ERROR(Reason)
                end;
            {error, not_found} ->
                ?CONNECTOR_NOT_FOUND(ConnectorType, ConnectorName)
        end
    ).
%% REST handler that dry-runs a connector config without persisting it.
%% The body is schema-checked first; on success a temporary resource is
%% created and immediately torn down (create_dry_run).
'/connectors_probe'(post, Request) ->
    RequestMeta = #{module => ?MODULE, method => post, path => "/connectors_probe"},
    case emqx_dashboard_swagger:filter_check_request_and_translate_body(Request, RequestMeta) of
        {ok, #{body := #{<<"type">> := ConnType} = Params}} ->
            Params1 = maybe_deobfuscate_connector_probe(Params),
            case
                emqx_connector_resource:create_dry_run(ConnType, maps:remove(<<"type">>, Params1))
            of
                ok ->
                    ?NO_CONTENT;
                {error, #{kind := validation_error} = Reason0} ->
                    Reason = redact(Reason0),
                    ?BAD_REQUEST('TEST_FAILED', map_to_json(Reason));
                {error, Reason0} when not is_tuple(Reason0); element(1, Reason0) =/= 'exit' ->
                    %% 'exit' tuples deliberately fall through this clause
                    %% (presumably to crash into a 500 instead of a 400).
                    Reason1 =
                        case Reason0 of
                            {unhealthy_target, Message} -> Message;
                            _ -> Reason0
                        end,
                    Reason = redact(Reason1),
                    ?BAD_REQUEST('TEST_FAILED', Reason)
            end;
        BadRequest ->
            %% Schema check failed; redact any secrets before replying.
            redact(BadRequest)
    end.
%% If the probed connector already exists, replace redacted secret values in
%% Params with the stored raw-config values; otherwise pass Params through
%% unchanged (probing before creation is a legitimate flow).
maybe_deobfuscate_connector_probe(
    #{<<"type">> := ConnectorType, <<"name">> := ConnectorName} = Params
) ->
    case emqx_connector:lookup(ConnectorType, ConnectorName) of
        {ok, _} ->
            RawConf = emqx:get_raw_config([connectors, ConnectorType, ConnectorName], #{}),
            deobfuscate(Params, RawConf);
        _ ->
            %% A connector may be probed before it's created, so not finding it here is fine
            Params
    end;
maybe_deobfuscate_connector_probe(Params) ->
    Params.
%% Look a connector up on every running node and, if found, aggregate the
%% per-node results into one response with status SuccCode.
%% The first node's reply decides found/not-found for the whole cluster.
lookup_from_all_nodes(ConnectorType, ConnectorName, SuccCode) ->
    Nodes = mria:running_nodes(),
    case
        is_ok(emqx_connector_proto_v1:lookup_from_all_nodes(Nodes, ConnectorType, ConnectorName))
    of
        {ok, [{ok, _} | _] = Results} ->
            {SuccCode, format_connector_info([R || {ok, R} <- Results])};
        {ok, [{error, not_found} | _]} ->
            ?CONNECTOR_NOT_FOUND(ConnectorType, ConnectorName);
        {error, Reason} ->
            ?INTERNAL_ERROR(Reason)
    end.
%% Look a connector up on this node only; invoked over RPC by
%% emqx_connector_proto_v1:lookup_from_all_nodes/3.
lookup_from_local_node(ConnectorType, ConnectorName) ->
    case emqx_connector:lookup(ConnectorType, ConnectorName) of
        {ok, Res} -> {ok, format_resource(Res, node())};
        Error -> Error
    end.
%% Create a connector and reply 201 with the aggregated cluster view.
create_connector(ConnectorType, ConnectorName, Conf) ->
    create_or_update_connector(ConnectorType, ConnectorName, Conf, 201).
%% Update a connector and reply 200 with the aggregated cluster view.
update_connector(ConnectorType, ConnectorName, Conf) ->
    create_or_update_connector(ConnectorType, ConnectorName, Conf, 200).
%% Shared create/update path: apply the config change, then read the result
%% back from all nodes. Map-shaped errors are schema/validation failures and
%% become a 400 with a JSON body.
create_or_update_connector(ConnectorType, ConnectorName, Conf, HttpStatusCode) ->
    case emqx_connector:create(ConnectorType, ConnectorName, Conf) of
        {ok, _} ->
            lookup_from_all_nodes(ConnectorType, ConnectorName, HttpStatusCode);
        {error, Reason} when is_map(Reason) ->
            ?BAD_REQUEST(map_to_json(redact(Reason)))
    end.
%% REST handler toggling the `enable` flag of a connector via config update.
'/connectors/:id/enable/:enable'(put, #{bindings := #{id := Id, enable := Enable}}) ->
    ?TRY_PARSE_ID(
        Id,
        case emqx_connector:disable_enable(enable_func(Enable), ConnectorType, ConnectorName) of
            {ok, _} ->
                ?NO_CONTENT;
            {error, {pre_config_update, _, connector_not_found}} ->
                ?CONNECTOR_NOT_FOUND(ConnectorType, ConnectorName);
            {error, {_, _, timeout}} ->
                ?SERVICE_UNAVAILABLE(<<"request timeout">>);
            {error, timeout} ->
                ?SERVICE_UNAVAILABLE(<<"request timeout">>);
            {error, Reason} ->
                ?INTERNAL_ERROR(Reason)
        end
    ).
%% REST handler for cluster-wide start/stop/restart of a connector.
'/connectors/:id/:operation'(post, #{
    bindings :=
        #{id := Id, operation := Op}
}) ->
    ?TRY_PARSE_ID(
        Id,
        begin
            OperFunc = operation_func(all, Op),
            Nodes = mria:running_nodes(),
            call_operation_if_enabled(all, OperFunc, [Nodes, ConnectorType, ConnectorName])
        end
    ).
%% REST handler for start/stop/restart of a connector on one specific node.
%% safe_to_existing_atom avoids creating atoms from arbitrary client input.
'/nodes/:node/connectors/:id/:operation'(post, #{
    bindings :=
        #{id := Id, operation := Op, node := Node}
}) ->
    ?TRY_PARSE_ID(
        Id,
        case emqx_utils:safe_to_existing_atom(Node, utf8) of
            {ok, TargetNode} ->
                OperFunc = operation_func(TargetNode, Op),
                call_operation_if_enabled(TargetNode, OperFunc, [
                    TargetNode, ConnectorType, ConnectorName
                ]);
            {error, _} ->
                ?NOT_FOUND(<<"Invalid node name: ", Node/binary>>)
        end
    ).
%% Run OperFunc only when the target connector is enabled in config;
%% a disabled connector yields ?CONNECTOR_NOT_ENABLED, an unknown one 404.
%% NodeOrAll is either the atom `all` or a specific node name; the first
%% element of the argument list matches it (node list vs single node).
%% NOTE: locals renamed from Bridge* to Connector* for consistency with the
%% rest of this module (behavior unchanged).
call_operation_if_enabled(NodeOrAll, OperFunc, [Nodes, ConnectorType, ConnectorName]) ->
    try is_enabled_connector(ConnectorType, ConnectorName) of
        false ->
            ?CONNECTOR_NOT_ENABLED;
        true ->
            call_operation(NodeOrAll, OperFunc, [Nodes, ConnectorType, ConnectorName])
    catch
        throw:not_found ->
            ?CONNECTOR_NOT_FOUND(ConnectorType, ConnectorName)
    end.
%% Return the `enable` flag of a connector's checked config.
%% Throws not_found when the connector is absent from config; both
%% config_not_found and the binary_to_existing_atom badarg mean "unknown".
is_enabled_connector(ConnectorType, ConnectorName) ->
    try emqx:get_config([connectors, ConnectorType, binary_to_existing_atom(ConnectorName)]) of
        ConfMap ->
            maps:get(enable, ConfMap, false)
    catch
        error:{config_not_found, _} ->
            throw(not_found);
        error:badarg ->
            %% catch non-existing atom,
            %% none-existing atom means it is not available in config PT storage.
            throw(not_found)
    end.
%% Map an HTTP operation verb (restart | start | stop) to the name of the
%% BPAPI function that implements it, either cluster-wide (`all`) or on a
%% single node. Unknown verbs fail with function_clause, as before.
operation_func(NodeOrAll, Op) when Op =:= restart; Op =:= start; Op =:= stop ->
    Suffix =
        case NodeOrAll of
            all -> "_connectors_to_all_nodes";
            _Node -> "_connector_to_node"
        end,
    list_to_atom(atom_to_list(Op) ++ Suffix).
%% Translate the boolean path segment into the config toggle verb.
enable_func(Enable) ->
    case Enable of
        true -> enable;
        false -> disable
    end.
%% Merge per-node connector lists into one aggregated entry per connector.
%% The first node's list drives iteration, so connectors absent from node 1
%% are dropped; the foldl also reverses the first node's order.
zip_connectors([ConnectorsFirstNode | _] = ConnectorsAllNodes) ->
    lists:foldl(
        fun(#{type := Type, name := Name}, Acc) ->
            Connectors = pick_connectors_by_id(Type, Name, ConnectorsAllNodes),
            [format_connector_info(Connectors) | Acc]
        end,
        [],
        ConnectorsFirstNode
    ).
%% Collect one entry per node for the connector {Type, Name}.
%% Nodes that do not report it are logged as a cluster inconsistency and
%% skipped rather than failing the request.
pick_connectors_by_id(Type, Name, ConnectorsAllNodes) ->
    lists:foldl(
        fun(ConnectorsOneNode, Acc) ->
            case
                [
                    Connector
                 || Connector = #{type := Type0, name := Name0} <- ConnectorsOneNode,
                    Type0 == Type,
                    Name0 == Name
                ]
            of
                [ConnectorInfo] ->
                    [ConnectorInfo | Acc];
                [] ->
                    ?SLOG(warning, #{
                        msg => "connector_inconsistent_in_cluster",
                        reason => not_found,
                        type => Type,
                        name => Name,
                        connector => emqx_connector_resource:connector_id(Type, Name)
                    }),
                    Acc
            end
        end,
        [],
        ConnectorsAllNodes
    ).
%% Build the API representation of one connector from its per-node entries:
%% the first node's data plus an aggregated `status` and per-node
%% `node_status`; secrets are redacted before returning.
format_connector_info([FirstConnector | _] = Connectors) ->
    Res = maps:remove(node, FirstConnector),
    NodeStatus = node_status(Connectors),
    redact(Res#{
        status => aggregate_status(NodeStatus),
        node_status => NodeStatus
    }).
%% Project each per-node connector entry down to its node/status fields.
node_status(Connectors) ->
    StatusKeys = [node, status, status_reason],
    lists:map(fun(Connector) -> maps:with(StatusKeys, Connector) end, Connectors).
%% Collapse the per-node status list into one value: the shared status when
%% all nodes agree (defaulting to `connecting` if the first entry lacks a
%% status), otherwise `inconsistent`.
aggregate_status([First | _] = AllStatus) ->
    Expected = maps:get(status, First, connecting),
    SameAsFirst = fun(#{status := S}) -> S == Expected end,
    case lists:all(SameAsFirst, AllStatus) of
        true -> Expected;
        false -> inconsistent
    end.
%% Turn one node's resource record into the flat API map: raw config plus
%% type/name/node plus formatted runtime data, with secrets redacted.
format_resource(
    #{
        type := Type,
        name := ConnectorName,
        raw_config := RawConf,
        resource_data := ResourceData
    },
    Node
) ->
    redact(
        maps:merge(
            RawConf#{
                type => Type,
                %% prefer the name stored in raw config, if any
                name => maps:get(<<"name">>, RawConf, ConnectorName),
                node => Node
            },
            format_resource_data(ResourceData)
        )
    ).
%% Keep only status/error from the runtime resource data and normalize them
%% via format_resource_data/3.
format_resource_data(ResData) ->
    maps:fold(fun format_resource_data/3, #{}, maps:with([status, error], ResData)).
%% Fold step: drop `error => undefined`, rename a real error to a readable
%% `status_reason`, and pass every other key through unchanged.
format_resource_data(error, undefined, Result) ->
    Result;
format_resource_data(error, Error, Result) ->
    Result#{status_reason => emqx_utils:readable_error_msg(Error)};
format_resource_data(K, V, Result) ->
    Result#{K => V}.
%% Normalize single results and multicall result lists:
%% ok / {ok, _} / {error, _} pass through; for a list, return the first
%% failure if any, otherwise {ok, Values} with the payloads of {ok, _} tuples
%% (bare `ok` entries contribute no value).
is_ok(ok) ->
    ok;
is_ok({ok, _} = Ok) ->
    Ok;
is_ok({error, _} = Error) ->
    Error;
is_ok(Results) ->
    IsFailure = fun
        (ok) -> false;
        ({ok, _}) -> false;
        (_) -> true
    end,
    case lists:search(IsFailure, Results) of
        false -> {ok, [Value || {ok, Value} <- Results]};
        {value, FirstFailure} -> FirstFailure
    end.
%% Strip read-only/server-assigned fields from a client-supplied connector
%% config before it is written to the config store.
filter_out_request_body(Conf) ->
    ReadOnlyKeys = [
        <<"id">>,
        <<"type">>,
        <<"name">>,
        <<"status">>,
        <<"status_reason">>,
        <<"node_status">>,
        <<"node">>
    ],
    lists:foldl(fun maps:remove/2, Conf, ReadOnlyKeys).
%% Coerce a string, atom or binary into a binary.
bin(Value) when is_binary(Value) -> Value;
bin(Value) when is_atom(Value) -> atom_to_binary(Value, utf8);
bin(Value) when is_list(Value) -> list_to_binary(Value).
%% Execute a resolved BPAPI operation and translate the outcome into an HTTP
%% response. NodeOrAll selects single-node vs cluster-wide dispatch.
call_operation(NodeOrAll, OperFunc, Args = [_Nodes, ConnectorType, ConnectorName]) ->
    case is_ok(do_bpapi_call(NodeOrAll, OperFunc, Args)) of
        Ok when Ok =:= ok; is_tuple(Ok), element(1, Ok) =:= ok ->
            ?NO_CONTENT;
        {error, not_implemented} ->
            ?NOT_IMPLEMENTED;
        {error, timeout} ->
            ?BAD_REQUEST(<<"Request timeout">>);
        {error, {start_pool_failed, Name, Reason}} ->
            Msg = bin(
                io_lib:format("Failed to start ~p pool for reason ~p", [Name, redact(Reason)])
            ),
            ?BAD_REQUEST(Msg);
        {error, not_found} ->
            %% Connector exists in config but not on the remote node: cluster
            %% view is inconsistent. Log it and answer 503.
            ConnectorId = emqx_connector_resource:connector_id(ConnectorType, ConnectorName),
            ?SLOG(warning, #{
                msg => "connector_inconsistent_in_cluster_for_call_operation",
                reason => not_found,
                type => ConnectorType,
                name => ConnectorName,
                connector => ConnectorId
            }),
            ?SERVICE_UNAVAILABLE(<<"Connector not found on remote node: ", ConnectorId/binary>>);
        {error, {node_not_found, Node}} ->
            ?NOT_FOUND(<<"Node not found: ", (atom_to_binary(Node))/binary>>);
        {error, {unhealthy_target, Message}} ->
            ?BAD_REQUEST(Message);
        {error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' ->
            %% 'exit' tuples are deliberately unmatched here — presumably so
            %% they crash the handler into a 500 instead of a 400.
            ?BAD_REQUEST(redact(Reason))
    end.
%% Dispatch a BPAPI call cluster-wide (checking the cluster-supported proto
%% version) or to one node (checking that node's version and membership).
do_bpapi_call(all, Call, Args) ->
    maybe_unwrap(
        do_bpapi_call_vsn(emqx_bpapi:supported_version(emqx_connector), Call, Args)
    );
do_bpapi_call(Node, Call, Args) ->
    case lists:member(Node, mria:running_nodes()) of
        true ->
            do_bpapi_call_vsn(emqx_bpapi:supported_version(Node, emqx_connector), Call, Args);
        false ->
            {error, {node_not_found, Node}}
    end.
%% Invoke Call on the proto module if Version is one this API supports;
%% otherwise report not_implemented.
do_bpapi_call_vsn(Version, Call, Args) ->
    case is_supported_version(Version, Call) of
        true ->
            apply(emqx_connector_proto_v1, Call, Args);
        false ->
            {error, not_implemented}
    end.
%% True when the negotiated BPAPI version supports Call.
is_supported_version(Version, Call) ->
    lists:member(Version, supported_versions(Call)).
%% All calls are currently available from proto version 1 only.
supported_versions(_Call) -> [1].
%% Pass not_implemented through; otherwise unwrap an erpc multicall result
%% list into plain per-node results.
maybe_unwrap({error, not_implemented}) ->
    {error, not_implemented};
maybe_unwrap(RpcMulticallResult) ->
    emqx_rpc:unwrap_erpc(RpcMulticallResult).
%% Mask sensitive values (passwords, tokens, ...) anywhere inside Term.
redact(Term) ->
    emqx_utils:redact(Term).
%% Recursively rebuild NewConf, replacing values the client echoed back in
%% redacted form ("******") with the corresponding stored value from OldConf.
%% Keys absent from OldConf are kept as supplied.
deobfuscate(NewConf, OldConf) ->
    maps:fold(
        fun(K, V, Acc) ->
            case maps:find(K, OldConf) of
                error ->
                    %% New key: nothing stored to restore from.
                    Acc#{K => V};
                {ok, OldV} when is_map(V), is_map(OldV) ->
                    Acc#{K => deobfuscate(V, OldV)};
                {ok, OldV} ->
                    case emqx_utils:is_redacted(K, V) of
                        true ->
                            %% Client sent the redacted placeholder: keep the
                            %% previously stored secret.
                            Acc#{K => OldV};
                        _ ->
                            Acc#{K => V}
                    end
            end
        end,
        #{},
        NewConf
    ).
%% Encode a (possibly Hocon-error) map as JSON.
%% When dealing with Hocon validation errors, `value' might contain
%% non-serializable values (e.g.: user_lookup_fun), so we try again without
%% that key if serialization fails, as a best effort.
map_to_json(M0) ->
    M1 = emqx_utils_maps:jsonable_map(M0, fun(K, V) -> {K, emqx_utils_maps:binary_string(V)} end),
    try
        emqx_utils_json:encode(M1)
    catch
        error:_ ->
            M2 = maps:without([value, <<"value">>], M1),
            emqx_utils_json:encode(M2)
    end.

View File

@ -20,7 +20,13 @@
-export([start/2, stop/1]).
-define(TOP_LELVE_HDLR_PATH, (emqx_connector:config_key_path())).
-define(LEAF_NODE_HDLR_PATH, (emqx_connector:config_key_path() ++ ['?', '?'])).
%% Application start callback: load connectors from config, register the
%% config-change handlers for the connectors subtree (top level and per
%% type/name leaf), then start the supervisor tree.
start(_StartType, _StartArgs) ->
    ok = emqx_connector:load(),
    ok = emqx_config_handler:add_handler(?TOP_LELVE_HDLR_PATH, emqx_connector),
    ok = emqx_config_handler:add_handler(?LEAF_NODE_HDLR_PATH, emqx_connector),
    emqx_connector_sup:start_link().
stop(_State) ->

View File

@ -0,0 +1,432 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_connector_resource).
-include_lib("emqx_bridge/include/emqx_bridge_resource.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").
-export([
connector_to_resource_type/1,
resource_id/1,
resource_id/2,
connector_id/2,
parse_connector_id/1,
parse_connector_id/2,
connector_hookpoint/1,
connector_hookpoint_to_connector_id/1
]).
-export([
create/3,
create/4,
create_dry_run/2,
create_dry_run/3,
recreate/2,
recreate/3,
remove/1,
remove/2,
remove/4,
restart/2,
start/2,
stop/2,
update/2,
update/3,
update/4,
get_channels/2
]).
-callback connector_config(ParsedConfig) ->
ParsedConfig
when
ParsedConfig :: #{atom() => any()}.
-optional_callbacks([connector_config/1]).
%% Resolve a connector type to its emqx_resource implementation module.
%% EE builds consult the EE schema first and fall back to CE types;
%% CE builds only know CE types. connector_impl_module/1 returns the module
%% that overrides connector configs, or `undefined` when there is none.
-if(?EMQX_RELEASE_EDITION == ee).
connector_to_resource_type(ConnectorType) ->
    try
        emqx_connector_ee_schema:resource_type(ConnectorType)
    catch
        error:{unknown_connector_type, _} ->
            %% maybe it's a CE connector
            connector_to_resource_type_ce(ConnectorType)
    end.

connector_impl_module(ConnectorType) ->
    emqx_connector_ee_schema:connector_impl_module(ConnectorType).
-else.
connector_to_resource_type(ConnectorType) ->
    connector_to_resource_type_ce(ConnectorType).

connector_impl_module(_ConnectorType) ->
    undefined.
-endif.
%% No CE connector types exist yet; the returned atom is a placeholder
%% (never a real resource module).
connector_to_resource_type_ce(_ConnectorType) ->
    no_bridge_v2_for_c2_so_far.
%% Build the emqx_resource id for a connector id ("{type}:{name}").
resource_id(ConnectorId) when is_binary(ConnectorId) ->
    iolist_to_binary(["connector:", ConnectorId]).
%% Build the emqx_resource id from a connector's type and name.
resource_id(ConnectorType, ConnectorName) ->
    ConnectorId = connector_id(ConnectorType, ConnectorName),
    resource_id(ConnectorId).
%% Build the canonical "{type}:{name}" connector id as a binary.
connector_id(ConnectorType, ConnectorName) ->
    Name = bin(ConnectorName),
    Type = bin(ConnectorType),
    <<Type/binary, ":", Name/binary>>.
%% Parse a connector id with the default options (name returned as an atom).
parse_connector_id(ConnectorId) ->
    parse_connector_id(ConnectorId, #{atom_name => true}).
%% Split "{type}:{name}" or "connector:{type}:{name}" into {Type, Name}.
%% Type becomes an existing atom; the name is validated (and converted to an
%% atom unless atom_name => false). Throws a validation_error map otherwise.
-spec parse_connector_id(list() | binary() | atom(), #{atom_name => boolean()}) ->
    {atom(), atom() | binary()}.
parse_connector_id(ConnectorId, Opts) ->
    case string:split(bin(ConnectorId), ":", all) of
        [Type, Name] ->
            {to_type_atom(Type), validate_name(Name, Opts)};
        [_, Type, Name] ->
            {to_type_atom(Type), validate_name(Name, Opts)};
        _ ->
            invalid_data(
                <<"should be of pattern {type}:{name} or connector:{type}:{name}, but got ",
                    ConnectorId/binary>>
            )
    end.
%% Hookpoint topic under which this connector's events are published.
connector_hookpoint(ConnectorId) ->
    <<"$connectors/", (bin(ConnectorId))/binary>>.
%% Inverse of connector_hookpoint/1; relies on the ?BRIDGE_HOOKPOINT pattern
%% macro from emqx_bridge_resource.hrl.
connector_hookpoint_to_connector_id(?BRIDGE_HOOKPOINT(ConnectorId)) ->
    {ok, ConnectorId};
connector_hookpoint_to_connector_id(_) ->
    {error, bad_connector_hookpoint}.
%% Validate a connector name: it must be non-empty UTF-8 consisting solely of
%% [0-9a-zA-Z_.-]. Returns the name as an atom (default) or as the original
%% binary when Opts has atom_name => false. Throws a validation_error map on
%% bad input.
%% FIX: the two error messages were swapped — the character-set message was
%% raised for empty/non-UTF-8 input and "bad name" for disallowed characters.
validate_name(Name0, Opts) ->
    Name = unicode:characters_to_list(Name0, utf8),
    case is_list(Name) andalso Name =/= [] of
        true ->
            case lists:all(fun is_id_char/1, Name) of
                true ->
                    case maps:get(atom_name, Opts, true) of
                        % NOTE
                        % Rule may be created before connector, thus not `list_to_existing_atom/1`,
                        % also it is infrequent user input anyway.
                        true -> list_to_atom(Name);
                        false -> Name0
                    end;
                false ->
                    %% name decoded fine but contains a disallowed character
                    invalid_data(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>)
            end;
        false ->
            %% empty name or not valid UTF-8
            invalid_data(<<"bad name: ", Name0/binary>>)
    end.
%% Throw a uniform validation error; callers surface it as an HTTP 400.
-spec invalid_data(binary()) -> no_return().
invalid_data(Reason) -> throw(#{kind => validation_error, reason => Reason}).
%% True for the characters allowed in connector names: [0-9a-zA-Z_.-].
is_id_char(C) ->
    (C >= $0 andalso C =< $9) orelse
        (C >= $a andalso C =< $z) orelse
        (C >= $A andalso C =< $Z) orelse
        lists:member(C, "_-.").
%% Convert a connector type binary to an existing atom; unknown types (no
%% such atom loaded) are rejected as validation errors.
to_type_atom(Type) ->
    try
        erlang:binary_to_existing_atom(Type, utf8)
    catch
        _:_ ->
            invalid_data(<<"unknown connector type: ", Type/binary>>)
    end.
%% Restart the underlying resource of a connector.
restart(Type, Name) ->
    emqx_resource:restart(resource_id(Type, Name)).
%% Stop the underlying resource of a connector.
stop(Type, Name) ->
    emqx_resource:stop(resource_id(Type, Name)).
%% Start the underlying resource of a connector.
start(Type, Name) ->
    emqx_resource:start(resource_id(Type, Name)).
%% Create a connector resource with default options.
create(Type, Name, Conf) ->
    create(Type, Name, Conf, #{}).
%% Create the local emqx_resource instance backing a connector.
%% The type/name are stashed into the config so the implementation module can
%% see them; crashes (via the {ok, _} match) if resource creation fails.
create(Type, Name, Conf0, Opts) ->
    ?SLOG(info, #{
        msg => "create connector",
        type => Type,
        name => Name,
        config => emqx_utils:redact(Conf0)
    }),
    TypeBin = bin(Type),
    Conf = Conf0#{connector_type => TypeBin, connector_name => Name},
    {ok, _Data} = emqx_resource:create_local(
        resource_id(Type, Name),
        <<"emqx_connector">>,
        ?MODULE:connector_to_resource_type(Type),
        parse_confs(TypeBin, Name, Conf),
        parse_opts(Conf, Opts)
    ),
    ok.
%% Update by connector id ("{type}:{name}") instead of separate type/name.
update(ConnectorId, {OldConf, Conf}) ->
    {ConnectorType, ConnectorName} = parse_connector_id(ConnectorId),
    update(ConnectorType, ConnectorName, {OldConf, Conf}).
%% Update with default options.
update(Type, Name, {OldConf, Conf}) ->
    update(Type, Name, {OldConf, Conf}, #{}).
%% Apply a config change to a connector. A change that only flips `enable`
%% avoids a full recreate and just starts/stops the resource; any other
%% change recreates it (creating it from scratch if it went missing).
update(Type, Name, {OldConf, Conf}, Opts) ->
    %% TODO: sometimes its not necessary to restart the connector connection.
    %%
    %% - if the connection related configs like `servers` is updated, we should restart/start
    %% or stop connectors according to the change.
    %% - if the connection related configs are not update, only non-connection configs like
    %% the `method` or `headers` of a WebHook is changed, then the connector can be updated
    %% without restarting the connector.
    %%
    case emqx_utils_maps:if_only_to_toggle_enable(OldConf, Conf) of
        false ->
            ?SLOG(info, #{
                msg => "update connector",
                type => Type,
                name => Name,
                config => emqx_utils:redact(Conf)
            }),
            case recreate(Type, Name, Conf, Opts) of
                {ok, _} ->
                    ok;
                {error, not_found} ->
                    %% Resource vanished (e.g. node restarted without it):
                    %% fall back to creating it fresh.
                    ?SLOG(warning, #{
                        msg => "updating_a_non_existing_connector",
                        type => Type,
                        name => Name,
                        config => emqx_utils:redact(Conf)
                    }),
                    create(Type, Name, Conf, Opts);
                {error, Reason} ->
                    {error, {update_connector_failed, Reason}}
            end;
        true ->
            %% we don't need to recreate the connector if this config change is only to
            %% toggle the config 'connector.{type}.{name}.enable'
            _ =
                case maps:get(enable, Conf, true) of
                    true ->
                        restart(Type, Name);
                    false ->
                        stop(Type, Name)
                end,
            ok
    end.
%% List the channels currently installed on a connector's resource.
get_channels(Type, Name) ->
    emqx_resource:get_channels(resource_id(Type, Name)).
%% Recreate using the currently checked config for this connector.
recreate(Type, Name) ->
    recreate(Type, Name, emqx:get_config([connectors, Type, Name])).
%% Recreate with default options.
recreate(Type, Name, Conf) ->
    recreate(Type, Name, Conf, #{}).
%% Tear down and re-create the local resource with a new parsed config.
recreate(Type, Name, Conf, Opts) ->
    TypeBin = bin(Type),
    emqx_resource:recreate_local(
        resource_id(Type, Name),
        ?MODULE:connector_to_resource_type(Type),
        parse_confs(TypeBin, Name, Conf),
        parse_opts(Conf, Opts)
    ).
%% Dry-run with a no-op callback.
create_dry_run(Type, Conf) ->
    create_dry_run(Type, Conf, fun(_) -> ok end).
%% Validate and "probe" a connector config without persisting anything:
%% re-check it against the schema under a fixed temp name, materialize any
%% inline certificates to a temp dir, run a dry-run resource creation, then
%% clean the temp certs up again. Returns ok or {error, Reason}.
create_dry_run(Type, Conf0, Callback) ->
    %% Already typechecked, no need to catch errors
    TypeBin = bin(Type),
    TypeAtom = safe_atom(Type),
    %% We use a fixed name here to avoid creating an atom
    TmpName = iolist_to_binary([?TEST_ID_PREFIX, TypeBin, ":", <<"probedryrun">>]),
    TmpPath = emqx_utils:safe_filename(TmpName),
    Conf1 = maps:without([<<"name">>], Conf0),
    RawConf = #{<<"connectors">> => #{TypeBin => #{<<"temp_name">> => Conf1}}},
    try
        CheckedConf1 =
            hocon_tconf:check_plain(
                emqx_connector_schema,
                RawConf,
                #{atom_key => true, required => false}
            ),
        CheckedConf2 = get_temp_conf(TypeAtom, CheckedConf1),
        CheckedConf = CheckedConf2#{connector_type => TypeBin, connector_name => TmpName},
        case emqx_connector_ssl:convert_certs(TmpPath, CheckedConf) of
            {error, Reason} ->
                {error, Reason};
            {ok, ConfNew} ->
                ParseConf = parse_confs(bin(Type), TmpName, ConfNew),
                emqx_resource:create_dry_run_local(
                    TmpName, ?MODULE:connector_to_resource_type(Type), ParseConf, Callback
                )
        end
    catch
        %% validation errors
        throw:Reason1 ->
            {error, Reason1}
    after
        %% Always remove the temp cert dir, success or failure.
        _ = file:del_dir_r(emqx_tls_lib:pem_dir(TmpPath))
    end.
%% Dig the temp connector config back out of the schema-checked map; the key
%% may come back as an atom or a binary depending on how hocon checked it.
get_temp_conf(TypeAtom, CheckedConf) ->
    case CheckedConf of
        #{connectors := #{TypeAtom := #{temp_name := Conf}}} ->
            Conf;
        #{connectors := #{TypeAtom := #{<<"temp_name">> := Conf}}} ->
            Conf
    end.
%% Remove by connector id ("{type}:{name}").
remove(ConnectorId) ->
    {ConnectorType, ConnectorName} = parse_connector_id(ConnectorId),
    remove(ConnectorType, ConnectorName, #{}, #{}).
%% Remove by type and name.
remove(Type, Name) ->
    remove(Type, Name, #{}, #{}).
%% just for perform_connector_changes/1
%% Conf/Opts are ignored; only the local resource instance is removed.
remove(Type, Name, _Conf, _Opts) ->
    ?SLOG(info, #{msg => "remove_connector", type => Type, name => Name}),
    emqx_resource:remove_local(resource_id(Type, Name)).
%% convert connector configs to what the connector modules want
%% Convert a raw connector config into the shape its resource implementation
%% module expects. webhook and iotdb are special-cased here; every other type
%% is delegated to the implementation module's connector_config/1 callback.
parse_confs(
    <<"webhook">>,
    _Name,
    #{
        url := Url,
        method := Method,
        headers := Headers,
        max_retries := Retry
    } = Conf
) ->
    Url1 = bin(Url),
    {BaseUrl, Path} = parse_url(Url1),
    BaseUrl1 =
        case emqx_http_lib:uri_parse(BaseUrl) of
            {ok, BUrl} ->
                BUrl;
            {error, Reason} ->
                Reason1 = emqx_utils:readable_error_msg(Reason),
                invalid_data(<<"Invalid URL: ", Url1/binary, ", details: ", Reason1/binary>>)
        end,
    %% NOTE(review): deep_get/2 has no default, so a config missing
    %% resource_opts.request_ttl crashes here — presumably the schema
    %% guarantees it is present; confirm.
    RequestTTL = emqx_utils_maps:deep_get(
        [resource_opts, request_ttl],
        Conf
    ),
    Conf#{
        base_url => BaseUrl1,
        request =>
            #{
                path => Path,
                method => Method,
                body => maps:get(body, Conf, undefined),
                headers => Headers,
                request_ttl => RequestTTL,
                max_retries => Retry
            }
    };
parse_confs(<<"iotdb">>, Name, Conf) ->
    %% [FIXME] this has no place here, it's used in parse_confs/3, which should
    %% rather delegate to a behavior callback than implementing domain knowledge
    %% here (reversed dependency)
    InsertTabletPathV1 = <<"rest/v1/insertTablet">>,
    InsertTabletPathV2 = <<"rest/v2/insertTablet">>,
    #{
        base_url := BaseURL,
        authentication :=
            #{
                username := Username,
                password := Password
            }
    } = Conf,
    BasicToken = base64:encode(<<Username/binary, ":", Password/binary>>),
    %% This version atom correspond to the macro ?VSN_1_1_X in
    %% emqx_connector_iotdb.hrl. It would be better to use the macro directly, but
    %% this cannot be done without introducing a dependency on the
    %% emqx_iotdb_connector app (which is an EE app).
    DefaultIOTDBConnector = 'v1.1.x',
    Version = maps:get(iotdb_version, Conf, DefaultIOTDBConnector),
    InsertTabletPath =
        case Version of
            DefaultIOTDBConnector -> InsertTabletPathV2;
            _ -> InsertTabletPathV1
        end,
    %% IoTDB is driven over HTTP, so delegate to the webhook clause above.
    WebhookConfig =
        Conf#{
            method => <<"post">>,
            url => <<BaseURL/binary, InsertTabletPath/binary>>,
            headers => [
                {<<"Content-type">>, <<"application/json">>},
                {<<"Authorization">>, BasicToken}
            ]
        },
    parse_confs(
        <<"webhook">>,
        Name,
        WebhookConfig
    );
parse_confs(ConnectorType, _Name, Config) ->
    connector_config(ConnectorType, Config).
%% Let the type's implementation module rewrite the config via its optional
%% connector_config/1 callback; pass the config through untouched otherwise.
connector_config(ConnectorType, Config) ->
    Mod = connector_impl_module(ConnectorType),
    case erlang:function_exported(Mod, connector_config, 1) of
        true ->
            Mod:connector_config(Config);
        false ->
            Config
    end.
%% Split a URL into {SchemeAndAuthority, Path}: "http://h:1/p" becomes
%% {<<"http://h:1">>, <<"p">>}. A URL without "//" is rejected as missing
%% its scheme (validation_error throw).
parse_url(Url) ->
    case string:split(Url, "//", leading) of
        [Scheme, UrlRem] ->
            case string:split(UrlRem, "/", leading) of
                [HostPort, Path] ->
                    {iolist_to_binary([Scheme, "//", HostPort]), Path};
                [HostPort] ->
                    %% no path component at all
                    {iolist_to_binary([Scheme, "//", HostPort]), <<>>}
            end;
        [Url] ->
            invalid_data(<<"Missing scheme in URL: ", Url/binary>>)
    end.
%% Coerce an atom, string or binary into a binary.
bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8);
bin(Str) when is_list(Str) -> list_to_binary(Str);
bin(Bin) when is_binary(Bin) -> Bin.
%% Convert a binary to an existing atom (never creates new atoms);
%% atoms pass through unchanged.
safe_atom(Atom) when is_atom(Atom) -> Atom;
safe_atom(Bin) when is_binary(Bin) -> binary_to_existing_atom(Bin, utf8).
%% Derive resource options from the connector config (currently only the
%% start_after_created override).
parse_opts(Conf, Opts0) ->
    override_start_after_created(Conf, Opts0).
%% Force start_after_created to false for disabled connectors; for enabled
%% ones, honor the caller's option (defaulting to the enabled flag itself).
override_start_after_created(Config, Opts) ->
    Enabled = maps:get(enable, Config, true),
    Requested = maps:get(start_after_created, Opts, Enabled),
    maps:put(start_after_created, Enabled andalso Requested, Opts).

View File

@ -0,0 +1,123 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_connector_proto_v1).
-behaviour(emqx_bpapi).
-export([
introduced_in/0,
list_connectors_on_nodes/1,
restart_connector_to_node/3,
start_connector_to_node/3,
stop_connector_to_node/3,
lookup_from_all_nodes/3,
restart_connectors_to_all_nodes/3,
start_connectors_to_all_nodes/3,
stop_connectors_to_all_nodes/3
]).
-include_lib("emqx/include/bpapi.hrl").
-define(TIMEOUT, 15000).
%% First EMQX release that ships this BPAPI version.
introduced_in() ->
    "5.3.1".
%% Multicall emqx_connector:list/0 on every given node.
-spec list_connectors_on_nodes([node()]) ->
    emqx_rpc:erpc_multicall([emqx_resource:resource_data()]).
list_connectors_on_nodes(Nodes) ->
    erpc:multicall(Nodes, emqx_connector, list, [], ?TIMEOUT).
-type key() :: atom() | binary() | [byte()].
%% Restart a connector's resource on a single node (classic rpc:call).
-spec restart_connector_to_node(node(), key(), key()) ->
    term().
restart_connector_to_node(Node, ConnectorType, ConnectorName) ->
    rpc:call(
        Node,
        emqx_connector_resource,
        restart,
        [ConnectorType, ConnectorName],
        ?TIMEOUT
    ).
%% Start a connector's resource on a single node (classic rpc:call).
-spec start_connector_to_node(node(), key(), key()) ->
    term().
start_connector_to_node(Node, ConnectorType, ConnectorName) ->
    rpc:call(
        Node,
        emqx_connector_resource,
        start,
        [ConnectorType, ConnectorName],
        ?TIMEOUT
    ).
%% Stop a connector's resource on a single node (classic rpc:call).
-spec stop_connector_to_node(node(), key(), key()) ->
    term().
stop_connector_to_node(Node, ConnectorType, ConnectorName) ->
    rpc:call(
        Node,
        emqx_connector_resource,
        stop,
        [ConnectorType, ConnectorName],
        ?TIMEOUT
    ).
%% Restart a connector's resource on all given nodes (erpc multicall).
-spec restart_connectors_to_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
restart_connectors_to_all_nodes(Nodes, ConnectorType, ConnectorName) ->
    erpc:multicall(
        Nodes,
        emqx_connector_resource,
        restart,
        [ConnectorType, ConnectorName],
        ?TIMEOUT
    ).
%% Start a connector's resource on all given nodes (erpc multicall).
-spec start_connectors_to_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
start_connectors_to_all_nodes(Nodes, ConnectorType, ConnectorName) ->
    erpc:multicall(
        Nodes,
        emqx_connector_resource,
        start,
        [ConnectorType, ConnectorName],
        ?TIMEOUT
    ).
%% Stop a connector's resource on all given nodes (erpc multicall).
-spec stop_connectors_to_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
stop_connectors_to_all_nodes(Nodes, ConnectorType, ConnectorName) ->
    erpc:multicall(
        Nodes,
        emqx_connector_resource,
        stop,
        [ConnectorType, ConnectorName],
        ?TIMEOUT
    ).
%% Fetch each node's local view of a connector via
%% emqx_connector_api:lookup_from_local_node/2 (erpc multicall).
-spec lookup_from_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
lookup_from_all_nodes(Nodes, ConnectorType, ConnectorName) ->
    erpc:multicall(
        Nodes,
        emqx_connector_api,
        lookup_from_local_node,
        [ConnectorType, ConnectorName],
        ?TIMEOUT
    ).

View File

@ -0,0 +1,93 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_connector_ee_schema).
-if(?EMQX_RELEASE_EDITION == ee).
-export([
resource_type/1,
connector_impl_module/1
]).
-import(hoconsc, [mk/2, enum/1, ref/2]).
-export([
api_schemas/1,
fields/1,
examples/1
]).
%% Map an EE connector type (atom or binary) to its resource implementation
%% module; unknown types raise {unknown_connector_type, Type} so the caller
%% can fall back to CE resolution.
resource_type(Type) when is_binary(Type) ->
    resource_type(binary_to_atom(Type, utf8));
resource_type(kafka_producer) ->
    emqx_bridge_kafka_impl_producer;
%% We use AEH's Kafka interface.
resource_type(azure_event_hub_producer) ->
    emqx_bridge_kafka_impl_producer;
resource_type(Type) ->
    error({unknown_connector_type, Type}).
%% For connectors that need to override connector configurations.
%% Returns the overriding module, or `undefined` when the type needs none.
connector_impl_module(ConnectorType) when is_binary(ConnectorType) ->
    connector_impl_module(binary_to_atom(ConnectorType, utf8));
connector_impl_module(azure_event_hub_producer) ->
    emqx_bridge_azure_event_hub;
connector_impl_module(_ConnectorType) ->
    undefined.
%% Hocon schema fields for the EE `connectors` config root.
fields(connectors) ->
    connector_structs().
%% One schema entry per EE connector type: a name-keyed map of that type's
%% connector config struct.
connector_structs() ->
    [
        {kafka_producer,
            mk(
                hoconsc:map(name, ref(emqx_bridge_kafka, "config")),
                #{
                    desc => <<"Kafka Connector Config">>,
                    required => false
                }
            )},
        {azure_event_hub_producer,
            mk(
                hoconsc:map(name, ref(emqx_bridge_azure_event_hub, "config_connector")),
                #{
                    desc => <<"Azure Event Hub Connector Config">>,
                    required => false
                }
            )}
    ].
%% Merge the connector_examples/1 maps of every schema module into one map
%% of API examples for the given HTTP method.
examples(Method) ->
    MergeFun =
        fun(Example, Examples) ->
            maps:merge(Examples, Example)
        end,
    Fun =
        fun(Module, Examples) ->
            ConnectorExamples = erlang:apply(Module, connector_examples, [Method]),
            lists:foldl(MergeFun, Examples, ConnectorExamples)
        end,
    lists:foldl(Fun, #{}, schema_modules()).
%% Schema modules contributing EE connector examples.
schema_modules() ->
    [
        emqx_bridge_kafka,
        emqx_bridge_azure_event_hub
    ].
%% API schema refs for EE connector types, keyed by the request's `type`
%% field value.
api_schemas(Method) ->
    [
        %% We need to map the `type' field of a request (binary) to a
        %% connector schema module.
        api_ref(emqx_bridge_kafka, <<"kafka_producer">>, Method ++ "_connector"),
        api_ref(emqx_bridge_azure_event_hub, <<"azure_event_hub_producer">>, Method ++ "_connector")
    ].
%% Pair a connector type binary with a hocon ref into its schema module.
api_ref(Module, Type, Method) ->
    {Type, ref(Module, Method)}.
-else.
-endif.

View File

@ -0,0 +1,294 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_connector_schema).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx/include/logger.hrl").
-import(hoconsc, [mk/2, ref/2]).
-export([transform_bridges_v1_to_connectors_and_bridges_v2/1]).
-export([roots/0, fields/1, desc/1, namespace/0, tags/0]).
-export([get_response/0, put_request/0, post_request/0]).
-if(?EMQX_RELEASE_EDITION == ee).
%% Enterprise-edition API schemas, fetched defensively in case the ee schema
%% module is absent or outdated.
enterprise_api_schemas(Method) ->
    %% We *must* do this to ensure the module is really loaded, especially when we use
    %% `call_hocon' from `nodetool' to generate initial configurations.
    _ = emqx_connector_ee_schema:module_info(),
    case erlang:function_exported(emqx_connector_ee_schema, api_schemas, 1) of
        true -> emqx_connector_ee_schema:api_schemas(Method);
        false -> []
    end.

%% Enterprise-edition connector field definitions; same defensive loading.
enterprise_fields_connectors() ->
    %% We *must* do this to ensure the module is really loaded, especially when we use
    %% `call_hocon' from `nodetool' to generate initial configurations.
    _ = emqx_connector_ee_schema:module_info(),
    case erlang:function_exported(emqx_connector_ee_schema, fields, 1) of
        true ->
            emqx_connector_ee_schema:fields(connectors);
        false ->
            []
    end.
-else.
%% Community edition: no enterprise connector schemas are available.
enterprise_api_schemas(_Method) -> [].
enterprise_fields_connectors() -> [].
-endif.
%% Bridge (v1) type names that map onto each connector type; `kafka' is the
%% legacy alias for `kafka_producer'.  Unknown types crash (function_clause).
connector_type_to_bridge_types(kafka_producer) -> [kafka, kafka_producer];
connector_type_to_bridge_types(azure_event_hub_producer) -> [azure_event_hub_producer].
actions_config_name() -> <<"bridges_v2">>.
%% True when the (raw, binary-keyed) bridge config contains at least one of
%% the connector-level fields.
has_connector_field(BridgeConf, ConnectorFields) ->
    CheckField = fun({FieldName, _Schema}) ->
        maps:is_key(to_bin(FieldName), BridgeConf)
    end,
    lists:any(CheckField, ConnectorFields).
%% Select the bridges (as `{Type, Name, Conf, ConnectorFields}' tuples) whose
%% config carries connector-level fields and therefore must be split into a
%% connector plus an action.
bridge_configs_to_transform(BridgeType, BridgeNameBridgeConfList, ConnectorFields) ->
    [
        {BridgeType, BridgeName, BridgeConf, ConnectorFields}
     || {BridgeName, BridgeConf} <- BridgeNameBridgeConfList,
        has_connector_field(BridgeConf, ConnectorFields)
    ].
%% Split one v1 bridge config into a connector config (the connector-level
%% fields) and an action config (everything else plus a `connector' pointer).
%% `ConnectorsMap' holds the already-existing connectors of this type and is
%% only used to avoid generating a clashing connector name.
split_bridge_to_connector_and_action(
    {ConnectorsMap, {BridgeType, BridgeName, BridgeConf, ConnectorFields}}
) ->
    %% Get connector fields from bridge config
    ConnectorMap = lists:foldl(
        fun({ConnectorFieldName, _Spec}, ToTransformSoFar) ->
            case maps:is_key(to_bin(ConnectorFieldName), BridgeConf) of
                true ->
                    NewToTransform = maps:put(
                        to_bin(ConnectorFieldName),
                        maps:get(to_bin(ConnectorFieldName), BridgeConf),
                        ToTransformSoFar
                    ),
                    NewToTransform;
                false ->
                    ToTransformSoFar
            end
        end,
        #{},
        ConnectorFields
    ),
    %% Remove connector fields from bridge config to create Action
    ActionMap0 = lists:foldl(
        fun
            ({enable, _Spec}, ToTransformSoFar) ->
                %% The `enable' field is used by both the connector and the action,
                %% so it is kept in the action as well.
                ToTransformSoFar;
            ({ConnectorFieldName, _Spec}, ToTransformSoFar) ->
                case maps:is_key(to_bin(ConnectorFieldName), BridgeConf) of
                    true ->
                        maps:remove(to_bin(ConnectorFieldName), ToTransformSoFar);
                    false ->
                        ToTransformSoFar
                end
        end,
        BridgeConf,
        ConnectorFields
    ),
    %% Generate a connector name
    ConnectorName = generate_connector_name(ConnectorsMap, BridgeName, 0),
    %% Add connector field to action map
    ActionMap = maps:put(<<"connector">>, ConnectorName, ActionMap0),
    {BridgeType, BridgeName, ActionMap, ConnectorName, ConnectorMap}.
%% Derive a connector name from the bridge name that does not collide with
%% any key of `ConnectorsMap'.  First try `connector_<bridge>'; on collision,
%% append a numeric suffix starting at 2 (`connector_<bridge>_2', ...).
generate_connector_name(ConnectorsMap, BridgeName, Attempt) ->
    Candidate =
        case Attempt of
            0 ->
                iolist_to_binary(io_lib:format("connector_~s", [BridgeName]));
            N ->
                iolist_to_binary(io_lib:format("connector_~s_~p", [BridgeName, N + 1]))
        end,
    case maps:is_key(Candidate, ConnectorsMap) of
        true -> generate_connector_name(ConnectorsMap, BridgeName, Attempt + 1);
        false -> Candidate
    end.
%% Fold step over `fields(connectors)': for one connector type (and its hocon
%% map schema ref), move the connector-level fields out of every matching v1
%% bridge config, adding a connector entry and a bridge_v2 (action) entry and
%% removing the old bridge from the raw config.
transform_old_style_bridges_to_connector_and_actions_of_type(
    {ConnectorType, #{type := {map, name, {ref, ConnectorConfSchemaMod, ConnectorConfSchemaName}}}},
    RawConfig
) ->
    ConnectorFields = ConnectorConfSchemaMod:fields(ConnectorConfSchemaName),
    BridgeTypes = connector_type_to_bridge_types(ConnectorType),
    BridgesConfMap = maps:get(<<"bridges">>, RawConfig, #{}),
    ConnectorsConfMap = maps:get(<<"connectors">>, RawConfig, #{}),
    BridgeConfigsToTransform1 =
        lists:foldl(
            fun(BridgeType, ToTranformSoFar) ->
                BridgeNameToBridgeMap = maps:get(to_bin(BridgeType), BridgesConfMap, #{}),
                BridgeNameBridgeConfList = maps:to_list(BridgeNameToBridgeMap),
                NewToTransform = bridge_configs_to_transform(
                    BridgeType, BridgeNameBridgeConfList, ConnectorFields
                ),
                %% builds a deep list; flattened right below
                [NewToTransform, ToTranformSoFar]
            end,
            [],
            BridgeTypes
        ),
    BridgeConfigsToTransform = lists:flatten(BridgeConfigsToTransform1),
    ConnectorsWithTypeMap = maps:get(to_bin(ConnectorType), ConnectorsConfMap, #{}),
    %% Pair every bridge-to-transform with the already-existing connectors of
    %% this type, so generated connector names do not collide with them.
    BridgeConfigsToTransformWithConnectorConf = lists:zip(
        lists:duplicate(length(BridgeConfigsToTransform), ConnectorsWithTypeMap),
        BridgeConfigsToTransform
    ),
    ActionConnectorTuples = lists:map(
        fun split_bridge_to_connector_and_action/1,
        BridgeConfigsToTransformWithConnectorConf
    ),
    %% Add connectors and actions and remove bridges
    lists:foldl(
        fun({BridgeType, BridgeName, ActionMap, ConnectorName, ConnectorMap}, RawConfigSoFar) ->
            %% Add connector
            RawConfigSoFar1 = emqx_utils_maps:deep_put(
                [<<"connectors">>, to_bin(ConnectorType), ConnectorName],
                RawConfigSoFar,
                ConnectorMap
            ),
            %% Remove bridge (v1)
            RawConfigSoFar2 = emqx_utils_maps:deep_remove(
                [<<"bridges">>, to_bin(BridgeType), BridgeName],
                RawConfigSoFar1
            ),
            %% Add bridge_v2
            RawConfigSoFar3 = emqx_utils_maps:deep_put(
                [actions_config_name(), to_bin(maybe_rename(BridgeType)), BridgeName],
                RawConfigSoFar2,
                ActionMap
            ),
            RawConfigSoFar3
        end,
        RawConfig,
        ActionConnectorTuples
    ).
%% Entry point of the v1 -> v2 config migration: for every known connector
%% type, split matching v1 bridges into connectors plus bridge_v2 actions.
transform_bridges_v1_to_connectors_and_bridges_v2(RawConfig) ->
    lists:foldl(
        fun transform_old_style_bridges_to_connector_and_actions_of_type/2,
        RawConfig,
        fields(connectors)
    ).
%% v1 uses 'kafka' as the bridge type; v2 uses 'kafka_producer'.  Any other
%% name is passed through unchanged.
maybe_rename(Name) ->
    case Name of
        kafka -> kafka_producer;
        _ -> Name
    end.
%%======================================================================================
%% HOCON Schema Callbacks
%%======================================================================================

%% For HTTP APIs

%% Response schema for GET requests.
get_response() ->
    api_schema("get").

%% Request schema for PUT requests.
put_request() ->
    api_schema("put").

%% Request schema for POST requests.
post_request() ->
    api_schema("post").

%% Union over all (enterprise) connector schemas for the given method.
api_schema(Method) ->
    EE = enterprise_api_schemas(Method),
    hoconsc:union(connector_api_union(EE)).
%% Build a hocon union selector from `{TypeBin, SchemaRef}' pairs.  The
%% returned fun yields all members when asked, or narrows to the single
%% schema matching the request's `type' field; an unknown type is reported
%% via `throw' in hocon's validation-error shape.
connector_api_union(Refs) ->
    ByType = maps:from_list(Refs),
    fun
        (all_union_members) ->
            maps:values(ByType);
        ({value, #{<<"type">> := Type}}) ->
            case ByType of
                #{Type := Ref} ->
                    [Ref];
                _ ->
                    throw(#{
                        field_name => type,
                        value => Type,
                        reason => <<"unknown connector type">>
                    })
            end;
        ({value, _}) ->
            maps:values(ByType)
    end.
%% general config

%% hocon schema callback: config namespace.
namespace() -> "connector".

%% hocon schema callback: tags shown in the generated API docs.
tags() ->
    [<<"Connector">>].
-dialyzer({nowarn_function, roots/0}).
%% hocon schema callback: the `connectors' root.  When no connector types are
%% known (community edition), fall back to an untyped map so the root still
%% parses; otherwise reference the typed `connectors' struct.
roots() ->
    case fields(connectors) of
        [] ->
            [
                {connectors,
                    ?HOCON(hoconsc:map(name, typerefl:map()), #{importance => ?IMPORTANCE_LOW})}
            ];
        _ ->
            [{connectors, ?HOCON(?R_REF(connectors), #{importance => ?IMPORTANCE_LOW})}]
    end.
%% hocon schema callback.  The empty-list prefix is a placeholder for future
%% community-edition connector fields.
fields(connectors) ->
    [] ++ enterprise_fields_connectors().

%% hocon schema callback: description keys for the docs.
desc(connectors) ->
    ?DESC("desc_connectors");
desc(_) ->
    undefined.
%%======================================================================================
%% Helper Functions
%%======================================================================================

%% Best-effort conversion to binary: atoms are converted, every other term
%% (binaries included) is passed through unchanged.
to_bin(Value) when is_atom(Value) ->
    list_to_binary(atom_to_list(Value));
to_bin(Value) ->
    Value.

View File

@ -0,0 +1,236 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_connector_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-define(START_APPS, [emqx, emqx_conf, emqx_connector]).
-define(CONNECTOR, dummy_connector_impl).
%% CT callback: run every exported t_* test case in this module.
all() ->
    emqx_common_test_helpers:all(?MODULE).

%% CT callback: start the applications the connector subsystem needs.
init_per_suite(Config) ->
    _ = application:load(emqx_conf),
    ok = emqx_common_test_helpers:start_apps(?START_APPS),
    Config.

end_per_suite(_Config) ->
    emqx_common_test_helpers:stop_apps(?START_APPS).

%% Each test case does its own setup/teardown via the `{init, _}' and
%% `{'end', _}' clauses of the test function itself.
init_per_testcase(TestCase, Config) ->
    ?MODULE:TestCase({init, Config}).

end_per_testcase(TestCase, Config) ->
    ?MODULE:TestCase({'end', Config}).
%% the 2 test cases below are based on kafka connector which is ee only
-if(?EMQX_RELEASE_EDITION == ee).
%% Full connector lifecycle (create/lookup/list/disable/enable/update/remove)
%% against a mocked resource implementation; finishes by checking the exact
%% sequence of resource callbacks recorded by meck.
t_connector_lifecycle({init, Config}) ->
    meck:new(emqx_connector_ee_schema, [passthrough]),
    meck:expect(emqx_connector_ee_schema, resource_type, 1, ?CONNECTOR),
    meck:new(?CONNECTOR, [non_strict]),
    meck:expect(?CONNECTOR, callback_mode, 0, async_if_possible),
    meck:expect(?CONNECTOR, on_start, 2, {ok, connector_state}),
    meck:expect(?CONNECTOR, on_stop, 2, ok),
    meck:expect(?CONNECTOR, on_get_status, 2, connected),
    [{mocked_mods, [?CONNECTOR, emqx_connector_ee_schema]} | Config];
t_connector_lifecycle({'end', Config}) ->
    MockedMods = ?config(mocked_mods, Config),
    meck:unload(MockedMods),
    Config;
t_connector_lifecycle(_Config) ->
    ?assertEqual(
        [],
        emqx_connector:list()
    ),
    ?assertMatch(
        {ok, _},
        emqx_connector:create(kafka_producer, my_connector, connector_config())
    ),
    ?assertMatch(
        {ok, #{name := my_connector, type := kafka_producer}},
        emqx_connector:lookup(<<"connector:kafka_producer:my_connector">>)
    ),
    ?assertMatch(
        {ok, #{
            name := my_connector, type := kafka_producer, resource_data := #{status := connected}
        }},
        emqx_connector:lookup(<<"kafka_producer:my_connector">>)
    ),
    ?assertMatch(
        {ok, #{
            name := my_connector, type := kafka_producer, resource_data := #{status := connected}
        }},
        emqx_connector:lookup(kafka_producer, my_connector)
    ),
    ?assertMatch(
        [#{name := <<"my_connector">>, type := <<"kafka_producer">>}],
        emqx_connector:list()
    ),
    ?assertMatch(
        {ok, #{config := #{enable := false}}},
        emqx_connector:disable_enable(disable, kafka_producer, my_connector)
    ),
    ?assertMatch(
        {ok, #{resource_data := #{status := stopped}}},
        emqx_connector:lookup(kafka_producer, my_connector)
    ),
    ?assertMatch(
        {ok, #{config := #{enable := true}}},
        emqx_connector:disable_enable(enable, kafka_producer, my_connector)
    ),
    ?assertMatch(
        {ok, #{resource_data := #{status := connected}}},
        emqx_connector:lookup(kafka_producer, my_connector)
    ),
    ?assertMatch(
        {ok, #{config := #{connect_timeout := 10000}}},
        emqx_connector:update(kafka_producer, my_connector, (connector_config())#{
            <<"connect_timeout">> => <<"10s">>
        })
    ),
    ?assertMatch(
        {ok, #{resource_data := #{config := #{connect_timeout := 10000}}}},
        emqx_connector:lookup(kafka_producer, my_connector)
    ),
    ?assertMatch(
        ok,
        emqx_connector:remove(kafka_producer, my_connector)
    ),
    ?assertEqual(
        [],
        emqx_connector:list()
    ),
    ?assert(meck:validate(?CONNECTOR)),
    %% The exact callback sequence: create/disable/enable/update/remove.
    ?assertMatch(
        [
            {_, {?CONNECTOR, callback_mode, []}, _},
            {_, {?CONNECTOR, on_start, [_, _]}, {ok, connector_state}},
            {_, {?CONNECTOR, on_get_status, [_, connector_state]}, connected},
            {_, {?CONNECTOR, on_stop, [_, connector_state]}, ok},
            {_, {?CONNECTOR, on_stop, [_, connector_state]}, ok},
            {_, {?CONNECTOR, on_start, [_, _]}, {ok, connector_state}},
            {_, {?CONNECTOR, on_get_status, [_, connector_state]}, connected},
            {_, {?CONNECTOR, on_stop, [_, connector_state]}, ok},
            {_, {?CONNECTOR, callback_mode, []}, _},
            {_, {?CONNECTOR, on_start, [_, _]}, {ok, connector_state}},
            {_, {?CONNECTOR, on_get_status, [_, connector_state]}, connected},
            {_, {?CONNECTOR, on_stop, [_, connector_state]}, ok}
        ],
        meck:history(?CONNECTOR)
    ),
    ok.
%% Removing a connector that still has active channels must fail with a
%% `post_config_update' error and leave the connector in place.
t_remove_fail({'init', Config}) ->
    meck:new(emqx_connector_ee_schema, [passthrough]),
    meck:expect(emqx_connector_ee_schema, resource_type, 1, ?CONNECTOR),
    meck:new(?CONNECTOR, [non_strict]),
    meck:expect(?CONNECTOR, callback_mode, 0, async_if_possible),
    meck:expect(?CONNECTOR, on_start, 2, {ok, connector_state}),
    meck:expect(?CONNECTOR, on_get_channels, 1, [{<<"my_channel">>, #{}}]),
    meck:expect(?CONNECTOR, on_add_channel, 4, {ok, connector_state}),
    meck:expect(?CONNECTOR, on_stop, 2, ok),
    meck:expect(?CONNECTOR, on_get_status, 2, connected),
    [{mocked_mods, [?CONNECTOR, emqx_connector_ee_schema]} | Config];
t_remove_fail({'end', Config}) ->
    MockedMods = ?config(mocked_mods, Config),
    meck:unload(MockedMods),
    Config;
t_remove_fail(_Config) ->
    ?assertEqual(
        [],
        emqx_connector:list()
    ),
    ?assertMatch(
        {ok, _},
        emqx_connector:create(kafka_producer, my_failing_connector, connector_config())
    ),
    %% removal is refused because the mocked connector reports an active channel
    ?assertMatch(
        {error, {post_config_update, emqx_connector, {active_channels, [{<<"my_channel">>, _}]}}},
        emqx_connector:remove(kafka_producer, my_failing_connector)
    ),
    ?assertNotEqual(
        [],
        emqx_connector:list()
    ),
    ?assert(meck:validate(?CONNECTOR)),
    ?assertMatch(
        [
            {_, {?CONNECTOR, callback_mode, []}, _},
            {_, {?CONNECTOR, on_start, [_, _]}, {ok, connector_state}},
            {_, {?CONNECTOR, on_get_channels, [_]}, _},
            {_, {?CONNECTOR, on_get_status, [_, connector_state]}, connected},
            {_, {?CONNECTOR, on_get_channels, [_]}, _},
            {_, {?CONNECTOR, on_add_channel, _}, {ok, connector_state}},
            {_, {?CONNECTOR, on_get_channels, [_]}, _}
        ],
        meck:history(?CONNECTOR)
    ),
    ok.
%% helpers

%% A minimal raw (binary-keyed) kafka_producer connector config, sufficient
%% to pass schema validation; the resource layer itself is mocked in tests.
connector_config() ->
    #{
        <<"authentication">> => <<"none">>,
        <<"bootstrap_hosts">> => <<"127.0.0.1:9092">>,
        <<"connect_timeout">> => <<"5s">>,
        <<"enable">> => true,
        <<"metadata_request_timeout">> => <<"5s">>,
        <<"min_metadata_refresh_interval">> => <<"3s">>,
        <<"socket_opts">> =>
            #{
                <<"recbuf">> => <<"1024KB">>,
                <<"sndbuf">> => <<"1024KB">>,
                <<"tcp_keepalive">> => <<"none">>
            },
        <<"ssl">> =>
            #{
                <<"ciphers">> => [],
                <<"depth">> => 10,
                <<"enable">> => false,
                <<"hibernate_after">> => <<"5s">>,
                <<"log_level">> => <<"notice">>,
                <<"reuse_sessions">> => true,
                <<"secure_renegotiate">> => true,
                <<"verify">> => <<"verify_peer">>,
                <<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>]
            }
    }.
-endif.

View File

@ -0,0 +1,764 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_connector_api_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-import(emqx_mgmt_api_test_util, [uri/1]).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/test_macros.hrl").
-define(CONNECTOR_NAME, (atom_to_binary(?FUNCTION_NAME))).
-define(CONNECTOR(NAME, TYPE), #{
%<<"ssl">> => #{<<"enable">> => false},
<<"type">> => TYPE,
<<"name">> => NAME
}).
-define(CONNECTOR_TYPE_STR, "kafka_producer").
-define(CONNECTOR_TYPE, <<?CONNECTOR_TYPE_STR>>).
-define(KAFKA_BOOTSTRAP_HOST, <<"127.0.0.1:9092">>).
-define(KAFKA_CONNECTOR_BASE(BootstrapHosts), #{
<<"authentication">> => <<"none">>,
<<"bootstrap_hosts">> => BootstrapHosts,
<<"connect_timeout">> => <<"5s">>,
<<"enable">> => true,
<<"metadata_request_timeout">> => <<"5s">>,
<<"min_metadata_refresh_interval">> => <<"3s">>,
<<"socket_opts">> =>
#{
<<"nodelay">> => true,
<<"recbuf">> => <<"1024KB">>,
<<"sndbuf">> => <<"1024KB">>,
<<"tcp_keepalive">> => <<"none">>
}
}).
-define(KAFKA_CONNECTOR_BASE, ?KAFKA_CONNECTOR_BASE(?KAFKA_BOOTSTRAP_HOST)).
-define(KAFKA_CONNECTOR(Name, BootstrapHosts),
maps:merge(
?CONNECTOR(Name, ?CONNECTOR_TYPE),
?KAFKA_CONNECTOR_BASE(BootstrapHosts)
)
).
-define(KAFKA_CONNECTOR(Name), ?KAFKA_CONNECTOR(Name, ?KAFKA_BOOTSTRAP_HOST)).
%% -define(CONNECTOR_TYPE_MQTT, <<"mqtt">>).
%% -define(MQTT_CONNECTOR(SERVER, NAME), ?CONNECTOR(NAME, ?CONNECTOR_TYPE_MQTT)#{
%% <<"server">> => SERVER,
%% <<"username">> => <<"user1">>,
%% <<"password">> => <<"">>,
%% <<"proto_ver">> => <<"v5">>,
%% <<"egress">> => #{
%% <<"remote">> => #{
%% <<"topic">> => <<"emqx/${topic}">>,
%% <<"qos">> => <<"${qos}">>,
%% <<"retain">> => false
%% }
%% }
%% }).
%% -define(MQTT_CONNECTOR(SERVER), ?MQTT_CONNECTOR(SERVER, <<"mqtt_egress_test_connector">>)).
%% -define(CONNECTOR_TYPE_HTTP, <<"kafka_producer">>).
%% -define(HTTP_CONNECTOR(URL, NAME), ?CONNECTOR(NAME, ?CONNECTOR_TYPE_HTTP)#{
%% <<"url">> => URL,
%% <<"local_topic">> => <<"emqx_webhook/#">>,
%% <<"method">> => <<"post">>,
%% <<"body">> => <<"${payload}">>,
%% <<"headers">> => #{
%% % NOTE
%% % The Pascal-Case is important here.
%% % The reason is kinda ridiculous: `emqx_connector_resource:create_dry_run/2` converts
%% % connector config keys into atoms, and the atom 'Content-Type' exists in the ERTS
%% % when this happens (while the 'content-type' does not).
%% <<"Content-Type">> => <<"application/json">>
%% }
%% }).
%% -define(HTTP_CONNECTOR(URL), ?HTTP_CONNECTOR(URL, ?CONNECTOR_NAME)).
%% -define(URL(PORT, PATH),
%% list_to_binary(
%% io_lib:format(
%% "http://localhost:~s/~s",
%% [integer_to_list(PORT), PATH]
%% )
%% )
%% ).
-define(APPSPECS, [
emqx_conf,
emqx,
emqx_auth,
emqx_management,
{emqx_connector, "connectors {}"}
]).
-define(APPSPEC_DASHBOARD,
{emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
).
-if(?EMQX_RELEASE_EDITION == ee).
%% For now we got only kafka_producer implementing `bridge_v2` and that is enterprise only.
all() ->
    [
        {group, single},
        %{group, cluster_later_join},
        {group, cluster}
    ].
-else.
%% Community edition: no connector types available, so no test cases run.
all() ->
    [].
-endif.
%% CT callback: partition the test cases into single-node and cluster groups;
%% some cases only make sense on a single node.
groups() ->
    AllTCs = emqx_common_test_helpers:all(?MODULE),
    SingleOnlyTests = [
        t_connectors_probe
    ],
    ClusterLaterJoinOnlyTCs = [
        % t_cluster_later_join_metrics
    ],
    [
        {single, [], AllTCs -- ClusterLaterJoinOnlyTCs},
        {cluster_later_join, [], ClusterLaterJoinOnlyTCs},
        {cluster, [], (AllTCs -- SingleOnlyTests) -- ClusterLaterJoinOnlyTCs}
    ].
%% CT callback: per-test-case timeout.
suite() ->
    [{timetrap, {seconds, 60}}].

%% Per-group setup does the heavy lifting; suite-level setup is a no-op.
init_per_suite(Config) ->
    Config.

end_per_suite(_Config) ->
    ok.
%% CT callback: start either a two-node cluster or a single node with the
%% dashboard, then create an API key for the HTTP requests.
init_per_group(cluster = Name, Config) ->
    Nodes = [NodePrimary | _] = mk_cluster(Name, Config),
    init_api([{group, Name}, {cluster_nodes, Nodes}, {node, NodePrimary} | Config]);
%% init_per_group(cluster_later_join = Name, Config) ->
%%     Nodes = [NodePrimary | _] = mk_cluster(Name, Config, #{join_to => undefined}),
%%     init_api([{group, Name}, {cluster_nodes, Nodes}, {node, NodePrimary} | Config]);
init_per_group(Name, Config) ->
    WorkDir = filename:join(?config(priv_dir, Config), Name),
    Apps = emqx_cth_suite:start(?APPSPECS ++ [?APPSPEC_DASHBOARD], #{work_dir => WorkDir}),
    init_api([{group, single}, {group_apps, Apps}, {node, node()} | Config]).

%% Create a default management API key on the primary node.
init_api(Config) ->
    Node = ?config(node, Config),
    {ok, ApiKey} = erpc:call(Node, emqx_common_test_http, create_default_app, []),
    [{api_key, ApiKey} | Config].
mk_cluster(Name, Config) ->
    mk_cluster(Name, Config, #{}).

%% Start a two-core-node cluster; only the first node runs the dashboard.
mk_cluster(Name, Config, Opts) ->
    Node1Apps = ?APPSPECS ++ [?APPSPEC_DASHBOARD],
    Node2Apps = ?APPSPECS,
    emqx_cth_cluster:start(
        [
            {emqx_bridge_api_SUITE_1, Opts#{role => core, apps => Node1Apps}},
            {emqx_bridge_api_SUITE_2, Opts#{role => core, apps => Node2Apps}}
        ],
        #{work_dir => filename:join(?config(priv_dir, Config), Name)}
    ).
end_per_group(Group, Config) when
    Group =:= cluster;
    Group =:= cluster_later_join
->
    ok = emqx_cth_cluster:stop(?config(cluster_nodes, Config));
end_per_group(_, Config) ->
    emqx_cth_suite:stop(?config(group_apps, Config)),
    ok.

%% Install the resource mocks on every node involved in the test case.
init_per_testcase(_TestCase, Config) ->
    case ?config(cluster_nodes, Config) of
        undefined ->
            init_mocks();
        Nodes ->
            [erpc:call(Node, ?MODULE, init_mocks, []) || Node <- Nodes]
    end,
    Config.

%% Unload all mocks and remove every connector created by the test case.
end_per_testcase(_TestCase, Config) ->
    case ?config(cluster_nodes, Config) of
        undefined ->
            meck:unload();
        Nodes ->
            [erpc:call(Node, meck, unload, []) || Node <- Nodes]
    end,
    Node = ?config(node, Config),
    ok = emqx_common_test_helpers:call_janitor(),
    ok = erpc:call(Node, fun clear_resources/0),
    ok.
-define(CONNECTOR_IMPL, dummy_connector_impl).
%% Mock the connector resource implementation: connectors whose ID starts
%% with "bad_" get a state that reports `connecting'; all others report
%% `connected'.  Returns the list of mocked modules.
init_mocks() ->
    meck:new(emqx_connector_ee_schema, [passthrough, no_link]),
    meck:expect(emqx_connector_ee_schema, resource_type, 1, ?CONNECTOR_IMPL),
    meck:new(?CONNECTOR_IMPL, [non_strict, no_link]),
    meck:expect(?CONNECTOR_IMPL, callback_mode, 0, async_if_possible),
    meck:expect(
        ?CONNECTOR_IMPL,
        on_start,
        fun
            (<<"connector:", ?CONNECTOR_TYPE_STR, ":bad_", _/binary>>, _C) ->
                {ok, bad_connector_state};
            (_I, _C) ->
                {ok, connector_state}
        end
    ),
    meck:expect(?CONNECTOR_IMPL, on_stop, 2, ok),
    meck:expect(
        ?CONNECTOR_IMPL,
        on_get_status,
        fun
            (_, bad_connector_state) -> connecting;
            (_, _) -> connected
        end
    ),
    [?CONNECTOR_IMPL, emqx_connector_ee_schema].
%% Remove every existing connector; crashes if any removal fails.
clear_resources() ->
    lists:foreach(
        fun(#{type := Type, name := Name}) ->
            ok = emqx_connector:remove(Type, Name)
        end,
        emqx_connector:list()
    ).
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
%% We have to pretend testing a kafka_producer connector since at this point that's the
%% only one that's implemented.
%% Full connector CRUD lifecycle over the management HTTP API: create, list,
%% get, update, delete, plus error responses for unknown or invalid IDs.
t_connectors_lifecycle(Config) ->
    %% assert there's no connectors at first
    {ok, 200, []} = request_json(get, uri(["connectors"]), Config),
    {ok, 404, _} = request(get, uri(["connectors", "foo"]), Config),
    {ok, 404, _} = request(get, uri(["connectors", "kafka_producer:foo"]), Config),
    %% need a var for patterns below
    ConnectorName = ?CONNECTOR_NAME,
    ?assertMatch(
        {ok, 201, #{
            <<"type">> := ?CONNECTOR_TYPE,
            <<"name">> := ConnectorName,
            <<"enable">> := true,
            <<"bootstrap_hosts">> := _,
            <<"status">> := <<"connected">>,
            <<"node_status">> := [_ | _]
        }},
        request_json(
            post,
            uri(["connectors"]),
            ?KAFKA_CONNECTOR(?CONNECTOR_NAME),
            Config
        )
    ),
    %% list all connectors, assert Connector is in it
    ?assertMatch(
        {ok, 200, [
            #{
                <<"type">> := ?CONNECTOR_TYPE,
                <<"name">> := ConnectorName,
                <<"enable">> := true,
                <<"status">> := _,
                <<"node_status">> := [_ | _]
            }
        ]},
        request_json(get, uri(["connectors"]), Config)
    ),
    ConnectorID = emqx_connector_resource:connector_id(?CONNECTOR_TYPE, ?CONNECTOR_NAME),
    %% update the bootstrap hosts via PUT and check the echoed config
    ?assertMatch(
        {ok, 200, #{
            <<"type">> := ?CONNECTOR_TYPE,
            <<"name">> := ConnectorName,
            <<"bootstrap_hosts">> := <<"foobla:1234">>,
            <<"status">> := _,
            <<"node_status">> := [_ | _]
        }},
        request_json(
            put,
            uri(["connectors", ConnectorID]),
            ?KAFKA_CONNECTOR_BASE(<<"foobla:1234">>),
            Config
        )
    ),
    %% list all connectors, assert Connector is in it
    ?assertMatch(
        {ok, 200, [
            #{
                <<"type">> := ?CONNECTOR_TYPE,
                <<"name">> := ConnectorName,
                <<"enable">> := true,
                <<"status">> := _,
                <<"node_status">> := [_ | _]
            }
        ]},
        request_json(get, uri(["connectors"]), Config)
    ),
    %% get the connector by id
    ?assertMatch(
        {ok, 200, #{
            <<"type">> := ?CONNECTOR_TYPE,
            <<"name">> := ConnectorName,
            <<"enable">> := true,
            <<"status">> := _,
            <<"node_status">> := [_ | _]
        }},
        request_json(get, uri(["connectors", ConnectorID]), Config)
    ),
    %% unknown operation name on a connector is a 400
    ?assertMatch(
        {ok, 400, #{
            <<"code">> := <<"BAD_REQUEST">>,
            <<"message">> := _
        }},
        request_json(post, uri(["connectors", ConnectorID, "brababbel"]), Config)
    ),
    %% delete the connector
    {ok, 204, <<>>} = request(delete, uri(["connectors", ConnectorID]), Config),
    {ok, 200, []} = request_json(get, uri(["connectors"]), Config),
    %% update a deleted connector returns an error
    ?assertMatch(
        {ok, 404, #{
            <<"code">> := <<"NOT_FOUND">>,
            <<"message">> := _
        }},
        request_json(
            put,
            uri(["connectors", ConnectorID]),
            ?KAFKA_CONNECTOR_BASE,
            Config
        )
    ),
    %% Deleting a non-existing connector should result in an error
    ?assertMatch(
        {ok, 404, #{
            <<"code">> := <<"NOT_FOUND">>,
            <<"message">> := _
        }},
        request_json(delete, uri(["connectors", ConnectorID]), Config)
    ),
    %% try delete unknown connector id
    ?assertMatch(
        {ok, 404, #{
            <<"code">> := <<"NOT_FOUND">>,
            <<"message">> := <<"Invalid connector ID", _/binary>>
        }},
        request_json(delete, uri(["connectors", "foo"]), Config)
    ),
    %% Try create connector with bad characters as name
    {ok, 400, _} = request(post, uri(["connectors"]), ?KAFKA_CONNECTOR(<<"隋达"/utf8>>), Config),
    ok.
%% Node-scoped start on a node that does not exist (or whose name is not even
%% an existing atom) must return 404.
t_start_connector_unknown_node(Config) ->
    {ok, 404, _} =
        request(
            post,
            uri(["nodes", "thisbetterbenotanatomyet", "connectors", "kafka_producer:foo", start]),
            Config
        ),
    {ok, 404, _} =
        request(
            post,
            uri(["nodes", "undefined", "connectors", "kafka_producer:foo", start]),
            Config
        ).
t_start_stop_connectors_node(Config) ->
    do_start_stop_connectors(node, Config).

t_start_stop_connectors_cluster(Config) ->
    do_start_stop_connectors(cluster, Config).

%% Start/stop/restart a connector via node-scoped or cluster-scoped
%% operations, including a "bad" connector whose endpoint never answers.
do_start_stop_connectors(TestType, Config) ->
    %% assert there's no connectors at first
    {ok, 200, []} = request_json(get, uri(["connectors"]), Config),
    Name = atom_to_binary(TestType),
    ?assertMatch(
        {ok, 201, #{
            <<"type">> := ?CONNECTOR_TYPE,
            <<"name">> := Name,
            <<"enable">> := true,
            <<"status">> := <<"connected">>,
            <<"node_status">> := [_ | _]
        }},
        request_json(
            post,
            uri(["connectors"]),
            ?KAFKA_CONNECTOR(Name),
            Config
        )
    ),
    ConnectorID = emqx_connector_resource:connector_id(?CONNECTOR_TYPE, Name),
    %% stopping only one node of a cluster leaves the aggregate status inconsistent
    ExpectedStatus =
        case ?config(group, Config) of
            cluster when TestType == node ->
                <<"inconsistent">>;
            _ ->
                <<"stopped">>
        end,
    %% stop it
    {ok, 204, <<>>} = request(post, {operation, TestType, stop, ConnectorID}, Config),
    ?assertMatch(
        {ok, 200, #{<<"status">> := ExpectedStatus}},
        request_json(get, uri(["connectors", ConnectorID]), Config)
    ),
    %% start again
    {ok, 204, <<>>} = request(post, {operation, TestType, start, ConnectorID}, Config),
    ?assertMatch(
        {ok, 200, #{<<"status">> := <<"connected">>}},
        request_json(get, uri(["connectors", ConnectorID]), Config)
    ),
    %% start a started connector
    {ok, 204, <<>>} = request(post, {operation, TestType, start, ConnectorID}, Config),
    ?assertMatch(
        {ok, 200, #{<<"status">> := <<"connected">>}},
        request_json(get, uri(["connectors", ConnectorID]), Config)
    ),
    %% restart an already started connector
    {ok, 204, <<>>} = request(post, {operation, TestType, restart, ConnectorID}, Config),
    ?assertMatch(
        {ok, 200, #{<<"status">> := <<"connected">>}},
        request_json(get, uri(["connectors", ConnectorID]), Config)
    ),
    %% stop it again
    {ok, 204, <<>>} = request(post, {operation, TestType, stop, ConnectorID}, Config),
    %% restart a stopped connector
    {ok, 204, <<>>} = request(post, {operation, TestType, restart, ConnectorID}, Config),
    ?assertMatch(
        {ok, 200, #{<<"status">> := <<"connected">>}},
        request_json(get, uri(["connectors", ConnectorID]), Config)
    ),
    {ok, 400, _} = request(post, {operation, TestType, invalidop, ConnectorID}, Config),
    %% delete the connector
    {ok, 204, <<>>} = request(delete, uri(["connectors", ConnectorID]), Config),
    {ok, 200, []} = request_json(get, uri(["connectors"]), Config),
    %% Fail parse-id check
    {ok, 404, _} = request(post, {operation, TestType, start, <<"wreckbook_fugazi">>}, Config),
    %% Looks ok but doesn't exist
    {ok, 404, _} = request(post, {operation, TestType, start, <<"webhook:cptn_hook">>}, Config),
    %% Create broken connector
    {ListenPort, Sock} = listen_on_random_port(),
    %% Connecting to this endpoint should always timeout
    BadServer = iolist_to_binary(io_lib:format("localhost:~B", [ListenPort])),
    BadName = <<"bad_", (atom_to_binary(TestType))/binary>>,
    ?assertMatch(
        {ok, 201, #{
            <<"type">> := ?CONNECTOR_TYPE,
            <<"name">> := BadName,
            <<"enable">> := true,
            <<"bootstrap_hosts">> := BadServer,
            <<"status">> := <<"connecting">>,
            <<"node_status">> := [_ | _]
        }},
        request_json(
            post,
            uri(["connectors"]),
            ?KAFKA_CONNECTOR(BadName, BadServer),
            Config
        )
    ),
    BadConnectorID = emqx_connector_resource:connector_id(?CONNECTOR_TYPE, BadName),
    ?assertMatch(
        %% request from product: return 400 on such errors
        {ok, SC, _} when SC == 500 orelse SC == 400,
        request(post, {operation, TestType, start, BadConnectorID}, Config)
    ),
    ok = gen_tcp:close(Sock),
    ok.
t_start_stop_inconsistent_connector_node(Config) ->
    start_stop_inconsistent_connector(node, Config).

t_start_stop_inconsistent_connector_cluster(Config) ->
    start_stop_inconsistent_connector(cluster, Config).

%% When a node reports `not_found' while stopping a connector, the API must
%% respond 503.  The mock makes stop fail only for this specific connector.
start_stop_inconsistent_connector(Type, Config) ->
    Node = ?config(node, Config),
    erpc:call(Node, fun() ->
        meck:new(emqx_connector_resource, [passthrough, no_link]),
        meck:expect(
            emqx_connector_resource,
            stop,
            fun
                (_, <<"connector_not_found">>) -> {error, not_found};
                (ConnectorType, Name) -> meck:passthrough([ConnectorType, Name])
            end
        )
    end),
    emqx_common_test_helpers:on_exit(fun() ->
        erpc:call(Node, fun() ->
            meck:unload([emqx_connector_resource])
        end)
    end),
    {ok, 201, _Connector} = request(
        post,
        uri(["connectors"]),
        ?KAFKA_CONNECTOR(<<"connector_not_found">>),
        Config
    ),
    {ok, 503, _} = request(
        post, {operation, Type, stop, <<"kafka_producer:connector_not_found">>}, Config
    ).
%% Enable/disable a connector via the `enable' endpoint, and verify that
%% node/cluster start operations are forbidden while it is disabled.
t_enable_disable_connectors(Config) ->
    %% assert there's no connectors at first
    {ok, 200, []} = request_json(get, uri(["connectors"]), Config),
    Name = ?CONNECTOR_NAME,
    ?assertMatch(
        {ok, 201, #{
            <<"type">> := ?CONNECTOR_TYPE,
            <<"name">> := Name,
            <<"enable">> := true,
            <<"status">> := <<"connected">>,
            <<"node_status">> := [_ | _]
        }},
        request_json(
            post,
            uri(["connectors"]),
            ?KAFKA_CONNECTOR(Name),
            Config
        )
    ),
    ConnectorID = emqx_connector_resource:connector_id(?CONNECTOR_TYPE, Name),
    %% disable it
    {ok, 204, <<>>} = request(put, enable_path(false, ConnectorID), Config),
    ?assertMatch(
        {ok, 200, #{<<"status">> := <<"stopped">>}},
        request_json(get, uri(["connectors", ConnectorID]), Config)
    ),
    %% enable again
    {ok, 204, <<>>} = request(put, enable_path(true, ConnectorID), Config),
    ?assertMatch(
        {ok, 200, #{<<"status">> := <<"connected">>}},
        request_json(get, uri(["connectors", ConnectorID]), Config)
    ),
    %% enable an already started connector
    {ok, 204, <<>>} = request(put, enable_path(true, ConnectorID), Config),
    ?assertMatch(
        {ok, 200, #{<<"status">> := <<"connected">>}},
        request_json(get, uri(["connectors", ConnectorID]), Config)
    ),
    %% disable it again
    {ok, 204, <<>>} = request(put, enable_path(false, ConnectorID), Config),
    %% bad param
    {ok, 400, _} = request(put, enable_path(foo, ConnectorID), Config),
    {ok, 404, _} = request(put, enable_path(true, "foo"), Config),
    {ok, 404, _} = request(put, enable_path(true, "webhook:foo"), Config),
    {ok, 400, Res} = request(post, {operation, node, start, ConnectorID}, <<>>, fun json/1, Config),
    ?assertEqual(
        #{
            <<"code">> => <<"BAD_REQUEST">>,
            <<"message">> => <<"Forbidden operation, connector not enabled">>
        },
        Res
    ),
    %% NOTE(review): `Res' is already bound here, so this match asserts that the
    %% cluster-wide start returns exactly the same error body as the node start.
    {ok, 400, Res} = request(
        post, {operation, cluster, start, ConnectorID}, <<>>, fun json/1, Config
    ),
    %% enable a stopped connector
    {ok, 204, <<>>} = request(put, enable_path(true, ConnectorID), Config),
    ?assertMatch(
        {ok, 200, #{<<"status">> := <<"connected">>}},
        request_json(get, uri(["connectors", ConnectorID]), Config)
    ),
    %% delete the connector
    {ok, 204, <<>>} = request(delete, uri(["connectors", ConnectorID]), Config),
    {ok, 200, []} = request_json(get, uri(["connectors"]), Config).
%% Verifies that updating a connector with a *redacted* configuration
%% (secrets replaced by placeholders via `emqx_utils:redact/1') keeps the
%% originally stored secret instead of overwriting it with the placeholder.
t_with_redact_update(Config) ->
    Name = <<"redact_update">>,
    Password = <<"123456">>,
    %% Kafka connector template with plain-auth credentials attached.
    Template = (?KAFKA_CONNECTOR(Name))#{
        <<"authentication">> => #{
            <<"mechanism">> => <<"plain">>,
            <<"username">> => <<"test">>,
            <<"password">> => Password
        }
    },
    {ok, 201, _} = request(
        post,
        uri(["connectors"]),
        Template,
        Config
    ),
    %% update with redacted config (name/type are not updatable fields)
    ConnectorUpdatedConf = maps:without([<<"name">>, <<"type">>], emqx_utils:redact(Template)),
    ConnectorID = emqx_connector_resource:connector_id(?CONNECTOR_TYPE, Name),
    {ok, 200, _} = request(put, uri(["connectors", ConnectorID]), ConnectorUpdatedConf, Config),
    %% the stored raw config must still hold the original password
    ?assertEqual(
        Password,
        get_raw_config([connectors, ?CONNECTOR_TYPE, Name, authentication, password], Config)
    ),
    ok.
%% Tests the dry-run endpoint `/connectors_probe': a probe never creates a
%% real connector (so repeated probes with the same name succeed), a
%% failing on_start surfaces as TEST_FAILED, and an unknown connector type
%% is rejected with BAD_REQUEST.
t_connectors_probe(Config) ->
    {ok, 204, <<>>} = request(
        post,
        uri(["connectors_probe"]),
        ?KAFKA_CONNECTOR(?CONNECTOR_NAME),
        Config
    ),
    %% second time with same name is ok since no real connector created
    {ok, 204, <<>>} = request(
        post,
        uri(["connectors_probe"]),
        ?KAFKA_CONNECTOR(?CONNECTOR_NAME),
        Config
    ),
    %% make the mocked connector implementation fail to start
    meck:expect(?CONNECTOR_IMPL, on_start, 2, {error, on_start_error}),
    ?assertMatch(
        {ok, 400, #{
            <<"code">> := <<"TEST_FAILED">>,
            <<"message">> := _
        }},
        request_json(
            post,
            uri(["connectors_probe"]),
            ?KAFKA_CONNECTOR(<<"broken_connector">>, <<"brokenhost:1234">>),
            Config
        )
    ),
    %% restore a healthy mock; an unknown connector type is still rejected
    meck:expect(?CONNECTOR_IMPL, on_start, 2, {ok, connector_state}),
    ?assertMatch(
        {ok, 400, #{<<"code">> := <<"BAD_REQUEST">>}},
        request_json(
            post,
            uri(["connectors_probe"]),
            ?CONNECTOR(<<"broken_connector">>, <<"unknown_type">>),
            Config
        )
    ),
    ok.
%%% helpers
%% Open a TCP listen socket on an OS-assigned free port (port 0).
%%
%% Returns `{Port, Sock}' on success; the caller owns `Sock' and must
%% close it.  On `eaddrinuse' (a rare race, which the original guard
%% `Reason /= eaddrinuse' excluded from the only error clause, causing a
%% `case_clause' crash) we now wait briefly and retry.  Any other error
%% is returned as `{error, Reason}'.
listen_on_random_port() ->
    SockOpts = [binary, {active, false}, {packet, raw}, {reuseaddr, true}, {backlog, 1000}],
    case gen_tcp:listen(0, SockOpts) of
        {ok, Sock} ->
            {ok, Port} = inet:port(Sock),
            {Port, Sock};
        {error, eaddrinuse} ->
            %% retry instead of crashing with case_clause
            timer:sleep(100),
            listen_on_random_port();
        {error, Reason} ->
            {error, Reason}
    end.
%% request/3: convenience wrapper with an empty request body.
request(Method, URL, Config) ->
    request(Method, URL, [], Config).

%% request/4: perform an authenticated management-API call.
%% The first clause resolves `{operation, Type, Op, BridgeID}' tuples
%% (Type is `node' or `cluster') into a concrete URL before delegating.
request(Method, {operation, Type, Op, BridgeID}, Body, Config) ->
    URL = operation_path(Type, Op, BridgeID, Config),
    request(Method, URL, Body, Config);
request(Method, URL, Body, Config) ->
    %% API-key auth header comes from the per-suite config
    AuthHeader = emqx_common_test_http:auth_header(?config(api_key, Config)),
    Opts = #{compatible_mode => true, httpc_req_opts => [{body_format, binary}]},
    emqx_mgmt_api_test_util:request_api(Method, URL, [], AuthHeader, Body, Opts).
%% request/5: like request/4, but pipes a successful response body through
%% `Decoder'.  A decoder error tuple is returned as-is; any non-ok
%% transport result passes through untouched.
request(Method, URL, Body, Decoder, Config) ->
    Raw = request(Method, URL, Body, Config),
    case Raw of
        {ok, StatusCode, Payload} ->
            case Decoder(Payload) of
                {error, _} = DecodeError ->
                    DecodeError;
                Decoded ->
                    {ok, StatusCode, Decoded}
            end;
        Other ->
            Other
    end.
%% request_json/3,4: like request/3,4 but decode the response body as JSON
%% (maps) via json/1.
request_json(Method, URLLike, Config) ->
    request(Method, URLLike, [], fun json/1, Config).

request_json(Method, URLLike, Body, Config) ->
    request(Method, URLLike, Body, fun json/1, Config).
%% Build the REST path for a connector operation, either targeted at a
%% single node (`/nodes/<node>/connectors/<id>/<op>') or cluster-wide
%% (`/connectors/<id>/<op>').
operation_path(node, Oper, ConnectorID, Config) ->
    uri(["nodes", ?config(node, Config), "connectors", ConnectorID, Oper]);
operation_path(cluster, Oper, ConnectorID, _Config) ->
    uri(["connectors", ConnectorID, Oper]).
%% Path for the enable/disable endpoint; `Enable' is rendered into the
%% URL verbatim (booleans expected; other terms exercise error paths).
enable_path(Enable, ConnectorID) ->
    uri(["connectors", ConnectorID, "enable", Enable]).
%% Publish an MQTT message with the given topic/payload on the test node
%% via RPC.
publish_message(Topic, Body, Config) ->
    Msg = emqx_message:make(Topic, Body),
    erpc:call(?config(node, Config), emqx, publish, [Msg]).
%% Update the (checked) EMQX config at `Path' on the test node via RPC.
update_config(Path, Value, Config) ->
    Node = ?config(node, Config),
    erpc:call(Node, emqx, update_config, [Path, Value]).
%% Read the raw (unvalidated, binary-keyed) config at `Path' from the
%% test node via RPC.
get_raw_config(Path, Config) ->
    Node = ?config(node, Config),
    erpc:call(Node, emqx, get_raw_config, [Path]).
%% Add a user to the given authenticator chain on the test node via RPC.
add_user_auth(Chain, AuthenticatorID, User, Config) ->
    Node = ?config(node, Config),
    erpc:call(Node, emqx_authentication, add_user, [Chain, AuthenticatorID, User]).
%% Remove a user from the given authenticator chain on the test node via RPC.
delete_user_auth(Chain, AuthenticatorID, User, Config) ->
    Node = ?config(node, Config),
    erpc:call(Node, emqx_authentication, delete_user, [Chain, AuthenticatorID, User]).
%% Coerce a string-ish value to an Erlang list string: lists pass through
%% unchanged, binaries are converted byte-wise.
str(Value) when is_list(Value) ->
    Value;
str(Value) when is_binary(Value) ->
    binary_to_list(Value).
%% Decode a JSON binary into maps.  On failure, log the payload to the
%% Common Test log and return the `{error, Reason}' tuple unchanged so
%% callers can pattern-match on it.
json(Payload) when is_binary(Payload) ->
    case emqx_utils_json:safe_decode(Payload, [return_maps]) of
        {ok, Decoded} ->
            Decoded;
        {error, Reason} = Error ->
            ct:pal("Failed to decode json: ~p~n~p", [Reason, Payload]),
            Error
    end.

View File

@ -14,7 +14,11 @@
%% limitations under the License.
%%--------------------------------------------------------------------
%% This module is for dashboard to retrieve the schema hot config and bridges.
%% This module is for dashboard to retrieve the schema of
%% 1. hot-config
%% 2. bridge
%% 3. bridge_v2
%% 4. connector
-module(emqx_dashboard_schema_api).
-behaviour(minirest_api).
@ -41,11 +45,12 @@ paths() ->
%% This is a rather hidden API, so we don't need to add translations for the description.
schema("/schemas/:name") ->
Schemas = [hotconf, bridges, bridges_v2, connectors],
#{
'operationId' => get_schema,
get => #{
parameters => [
{name, hoconsc:mk(hoconsc:enum([hotconf, bridges]), #{in => path})}
{name, hoconsc:mk(hoconsc:enum(Schemas), #{in => path})}
],
desc => <<
"Get the schema JSON of the specified name. "
@ -73,4 +78,23 @@ get_schema(get, _) ->
gen_schema(hotconf) ->
emqx_conf:hotconf_schema_json();
gen_schema(bridges) ->
emqx_conf:bridge_schema_json().
emqx_conf:bridge_schema_json();
gen_schema(bridges_v2) ->
bridge_v2_schema_json();
gen_schema(connectors) ->
connectors_schema_json().
bridge_v2_schema_json() ->
SchemaInfo = #{title => <<"EMQX Data Bridge V2 API Schema">>, version => <<"0.1.0">>},
gen_api_schema_json_iodata(emqx_bridge_v2_api, SchemaInfo).
connectors_schema_json() ->
SchemaInfo = #{title => <<"EMQX Connectors Schema">>, version => <<"0.1.0">>},
gen_api_schema_json_iodata(emqx_connector_api, SchemaInfo).
gen_api_schema_json_iodata(SchemaMod, SchemaInfo) ->
emqx_dashboard_swagger:gen_api_schema_json_iodata(
SchemaMod,
SchemaInfo,
fun emqx_conf:hocon_schema_to_spec/2
).

View File

@ -1,6 +1,6 @@
{application, emqx_enterprise, [
{description, "EMQX Enterprise Edition"},
{vsn, "0.1.3"},
{vsn, "0.1.4"},
{registered, []},
{applications, [
kernel,

View File

@ -10,6 +10,7 @@
-include_lib("hocon/include/hoconsc.hrl").
-export([namespace/0, roots/0, fields/1, translations/0, translation/1, desc/1, validations/0]).
-export([upgrade_raw_conf/1]).
-define(EE_SCHEMA_MODULES, [
emqx_license_schema,
@ -17,6 +18,10 @@
emqx_ft_schema
]).
%% Callback to upgrade config after loaded from config file but before validation.
upgrade_raw_conf(RawConf) ->
emqx_conf_schema:upgrade_raw_conf(RawConf).
namespace() ->
emqx_conf_schema:namespace().

View File

@ -76,8 +76,8 @@ fields(file_transfer) ->
#{
desc => ?DESC("init_timeout"),
required => false,
importance => ?IMPORTANCE_HIDDEN,
default => "10s"
importance => ?IMPORTANCE_LOW,
default => <<"10s">>
}
)},
{store_segment_timeout,
@ -86,8 +86,8 @@ fields(file_transfer) ->
#{
desc => ?DESC("store_segment_timeout"),
required => false,
importance => ?IMPORTANCE_HIDDEN,
default => "5m"
importance => ?IMPORTANCE_LOW,
default => <<"5m">>
}
)},
{assemble_timeout,
@ -97,7 +97,7 @@ fields(file_transfer) ->
desc => ?DESC("assemble_timeout"),
required => false,
importance => ?IMPORTANCE_LOW,
default => "5m"
default => <<"5m">>
}
)},
{storage,
@ -208,7 +208,7 @@ fields(local_storage_segments_gc) ->
#{
desc => ?DESC("storage_gc_interval"),
required => false,
default => "1h"
default => <<"1h">>
}
)},
{maximum_segments_ttl,
@ -218,7 +218,7 @@ fields(local_storage_segments_gc) ->
#{
desc => ?DESC("storage_gc_max_segments_ttl"),
required => false,
default => "24h"
default => <<"24h">>
}
)},
{minimum_segments_ttl,
@ -228,7 +228,7 @@ fields(local_storage_segments_gc) ->
#{
desc => ?DESC("storage_gc_min_segments_ttl"),
required => false,
default => "5m",
default => <<"5m">>,
% NOTE
% This setting does not seem to be useful to an end-user.
hidden => true

View File

@ -192,6 +192,7 @@ api_key(post, #{body := App}) ->
} = App,
ExpiredAt = ensure_expired_at(App),
Desc = unicode:characters_to_binary(Desc0, unicode),
%% create api_key with random api_key and api_secret from Dashboard
case emqx_mgmt_auth:create(Name, Enable, ExpiredAt, Desc) of
{ok, NewApp} ->
{200, emqx_mgmt_auth:format(NewApp)};

View File

@ -14,6 +14,7 @@
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_mgmt_auth).
-include_lib("emqx_mgmt.hrl").
-include_lib("emqx/include/emqx.hrl").
-include_lib("emqx/include/logger.hrl").
@ -43,12 +44,13 @@
-export([
do_update/4,
do_delete/1,
do_create_app/3,
do_force_create_app/3
do_create_app/1,
do_force_create_app/1
]).
-ifdef(TEST).
-export([create/5]).
-export([create/6]).
-export([trans/2, force_create_app/1]).
-endif.
-define(APP, emqx_app).
@ -63,6 +65,8 @@
created_at = 0 :: integer() | '_'
}).
-define(DEFAULT_HASH_LEN, 16).
mnesia(boot) ->
ok = mria:create_table(?APP, [
{type, set},
@ -97,11 +101,12 @@ init_bootstrap_file() ->
create(Name, Enable, ExpiredAt, Desc) ->
ApiSecret = generate_api_secret(),
create(Name, ApiSecret, Enable, ExpiredAt, Desc).
ApiKey = generate_unique_api_key(Name),
create(Name, ApiKey, ApiSecret, Enable, ExpiredAt, Desc).
create(Name, ApiSecret, Enable, ExpiredAt, Desc) ->
create(Name, ApiKey, ApiSecret, Enable, ExpiredAt, Desc) ->
case mnesia:table_info(?APP, size) < 100 of
true -> create_app(Name, ApiSecret, Enable, ExpiredAt, Desc);
true -> create_app(Name, ApiKey, ApiSecret, Enable, ExpiredAt, Desc);
false -> {error, "Maximum ApiKey"}
end.
@ -202,7 +207,7 @@ to_map(#?APP{name = N, api_key = K, enable = E, expired_at = ET, created_at = CT
is_expired(undefined) -> false;
is_expired(ExpiredTime) -> ExpiredTime < erlang:system_time(second).
create_app(Name, ApiSecret, Enable, ExpiredAt, Desc) ->
create_app(Name, ApiKey, ApiSecret, Enable, ExpiredAt, Desc) ->
App =
#?APP{
name = Name,
@ -211,7 +216,7 @@ create_app(Name, ApiSecret, Enable, ExpiredAt, Desc) ->
desc = Desc,
created_at = erlang:system_time(second),
api_secret_hash = emqx_dashboard_admin:hash(ApiSecret),
api_key = list_to_binary(emqx_utils:gen_id(16))
api_key = ApiKey
},
case create_app(App) of
{ok, Res} ->
@ -220,13 +225,13 @@ create_app(Name, ApiSecret, Enable, ExpiredAt, Desc) ->
Error
end.
create_app(App = #?APP{api_key = ApiKey, name = Name}) ->
trans(fun ?MODULE:do_create_app/3, [App, ApiKey, Name]).
create_app(App) ->
trans(fun ?MODULE:do_create_app/1, [App]).
force_create_app(NamePrefix, App = #?APP{api_key = ApiKey}) ->
trans(fun ?MODULE:do_force_create_app/3, [App, ApiKey, NamePrefix]).
force_create_app(App) ->
trans(fun ?MODULE:do_force_create_app/1, [App]).
do_create_app(App, ApiKey, Name) ->
do_create_app(App = #?APP{api_key = ApiKey, name = Name}) ->
case mnesia:read(?APP, Name) of
[_] ->
mnesia:abort(name_already_existed);
@ -240,21 +245,58 @@ do_create_app(App, ApiKey, Name) ->
end
end.
do_force_create_app(App, ApiKey, NamePrefix) ->
do_force_create_app(App) ->
_ = maybe_cleanup_api_key(App),
ok = mnesia:write(App).
maybe_cleanup_api_key(#?APP{name = Name, api_key = ApiKey}) ->
case mnesia:match_object(?APP, #?APP{api_key = ApiKey, _ = '_'}, read) of
[] ->
NewName = generate_unique_name(NamePrefix),
ok = mnesia:write(App#?APP{name = NewName});
ok;
[#?APP{name = Name}] ->
ok = mnesia:write(App#?APP{name = Name})
?SLOG(debug, #{
msg => "same_apikey_detected",
info => <<"The last `KEY:SECRET` in bootstrap file will be used.">>
}),
ok;
[_App1] ->
?SLOG(info, #{
msg => "update_apikey_name_from_old_version",
info =>
<<"Update ApiKey name with new name rule, see also: ",
"https://github.com/emqx/emqx/pull/11798">>
}),
ok;
Existed ->
%% Duplicated or upgraded from old version:
%% Which `Name` and `ApiKey` are not related in old version.
%% So delete it/(them) and write a new record with a name strongly related to the apikey.
%% The apikeys generated from the file do not have names.
%% Generate a name for the apikey from the apikey itself by rule:
%% Use `from_bootstrap_file_` as the prefix, and the first 16 digits of the
%% sha512 hexadecimal value of the `ApiKey` as the suffix to form the name of the apikey.
%% e.g. The name of the apikey: `example-api-key:secret_xxxx` is `from_bootstrap_file_53280fb165b6cd37`
?SLOG(info, #{
msg => "duplicated_apikey_detected",
info => <<"Delete duplicated apikeys and write a new one from bootstrap file">>
}),
_ = lists:map(
fun(#?APP{name = N}) -> ok = mnesia:delete({?APP, N}) end, Existed
),
ok
end.
generate_unique_name(NamePrefix) ->
New = list_to_binary(NamePrefix ++ emqx_utils:gen_id(16)),
case mnesia:read(?APP, New) of
[] -> New;
_ -> generate_unique_name(NamePrefix)
end.
hash_string_from_seed(Seed, PrefixLen) ->
<<Integer:512>> = crypto:hash(sha512, Seed),
list_to_binary(string:slice(io_lib:format("~128.16.0b", [Integer]), 0, PrefixLen)).
%% Form Dashboard API Key pannel, only `Name` provided for users
generate_unique_api_key(Name) ->
hash_string_from_seed(Name, ?DEFAULT_HASH_LEN).
%% Form BootStrap File, only `ApiKey` provided from file, no `Name`
generate_unique_name(NamePrefix, ApiKey) ->
<<NamePrefix/binary, (hash_string_from_seed(ApiKey, ?DEFAULT_HASH_LEN))/binary>>.
trans(Fun, Args) ->
case mria:transaction(?COMMON_SHARD, Fun, Args) of
@ -300,22 +342,24 @@ init_bootstrap_file(File, Dev, MP) ->
end.
-define(BOOTSTRAP_TAG, <<"Bootstrapped From File">>).
-define(FROM_BOOTSTRAP_FILE_PREFIX, <<"from_bootstrap_file_">>).
add_bootstrap_file(File, Dev, MP, Line) ->
case file:read_line(Dev) of
{ok, Bin} ->
case re:run(Bin, MP, [global, {capture, all_but_first, binary}]) of
{match, [[AppKey, ApiSecret]]} ->
{match, [[ApiKey, ApiSecret]]} ->
App =
#?APP{
name = generate_unique_name(?FROM_BOOTSTRAP_FILE_PREFIX, ApiKey),
api_key = ApiKey,
api_secret_hash = emqx_dashboard_admin:hash(ApiSecret),
enable = true,
expired_at = infinity,
desc = ?BOOTSTRAP_TAG,
created_at = erlang:system_time(second),
api_secret_hash = emqx_dashboard_admin:hash(ApiSecret),
api_key = AppKey
expired_at = infinity
},
case force_create_app("from_bootstrap_file_", App) of
case force_create_app(App) of
{ok, ok} ->
add_bootstrap_file(File, Dev, MP, Line + 1);
{error, Reason} ->

View File

@ -61,6 +61,12 @@
<<"slow_subs">>
]).
%% emqx_bridge_v2 depends on emqx_connector, so connectors need to be imported first
-define(IMPORT_ORDER, [
emqx_connector,
emqx_bridge_v2
]).
-define(DEFAULT_OPTS, #{}).
-define(tar(_FileName_), _FileName_ ++ ?TAR_SUFFIX).
-define(fmt_tar_err(_Expr_),
@ -462,11 +468,12 @@ import_cluster_hocon(BackupDir, Opts) ->
case filelib:is_regular(HoconFileName) of
true ->
{ok, RawConf} = hocon:files([HoconFileName]),
{ok, _} = validate_cluster_hocon(RawConf),
RawConf1 = upgrade_raw_conf(emqx_conf:schema_module(), RawConf),
{ok, _} = validate_cluster_hocon(RawConf1),
maybe_print("Importing cluster configuration...~n", [], Opts),
%% At this point, when all validations have been passed, we want to log errors (if any)
%% but proceed with the next items, instead of aborting the whole import operation
do_import_conf(RawConf, Opts);
do_import_conf(RawConf1, Opts);
false ->
maybe_print("No cluster configuration to be imported.~n", [], Opts),
?SLOG(info, #{
@ -476,6 +483,16 @@ import_cluster_hocon(BackupDir, Opts) ->
#{}
end.
upgrade_raw_conf(SchemaMod, RawConf) ->
_ = SchemaMod:module_info(),
case erlang:function_exported(SchemaMod, upgrade_raw_conf, 1) of
true ->
%% TODO make it a schema module behaviour in hocon_schema
apply(SchemaMod, upgrade_raw_conf, [RawConf]);
false ->
RawConf
end.
read_data_files(RawConf) ->
DataDir = bin(emqx:data_dir()),
{ok, Cwd} = file:get_cwd(),
@ -523,7 +540,7 @@ do_import_conf(RawConf, Opts) ->
GenConfErrs = filter_errors(maps:from_list(import_generic_conf(RawConf))),
maybe_print_errors(GenConfErrs, Opts),
Errors =
lists:foldr(
lists:foldl(
fun(Module, ErrorsAcc) ->
case Module:import_config(RawConf) of
{ok, #{changed := Changed}} ->
@ -534,11 +551,27 @@ do_import_conf(RawConf, Opts) ->
end
end,
GenConfErrs,
find_behaviours(emqx_config_backup)
sort_importer_modules(find_behaviours(emqx_config_backup))
),
maybe_print_errors(Errors, Opts),
Errors.
sort_importer_modules(Modules) ->
lists:sort(
fun(M1, M2) -> order(M1, ?IMPORT_ORDER) =< order(M2, ?IMPORT_ORDER) end,
Modules
).
order(Elem, List) ->
order(Elem, List, 0).
order(_Elem, [], Order) ->
Order;
order(Elem, [Elem | _], Order) ->
Order;
order(Elem, [_ | T], Order) ->
order(Elem, T, Order + 1).
import_generic_conf(Data) ->
lists:map(
fun(Key) ->

View File

@ -19,6 +19,19 @@
-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-define(APP, emqx_app).
-record(?APP, {
name = <<>> :: binary() | '_',
api_key = <<>> :: binary() | '_',
api_secret_hash = <<>> :: binary() | '_',
enable = true :: boolean() | '_',
desc = <<>> :: binary() | '_',
expired_at = 0 :: integer() | undefined | infinity | '_',
created_at = 0 :: integer() | '_'
}).
all() -> [{group, parallel}, {group, sequence}].
suite() -> [{timetrap, {minutes, 1}}].
@ -72,6 +85,97 @@ t_bootstrap_file(_) ->
update_file(<<>>),
ok.
t_bootstrap_file_override(_) ->
TestPath = <<"/api/v5/status">>,
Bin =
<<"test-1:secret-1\ntest-1:duplicated-secret-1\ntest-2:secret-2\ntest-2:duplicated-secret-2">>,
File = "./bootstrap_api_keys.txt",
ok = file:write_file(File, Bin),
update_file(File),
?assertEqual(ok, emqx_mgmt_auth:init_bootstrap_file()),
MatchFun = fun(ApiKey) -> mnesia:match_object(#?APP{api_key = ApiKey, _ = '_'}) end,
?assertMatch(
{ok, [
#?APP{
name = <<"from_bootstrap_file_18926f94712af04e">>,
api_key = <<"test-1">>
}
]},
emqx_mgmt_auth:trans(MatchFun, [<<"test-1">>])
),
?assertEqual(ok, emqx_mgmt_auth:authorize(TestPath, <<"test-1">>, <<"duplicated-secret-1">>)),
?assertMatch(
{ok, [
#?APP{
name = <<"from_bootstrap_file_de1c28a2e610e734">>,
api_key = <<"test-2">>
}
]},
emqx_mgmt_auth:trans(MatchFun, [<<"test-2">>])
),
?assertEqual(ok, emqx_mgmt_auth:authorize(TestPath, <<"test-2">>, <<"duplicated-secret-2">>)),
ok.
t_bootstrap_file_dup_override(_) ->
TestPath = <<"/api/v5/status">>,
TestApiKey = <<"test-1">>,
Bin = <<"test-1:secret-1">>,
File = "./bootstrap_api_keys.txt",
ok = file:write_file(File, Bin),
update_file(File),
?assertEqual(ok, emqx_mgmt_auth:init_bootstrap_file()),
SameAppWithDiffName = #?APP{
name = <<"name-1">>,
api_key = <<"test-1">>,
api_secret_hash = emqx_dashboard_admin:hash(<<"duplicated-secret-1">>),
enable = true,
desc = <<"dup api key">>,
created_at = erlang:system_time(second),
expired_at = infinity
},
WriteFun = fun(App) -> mnesia:write(App) end,
MatchFun = fun(ApiKey) -> mnesia:match_object(#?APP{api_key = ApiKey, _ = '_'}) end,
?assertEqual({ok, ok}, emqx_mgmt_auth:trans(WriteFun, [SameAppWithDiffName])),
%% as erlang term order
?assertMatch(
{ok, [
#?APP{
name = <<"name-1">>,
api_key = <<"test-1">>
},
#?APP{
name = <<"from_bootstrap_file_18926f94712af04e">>,
api_key = <<"test-1">>
}
]},
emqx_mgmt_auth:trans(MatchFun, [TestApiKey])
),
update_file(File),
%% Similar to loading bootstrap file at node startup
%% the duplicated apikey in mnesia will be cleaned up
?assertEqual(ok, emqx_mgmt_auth:init_bootstrap_file()),
?assertMatch(
{ok, [
#?APP{
name = <<"from_bootstrap_file_18926f94712af04e">>,
api_key = <<"test-1">>
}
]},
emqx_mgmt_auth:trans(MatchFun, [<<"test-1">>])
),
%% the last apikey in bootstrap file will override the all in mnesia and the previous one(s) in bootstrap file
?assertEqual(ok, emqx_mgmt_auth:authorize(TestPath, <<"test-1">>, <<"secret-1">>)),
ok.
update_file(File) ->
?assertMatch({ok, _}, emqx:update_config([<<"api_key">>], #{<<"bootstrap_file">> => File})).

View File

@ -1,6 +1,6 @@
{application, emqx_opentelemetry, [
{description, "OpenTelemetry for EMQX Broker"},
{vsn, "0.1.2"},
{vsn, "0.1.3"},
{registered, []},
{mod, {emqx_otel_app, []}},
{applications, [kernel, stdlib, emqx]},

View File

@ -62,7 +62,7 @@ fields("exporter") ->
emqx_schema:url(),
#{
mapping => "opentelemetry_exporter.otlp_endpoint",
default => "http://localhost:4317",
default => <<"http://localhost:4317">>,
desc => ?DESC(endpoint)
}
)},

View File

@ -15,12 +15,14 @@
%%--------------------------------------------------------------------
-type resource_type() :: module().
-type resource_id() :: binary().
-type channel_id() :: binary().
-type raw_resource_config() :: binary() | raw_term_resource_config().
-type raw_term_resource_config() :: #{binary() => term()} | [raw_term_resource_config()].
-type resource_config() :: term().
-type resource_spec() :: map().
-type resource_state() :: term().
-type resource_status() :: connected | disconnected | connecting | stopped.
-type channel_status() :: connected | connecting.
-type callback_mode() :: always_sync | async_if_possible.
-type query_mode() ::
simple_sync
@ -43,7 +45,9 @@
expire_at => infinity | integer(),
async_reply_fun => reply_fun(),
simple_query => boolean(),
reply_to => reply_fun()
reply_to => reply_fun(),
query_mode => query_mode(),
query_mode_cache_override => boolean()
}.
-type resource_data() :: #{
id := resource_id(),
@ -53,7 +57,8 @@
config := resource_config(),
error := term(),
state := resource_state(),
status := resource_status()
status := resource_status(),
added_channels := term()
}.
-type resource_group() :: binary().
-type creation_opts() :: #{

View File

@ -1,30 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-define(SAFE_CALL(_EXP_),
?SAFE_CALL(_EXP_, {error, {_EXCLASS_, _EXCPTION_, _ST_}})
).
-define(SAFE_CALL(_EXP_, _EXP_ON_FAIL_),
fun() ->
try
(_EXP_)
catch
_EXCLASS_:_EXCPTION_:_ST_ ->
_EXP_ON_FAIL_
end
end()
).

View File

@ -17,7 +17,6 @@
-module(emqx_resource).
-include("emqx_resource.hrl").
-include("emqx_resource_utils.hrl").
-include("emqx_resource_errors.hrl").
-include_lib("emqx/include/logger.hrl").
@ -50,6 +49,8 @@
%% run start/2, health_check/2 and stop/1 sequentially
create_dry_run/2,
create_dry_run_local/2,
create_dry_run_local/3,
create_dry_run_local/4,
%% this will do create_dry_run, stop the old instance and start a new one
recreate/3,
recreate/4,
@ -59,11 +60,15 @@
remove/1,
remove_local/1,
reset_metrics/1,
reset_metrics_local/1
reset_metrics_local/1,
%% Create metrics for a resource ID
create_metrics/1,
%% Delete metrics for a resource ID
clear_metrics/1
]).
%% Calls to the callback module with current resource state
%% They also save the state after the call finished (except query/2,3).
%% They also save the state after the call finished (except call_get_channel_config/3).
-export([
start/1,
@ -72,6 +77,8 @@
restart/2,
%% verify if the resource is working normally
health_check/1,
channel_health_check/2,
get_channels/1,
%% set resource status to disconnected
set_resource_status_connecting/1,
%% stop the instance
@ -87,7 +94,9 @@
has_allocated_resources/1,
get_allocated_resources/1,
get_allocated_resources_list/1,
forget_allocated_resources/1
forget_allocated_resources/1,
%% Get channel config from resource
call_get_channel_config/3
]).
%% Direct calls to the callback module
@ -99,10 +108,18 @@
call_start/3,
%% verify if the resource is working normally
call_health_check/3,
%% verify if the resource channel is working normally
call_channel_health_check/4,
%% stop the instance
call_stop/3,
%% get the query mode of the resource
query_mode/3
query_mode/3,
%% Add channel to resource
call_add_channel/5,
%% Remove channel from resource
call_remove_channel/4,
%% Get channels from resource
call_get_channels/2
]).
%% list all the instances, id only.
@ -125,6 +142,7 @@
-export_type([
query_mode/0,
resource_id/0,
channel_id/0,
resource_data/0,
resource_status/0
]).
@ -135,6 +153,10 @@
on_query_async/4,
on_batch_query_async/4,
on_get_status/2,
on_get_channel_status/3,
on_add_channel/4,
on_remove_channel/3,
on_get_channels/1,
query_mode/1
]).
@ -176,8 +198,56 @@
| {resource_status(), resource_state()}
| {resource_status(), resource_state(), term()}.
-callback on_get_channel_status(resource_id(), channel_id(), resource_state()) ->
channel_status()
| {error, term()}.
-callback query_mode(Config :: term()) -> query_mode().
%% This callback handles the installation of a specified channel.
%%
%% If the channel cannot be successfully installed, the callback shall
%% throw an exception or return an error tuple.
-callback on_add_channel(
ResId :: term(), ResourceState :: term(), ChannelId :: binary(), ChannelConfig :: map()
) -> {ok, term()} | {error, term()}.
%% This callback handles the removal of a specified channel resource.
%%
%% It's guaranteed that the provided channel is installed when this
%% function is invoked. Upon successful deinstallation, the function should return
%% a new state
%%
%% If the channel cannot be successfully deinstalled, the callback should
%% log an error.
%%
-callback on_remove_channel(
ResId :: term(), ResourceState :: term(), ChannelId :: binary()
) -> {ok, NewState :: term()}.
%% This callback shall return a list of channel configs that are currently active
%% for the resource with the given id.
-callback on_get_channels(
ResId :: term()
) -> [term()].
-define(SAFE_CALL(EXPR),
(fun() ->
try
EXPR
catch
throw:Reason ->
{error, Reason};
C:E:S ->
{error, #{
execption => C,
reason => emqx_utils:redact(E),
stacktrace => emqx_utils:redact(S)
}}
end
end)()
).
-spec list_types() -> [module()].
list_types() ->
discover_resource_mods().
@ -234,6 +304,16 @@ create_dry_run(ResourceType, Config) ->
create_dry_run_local(ResourceType, Config) ->
emqx_resource_manager:create_dry_run(ResourceType, Config).
create_dry_run_local(ResId, ResourceType, Config) ->
emqx_resource_manager:create_dry_run(ResId, ResourceType, Config).
-spec create_dry_run_local(resource_id(), resource_type(), resource_config(), OnReadyCallback) ->
ok | {error, Reason :: term()}
when
OnReadyCallback :: fun((resource_id()) -> ok | {error, Reason :: term()}).
create_dry_run_local(ResId, ResourceType, Config, OnReadyCallback) ->
emqx_resource_manager:create_dry_run(ResId, ResourceType, Config, OnReadyCallback).
-spec recreate(resource_id(), resource_type(), resource_config()) ->
{ok, resource_data()} | {error, Reason :: term()}.
recreate(ResId, ResourceType, Config) ->
@ -273,8 +353,7 @@ remove_local(ResId) ->
resource_id => ResId
}),
ok
end,
ok.
end.
-spec reset_metrics_local(resource_id()) -> ok.
reset_metrics_local(ResId) ->
@ -292,46 +371,61 @@ query(ResId, Request) ->
-spec query(resource_id(), Request :: term(), query_opts()) ->
Result :: term().
query(ResId, Request, Opts) ->
case emqx_resource_manager:lookup_cached(ResId) of
{ok, _Group, #{query_mode := QM, error := Error}} ->
case {QM, Error} of
{_, unhealthy_target} ->
emqx_resource_metrics:matched_inc(ResId),
emqx_resource_metrics:dropped_resource_stopped_inc(ResId),
?RESOURCE_ERROR(unhealthy_target, "unhealthy target");
{_, {unhealthy_target, _Message}} ->
emqx_resource_metrics:matched_inc(ResId),
emqx_resource_metrics:dropped_resource_stopped_inc(ResId),
?RESOURCE_ERROR(unhealthy_target, "unhealthy target");
{simple_async, _} ->
%% TODO(5.1.1): pass Resource instead of ResId to simple APIs
%% so the buffer worker does not need to lookup the cache again
emqx_resource_buffer_worker:simple_async_query(ResId, Request, Opts);
{simple_sync, _} ->
%% TODO(5.1.1): pass Resource instead of ResId to simple APIs
%% so the buffer worker does not need to lookup the cache again
emqx_resource_buffer_worker:simple_sync_query(ResId, Request, Opts);
{simple_async_internal_buffer, _} ->
%% This is for bridges/connectors that have internal buffering, such
%% as Kafka and Pulsar producers.
%% TODO(5.1.1): pass Resource instead of ResId to simple APIs
%% so the buffer worker does not need to lookup the cache again
emqx_resource_buffer_worker:simple_async_query(ResId, Request, Opts);
{simple_sync_internal_buffer, _} ->
%% This is for bridges/connectors that have internal buffering, such
%% as Kafka and Pulsar producers.
%% TODO(5.1.1): pass Resource instead of ResId to simple APIs
%% so the buffer worker does not need to lookup the cache again
emqx_resource_buffer_worker:simple_sync_internal_buffer_query(
ResId, Request, Opts
);
{sync, _} ->
emqx_resource_buffer_worker:sync_query(ResId, Request, Opts);
{async, _} ->
emqx_resource_buffer_worker:async_query(ResId, Request, Opts)
case get_query_mode_error(ResId, Opts) of
{error, _} = ErrorTuple ->
ErrorTuple;
{_, unhealthy_target} ->
emqx_resource_metrics:matched_inc(ResId),
emqx_resource_metrics:dropped_resource_stopped_inc(ResId),
?RESOURCE_ERROR(unhealthy_target, "unhealthy target");
{_, {unhealthy_target, _Message}} ->
emqx_resource_metrics:matched_inc(ResId),
emqx_resource_metrics:dropped_resource_stopped_inc(ResId),
?RESOURCE_ERROR(unhealthy_target, "unhealthy target");
{simple_async, _} ->
%% TODO(5.1.1): pass Resource instead of ResId to simple APIs
%% so the buffer worker does not need to lookup the cache again
emqx_resource_buffer_worker:simple_async_query(ResId, Request, Opts);
{simple_sync, _} ->
%% TODO(5.1.1): pass Resource instead of ResId to simple APIs
%% so the buffer worker does not need to lookup the cache again
emqx_resource_buffer_worker:simple_sync_query(ResId, Request, Opts);
{simple_async_internal_buffer, _} ->
%% This is for bridges/connectors that have internal buffering, such
%% as Kafka and Pulsar producers.
%% TODO(5.1.1): pass Resource instead of ResId to simple APIs
%% so the buffer worker does not need to lookup the cache again
emqx_resource_buffer_worker:simple_async_query(ResId, Request, Opts);
{simple_sync_internal_buffer, _} ->
%% This is for bridges/connectors that have internal buffering, such
%% as Kafka and Pulsar producers.
%% TODO(5.1.1): pass Resource instead of ResId to simple APIs
%% so the buffer worker does not need to lookup the cache again
emqx_resource_buffer_worker:simple_sync_internal_buffer_query(
ResId, Request, Opts
);
{sync, _} ->
emqx_resource_buffer_worker:sync_query(ResId, Request, Opts);
{async, _} ->
emqx_resource_buffer_worker:async_query(ResId, Request, Opts)
end.
get_query_mode_error(ResId, Opts) ->
case maps:get(query_mode_cache_override, Opts, true) of
false ->
case Opts of
#{query_mode := QueryMode} ->
{QueryMode, ok};
_ ->
{async, unhealthy_target}
end;
{error, not_found} ->
?RESOURCE_ERROR(not_found, "resource not found")
true ->
case emqx_resource_manager:lookup_cached(ResId) of
{ok, _Group, #{query_mode := QM, error := Error}} ->
{QM, Error};
{error, not_found} ->
{error, not_found}
end
end.
-spec simple_sync_query(resource_id(), Request :: term()) -> Result :: term().
@ -362,6 +456,15 @@ stop(ResId) ->
health_check(ResId) ->
emqx_resource_manager:health_check(ResId).
-spec channel_health_check(resource_id(), channel_id()) ->
{ok, resource_status()} | {error, term()}.
channel_health_check(ResId, ChannelId) ->
emqx_resource_manager:channel_health_check(ResId, ChannelId).
-spec get_channels(resource_id()) -> {ok, [{binary(), map()}]} | {error, term()}.
get_channels(ResId) ->
emqx_resource_manager:get_channels(ResId).
set_resource_status_connecting(ResId) ->
emqx_resource_manager:set_resource_status_connecting(ResId).
@ -412,21 +515,14 @@ get_callback_mode(Mod) ->
-spec call_start(resource_id(), module(), resource_config()) ->
{ok, resource_state()} | {error, Reason :: term()}.
call_start(ResId, Mod, Config) ->
try
%% If the previous manager process crashed without cleaning up
%% allocated resources, clean them up.
clean_allocated_resources(ResId, Mod),
Mod:on_start(ResId, Config)
catch
throw:Error ->
{error, Error};
Kind:Error:Stacktrace ->
{error, #{
exception => Kind,
reason => Error,
stacktrace => emqx_utils:redact(Stacktrace)
}}
end.
?SAFE_CALL(
begin
%% If the previous manager process crashed without cleaning up
%% allocated resources, clean them up.
clean_allocated_resources(ResId, Mod),
Mod:on_start(ResId, Config)
end
).
-spec call_health_check(resource_id(), module(), resource_state()) ->
resource_status()
@ -436,6 +532,67 @@ call_start(ResId, Mod, Config) ->
%% Invoke the callback module's `on_get_status/2'; ?SAFE_CALL traps
%% exceptions so a crashing callback is reported as an error term.
call_health_check(ResId, Mod, ResourceState) ->
    ?SAFE_CALL(Mod:on_get_status(ResId, ResourceState)).
-spec call_channel_health_check(resource_id(), channel_id(), module(), resource_state()) ->
    channel_status()
    | {error, term()}.
%% Invoke the callback module's `on_get_channel_status/3' for one channel,
%% trapping exceptions via ?SAFE_CALL.
call_channel_health_check(ResId, ChannelId, Mod, ResourceState) ->
    ?SAFE_CALL(Mod:on_get_channel_status(ResId, ChannelId, ResourceState)).
%% Ask the callback module to install `ChannelId' into its resource state.
%% The callback is optional: when the module does not export
%% `on_add_channel/4' an error term naming the resource is returned.
call_add_channel(ResId, Mod, ResourceState, ChannelId, ChannelConfig) ->
    case erlang:function_exported(Mod, on_add_channel, 4) of
        false ->
            Prefix = <<"on_add_channel callback function not available for connector with resource id ">>,
            {error, <<Prefix/binary, ResId/binary>>};
        true ->
            ?SAFE_CALL(
                Mod:on_add_channel(ResId, ResourceState, ChannelId, ChannelConfig)
            )
    end.
%% Ask the callback module to uninstall `ChannelId' from its resource state.
call_remove_channel(ResId, Mod, ResourceState, ChannelId) ->
    %% Check if on_remove_channel is exported (the callback is optional)
    case erlang:function_exported(Mod, on_remove_channel, 3) of
        true ->
            ?SAFE_CALL(
                Mod:on_remove_channel(
                    ResId, ResourceState, ChannelId
                )
            );
        false ->
            {error,
                <<<<"on_remove_channel callback function not available for connector with resource id ">>/binary,
                    ResId/binary>>}
    end.
%% Return the channels the callback module reports for `ResId'.
%% `on_get_channels/1' is optional; modules that do not export it
%% simply have no channels.
call_get_channels(ResId, Mod) ->
    case erlang:function_exported(Mod, on_get_channels, 1) of
        false -> [];
        true -> Mod:on_get_channels(ResId)
    end.
%% @doc Look up the configuration of `ChannelId' among the channels reported
%% by `Mod:on_get_channels/1' for `ResId'.  Returns the config term, or an
%% `{error, Binary}' explaining why it could not be found.
%% Fix: the concatenated error messages were missing separator spaces
%% ("Channel Xnot found", "resource idX"), producing garbled diagnostics.
call_get_channel_config(ResId, ChannelId, Mod) ->
    case erlang:function_exported(Mod, on_get_channels, 1) of
        true ->
            ChConfigs = Mod:on_get_channels(ResId),
            case [Conf || {ChId, Conf} <- ChConfigs, ChId =:= ChannelId] of
                [ChannelConf] ->
                    ChannelConf;
                _ ->
                    {error,
                        <<"Channel ", ChannelId/binary,
                            " not found. There seems to be a broken reference">>}
            end;
        false ->
            {error,
                <<"on_get_channels callback function not available for resource id ",
                    ResId/binary>>}
    end.
-spec call_stop(resource_id(), module(), resource_state()) -> term().
call_stop(ResId, Mod, ResourceState) ->
?SAFE_CALL(begin
@ -575,6 +732,33 @@ forget_allocated_resources(InstanceId) ->
true = ets:delete(?RESOURCE_ALLOCATION_TAB, InstanceId),
ok.
-spec create_metrics(resource_id()) -> ok.
%% Register the standard per-resource counters with the metrics worker.
%% `matched' (last list argument) is also rate-tracked; the rest are plain
%% counters covering retries, successes, failures and the drop reasons.
create_metrics(ResId) ->
    emqx_metrics_worker:create_metrics(
        ?RES_METRICS,
        ResId,
        [
            'matched',
            'retried',
            'retried.success',
            'retried.failed',
            'success',
            'late_reply',
            'failed',
            'dropped',
            'dropped.expired',
            'dropped.queue_full',
            'dropped.resource_not_found',
            'dropped.resource_stopped',
            'dropped.other',
            'received'
        ],
        [matched]
    ).
-spec clear_metrics(resource_id()) -> ok.
%% Drop all metrics previously registered for this resource.
clear_metrics(ResId) ->
    emqx_metrics_worker:clear_metrics(?RES_METRICS, ResId).
%% =================================================================================
filter_instances(Filter) ->

View File

@ -1076,7 +1076,7 @@ handle_async_worker_down(Data0, Pid) ->
-spec call_query(force_sync | async_if_possible, _, _, _, _, _) -> _.
call_query(QM, Id, Index, Ref, Query, QueryOpts) ->
?tp(call_query_enter, #{id => Id, query => Query, query_mode => QM}),
case emqx_resource_manager:lookup_cached(Id) of
case emqx_resource_manager:lookup_cached(extract_connector_id(Id)) of
{ok, _Group, #{status := stopped}} ->
?RESOURCE_ERROR(stopped, "resource stopped or disabled");
{ok, _Group, #{status := connecting, error := unhealthy_target}} ->
@ -1087,20 +1087,65 @@ call_query(QM, Id, Index, Ref, Query, QueryOpts) ->
?RESOURCE_ERROR(not_found, "resource not found")
end.
%% Strip the channel prefix from a channel id, yielding the underlying
%% connector id.  Example input:
%%   bridge_v2:kafka_producer:myproducer1:connector:kafka_producer:mykakfaclient1
%% yields connector:kafka_producer:mykakfaclient1.  Any id that does not
%% match the six-part channel layout is returned unchanged.
extract_connector_id(Id) when is_binary(Id) ->
    Parts = binary:split(Id, <<":">>, [global]),
    case Parts of
        [_ChannelGlobalType, _ChannelSubType, _ChannelName, <<"connector">>, ConnType, ConnName] ->
            iolist_to_binary([<<"connector:">>, ConnType, <<":">>, ConnName]);
        _ ->
            Id
    end;
extract_connector_id(Id) ->
    Id.
%% A channel id embeds a connector id suffix; if stripping that suffix
%% changes the id, it must have been a channel id.
is_channel_id(Id) ->
    Id =/= extract_connector_id(Id).
%% Check if channel is installed in the connector state.
%% There is no need to query the connector if the channel is not
%% installed as the query will fail anyway.
%% Requests that are not `{Id, _}' tuples pass through unchecked
%% (third clause).
pre_query_channel_check({Id, _} = _Request, Channels) when
    is_map_key(Id, Channels),
    (map_get(Id, Channels) =:= connected orelse map_get(Id, Channels) =:= connecting)
->
    ok;
pre_query_channel_check({Id, _} = _Request, _Channels) ->
    %% Fail with a recoverable error if the channel is not installed
    %% so that the operation can be retried. It is emqx_resource_manager's
    %% responsibility to ensure that the channel installation is retried.
    %% Ids that are not channel ids (plain connector queries) are let through.
    case is_channel_id(Id) of
        true ->
            error(
                {recoverable_error,
                    iolist_to_binary(io_lib:format("channel: \"~s\" not operational", [Id]))}
            );
        false ->
            ok
    end;
pre_query_channel_check(_Request, _Channels) ->
    ok.
do_call_query(QM, Id, Index, Ref, Query, QueryOpts, #{query_mode := ResQM} = Resource) when
ResQM =:= simple_sync_internal_buffer; ResQM =:= simple_async_internal_buffer
->
%% The connector supports buffer, send even in disconnected state
#{mod := Mod, state := ResSt, callback_mode := CBM} = Resource,
#{mod := Mod, state := ResSt, callback_mode := CBM, added_channels := Channels} = Resource,
CallMode = call_mode(QM, CBM),
apply_query_fun(CallMode, Mod, Id, Index, Ref, Query, ResSt, QueryOpts);
apply_query_fun(CallMode, Mod, Id, Index, Ref, Query, ResSt, Channels, QueryOpts);
do_call_query(QM, Id, Index, Ref, Query, QueryOpts, #{status := connected} = Resource) ->
%% when calling from the buffer worker or other simple queries,
%% only apply the query fun when it's at connected status
#{mod := Mod, state := ResSt, callback_mode := CBM} = Resource,
#{mod := Mod, state := ResSt, callback_mode := CBM, added_channels := Channels} = Resource,
CallMode = call_mode(QM, CBM),
apply_query_fun(CallMode, Mod, Id, Index, Ref, Query, ResSt, QueryOpts);
do_call_query(_QM, _Id, _Index, _Ref, _Query, _QueryOpts, _Resource) ->
apply_query_fun(CallMode, Mod, Id, Index, Ref, Query, ResSt, Channels, QueryOpts);
do_call_query(_QM, _Id, _Index, _Ref, _Query, _QueryOpts, _Data) ->
?RESOURCE_ERROR(not_connected, "resource not connected").
-define(APPLY_RESOURCE(NAME, EXPR, REQ),
@ -1131,14 +1176,23 @@ do_call_query(_QM, _Id, _Index, _Ref, _Query, _QueryOpts, _Resource) ->
).
apply_query_fun(
sync, Mod, Id, _Index, _Ref, ?QUERY(_, Request, _, _) = _Query, ResSt, QueryOpts
sync, Mod, Id, _Index, _Ref, ?QUERY(_, Request, _, _) = _Query, ResSt, Channels, QueryOpts
) ->
?tp(call_query, #{id => Id, mod => Mod, query => _Query, res_st => ResSt, call_mode => sync}),
maybe_reply_to(
?APPLY_RESOURCE(call_query, Mod:on_query(Id, Request, ResSt), Request),
?APPLY_RESOURCE(
call_query,
begin
pre_query_channel_check(Request, Channels),
Mod:on_query(extract_connector_id(Id), Request, ResSt)
end,
Request
),
QueryOpts
);
apply_query_fun(async, Mod, Id, Index, Ref, ?QUERY(_, Request, _, _) = Query, ResSt, QueryOpts) ->
apply_query_fun(
async, Mod, Id, Index, Ref, ?QUERY(_, Request, _, _) = Query, ResSt, Channels, QueryOpts
) ->
?tp(call_query_async, #{
id => Id, mod => Mod, query => Query, res_st => ResSt, call_mode => async
}),
@ -1160,23 +1214,51 @@ apply_query_fun(async, Mod, Id, Index, Ref, ?QUERY(_, Request, _, _) = Query, Re
AsyncWorkerMRef = undefined,
InflightItem = ?INFLIGHT_ITEM(Ref, Query, IsRetriable, AsyncWorkerMRef),
ok = inflight_append(InflightTID, InflightItem),
Result = Mod:on_query_async(Id, Request, {ReplyFun, [ReplyContext]}, ResSt),
pre_query_channel_check(Request, Channels),
Result = Mod:on_query_async(
extract_connector_id(Id), Request, {ReplyFun, [ReplyContext]}, ResSt
),
{async_return, Result}
end,
Request
);
apply_query_fun(
sync, Mod, Id, _Index, _Ref, [?QUERY(_, _, _, _) | _] = Batch, ResSt, QueryOpts
sync,
Mod,
Id,
_Index,
_Ref,
[?QUERY(_, FirstRequest, _, _) | _] = Batch,
ResSt,
Channels,
QueryOpts
) ->
?tp(call_batch_query, #{
id => Id, mod => Mod, batch => Batch, res_st => ResSt, call_mode => sync
}),
Requests = lists:map(fun(?QUERY(_ReplyTo, Request, _, _ExpireAt)) -> Request end, Batch),
maybe_reply_to(
?APPLY_RESOURCE(call_batch_query, Mod:on_batch_query(Id, Requests, ResSt), Batch),
?APPLY_RESOURCE(
call_batch_query,
begin
pre_query_channel_check(FirstRequest, Channels),
Mod:on_batch_query(extract_connector_id(Id), Requests, ResSt)
end,
Batch
),
QueryOpts
);
apply_query_fun(async, Mod, Id, Index, Ref, [?QUERY(_, _, _, _) | _] = Batch, ResSt, QueryOpts) ->
apply_query_fun(
async,
Mod,
Id,
Index,
Ref,
[?QUERY(_, FirstRequest, _, _) | _] = Batch,
ResSt,
Channels,
QueryOpts
) ->
?tp(call_batch_query_async, #{
id => Id, mod => Mod, batch => Batch, res_st => ResSt, call_mode => async
}),
@ -1201,7 +1283,10 @@ apply_query_fun(async, Mod, Id, Index, Ref, [?QUERY(_, _, _, _) | _] = Batch, Re
AsyncWorkerMRef = undefined,
InflightItem = ?INFLIGHT_ITEM(Ref, Batch, IsRetriable, AsyncWorkerMRef),
ok = inflight_append(InflightTID, InflightItem),
Result = Mod:on_batch_query_async(Id, Requests, {ReplyFun, [ReplyContext]}, ResSt),
pre_query_channel_check(FirstRequest, Channels),
Result = Mod:on_batch_query_async(
extract_connector_id(Id), Requests, {ReplyFun, [ReplyContext]}, ResSt
),
{async_return, Result}
end,
Batch

View File

@ -26,10 +26,16 @@
recreate/4,
remove/1,
create_dry_run/2,
create_dry_run/3,
create_dry_run/4,
restart/2,
start/2,
stop/1,
health_check/1
health_check/1,
channel_health_check/2,
add_channel/3,
remove_channel/2,
get_channels/1
]).
-export([
@ -64,6 +70,7 @@
state,
error,
pid,
added_channels,
extra
}).
-type data() :: #data{}.
@ -123,27 +130,8 @@ create_and_return_data(ResId, Group, ResourceType, Config, Opts) ->
create(ResId, Group, ResourceType, Config, Opts) ->
% The state machine will make the actual call to the callback/resource module after init
ok = emqx_resource_manager_sup:ensure_child(ResId, Group, ResourceType, Config, Opts),
ok = emqx_metrics_worker:create_metrics(
?RES_METRICS,
ResId,
[
'matched',
'retried',
'retried.success',
'retried.failed',
'success',
'late_reply',
'failed',
'dropped',
'dropped.expired',
'dropped.queue_full',
'dropped.resource_not_found',
'dropped.resource_stopped',
'dropped.other',
'received'
],
[matched]
),
% Create metrics for the resource
ok = emqx_resource:create_metrics(ResId),
QueryMode = emqx_resource:query_mode(ResourceType, Config, Opts),
case QueryMode of
%% the resource has built-in buffer, so there is no need for resource workers
@ -173,6 +161,19 @@ create(ResId, Group, ResourceType, Config, Opts) ->
ok | {error, Reason :: term()}.
%% Probe a resource configuration with a fresh random test id.
create_dry_run(ResourceType, Config) ->
    ResId = make_test_id(),
    create_dry_run(ResId, ResourceType, Config).

%% Probe with the given id and no extra on-ready work.
create_dry_run(ResId, ResourceType, Config) ->
    create_dry_run(ResId, ResourceType, Config, fun do_nothing_on_ready/1).

%% Default OnReadyCallback for create_dry_run/3.
do_nothing_on_ready(_ResId) ->
    ok.
-spec create_dry_run(resource_id(), resource_type(), resource_config(), OnReadyCallback) ->
ok | {error, Reason :: term()}
when
OnReadyCallback :: fun((resource_id()) -> ok | {error, Reason :: term()}).
create_dry_run(ResId, ResourceType, Config, OnReadyCallback) ->
Opts =
case is_map(Config) of
true -> maps:get(resource_opts, Config, #{});
@ -183,7 +184,19 @@ create_dry_run(ResourceType, Config) ->
Timeout = emqx_utils:clamp(HealthCheckInterval, 5_000, 60_000),
case wait_for_ready(ResId, Timeout) of
ok ->
remove(ResId);
CallbackResult =
try
OnReadyCallback(ResId)
catch
_:CallbackReason ->
{error, CallbackReason}
end,
case remove(ResId) of
ok ->
CallbackResult;
{error, _} = Error ->
Error
end;
{error, Reason} ->
_ = remove(ResId),
{error, Reason};
@ -292,6 +305,23 @@ list_group(Group) ->
%% Ask the resource's statem process for a health check
%% (call bounded by ?T_OPERATION).
health_check(ResId) ->
    safe_call(ResId, health_check, ?T_OPERATION).
-spec channel_health_check(resource_id(), channel_id()) ->
    {ok, resource_status()} | {error, term()}.
%% Health-check a single channel of a resource.
channel_health_check(ResId, ChannelId) ->
    %% Do normal health check first to trigger health checks for channels
    %% and update the cached health status for the channels
    _ = health_check(ResId),
    safe_call(ResId, {channel_health_check, ChannelId}, ?T_OPERATION).
%% Install a channel into the connector identified by `ResId'.
add_channel(ResId, ChannelId, Config) ->
    safe_call(ResId, {add_channel, ChannelId, Config}, ?T_OPERATION).

%% Uninstall a channel from the connector identified by `ResId'.
remove_channel(ResId, ChannelId) ->
    safe_call(ResId, {remove_channel, ChannelId}, ?T_OPERATION).

%% List the channels the callback module reports for this resource.
get_channels(ResId) ->
    safe_call(ResId, get_channels, ?T_OPERATION).
%% Server start/stop callbacks
%% @doc Function called from the supervisor to actually start the server
@ -310,7 +340,8 @@ start_link(ResId, Group, ResourceType, Config, Opts) ->
config = Config,
opts = Opts,
state = undefined,
error = undefined
error = undefined,
added_channels = #{}
},
gen_statem:start_link(?REF(ResId), ?MODULE, {Data, Opts}, []).
@ -374,8 +405,13 @@ handle_event({call, From}, lookup, _State, #data{group = Group} = Data) ->
handle_event({call, From}, health_check, stopped, _Data) ->
Actions = [{reply, From, {error, resource_is_stopped}}],
{keep_state_and_data, Actions};
handle_event({call, From}, {channel_health_check, _}, stopped, _Data) ->
Actions = [{reply, From, {error, resource_is_stopped}}],
{keep_state_and_data, Actions};
handle_event({call, From}, health_check, _State, Data) ->
handle_manually_health_check(From, Data);
handle_event({call, From}, {channel_health_check, ChannelId}, _State, Data) ->
handle_manually_channel_health_check(From, Data, ChannelId);
% State: CONNECTING
handle_event(enter, _OldState, connecting = State, Data) ->
ok = log_state_consistency(State, Data),
@ -394,6 +430,14 @@ handle_event(enter, _OldState, connected = State, Data) ->
{keep_state_and_data, health_check_actions(Data)};
handle_event(state_timeout, health_check, connected, Data) ->
handle_connected_health_check(Data);
handle_event(
{call, From}, {add_channel, ChannelId, Config}, connected = _State, Data
) ->
handle_add_channel(From, Data, ChannelId, Config);
handle_event(
{call, From}, {remove_channel, ChannelId}, connected = _State, Data
) ->
handle_remove_channel(From, ChannelId, Data);
%% State: DISCONNECTED
handle_event(enter, _OldState, disconnected = State, Data) ->
ok = log_state_consistency(State, Data),
@ -407,6 +451,20 @@ handle_event(state_timeout, auto_retry, disconnected, Data) ->
handle_event(enter, _OldState, stopped = State, Data) ->
ok = log_state_consistency(State, Data),
{keep_state_and_data, []};
%% The following events can be handled in any other state
handle_event(
{call, From}, {add_channel, ChannelId, _Config}, State, Data
) ->
handle_not_connected_add_channel(From, ChannelId, State, Data);
handle_event(
{call, From}, {remove_channel, ChannelId}, _State, Data
) ->
handle_not_connected_remove_channel(From, ChannelId, Data);
handle_event(
{call, From}, get_channels, _State, Data
) ->
Channels = emqx_resource:call_get_channels(Data#data.id, Data#data.mod),
{keep_state_and_data, {reply, From, {ok, Channels}}};
% Ignore all other events
handle_event(EventType, EventData, State, Data) ->
?SLOG(
@ -483,10 +541,11 @@ start_resource(Data, From) ->
%% in case the emqx_resource:call_start/2 hangs, the lookup/1 can read status from the cache
case emqx_resource:call_start(Data#data.id, Data#data.mod, Data#data.config) of
{ok, ResourceState} ->
UpdatedData = Data#data{status = connecting, state = ResourceState},
UpdatedData1 = Data#data{status = connecting, state = ResourceState},
%% Perform an initial health_check immediately before transitioning into a connected state
UpdatedData2 = add_channels(UpdatedData1),
Actions = maybe_reply([{state_timeout, 0, health_check}], From, ok),
{next_state, connecting, update_state(UpdatedData, Data), Actions};
{next_state, connecting, update_state(UpdatedData2, Data), Actions};
{error, Reason} = Err ->
?SLOG(warning, #{
msg => "start_resource_failed",
@ -494,11 +553,63 @@ start_resource(Data, From) ->
reason => Reason
}),
_ = maybe_alarm(disconnected, Data#data.id, Err, Data#data.error),
%% Add channels and raise alarms
NewData1 = channels_health_check(disconnected, add_channels(Data)),
%% Keep track of the error reason why the connection did not work
%% so that the Reason can be returned when the verification call is made.
UpdatedData = Data#data{status = disconnected, error = Err},
Actions = maybe_reply(retry_actions(UpdatedData), From, Err),
{next_state, disconnected, update_state(UpdatedData, Data), Actions}
NewData2 = NewData1#data{status = disconnected, error = Err},
Actions = maybe_reply(retry_actions(NewData2), From, Err),
{next_state, disconnected, update_state(NewData2, Data), Actions}
end.
%% Register every channel owned by this resource as "not yet added".
%% Channels get status {error, connecting} here; they are installed into
%% the resource state only after the initial health_check succeeds.
add_channels(Data) ->
    ChannelIdConfigTuples = emqx_resource:call_get_channels(Data#data.id, Data#data.mod),
    Pending = maps:from_list(
        [{ChannelID, {error, connecting}} || {ChannelID, _Conf} <- ChannelIdConfigTuples]
    ),
    Data#data{added_channels = maps:merge(Data#data.added_channels, Pending)}.
%% Install each {ChannelID, Config} pair into the resource state, one at a
%% time, tracking per-channel status in `added_channels'.  A failing channel
%% is recorded as `{error, Reason}' and raises an alarm, but does not stop
%% the remaining channels from being attempted.
add_channels_in_list([], Data) ->
    Data;
add_channels_in_list([{ChannelID, ChannelConfig} | Rest], Data) ->
    case
        emqx_resource:call_add_channel(
            Data#data.id, Data#data.mod, Data#data.state, ChannelID, ChannelConfig
        )
    of
        {ok, NewState} ->
            AddedChannelsMap = Data#data.added_channels,
            %% Set the channel status to connecting to indicate that
            %% we have not yet performed the initial health_check
            NewAddedChannelsMap = maps:put(ChannelID, connecting, AddedChannelsMap),
            NewData = Data#data{
                state = NewState,
                added_channels = NewAddedChannelsMap
            },
            add_channels_in_list(Rest, NewData);
        {error, Reason} = Error ->
            ?SLOG(warning, #{
                msg => add_channel_failed,
                id => Data#data.id,
                channel_id => ChannelID,
                reason => Reason
            }),
            AddedChannelsMap = Data#data.added_channels,
            NewAddedChannelsMap = maps:put(ChannelID, Error, AddedChannelsMap),
            NewData = Data#data{
                added_channels = NewAddedChannelsMap
            },
            %% Raise an alarm since the channel could not be added
            _ = maybe_alarm(disconnected, ChannelID, Error, no_prev_error),
            add_channels_in_list(Rest, NewData)
    end.
maybe_stop_resource(#data{status = Status} = Data) when Status /= stopped ->
@ -511,40 +622,210 @@ stop_resource(#data{state = ResState, id = ResId} = Data) ->
%% The callback mod should make sure the resource is stopped after on_stop/2
%% is returned.
HasAllocatedResources = emqx_resource:has_allocated_resources(ResId),
%% Before stop is called we remove all the channels from the resource
NewData = remove_channels(Data),
case ResState =/= undefined orelse HasAllocatedResources of
true ->
%% we clear the allocated resources after stop is successful
emqx_resource:call_stop(Data#data.id, Data#data.mod, ResState);
emqx_resource:call_stop(NewData#data.id, NewData#data.mod, ResState);
false ->
ok
end,
_ = maybe_clear_alarm(ResId),
ok = emqx_metrics_worker:reset_metrics(?RES_METRICS, ResId),
Data#data{status = stopped}.
NewData#data{status = stopped}.
%% Uninstall every tracked channel from the resource state, dropping the
%% entries from the added_channels map as well (KeepInChannelMap = false).
remove_channels(Data) ->
    AllChannelIds = maps:keys(Data#data.added_channels),
    remove_channels_in_list(AllChannelIds, Data, false).
%% Uninstall the listed channels from the resource state.  When
%% `KeepInChannelMap' is true the per-channel status entries stay in
%% `added_channels' (the resource is merely not connected); when false the
%% entries and their alarms are dropped for good.  A failing removal is
%% logged and the remaining channels are still processed.
remove_channels_in_list([], Data, _KeepInChannelMap) ->
    Data;
remove_channels_in_list([ChannelID | Rest], Data, KeepInChannelMap) ->
    AddedChannelsMap = Data#data.added_channels,
    NewAddedChannelsMap =
        case KeepInChannelMap of
            true ->
                AddedChannelsMap;
            false ->
                maybe_clear_alarm(ChannelID),
                maps:remove(ChannelID, AddedChannelsMap)
        end,
    case safe_call_remove_channel(Data#data.id, Data#data.mod, Data#data.state, ChannelID) of
        {ok, NewState} ->
            NewData = Data#data{
                state = NewState,
                added_channels = NewAddedChannelsMap
            },
            remove_channels_in_list(Rest, NewData, KeepInChannelMap);
        {error, Reason} ->
            ?SLOG(warning, #{
                msg => remove_channel_failed,
                id => Data#data.id,
                channel_id => ChannelID,
                reason => Reason
            }),
            NewData = Data#data{
                added_channels = NewAddedChannelsMap
            },
            remove_channels_in_list(Rest, NewData, KeepInChannelMap)
    end.
%% Skip the callback when the resource was never started (state is
%% undefined); otherwise delegate to the module's on_remove_channel/3.
safe_call_remove_channel(_ResId, _Mod, undefined = State, _ChannelID) ->
    {ok, State};
safe_call_remove_channel(ResId, Mod, State, ChannelID) ->
    emqx_resource:call_remove_channel(ResId, Mod, State, ChannelID).
%% Build a random resource id (with the test prefix) for dry-run probes.
make_test_id() ->
    RandId = iolist_to_binary(emqx_utils:gen_id(16)),
    <<?TEST_ID_PREFIX, RandId/binary>>.
%% Handle an add_channel call while connected: install the channel only if
%% it is not already tracked with a non-error status.
handle_add_channel(From, Data, ChannelId, ChannelConfig) ->
    Channels = Data#data.added_channels,
    case maps:get(ChannelId, Channels, {error, not_added}) of
        {error, _Reason} ->
            %% The channel is not installed in the connector state
            %% We need to install it
            handle_add_channel_need_insert(From, Data, ChannelId, Data, ChannelConfig);
        _ ->
            %% The channel is already installed in the connector state
            %% We don't need to install it again
            {keep_state_and_data, [{reply, From, ok}]}
    end.
%% Install the channel and schedule an immediate health check.
%% NOTE(review): `Data' appears twice in this head (matching the call site);
%% both positions bind the same value, so the duplicate argument looks
%% redundant — confirm before simplifying the arity.
handle_add_channel_need_insert(From, Data, ChannelId, Data, ChannelConfig) ->
    NewData = add_channel_need_insert_update_data(Data, ChannelId, ChannelConfig),
    %% Trigger a health check to raise alarm if channel is not healthy
    {keep_state, NewData, [{reply, From, ok}, {state_timeout, 0, health_check}]}.
%% Install the channel via the callback module and record the outcome in
%% `added_channels': `connecting' on success (initial health check still
%% pending), or the `{error, _}' term on failure.
add_channel_need_insert_update_data(Data, ChannelId, ChannelConfig) ->
    case
        emqx_resource:call_add_channel(
            Data#data.id, Data#data.mod, Data#data.state, ChannelId, ChannelConfig
        )
    of
        {ok, NewState} ->
            AddedChannelsMap = Data#data.added_channels,
            %% Setting channel status to connecting to indicate that an health check
            %% has not been performed yet
            NewAddedChannelsMap = maps:put(ChannelId, connecting, AddedChannelsMap),
            UpdatedData = Data#data{
                state = NewState,
                added_channels = NewAddedChannelsMap
            },
            update_state(UpdatedData, Data);
        {error, _Reason} = Error ->
            ChannelsMap = Data#data.added_channels,
            NewChannelsMap = maps:put(ChannelId, Error, ChannelsMap),
            UpdatedData = Data#data{
                added_channels = NewChannelsMap
            },
            update_state(UpdatedData, Data)
    end.
%% Handle add_channel while the resource is not connected.
handle_not_connected_add_channel(From, ChannelId, State, Data) ->
    %% When state is not connected the channel will be added to the channels
    %% map but nothing else will happen.
    Channels = Data#data.added_channels,
    NewChannels = maps:put(ChannelId, {error, resource_not_operational}, Channels),
    NewData1 = Data#data{added_channels = NewChannels},
    %% Do channel health check to trigger alarm
    NewData2 = channels_health_check(State, NewData1),
    {keep_state, update_state(NewData2, Data), [{reply, From, ok}]}.
%% Handle a remove_channel call while connected: always clear any alarm,
%% then either drop the tracking entry (channel was never installed) or
%% uninstall the channel from the connector state.
handle_remove_channel(From, ChannelId, Data) ->
    Channels = Data#data.added_channels,
    %% Deactivate alarm
    _ = maybe_clear_alarm(ChannelId),
    case maps:get(ChannelId, Channels, {error, not_added}) of
        {error, _} ->
            %% The channel is already not installed in the connector state.
            %% We still need to remove it from the added_channels map
            AddedChannels = Data#data.added_channels,
            NewAddedChannels = maps:remove(ChannelId, AddedChannels),
            NewData = Data#data{
                added_channels = NewAddedChannels
            },
            {keep_state, NewData, [{reply, From, ok}]};
        _ ->
            %% The channel is installed in the connector state
            handle_remove_channel_exists(From, ChannelId, Data)
    end.
%% Uninstall a channel that is actually present in the connector state.
%% On failure the channel stays tracked and the error is returned to the caller.
handle_remove_channel_exists(From, ChannelId, Data) ->
    case
        emqx_resource:call_remove_channel(
            Data#data.id, Data#data.mod, Data#data.state, ChannelId
        )
    of
        {ok, NewState} ->
            AddedChannelsMap = Data#data.added_channels,
            NewAddedChannelsMap = maps:remove(ChannelId, AddedChannelsMap),
            UpdatedData = Data#data{
                state = NewState,
                added_channels = NewAddedChannelsMap
            },
            {keep_state, update_state(UpdatedData, Data), [{reply, From, ok}]};
        {error, Reason} = Error ->
            %% Log the error as a warning
            ?SLOG(warning, #{
                msg => remove_channel_failed,
                id => Data#data.id,
                channel_id => ChannelId,
                reason => Reason
            }),
            {keep_state_and_data, [{reply, From, Error}]}
    end.
%% Handle remove_channel while the resource is not connected.
handle_not_connected_remove_channel(From, ChannelId, Data) ->
    %% When state is not connected the channel will be removed from the channels
    %% map but nothing else will happen.
    Channels = Data#data.added_channels,
    NewChannels = maps:remove(ChannelId, Channels),
    NewData = Data#data{added_channels = NewChannels},
    _ = maybe_clear_alarm(ChannelId),
    {keep_state, update_state(NewData, Data), [{reply, From, ok}]}.
handle_manually_health_check(From, Data) ->
with_health_check(
Data,
fun(Status, UpdatedData) ->
Actions = [{reply, From, {ok, Status}}],
{next_state, Status, UpdatedData, Actions}
{next_state, Status, channels_health_check(Status, UpdatedData), Actions}
end
).
%% Reply to a manual channel health check using the cached per-channel
%% status.  If the resource never started (state undefined) report
%% disconnected; unknown channel ids yield {error, channel_not_found}.
handle_manually_channel_health_check(From, #data{state = undefined}, _ChannelId) ->
    {keep_state_and_data, [{reply, From, {ok, disconnected}}]};
handle_manually_channel_health_check(
    From,
    #data{added_channels = Channels} = _Data,
    ChannelId
) when
    is_map_key(ChannelId, Channels)
->
    {keep_state_and_data, [{reply, From, maps:get(ChannelId, Channels)}]};
handle_manually_channel_health_check(
    From,
    _Data,
    _ChannelId
) ->
    {keep_state_and_data, [{reply, From, {error, channel_not_found}}]}.
%% Query the callback module for the current status of an installed channel.
get_channel_status_channel_added(#data{id = ResId, mod = Mod, state = State}, ChannelId) ->
    emqx_resource:call_channel_health_check(ResId, ChannelId, Mod, State).
handle_connecting_health_check(Data) ->
with_health_check(
Data,
fun
(connected, UpdatedData) ->
{next_state, connected, UpdatedData};
{next_state, connected, channels_health_check(connected, UpdatedData)};
(connecting, UpdatedData) ->
{keep_state, UpdatedData, health_check_actions(UpdatedData)};
{keep_state, channels_health_check(connecting, UpdatedData),
health_check_actions(UpdatedData)};
(disconnected, UpdatedData) ->
{next_state, disconnected, UpdatedData}
{next_state, disconnected, channels_health_check(disconnected, UpdatedData)}
end
).
@ -553,14 +834,15 @@ handle_connected_health_check(Data) ->
Data,
fun
(connected, UpdatedData) ->
{keep_state, UpdatedData, health_check_actions(UpdatedData)};
{keep_state, channels_health_check(connected, UpdatedData),
health_check_actions(UpdatedData)};
(Status, UpdatedData) ->
?SLOG(warning, #{
msg => "health_check_failed",
id => Data#data.id,
status => Status
}),
{next_state, Status, UpdatedData}
{next_state, Status, channels_health_check(Status, UpdatedData)}
end
).
@ -577,6 +859,126 @@ with_health_check(#data{error = PrevError} = Data, Func) ->
},
Func(Status, update_state(UpdatedData, Data)).
%% Per-channel bookkeeping run after every resource health check.
%% Connected: retry installing channels currently in an error state, then
%% refresh the status of all installed channels.  Not connected: uninstall
%% every channel (keeping their status entries), mark them all with a
%% not-connected error, and raise alarms.
channels_health_check(connected = _ResourceStatus, Data0) ->
    Channels = maps:to_list(Data0#data.added_channels),
    %% All channels with an error status are considered not added
    ChannelsNotAdded = [
        ChannelId
     || {ChannelId, Status} <- Channels,
        not is_channel_added(Status)
    ],
    %% Attempt to add channels that are not added
    ChannelsNotAddedWithConfigs = get_config_for_channels(Data0, ChannelsNotAdded),
    Data1 = add_channels_in_list(ChannelsNotAddedWithConfigs, Data0),
    %% Now that we have done the adding, we can get the status of all channels
    Data2 = channel_status_for_all_channels(Data1),
    update_state(Data2, Data0);
channels_health_check(ResourceStatus, Data0) ->
    %% Whenever the resource is not connected:
    %% 1. Remove all added channels
    %% 2. Change the status to an error status
    %% 3. Raise alarms
    Channels = Data0#data.added_channels,
    ChannelsToRemove = [
        ChannelId
     || {ChannelId, Status} <- maps:to_list(Channels),
        is_channel_added(Status)
    ],
    Data1 = remove_channels_in_list(ChannelsToRemove, Data0, true),
    ChannelsWithNewAndOldStatuses =
        [
            {ChannelId, OldStatus,
                {error, resource_not_connected_channel_error_msg(ResourceStatus, ChannelId, Data1)}}
         || {ChannelId, OldStatus} <- maps:to_list(Data1#data.added_channels)
        ],
    %% Raise alarms
    _ = lists:foreach(
        fun({ChannelId, OldStatus, NewStatus}) ->
            _ = maybe_alarm(NewStatus, ChannelId, NewStatus, OldStatus)
        end,
        ChannelsWithNewAndOldStatuses
    ),
    %% Update the channels map
    NewChannels = lists:foldl(
        fun({ChannelId, _, NewStatus}, Acc) ->
            maps:put(ChannelId, NewStatus, Acc)
        end,
        Channels,
        ChannelsWithNewAndOldStatuses
    ),
    Data2 = Data1#data{added_channels = NewChannels},
    update_state(Data2, Data0).
%% Human-readable error describing why a channel is unusable: its parent
%% resource is not connected.
resource_not_connected_channel_error_msg(ResourceStatus, ChannelId, Data1) ->
    Msg = io_lib:format(
        "Resource ~s for channel ~s is not connected. Resource status: ~p",
        [Data1#data.id, ChannelId, ResourceStatus]
    ),
    iolist_to_binary(Msg).
%% Refresh the status of every installed channel: query each one, uninstall
%% those that now report an error (keeping their status entries), and
%% raise/clear alarms according to the new statuses.
channel_status_for_all_channels(Data) ->
    Channels = maps:to_list(Data#data.added_channels),
    AddedChannelsWithOldAndNewStatus = [
        {ChannelId, OldStatus, get_channel_status_channel_added(Data, ChannelId)}
     || {ChannelId, OldStatus} <- Channels,
        is_channel_added(OldStatus)
    ],
    %% Remove the added channels with a new error statuses
    ChannelsToRemove = [
        ChannelId
     || {ChannelId, _, {error, _}} <- AddedChannelsWithOldAndNewStatus
    ],
    Data1 = remove_channels_in_list(ChannelsToRemove, Data, true),
    %% Raise/clear alarms
    lists:foreach(
        fun
            ({ID, _OldStatus, connected}) ->
                _ = maybe_clear_alarm(ID);
            ({ID, OldStatus, NewStatus}) ->
                _ = maybe_alarm(NewStatus, ID, NewStatus, OldStatus)
        end,
        AddedChannelsWithOldAndNewStatus
    ),
    %% Update the ChannelsMap
    ChannelsMap = Data1#data.added_channels,
    NewChannelsMap =
        lists:foldl(
            fun({ChannelId, _, NewStatus}, Acc) ->
                maps:put(ChannelId, NewStatus, Acc)
            end,
            ChannelsMap,
            AddedChannelsWithOldAndNewStatus
        ),
    Data1#data{added_channels = NewChannelsMap}.
%% A channel counts as "added" (installed in the connector state) unless
%% its tracked status is an {error, _} term.
is_channel_added(Status) ->
    case Status of
        {error, _} -> false;
        _ -> true
    end.
%% Pair each channel id with its configuration as reported by the callback
%% module's on_get_channels/1.  Ids for which no configuration exists are
%% silently dropped.
get_config_for_channels(Data0, ChannelsWithoutConfig) ->
    AllChannels = emqx_resource:call_get_channels(Data0#data.id, Data0#data.mod),
    ConfigById = maps:from_list(AllChannels),
    lists:filtermap(
        fun(ChannelId) ->
            case maps:find(ChannelId, ConfigById) of
                {ok, Conf} -> {true, {ChannelId, Conf}};
                error -> false
            end
        end,
        ChannelsWithoutConfig
    ).
update_state(Data) ->
update_state(Data, undefined).
@ -600,7 +1002,8 @@ maybe_alarm(_Status, ResId, Error, _PrevError) ->
HrError =
case Error of
{error, undefined} -> <<"Unknown reason">>;
{error, Reason} -> emqx_utils:readable_error_msg(Reason)
{error, Reason} -> emqx_utils:readable_error_msg(Reason);
Error -> emqx_utils:readable_error_msg(Error)
end,
emqx_alarm:safe_activate(
ResId,
@ -663,7 +1066,8 @@ data_record_to_external_map(Data) ->
query_mode => Data#data.query_mode,
config => Data#data.config,
status => Data#data.status,
state => Data#data.state
state => Data#data.state,
added_channels => Data#data.added_channels
}.
-spec wait_for_ready(resource_id(), integer()) -> ok | timeout | {error, term()}.

View File

@ -26,7 +26,14 @@
-export([init/1]).
ensure_child(ResId, Group, ResourceType, Config, Opts) ->
_ = supervisor:start_child(?MODULE, child_spec(ResId, Group, ResourceType, Config, Opts)),
case supervisor:start_child(?MODULE, child_spec(ResId, Group, ResourceType, Config, Opts)) of
{error, Reason} ->
%% This should not happen in production but it can be a huge time sink in
%% development environments if the error is just silently ignored.
error(Reason);
_ ->
ok
end,
ok.
delete_child(ResId) ->

View File

@ -167,7 +167,7 @@ t_create_remove_local(_) ->
?assertMatch(ok, emqx_resource:remove_local(?ID)),
?assertMatch(
?RESOURCE_ERROR(not_found),
{error, not_found},
emqx_resource:query(?ID, get_state)
),
@ -235,7 +235,7 @@ t_query(_) ->
{ok, #{pid := _}} = emqx_resource:query(?ID, get_state),
?assertMatch(
?RESOURCE_ERROR(not_found),
{error, not_found},
emqx_resource:query(<<"unknown">>, get_state)
),

View File

@ -30,7 +30,7 @@
{profiles, [
{test, [
{deps, [
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.9.0"}}}
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.9.1"}}}
]}
]}
]}.

View File

@ -43,6 +43,23 @@
%%--------------------------------------------------------------------
%% APIs
%%--------------------------------------------------------------------
%% Resolve a binary bridge ID into an action descriptor tuple.
%% Returns {bridge_v2, Type, Name} for bridge V2 types (converting an old
%% V1 type name to its V2 equivalent when such a mapping exists), or
%% {bridge, Type, Name, ResourceId} for plain V1 bridges.
parse_action(BridgeId) when is_binary(BridgeId) ->
{Type, Name} = emqx_bridge_resource:parse_bridge_id(BridgeId),
case emqx_bridge_v2:is_bridge_v2_type(Type) of
true ->
%% Could be an old bridge V1 type that should be converted to a V2 type
try emqx_bridge_v2:bridge_v1_type_to_bridge_v2_type(Type) of
BridgeV2Type ->
{bridge_v2, BridgeV2Type, Name}
catch
_:_ ->
%% We got a bridge v2 type that is not also a bridge v1
%% type, so no conversion is needed; keep Type as-is.
{bridge_v2, Type, Name}
end;
false ->
%% Plain V1 bridge: carry the precomputed resource id alongside.
{bridge, Type, Name, emqx_bridge_resource:resource_id(Type, Name)}
end;
parse_action(#{function := ActionFunc} = Action) ->
{Mod, Func} = parse_action_func(ActionFunc),
Res = #{mod => Mod, func => Func},

View File

@ -515,11 +515,8 @@ do_delete_rule_index(#{id := Id, from := From}) ->
parse_actions(Actions) ->
[do_parse_action(Act) || Act <- Actions].
do_parse_action(Action) when is_map(Action) ->
emqx_rule_actions:parse_action(Action);
do_parse_action(BridgeId) when is_binary(BridgeId) ->
{Type, Name} = emqx_bridge_resource:parse_bridge_id(BridgeId),
{bridge, Type, Name, emqx_bridge_resource:resource_id(Type, Name)}.
do_parse_action(Action) ->
emqx_rule_actions:parse_action(Action).
get_all_records(Tab) ->
[Rule#{id => Id} || {Id, Rule} <- ets:tab2list(Tab)].

View File

@ -521,6 +521,8 @@ format_action(Actions) ->
do_format_action({bridge, BridgeType, BridgeName, _ResId}) ->
emqx_bridge_resource:bridge_id(BridgeType, BridgeName);
do_format_action({bridge_v2, BridgeType, BridgeName}) ->
emqx_bridge_resource:bridge_id(BridgeType, BridgeName);
do_format_action(#{mod := Mod, func := Func, args := Args}) ->
#{
function => printable_function_name(Mod, Func),

View File

@ -361,6 +361,33 @@ do_handle_action(RuleId, {bridge, BridgeType, BridgeName, ResId}, Selected, _Env
Result ->
Result
end;
%% Handle a rule action targeting a bridge V2: trace the dispatch, send the
%% selected rule output to the bridge, and translate "bridge unavailable"
%% errors into an out_of_service throw for the caller.
do_handle_action(
RuleId,
{bridge_v2, BridgeType, BridgeName},
Selected,
_Envs
) ->
?TRACE(
"BRIDGE",
"bridge_action",
#{bridge_id => {bridge_v2, BridgeType, BridgeName}}
),
%% Reply callback: increments this rule's action metrics when the send
%% completes (reply_dropped => true presumably counts dropped replies
%% as well — TODO confirm against inc_action_metrics/2).
ReplyTo = {fun ?MODULE:inc_action_metrics/2, [RuleId], #{reply_dropped => true}},
case
emqx_bridge_v2:send_message(
BridgeType,
BridgeName,
Selected,
#{reply_to => ReplyTo}
)
of
%% Missing/stopped bridge or a down resource: signal out_of_service
%% so the caller can account for it (the next clause notes the
%% handler function may also throw 'out_of_service').
{error, Reason} when Reason == bridge_not_found; Reason == bridge_stopped ->
throw(out_of_service);
?RESOURCE_ERROR_M(R, _) when ?IS_RES_DOWN(R) ->
throw(out_of_service);
Result ->
Result
end;
do_handle_action(RuleId, #{mod := Mod, func := Func} = Action, Selected, Envs) ->
%% the function can also throw 'out_of_service'
Args = maps:get(args, Action, []),

View File

@ -1,6 +1,6 @@
{application, emqx_s3, [
{description, "EMQX S3"},
{vsn, "5.0.9"},
{vsn, "5.0.10"},
{modules, []},
{registered, [emqx_s3_sup]},
{applications, [

View File

@ -74,7 +74,7 @@ fields(s3) ->
%% not used in a `receive ... after' block, just timestamp comparison
emqx_schema:duration_s(),
#{
default => "1h",
default => <<"1h">>,
desc => ?DESC("url_expire_time"),
required => false
}
@ -83,7 +83,7 @@ fields(s3) ->
mk(
emqx_schema:bytesize(),
#{
default => "5mb",
default => <<"5mb">>,
desc => ?DESC("min_part_size"),
required => true,
validator => fun part_size_validator/1
@ -93,7 +93,7 @@ fields(s3) ->
mk(
emqx_schema:bytesize(),
#{
default => "5gb",
default => <<"5gb">>,
desc => ?DESC("max_part_size"),
required => true,
validator => fun part_size_validator/1

View File

@ -28,6 +28,8 @@
-define(NOT_FOUND(REASON), {404, ?ERROR_MSG('NOT_FOUND', REASON)}).
-define(METHOD_NOT_ALLOWED, 405).
-define(INTERNAL_ERROR(REASON), {500, ?ERROR_MSG('INTERNAL_ERROR', REASON)}).
-define(NOT_IMPLEMENTED, 501).

View File

@ -1 +0,0 @@
升级至 quicer 版本 0.0.200,为启用 OpenSSL3 以支持 QUIC 传输做准备。

View File

@ -0,0 +1 @@
Upgraded jq library from v0.3.10 to v0.3.11. In this version, jq_port programs are initiated on-demand and will not appear in users' processes unless the jq function in EMQX is used. Additionally, idle jq_port programs will auto-terminate after a set period. Note: Most EMQX users, running jq in NIF mode, will be unaffected by this update.

View File

@ -1,2 +0,0 @@
更新 QUIC 栈至 msquic 2.2.3

View File

@ -0,0 +1,2 @@
Fix rpc schema, ensure client/server use same transport driver.

View File

@ -0,0 +1,5 @@
Fixed the issue where the node could not start after executing `./bin/emqx data import [FILE]`.
Also, the API key's `apikey_key` is now strongly bound to its `apikey_name`:
- `apikey_key`: when an API key is created via the dashboard, a unique `apikey_key` is generated from the given human-readable `apikey_name`.
- `apikey_name`: when an API key is created from a bootstrap file, a unique `apikey_name` is generated from the given `apikey_key`.

View File

@ -0,0 +1,4 @@
Fix schema: align the RPC client SSL port with the configured server port.
Also ensure the RPC ports are opened in the Helm chart.

View File

@ -0,0 +1,3 @@
Upgraded the opentelemetry library to v1.3.1-emqx.
This opentelemetry release fixes invalid metrics timestamps in the exported metrics.

View File

@ -14,8 +14,8 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 5.3.1-alpha.1
version: 5.3.1-alpha.2
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 5.3.1-alpha.1
appVersion: 5.3.1-alpha.2

View File

@ -121,6 +121,8 @@ spec:
{{- end }}
- name: ekka
containerPort: 4370
- name: genrpc-manual
containerPort: 5369
envFrom:
- configMapRef:
name: {{ include "emqx.fullname" . }}-env

View File

@ -121,6 +121,8 @@ spec:
{{- end }}
- name: ekka
containerPort: 4370
- name: genrpc-manual
containerPort: 5369
envFrom:
- configMapRef:
name: {{ include "emqx.fullname" . }}-env

18
mix.exs
View File

@ -56,7 +56,7 @@ defmodule EMQXUmbrella.MixProject do
{:esockd, github: "emqx/esockd", tag: "5.9.7", override: true},
{:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.8.0-emqx-1", override: true},
{:ekka, github: "emqx/ekka", tag: "0.15.16", override: true},
{:gen_rpc, github: "emqx/gen_rpc", tag: "3.2.0", override: true},
{:gen_rpc, github: "emqx/gen_rpc", tag: "3.2.1", override: true},
{:grpc, github: "emqx/grpc-erl", tag: "0.6.8", override: true},
{:minirest, github: "emqx/minirest", tag: "1.3.13", override: true},
{:ecpool, github: "emqx/ecpool", tag: "0.5.4", override: true},
@ -64,7 +64,7 @@ defmodule EMQXUmbrella.MixProject do
{:pbkdf2, github: "emqx/erlang-pbkdf2", tag: "2.0.4", override: true},
# maybe forbid to fetch quicer
{:emqtt,
github: "emqx/emqtt", tag: "1.9.0", override: true, system_env: maybe_no_quic_env()},
github: "emqx/emqtt", tag: "1.9.1", override: true, system_env: maybe_no_quic_env()},
{:rulesql, github: "emqx/rulesql", tag: "0.1.7"},
{:observer_cli, "1.7.1"},
{:system_monitor, github: "ieQu1/system_monitor", tag: "3.0.3"},
@ -102,31 +102,31 @@ defmodule EMQXUmbrella.MixProject do
{:opentelemetry_api,
github: "emqx/opentelemetry-erlang",
sparse: "apps/opentelemetry_api",
tag: "v1.3.0-emqx",
tag: "v1.3.1-emqx",
override: true,
runtime: false},
{:opentelemetry,
github: "emqx/opentelemetry-erlang",
sparse: "apps/opentelemetry",
tag: "v1.3.0-emqx",
tag: "v1.3.1-emqx",
override: true,
runtime: false},
{:opentelemetry_api_experimental,
github: "emqx/opentelemetry-erlang",
sparse: "apps/opentelemetry_api_experimental",
tag: "v1.3.0-emqx",
tag: "v1.3.1-emqx",
override: true,
runtime: false},
{:opentelemetry_experimental,
github: "emqx/opentelemetry-erlang",
sparse: "apps/opentelemetry_experimental",
tag: "v1.3.0-emqx",
tag: "v1.3.1-emqx",
override: true,
runtime: false},
{:opentelemetry_exporter,
github: "emqx/opentelemetry-erlang",
sparse: "apps/opentelemetry_exporter",
tag: "v1.3.0-emqx",
tag: "v1.3.1-emqx",
override: true,
runtime: false}
] ++
@ -222,7 +222,7 @@ defmodule EMQXUmbrella.MixProject do
[
{:hstreamdb_erl, github: "hstreamdb/hstreamdb_erl", tag: "0.4.5+v0.16.1"},
{:influxdb, github: "emqx/influxdb-client-erl", tag: "1.1.11", override: true},
{:wolff, github: "kafka4beam/wolff", tag: "1.7.7"},
{:wolff, github: "kafka4beam/wolff", tag: "1.8.0"},
{:kafka_protocol, github: "kafka4beam/kafka_protocol", tag: "4.1.3", override: true},
{:brod_gssapi, github: "kafka4beam/brod_gssapi", tag: "v0.1.0"},
{:brod, github: "kafka4beam/brod", tag: "3.16.8"},
@ -814,7 +814,7 @@ defmodule EMQXUmbrella.MixProject do
defp jq_dep() do
if enable_jq?(),
do: [{:jq, github: "emqx/jq", tag: "v0.3.10", override: true}],
do: [{:jq, github: "emqx/jq", tag: "v0.3.11", override: true}],
else: []
end

View File

@ -63,13 +63,13 @@
, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.7"}}}
, {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.8.0-emqx-1"}}}
, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.16"}}}
, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.2.0"}}}
, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.2.1"}}}
, {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.8"}}}
, {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.13"}}}
, {ecpool, {git, "https://github.com/emqx/ecpool", {tag, "0.5.4"}}}
, {replayq, {git, "https://github.com/emqx/replayq.git", {tag, "0.3.7"}}}
, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}
, {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.9.0"}}}
, {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.9.1"}}}
, {rulesql, {git, "https://github.com/emqx/rulesql", {tag, "0.1.7"}}}
, {observer_cli, "1.7.1"} % NOTE: depends on recon 2.5.x
, {system_monitor, {git, "https://github.com/ieQu1/system_monitor", {tag, "3.0.3"}}}
@ -85,13 +85,13 @@
, {jsone, {git, "https://github.com/emqx/jsone.git", {tag, "1.7.1"}}}
, {uuid, {git, "https://github.com/okeuday/uuid.git", {tag, "v2.0.6"}}}
%% trace
, {opentelemetry_api, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.3.0-emqx"}, "apps/opentelemetry_api"}}
, {opentelemetry, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.3.0-emqx"}, "apps/opentelemetry"}}
, {opentelemetry_api, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.3.1-emqx"}, "apps/opentelemetry_api"}}
, {opentelemetry, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.3.1-emqx"}, "apps/opentelemetry"}}
%% log metrics
, {opentelemetry_experimental, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.3.0-emqx"}, "apps/opentelemetry_experimental"}}
, {opentelemetry_api_experimental, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.3.0-emqx"}, "apps/opentelemetry_api_experimental"}}
, {opentelemetry_experimental, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.3.1-emqx"}, "apps/opentelemetry_experimental"}}
, {opentelemetry_api_experimental, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.3.1-emqx"}, "apps/opentelemetry_api_experimental"}}
%% export
, {opentelemetry_exporter, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.3.0-emqx"}, "apps/opentelemetry_exporter"}}
, {opentelemetry_exporter, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.3.1-emqx"}, "apps/opentelemetry_exporter"}}
]}.
{xref_ignores,

Some files were not shown because too many files have changed in this diff Show More