Merge branch 'master' into licence-conf-update
This commit is contained in:
commit
f97ed9bab9
|
@ -1,6 +1,6 @@
|
||||||
Fixes <issue-or-jira-number>
|
Fixes <issue-or-jira-number>
|
||||||
|
|
||||||
<!-- Make sure to target release-50 branch if this PR is intended to fix the issues for the release candidate. -->
|
<!-- Make sure to target release-51 branch if this PR is intended to fix the issues for the release candidate. -->
|
||||||
|
|
||||||
## Summary
|
## Summary
|
||||||
copilot:summary
|
copilot:summary
|
||||||
|
|
|
@ -97,7 +97,6 @@ jobs:
|
||||||
env:
|
env:
|
||||||
PROFILE: ${{ steps.get_profile.outputs.PROFILE }}
|
PROFILE: ${{ steps.get_profile.outputs.PROFILE }}
|
||||||
run: |
|
run: |
|
||||||
PROFILE=$PROFILE make -C source deps-$PROFILE
|
|
||||||
zip -ryq source.zip source/* source/.[^.]*
|
zip -ryq source.zip source/* source/.[^.]*
|
||||||
- uses: actions/upload-artifact@v3
|
- uses: actions/upload-artifact@v3
|
||||||
with:
|
with:
|
||||||
|
|
|
@ -23,7 +23,7 @@ jobs:
|
||||||
matrix:
|
matrix:
|
||||||
profile:
|
profile:
|
||||||
- ['emqx', 'master']
|
- ['emqx', 'master']
|
||||||
- ['emqx-enterprise', 'release-50']
|
- ['emqx-enterprise', 'release-51']
|
||||||
otp:
|
otp:
|
||||||
- 24.3.4.2-3
|
- 24.3.4.2-3
|
||||||
arch:
|
arch:
|
||||||
|
|
|
@ -8,7 +8,7 @@ on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- master
|
- master
|
||||||
- release-50
|
- release-51
|
||||||
pull_request:
|
pull_request:
|
||||||
# GitHub pull_request action is by default triggered when
|
# GitHub pull_request action is by default triggered when
|
||||||
# opened reopened or synchronize,
|
# opened reopened or synchronize,
|
||||||
|
|
10
Makefile
10
Makefile
|
@ -15,7 +15,7 @@ endif
|
||||||
|
|
||||||
# Dashbord version
|
# Dashbord version
|
||||||
# from https://github.com/emqx/emqx-dashboard5
|
# from https://github.com/emqx/emqx-dashboard5
|
||||||
export EMQX_DASHBOARD_VERSION ?= v1.2.5
|
export EMQX_DASHBOARD_VERSION ?= v1.2.5-1
|
||||||
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.7
|
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.7
|
||||||
|
|
||||||
# `:=` should be used here, otherwise the `$(shell ...)` will be executed every time when the variable is used
|
# `:=` should be used here, otherwise the `$(shell ...)` will be executed every time when the variable is used
|
||||||
|
@ -104,7 +104,7 @@ APPS=$(shell $(SCRIPTS)/find-apps.sh)
|
||||||
|
|
||||||
.PHONY: $(APPS:%=%-ct)
|
.PHONY: $(APPS:%=%-ct)
|
||||||
define gen-app-ct-target
|
define gen-app-ct-target
|
||||||
$1-ct: $(REBAR) merge-config
|
$1-ct: $(REBAR) merge-config clean-test-cluster-config
|
||||||
$(eval SUITES := $(shell $(SCRIPTS)/find-suites.sh $1))
|
$(eval SUITES := $(shell $(SCRIPTS)/find-suites.sh $1))
|
||||||
ifneq ($(SUITES),)
|
ifneq ($(SUITES),)
|
||||||
@ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \
|
@ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \
|
||||||
|
@ -127,7 +127,7 @@ endef
|
||||||
$(foreach app,$(APPS),$(eval $(call gen-app-prop-target,$(app))))
|
$(foreach app,$(APPS),$(eval $(call gen-app-prop-target,$(app))))
|
||||||
|
|
||||||
.PHONY: ct-suite
|
.PHONY: ct-suite
|
||||||
ct-suite: $(REBAR) merge-config
|
ct-suite: $(REBAR) merge-config clean-test-cluster-config
|
||||||
ifneq ($(TESTCASE),)
|
ifneq ($(TESTCASE),)
|
||||||
ifneq ($(GROUP),)
|
ifneq ($(GROUP),)
|
||||||
$(REBAR) ct -v --readable=$(CT_READABLE) --name $(CT_NODE_NAME) --suite $(SUITE) --case $(TESTCASE) --group $(GROUP)
|
$(REBAR) ct -v --readable=$(CT_READABLE) --name $(CT_NODE_NAME) --suite $(SUITE) --case $(TESTCASE) --group $(GROUP)
|
||||||
|
@ -294,3 +294,7 @@ fmt: $(REBAR)
|
||||||
@$(SCRIPTS)/erlfmt -w '{apps,lib-ee}/*/{src,include,test}/**/*.{erl,hrl,app.src}'
|
@$(SCRIPTS)/erlfmt -w '{apps,lib-ee}/*/{src,include,test}/**/*.{erl,hrl,app.src}'
|
||||||
@$(SCRIPTS)/erlfmt -w 'rebar.config.erl'
|
@$(SCRIPTS)/erlfmt -w 'rebar.config.erl'
|
||||||
@mix format
|
@mix format
|
||||||
|
|
||||||
|
.PHONY: clean-test-cluster-config
|
||||||
|
clean-test-cluster-config:
|
||||||
|
@rm -f apps/emqx_conf/data/configs/cluster.hocon || true
|
||||||
|
|
|
@ -32,10 +32,10 @@
|
||||||
%% `apps/emqx/src/bpapi/README.md'
|
%% `apps/emqx/src/bpapi/README.md'
|
||||||
|
|
||||||
%% Community edition
|
%% Community edition
|
||||||
-define(EMQX_RELEASE_CE, "5.0.25").
|
-define(EMQX_RELEASE_CE, "5.1.0-alpha.1").
|
||||||
|
|
||||||
%% Enterprise edition
|
%% Enterprise edition
|
||||||
-define(EMQX_RELEASE_EE, "5.0.4").
|
-define(EMQX_RELEASE_EE, "5.1.0-alpha.1").
|
||||||
|
|
||||||
%% the HTTP API version
|
%% the HTTP API version
|
||||||
-define(EMQX_API_VERSION, "5.0").
|
-define(EMQX_API_VERSION, "5.0").
|
||||||
|
|
|
@ -149,6 +149,7 @@ After releasing, let's say, 5.1.0, the following actions should be performed to
|
||||||
|
|
||||||
1. Checkout 5.1.0 tag
|
1. Checkout 5.1.0 tag
|
||||||
1. Build the code
|
1. Build the code
|
||||||
|
1. Replace api version string `"master"` in `apps/emqx/test/emqx_static_checks_data/master.bpapi` with `"5.1"`
|
||||||
1. Rename `apps/emqx/test/emqx_static_checks_data/master.bpapi` to `apps/emqx/test/emqx_static_checks_data/5.1.bpapi`
|
1. Rename `apps/emqx/test/emqx_static_checks_data/master.bpapi` to `apps/emqx/test/emqx_static_checks_data/5.1.bpapi`
|
||||||
1. Add `apps/emqx/test/emqx_static_checks_data/5.1.bpapi` to the repo
|
1. Add `apps/emqx/test/emqx_static_checks_data/5.1.bpapi` to the repo
|
||||||
1. Delete the previous file (e.g. `5.0.bpapi`), unless there is plan to support rolling upgrade from 5.0 to 5.2
|
1. Delete the previous file (e.g. `5.0.bpapi`), unless there is plan to support rolling upgrade from 5.0 to 5.2
|
||||||
|
|
|
@ -2,8 +2,7 @@
|
||||||
{application, emqx, [
|
{application, emqx, [
|
||||||
{id, "emqx"},
|
{id, "emqx"},
|
||||||
{description, "EMQX Core"},
|
{description, "EMQX Core"},
|
||||||
% strict semver, bump manually!
|
{vsn, "5.1.0"},
|
||||||
{vsn, "5.0.27"},
|
|
||||||
{modules, []},
|
{modules, []},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -229,7 +229,7 @@ discard_if_present(ClientID) ->
|
||||||
ok
|
ok
|
||||||
end.
|
end.
|
||||||
|
|
||||||
-spec discard(binary(), emgx_session:session()) -> emgx_session:session().
|
-spec discard(binary(), emqx_session:session()) -> emqx_session:session().
|
||||||
discard(ClientID, Session) ->
|
discard(ClientID, Session) ->
|
||||||
discard_opt(is_store_enabled(), ClientID, Session).
|
discard_opt(is_store_enabled(), ClientID, Session).
|
||||||
|
|
||||||
|
|
|
@ -120,6 +120,3 @@ clientinfo(InitProps) ->
|
||||||
},
|
},
|
||||||
InitProps
|
InitProps
|
||||||
).
|
).
|
||||||
|
|
||||||
toggle_auth(Bool) when is_boolean(Bool) ->
|
|
||||||
emqx_config:put_zone_conf(default, [auth, enable], Bool).
|
|
||||||
|
|
|
@ -98,6 +98,8 @@ init_per_suite(Config) ->
|
||||||
LogLevel = emqx_logger:get_primary_log_level(),
|
LogLevel = emqx_logger:get_primary_log_level(),
|
||||||
ok = emqx_logger:set_log_level(debug),
|
ok = emqx_logger:set_log_level(debug),
|
||||||
application:set_env(ekka, strict_mode, true),
|
application:set_env(ekka, strict_mode, true),
|
||||||
|
emqx_config:erase_all(),
|
||||||
|
emqx_common_test_helpers:stop_apps([]),
|
||||||
emqx_common_test_helpers:boot_modules(all),
|
emqx_common_test_helpers:boot_modules(all),
|
||||||
emqx_common_test_helpers:start_apps([]),
|
emqx_common_test_helpers:start_apps([]),
|
||||||
[{log_level, LogLevel} | Config].
|
[{log_level, LogLevel} | Config].
|
||||||
|
|
|
@ -145,11 +145,12 @@ t_kick(_) ->
|
||||||
t_session_taken(_) ->
|
t_session_taken(_) ->
|
||||||
erlang:process_flag(trap_exit, true),
|
erlang:process_flag(trap_exit, true),
|
||||||
Topic = <<"t/banned">>,
|
Topic = <<"t/banned">>,
|
||||||
ClientId2 = <<"t_session_taken">>,
|
ClientId2 = emqx_guid:to_hexstr(emqx_guid:gen()),
|
||||||
MsgNum = 3,
|
MsgNum = 3,
|
||||||
Connect = fun() ->
|
Connect = fun() ->
|
||||||
|
ClientId = emqx_guid:to_hexstr(emqx_guid:gen()),
|
||||||
{ok, C} = emqtt:start_link([
|
{ok, C} = emqtt:start_link([
|
||||||
{clientid, <<"client1">>},
|
{clientid, ClientId},
|
||||||
{proto_ver, v5},
|
{proto_ver, v5},
|
||||||
{clean_start, false},
|
{clean_start, false},
|
||||||
{properties, #{'Session-Expiry-Interval' => 120}}
|
{properties, #{'Session-Expiry-Interval' => 120}}
|
||||||
|
@ -188,9 +189,9 @@ t_session_taken(_) ->
|
||||||
end,
|
end,
|
||||||
15_000
|
15_000
|
||||||
),
|
),
|
||||||
Publish(),
|
|
||||||
|
|
||||||
C2 = Connect(),
|
C2 = Connect(),
|
||||||
|
Publish(),
|
||||||
?assertEqual(MsgNum, length(receive_messages(MsgNum + 1))),
|
?assertEqual(MsgNum, length(receive_messages(MsgNum + 1))),
|
||||||
ok = emqtt:disconnect(C2),
|
ok = emqtt:disconnect(C2),
|
||||||
|
|
||||||
|
|
|
@ -93,6 +93,7 @@ end_per_group(_Group, _Config) ->
|
||||||
emqx_common_test_helpers:stop_apps([]).
|
emqx_common_test_helpers:stop_apps([]).
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
|
emqx_common_test_helpers:clear_screen(),
|
||||||
Config.
|
Config.
|
||||||
|
|
||||||
end_per_suite(_Config) ->
|
end_per_suite(_Config) ->
|
||||||
|
@ -442,7 +443,7 @@ t_connected_client_count_persistent(Config) when is_list(Config) ->
|
||||||
emqx_cm_process_down
|
emqx_cm_process_down
|
||||||
]
|
]
|
||||||
),
|
),
|
||||||
?assertEqual(0, emqx_cm:get_connected_client_count()),
|
?retry(_Sleep = 100, _Retries = 20, ?assertEqual(0, emqx_cm:get_connected_client_count())),
|
||||||
ok;
|
ok;
|
||||||
t_connected_client_count_persistent({'end', _Config}) ->
|
t_connected_client_count_persistent({'end', _Config}) ->
|
||||||
snabbkaffe:stop(),
|
snabbkaffe:stop(),
|
||||||
|
|
|
@ -892,7 +892,7 @@ t_handle_kicked_publish_will_msg(_) ->
|
||||||
),
|
),
|
||||||
receive
|
receive
|
||||||
{pub, Msg} -> ok
|
{pub, Msg} -> ok
|
||||||
after 200 -> exit(will_message_not_published)
|
after 10_000 -> exit(will_message_not_published)
|
||||||
end.
|
end.
|
||||||
|
|
||||||
t_handle_call_discard(_) ->
|
t_handle_call_discard(_) ->
|
||||||
|
|
|
@ -34,6 +34,7 @@
|
||||||
all() -> emqx_common_test_helpers:all(?MODULE).
|
all() -> emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
|
emqx_common_test_helpers:boot_modules(all),
|
||||||
emqx_common_test_helpers:start_apps([]),
|
emqx_common_test_helpers:start_apps([]),
|
||||||
ok = ekka:start(),
|
ok = ekka:start(),
|
||||||
OldConf = emqx:get_config([zones], #{}),
|
OldConf = emqx:get_config([zones], #{}),
|
||||||
|
|
|
@ -28,6 +28,7 @@
|
||||||
all() -> emqx_common_test_helpers:all(?MODULE).
|
all() -> emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
|
emqx_common_test_helpers:boot_modules(all),
|
||||||
emqx_common_test_helpers:start_apps([]),
|
emqx_common_test_helpers:start_apps([]),
|
||||||
OldSch = erlang:system_flag(schedulers_online, 1),
|
OldSch = erlang:system_flag(schedulers_online, 1),
|
||||||
[{old_sch, OldSch} | Config].
|
[{old_sch, OldSch} | Config].
|
||||||
|
|
|
@ -24,6 +24,7 @@
|
||||||
-include_lib("common_test/include/ct.hrl").
|
-include_lib("common_test/include/ct.hrl").
|
||||||
-include_lib("quicer/include/quicer.hrl").
|
-include_lib("quicer/include/quicer.hrl").
|
||||||
-include_lib("emqx/include/emqx_mqtt.hrl").
|
-include_lib("emqx/include/emqx_mqtt.hrl").
|
||||||
|
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||||
|
|
||||||
suite() ->
|
suite() ->
|
||||||
[{timetrap, {seconds, 30}}].
|
[{timetrap, {seconds, 30}}].
|
||||||
|
@ -1089,6 +1090,11 @@ t_multi_streams_unsub(Config) ->
|
||||||
),
|
),
|
||||||
|
|
||||||
emqtt:unsubscribe_via(C, SubVia, Topic),
|
emqtt:unsubscribe_via(C, SubVia, Topic),
|
||||||
|
?retry(
|
||||||
|
_Sleep2 = 100,
|
||||||
|
_Attempts2 = 50,
|
||||||
|
false = emqx_router:has_routes(Topic)
|
||||||
|
),
|
||||||
|
|
||||||
case emqtt:publish_via(C, PubVia, Topic, #{}, <<6, 7, 8, 9>>, [{qos, PubQos}]) of
|
case emqtt:publish_via(C, PubVia, Topic, #{}, <<6, 7, 8, 9>>, [{qos, PubQos}]) of
|
||||||
ok when PubQos == 0 ->
|
ok when PubQos == 0 ->
|
||||||
|
@ -1569,7 +1575,7 @@ t_multi_streams_remote_shutdown(Config) ->
|
||||||
|
|
||||||
ok = stop_emqx(),
|
ok = stop_emqx(),
|
||||||
%% Client should be closed
|
%% Client should be closed
|
||||||
assert_client_die(C, 100, 50).
|
assert_client_die(C, 100, 200).
|
||||||
|
|
||||||
t_multi_streams_remote_shutdown_with_reconnect(Config) ->
|
t_multi_streams_remote_shutdown_with_reconnect(Config) ->
|
||||||
erlang:process_flag(trap_exit, true),
|
erlang:process_flag(trap_exit, true),
|
||||||
|
|
|
@ -666,7 +666,7 @@ t_local_fallback(Config) when is_list(Config) ->
|
||||||
{true, UsedSubPid1} = last_message(<<"hello1">>, [ConnPid1]),
|
{true, UsedSubPid1} = last_message(<<"hello1">>, [ConnPid1]),
|
||||||
|
|
||||||
rpc:call(Node, emqx, publish, [Message2]),
|
rpc:call(Node, emqx, publish, [Message2]),
|
||||||
{true, UsedSubPid2} = last_message(<<"hello2">>, [ConnPid1]),
|
{true, UsedSubPid2} = last_message(<<"hello2">>, [ConnPid1], 2_000),
|
||||||
|
|
||||||
emqtt:stop(ConnPid1),
|
emqtt:stop(ConnPid1),
|
||||||
stop_slave(Node),
|
stop_slave(Node),
|
||||||
|
|
File diff suppressed because one or more lines are too long
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_bridge, [
|
{application, emqx_bridge, [
|
||||||
{description, "EMQX bridges"},
|
{description, "EMQX bridges"},
|
||||||
{vsn, "0.1.20"},
|
{vsn, "0.1.21"},
|
||||||
{registered, [emqx_bridge_sup]},
|
{registered, [emqx_bridge_sup]},
|
||||||
{mod, {emqx_bridge_app, []}},
|
{mod, {emqx_bridge_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -227,9 +227,13 @@ post_config_update(_, _Req, NewConf, OldConf, _AppEnv) ->
|
||||||
diff_confs(NewConf, OldConf),
|
diff_confs(NewConf, OldConf),
|
||||||
%% The config update will be failed if any task in `perform_bridge_changes` failed.
|
%% The config update will be failed if any task in `perform_bridge_changes` failed.
|
||||||
Result = perform_bridge_changes([
|
Result = perform_bridge_changes([
|
||||||
{fun emqx_bridge_resource:remove/4, Removed},
|
#{action => fun emqx_bridge_resource:remove/4, data => Removed},
|
||||||
{fun emqx_bridge_resource:create/4, Added},
|
#{
|
||||||
{fun emqx_bridge_resource:update/4, Updated}
|
action => fun emqx_bridge_resource:create/4,
|
||||||
|
data => Added,
|
||||||
|
on_exception_fn => fun emqx_bridge_resource:remove/4
|
||||||
|
},
|
||||||
|
#{action => fun emqx_bridge_resource:update/4, data => Updated}
|
||||||
]),
|
]),
|
||||||
ok = unload_hook(),
|
ok = unload_hook(),
|
||||||
ok = load_hook(NewConf),
|
ok = load_hook(NewConf),
|
||||||
|
@ -345,7 +349,8 @@ perform_bridge_changes(Tasks) ->
|
||||||
|
|
||||||
perform_bridge_changes([], Result) ->
|
perform_bridge_changes([], Result) ->
|
||||||
Result;
|
Result;
|
||||||
perform_bridge_changes([{Action, MapConfs} | Tasks], Result0) ->
|
perform_bridge_changes([#{action := Action, data := MapConfs} = Task | Tasks], Result0) ->
|
||||||
|
OnException = maps:get(on_exception_fn, Task, fun(_Type, _Name, _Conf, _Opts) -> ok end),
|
||||||
Result = maps:fold(
|
Result = maps:fold(
|
||||||
fun
|
fun
|
||||||
({_Type, _Name}, _Conf, {error, Reason}) ->
|
({_Type, _Name}, _Conf, {error, Reason}) ->
|
||||||
|
@ -359,9 +364,21 @@ perform_bridge_changes([{Action, MapConfs} | Tasks], Result0) ->
|
||||||
end;
|
end;
|
||||||
({Type, Name}, Conf, _) ->
|
({Type, Name}, Conf, _) ->
|
||||||
ResOpts = emqx_resource:fetch_creation_opts(Conf),
|
ResOpts = emqx_resource:fetch_creation_opts(Conf),
|
||||||
case Action(Type, Name, Conf, ResOpts) of
|
try Action(Type, Name, Conf, ResOpts) of
|
||||||
{error, Reason} -> {error, Reason};
|
{error, Reason} -> {error, Reason};
|
||||||
Return -> Return
|
Return -> Return
|
||||||
|
catch
|
||||||
|
Kind:Error:Stacktrace ->
|
||||||
|
?SLOG(error, #{
|
||||||
|
msg => "bridge_config_update_exception",
|
||||||
|
kind => Kind,
|
||||||
|
error => Error,
|
||||||
|
type => Type,
|
||||||
|
name => Name,
|
||||||
|
stacktrace => Stacktrace
|
||||||
|
}),
|
||||||
|
OnException(Type, Name, Conf, ResOpts),
|
||||||
|
erlang:raise(Kind, Error, Stacktrace)
|
||||||
end
|
end
|
||||||
end,
|
end,
|
||||||
Result0,
|
Result0,
|
||||||
|
|
|
@ -58,14 +58,14 @@
|
||||||
).
|
).
|
||||||
|
|
||||||
-if(?EMQX_RELEASE_EDITION == ee).
|
-if(?EMQX_RELEASE_EDITION == ee).
|
||||||
bridge_to_resource_type(<<"mqtt">>) -> emqx_connector_mqtt;
|
bridge_to_resource_type(<<"mqtt">>) -> emqx_bridge_mqtt_connector;
|
||||||
bridge_to_resource_type(mqtt) -> emqx_connector_mqtt;
|
bridge_to_resource_type(mqtt) -> emqx_bridge_mqtt_connector;
|
||||||
bridge_to_resource_type(<<"webhook">>) -> emqx_connector_http;
|
bridge_to_resource_type(<<"webhook">>) -> emqx_connector_http;
|
||||||
bridge_to_resource_type(webhook) -> emqx_connector_http;
|
bridge_to_resource_type(webhook) -> emqx_connector_http;
|
||||||
bridge_to_resource_type(BridgeType) -> emqx_ee_bridge:resource_type(BridgeType).
|
bridge_to_resource_type(BridgeType) -> emqx_ee_bridge:resource_type(BridgeType).
|
||||||
-else.
|
-else.
|
||||||
bridge_to_resource_type(<<"mqtt">>) -> emqx_connector_mqtt;
|
bridge_to_resource_type(<<"mqtt">>) -> emqx_bridge_mqtt_connector;
|
||||||
bridge_to_resource_type(mqtt) -> emqx_connector_mqtt;
|
bridge_to_resource_type(mqtt) -> emqx_bridge_mqtt_connector;
|
||||||
bridge_to_resource_type(<<"webhook">>) -> emqx_connector_http;
|
bridge_to_resource_type(<<"webhook">>) -> emqx_connector_http;
|
||||||
bridge_to_resource_type(webhook) -> emqx_connector_http.
|
bridge_to_resource_type(webhook) -> emqx_connector_http.
|
||||||
-endif.
|
-endif.
|
||||||
|
|
|
@ -47,7 +47,14 @@
|
||||||
<<"server">> => SERVER,
|
<<"server">> => SERVER,
|
||||||
<<"username">> => <<"user1">>,
|
<<"username">> => <<"user1">>,
|
||||||
<<"password">> => <<"">>,
|
<<"password">> => <<"">>,
|
||||||
<<"proto_ver">> => <<"v5">>
|
<<"proto_ver">> => <<"v5">>,
|
||||||
|
<<"egress">> => #{
|
||||||
|
<<"remote">> => #{
|
||||||
|
<<"topic">> => <<"emqx/${topic}">>,
|
||||||
|
<<"qos">> => <<"${qos}">>,
|
||||||
|
<<"retain">> => false
|
||||||
|
}
|
||||||
|
}
|
||||||
}).
|
}).
|
||||||
-define(MQTT_BRIDGE(SERVER), ?MQTT_BRIDGE(SERVER, <<"mqtt_egress_test_bridge">>)).
|
-define(MQTT_BRIDGE(SERVER), ?MQTT_BRIDGE(SERVER, <<"mqtt_egress_test_bridge">>)).
|
||||||
|
|
||||||
|
|
|
@ -541,7 +541,9 @@ t_write_failure(Config) ->
|
||||||
end),
|
end),
|
||||||
fun(Trace0) ->
|
fun(Trace0) ->
|
||||||
ct:pal("trace: ~p", [Trace0]),
|
ct:pal("trace: ~p", [Trace0]),
|
||||||
Trace = ?of_kind(buffer_worker_flush_nack, Trace0),
|
Trace = ?of_kind(
|
||||||
|
[buffer_worker_flush_nack, buffer_worker_retry_inflight_failed], Trace0
|
||||||
|
),
|
||||||
[#{result := Result} | _] = Trace,
|
[#{result := Result} | _] = Trace,
|
||||||
case Result of
|
case Result of
|
||||||
{async_return, {error, {resource_error, _}}} ->
|
{async_return, {error, {resource_error, _}}} ->
|
||||||
|
@ -606,11 +608,12 @@ t_missing_data(Config) ->
|
||||||
%% to ecql driver
|
%% to ecql driver
|
||||||
?check_trace(
|
?check_trace(
|
||||||
begin
|
begin
|
||||||
?wait_async_action(
|
{_, {ok, _}} =
|
||||||
send_message(Config, #{}),
|
?wait_async_action(
|
||||||
#{?snk_kind := handle_async_reply, result := {error, {8704, _}}},
|
send_message(Config, #{}),
|
||||||
10_000
|
#{?snk_kind := handle_async_reply, result := {error, {8704, _}}},
|
||||||
),
|
30_000
|
||||||
|
),
|
||||||
ok
|
ok
|
||||||
end,
|
end,
|
||||||
fun(Trace0) ->
|
fun(Trace0) ->
|
||||||
|
|
|
@ -1,9 +1,10 @@
|
||||||
%% -*- mode: erlang; -*-
|
%% -*- mode: erlang; -*-
|
||||||
{erl_opts, [debug_info]}.
|
{erl_opts, [debug_info]}.
|
||||||
{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.7.5"}}}
|
{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.7.5"}}}
|
||||||
, {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.2"}}}
|
, {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.3"}}}
|
||||||
, {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0"}}}
|
, {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0"}}}
|
||||||
, {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}}
|
, {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}}
|
||||||
|
, {snappyer, "1.2.9"}
|
||||||
, {emqx_connector, {path, "../../apps/emqx_connector"}}
|
, {emqx_connector, {path, "../../apps/emqx_connector"}}
|
||||||
, {emqx_resource, {path, "../../apps/emqx_resource"}}
|
, {emqx_resource, {path, "../../apps/emqx_resource"}}
|
||||||
, {emqx_bridge, {path, "../../apps/emqx_bridge"}}
|
, {emqx_bridge, {path, "../../apps/emqx_bridge"}}
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_bridge_kafka, [
|
{application, emqx_bridge_kafka, [
|
||||||
{description, "EMQX Enterprise Kafka Bridge"},
|
{description, "EMQX Enterprise Kafka Bridge"},
|
||||||
{vsn, "0.1.2"},
|
{vsn, "0.1.3"},
|
||||||
{registered, [emqx_bridge_kafka_consumer_sup]},
|
{registered, [emqx_bridge_kafka_consumer_sup]},
|
||||||
{applications, [
|
{applications, [
|
||||||
kernel,
|
kernel,
|
||||||
|
|
|
@ -247,7 +247,8 @@ fields(producer_opts) ->
|
||||||
{kafka,
|
{kafka,
|
||||||
mk(ref(producer_kafka_opts), #{
|
mk(ref(producer_kafka_opts), #{
|
||||||
required => true,
|
required => true,
|
||||||
desc => ?DESC(producer_kafka_opts)
|
desc => ?DESC(producer_kafka_opts),
|
||||||
|
validator => fun producer_strategy_key_validator/1
|
||||||
})}
|
})}
|
||||||
];
|
];
|
||||||
fields(producer_kafka_opts) ->
|
fields(producer_kafka_opts) ->
|
||||||
|
@ -459,3 +460,11 @@ consumer_topic_mapping_validator(TopicMapping = [_ | _]) ->
|
||||||
false ->
|
false ->
|
||||||
{error, "Kafka topics must not be repeated in a bridge"}
|
{error, "Kafka topics must not be repeated in a bridge"}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
producer_strategy_key_validator(#{
|
||||||
|
<<"partition_strategy">> := key_dispatch,
|
||||||
|
<<"message">> := #{<<"key">> := ""}
|
||||||
|
}) ->
|
||||||
|
{error, "Message key cannot be empty when `key_dispatch` strategy is used"};
|
||||||
|
producer_strategy_key_validator(_) ->
|
||||||
|
ok.
|
||||||
|
|
|
@ -101,6 +101,10 @@
|
||||||
" the connection parameters."
|
" the connection parameters."
|
||||||
).
|
).
|
||||||
|
|
||||||
|
%% Allocatable resources
|
||||||
|
-define(kafka_client_id, kafka_client_id).
|
||||||
|
-define(kafka_subscriber_id, kafka_subscriber_id).
|
||||||
|
|
||||||
%%-------------------------------------------------------------------------------------
|
%%-------------------------------------------------------------------------------------
|
||||||
%% `emqx_resource' API
|
%% `emqx_resource' API
|
||||||
%%-------------------------------------------------------------------------------------
|
%%-------------------------------------------------------------------------------------
|
||||||
|
@ -140,6 +144,7 @@ on_start(ResourceId, Config) ->
|
||||||
Auth -> [{sasl, emqx_bridge_kafka_impl:sasl(Auth)}]
|
Auth -> [{sasl, emqx_bridge_kafka_impl:sasl(Auth)}]
|
||||||
end,
|
end,
|
||||||
ClientOpts = add_ssl_opts(ClientOpts0, SSL),
|
ClientOpts = add_ssl_opts(ClientOpts0, SSL),
|
||||||
|
ok = emqx_resource:allocate_resource(ResourceId, ?kafka_client_id, ClientID),
|
||||||
case brod:start_client(BootstrapHosts, ClientID, ClientOpts) of
|
case brod:start_client(BootstrapHosts, ClientID, ClientOpts) of
|
||||||
ok ->
|
ok ->
|
||||||
?tp(
|
?tp(
|
||||||
|
@ -163,7 +168,21 @@ on_start(ResourceId, Config) ->
|
||||||
start_consumer(Config, ResourceId, ClientID).
|
start_consumer(Config, ResourceId, ClientID).
|
||||||
|
|
||||||
-spec on_stop(resource_id(), state()) -> ok.
|
-spec on_stop(resource_id(), state()) -> ok.
|
||||||
on_stop(_ResourceID, State) ->
|
on_stop(ResourceId, _State = undefined) ->
|
||||||
|
case emqx_resource:get_allocated_resources(ResourceId) of
|
||||||
|
#{?kafka_client_id := ClientID, ?kafka_subscriber_id := SubscriberId} ->
|
||||||
|
stop_subscriber(SubscriberId),
|
||||||
|
stop_client(ClientID),
|
||||||
|
?tp(kafka_consumer_subcriber_and_client_stopped, #{}),
|
||||||
|
ok;
|
||||||
|
#{?kafka_client_id := ClientID} ->
|
||||||
|
stop_client(ClientID),
|
||||||
|
?tp(kafka_consumer_just_client_stopped, #{}),
|
||||||
|
ok;
|
||||||
|
_ ->
|
||||||
|
ok
|
||||||
|
end;
|
||||||
|
on_stop(_ResourceId, State) ->
|
||||||
#{
|
#{
|
||||||
subscriber_id := SubscriberId,
|
subscriber_id := SubscriberId,
|
||||||
kafka_client_id := ClientID
|
kafka_client_id := ClientID
|
||||||
|
@ -333,6 +352,9 @@ start_consumer(Config, ResourceId, ClientID) ->
|
||||||
%% spawns one worker for each assigned topic-partition
|
%% spawns one worker for each assigned topic-partition
|
||||||
%% automatically, so we should not spawn duplicate workers.
|
%% automatically, so we should not spawn duplicate workers.
|
||||||
SubscriberId = make_subscriber_id(BridgeName),
|
SubscriberId = make_subscriber_id(BridgeName),
|
||||||
|
?tp(kafka_consumer_about_to_start_subscriber, #{}),
|
||||||
|
ok = emqx_resource:allocate_resource(ResourceId, ?kafka_subscriber_id, SubscriberId),
|
||||||
|
?tp(kafka_consumer_subscriber_allocated, #{}),
|
||||||
case emqx_bridge_kafka_consumer_sup:start_child(SubscriberId, GroupSubscriberConfig) of
|
case emqx_bridge_kafka_consumer_sup:start_child(SubscriberId, GroupSubscriberConfig) of
|
||||||
{ok, _ConsumerPid} ->
|
{ok, _ConsumerPid} ->
|
||||||
?tp(
|
?tp(
|
||||||
|
@ -359,7 +381,13 @@ start_consumer(Config, ResourceId, ClientID) ->
|
||||||
stop_subscriber(SubscriberId) ->
|
stop_subscriber(SubscriberId) ->
|
||||||
_ = log_when_error(
|
_ = log_when_error(
|
||||||
fun() ->
|
fun() ->
|
||||||
emqx_bridge_kafka_consumer_sup:ensure_child_deleted(SubscriberId)
|
try
|
||||||
|
emqx_bridge_kafka_consumer_sup:ensure_child_deleted(SubscriberId)
|
||||||
|
catch
|
||||||
|
exit:{noproc, _} ->
|
||||||
|
%% may happen when node is shutting down
|
||||||
|
ok
|
||||||
|
end
|
||||||
end,
|
end,
|
||||||
#{
|
#{
|
||||||
msg => "failed_to_delete_kafka_subscriber",
|
msg => "failed_to_delete_kafka_subscriber",
|
||||||
|
@ -443,16 +471,22 @@ do_get_topic_status(ClientID, KafkaTopic, SubscriberId, NPartitions) ->
|
||||||
end.
|
end.
|
||||||
|
|
||||||
are_subscriber_workers_alive(SubscriberId) ->
|
are_subscriber_workers_alive(SubscriberId) ->
|
||||||
Children = supervisor:which_children(emqx_bridge_kafka_consumer_sup),
|
try
|
||||||
case lists:keyfind(SubscriberId, 1, Children) of
|
Children = supervisor:which_children(emqx_bridge_kafka_consumer_sup),
|
||||||
false ->
|
case lists:keyfind(SubscriberId, 1, Children) of
|
||||||
false;
|
false ->
|
||||||
{_, Pid, _, _} ->
|
false;
|
||||||
Workers = brod_group_subscriber_v2:get_workers(Pid),
|
{_, Pid, _, _} ->
|
||||||
%% we can't enforce the number of partitions on a single
|
Workers = brod_group_subscriber_v2:get_workers(Pid),
|
||||||
%% node, as the group might be spread across an emqx
|
%% we can't enforce the number of partitions on a single
|
||||||
%% cluster.
|
%% node, as the group might be spread across an emqx
|
||||||
lists:all(fun is_process_alive/1, maps:values(Workers))
|
%% cluster.
|
||||||
|
lists:all(fun is_process_alive/1, maps:values(Workers))
|
||||||
|
end
|
||||||
|
catch
|
||||||
|
exit:{shutdown, _} ->
|
||||||
|
%% may happen if node is shutting down
|
||||||
|
false
|
||||||
end.
|
end.
|
||||||
|
|
||||||
log_when_error(Fun, Log) ->
|
log_when_error(Fun, Log) ->
|
||||||
|
|
|
@ -23,6 +23,11 @@
|
||||||
|
|
||||||
-include_lib("emqx/include/logger.hrl").
|
-include_lib("emqx/include/logger.hrl").
|
||||||
|
|
||||||
|
%% Allocatable resources
|
||||||
|
-define(kafka_resource_id, kafka_resource_id).
|
||||||
|
-define(kafka_client_id, kafka_client_id).
|
||||||
|
-define(kafka_producers, kafka_producers).
|
||||||
|
|
||||||
%% TODO: rename this to `kafka_producer' after alias support is added
|
%% TODO: rename this to `kafka_producer' after alias support is added
|
||||||
%% to hocon; keeping this as just `kafka' for backwards compatibility.
|
%% to hocon; keeping this as just `kafka' for backwards compatibility.
|
||||||
-define(BRIDGE_TYPE, kafka).
|
-define(BRIDGE_TYPE, kafka).
|
||||||
|
@ -46,9 +51,11 @@ on_start(InstId, Config) ->
|
||||||
} = Config,
|
} = Config,
|
||||||
BridgeType = ?BRIDGE_TYPE,
|
BridgeType = ?BRIDGE_TYPE,
|
||||||
ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
|
ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
|
||||||
|
ok = emqx_resource:allocate_resource(InstId, ?kafka_resource_id, ResourceId),
|
||||||
_ = maybe_install_wolff_telemetry_handlers(ResourceId),
|
_ = maybe_install_wolff_telemetry_handlers(ResourceId),
|
||||||
Hosts = emqx_bridge_kafka_impl:hosts(Hosts0),
|
Hosts = emqx_bridge_kafka_impl:hosts(Hosts0),
|
||||||
ClientId = emqx_bridge_kafka_impl:make_client_id(BridgeType, BridgeName),
|
ClientId = emqx_bridge_kafka_impl:make_client_id(BridgeType, BridgeName),
|
||||||
|
ok = emqx_resource:allocate_resource(InstId, ?kafka_client_id, ClientId),
|
||||||
ClientConfig = #{
|
ClientConfig = #{
|
||||||
min_metadata_refresh_interval => MinMetaRefreshInterval,
|
min_metadata_refresh_interval => MinMetaRefreshInterval,
|
||||||
connect_timeout => ConnTimeout,
|
connect_timeout => ConnTimeout,
|
||||||
|
@ -86,6 +93,7 @@ on_start(InstId, Config) ->
|
||||||
WolffProducerConfig = producers_config(BridgeName, ClientId, KafkaConfig, IsDryRun),
|
WolffProducerConfig = producers_config(BridgeName, ClientId, KafkaConfig, IsDryRun),
|
||||||
case wolff:ensure_supervised_producers(ClientId, KafkaTopic, WolffProducerConfig) of
|
case wolff:ensure_supervised_producers(ClientId, KafkaTopic, WolffProducerConfig) of
|
||||||
{ok, Producers} ->
|
{ok, Producers} ->
|
||||||
|
ok = emqx_resource:allocate_resource(InstId, ?kafka_producers, Producers),
|
||||||
{ok, #{
|
{ok, #{
|
||||||
message_template => compile_message_template(MessageTemplate),
|
message_template => compile_message_template(MessageTemplate),
|
||||||
client_id => ClientId,
|
client_id => ClientId,
|
||||||
|
@ -120,28 +128,63 @@ on_start(InstId, Config) ->
|
||||||
)
|
)
|
||||||
end.
|
end.
|
||||||
|
|
||||||
on_stop(_InstanceID, #{client_id := ClientID, producers := Producers, resource_id := ResourceID}) ->
|
on_stop(InstanceId, _State) ->
|
||||||
_ = with_log_at_error(
|
case emqx_resource:get_allocated_resources(InstanceId) of
|
||||||
fun() -> wolff:stop_and_delete_supervised_producers(Producers) end,
|
|
||||||
#{
|
#{
|
||||||
msg => "failed_to_delete_kafka_producer",
|
?kafka_client_id := ClientId,
|
||||||
client_id => ClientID
|
?kafka_producers := Producers,
|
||||||
}
|
?kafka_resource_id := ResourceId
|
||||||
),
|
} ->
|
||||||
_ = with_log_at_error(
|
_ = with_log_at_error(
|
||||||
fun() -> wolff:stop_and_delete_supervised_client(ClientID) end,
|
fun() -> wolff:stop_and_delete_supervised_producers(Producers) end,
|
||||||
#{
|
#{
|
||||||
msg => "failed_to_delete_kafka_client",
|
msg => "failed_to_delete_kafka_producer",
|
||||||
client_id => ClientID
|
client_id => ClientId
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
with_log_at_error(
|
_ = with_log_at_error(
|
||||||
fun() -> uninstall_telemetry_handlers(ResourceID) end,
|
fun() -> wolff:stop_and_delete_supervised_client(ClientId) end,
|
||||||
#{
|
#{
|
||||||
msg => "failed_to_uninstall_telemetry_handlers",
|
msg => "failed_to_delete_kafka_client",
|
||||||
client_id => ClientID
|
client_id => ClientId
|
||||||
}
|
}
|
||||||
).
|
),
|
||||||
|
_ = with_log_at_error(
|
||||||
|
fun() -> uninstall_telemetry_handlers(ResourceId) end,
|
||||||
|
#{
|
||||||
|
msg => "failed_to_uninstall_telemetry_handlers",
|
||||||
|
resource_id => ResourceId
|
||||||
|
}
|
||||||
|
),
|
||||||
|
ok;
|
||||||
|
#{?kafka_client_id := ClientId, ?kafka_resource_id := ResourceId} ->
|
||||||
|
_ = with_log_at_error(
|
||||||
|
fun() -> wolff:stop_and_delete_supervised_client(ClientId) end,
|
||||||
|
#{
|
||||||
|
msg => "failed_to_delete_kafka_client",
|
||||||
|
client_id => ClientId
|
||||||
|
}
|
||||||
|
),
|
||||||
|
_ = with_log_at_error(
|
||||||
|
fun() -> uninstall_telemetry_handlers(ResourceId) end,
|
||||||
|
#{
|
||||||
|
msg => "failed_to_uninstall_telemetry_handlers",
|
||||||
|
resource_id => ResourceId
|
||||||
|
}
|
||||||
|
),
|
||||||
|
ok;
|
||||||
|
#{?kafka_resource_id := ResourceId} ->
|
||||||
|
_ = with_log_at_error(
|
||||||
|
fun() -> uninstall_telemetry_handlers(ResourceId) end,
|
||||||
|
#{
|
||||||
|
msg => "failed_to_uninstall_telemetry_handlers",
|
||||||
|
resource_id => ResourceId
|
||||||
|
}
|
||||||
|
),
|
||||||
|
ok;
|
||||||
|
_ ->
|
||||||
|
ok
|
||||||
|
end.
|
||||||
|
|
||||||
on_query(
|
on_query(
|
||||||
_InstId,
|
_InstId,
|
||||||
|
|
|
@ -59,7 +59,9 @@ only_once_tests() ->
|
||||||
t_cluster_group,
|
t_cluster_group,
|
||||||
t_node_joins_existing_cluster,
|
t_node_joins_existing_cluster,
|
||||||
t_cluster_node_down,
|
t_cluster_node_down,
|
||||||
t_multiple_topic_mappings
|
t_multiple_topic_mappings,
|
||||||
|
t_resource_manager_crash_after_subscriber_started,
|
||||||
|
t_resource_manager_crash_before_subscriber_started
|
||||||
].
|
].
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
|
@ -333,6 +335,7 @@ init_per_testcase(TestCase, Config) ->
|
||||||
common_init_per_testcase(TestCase, Config0) ->
|
common_init_per_testcase(TestCase, Config0) ->
|
||||||
ct:timetrap(timer:seconds(60)),
|
ct:timetrap(timer:seconds(60)),
|
||||||
delete_all_bridges(),
|
delete_all_bridges(),
|
||||||
|
emqx_config:delete_override_conf_files(),
|
||||||
KafkaTopic =
|
KafkaTopic =
|
||||||
<<
|
<<
|
||||||
(atom_to_binary(TestCase))/binary,
|
(atom_to_binary(TestCase))/binary,
|
||||||
|
@ -1117,6 +1120,24 @@ stop_async_publisher(Pid) ->
|
||||||
end,
|
end,
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
kill_resource_managers() ->
|
||||||
|
ct:pal("gonna kill resource managers"),
|
||||||
|
lists:foreach(
|
||||||
|
fun({_, Pid, _, _}) ->
|
||||||
|
ct:pal("terminating resource manager ~p", [Pid]),
|
||||||
|
Ref = monitor(process, Pid),
|
||||||
|
exit(Pid, kill),
|
||||||
|
receive
|
||||||
|
{'DOWN', Ref, process, Pid, killed} ->
|
||||||
|
ok
|
||||||
|
after 500 ->
|
||||||
|
ct:fail("pid ~p didn't die!", [Pid])
|
||||||
|
end,
|
||||||
|
ok
|
||||||
|
end,
|
||||||
|
supervisor:which_children(emqx_resource_manager_sup)
|
||||||
|
).
|
||||||
|
|
||||||
%%------------------------------------------------------------------------------
|
%%------------------------------------------------------------------------------
|
||||||
%% Testcases
|
%% Testcases
|
||||||
%%------------------------------------------------------------------------------
|
%%------------------------------------------------------------------------------
|
||||||
|
@ -2019,3 +2040,118 @@ t_begin_offset_earliest(Config) ->
|
||||||
end
|
end
|
||||||
),
|
),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
t_resource_manager_crash_after_subscriber_started(Config) ->
|
||||||
|
?check_trace(
|
||||||
|
begin
|
||||||
|
?force_ordering(
|
||||||
|
#{?snk_kind := kafka_consumer_subscriber_allocated},
|
||||||
|
#{?snk_kind := will_kill_resource_manager}
|
||||||
|
),
|
||||||
|
?force_ordering(
|
||||||
|
#{?snk_kind := resource_manager_killed},
|
||||||
|
#{?snk_kind := kafka_consumer_subscriber_started}
|
||||||
|
),
|
||||||
|
spawn_link(fun() ->
|
||||||
|
?tp(will_kill_resource_manager, #{}),
|
||||||
|
kill_resource_managers(),
|
||||||
|
?tp(resource_manager_killed, #{}),
|
||||||
|
ok
|
||||||
|
end),
|
||||||
|
|
||||||
|
%% even if the resource manager is dead, we can still
|
||||||
|
%% clear the allocated resources.
|
||||||
|
|
||||||
|
%% We avoid asserting only the `config_update_crashed'
|
||||||
|
%% error here because there's a race condition (just a
|
||||||
|
%% problem for the test assertion below) in which the
|
||||||
|
%% `emqx_resource_manager:create/5' call returns a failure
|
||||||
|
%% (not checked) and then `lookup' in that module is
|
||||||
|
%% delayed enough so that the manager supervisor has time
|
||||||
|
%% to restart the manager process and for the latter to
|
||||||
|
%% startup successfully. Occurs frequently in CI...
|
||||||
|
|
||||||
|
{Res, {ok, _}} =
|
||||||
|
?wait_async_action(
|
||||||
|
create_bridge(Config),
|
||||||
|
#{?snk_kind := kafka_consumer_subcriber_and_client_stopped},
|
||||||
|
10_000
|
||||||
|
),
|
||||||
|
case Res of
|
||||||
|
{error, {config_update_crashed, {killed, _}}} ->
|
||||||
|
ok;
|
||||||
|
{ok, _} ->
|
||||||
|
%% the new manager may have had time to startup
|
||||||
|
%% before the resource status cache is read...
|
||||||
|
ok;
|
||||||
|
_ ->
|
||||||
|
ct:fail("unexpected result: ~p", [Res])
|
||||||
|
end,
|
||||||
|
?assertMatch({ok, _}, delete_bridge(Config)),
|
||||||
|
?retry(
|
||||||
|
_Sleep = 50,
|
||||||
|
_Attempts = 50,
|
||||||
|
?assertEqual([], supervisor:which_children(emqx_bridge_kafka_consumer_sup))
|
||||||
|
),
|
||||||
|
ok
|
||||||
|
end,
|
||||||
|
[]
|
||||||
|
),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
t_resource_manager_crash_before_subscriber_started(Config) ->
|
||||||
|
?check_trace(
|
||||||
|
begin
|
||||||
|
?force_ordering(
|
||||||
|
#{?snk_kind := kafka_consumer_client_started},
|
||||||
|
#{?snk_kind := will_kill_resource_manager}
|
||||||
|
),
|
||||||
|
?force_ordering(
|
||||||
|
#{?snk_kind := resource_manager_killed},
|
||||||
|
#{?snk_kind := kafka_consumer_about_to_start_subscriber}
|
||||||
|
),
|
||||||
|
spawn_link(fun() ->
|
||||||
|
?tp(will_kill_resource_manager, #{}),
|
||||||
|
kill_resource_managers(),
|
||||||
|
?tp(resource_manager_killed, #{}),
|
||||||
|
ok
|
||||||
|
end),
|
||||||
|
|
||||||
|
%% even if the resource manager is dead, we can still
|
||||||
|
%% clear the allocated resources.
|
||||||
|
|
||||||
|
%% We avoid asserting only the `config_update_crashed'
|
||||||
|
%% error here because there's a race condition (just a
|
||||||
|
%% problem for the test assertion below) in which the
|
||||||
|
%% `emqx_resource_manager:create/5' call returns a failure
|
||||||
|
%% (not checked) and then `lookup' in that module is
|
||||||
|
%% delayed enough so that the manager supervisor has time
|
||||||
|
%% to restart the manager process and for the latter to
|
||||||
|
%% startup successfully. Occurs frequently in CI...
|
||||||
|
{Res, {ok, _}} =
|
||||||
|
?wait_async_action(
|
||||||
|
create_bridge(Config),
|
||||||
|
#{?snk_kind := kafka_consumer_just_client_stopped},
|
||||||
|
10_000
|
||||||
|
),
|
||||||
|
case Res of
|
||||||
|
{error, {config_update_crashed, {killed, _}}} ->
|
||||||
|
ok;
|
||||||
|
{ok, _} ->
|
||||||
|
%% the new manager may have had time to startup
|
||||||
|
%% before the resource status cache is read...
|
||||||
|
ok;
|
||||||
|
_ ->
|
||||||
|
ct:fail("unexpected result: ~p", [Res])
|
||||||
|
end,
|
||||||
|
?assertMatch({ok, _}, delete_bridge(Config)),
|
||||||
|
?retry(
|
||||||
|
_Sleep = 50,
|
||||||
|
_Attempts = 50,
|
||||||
|
?assertEqual([], supervisor:which_children(emqx_bridge_kafka_consumer_sup))
|
||||||
|
),
|
||||||
|
ok
|
||||||
|
end,
|
||||||
|
[]
|
||||||
|
),
|
||||||
|
ok.
|
||||||
|
|
|
@ -446,6 +446,8 @@ t_failed_creation_then_fix(Config) ->
|
||||||
?assertMatch(#kafka_message{key = BinTime}, KafkaMsg),
|
?assertMatch(#kafka_message{key = BinTime}, KafkaMsg),
|
||||||
%% TODO: refactor those into init/end per testcase
|
%% TODO: refactor those into init/end per testcase
|
||||||
ok = ?PRODUCER:on_stop(ResourceId, State),
|
ok = ?PRODUCER:on_stop(ResourceId, State),
|
||||||
|
?assertEqual([], supervisor:which_children(wolff_client_sup)),
|
||||||
|
?assertEqual([], supervisor:which_children(wolff_producers_sup)),
|
||||||
ok = emqx_bridge_resource:remove(BridgeId),
|
ok = emqx_bridge_resource:remove(BridgeId),
|
||||||
delete_all_bridges(),
|
delete_all_bridges(),
|
||||||
ok.
|
ok.
|
||||||
|
|
|
@ -138,6 +138,36 @@ kafka_consumer_test() ->
|
||||||
|
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
message_key_dispatch_validations_test() ->
|
||||||
|
Conf0 = kafka_producer_new_hocon(),
|
||||||
|
Conf1 =
|
||||||
|
Conf0 ++
|
||||||
|
"\n"
|
||||||
|
"bridges.kafka.myproducer.kafka.message.key = \"\""
|
||||||
|
"\n"
|
||||||
|
"bridges.kafka.myproducer.kafka.partition_strategy = \"key_dispatch\"",
|
||||||
|
Conf = parse(Conf1),
|
||||||
|
?assertMatch(
|
||||||
|
#{
|
||||||
|
<<"kafka">> :=
|
||||||
|
#{
|
||||||
|
<<"partition_strategy">> := <<"key_dispatch">>,
|
||||||
|
<<"message">> := #{<<"key">> := <<>>}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
emqx_utils_maps:deep_get([<<"bridges">>, <<"kafka">>, <<"myproducer">>], Conf)
|
||||||
|
),
|
||||||
|
?assertThrow(
|
||||||
|
{_, [
|
||||||
|
#{
|
||||||
|
path := "bridges.kafka.myproducer.kafka",
|
||||||
|
reason := "Message key cannot be empty when `key_dispatch` strategy is used"
|
||||||
|
}
|
||||||
|
]},
|
||||||
|
check(Conf)
|
||||||
|
),
|
||||||
|
ok.
|
||||||
|
|
||||||
%%===========================================================================
|
%%===========================================================================
|
||||||
%% Helper functions
|
%% Helper functions
|
||||||
%%===========================================================================
|
%%===========================================================================
|
||||||
|
|
|
@ -0,0 +1,32 @@
|
||||||
|
# EMQX MQTT Broker Bridge
|
||||||
|
|
||||||
|
This application connects EMQX to virtually any MQTT broker adhering to either [MQTTv3][1] or [MQTTv5][2] standard. The connection is facilitated through the _MQTT bridge_ abstraction, allowing for the flow of data in both directions: from the remote broker to EMQX (ingress) and from EMQX to the remote broker (egress).
|
||||||
|
|
||||||
|
User can create a rule and easily ingest into a remote MQTT broker by leveraging [EMQX Rules][3].
|
||||||
|
|
||||||
|
|
||||||
|
# Documentation
|
||||||
|
|
||||||
|
- Refer to [Bridge Data into MQTT Broker][4] for how to use EMQX dashboard to set up ingress or egress bridge, or even both at the same time.
|
||||||
|
|
||||||
|
- Refer to [EMQX Rules][3] for the EMQX rules engine introduction.
|
||||||
|
|
||||||
|
|
||||||
|
# HTTP APIs
|
||||||
|
|
||||||
|
Several APIs are provided for bridge management, refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information.
|
||||||
|
|
||||||
|
|
||||||
|
# Contributing
|
||||||
|
|
||||||
|
Please see our [contributing guide](../../CONTRIBUTING.md).
|
||||||
|
|
||||||
|
|
||||||
|
# License
|
||||||
|
|
||||||
|
Apache License 2.0, see [LICENSE](../../APL.txt).
|
||||||
|
|
||||||
|
[1]: https://docs.oasis-open.org/mqtt/mqtt/v3.1.1
|
||||||
|
[2]: https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html
|
||||||
|
[3]: https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html
|
||||||
|
[4]: https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-mqtt.html
|
|
@ -0,0 +1,3 @@
|
||||||
|
{deps, [
|
||||||
|
{emqx, {path, "../../apps/emqx"}}
|
||||||
|
]}.
|
|
@ -0,0 +1,18 @@
|
||||||
|
%% -*- mode: erlang -*-
|
||||||
|
{application, emqx_bridge_mqtt, [
|
||||||
|
{description, "EMQX MQTT Broker Bridge"},
|
||||||
|
{vsn, "0.1.0"},
|
||||||
|
{registered, []},
|
||||||
|
{applications, [
|
||||||
|
kernel,
|
||||||
|
stdlib,
|
||||||
|
emqx,
|
||||||
|
emqx_resource,
|
||||||
|
emqx_bridge,
|
||||||
|
emqtt
|
||||||
|
]},
|
||||||
|
{env, []},
|
||||||
|
{modules, []},
|
||||||
|
{licenses, ["Apache 2.0"]},
|
||||||
|
{links, []}
|
||||||
|
]}.
|
|
@ -0,0 +1,340 @@
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||||
|
%%
|
||||||
|
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
%% you may not use this file except in compliance with the License.
|
||||||
|
%% You may obtain a copy of the License at
|
||||||
|
%%
|
||||||
|
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
%%
|
||||||
|
%% Unless required by applicable law or agreed to in writing, software
|
||||||
|
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
%% See the License for the specific language governing permissions and
|
||||||
|
%% limitations under the License.
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
-module(emqx_bridge_mqtt_connector).
|
||||||
|
|
||||||
|
-include_lib("emqx/include/emqx_mqtt.hrl").
|
||||||
|
-include_lib("emqx/include/logger.hrl").
|
||||||
|
|
||||||
|
-behaviour(emqx_resource).
|
||||||
|
|
||||||
|
-export([on_message_received/3]).
|
||||||
|
|
||||||
|
%% callbacks of behaviour emqx_resource
|
||||||
|
-export([
|
||||||
|
callback_mode/0,
|
||||||
|
on_start/2,
|
||||||
|
on_stop/2,
|
||||||
|
on_query/3,
|
||||||
|
on_query_async/4,
|
||||||
|
on_get_status/2
|
||||||
|
]).
|
||||||
|
|
||||||
|
-export([on_async_result/2]).
|
||||||
|
|
||||||
|
-define(HEALTH_CHECK_TIMEOUT, 1000).
|
||||||
|
|
||||||
|
%% ===================================================================
|
||||||
|
%% When use this bridge as a data source, ?MODULE:on_message_received will be called
|
||||||
|
%% if the bridge received msgs from the remote broker.
|
||||||
|
on_message_received(Msg, HookPoint, ResId) ->
|
||||||
|
emqx_resource_metrics:received_inc(ResId),
|
||||||
|
emqx:run_hook(HookPoint, [Msg]).
|
||||||
|
|
||||||
|
%% ===================================================================
|
||||||
|
callback_mode() -> async_if_possible.
|
||||||
|
|
||||||
|
on_start(ResourceId, Conf) ->
|
||||||
|
?SLOG(info, #{
|
||||||
|
msg => "starting_mqtt_connector",
|
||||||
|
connector => ResourceId,
|
||||||
|
config => emqx_utils:redact(Conf)
|
||||||
|
}),
|
||||||
|
case start_ingress(ResourceId, Conf) of
|
||||||
|
{ok, Result1} ->
|
||||||
|
case start_egress(ResourceId, Conf) of
|
||||||
|
{ok, Result2} ->
|
||||||
|
{ok, maps:merge(Result1, Result2)};
|
||||||
|
{error, Reason} ->
|
||||||
|
_ = stop_ingress(Result1),
|
||||||
|
{error, Reason}
|
||||||
|
end;
|
||||||
|
{error, Reason} ->
|
||||||
|
{error, Reason}
|
||||||
|
end.
|
||||||
|
|
||||||
|
start_ingress(ResourceId, Conf) ->
|
||||||
|
ClientOpts = mk_client_opts(ResourceId, "ingress", Conf),
|
||||||
|
case mk_ingress_config(ResourceId, Conf) of
|
||||||
|
Ingress = #{} ->
|
||||||
|
start_ingress(ResourceId, Ingress, ClientOpts);
|
||||||
|
undefined ->
|
||||||
|
{ok, #{}}
|
||||||
|
end.
|
||||||
|
|
||||||
|
start_ingress(ResourceId, Ingress, ClientOpts) ->
|
||||||
|
PoolName = <<ResourceId/binary, ":ingress">>,
|
||||||
|
PoolSize = choose_ingress_pool_size(ResourceId, Ingress),
|
||||||
|
Options = [
|
||||||
|
{name, PoolName},
|
||||||
|
{pool_size, PoolSize},
|
||||||
|
{ingress, Ingress},
|
||||||
|
{client_opts, ClientOpts}
|
||||||
|
],
|
||||||
|
case emqx_resource_pool:start(PoolName, emqx_bridge_mqtt_ingress, Options) of
|
||||||
|
ok ->
|
||||||
|
{ok, #{ingress_pool_name => PoolName}};
|
||||||
|
{error, {start_pool_failed, _, Reason}} ->
|
||||||
|
{error, Reason}
|
||||||
|
end.
|
||||||
|
|
||||||
|
choose_ingress_pool_size(
|
||||||
|
ResourceId,
|
||||||
|
#{remote := #{topic := RemoteTopic}, pool_size := PoolSize}
|
||||||
|
) ->
|
||||||
|
case emqx_topic:parse(RemoteTopic) of
|
||||||
|
{_Filter, #{share := _Name}} ->
|
||||||
|
% NOTE: this is shared subscription, many workers may subscribe
|
||||||
|
PoolSize;
|
||||||
|
{_Filter, #{}} ->
|
||||||
|
% NOTE: this is regular subscription, only one worker should subscribe
|
||||||
|
?SLOG(warning, #{
|
||||||
|
msg => "mqtt_bridge_ingress_pool_size_ignored",
|
||||||
|
connector => ResourceId,
|
||||||
|
reason =>
|
||||||
|
"Remote topic filter is not a shared subscription, "
|
||||||
|
"ingress pool will start with a single worker",
|
||||||
|
config_pool_size => PoolSize,
|
||||||
|
pool_size => 1
|
||||||
|
}),
|
||||||
|
1
|
||||||
|
end.
|
||||||
|
|
||||||
|
start_egress(ResourceId, Conf) ->
|
||||||
|
% NOTE
|
||||||
|
% We are ignoring the user configuration here because there's currently no reliable way
|
||||||
|
% to ensure proper session recovery according to the MQTT spec.
|
||||||
|
ClientOpts = maps:put(clean_start, true, mk_client_opts(ResourceId, "egress", Conf)),
|
||||||
|
case mk_egress_config(Conf) of
|
||||||
|
Egress = #{} ->
|
||||||
|
start_egress(ResourceId, Egress, ClientOpts);
|
||||||
|
undefined ->
|
||||||
|
{ok, #{}}
|
||||||
|
end.
|
||||||
|
|
||||||
|
start_egress(ResourceId, Egress, ClientOpts) ->
|
||||||
|
PoolName = <<ResourceId/binary, ":egress">>,
|
||||||
|
PoolSize = maps:get(pool_size, Egress),
|
||||||
|
Options = [
|
||||||
|
{name, PoolName},
|
||||||
|
{pool_size, PoolSize},
|
||||||
|
{client_opts, ClientOpts}
|
||||||
|
],
|
||||||
|
case emqx_resource_pool:start(PoolName, emqx_bridge_mqtt_egress, Options) of
|
||||||
|
ok ->
|
||||||
|
{ok, #{
|
||||||
|
egress_pool_name => PoolName,
|
||||||
|
egress_config => emqx_bridge_mqtt_egress:config(Egress)
|
||||||
|
}};
|
||||||
|
{error, {start_pool_failed, _, Reason}} ->
|
||||||
|
{error, Reason}
|
||||||
|
end.
|
||||||
|
|
||||||
|
on_stop(ResourceId, State) ->
|
||||||
|
?SLOG(info, #{
|
||||||
|
msg => "stopping_mqtt_connector",
|
||||||
|
connector => ResourceId
|
||||||
|
}),
|
||||||
|
ok = stop_ingress(State),
|
||||||
|
ok = stop_egress(State).
|
||||||
|
|
||||||
|
stop_ingress(#{ingress_pool_name := PoolName}) ->
|
||||||
|
emqx_resource_pool:stop(PoolName);
|
||||||
|
stop_ingress(#{}) ->
|
||||||
|
ok.
|
||||||
|
|
||||||
|
stop_egress(#{egress_pool_name := PoolName}) ->
|
||||||
|
emqx_resource_pool:stop(PoolName);
|
||||||
|
stop_egress(#{}) ->
|
||||||
|
ok.
|
||||||
|
|
||||||
|
on_query(
|
||||||
|
ResourceId,
|
||||||
|
{send_message, Msg},
|
||||||
|
#{egress_pool_name := PoolName, egress_config := Config}
|
||||||
|
) ->
|
||||||
|
?TRACE("QUERY", "send_msg_to_remote_node", #{message => Msg, connector => ResourceId}),
|
||||||
|
handle_send_result(with_egress_client(PoolName, send, [Msg, Config]));
|
||||||
|
on_query(ResourceId, {send_message, Msg}, #{}) ->
|
||||||
|
?SLOG(error, #{
|
||||||
|
msg => "forwarding_unavailable",
|
||||||
|
connector => ResourceId,
|
||||||
|
message => Msg,
|
||||||
|
reason => "Egress is not configured"
|
||||||
|
}).
|
||||||
|
|
||||||
|
on_query_async(
|
||||||
|
ResourceId,
|
||||||
|
{send_message, Msg},
|
||||||
|
CallbackIn,
|
||||||
|
#{egress_pool_name := PoolName, egress_config := Config}
|
||||||
|
) ->
|
||||||
|
?TRACE("QUERY", "async_send_msg_to_remote_node", #{message => Msg, connector => ResourceId}),
|
||||||
|
Callback = {fun on_async_result/2, [CallbackIn]},
|
||||||
|
Result = with_egress_client(PoolName, send_async, [Msg, Callback, Config]),
|
||||||
|
case Result of
|
||||||
|
ok ->
|
||||||
|
ok;
|
||||||
|
{ok, Pid} when is_pid(Pid) ->
|
||||||
|
{ok, Pid};
|
||||||
|
{error, Reason} ->
|
||||||
|
{error, classify_error(Reason)}
|
||||||
|
end;
|
||||||
|
on_query_async(ResourceId, {send_message, Msg}, _Callback, #{}) ->
|
||||||
|
?SLOG(error, #{
|
||||||
|
msg => "forwarding_unavailable",
|
||||||
|
connector => ResourceId,
|
||||||
|
message => Msg,
|
||||||
|
reason => "Egress is not configured"
|
||||||
|
}).
|
||||||
|
|
||||||
|
with_egress_client(ResourceId, Fun, Args) ->
|
||||||
|
ecpool:pick_and_do(ResourceId, {emqx_bridge_mqtt_egress, Fun, Args}, no_handover).
|
||||||
|
|
||||||
|
on_async_result(Callback, Result) ->
|
||||||
|
apply_callback_function(Callback, handle_send_result(Result)).
|
||||||
|
|
||||||
|
apply_callback_function(F, Result) when is_function(F) ->
|
||||||
|
erlang:apply(F, [Result]);
|
||||||
|
apply_callback_function({F, A}, Result) when is_function(F), is_list(A) ->
|
||||||
|
erlang:apply(F, A ++ [Result]);
|
||||||
|
apply_callback_function({M, F, A}, Result) when is_atom(M), is_atom(F), is_list(A) ->
|
||||||
|
erlang:apply(M, F, A ++ [Result]).
|
||||||
|
|
||||||
|
handle_send_result(ok) ->
|
||||||
|
ok;
|
||||||
|
handle_send_result({ok, #{reason_code := ?RC_SUCCESS}}) ->
|
||||||
|
ok;
|
||||||
|
handle_send_result({ok, #{reason_code := ?RC_NO_MATCHING_SUBSCRIBERS}}) ->
|
||||||
|
ok;
|
||||||
|
handle_send_result({ok, Reply}) ->
|
||||||
|
{error, classify_reply(Reply)};
|
||||||
|
handle_send_result({error, Reason}) ->
|
||||||
|
{error, classify_error(Reason)}.
|
||||||
|
|
||||||
|
classify_reply(Reply = #{reason_code := _}) ->
|
||||||
|
{unrecoverable_error, Reply}.
|
||||||
|
|
||||||
|
classify_error(disconnected = Reason) ->
|
||||||
|
{recoverable_error, Reason};
|
||||||
|
classify_error(ecpool_empty) ->
|
||||||
|
{recoverable_error, disconnected};
|
||||||
|
classify_error({disconnected, _RC, _} = Reason) ->
|
||||||
|
{recoverable_error, Reason};
|
||||||
|
classify_error({shutdown, _} = Reason) ->
|
||||||
|
{recoverable_error, Reason};
|
||||||
|
classify_error(shutdown = Reason) ->
|
||||||
|
{recoverable_error, Reason};
|
||||||
|
classify_error(Reason) ->
|
||||||
|
{unrecoverable_error, Reason}.
|
||||||
|
|
||||||
|
on_get_status(_ResourceId, State) ->
|
||||||
|
Pools = maps:to_list(maps:with([ingress_pool_name, egress_pool_name], State)),
|
||||||
|
Workers = [{Pool, Worker} || {Pool, PN} <- Pools, {_Name, Worker} <- ecpool:workers(PN)],
|
||||||
|
try emqx_utils:pmap(fun get_status/1, Workers, ?HEALTH_CHECK_TIMEOUT) of
|
||||||
|
Statuses ->
|
||||||
|
combine_status(Statuses)
|
||||||
|
catch
|
||||||
|
exit:timeout ->
|
||||||
|
connecting
|
||||||
|
end.
|
||||||
|
|
||||||
|
get_status({Pool, Worker}) ->
|
||||||
|
case ecpool_worker:client(Worker) of
|
||||||
|
{ok, Client} when Pool == ingress_pool_name ->
|
||||||
|
emqx_bridge_mqtt_ingress:status(Client);
|
||||||
|
{ok, Client} when Pool == egress_pool_name ->
|
||||||
|
emqx_bridge_mqtt_egress:status(Client);
|
||||||
|
{error, _} ->
|
||||||
|
disconnected
|
||||||
|
end.
|
||||||
|
|
||||||
|
combine_status(Statuses) ->
|
||||||
|
%% NOTE
|
||||||
|
%% Natural order of statuses: [connected, connecting, disconnected]
|
||||||
|
%% * `disconnected` wins over any other status
|
||||||
|
%% * `connecting` wins over `connected`
|
||||||
|
case lists:reverse(lists:usort(Statuses)) of
|
||||||
|
[Status | _] ->
|
||||||
|
Status;
|
||||||
|
[] ->
|
||||||
|
disconnected
|
||||||
|
end.
|
||||||
|
|
||||||
|
mk_ingress_config(
|
||||||
|
ResourceId,
|
||||||
|
#{
|
||||||
|
ingress := Ingress = #{remote := _},
|
||||||
|
server := Server,
|
||||||
|
hookpoint := HookPoint
|
||||||
|
}
|
||||||
|
) ->
|
||||||
|
Ingress#{
|
||||||
|
server => Server,
|
||||||
|
on_message_received => {?MODULE, on_message_received, [HookPoint, ResourceId]}
|
||||||
|
};
|
||||||
|
mk_ingress_config(ResourceId, #{ingress := #{remote := _}} = Conf) ->
|
||||||
|
error({no_hookpoint_provided, ResourceId, Conf});
|
||||||
|
mk_ingress_config(_ResourceId, #{}) ->
|
||||||
|
undefined.
|
||||||
|
|
||||||
|
mk_egress_config(#{egress := Egress = #{remote := _}}) ->
|
||||||
|
Egress;
|
||||||
|
mk_egress_config(#{}) ->
|
||||||
|
undefined.
|
||||||
|
|
||||||
|
mk_client_opts(
|
||||||
|
ResourceId,
|
||||||
|
ClientScope,
|
||||||
|
Config = #{
|
||||||
|
server := Server,
|
||||||
|
keepalive := KeepAlive,
|
||||||
|
ssl := #{enable := EnableSsl} = Ssl
|
||||||
|
}
|
||||||
|
) ->
|
||||||
|
HostPort = emqx_bridge_mqtt_connector_schema:parse_server(Server),
|
||||||
|
Options = maps:with(
|
||||||
|
[
|
||||||
|
proto_ver,
|
||||||
|
username,
|
||||||
|
password,
|
||||||
|
clean_start,
|
||||||
|
retry_interval,
|
||||||
|
max_inflight,
|
||||||
|
% Opening a connection in bridge mode will form a non-standard mqtt connection message.
|
||||||
|
% A load balancing server (such as haproxy) is often set up before the emqx broker server.
|
||||||
|
% When the load balancing server enables mqtt connection packet inspection,
|
||||||
|
% non-standard mqtt connection packets might be filtered out by LB.
|
||||||
|
bridge_mode
|
||||||
|
],
|
||||||
|
Config
|
||||||
|
),
|
||||||
|
Options#{
|
||||||
|
hosts => [HostPort],
|
||||||
|
clientid => clientid(ResourceId, ClientScope, Config),
|
||||||
|
connect_timeout => 30,
|
||||||
|
keepalive => ms_to_s(KeepAlive),
|
||||||
|
force_ping => true,
|
||||||
|
ssl => EnableSsl,
|
||||||
|
ssl_opts => maps:to_list(maps:remove(enable, Ssl))
|
||||||
|
}.
|
||||||
|
|
||||||
|
ms_to_s(Ms) ->
|
||||||
|
erlang:ceil(Ms / 1000).
|
||||||
|
|
||||||
|
clientid(Id, ClientScope, _Conf = #{clientid_prefix := Prefix}) when is_binary(Prefix) ->
|
||||||
|
iolist_to_binary([Prefix, ":", Id, ":", ClientScope, ":", atom_to_list(node())]);
|
||||||
|
clientid(Id, ClientScope, _Conf) ->
|
||||||
|
iolist_to_binary([Id, ":", ClientScope, ":", atom_to_list(node())]).
|
|
@ -14,7 +14,7 @@
|
||||||
%% limitations under the License.
|
%% limitations under the License.
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
-module(emqx_connector_mqtt_schema).
|
-module(emqx_bridge_mqtt_connector_schema).
|
||||||
|
|
||||||
-include_lib("typerefl/include/types.hrl").
|
-include_lib("typerefl/include/types.hrl").
|
||||||
-include_lib("hocon/include/hoconsc.hrl").
|
-include_lib("hocon/include/hoconsc.hrl").
|
||||||
|
@ -68,7 +68,8 @@ fields("server_configs") ->
|
||||||
hoconsc:enum([cluster_shareload]),
|
hoconsc:enum([cluster_shareload]),
|
||||||
#{
|
#{
|
||||||
default => cluster_shareload,
|
default => cluster_shareload,
|
||||||
desc => ?DESC("mode")
|
desc => ?DESC("mode"),
|
||||||
|
deprecated => {since, "v5.1.0 & e5.1.0"}
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
{server, emqx_schema:servers_sc(#{desc => ?DESC("server")}, ?MQTT_HOST_OPTS)},
|
{server, emqx_schema:servers_sc(#{desc => ?DESC("server")}, ?MQTT_HOST_OPTS)},
|
||||||
|
@ -133,16 +134,17 @@ fields("server_configs") ->
|
||||||
] ++ emqx_connector_schema_lib:ssl_fields();
|
] ++ emqx_connector_schema_lib:ssl_fields();
|
||||||
fields("ingress") ->
|
fields("ingress") ->
|
||||||
[
|
[
|
||||||
{"remote",
|
{pool_size, fun ingress_pool_size/1},
|
||||||
|
{remote,
|
||||||
mk(
|
mk(
|
||||||
ref(?MODULE, "ingress_remote"),
|
ref(?MODULE, "ingress_remote"),
|
||||||
#{desc => ?DESC(emqx_connector_mqtt_schema, "ingress_remote")}
|
#{desc => ?DESC("ingress_remote")}
|
||||||
)},
|
)},
|
||||||
{"local",
|
{local,
|
||||||
mk(
|
mk(
|
||||||
ref(?MODULE, "ingress_local"),
|
ref(?MODULE, "ingress_local"),
|
||||||
#{
|
#{
|
||||||
desc => ?DESC(emqx_connector_mqtt_schema, "ingress_local")
|
desc => ?DESC("ingress_local")
|
||||||
}
|
}
|
||||||
)}
|
)}
|
||||||
];
|
];
|
||||||
|
@ -204,19 +206,20 @@ fields("ingress_local") ->
|
||||||
];
|
];
|
||||||
fields("egress") ->
|
fields("egress") ->
|
||||||
[
|
[
|
||||||
{"local",
|
{pool_size, fun egress_pool_size/1},
|
||||||
|
{local,
|
||||||
mk(
|
mk(
|
||||||
ref(?MODULE, "egress_local"),
|
ref(?MODULE, "egress_local"),
|
||||||
#{
|
#{
|
||||||
desc => ?DESC(emqx_connector_mqtt_schema, "egress_local"),
|
desc => ?DESC("egress_local"),
|
||||||
required => false
|
required => false
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
{"remote",
|
{remote,
|
||||||
mk(
|
mk(
|
||||||
ref(?MODULE, "egress_remote"),
|
ref(?MODULE, "egress_remote"),
|
||||||
#{
|
#{
|
||||||
desc => ?DESC(emqx_connector_mqtt_schema, "egress_remote"),
|
desc => ?DESC("egress_remote"),
|
||||||
required => true
|
required => true
|
||||||
}
|
}
|
||||||
)}
|
)}
|
||||||
|
@ -272,6 +275,16 @@ fields("egress_remote") ->
|
||||||
)}
|
)}
|
||||||
].
|
].
|
||||||
|
|
||||||
|
ingress_pool_size(desc) ->
|
||||||
|
?DESC("ingress_pool_size");
|
||||||
|
ingress_pool_size(Prop) ->
|
||||||
|
emqx_connector_schema_lib:pool_size(Prop).
|
||||||
|
|
||||||
|
egress_pool_size(desc) ->
|
||||||
|
?DESC("egress_pool_size");
|
||||||
|
egress_pool_size(Prop) ->
|
||||||
|
emqx_connector_schema_lib:pool_size(Prop).
|
||||||
|
|
||||||
desc("server_configs") ->
|
desc("server_configs") ->
|
||||||
?DESC("server_configs");
|
?DESC("server_configs");
|
||||||
desc("ingress") ->
|
desc("ingress") ->
|
|
@ -0,0 +1,162 @@
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||||
|
%%
|
||||||
|
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
%% you may not use this file except in compliance with the License.
|
||||||
|
%% You may obtain a copy of the License at
|
||||||
|
%%
|
||||||
|
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
%%
|
||||||
|
%% Unless required by applicable law or agreed to in writing, software
|
||||||
|
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
%% See the License for the specific language governing permissions and
|
||||||
|
%% limitations under the License.
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
|
-module(emqx_bridge_mqtt_egress).
|
||||||
|
|
||||||
|
-include_lib("emqx/include/logger.hrl").
|
||||||
|
-include_lib("emqx/include/emqx.hrl").
|
||||||
|
-include_lib("emqx/include/emqx_mqtt.hrl").
|
||||||
|
|
||||||
|
-behaviour(ecpool_worker).
|
||||||
|
|
||||||
|
%% ecpool
|
||||||
|
-export([connect/1]).
|
||||||
|
|
||||||
|
-export([
|
||||||
|
config/1,
|
||||||
|
send/3,
|
||||||
|
send_async/4
|
||||||
|
]).
|
||||||
|
|
||||||
|
%% management APIs
|
||||||
|
-export([
|
||||||
|
status/1,
|
||||||
|
info/1
|
||||||
|
]).
|
||||||
|
|
||||||
|
-type name() :: term().
|
||||||
|
-type message() :: emqx_types:message() | map().
|
||||||
|
-type callback() :: {function(), [_Arg]} | {module(), atom(), [_Arg]}.
|
||||||
|
-type remote_message() :: #mqtt_msg{}.
|
||||||
|
|
||||||
|
-type option() ::
|
||||||
|
{name, name()}
|
||||||
|
%% see `emqtt:option()`
|
||||||
|
| {client_opts, map()}.
|
||||||
|
|
||||||
|
-type egress() :: #{
|
||||||
|
local => #{
|
||||||
|
topic => emqx_topic:topic()
|
||||||
|
},
|
||||||
|
remote := emqx_bridge_mqtt_msg:msgvars()
|
||||||
|
}.
|
||||||
|
|
||||||
|
%% @doc Start an ingress bridge worker.
|
||||||
|
-spec connect([option() | {ecpool_worker_id, pos_integer()}]) ->
|
||||||
|
{ok, pid()} | {error, _Reason}.
|
||||||
|
connect(Options) ->
|
||||||
|
?SLOG(debug, #{
|
||||||
|
msg => "egress_client_starting",
|
||||||
|
options => emqx_utils:redact(Options)
|
||||||
|
}),
|
||||||
|
Name = proplists:get_value(name, Options),
|
||||||
|
WorkerId = proplists:get_value(ecpool_worker_id, Options),
|
||||||
|
ClientOpts = proplists:get_value(client_opts, Options),
|
||||||
|
case emqtt:start_link(mk_client_opts(WorkerId, ClientOpts)) of
|
||||||
|
{ok, Pid} ->
|
||||||
|
connect(Pid, Name);
|
||||||
|
{error, Reason} = Error ->
|
||||||
|
?SLOG(error, #{
|
||||||
|
msg => "egress_client_start_failed",
|
||||||
|
config => emqx_utils:redact(ClientOpts),
|
||||||
|
reason => Reason
|
||||||
|
}),
|
||||||
|
Error
|
||||||
|
end.
|
||||||
|
|
||||||
|
mk_client_opts(WorkerId, ClientOpts = #{clientid := ClientId}) ->
|
||||||
|
ClientOpts#{clientid := mk_clientid(WorkerId, ClientId)}.
|
||||||
|
|
||||||
|
mk_clientid(WorkerId, ClientId) ->
|
||||||
|
iolist_to_binary([ClientId, $: | integer_to_list(WorkerId)]).
|
||||||
|
|
||||||
|
connect(Pid, Name) ->
|
||||||
|
case emqtt:connect(Pid) of
|
||||||
|
{ok, _Props} ->
|
||||||
|
{ok, Pid};
|
||||||
|
{error, Reason} = Error ->
|
||||||
|
?SLOG(warning, #{
|
||||||
|
msg => "egress_client_connect_failed",
|
||||||
|
reason => Reason,
|
||||||
|
name => Name
|
||||||
|
}),
|
||||||
|
_ = catch emqtt:stop(Pid),
|
||||||
|
Error
|
||||||
|
end.
|
||||||
|
|
||||||
|
%%
|
||||||
|
|
||||||
|
-spec config(map()) ->
|
||||||
|
egress().
|
||||||
|
config(#{remote := RC = #{}} = Conf) ->
|
||||||
|
Conf#{remote => emqx_bridge_mqtt_msg:parse(RC)}.
|
||||||
|
|
||||||
|
-spec send(pid(), message(), egress()) ->
|
||||||
|
ok.
|
||||||
|
send(Pid, MsgIn, Egress) ->
|
||||||
|
emqtt:publish(Pid, export_msg(MsgIn, Egress)).
|
||||||
|
|
||||||
|
-spec send_async(pid(), message(), callback(), egress()) ->
|
||||||
|
ok | {ok, pid()}.
|
||||||
|
send_async(Pid, MsgIn, Callback, Egress) ->
|
||||||
|
ok = emqtt:publish_async(Pid, export_msg(MsgIn, Egress), _Timeout = infinity, Callback),
|
||||||
|
{ok, Pid}.
|
||||||
|
|
||||||
|
export_msg(Msg, #{remote := Remote}) ->
|
||||||
|
to_remote_msg(Msg, Remote).
|
||||||
|
|
||||||
|
%% Convert a message into an emqtt #mqtt_msg{} for the remote broker.
%% A broker-internal #message{} record is first flattened into the rule
%% event map (so templates can reference its fields), then rendered.
-spec to_remote_msg(message(), emqx_bridge_mqtt_msg:msgvars()) ->
    remote_message().
to_remote_msg(#message{flags = Flags} = Msg, Vars) ->
    {EventMsg, _} = emqx_rule_events:eventmsg_publish(Msg),
    %% `retain` lives in the flags of #message{}; surface it for templates.
    Retain = maps:get(retain, Flags, false),
    to_remote_msg(EventMsg#{retain => Retain}, Vars);
to_remote_msg(MsgMap = #{}, RemoteVars) ->
    Rendered = emqx_bridge_mqtt_msg:render(MsgMap, RemoteVars),
    #{
        topic := Topic,
        payload := Payload,
        qos := QoS,
        retain := Retain
    } = Rendered,
    PubProps = maps:get(pub_props, MsgMap, #{}),
    #mqtt_msg{
        qos = QoS,
        retain = Retain,
        topic = Topic,
        props = emqx_utils:pub_props_to_packet(PubProps),
        payload = Payload
    }.
|
||||||
|
|
||||||
|
%%
|
||||||
|
|
||||||
|
%% Proxy to emqtt's introspection API; returns the client's info proplist.
-spec info(pid()) ->
    [{atom(), term()}].
info(ClientPid) ->
    emqtt:info(ClientPid).
|
||||||
|
|
||||||
|
%% Derive a resource status from the client's socket state:
%% a live socket means `connected`, none yet means `connecting`,
%% a dead client process means `disconnected`.
-spec status(pid()) ->
    emqx_resource:resource_status().
status(ClientPid) ->
    try info(ClientPid) of
        Info ->
            case proplists:get_value(socket, Info) of
                undefined -> connecting;
                _Socket -> connected
            end
    catch
        exit:{noproc, _} ->
            disconnected
    end.
|
|
@ -0,0 +1,274 @@
|
||||||
|
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------

-module(emqx_bridge_mqtt_ingress).

-include_lib("emqx/include/logger.hrl").

-behaviour(ecpool_worker).

%% ecpool
-export([connect/1]).

%% management APIs
-export([
    status/1,
    info/1
]).

-export([handle_publish/5]).
-export([handle_disconnect/1]).

-type name() :: term().

-type option() ::
    {name, name()}
    | {ingress, map()}
    %% see `emqtt:option()`
    | {client_opts, map()}.

-type ingress() :: #{
    server := string(),
    remote := #{
        topic := emqx_topic:topic(),
        qos => emqx_types:qos()
    },
    local := emqx_bridge_mqtt_msg:msgvars(),
    on_message_received := {module(), atom(), [term()]}
}.

%% @doc Start an ingress bridge worker.
%% Starts an emqtt client from the pooled options, connects it and
%% subscribes it to the configured remote topic.
-spec connect([option() | {ecpool_worker_id, pos_integer()}]) ->
    {ok, pid()} | {error, _Reason}.
connect(Options) ->
    ?SLOG(debug, #{
        msg => "ingress_client_starting",
        options => emqx_utils:redact(Options)
    }),
    Name = proplists:get_value(name, Options),
    WorkerId = proplists:get_value(ecpool_worker_id, Options),
    Ingress = config(proplists:get_value(ingress, Options), Name),
    ClientOpts = proplists:get_value(client_opts, Options),
    StartResult = emqtt:start_link(mk_client_opts(Name, WorkerId, Ingress, ClientOpts)),
    case StartResult of
        {ok, Pid} ->
            connect(Pid, Name, Ingress);
        {error, Reason} = Error ->
            ?SLOG(error, #{
                msg => "client_start_failed",
                config => emqx_utils:redact(ClientOpts),
                reason => Reason
            }),
            Error
    end.

%% Per-worker client options: unique clientid plus the message handler
%% callbacks that route incoming publishes into this module.
mk_client_opts(Name, WorkerId, Ingress, ClientOpts = #{clientid := BaseClientId}) ->
    ClientOpts#{
        clientid := mk_clientid(WorkerId, BaseClientId),
        msg_handler => mk_client_event_handler(Name, Ingress)
    }.

%% Append ":<worker-id>" so that pool members get distinct clientids.
mk_clientid(WorkerId, ClientId) ->
    iolist_to_binary([ClientId, ":", integer_to_list(WorkerId)]).

%% emqtt message-handler map: publishes are dispatched to
%% handle_publish/5, disconnects to handle_disconnect/1.
mk_client_event_handler(Name, Ingress = #{}) ->
    IngressVars = maps:with([server], Ingress),
    OnMessage = maps:get(on_message_received, Ingress, undefined),
    %% Local republishing only happens when a local topic is configured.
    LocalPublish =
        case Ingress of
            #{local := Local = #{topic := _}} ->
                Local;
            #{} ->
                undefined
        end,
    #{
        publish => {fun ?MODULE:handle_publish/5, [Name, OnMessage, LocalPublish, IngressVars]},
        disconnected => {fun ?MODULE:handle_disconnect/1, []}
    }.

%% Connect the started client and establish the remote subscription.
%% The client is stopped (best effort) on any failure so no orphaned
%% processes are left behind.
-spec connect(pid(), name(), ingress()) ->
    {ok, pid()} | {error, _Reason}.
connect(Pid, Name, Ingress) ->
    case emqtt:connect(Pid) of
        {ok, _ConnAckProps} ->
            case subscribe_remote_topic(Pid, Ingress) of
                {ok, _Props, _RCs} ->
                    {ok, Pid};
                {error, Reason} = Error ->
                    ?SLOG(error, #{
                        msg => "ingress_client_subscribe_failed",
                        ingress => Ingress,
                        name => Name,
                        reason => Reason
                    }),
                    _ = catch emqtt:stop(Pid),
                    Error
            end;
        {error, Reason} = Error ->
            ?SLOG(warning, #{
                msg => "ingress_client_connect_failed",
                reason => Reason,
                name => Name
            }),
            _ = catch emqtt:stop(Pid),
            Error
    end.

subscribe_remote_topic(Pid, #{remote := #{topic := RemoteTopic, qos := QoS}}) ->
    emqtt:subscribe(Pid, RemoteTopic, QoS).

%%

%% Pre-process the ingress configuration: normalize the remote QoS and
%% compile the `local` message templates.
-spec config(map(), name()) ->
    ingress().
config(#{remote := RemoteConf, local := LocalConf} = Conf, BridgeName) ->
    Conf#{
        remote => parse_remote(RemoteConf, BridgeName),
        local => emqx_bridge_mqtt_msg:parse(LocalConf)
    }.

%% Subscriptions at QoS 2 are not supported by the bridge; downgrade and
%% warn so the operator knows the effective QoS.
parse_remote(#{qos := QoSIn} = Conf, BridgeName) ->
    QoS = downgrade_ingress_qos(QoSIn),
    case QoS of
        QoSIn ->
            ok;
        _ ->
            ?SLOG(warning, #{
                msg => "downgraded_unsupported_ingress_qos",
                qos_configured => QoSIn,
                qos_used => QoS,
                name => BridgeName
            })
    end,
    Conf#{qos => QoS}.

downgrade_ingress_qos(2) ->
    1;
downgrade_ingress_qos(QoS) ->
    QoS.

%%

%% Proxy to emqtt's introspection API; returns the client's info proplist.
-spec info(pid()) ->
    [{atom(), term()}].
info(Pid) ->
    emqtt:info(Pid).

%% Derive a resource status from the client's socket state:
%% a live socket means `connected`, none yet means `connecting`,
%% a dead client process means `disconnected`.
-spec status(pid()) ->
    emqx_resource:resource_status().
status(Pid) ->
    try info(Pid) of
        Info ->
            case proplists:get_value(socket, Info) of
                undefined -> connecting;
                _Socket -> connected
            end
    catch
        exit:{noproc, _} ->
            disconnected
    end.

%%

%% emqtt publish callback: import the remote message, hand it to the
%% configured rule-engine hook (if any) and optionally republish locally.
handle_publish(#{properties := RecvProps} = MsgIn, Name, OnMessage, LocalPublish, IngressVars) ->
    Msg = import_msg(MsgIn, IngressVars),
    ?SLOG(debug, #{
        msg => "ingress_publish_local",
        message => Msg,
        name => Name
    }),
    maybe_on_message_received(Msg, OnMessage),
    maybe_publish_local(Msg, LocalPublish, RecvProps).

%% emqtt disconnect callback; nothing to do, ecpool restarts the worker.
handle_disconnect(_Reason) ->
    ok.

maybe_on_message_received(Msg, {Mod, Func, Args}) ->
    erlang:apply(Mod, Func, [Msg | Args]);
maybe_on_message_received(_Msg, undefined) ->
    ok.

maybe_publish_local(Msg, Local = #{}, Props) ->
    emqx_broker:publish(to_broker_msg(Msg, Local, Props));
maybe_publish_local(_Msg, undefined, _Props) ->
    ok.

%%

%% Flatten an incoming emqtt message into the map shape that templates
%% and rule-engine hooks expect; tags it with a fresh id, the source
%% server and the receive timestamp.
import_msg(
    #{
        dup := Dup,
        payload := Payload,
        properties := Props,
        qos := QoS,
        retain := Retain,
        topic := Topic
    },
    #{server := Server}
) ->
    #{
        id => emqx_guid:to_hexstr(emqx_guid:gen()),
        server => Server,
        payload => Payload,
        topic => Topic,
        qos => QoS,
        dup => Dup,
        retain => Retain,
        pub_props => printable_maps(Props),
        message_received_at => erlang:system_time(millisecond)
    }.

%% Make MQTT v5 properties template-friendly: the 'User-Property' pair
%% list is exposed both as a map and as a list of #{key, value} maps.
printable_maps(undefined) ->
    #{};
printable_maps(Headers) ->
    maps:fold(
        fun
            ('User-Property', Pairs, Acc) when is_list(Pairs) ->
                Acc#{
                    'User-Property' => maps:from_list(Pairs),
                    'User-Property-Pairs' => [
                        #{
                            key => K,
                            value => V
                        }
                     || {K, V} <- Pairs
                    ]
                };
            (Key, Val, Acc) ->
                Acc#{Key => Val}
        end,
        #{},
        Headers
    ).

%% published from remote node over a MQTT connection
to_broker_msg(Msg, Vars, undefined) ->
    to_broker_msg(Msg, Vars, #{});
to_broker_msg(#{dup := Dup} = Msg, Local, Props) ->
    #{
        topic := Topic,
        payload := Payload,
        qos := QoS,
        retain := Retain
    } = emqx_bridge_mqtt_msg:render(Msg, Local),
    PubProps = maps:get(pub_props, Msg, #{}),
    emqx_message:set_headers(
        Props#{properties => emqx_utils:pub_props_to_packet(PubProps)},
        emqx_message:set_flags(
            #{dup => Dup, retain => Retain},
            emqx_message:make(bridge, QoS, Topic, Payload)
        )
    ).
|
|
@ -0,0 +1,95 @@
|
||||||
|
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------

-module(emqx_bridge_mqtt_msg).

-export([parse/1]).
-export([render/2]).

-export_type([msgvars/0]).

-type template() :: emqx_plugin_libs_rule:tmpl_token().

%% Message-variable map: each field is either a pre-compiled template or
%% a literal value (when the configuration supplied a non-string).
-type msgvars() :: #{
    topic => template(),
    qos => template() | emqx_types:qos(),
    retain => template() | boolean(),
    payload => template() | undefined
}.

%%

%% Compile the configured message fields into templates. Binary values
%% are pre-processed as placeholder templates; other values (literal
%% QoS integers, booleans) are kept as-is.
-spec parse(#{
    topic => iodata(),
    qos => iodata() | emqx_types:qos(),
    retain => iodata() | boolean(),
    payload => iodata()
}) ->
    msgvars().
parse(Conf) ->
    lists:foldl(
        fun(Key, Acc) -> parse_field(Key, Conf, Acc) end,
        Conf,
        [topic, qos, payload, retain]
    ).

parse_field(Key, Conf, Acc) ->
    case maps:find(Key, Conf) of
        {ok, Val} when is_binary(Val) ->
            Acc#{Key => emqx_plugin_libs_rule:preproc_tmpl(Val)};
        {ok, Val} ->
            Acc#{Key => Val};
        error ->
            Acc
    end.

%% Render the message fields against the given message data. Topic and
%% payload render to binaries; qos and retain keep their native types.
render(
    Msg,
    #{
        topic := TopicTemplate,
        qos := QoSTemplate,
        retain := RetainTemplate
    } = Vars
) ->
    #{
        topic => render_string(TopicTemplate, Msg),
        payload => render_payload(Vars, Msg),
        qos => render_simple_var(QoSTemplate, Msg),
        retain => render_simple_var(RetainTemplate, Msg)
    }.

%% When no payload template is configured, the whole message is encoded
%% as JSON; otherwise the template is rendered to a binary.
render_payload(Vars, MapMsg) ->
    do_render_payload(maps:get(payload, Vars, undefined), MapMsg).

do_render_payload(undefined, Msg) ->
    emqx_utils_json:encode(Msg);
do_render_payload(Template, Msg) ->
    render_string(Template, Msg).

%% Replace a string contains vars to another string in which the placeholders are replace by the
%% corresponding values. For example, given "a: ${var}", if the var=1, the result string will be:
%% "a: 1".
render_string(Template, Data) when is_list(Template) ->
    emqx_placeholder:proc_tmpl(Template, Data, #{return => full_binary});
render_string(Literal, _Data) ->
    Literal.

%% Replace a simple var to its value. For example, given "${var}", if the var=1, then the result
%% value will be an integer 1.
render_simple_var(Template, Data) when is_list(Template) ->
    [Value] = emqx_placeholder:proc_tmpl(Template, Data, #{return => rawlist}),
    Value;
render_simple_var(Literal, _Data) ->
    Literal.
|
|
@ -42,7 +42,7 @@ fields("config") ->
|
||||||
}
|
}
|
||||||
)}
|
)}
|
||||||
] ++
|
] ++
|
||||||
emqx_connector_mqtt_schema:fields("config");
|
emqx_bridge_mqtt_connector_schema:fields("config");
|
||||||
fields("creation_opts") ->
|
fields("creation_opts") ->
|
||||||
Opts = emqx_resource_schema:fields("creation_opts"),
|
Opts = emqx_resource_schema:fields("creation_opts"),
|
||||||
[O || {Field, _} = O <- Opts, not is_hidden_opts(Field)];
|
[O || {Field, _} = O <- Opts, not is_hidden_opts(Field)];
|
|
@ -22,9 +22,7 @@
|
||||||
|
|
||||||
-include("emqx/include/emqx.hrl").
|
-include("emqx/include/emqx.hrl").
|
||||||
-include_lib("eunit/include/eunit.hrl").
|
-include_lib("eunit/include/eunit.hrl").
|
||||||
-include_lib("common_test/include/ct.hrl").
|
|
||||||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||||
-include("emqx_dashboard/include/emqx_dashboard.hrl").
|
|
||||||
|
|
||||||
%% output functions
|
%% output functions
|
||||||
-export([inspect/3]).
|
-export([inspect/3]).
|
||||||
|
@ -132,13 +130,11 @@ suite() ->
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
_ = application:load(emqx_conf),
|
_ = application:load(emqx_conf),
|
||||||
%% some testcases (may from other app) already get emqx_connector started
|
|
||||||
_ = application:stop(emqx_resource),
|
|
||||||
_ = application:stop(emqx_connector),
|
|
||||||
ok = emqx_common_test_helpers:start_apps(
|
ok = emqx_common_test_helpers:start_apps(
|
||||||
[
|
[
|
||||||
emqx_rule_engine,
|
emqx_rule_engine,
|
||||||
emqx_bridge,
|
emqx_bridge,
|
||||||
|
emqx_bridge_mqtt,
|
||||||
emqx_dashboard
|
emqx_dashboard
|
||||||
],
|
],
|
||||||
fun set_special_configs/1
|
fun set_special_configs/1
|
||||||
|
@ -152,9 +148,10 @@ init_per_suite(Config) ->
|
||||||
|
|
||||||
end_per_suite(_Config) ->
|
end_per_suite(_Config) ->
|
||||||
emqx_common_test_helpers:stop_apps([
|
emqx_common_test_helpers:stop_apps([
|
||||||
emqx_rule_engine,
|
emqx_dashboard,
|
||||||
|
emqx_bridge_mqtt,
|
||||||
emqx_bridge,
|
emqx_bridge,
|
||||||
emqx_dashboard
|
emqx_rule_engine
|
||||||
]),
|
]),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
@ -221,6 +218,12 @@ t_mqtt_conn_bridge_ingress(_) ->
|
||||||
request(put, uri(["bridges", BridgeIDIngress]), ServerConf)
|
request(put, uri(["bridges", BridgeIDIngress]), ServerConf)
|
||||||
),
|
),
|
||||||
|
|
||||||
|
%% non-shared subscription, verify that only one client is subscribed
|
||||||
|
?assertEqual(
|
||||||
|
1,
|
||||||
|
length(emqx:subscribers(<<?INGRESS_REMOTE_TOPIC, "/#">>))
|
||||||
|
),
|
||||||
|
|
||||||
%% we now test if the bridge works as expected
|
%% we now test if the bridge works as expected
|
||||||
RemoteTopic = <<?INGRESS_REMOTE_TOPIC, "/1">>,
|
RemoteTopic = <<?INGRESS_REMOTE_TOPIC, "/1">>,
|
||||||
LocalTopic = <<?INGRESS_LOCAL_TOPIC, "/", RemoteTopic/binary>>,
|
LocalTopic = <<?INGRESS_LOCAL_TOPIC, "/", RemoteTopic/binary>>,
|
||||||
|
@ -245,6 +248,48 @@ t_mqtt_conn_bridge_ingress(_) ->
|
||||||
|
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
%% Verify that an ingress bridge using a `$share/...` remote topic spreads
%% the subscription across all pool workers and still delivers every
%% message exactly once locally.
t_mqtt_conn_bridge_ingress_shared_subscription(_) ->
    PoolSize = 4,
    Seq = lists:seq(1, 10),
    BridgeName = atom_to_binary(?FUNCTION_NAME),
    BridgeID = create_bridge(
        ?SERVER_CONF(<<>>)#{
            <<"type">> => ?TYPE_MQTT,
            <<"name">> => BridgeName,
            <<"ingress">> => #{
                <<"pool_size">> => PoolSize,
                <<"remote">> => #{
                    <<"topic">> => <<"$share/ingress/", ?INGRESS_REMOTE_TOPIC, "/#">>,
                    <<"qos">> => 1
                },
                <<"local">> => #{
                    <<"topic">> => <<?INGRESS_LOCAL_TOPIC, "/${topic}">>,
                    <<"qos">> => <<"${qos}">>,
                    <<"payload">> => <<"${clientid}">>,
                    <<"retain">> => <<"${retain}">>
                }
            }
        }
    ),

    RemoteTopic = <<?INGRESS_REMOTE_TOPIC, "/1">>,
    LocalTopic = <<?INGRESS_LOCAL_TOPIC, "/", RemoteTopic/binary>>,
    ok = emqx:subscribe(LocalTopic),

    %% Publish concurrently so messages are distributed over the workers.
    _ = emqx_utils:pmap(
        fun emqx:publish/1,
        [emqx_message:make(RemoteTopic, <<>>) || _ <- Seq]
    ),
    %% Each published message must be bridged to the local topic.
    _ = [assert_mqtt_msg_received(LocalTopic) || _ <- Seq],

    %% All pool workers should have joined the shared subscription group.
    ?assertEqual(
        PoolSize,
        length(emqx_shared_sub:subscribers(<<"ingress">>, <<?INGRESS_REMOTE_TOPIC, "/#">>))
    ),

    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []),
    ok.
|
||||||
|
|
||||||
t_mqtt_egress_bridge_ignores_clean_start(_) ->
|
t_mqtt_egress_bridge_ignores_clean_start(_) ->
|
||||||
BridgeName = atom_to_binary(?FUNCTION_NAME),
|
BridgeName = atom_to_binary(?FUNCTION_NAME),
|
||||||
BridgeID = create_bridge(
|
BridgeID = create_bridge(
|
||||||
|
@ -256,11 +301,17 @@ t_mqtt_egress_bridge_ignores_clean_start(_) ->
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
|
|
||||||
{ok, _, #{state := #{name := WorkerName}}} =
|
ResourceID = emqx_bridge_resource:resource_id(BridgeID),
|
||||||
emqx_resource:get_instance(emqx_bridge_resource:resource_id(BridgeID)),
|
{ok, _Group, #{state := #{egress_pool_name := EgressPoolName}}} =
|
||||||
|
emqx_resource_manager:lookup_cached(ResourceID),
|
||||||
|
ClientInfo = ecpool:pick_and_do(
|
||||||
|
EgressPoolName,
|
||||||
|
{emqx_bridge_mqtt_egress, info, []},
|
||||||
|
no_handover
|
||||||
|
),
|
||||||
?assertMatch(
|
?assertMatch(
|
||||||
#{clean_start := true},
|
#{clean_start := true},
|
||||||
maps:from_list(emqx_connector_mqtt_worker:info(WorkerName))
|
maps:from_list(ClientInfo)
|
||||||
),
|
),
|
||||||
|
|
||||||
%% delete the bridge
|
%% delete the bridge
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_bridge_pulsar, [
|
{application, emqx_bridge_pulsar, [
|
||||||
{description, "EMQX Pulsar Bridge"},
|
{description, "EMQX Pulsar Bridge"},
|
||||||
{vsn, "0.1.3"},
|
{vsn, "0.1.4"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{applications, [
|
{applications, [
|
||||||
kernel,
|
kernel,
|
||||||
|
|
|
@ -18,6 +18,8 @@
|
||||||
%% emqx_ee_bridge "unofficial" API
|
%% emqx_ee_bridge "unofficial" API
|
||||||
-export([conn_bridge_examples/1]).
|
-export([conn_bridge_examples/1]).
|
||||||
|
|
||||||
|
-export([producer_strategy_key_validator/1]).
|
||||||
|
|
||||||
%%-------------------------------------------------------------------------------------------------
|
%%-------------------------------------------------------------------------------------------------
|
||||||
%% `hocon_schema' API
|
%% `hocon_schema' API
|
||||||
%%-------------------------------------------------------------------------------------------------
|
%%-------------------------------------------------------------------------------------------------
|
||||||
|
@ -218,6 +220,14 @@ conn_bridge_examples(_Method) ->
|
||||||
}
|
}
|
||||||
].
|
].
|
||||||
|
|
||||||
|
%% Schema validator: the `key_dispatch` partition strategy needs a
%% non-empty message key, otherwise dispatching has nothing to hash on.
producer_strategy_key_validator(#{
    <<"strategy">> := key_dispatch,
    <<"message">> := #{<<"key">> := ""}
}) ->
    {error, "Message key cannot be empty when `key_dispatch` strategy is used"};
producer_strategy_key_validator(_) ->
    ok.
|
||||||
|
|
||||||
%%-------------------------------------------------------------------------------------------------
|
%%-------------------------------------------------------------------------------------------------
|
||||||
%% Internal fns
|
%% Internal fns
|
||||||
%%-------------------------------------------------------------------------------------------------
|
%%-------------------------------------------------------------------------------------------------
|
||||||
|
|
|
@ -60,6 +60,10 @@
|
||||||
sync_timeout := emqx_schema:duration_ms()
|
sync_timeout := emqx_schema:duration_ms()
|
||||||
}.
|
}.
|
||||||
|
|
||||||
|
%% Allocatable resources
|
||||||
|
-define(pulsar_client_id, pulsar_client_id).
|
||||||
|
-define(pulsar_producers, pulsar_producers).
|
||||||
|
|
||||||
%%-------------------------------------------------------------------------------------
|
%%-------------------------------------------------------------------------------------
|
||||||
%% `emqx_resource' API
|
%% `emqx_resource' API
|
||||||
%%-------------------------------------------------------------------------------------
|
%%-------------------------------------------------------------------------------------
|
||||||
|
@ -81,7 +85,7 @@ on_start(InstanceId, Config) ->
|
||||||
} = Config,
|
} = Config,
|
||||||
Servers = format_servers(Servers0),
|
Servers = format_servers(Servers0),
|
||||||
ClientId = make_client_id(InstanceId, BridgeName),
|
ClientId = make_client_id(InstanceId, BridgeName),
|
||||||
ok = emqx_resource:allocate_resource(InstanceId, pulsar_client_id, ClientId),
|
ok = emqx_resource:allocate_resource(InstanceId, ?pulsar_client_id, ClientId),
|
||||||
SSLOpts = emqx_tls_lib:to_client_opts(SSL),
|
SSLOpts = emqx_tls_lib:to_client_opts(SSL),
|
||||||
ConnectTimeout = maps:get(connect_timeout, Config, timer:seconds(5)),
|
ConnectTimeout = maps:get(connect_timeout, Config, timer:seconds(5)),
|
||||||
ClientOpts = #{
|
ClientOpts = #{
|
||||||
|
@ -119,7 +123,7 @@ on_start(InstanceId, Config) ->
|
||||||
-spec on_stop(resource_id(), state()) -> ok.
|
-spec on_stop(resource_id(), state()) -> ok.
|
||||||
on_stop(InstanceId, _State) ->
|
on_stop(InstanceId, _State) ->
|
||||||
case emqx_resource:get_allocated_resources(InstanceId) of
|
case emqx_resource:get_allocated_resources(InstanceId) of
|
||||||
#{pulsar_client_id := ClientId, pulsar_producers := Producers} ->
|
#{?pulsar_client_id := ClientId, ?pulsar_producers := Producers} ->
|
||||||
stop_producers(ClientId, Producers),
|
stop_producers(ClientId, Producers),
|
||||||
stop_client(ClientId),
|
stop_client(ClientId),
|
||||||
?tp(pulsar_bridge_stopped, #{
|
?tp(pulsar_bridge_stopped, #{
|
||||||
|
@ -128,7 +132,7 @@ on_stop(InstanceId, _State) ->
|
||||||
pulsar_producers => Producers
|
pulsar_producers => Producers
|
||||||
}),
|
}),
|
||||||
ok;
|
ok;
|
||||||
#{pulsar_client_id := ClientId} ->
|
#{?pulsar_client_id := ClientId} ->
|
||||||
stop_client(ClientId),
|
stop_client(ClientId),
|
||||||
?tp(pulsar_bridge_stopped, #{
|
?tp(pulsar_bridge_stopped, #{
|
||||||
instance_id => InstanceId,
|
instance_id => InstanceId,
|
||||||
|
@ -340,7 +344,7 @@ start_producer(Config, InstanceId, ClientId, ClientOpts) ->
|
||||||
?tp(pulsar_producer_about_to_start_producers, #{producer_name => ProducerName}),
|
?tp(pulsar_producer_about_to_start_producers, #{producer_name => ProducerName}),
|
||||||
try pulsar:ensure_supervised_producers(ClientId, PulsarTopic, ProducerOpts) of
|
try pulsar:ensure_supervised_producers(ClientId, PulsarTopic, ProducerOpts) of
|
||||||
{ok, Producers} ->
|
{ok, Producers} ->
|
||||||
ok = emqx_resource:allocate_resource(InstanceId, pulsar_producers, Producers),
|
ok = emqx_resource:allocate_resource(InstanceId, ?pulsar_producers, Producers),
|
||||||
?tp(pulsar_producer_producers_allocated, #{}),
|
?tp(pulsar_producer_producers_allocated, #{}),
|
||||||
State = #{
|
State = #{
|
||||||
pulsar_client_id => ClientId,
|
pulsar_client_id => ClientId,
|
||||||
|
|
|
@ -285,6 +285,11 @@ create_bridge(Config, Overrides) ->
|
||||||
PulsarConfig = emqx_utils_maps:deep_merge(PulsarConfig0, Overrides),
|
PulsarConfig = emqx_utils_maps:deep_merge(PulsarConfig0, Overrides),
|
||||||
emqx_bridge:create(Type, Name, PulsarConfig).
|
emqx_bridge:create(Type, Name, PulsarConfig).
|
||||||
|
|
||||||
|
%% Remove the Pulsar bridge created for the test identified by the
%% `pulsar_name` entry in the CT config.
delete_bridge(Config) ->
    BridgeType = ?BRIDGE_TYPE_BIN,
    BridgeName = ?config(pulsar_name, Config),
    emqx_bridge:remove(BridgeType, BridgeName).
|
||||||
|
|
||||||
create_bridge_api(Config) ->
|
create_bridge_api(Config) ->
|
||||||
create_bridge_api(Config, _Overrides = #{}).
|
create_bridge_api(Config, _Overrides = #{}).
|
||||||
|
|
||||||
|
@ -541,8 +546,14 @@ kill_resource_managers() ->
|
||||||
lists:foreach(
|
lists:foreach(
|
||||||
fun({_, Pid, _, _}) ->
|
fun({_, Pid, _, _}) ->
|
||||||
ct:pal("terminating resource manager ~p", [Pid]),
|
ct:pal("terminating resource manager ~p", [Pid]),
|
||||||
%% sys:terminate(Pid, stop),
|
Ref = monitor(process, Pid),
|
||||||
exit(Pid, kill),
|
exit(Pid, kill),
|
||||||
|
receive
|
||||||
|
{'DOWN', Ref, process, Pid, killed} ->
|
||||||
|
ok
|
||||||
|
after 500 ->
|
||||||
|
ct:fail("pid ~p didn't die!", [Pid])
|
||||||
|
end,
|
||||||
ok
|
ok
|
||||||
end,
|
end,
|
||||||
supervisor:which_children(emqx_resource_manager_sup)
|
supervisor:which_children(emqx_resource_manager_sup)
|
||||||
|
@ -1002,6 +1013,8 @@ t_resource_manager_crash_after_producers_started(Config) ->
|
||||||
Producers =/= undefined,
|
Producers =/= undefined,
|
||||||
10_000
|
10_000
|
||||||
),
|
),
|
||||||
|
?assertMatch({ok, _}, delete_bridge(Config)),
|
||||||
|
?assertEqual([], get_pulsar_producers()),
|
||||||
ok
|
ok
|
||||||
end,
|
end,
|
||||||
[]
|
[]
|
||||||
|
@ -1033,6 +1046,8 @@ t_resource_manager_crash_before_producers_started(Config) ->
|
||||||
#{?snk_kind := pulsar_bridge_stopped, pulsar_producers := undefined},
|
#{?snk_kind := pulsar_bridge_stopped, pulsar_producers := undefined},
|
||||||
10_000
|
10_000
|
||||||
),
|
),
|
||||||
|
?assertMatch({ok, _}, delete_bridge(Config)),
|
||||||
|
?assertEqual([], get_pulsar_producers()),
|
||||||
ok
|
ok
|
||||||
end,
|
end,
|
||||||
[]
|
[]
|
||||||
|
|
|
@ -0,0 +1,75 @@
|
||||||
|
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------

-module(emqx_bridge_pulsar_tests).

-include_lib("eunit/include/eunit.hrl").

%%===========================================================================
%% Test cases
%%===========================================================================

%% The schema must reject a `key_dispatch` producer whose message key is
%% empty; the raw config itself must still parse cleanly.
pulsar_producer_validations_test() ->
    BaseHocon = pulsar_producer_hocon(),
    InvalidHocon =
        BaseHocon ++
            "\n"
            "bridges.pulsar_producer.my_producer.strategy = key_dispatch"
            "\n"
            "bridges.pulsar_producer.my_producer.message.key = \"\"",
    Conf = parse(InvalidHocon),
    ?assertMatch(
        #{
            <<"strategy">> := <<"key_dispatch">>,
            <<"message">> := #{<<"key">> := <<>>}
        },
        emqx_utils_maps:deep_get([<<"bridges">>, <<"pulsar_producer">>, <<"my_producer">>], Conf)
    ),
    ?assertThrow(
        {_, [
            #{
                path := "bridges.pulsar_producer.my_producer",
                reason := "Message key cannot be empty when `key_dispatch` strategy is used"
            }
        ]},
        check(Conf)
    ),

    ok.

%%===========================================================================
%% Helper functions
%%===========================================================================

%% Parse raw HOCON text into a plain map.
parse(Hocon) ->
    {ok, Conf} = hocon:binary(Hocon),
    Conf.

%% Run the bridge schema checks over a parsed config map.
check(Conf) when is_map(Conf) ->
    hocon_tconf:check_plain(emqx_bridge_schema, Conf).

%%===========================================================================
%% Data section
%%===========================================================================

%% erlfmt-ignore
pulsar_producer_hocon() ->
"""
bridges.pulsar_producer.my_producer {
  enable = true
  servers = \"localhost:6650\"
  pulsar_topic = pulsar_topic
  strategy = random
  message {
    key = \"${.clientid}\"
    value = \"${.}\"
  }
  authentication = none
  ssl {
    enable = false
    verify = verify_none
    server_name_indication = \"auto\"
  }
}
""".
|
|
@ -414,17 +414,7 @@ doc_gen_test() ->
|
||||||
60,
|
60,
|
||||||
fun() ->
|
fun() ->
|
||||||
Dir = "tmp",
|
Dir = "tmp",
|
||||||
ok = filelib:ensure_dir(filename:join("tmp", foo)),
|
ok = emqx_conf:dump_schema(Dir, emqx_conf_schema)
|
||||||
I18nFile = filename:join([
|
|
||||||
"_build",
|
|
||||||
"test",
|
|
||||||
"lib",
|
|
||||||
"emqx_dashboard",
|
|
||||||
"priv",
|
|
||||||
"i18n.conf"
|
|
||||||
]),
|
|
||||||
_ = emqx_conf:dump_schema(Dir, emqx_conf_schema, I18nFile),
|
|
||||||
ok
|
|
||||||
end
|
end
|
||||||
}.
|
}.
|
||||||
|
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_connector, [
|
{application, emqx_connector, [
|
||||||
{description, "EMQX Data Integration Connectors"},
|
{description, "EMQX Data Integration Connectors"},
|
||||||
{vsn, "0.1.24"},
|
{vsn, "0.1.25"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{mod, {emqx_connector_app, []}},
|
{mod, {emqx_connector_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -1,319 +0,0 @@
|
||||||
%%--------------------------------------------------------------------
|
|
||||||
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
|
||||||
%%
|
|
||||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
%% you may not use this file except in compliance with the License.
|
|
||||||
%% You may obtain a copy of the License at
|
|
||||||
%%
|
|
||||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
%%
|
|
||||||
%% Unless required by applicable law or agreed to in writing, software
|
|
||||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
%% See the License for the specific language governing permissions and
|
|
||||||
%% limitations under the License.
|
|
||||||
%%--------------------------------------------------------------------
|
|
||||||
-module(emqx_connector_mqtt).
|
|
||||||
|
|
||||||
-include("emqx_connector.hrl").
|
|
||||||
|
|
||||||
-include_lib("typerefl/include/types.hrl").
|
|
||||||
-include_lib("hocon/include/hoconsc.hrl").
|
|
||||||
-include_lib("emqx/include/logger.hrl").
|
|
||||||
|
|
||||||
-behaviour(supervisor).
|
|
||||||
-behaviour(emqx_resource).
|
|
||||||
|
|
||||||
%% API and callbacks for supervisor
|
|
||||||
-export([
|
|
||||||
callback_mode/0,
|
|
||||||
start_link/0,
|
|
||||||
init/1,
|
|
||||||
create_bridge/1,
|
|
||||||
drop_bridge/1,
|
|
||||||
bridges/0
|
|
||||||
]).
|
|
||||||
|
|
||||||
-export([on_message_received/3]).
|
|
||||||
|
|
||||||
%% callbacks of behaviour emqx_resource
|
|
||||||
-export([
|
|
||||||
on_start/2,
|
|
||||||
on_stop/2,
|
|
||||||
on_query/3,
|
|
||||||
on_query_async/4,
|
|
||||||
on_get_status/2
|
|
||||||
]).
|
|
||||||
|
|
||||||
-export([on_async_result/2]).
|
|
||||||
|
|
||||||
-behaviour(hocon_schema).
|
|
||||||
|
|
||||||
-import(hoconsc, [mk/2]).
|
|
||||||
|
|
||||||
-export([
|
|
||||||
roots/0,
|
|
||||||
fields/1
|
|
||||||
]).
|
|
||||||
|
|
||||||
%%=====================================================================
|
|
||||||
%% Hocon schema
|
|
||||||
roots() ->
|
|
||||||
fields("config").
|
|
||||||
|
|
||||||
fields("config") ->
|
|
||||||
emqx_connector_mqtt_schema:fields("config");
|
|
||||||
fields("get") ->
|
|
||||||
[
|
|
||||||
{num_of_bridges,
|
|
||||||
mk(
|
|
||||||
integer(),
|
|
||||||
#{desc => ?DESC("num_of_bridges")}
|
|
||||||
)}
|
|
||||||
] ++ fields("post");
|
|
||||||
fields("put") ->
|
|
||||||
emqx_connector_mqtt_schema:fields("server_configs");
|
|
||||||
fields("post") ->
|
|
||||||
[
|
|
||||||
{type,
|
|
||||||
mk(
|
|
||||||
mqtt,
|
|
||||||
#{
|
|
||||||
required => true,
|
|
||||||
desc => ?DESC("type")
|
|
||||||
}
|
|
||||||
)},
|
|
||||||
{name,
|
|
||||||
mk(
|
|
||||||
binary(),
|
|
||||||
#{
|
|
||||||
required => true,
|
|
||||||
desc => ?DESC("name")
|
|
||||||
}
|
|
||||||
)}
|
|
||||||
] ++ fields("put").
|
|
||||||
|
|
||||||
%% ===================================================================
|
|
||||||
%% supervisor APIs
|
|
||||||
start_link() ->
|
|
||||||
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
|
|
||||||
|
|
||||||
init([]) ->
|
|
||||||
SupFlag = #{
|
|
||||||
strategy => one_for_one,
|
|
||||||
intensity => 100,
|
|
||||||
period => 10
|
|
||||||
},
|
|
||||||
{ok, {SupFlag, []}}.
|
|
||||||
|
|
||||||
bridge_spec(Config) ->
|
|
||||||
{Name, NConfig} = maps:take(name, Config),
|
|
||||||
#{
|
|
||||||
id => Name,
|
|
||||||
start => {emqx_connector_mqtt_worker, start_link, [Name, NConfig]},
|
|
||||||
restart => temporary,
|
|
||||||
shutdown => 1000
|
|
||||||
}.
|
|
||||||
|
|
||||||
-spec bridges() -> [{_Name, _Status}].
|
|
||||||
bridges() ->
|
|
||||||
[
|
|
||||||
{Name, emqx_connector_mqtt_worker:status(Name)}
|
|
||||||
|| {Name, _Pid, _, _} <- supervisor:which_children(?MODULE)
|
|
||||||
].
|
|
||||||
|
|
||||||
create_bridge(Config) ->
|
|
||||||
supervisor:start_child(?MODULE, bridge_spec(Config)).
|
|
||||||
|
|
||||||
drop_bridge(Name) ->
|
|
||||||
case supervisor:terminate_child(?MODULE, Name) of
|
|
||||||
ok ->
|
|
||||||
supervisor:delete_child(?MODULE, Name);
|
|
||||||
{error, not_found} ->
|
|
||||||
ok;
|
|
||||||
{error, Error} ->
|
|
||||||
{error, Error}
|
|
||||||
end.
|
|
||||||
|
|
||||||
%% ===================================================================
|
|
||||||
%% When use this bridge as a data source, ?MODULE:on_message_received will be called
|
|
||||||
%% if the bridge received msgs from the remote broker.
|
|
||||||
on_message_received(Msg, HookPoint, ResId) ->
|
|
||||||
emqx_resource_metrics:received_inc(ResId),
|
|
||||||
emqx:run_hook(HookPoint, [Msg]).
|
|
||||||
|
|
||||||
%% ===================================================================
|
|
||||||
callback_mode() -> async_if_possible.
|
|
||||||
|
|
||||||
on_start(InstanceId, Conf) ->
|
|
||||||
?SLOG(info, #{
|
|
||||||
msg => "starting_mqtt_connector",
|
|
||||||
connector => InstanceId,
|
|
||||||
config => emqx_utils:redact(Conf)
|
|
||||||
}),
|
|
||||||
BasicConf = basic_config(Conf),
|
|
||||||
BridgeConf = BasicConf#{
|
|
||||||
name => InstanceId,
|
|
||||||
clientid => clientid(InstanceId, Conf),
|
|
||||||
subscriptions => make_sub_confs(maps:get(ingress, Conf, undefined), Conf, InstanceId),
|
|
||||||
forwards => make_forward_confs(maps:get(egress, Conf, undefined))
|
|
||||||
},
|
|
||||||
case ?MODULE:create_bridge(BridgeConf) of
|
|
||||||
{ok, _Pid} ->
|
|
||||||
ensure_mqtt_worker_started(InstanceId, BridgeConf);
|
|
||||||
{error, {already_started, _Pid}} ->
|
|
||||||
ok = ?MODULE:drop_bridge(InstanceId),
|
|
||||||
{ok, _} = ?MODULE:create_bridge(BridgeConf),
|
|
||||||
ensure_mqtt_worker_started(InstanceId, BridgeConf);
|
|
||||||
{error, Reason} ->
|
|
||||||
{error, Reason}
|
|
||||||
end.
|
|
||||||
|
|
||||||
on_stop(_InstId, #{name := InstanceId}) ->
|
|
||||||
?SLOG(info, #{
|
|
||||||
msg => "stopping_mqtt_connector",
|
|
||||||
connector => InstanceId
|
|
||||||
}),
|
|
||||||
case ?MODULE:drop_bridge(InstanceId) of
|
|
||||||
ok ->
|
|
||||||
ok;
|
|
||||||
{error, not_found} ->
|
|
||||||
ok;
|
|
||||||
{error, Reason} ->
|
|
||||||
?SLOG(error, #{
|
|
||||||
msg => "stop_mqtt_connector_error",
|
|
||||||
connector => InstanceId,
|
|
||||||
reason => Reason
|
|
||||||
})
|
|
||||||
end.
|
|
||||||
|
|
||||||
on_query(_InstId, {send_message, Msg}, #{name := InstanceId}) ->
|
|
||||||
?TRACE("QUERY", "send_msg_to_remote_node", #{message => Msg, connector => InstanceId}),
|
|
||||||
case emqx_connector_mqtt_worker:send_to_remote(InstanceId, Msg) of
|
|
||||||
ok ->
|
|
||||||
ok;
|
|
||||||
{error, Reason} ->
|
|
||||||
classify_error(Reason)
|
|
||||||
end.
|
|
||||||
|
|
||||||
on_query_async(_InstId, {send_message, Msg}, CallbackIn, #{name := InstanceId}) ->
|
|
||||||
?TRACE("QUERY", "async_send_msg_to_remote_node", #{message => Msg, connector => InstanceId}),
|
|
||||||
Callback = {fun on_async_result/2, [CallbackIn]},
|
|
||||||
case emqx_connector_mqtt_worker:send_to_remote_async(InstanceId, Msg, Callback) of
|
|
||||||
ok ->
|
|
||||||
ok;
|
|
||||||
{ok, Pid} ->
|
|
||||||
{ok, Pid};
|
|
||||||
{error, Reason} ->
|
|
||||||
classify_error(Reason)
|
|
||||||
end.
|
|
||||||
|
|
||||||
on_async_result(Callback, ok) ->
|
|
||||||
apply_callback_function(Callback, ok);
|
|
||||||
on_async_result(Callback, {ok, _} = Ok) ->
|
|
||||||
apply_callback_function(Callback, Ok);
|
|
||||||
on_async_result(Callback, {error, Reason}) ->
|
|
||||||
apply_callback_function(Callback, classify_error(Reason)).
|
|
||||||
|
|
||||||
apply_callback_function(F, Result) when is_function(F) ->
|
|
||||||
erlang:apply(F, [Result]);
|
|
||||||
apply_callback_function({F, A}, Result) when is_function(F), is_list(A) ->
|
|
||||||
erlang:apply(F, A ++ [Result]);
|
|
||||||
apply_callback_function({M, F, A}, Result) when is_atom(M), is_atom(F), is_list(A) ->
|
|
||||||
erlang:apply(M, F, A ++ [Result]).
|
|
||||||
|
|
||||||
on_get_status(_InstId, #{name := InstanceId}) ->
|
|
||||||
emqx_connector_mqtt_worker:status(InstanceId).
|
|
||||||
|
|
||||||
classify_error(disconnected = Reason) ->
|
|
||||||
{error, {recoverable_error, Reason}};
|
|
||||||
classify_error({disconnected, _RC, _} = Reason) ->
|
|
||||||
{error, {recoverable_error, Reason}};
|
|
||||||
classify_error({shutdown, _} = Reason) ->
|
|
||||||
{error, {recoverable_error, Reason}};
|
|
||||||
classify_error(shutdown = Reason) ->
|
|
||||||
{error, {recoverable_error, Reason}};
|
|
||||||
classify_error(Reason) ->
|
|
||||||
{error, {unrecoverable_error, Reason}}.
|
|
||||||
|
|
||||||
ensure_mqtt_worker_started(InstanceId, BridgeConf) ->
|
|
||||||
case emqx_connector_mqtt_worker:connect(InstanceId) of
|
|
||||||
{ok, Properties} ->
|
|
||||||
{ok, #{name => InstanceId, config => BridgeConf, props => Properties}};
|
|
||||||
{error, Reason} ->
|
|
||||||
{error, Reason}
|
|
||||||
end.
|
|
||||||
|
|
||||||
make_sub_confs(EmptyMap, _Conf, _) when map_size(EmptyMap) == 0 ->
|
|
||||||
undefined;
|
|
||||||
make_sub_confs(undefined, _Conf, _) ->
|
|
||||||
undefined;
|
|
||||||
make_sub_confs(SubRemoteConf, Conf, ResourceId) ->
|
|
||||||
case maps:find(hookpoint, Conf) of
|
|
||||||
error ->
|
|
||||||
error({no_hookpoint_provided, Conf});
|
|
||||||
{ok, HookPoint} ->
|
|
||||||
MFA = {?MODULE, on_message_received, [HookPoint, ResourceId]},
|
|
||||||
SubRemoteConf#{on_message_received => MFA}
|
|
||||||
end.
|
|
||||||
|
|
||||||
make_forward_confs(EmptyMap) when map_size(EmptyMap) == 0 ->
|
|
||||||
undefined;
|
|
||||||
make_forward_confs(undefined) ->
|
|
||||||
undefined;
|
|
||||||
make_forward_confs(FrowardConf) ->
|
|
||||||
FrowardConf.
|
|
||||||
|
|
||||||
basic_config(
|
|
||||||
#{
|
|
||||||
server := Server,
|
|
||||||
proto_ver := ProtoVer,
|
|
||||||
bridge_mode := BridgeMode,
|
|
||||||
clean_start := CleanStart,
|
|
||||||
keepalive := KeepAlive,
|
|
||||||
retry_interval := RetryIntv,
|
|
||||||
max_inflight := MaxInflight,
|
|
||||||
ssl := #{enable := EnableSsl} = Ssl
|
|
||||||
} = Conf
|
|
||||||
) ->
|
|
||||||
BasicConf = #{
|
|
||||||
%% connection opts
|
|
||||||
server => Server,
|
|
||||||
%% 30s
|
|
||||||
connect_timeout => 30,
|
|
||||||
auto_reconnect => true,
|
|
||||||
proto_ver => ProtoVer,
|
|
||||||
%% Opening bridge_mode will form a non-standard mqtt connection message.
|
|
||||||
%% A load balancing server (such as haproxy) is often set up before the emqx broker server.
|
|
||||||
%% When the load balancing server enables mqtt connection packet inspection,
|
|
||||||
%% non-standard mqtt connection packets will be filtered out by LB.
|
|
||||||
%% So let's disable bridge_mode.
|
|
||||||
bridge_mode => BridgeMode,
|
|
||||||
keepalive => ms_to_s(KeepAlive),
|
|
||||||
clean_start => CleanStart,
|
|
||||||
retry_interval => RetryIntv,
|
|
||||||
max_inflight => MaxInflight,
|
|
||||||
ssl => EnableSsl,
|
|
||||||
ssl_opts => maps:to_list(maps:remove(enable, Ssl))
|
|
||||||
},
|
|
||||||
maybe_put_fields([username, password], Conf, BasicConf).
|
|
||||||
|
|
||||||
maybe_put_fields(Fields, Conf, Acc0) ->
|
|
||||||
lists:foldl(
|
|
||||||
fun(Key, Acc) ->
|
|
||||||
case maps:find(Key, Conf) of
|
|
||||||
error -> Acc;
|
|
||||||
{ok, Val} -> Acc#{Key => Val}
|
|
||||||
end
|
|
||||||
end,
|
|
||||||
Acc0,
|
|
||||||
Fields
|
|
||||||
).
|
|
||||||
|
|
||||||
ms_to_s(Ms) ->
|
|
||||||
erlang:ceil(Ms / 1000).
|
|
||||||
|
|
||||||
clientid(Id, _Conf = #{clientid_prefix := Prefix = <<_/binary>>}) ->
|
|
||||||
iolist_to_binary([Prefix, ":", Id, ":", atom_to_list(node())]);
|
|
||||||
clientid(Id, _Conf) ->
|
|
||||||
iolist_to_binary([Id, ":", atom_to_list(node())]).
|
|
|
@ -33,7 +33,6 @@ init([]) ->
|
||||||
period => 20
|
period => 20
|
||||||
},
|
},
|
||||||
ChildSpecs = [
|
ChildSpecs = [
|
||||||
child_spec(emqx_connector_mqtt),
|
|
||||||
child_spec(emqx_connector_jwt_sup)
|
child_spec(emqx_connector_jwt_sup)
|
||||||
],
|
],
|
||||||
{ok, {SupFlags, ChildSpecs}}.
|
{ok, {SupFlags, ChildSpecs}}.
|
||||||
|
|
|
@ -1,168 +0,0 @@
|
||||||
%%--------------------------------------------------------------------
|
|
||||||
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
|
||||||
%%
|
|
||||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
%% you may not use this file except in compliance with the License.
|
|
||||||
%% You may obtain a copy of the License at
|
|
||||||
%%
|
|
||||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
%%
|
|
||||||
%% Unless required by applicable law or agreed to in writing, software
|
|
||||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
%% See the License for the specific language governing permissions and
|
|
||||||
%% limitations under the License.
|
|
||||||
%%--------------------------------------------------------------------
|
|
||||||
|
|
||||||
-module(emqx_connector_mqtt_msg).
|
|
||||||
|
|
||||||
-export([
|
|
||||||
to_binary/1,
|
|
||||||
from_binary/1,
|
|
||||||
make_pub_vars/2,
|
|
||||||
to_remote_msg/2,
|
|
||||||
to_broker_msg/3,
|
|
||||||
estimate_size/1
|
|
||||||
]).
|
|
||||||
|
|
||||||
-export([
|
|
||||||
replace_vars_in_str/2,
|
|
||||||
replace_simple_var/2
|
|
||||||
]).
|
|
||||||
|
|
||||||
-export_type([msg/0]).
|
|
||||||
|
|
||||||
-include_lib("emqx/include/emqx.hrl").
|
|
||||||
|
|
||||||
-include_lib("emqtt/include/emqtt.hrl").
|
|
||||||
|
|
||||||
-type msg() :: emqx_types:message().
|
|
||||||
-type exp_msg() :: emqx_types:message() | #mqtt_msg{}.
|
|
||||||
-type remote_config() :: #{
|
|
||||||
topic := binary(),
|
|
||||||
qos := original | integer(),
|
|
||||||
retain := original | boolean(),
|
|
||||||
payload := binary()
|
|
||||||
}.
|
|
||||||
-type variables() :: #{
|
|
||||||
mountpoint := undefined | binary(),
|
|
||||||
remote := remote_config()
|
|
||||||
}.
|
|
||||||
|
|
||||||
make_pub_vars(_, undefined) ->
|
|
||||||
undefined;
|
|
||||||
make_pub_vars(Mountpoint, Conf) when is_map(Conf) ->
|
|
||||||
Conf#{mountpoint => Mountpoint}.
|
|
||||||
|
|
||||||
%% @doc Make export format:
|
|
||||||
%% 1. Mount topic to a prefix
|
|
||||||
%% 2. Fix QoS to 1
|
|
||||||
%% @end
|
|
||||||
%% Shame that we have to know the callback module here
|
|
||||||
%% would be great if we can get rid of #mqtt_msg{} record
|
|
||||||
%% and use #message{} in all places.
|
|
||||||
-spec to_remote_msg(msg() | map(), variables()) ->
|
|
||||||
exp_msg().
|
|
||||||
to_remote_msg(#message{flags = Flags0} = Msg, Vars) ->
|
|
||||||
Retain0 = maps:get(retain, Flags0, false),
|
|
||||||
{Columns, _} = emqx_rule_events:eventmsg_publish(Msg),
|
|
||||||
MapMsg = maps:put(retain, Retain0, Columns),
|
|
||||||
to_remote_msg(MapMsg, Vars);
|
|
||||||
to_remote_msg(MapMsg, #{
|
|
||||||
remote := #{
|
|
||||||
topic := TopicToken,
|
|
||||||
qos := QoSToken,
|
|
||||||
retain := RetainToken
|
|
||||||
} = Remote,
|
|
||||||
mountpoint := Mountpoint
|
|
||||||
}) when is_map(MapMsg) ->
|
|
||||||
Topic = replace_vars_in_str(TopicToken, MapMsg),
|
|
||||||
Payload = process_payload(Remote, MapMsg),
|
|
||||||
QoS = replace_simple_var(QoSToken, MapMsg),
|
|
||||||
Retain = replace_simple_var(RetainToken, MapMsg),
|
|
||||||
PubProps = maps:get(pub_props, MapMsg, #{}),
|
|
||||||
#mqtt_msg{
|
|
||||||
qos = QoS,
|
|
||||||
retain = Retain,
|
|
||||||
topic = topic(Mountpoint, Topic),
|
|
||||||
props = emqx_utils:pub_props_to_packet(PubProps),
|
|
||||||
payload = Payload
|
|
||||||
};
|
|
||||||
to_remote_msg(#message{topic = Topic} = Msg, #{mountpoint := Mountpoint}) ->
|
|
||||||
Msg#message{topic = topic(Mountpoint, Topic)}.
|
|
||||||
|
|
||||||
%% published from remote node over a MQTT connection
|
|
||||||
to_broker_msg(Msg, Vars, undefined) ->
|
|
||||||
to_broker_msg(Msg, Vars, #{});
|
|
||||||
to_broker_msg(
|
|
||||||
#{dup := Dup} = MapMsg,
|
|
||||||
#{
|
|
||||||
local := #{
|
|
||||||
topic := TopicToken,
|
|
||||||
qos := QoSToken,
|
|
||||||
retain := RetainToken
|
|
||||||
} = Local,
|
|
||||||
mountpoint := Mountpoint
|
|
||||||
},
|
|
||||||
Props
|
|
||||||
) ->
|
|
||||||
Topic = replace_vars_in_str(TopicToken, MapMsg),
|
|
||||||
Payload = process_payload(Local, MapMsg),
|
|
||||||
QoS = replace_simple_var(QoSToken, MapMsg),
|
|
||||||
Retain = replace_simple_var(RetainToken, MapMsg),
|
|
||||||
PubProps = maps:get(pub_props, MapMsg, #{}),
|
|
||||||
set_headers(
|
|
||||||
Props#{properties => emqx_utils:pub_props_to_packet(PubProps)},
|
|
||||||
emqx_message:set_flags(
|
|
||||||
#{dup => Dup, retain => Retain},
|
|
||||||
emqx_message:make(bridge, QoS, topic(Mountpoint, Topic), Payload)
|
|
||||||
)
|
|
||||||
).
|
|
||||||
|
|
||||||
process_payload(From, MapMsg) ->
|
|
||||||
do_process_payload(maps:get(payload, From, undefined), MapMsg).
|
|
||||||
|
|
||||||
do_process_payload(undefined, Msg) ->
|
|
||||||
emqx_utils_json:encode(Msg);
|
|
||||||
do_process_payload(Tks, Msg) ->
|
|
||||||
replace_vars_in_str(Tks, Msg).
|
|
||||||
|
|
||||||
%% Replace a string contains vars to another string in which the placeholders are replace by the
|
|
||||||
%% corresponding values. For example, given "a: ${var}", if the var=1, the result string will be:
|
|
||||||
%% "a: 1".
|
|
||||||
replace_vars_in_str(Tokens, Data) when is_list(Tokens) ->
|
|
||||||
emqx_plugin_libs_rule:proc_tmpl(Tokens, Data, #{return => full_binary});
|
|
||||||
replace_vars_in_str(Val, _Data) ->
|
|
||||||
Val.
|
|
||||||
|
|
||||||
%% Replace a simple var to its value. For example, given "${var}", if the var=1, then the result
|
|
||||||
%% value will be an integer 1.
|
|
||||||
replace_simple_var(Tokens, Data) when is_list(Tokens) ->
|
|
||||||
[Var] = emqx_plugin_libs_rule:proc_tmpl(Tokens, Data, #{return => rawlist}),
|
|
||||||
Var;
|
|
||||||
replace_simple_var(Val, _Data) ->
|
|
||||||
Val.
|
|
||||||
|
|
||||||
%% @doc Make `binary()' in order to make iodata to be persisted on disk.
|
|
||||||
-spec to_binary(msg()) -> binary().
|
|
||||||
to_binary(Msg) -> term_to_binary(Msg).
|
|
||||||
|
|
||||||
%% @doc Unmarshal binary into `msg()'.
|
|
||||||
-spec from_binary(binary()) -> msg().
|
|
||||||
from_binary(Bin) -> binary_to_term(Bin).
|
|
||||||
|
|
||||||
%% @doc Estimate the size of a message.
|
|
||||||
%% Count only the topic length + payload size
|
|
||||||
%% There is no topic and payload for event message. So count all `Msg` term
|
|
||||||
-spec estimate_size(msg()) -> integer().
|
|
||||||
estimate_size(#message{topic = Topic, payload = Payload}) ->
|
|
||||||
size(Topic) + size(Payload);
|
|
||||||
estimate_size(#{topic := Topic, payload := Payload}) ->
|
|
||||||
size(Topic) + size(Payload);
|
|
||||||
estimate_size(Term) ->
|
|
||||||
erlang:external_size(Term).
|
|
||||||
|
|
||||||
set_headers(Val, Msg) ->
|
|
||||||
emqx_message:set_headers(Val, Msg).
|
|
||||||
topic(undefined, Topic) -> Topic;
|
|
||||||
topic(Prefix, Topic) -> emqx_topic:prepend(Prefix, Topic).
|
|
|
@ -1,465 +0,0 @@
|
||||||
%%--------------------------------------------------------------------
|
|
||||||
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
|
||||||
%%
|
|
||||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
%% you may not use this file except in compliance with the License.
|
|
||||||
%% You may obtain a copy of the License at
|
|
||||||
%%
|
|
||||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
%%
|
|
||||||
%% Unless required by applicable law or agreed to in writing, software
|
|
||||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
%% See the License for the specific language governing permissions and
|
|
||||||
%% limitations under the License.
|
|
||||||
%%--------------------------------------------------------------------
|
|
||||||
|
|
||||||
%% @doc Bridge works in two layers (1) batching layer (2) transport layer
|
|
||||||
%% The `bridge' batching layer collects local messages in batches and sends over
|
|
||||||
%% to remote MQTT node/cluster via `connection' transport layer.
|
|
||||||
%% In case `REMOTE' is also an EMQX node, `connection' is recommended to be
|
|
||||||
%% the `gen_rpc' based implementation `emqx_bridge_rpc'. Otherwise `connection'
|
|
||||||
%% has to be `emqx_connector_mqtt_mod'.
|
|
||||||
%%
|
|
||||||
%% ```
|
|
||||||
%% +------+ +--------+
|
|
||||||
%% | EMQX | | REMOTE |
|
|
||||||
%% | | | |
|
|
||||||
%% | (bridge) <==(connection)==> | |
|
|
||||||
%% | | | |
|
|
||||||
%% | | | |
|
|
||||||
%% +------+ +--------+
|
|
||||||
%% '''
|
|
||||||
%%
|
|
||||||
%%
|
|
||||||
%% This module implements 2 kinds of APIs with regards to batching and
|
|
||||||
%% messaging protocol. (1) A `gen_statem' based local batch collector;
|
|
||||||
%% (2) APIs for incoming remote batches/messages.
|
|
||||||
%%
|
|
||||||
%% Batch collector state diagram
|
|
||||||
%%
|
|
||||||
%% [idle] --(0) --> [connecting] --(2)--> [connected]
|
|
||||||
%% | ^ |
|
|
||||||
%% | | |
|
|
||||||
%% '--(1)---'--------(3)------'
|
|
||||||
%%
|
|
||||||
%% (0): auto or manual start
|
|
||||||
%% (1): retry timeout
|
|
||||||
%% (2): successfully connected to remote node/cluster
|
|
||||||
%% (3): received {disconnected, Reason} OR
|
|
||||||
%% failed to send to remote node/cluster.
|
|
||||||
%%
|
|
||||||
%% NOTE: A bridge worker may subscribe to multiple (including wildcard)
|
|
||||||
%% local topics, and the underlying `emqx_bridge_connect' may subscribe to
|
|
||||||
%% multiple remote topics, however, worker/connections are not designed
|
|
||||||
%% to support automatic load-balancing, i.e. in case it can not keep up
|
|
||||||
%% with the amount of messages coming in, administrator should split and
|
|
||||||
%% balance topics between worker/connections manually.
|
|
||||||
%%
|
|
||||||
%% NOTES:
|
|
||||||
%% * Local messages are all normalised to QoS-1 when exporting to remote
|
|
||||||
|
|
||||||
-module(emqx_connector_mqtt_worker).
|
|
||||||
|
|
||||||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
|
||||||
-include_lib("emqx/include/logger.hrl").
|
|
||||||
|
|
||||||
%% APIs
|
|
||||||
-export([
|
|
||||||
start_link/2,
|
|
||||||
stop/1
|
|
||||||
]).
|
|
||||||
|
|
||||||
%% management APIs
|
|
||||||
-export([
|
|
||||||
connect/1,
|
|
||||||
status/1,
|
|
||||||
ping/1,
|
|
||||||
info/1,
|
|
||||||
send_to_remote/2,
|
|
||||||
send_to_remote_async/3
|
|
||||||
]).
|
|
||||||
|
|
||||||
-export([handle_publish/3]).
|
|
||||||
-export([handle_disconnect/1]).
|
|
||||||
|
|
||||||
-export_type([
|
|
||||||
config/0,
|
|
||||||
ack_ref/0
|
|
||||||
]).
|
|
||||||
|
|
||||||
-type name() :: term().
|
|
||||||
% -type qos() :: emqx_types:qos().
|
|
||||||
-type config() :: map().
|
|
||||||
-type ack_ref() :: term().
|
|
||||||
% -type topic() :: emqx_types:topic().
|
|
||||||
|
|
||||||
-include_lib("emqx/include/logger.hrl").
|
|
||||||
-include_lib("emqx/include/emqx_mqtt.hrl").
|
|
||||||
|
|
||||||
-define(REF(Name), {via, gproc, ?NAME(Name)}).
|
|
||||||
-define(NAME(Name), {n, l, Name}).
|
|
||||||
|
|
||||||
%% @doc Start a bridge worker. Supported configs:
|
|
||||||
%% mountpoint: The topic mount point for messages sent to remote node/cluster
|
|
||||||
%% `undefined', `<<>>' or `""' to disable
|
|
||||||
%% forwards: Local topics to subscribe.
|
|
||||||
%%
|
|
||||||
%% Find more connection specific configs in the callback modules
|
|
||||||
%% of emqx_bridge_connect behaviour.
|
|
||||||
-spec start_link(name(), map()) ->
|
|
||||||
{ok, pid()} | {error, _Reason}.
|
|
||||||
start_link(Name, BridgeOpts) ->
|
|
||||||
?SLOG(debug, #{
|
|
||||||
msg => "client_starting",
|
|
||||||
name => Name,
|
|
||||||
options => BridgeOpts
|
|
||||||
}),
|
|
||||||
Conf = init_config(Name, BridgeOpts),
|
|
||||||
Options = mk_client_options(Conf, BridgeOpts),
|
|
||||||
case emqtt:start_link(Options) of
|
|
||||||
{ok, Pid} ->
|
|
||||||
true = gproc:reg_other(?NAME(Name), Pid, Conf),
|
|
||||||
{ok, Pid};
|
|
||||||
{error, Reason} = Error ->
|
|
||||||
?SLOG(error, #{
|
|
||||||
msg => "client_start_failed",
|
|
||||||
config => emqx_utils:redact(BridgeOpts),
|
|
||||||
reason => Reason
|
|
||||||
}),
|
|
||||||
Error
|
|
||||||
end.
|
|
||||||
|
|
||||||
init_config(Name, Opts) ->
|
|
||||||
Mountpoint = maps:get(forward_mountpoint, Opts, undefined),
|
|
||||||
Subscriptions = maps:get(subscriptions, Opts, undefined),
|
|
||||||
Forwards = maps:get(forwards, Opts, undefined),
|
|
||||||
#{
|
|
||||||
mountpoint => format_mountpoint(Mountpoint),
|
|
||||||
subscriptions => pre_process_subscriptions(Subscriptions, Name, Opts),
|
|
||||||
forwards => pre_process_forwards(Forwards)
|
|
||||||
}.
|
|
||||||
|
|
||||||
mk_client_options(Conf, BridgeOpts) ->
|
|
||||||
Server = iolist_to_binary(maps:get(server, BridgeOpts)),
|
|
||||||
HostPort = emqx_connector_mqtt_schema:parse_server(Server),
|
|
||||||
Mountpoint = maps:get(receive_mountpoint, BridgeOpts, undefined),
|
|
||||||
Subscriptions = maps:get(subscriptions, Conf),
|
|
||||||
Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Subscriptions),
|
|
||||||
CleanStart =
|
|
||||||
case Subscriptions of
|
|
||||||
#{remote := _} ->
|
|
||||||
maps:get(clean_start, BridgeOpts);
|
|
||||||
undefined ->
|
|
||||||
%% NOTE
|
|
||||||
%% We are ignoring the user configuration here because there's currently no reliable way
|
|
||||||
%% to ensure proper session recovery according to the MQTT spec.
|
|
||||||
true
|
|
||||||
end,
|
|
||||||
Opts = maps:without(
|
|
||||||
[
|
|
||||||
address,
|
|
||||||
auto_reconnect,
|
|
||||||
conn_type,
|
|
||||||
mountpoint,
|
|
||||||
forwards,
|
|
||||||
receive_mountpoint,
|
|
||||||
subscriptions
|
|
||||||
],
|
|
||||||
BridgeOpts
|
|
||||||
),
|
|
||||||
Opts#{
|
|
||||||
msg_handler => mk_client_event_handler(Vars, #{server => Server}),
|
|
||||||
hosts => [HostPort],
|
|
||||||
clean_start => CleanStart,
|
|
||||||
force_ping => true,
|
|
||||||
proto_ver => maps:get(proto_ver, BridgeOpts, v4)
|
|
||||||
}.
|
|
||||||
|
|
||||||
mk_client_event_handler(Vars, Opts) when Vars /= undefined ->
|
|
||||||
#{
|
|
||||||
publish => {fun ?MODULE:handle_publish/3, [Vars, Opts]},
|
|
||||||
disconnected => {fun ?MODULE:handle_disconnect/1, []}
|
|
||||||
};
|
|
||||||
mk_client_event_handler(undefined, _Opts) ->
|
|
||||||
undefined.
|
|
||||||
|
|
||||||
connect(Name) ->
|
|
||||||
#{subscriptions := Subscriptions} = get_config(Name),
|
|
||||||
case emqtt:connect(get_pid(Name)) of
|
|
||||||
{ok, Properties} ->
|
|
||||||
case subscribe_remote_topics(Name, Subscriptions) of
|
|
||||||
ok ->
|
|
||||||
{ok, Properties};
|
|
||||||
{ok, _, _RCs} ->
|
|
||||||
{ok, Properties};
|
|
||||||
{error, Reason} = Error ->
|
|
||||||
?SLOG(error, #{
|
|
||||||
msg => "client_subscribe_failed",
|
|
||||||
subscriptions => Subscriptions,
|
|
||||||
reason => Reason
|
|
||||||
}),
|
|
||||||
Error
|
|
||||||
end;
|
|
||||||
{error, Reason} = Error ->
|
|
||||||
?SLOG(warning, #{
|
|
||||||
msg => "client_connect_failed",
|
|
||||||
reason => Reason,
|
|
||||||
name => Name
|
|
||||||
}),
|
|
||||||
Error
|
|
||||||
end.
|
|
||||||
subscribe_remote_topics(Ref, #{remote := #{topic := FromTopic, qos := QoS}}) ->
|
|
||||||
emqtt:subscribe(ref(Ref), FromTopic, QoS);
|
|
||||||
subscribe_remote_topics(_Ref, undefined) ->
|
|
||||||
ok.
|
|
||||||
|
|
||||||
stop(Ref) ->
|
|
||||||
emqtt:stop(ref(Ref)).
|
|
||||||
|
|
||||||
info(Ref) ->
|
|
||||||
emqtt:info(ref(Ref)).
|
|
||||||
|
|
||||||
status(Ref) ->
|
|
||||||
try
|
|
||||||
case proplists:get_value(socket, info(Ref)) of
|
|
||||||
Socket when Socket /= undefined ->
|
|
||||||
connected;
|
|
||||||
undefined ->
|
|
||||||
connecting
|
|
||||||
end
|
|
||||||
catch
|
|
||||||
exit:{noproc, _} ->
|
|
||||||
disconnected
|
|
||||||
end.
|
|
||||||
|
|
||||||
ping(Ref) ->
|
|
||||||
emqtt:ping(ref(Ref)).
|
|
||||||
|
|
||||||
send_to_remote(Name, MsgIn) ->
|
|
||||||
trycall(fun() -> do_send(Name, export_msg(Name, MsgIn)) end).
|
|
||||||
|
|
||||||
do_send(Name, {true, Msg}) ->
|
|
||||||
case emqtt:publish(get_pid(Name), Msg) of
|
|
||||||
ok ->
|
|
||||||
ok;
|
|
||||||
{ok, #{reason_code := RC}} when
|
|
||||||
RC =:= ?RC_SUCCESS;
|
|
||||||
RC =:= ?RC_NO_MATCHING_SUBSCRIBERS
|
|
||||||
->
|
|
||||||
ok;
|
|
||||||
{ok, #{reason_code := RC, reason_code_name := Reason}} ->
|
|
||||||
?SLOG(warning, #{
|
|
||||||
msg => "remote_publish_failed",
|
|
||||||
message => Msg,
|
|
||||||
reason_code => RC,
|
|
||||||
reason_code_name => Reason
|
|
||||||
}),
|
|
||||||
{error, Reason};
|
|
||||||
{error, Reason} ->
|
|
||||||
?SLOG(info, #{
|
|
||||||
msg => "client_failed",
|
|
||||||
reason => Reason
|
|
||||||
}),
|
|
||||||
{error, Reason}
|
|
||||||
end;
|
|
||||||
do_send(_Name, false) ->
|
|
||||||
ok.
|
|
||||||
|
|
||||||
send_to_remote_async(Name, MsgIn, Callback) ->
|
|
||||||
trycall(fun() -> do_send_async(Name, export_msg(Name, MsgIn), Callback) end).
|
|
||||||
|
|
||||||
do_send_async(Name, {true, Msg}, Callback) ->
|
|
||||||
Pid = get_pid(Name),
|
|
||||||
ok = emqtt:publish_async(Pid, Msg, _Timeout = infinity, Callback),
|
|
||||||
{ok, Pid};
|
|
||||||
do_send_async(_Name, false, _Callback) ->
|
|
||||||
ok.
|
|
||||||
|
|
||||||
ref(Pid) when is_pid(Pid) ->
|
|
||||||
Pid;
|
|
||||||
ref(Term) ->
|
|
||||||
?REF(Term).
|
|
||||||
|
|
||||||
trycall(Fun) ->
|
|
||||||
try
|
|
||||||
Fun()
|
|
||||||
catch
|
|
||||||
throw:noproc ->
|
|
||||||
{error, disconnected};
|
|
||||||
exit:{noproc, _} ->
|
|
||||||
{error, disconnected}
|
|
||||||
end.
|
|
||||||
|
|
||||||
format_mountpoint(undefined) ->
|
|
||||||
undefined;
|
|
||||||
format_mountpoint(Prefix) ->
|
|
||||||
binary:replace(iolist_to_binary(Prefix), <<"${node}">>, atom_to_binary(node(), utf8)).
|
|
||||||
|
|
||||||
pre_process_subscriptions(undefined, _, _) ->
|
|
||||||
undefined;
|
|
||||||
pre_process_subscriptions(
|
|
||||||
#{remote := RC, local := LC} = Conf,
|
|
||||||
BridgeName,
|
|
||||||
BridgeOpts
|
|
||||||
) when is_map(Conf) ->
|
|
||||||
Conf#{
|
|
||||||
remote => pre_process_in_remote(RC, BridgeName, BridgeOpts),
|
|
||||||
local => pre_process_in_out_common(LC)
|
|
||||||
};
|
|
||||||
pre_process_subscriptions(Conf, _, _) when is_map(Conf) ->
|
|
||||||
%% have no 'local' field in the config
|
|
||||||
undefined.
|
|
||||||
|
|
||||||
pre_process_forwards(undefined) ->
|
|
||||||
undefined;
|
|
||||||
pre_process_forwards(#{remote := RC} = Conf) when is_map(Conf) ->
|
|
||||||
Conf#{remote => pre_process_in_out_common(RC)};
|
|
||||||
pre_process_forwards(Conf) when is_map(Conf) ->
|
|
||||||
%% have no 'remote' field in the config
|
|
||||||
undefined.
|
|
||||||
|
|
||||||
pre_process_in_out_common(Conf0) ->
|
|
||||||
Conf1 = pre_process_conf(topic, Conf0),
|
|
||||||
Conf2 = pre_process_conf(qos, Conf1),
|
|
||||||
Conf3 = pre_process_conf(payload, Conf2),
|
|
||||||
pre_process_conf(retain, Conf3).
|
|
||||||
|
|
||||||
pre_process_conf(Key, Conf) ->
|
|
||||||
case maps:find(Key, Conf) of
|
|
||||||
error ->
|
|
||||||
Conf;
|
|
||||||
{ok, Val} when is_binary(Val) ->
|
|
||||||
Conf#{Key => emqx_plugin_libs_rule:preproc_tmpl(Val)};
|
|
||||||
{ok, Val} ->
|
|
||||||
Conf#{Key => Val}
|
|
||||||
end.
|
|
||||||
|
|
||||||
pre_process_in_remote(#{qos := QoSIn} = Conf, BridgeName, BridgeOpts) ->
|
|
||||||
QoS = downgrade_ingress_qos(QoSIn),
|
|
||||||
case QoS of
|
|
||||||
QoSIn ->
|
|
||||||
ok;
|
|
||||||
_ ->
|
|
||||||
?SLOG(warning, #{
|
|
||||||
msg => "downgraded_unsupported_ingress_qos",
|
|
||||||
qos_configured => QoSIn,
|
|
||||||
qos_used => QoS,
|
|
||||||
name => BridgeName,
|
|
||||||
options => BridgeOpts
|
|
||||||
})
|
|
||||||
end,
|
|
||||||
Conf#{qos => QoS}.
|
|
||||||
|
|
||||||
downgrade_ingress_qos(2) ->
|
|
||||||
1;
|
|
||||||
downgrade_ingress_qos(QoS) ->
|
|
||||||
QoS.
|
|
||||||
|
|
||||||
get_pid(Name) ->
|
|
||||||
case gproc:where(?NAME(Name)) of
|
|
||||||
Pid when is_pid(Pid) ->
|
|
||||||
Pid;
|
|
||||||
undefined ->
|
|
||||||
throw(noproc)
|
|
||||||
end.
|
|
||||||
|
|
||||||
get_config(Name) ->
|
|
||||||
try
|
|
||||||
gproc:lookup_value(?NAME(Name))
|
|
||||||
catch
|
|
||||||
error:badarg ->
|
|
||||||
throw(noproc)
|
|
||||||
end.
|
|
||||||
|
|
||||||
export_msg(Name, Msg) ->
|
|
||||||
case get_config(Name) of
|
|
||||||
#{forwards := Forwards = #{}, mountpoint := Mountpoint} ->
|
|
||||||
{true, export_msg(Mountpoint, Forwards, Msg)};
|
|
||||||
#{forwards := undefined} ->
|
|
||||||
?SLOG(error, #{
|
|
||||||
msg => "forwarding_unavailable",
|
|
||||||
message => Msg,
|
|
||||||
reason => "egress is not configured"
|
|
||||||
}),
|
|
||||||
false
|
|
||||||
end.
|
|
||||||
|
|
||||||
export_msg(Mountpoint, Forwards, Msg) ->
|
|
||||||
Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Forwards),
|
|
||||||
emqx_connector_mqtt_msg:to_remote_msg(Msg, Vars).
|
|
||||||
|
|
||||||
%%
|
|
||||||
|
|
||||||
handle_publish(#{properties := Props} = MsgIn, Vars, Opts) ->
|
|
||||||
Msg = import_msg(MsgIn, Opts),
|
|
||||||
?SLOG(debug, #{
|
|
||||||
msg => "publish_local",
|
|
||||||
message => Msg,
|
|
||||||
vars => Vars
|
|
||||||
}),
|
|
||||||
case Vars of
|
|
||||||
#{on_message_received := {Mod, Func, Args}} ->
|
|
||||||
_ = erlang:apply(Mod, Func, [Msg | Args]);
|
|
||||||
_ ->
|
|
||||||
ok
|
|
||||||
end,
|
|
||||||
maybe_publish_local(Msg, Vars, Props).
|
|
||||||
|
|
||||||
handle_disconnect(_Reason) ->
|
|
||||||
ok.
|
|
||||||
|
|
||||||
maybe_publish_local(Msg, Vars, Props) ->
|
|
||||||
case emqx_utils_maps:deep_get([local, topic], Vars, undefined) of
|
|
||||||
%% local topic is not set, discard it
|
|
||||||
undefined ->
|
|
||||||
ok;
|
|
||||||
_ ->
|
|
||||||
emqx_broker:publish(emqx_connector_mqtt_msg:to_broker_msg(Msg, Vars, Props))
|
|
||||||
end.
|
|
||||||
|
|
||||||
import_msg(
|
|
||||||
#{
|
|
||||||
dup := Dup,
|
|
||||||
payload := Payload,
|
|
||||||
properties := Props,
|
|
||||||
qos := QoS,
|
|
||||||
retain := Retain,
|
|
||||||
topic := Topic
|
|
||||||
},
|
|
||||||
#{server := Server}
|
|
||||||
) ->
|
|
||||||
#{
|
|
||||||
id => emqx_guid:to_hexstr(emqx_guid:gen()),
|
|
||||||
server => Server,
|
|
||||||
payload => Payload,
|
|
||||||
topic => Topic,
|
|
||||||
qos => QoS,
|
|
||||||
dup => Dup,
|
|
||||||
retain => Retain,
|
|
||||||
pub_props => printable_maps(Props),
|
|
||||||
message_received_at => erlang:system_time(millisecond)
|
|
||||||
}.
|
|
||||||
|
|
||||||
printable_maps(undefined) ->
|
|
||||||
#{};
|
|
||||||
printable_maps(Headers) ->
|
|
||||||
maps:fold(
|
|
||||||
fun
|
|
||||||
('User-Property', V0, AccIn) when is_list(V0) ->
|
|
||||||
AccIn#{
|
|
||||||
'User-Property' => maps:from_list(V0),
|
|
||||||
'User-Property-Pairs' => [
|
|
||||||
#{
|
|
||||||
key => Key,
|
|
||||||
value => Value
|
|
||||||
}
|
|
||||||
|| {Key, Value} <- V0
|
|
||||||
]
|
|
||||||
};
|
|
||||||
(K, V0, AccIn) ->
|
|
||||||
AccIn#{K => V0}
|
|
||||||
end,
|
|
||||||
#{},
|
|
||||||
Headers
|
|
||||||
).
|
|
|
@ -2,7 +2,7 @@
|
||||||
{application, emqx_dashboard, [
|
{application, emqx_dashboard, [
|
||||||
{description, "EMQX Web Dashboard"},
|
{description, "EMQX Web Dashboard"},
|
||||||
% strict semver, bump manually!
|
% strict semver, bump manually!
|
||||||
{vsn, "5.0.22"},
|
{vsn, "5.0.23"},
|
||||||
{modules, []},
|
{modules, []},
|
||||||
{registered, [emqx_dashboard_sup]},
|
{registered, [emqx_dashboard_sup]},
|
||||||
{applications, [kernel, stdlib, mnesia, minirest, emqx, emqx_ctl]},
|
{applications, [kernel, stdlib, mnesia, minirest, emqx, emqx_ctl]},
|
||||||
|
|
|
@ -27,15 +27,12 @@
|
||||||
%% @doc Global ETS table to cache the description of the configuration items.
|
%% @doc Global ETS table to cache the description of the configuration items.
|
||||||
%% The table is owned by the emqx_dashboard_sup the root supervisor of emqx_dashboard.
|
%% The table is owned by the emqx_dashboard_sup the root supervisor of emqx_dashboard.
|
||||||
%% The cache is initialized with the default language (English) and
|
%% The cache is initialized with the default language (English) and
|
||||||
%% all the desc.<lang>.hocon files in the www/static directory (extracted from dashboard package).
|
%% all the desc.<lang>.hocon files in the app's priv directory
|
||||||
init() ->
|
init() ->
|
||||||
ok = ensure_app_loaded(emqx_dashboard),
|
ok = ensure_app_loaded(emqx_dashboard),
|
||||||
PrivDir = code:priv_dir(emqx_dashboard),
|
PrivDir = code:priv_dir(emqx_dashboard),
|
||||||
EngDesc = filename:join([PrivDir, "desc.en.hocon"]),
|
Files0 = filelib:wildcard("desc.*.hocon", PrivDir),
|
||||||
WwwStaticDir = filename:join([PrivDir, "www", "static"]),
|
Files = lists:map(fun(F) -> filename:join([PrivDir, F]) end, Files0),
|
||||||
OtherLangDesc0 = filelib:wildcard("desc.*.hocon", WwwStaticDir),
|
|
||||||
OtherLangDesc = lists:map(fun(F) -> filename:join([WwwStaticDir, F]) end, OtherLangDesc0),
|
|
||||||
Files = [EngDesc | OtherLangDesc],
|
|
||||||
ok = emqx_utils_ets:new(?MODULE, [public, ordered_set, {read_concurrency, true}]),
|
ok = emqx_utils_ets:new(?MODULE, [public, ordered_set, {read_concurrency, true}]),
|
||||||
ok = lists:foreach(fun(F) -> load_desc(?MODULE, F) end, Files).
|
ok = lists:foreach(fun(F) -> load_desc(?MODULE, F) end, Files).
|
||||||
|
|
||||||
|
|
|
@ -64,11 +64,6 @@ set_default_config(DefaultUsername, HAProxyEnabled, Opts) ->
|
||||||
i18n_lang => en
|
i18n_lang => en
|
||||||
},
|
},
|
||||||
emqx_config:put([dashboard], Config),
|
emqx_config:put([dashboard], Config),
|
||||||
I18nFile = filename:join([
|
|
||||||
code:priv_dir(emqx_dashboard),
|
|
||||||
"i18n.conf"
|
|
||||||
]),
|
|
||||||
application:set_env(emqx_dashboard, i18n_file, I18nFile),
|
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
request(Method, Url) ->
|
request(Method, Url) ->
|
||||||
|
|
|
@ -1,6 +1,5 @@
|
||||||
# EMQX Enterprise Application
|
# EMQX Enterprise Application
|
||||||
|
|
||||||
This application so fart only holds EMQX config schema for enterprise edition.
|
This application only holds EMQX config schema for enterprise edition so far.
|
||||||
In the future this application will collect more responsibilities in managing
|
In the future this application will collect more responsibilities in managing
|
||||||
enterprise edition specific features.
|
enterprise edition specific features.
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_enterprise, [
|
{application, emqx_enterprise, [
|
||||||
{description, "EMQX Enterprise Edition"},
|
{description, "EMQX Enterprise Edition"},
|
||||||
{vsn, "0.1.0"},
|
{vsn, "0.1.1"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{applications, [
|
{applications, [
|
||||||
kernel,
|
kernel,
|
||||||
|
|
|
@ -64,7 +64,7 @@ redefine_roots(Roots) ->
|
||||||
override(Roots, Overrides).
|
override(Roots, Overrides).
|
||||||
|
|
||||||
redefine_node(Fields) ->
|
redefine_node(Fields) ->
|
||||||
Overrides = [{"applications", #{default => <<"emqx_license">>}}],
|
Overrides = [],
|
||||||
override(Fields, Overrides).
|
override(Fields, Overrides).
|
||||||
|
|
||||||
override(Fields, []) ->
|
override(Fields, []) ->
|
||||||
|
|
|
@ -13,16 +13,6 @@ doc_gen_test() ->
|
||||||
60,
|
60,
|
||||||
fun() ->
|
fun() ->
|
||||||
Dir = "tmp",
|
Dir = "tmp",
|
||||||
ok = filelib:ensure_dir(filename:join("tmp", foo)),
|
ok = emqx_conf:dump_schema(Dir, emqx_enterprise_schema)
|
||||||
I18nFile = filename:join([
|
|
||||||
"_build",
|
|
||||||
"test",
|
|
||||||
"lib",
|
|
||||||
"emqx_dashboard",
|
|
||||||
"priv",
|
|
||||||
"i18n.conf"
|
|
||||||
]),
|
|
||||||
_ = emqx_conf:dump_schema(Dir, emqx_enterprise_schema, I18nFile),
|
|
||||||
ok
|
|
||||||
end
|
end
|
||||||
}.
|
}.
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_ft, [
|
{application, emqx_ft, [
|
||||||
{description, "EMQX file transfer over MQTT"},
|
{description, "EMQX file transfer over MQTT"},
|
||||||
{vsn, "0.1.1"},
|
{vsn, "0.1.2"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{mod, {emqx_ft_app, []}},
|
{mod, {emqx_ft_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -38,7 +38,7 @@
|
||||||
|
|
||||||
-export_type([export/0]).
|
-export_type([export/0]).
|
||||||
|
|
||||||
-type storage() :: emxt_ft_storage_fs:storage().
|
-type storage() :: emqx_ft_storage_fs:storage() | undefined.
|
||||||
-type transfer() :: emqx_ft:transfer().
|
-type transfer() :: emqx_ft:transfer().
|
||||||
-type filemeta() :: emqx_ft:filemeta().
|
-type filemeta() :: emqx_ft:filemeta().
|
||||||
-type checksum() :: emqx_ft:checksum().
|
-type checksum() :: emqx_ft:checksum().
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_resource, [
|
{application, emqx_resource, [
|
||||||
{description, "Manager for all external resources"},
|
{description, "Manager for all external resources"},
|
||||||
{vsn, "0.1.17"},
|
{vsn, "0.1.18"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{mod, {emqx_resource_app, []}},
|
{mod, {emqx_resource_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -121,7 +121,8 @@
|
||||||
|
|
||||||
-export_type([
|
-export_type([
|
||||||
resource_id/0,
|
resource_id/0,
|
||||||
resource_data/0
|
resource_data/0,
|
||||||
|
resource_status/0
|
||||||
]).
|
]).
|
||||||
|
|
||||||
-optional_callbacks([
|
-optional_callbacks([
|
||||||
|
@ -533,7 +534,7 @@ clean_allocated_resources(ResourceId, ResourceMod) ->
|
||||||
true ->
|
true ->
|
||||||
%% The resource entries in the ETS table are erased inside
|
%% The resource entries in the ETS table are erased inside
|
||||||
%% `call_stop' if the call is successful.
|
%% `call_stop' if the call is successful.
|
||||||
ok = emqx_resource:call_stop(ResourceId, ResourceMod, _ResourceState = undefined),
|
ok = call_stop(ResourceId, ResourceMod, _ResourceState = undefined),
|
||||||
ok;
|
ok;
|
||||||
false ->
|
false ->
|
||||||
ok
|
ok
|
||||||
|
|
|
@ -247,7 +247,7 @@ running(info, Info, _St) ->
|
||||||
keep_state_and_data.
|
keep_state_and_data.
|
||||||
|
|
||||||
blocked(enter, _, #{resume_interval := ResumeT} = St0) ->
|
blocked(enter, _, #{resume_interval := ResumeT} = St0) ->
|
||||||
?tp(buffer_worker_enter_blocked, #{}),
|
?tp(buffer_worker_enter_blocked, #{buffer_worker => self()}),
|
||||||
%% discard the old timer, new timer will be started when entering running state again
|
%% discard the old timer, new timer will be started when entering running state again
|
||||||
St = cancel_flush_timer(St0),
|
St = cancel_flush_timer(St0),
|
||||||
{keep_state, St, {state_timeout, ResumeT, unblock}};
|
{keep_state, St, {state_timeout, ResumeT, unblock}};
|
||||||
|
@ -403,7 +403,8 @@ retry_inflight_sync(Ref, QueryOrBatch, Data0) ->
|
||||||
buffer_worker_retry_inflight_failed,
|
buffer_worker_retry_inflight_failed,
|
||||||
#{
|
#{
|
||||||
ref => Ref,
|
ref => Ref,
|
||||||
query_or_batch => QueryOrBatch
|
query_or_batch => QueryOrBatch,
|
||||||
|
result => Result
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
{keep_state, Data1, {state_timeout, ResumeT, unblock}};
|
{keep_state, Data1, {state_timeout, ResumeT, unblock}};
|
||||||
|
@ -976,7 +977,7 @@ handle_async_worker_down(Data0, Pid) ->
|
||||||
{AsyncWorkerMRef, AsyncWorkers} = maps:take(Pid, AsyncWorkers0),
|
{AsyncWorkerMRef, AsyncWorkers} = maps:take(Pid, AsyncWorkers0),
|
||||||
Data = Data0#{async_workers := AsyncWorkers},
|
Data = Data0#{async_workers := AsyncWorkers},
|
||||||
mark_inflight_items_as_retriable(Data, AsyncWorkerMRef),
|
mark_inflight_items_as_retriable(Data, AsyncWorkerMRef),
|
||||||
{keep_state, Data}.
|
{next_state, blocked, Data}.
|
||||||
|
|
||||||
-spec call_query(force_sync | async_if_possible, _, _, _, _, _) -> _.
|
-spec call_query(force_sync | async_if_possible, _, _, _, _, _) -> _.
|
||||||
call_query(QM, Id, Index, Ref, Query, QueryOpts) ->
|
call_query(QM, Id, Index, Ref, Query, QueryOpts) ->
|
||||||
|
@ -1256,6 +1257,13 @@ handle_async_batch_reply2([Inflight], ReplyContext, Result, Now) ->
|
||||||
%% some queries are not expired, put them back to the inflight batch
|
%% some queries are not expired, put them back to the inflight batch
|
||||||
%% so it can be either acked now or retried later
|
%% so it can be either acked now or retried later
|
||||||
ok = update_inflight_item(InflightTID, Ref, RealNotExpired, NumExpired),
|
ok = update_inflight_item(InflightTID, Ref, RealNotExpired, NumExpired),
|
||||||
|
?tp_ignore_side_effects_in_prod(
|
||||||
|
handle_async_reply_partially_expired,
|
||||||
|
#{
|
||||||
|
inflight_count => inflight_count(InflightTID),
|
||||||
|
num_inflight_messages => inflight_num_msgs(InflightTID)
|
||||||
|
}
|
||||||
|
),
|
||||||
do_handle_async_batch_reply(ReplyContext#{min_batch := RealNotExpired}, Result)
|
do_handle_async_batch_reply(ReplyContext#{min_batch := RealNotExpired}, Result)
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
@ -1556,7 +1564,7 @@ mark_inflight_items_as_retriable(Data, AsyncWorkerMRef) ->
|
||||||
end
|
end
|
||||||
),
|
),
|
||||||
_NumAffected = ets:select_replace(InflightTID, MatchSpec),
|
_NumAffected = ets:select_replace(InflightTID, MatchSpec),
|
||||||
?tp(buffer_worker_async_agent_down, #{num_affected => _NumAffected}),
|
?tp(buffer_worker_async_agent_down, #{num_affected => _NumAffected, buffer_worker => self()}),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
%% used to update a batch after dropping expired individual queries.
|
%% used to update a batch after dropping expired individual queries.
|
||||||
|
|
|
@ -192,14 +192,13 @@ remove(ResId) when is_binary(ResId) ->
|
||||||
%% @doc Stops a running resource_manager and optionally clears the metrics for the resource
|
%% @doc Stops a running resource_manager and optionally clears the metrics for the resource
|
||||||
-spec remove(resource_id(), boolean()) -> ok | {error, Reason :: term()}.
|
-spec remove(resource_id(), boolean()) -> ok | {error, Reason :: term()}.
|
||||||
remove(ResId, ClearMetrics) when is_binary(ResId) ->
|
remove(ResId, ClearMetrics) when is_binary(ResId) ->
|
||||||
ResourceManagerPid = gproc:whereis_name(?NAME(ResId)),
|
|
||||||
try
|
try
|
||||||
safe_call(ResId, {remove, ClearMetrics}, ?T_OPERATION)
|
safe_call(ResId, {remove, ClearMetrics}, ?T_OPERATION)
|
||||||
after
|
after
|
||||||
%% Ensure the supervisor has it removed, otherwise the immediate re-add will see a stale process
|
%% Ensure the supervisor has it removed, otherwise the immediate re-add will see a stale process
|
||||||
%% If the 'remove' call babove had succeeded, this is mostly a no-op but still needed to avoid race condition.
|
%% If the 'remove' call babove had succeeded, this is mostly a no-op but still needed to avoid race condition.
|
||||||
%% Otherwise this is a 'infinity' shutdown, so it may take arbitrary long.
|
%% Otherwise this is a 'infinity' shutdown, so it may take arbitrary long.
|
||||||
emqx_resource_manager_sup:delete_child(ResourceManagerPid)
|
emqx_resource_manager_sup:delete_child(ResId)
|
||||||
end.
|
end.
|
||||||
|
|
||||||
%% @doc Stops and then starts an instance that was already running
|
%% @doc Stops and then starts an instance that was already running
|
||||||
|
|
|
@ -26,12 +26,12 @@
|
||||||
-export([init/1]).
|
-export([init/1]).
|
||||||
|
|
||||||
ensure_child(ResId, Group, ResourceType, Config, Opts) ->
|
ensure_child(ResId, Group, ResourceType, Config, Opts) ->
|
||||||
_ = supervisor:start_child(?MODULE, [ResId, Group, ResourceType, Config, Opts]),
|
_ = supervisor:start_child(?MODULE, child_spec(ResId, Group, ResourceType, Config, Opts)),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
delete_child(Pid) ->
|
delete_child(ResId) ->
|
||||||
_ = supervisor:terminate_child(?MODULE, Pid),
|
_ = supervisor:terminate_child(?MODULE, ResId),
|
||||||
_ = supervisor:delete_child(?MODULE, Pid),
|
_ = supervisor:delete_child(?MODULE, ResId),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
start_link() ->
|
start_link() ->
|
||||||
|
@ -44,18 +44,19 @@ init([]) ->
|
||||||
public,
|
public,
|
||||||
{read_concurrency, true}
|
{read_concurrency, true}
|
||||||
]),
|
]),
|
||||||
ChildSpecs = [
|
ChildSpecs = [],
|
||||||
#{
|
SupFlags = #{strategy => one_for_one, intensity => 10, period => 10},
|
||||||
id => emqx_resource_manager,
|
|
||||||
start => {emqx_resource_manager, start_link, []},
|
|
||||||
restart => transient,
|
|
||||||
%% never force kill a resource manager.
|
|
||||||
%% becasue otherwise it may lead to release leak,
|
|
||||||
%% resource_manager's terminate callback calls resource on_stop
|
|
||||||
shutdown => infinity,
|
|
||||||
type => worker,
|
|
||||||
modules => [emqx_resource_manager]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
SupFlags = #{strategy => simple_one_for_one, intensity => 10, period => 10},
|
|
||||||
{ok, {SupFlags, ChildSpecs}}.
|
{ok, {SupFlags, ChildSpecs}}.
|
||||||
|
|
||||||
|
child_spec(ResId, Group, ResourceType, Config, Opts) ->
|
||||||
|
#{
|
||||||
|
id => ResId,
|
||||||
|
start => {emqx_resource_manager, start_link, [ResId, Group, ResourceType, Config, Opts]},
|
||||||
|
restart => transient,
|
||||||
|
%% never force kill a resource manager.
|
||||||
|
%% becasue otherwise it may lead to release leak,
|
||||||
|
%% resource_manager's terminate callback calls resource on_stop
|
||||||
|
shutdown => infinity,
|
||||||
|
type => worker,
|
||||||
|
modules => [emqx_resource_manager]
|
||||||
|
}.
|
||||||
|
|
|
@ -1832,6 +1832,18 @@ t_async_pool_worker_death(_Config) ->
|
||||||
NumReqs,
|
NumReqs,
|
||||||
lists:sum([N || #{num_affected := N} <- Events])
|
lists:sum([N || #{num_affected := N} <- Events])
|
||||||
),
|
),
|
||||||
|
|
||||||
|
%% The `DOWN' signal must trigger the transition to the `blocked' state,
|
||||||
|
%% otherwise the request won't be retried until the buffer worker is `blocked'
|
||||||
|
%% for other reasons.
|
||||||
|
?assert(
|
||||||
|
?strict_causality(
|
||||||
|
#{?snk_kind := buffer_worker_async_agent_down, buffer_worker := _Pid0},
|
||||||
|
#{?snk_kind := buffer_worker_enter_blocked, buffer_worker := _Pid1},
|
||||||
|
_Pid0 =:= _Pid1,
|
||||||
|
Trace
|
||||||
|
)
|
||||||
|
),
|
||||||
ok
|
ok
|
||||||
end
|
end
|
||||||
),
|
),
|
||||||
|
@ -2248,6 +2260,15 @@ do_t_expiration_async_after_reply(IsBatch) ->
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
?of_kind(handle_async_reply_expired, Trace)
|
?of_kind(handle_async_reply_expired, Trace)
|
||||||
|
),
|
||||||
|
?assertMatch(
|
||||||
|
[
|
||||||
|
#{
|
||||||
|
inflight_count := 1,
|
||||||
|
num_inflight_messages := 1
|
||||||
|
}
|
||||||
|
],
|
||||||
|
?of_kind(handle_async_reply_partially_expired, Trace)
|
||||||
);
|
);
|
||||||
single ->
|
single ->
|
||||||
?assertMatch(
|
?assertMatch(
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
{application, emqx_rule_engine, [
|
{application, emqx_rule_engine, [
|
||||||
{description, "EMQX Rule Engine"},
|
{description, "EMQX Rule Engine"},
|
||||||
% strict semver, bump manually!
|
% strict semver, bump manually!
|
||||||
{vsn, "5.0.18"},
|
{vsn, "5.0.19"},
|
||||||
{modules, []},
|
{modules, []},
|
||||||
{registered, [emqx_rule_engine_sup, emqx_rule_engine]},
|
{registered, [emqx_rule_engine_sup, emqx_rule_engine]},
|
||||||
{applications, [kernel, stdlib, rulesql, getopt, emqx_ctl]},
|
{applications, [kernel, stdlib, rulesql, getopt, emqx_ctl]},
|
||||||
|
|
|
@ -529,47 +529,69 @@ printable_function_name(Mod, Func) ->
|
||||||
list_to_binary(lists:concat([Mod, ":", Func])).
|
list_to_binary(lists:concat([Mod, ":", Func])).
|
||||||
|
|
||||||
get_rule_metrics(Id) ->
|
get_rule_metrics(Id) ->
|
||||||
Format = fun(
|
Format = fun
|
||||||
Node,
|
(
|
||||||
#{
|
Node,
|
||||||
counters :=
|
#{
|
||||||
#{
|
counters :=
|
||||||
'matched' := Matched,
|
#{
|
||||||
'passed' := Passed,
|
'matched' := Matched,
|
||||||
'failed' := Failed,
|
'passed' := Passed,
|
||||||
'failed.exception' := FailedEx,
|
'failed' := Failed,
|
||||||
'failed.no_result' := FailedNoRes,
|
'failed.exception' := FailedEx,
|
||||||
'actions.total' := OTotal,
|
'failed.no_result' := FailedNoRes,
|
||||||
'actions.failed' := OFailed,
|
'actions.total' := OTotal,
|
||||||
'actions.failed.out_of_service' := OFailedOOS,
|
'actions.failed' := OFailed,
|
||||||
'actions.failed.unknown' := OFailedUnknown,
|
'actions.failed.out_of_service' := OFailedOOS,
|
||||||
'actions.success' := OFailedSucc
|
'actions.failed.unknown' := OFailedUnknown,
|
||||||
},
|
'actions.success' := OFailedSucc
|
||||||
rate :=
|
},
|
||||||
#{
|
rate :=
|
||||||
'matched' :=
|
#{
|
||||||
#{current := Current, max := Max, last5m := Last5M}
|
'matched' :=
|
||||||
}
|
#{current := Current, max := Max, last5m := Last5M}
|
||||||
}
|
}
|
||||||
) ->
|
}
|
||||||
#{
|
) ->
|
||||||
metrics => ?METRICS(
|
#{
|
||||||
Matched,
|
metrics => ?METRICS(
|
||||||
Passed,
|
Matched,
|
||||||
Failed,
|
Passed,
|
||||||
FailedEx,
|
Failed,
|
||||||
FailedNoRes,
|
FailedEx,
|
||||||
OTotal,
|
FailedNoRes,
|
||||||
OFailed,
|
OTotal,
|
||||||
OFailedOOS,
|
OFailed,
|
||||||
OFailedUnknown,
|
OFailedOOS,
|
||||||
OFailedSucc,
|
OFailedUnknown,
|
||||||
Current,
|
OFailedSucc,
|
||||||
Max,
|
Current,
|
||||||
Last5M
|
Max,
|
||||||
),
|
Last5M
|
||||||
node => Node
|
),
|
||||||
}
|
node => Node
|
||||||
|
};
|
||||||
|
(Node, _Metrics) ->
|
||||||
|
%% Empty metrics: can happen when a node joins another and a bridge is not yet
|
||||||
|
%% replicated to it, so the counters map is empty.
|
||||||
|
#{
|
||||||
|
metrics => ?METRICS(
|
||||||
|
_Matched = 0,
|
||||||
|
_Passed = 0,
|
||||||
|
_Failed = 0,
|
||||||
|
_FailedEx = 0,
|
||||||
|
_FailedNoRes = 0,
|
||||||
|
_OTotal = 0,
|
||||||
|
_OFailed = 0,
|
||||||
|
_OFailedOOS = 0,
|
||||||
|
_OFailedUnknown = 0,
|
||||||
|
_OFailedSucc = 0,
|
||||||
|
_Current = 0,
|
||||||
|
_Max = 0,
|
||||||
|
_Last5M = 0
|
||||||
|
),
|
||||||
|
node => Node
|
||||||
|
}
|
||||||
end,
|
end,
|
||||||
[
|
[
|
||||||
Format(Node, emqx_plugin_libs_proto_v1:get_metrics(Node, rule_metrics, Id))
|
Format(Node, emqx_plugin_libs_proto_v1:get_metrics(Node, rule_metrics, Id))
|
||||||
|
|
|
@ -228,7 +228,9 @@
|
||||||
format_date/3,
|
format_date/3,
|
||||||
format_date/4,
|
format_date/4,
|
||||||
date_to_unix_ts/3,
|
date_to_unix_ts/3,
|
||||||
date_to_unix_ts/4
|
date_to_unix_ts/4,
|
||||||
|
timezone_to_second/1,
|
||||||
|
timezone_to_offset_seconds/1
|
||||||
]).
|
]).
|
||||||
|
|
||||||
%% MongoDB specific date functions. These functions return a date tuple. The
|
%% MongoDB specific date functions. These functions return a date tuple. The
|
||||||
|
@ -1104,6 +1106,12 @@ date_to_unix_ts(TimeUnit, Offset, FormatString, InputString) ->
|
||||||
OffsetDelta = erlang:convert_time_unit(OffsetSecond, second, Unit),
|
OffsetDelta = erlang:convert_time_unit(OffsetSecond, second, Unit),
|
||||||
date_to_unix_ts(Unit, FormatString, InputString) - OffsetDelta.
|
date_to_unix_ts(Unit, FormatString, InputString) - OffsetDelta.
|
||||||
|
|
||||||
|
timezone_to_second(TimeZone) ->
|
||||||
|
timezone_to_offset_seconds(TimeZone).
|
||||||
|
|
||||||
|
timezone_to_offset_seconds(TimeZone) ->
|
||||||
|
emqx_calendar:offset_second(TimeZone).
|
||||||
|
|
||||||
%% @doc This is for sql funcs that should be handled in the specific modules.
|
%% @doc This is for sql funcs that should be handled in the specific modules.
|
||||||
%% Here the emqx_rule_funcs module acts as a proxy, forwarding
|
%% Here the emqx_rule_funcs module acts as a proxy, forwarding
|
||||||
%% the function handling to the worker module.
|
%% the function handling to the worker module.
|
||||||
|
|
|
@ -94,6 +94,26 @@ t_crud_rule_api(_Config) ->
|
||||||
ct:pal("RMetrics : ~p", [Metrics]),
|
ct:pal("RMetrics : ~p", [Metrics]),
|
||||||
?assertMatch(#{id := RuleId, metrics := _, node_metrics := _}, Metrics),
|
?assertMatch(#{id := RuleId, metrics := _, node_metrics := _}, Metrics),
|
||||||
|
|
||||||
|
%% simulating a node joining a cluster and lagging the configuration replication; in
|
||||||
|
%% such cases, when fetching metrics, a rule may exist in the cluster but not on the
|
||||||
|
%% new node. We just check that it doesn't provoke a crash.
|
||||||
|
emqx_common_test_helpers:with_mock(
|
||||||
|
emqx_metrics_worker,
|
||||||
|
get_metrics,
|
||||||
|
fun(HandlerName, MetricId) ->
|
||||||
|
%% change the metric id to some unknown id.
|
||||||
|
meck:passthrough([HandlerName, <<"unknown-", MetricId/binary>>])
|
||||||
|
end,
|
||||||
|
fun() ->
|
||||||
|
{200, Metrics1} = emqx_rule_engine_api:'/rules/:id/metrics'(get, #{
|
||||||
|
bindings => #{id => RuleId}
|
||||||
|
}),
|
||||||
|
ct:pal("RMetrics : ~p", [Metrics1]),
|
||||||
|
?assertMatch(#{id := RuleId, metrics := _, node_metrics := _}, Metrics1),
|
||||||
|
ok
|
||||||
|
end
|
||||||
|
),
|
||||||
|
|
||||||
{200, Rule2} = emqx_rule_engine_api:'/rules/:id'(put, #{
|
{200, Rule2} = emqx_rule_engine_api:'/rules/:id'(put, #{
|
||||||
bindings => #{id => RuleId},
|
bindings => #{id => RuleId},
|
||||||
body => ?SIMPLE_RULE(RuleId)#{<<"sql">> => <<"select * from \"t/b\"">>}
|
body => ?SIMPLE_RULE(RuleId)#{<<"sql">> => <<"select * from \"t/b\"">>}
|
||||||
|
|
|
@ -1012,6 +1012,25 @@ prop_format_date_fun() ->
|
||||||
Args3DTUS = [<<"second">>, <<"+04:00">>, <<"--%m--%d--%Y---%H:%M:%S">>, Formatters3],
|
Args3DTUS = [<<"second">>, <<"+04:00">>, <<"--%m--%d--%Y---%H:%M:%S">>, Formatters3],
|
||||||
Second == apply_func(date_to_unix_ts, Args3DTUS).
|
Second == apply_func(date_to_unix_ts, Args3DTUS).
|
||||||
|
|
||||||
|
t_timezone_to_offset_seconds(_) ->
|
||||||
|
timezone_to_offset_seconds_helper(timezone_to_offset_seconds),
|
||||||
|
%% The timezone_to_second function is kept for compatibility with 4.X.
|
||||||
|
timezone_to_offset_seconds_helper(timezone_to_second).
|
||||||
|
|
||||||
|
timezone_to_offset_seconds_helper(FunctionName) ->
|
||||||
|
?assertEqual(120 * 60, apply_func(FunctionName, [<<"+02:00:00">>])),
|
||||||
|
?assertEqual(-120 * 60, apply_func(FunctionName, [<<"-02:00:00">>])),
|
||||||
|
?assertEqual(102, apply_func(FunctionName, [<<"+00:01:42">>])),
|
||||||
|
?assertEqual(0, apply_func(FunctionName, [<<"z">>])),
|
||||||
|
?assertEqual(0, apply_func(FunctionName, [<<"Z">>])),
|
||||||
|
?assertEqual(42, apply_func(FunctionName, [42])),
|
||||||
|
?assertEqual(0, apply_func(FunctionName, [undefined])),
|
||||||
|
%% Check that the following does not crash
|
||||||
|
apply_func(FunctionName, [<<"local">>]),
|
||||||
|
apply_func(FunctionName, ["local"]),
|
||||||
|
apply_func(FunctionName, [local]),
|
||||||
|
ok.
|
||||||
|
|
||||||
%%------------------------------------------------------------------------------
|
%%------------------------------------------------------------------------------
|
||||||
%% Utility functions
|
%% Utility functions
|
||||||
%%------------------------------------------------------------------------------
|
%%------------------------------------------------------------------------------
|
||||||
|
|
|
@ -430,25 +430,25 @@ t_num_clients(_Config) ->
|
||||||
{port, 1883},
|
{port, 1883},
|
||||||
{clean_start, false}
|
{clean_start, false}
|
||||||
]),
|
]),
|
||||||
{{ok, _}, _} = ?wait_async_action(
|
{{ok, _}, {ok, _}} = ?wait_async_action(
|
||||||
{ok, _} = emqtt:connect(Client),
|
{ok, _} = emqtt:connect(Client),
|
||||||
#{
|
#{
|
||||||
?snk_kind := emqx_stats_setstat,
|
?snk_kind := emqx_stats_setstat,
|
||||||
count_stat := 'live_connections.count',
|
count_stat := 'live_connections.count',
|
||||||
value := 1
|
value := 1
|
||||||
},
|
},
|
||||||
2000
|
5_000
|
||||||
),
|
),
|
||||||
{ok, TelemetryData0} = emqx_telemetry:get_telemetry(),
|
{ok, TelemetryData0} = emqx_telemetry:get_telemetry(),
|
||||||
?assertEqual(1, get_value(num_clients, TelemetryData0)),
|
?assertEqual(1, get_value(num_clients, TelemetryData0)),
|
||||||
{ok, _} = ?wait_async_action(
|
{ok, {ok, _}} = ?wait_async_action(
|
||||||
ok = emqtt:disconnect(Client),
|
ok = emqtt:disconnect(Client),
|
||||||
#{
|
#{
|
||||||
?snk_kind := emqx_stats_setstat,
|
?snk_kind := emqx_stats_setstat,
|
||||||
count_stat := 'live_connections.count',
|
count_stat := 'live_connections.count',
|
||||||
value := 0
|
value := 0
|
||||||
},
|
},
|
||||||
2000
|
5_000
|
||||||
),
|
),
|
||||||
{ok, TelemetryData1} = emqx_telemetry:get_telemetry(),
|
{ok, TelemetryData1} = emqx_telemetry:get_telemetry(),
|
||||||
?assertEqual(0, get_value(num_clients, TelemetryData1)),
|
?assertEqual(0, get_value(num_clients, TelemetryData1)),
|
||||||
|
|
|
@ -0,0 +1 @@
|
||||||
|
A new utility function timezone_to_offset_seconds/1 has been added to the rule engine SQL language. This function converts a timezone string (for example, "+02:00", "Z" and "local") to the corresponding offset in seconds.
|
|
@ -0,0 +1 @@
|
||||||
|
Fixes an issue where trying to get rule info or metrics could result in a crash when a node is joining a cluster.
|
|
@ -0,0 +1,3 @@
|
||||||
|
Fixed a potential issue where requests to bridges might take a long time to be retried.
|
||||||
|
|
||||||
|
This only affected low throughput scenarios, where the buffering layer could take a long time to detect connectivity and driver problems.
|
|
@ -0,0 +1,3 @@
|
||||||
|
The MQTT bridge has been enhanced to utilize connection pooling and leverage available parallelism, substantially improving throughput.
|
||||||
|
|
||||||
|
As a consequence, single MQTT bridge now uses a pool of `clientid`s to connect to the remote broker.
|
|
@ -1 +1 @@
|
||||||
Refactored Pulsar Producer bridge to avoid leaking resources during crashes.
|
Refactored Pulsar Producer bridge to avoid leaking resources during crashes at creation.
|
||||||
|
|
|
@ -0,0 +1 @@
|
||||||
|
Refactored Kafka Producer and Consumer bridges to avoid leaking resources during crashes at creation.
|
|
@ -0,0 +1 @@
|
||||||
|
Added a schema validation to ensure message key is not empty when dispatching by key in Kafka and Pulsar Producer bridges.
|
|
@ -0,0 +1,118 @@
|
||||||
|
# v5.0.26
|
||||||
|
|
||||||
|
## Enhancements
|
||||||
|
|
||||||
|
- [#10584](https://github.com/emqx/emqx/pull/10584) Add log level configuration to SSL communication
|
||||||
|
|
||||||
|
- [#10702](https://github.com/emqx/emqx/pull/10702) Introduce a more straightforward configuration option `keepalive_multiplier` and
|
||||||
|
deprecate the old `keepalive_backoff` configuration.
|
||||||
|
After this enhancement, EMQX checks the client's keepalive timeout status
|
||||||
|
period by multiplying the "Client Requested Keepalive Interval" with `keepalive_multiplier`.
|
||||||
|
|
||||||
|
- [#10713](https://github.com/emqx/emqx/pull/10713) We hide the request_timeout in resource_option of the webhook to keep it consistent with the http request_timeout of the webhook.
|
||||||
|
From now on, when configuring a webhook through API or configuration files,
|
||||||
|
it is no longer necessary to configure the request_timeout of the resource. Only configuring the http request_timeout is sufficient, and the request_timeout in the resource will automatically be consistent with the http request_timeout.
|
||||||
|
|
||||||
|
- [#10511](https://github.com/emqx/emqx/pull/10511) Improve the security and privacy of some resource logs by masking sensitive information in the data.
|
||||||
|
|
||||||
|
- [#10678](https://github.com/emqx/emqx/pull/10678) Optimized counter increment calls to avoid work if increment is zero.
|
||||||
|
|
||||||
|
- [#10690](https://github.com/emqx/emqx/pull/10690) Added a retry mechanism to webhook bridge that attempts to improve throughput.
|
||||||
|
|
||||||
|
This optimization retries request failures without blocking the buffering layer, which can improve throughput in situations of high messaging rate.
|
||||||
|
|
||||||
|
- [#10698](https://github.com/emqx/emqx/pull/10698) Optimize memory usage when accessing the configuration during runtime.
|
||||||
|
|
||||||
|
## Bug Fixes
|
||||||
|
|
||||||
|
- [#10340](https://github.com/emqx/emqx/pull/10340) Fixed the issue that could lead to crash logs being printed when stopping EMQX via systemd.
|
||||||
|
```
|
||||||
|
2023-03-29T16:43:25.915761+08:00 [error] Generic server memsup terminating. Reason: {port_died,normal}. Last message: {'EXIT',<0.2117.0>,{port_died,normal}}. State: [{data,[{"Timeout",60000}]},{items,{"Memory Usage",[{"Allocated",929959936},{"Total",3832242176}]}},{items,{"Worst Memory User",[{"Pid",<0.2031.0>},{"Memory",4720472}]}}].
|
||||||
|
2023-03-29T16:43:25.924764+08:00 [error] crasher: initial call: memsup:init/1, pid: <0.2116.0>, registered_name: memsup, exit: {{port_died,normal},[{gen_server,handle_common_reply,8,[{file,"gen_server.erl"},{line,811}]},{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,226}]}]}, ancestors: [os_mon_sup,<0.2114.0>], message_queue_len: 0, messages: [], links: [<0.2115.0>], dictionary: [], trap_exit: true, status: running, heap_size: 4185, stack_size: 29, reductions: 187637; neighbours:
|
||||||
|
2023-03-29T16:43:25.924979+08:00 [error] Supervisor: {local,os_mon_sup}. Context: child_terminated. Reason: {port_died,normal}. Offender: id=memsup,pid=<0.2116.0>.
|
||||||
|
```
|
||||||
|
|
||||||
|
- [#10563](https://github.com/emqx/emqx/pull/10563) Corrected an issue where the no_local flag was not functioning correctly.
|
||||||
|
|
||||||
|
|
||||||
|
- [#10600](https://github.com/emqx/emqx/pull/10600) Deleted emqx_statsd application.
|
||||||
|
|
||||||
|
|
||||||
|
- [#10653](https://github.com/emqx/emqx/pull/10653) Store gateway authentication TLS certificates and keys in the data directory.
|
||||||
|
|
||||||
|
- [#10677](https://github.com/emqx/emqx/pull/10677) In Rule API, reapond with 404 HTTP error code when trying to delete a rule that does not exist.
|
||||||
|
|
||||||
|
- [#10682](https://github.com/emqx/emqx/pull/10682) Fix the timestamp for the will message is incorrectly assigned at the session creation time, now this timestamp is the disconnected time of the session.
|
||||||
|
|
||||||
|
- [#10701](https://github.com/emqx/emqx/pull/10701) RPM package for Amazon Linux 2 did not support TLS v1.3 as it was assembled with Erlang/OTP built with openssl 1.0.
|
||||||
|
|
||||||
|
- [#10715](https://github.com/emqx/emqx/pull/10715) Postpone trimming the connection information structure until after `client.connected` hooks have been executed. These hooks once again have access to the client's peer certificate.
|
||||||
|
|
||||||
|
- [#10717](https://github.com/emqx/emqx/pull/10717) Fixed an issue where the buffering layer processes could use a lot of CPU when inflight window is full.
|
||||||
|
|
||||||
|
- [#10724](https://github.com/emqx/emqx/pull/10724) A summary has been added for all endpoints in the HTTP API documentation (accessible at "http://emqx_host_name:18083/api-docs").
|
||||||
|
|
||||||
|
- [#10726](https://github.com/emqx/emqx/pull/10726) Validate Health Check Interval and Auto Restart Interval against the range from 1ms to 1 hour.
|
||||||
|
|
||||||
|
- [#10728](https://github.com/emqx/emqx/pull/10728) Fixed an issue where the rule engine was unable to access variables exported by `FOREACH` in the `DO` clause.
|
||||||
|
|
||||||
|
Given a payload: `{"date": "2023-05-06", "array": ["a"]}`, as well as the following SQL statement:
|
||||||
|
```
|
||||||
|
FOREACH payload.date as date, payload.array as elem
|
||||||
|
DO date, elem
|
||||||
|
FROM "t/#"
|
||||||
|
```
|
||||||
|
Prior to the fix, the `date` variable exported by `FOREACH` could not be accessed in the `DO` clause of the above SQL, resulting in the following output for the SQL statement:
|
||||||
|
`[{"elem": "a","date": "undefined"}]`.
|
||||||
|
After the fix, the output of the SQL statement is: `[{"elem": "a","date": "2023-05-06"}]`
|
||||||
|
|
||||||
|
- [#10737](https://github.com/emqx/emqx/pull/10737) Fix the issue where the HTTP API interface of Gateway cannot handle ClientIDs with
|
||||||
|
special characters, such as: `!@#$%^&*()_+{}:"<>?/`.
|
||||||
|
|
||||||
|
- [#10742](https://github.com/emqx/emqx/pull/10742) Check the correctness of the rules before saving the authorization file source.
|
||||||
|
Previously, Saving wrong rules could lead to restart failure.
|
||||||
|
|
||||||
|
- [#10743](https://github.com/emqx/emqx/pull/10743) Fixes an issue where trying to get a bridge info or metrics could result in a crash when a node is joining a cluster.
|
||||||
|
|
||||||
|
- [#10746](https://github.com/emqx/emqx/pull/10746) Add missing support of the event `$events/delivery_dropped` into the rule engine test API `rule_test`.
|
||||||
|
|
||||||
|
- [#10747](https://github.com/emqx/emqx/pull/10747) Refactor date and time functions, `format_date` and `date_to_unix_ts`, in the rule engine to fix the implementation problem.
|
||||||
|
|
||||||
|
- [#10755](https://github.com/emqx/emqx/pull/10755) Fixed data bridge resource update race condition.
|
||||||
|
|
||||||
|
In the 'delete + create' process for EMQX resource updates,
|
||||||
|
long bridge creation times could cause dashboard request timeouts.
|
||||||
|
If a bridge resource update was initiated before completion of its creation,
|
||||||
|
it led to an erroneous deletion from the runtime, despite being present in the config file.
|
||||||
|
|
||||||
|
This fix addresses the race condition in bridge resource updates,
|
||||||
|
ensuring the accurate identification and addition of new resources,
|
||||||
|
maintaining consistency between runtime and configuration file statuses.
|
||||||
|
|
||||||
|
- [#10760](https://github.com/emqx/emqx/pull/10760) Fix Internal Error 500 that occurred sometimes when bridge statistics page was updated while a node was (re)joining the cluster.
|
||||||
|
|
||||||
|
- [#10761](https://github.com/emqx/emqx/pull/10761) Fixing the issue where the default value of SSL certificate for Dashboard Listener was not correctly interpolated, which caused HTTPS to be inaccessible when verify_peer and cacertfile were using the default configuration.
|
||||||
|
|
||||||
|
- [#10785](https://github.com/emqx/emqx/pull/10785) Ensure `EMQX_LOG_DIR` is set by Windows boot script.
|
||||||
|
|
||||||
|
The environment variable `EMQX_LOG_DIR` was missing in v5.0.25, caused EMQX Windows package fail to boot unless set by sysadmin.
|
||||||
|
|
||||||
|
- [#10801](https://github.com/emqx/emqx/pull/10801) Avoid duplicated percent decode the topic name in API `/topics/{topic}` and `/topics`.
|
||||||
|
|
||||||
|
- [#10809](https://github.com/emqx/emqx/pull/10809) Address `** ERROR ** Mnesia post_commit hook failed: error:badarg` error messages happening during node shutdown or restart.
|
||||||
|
Mria pull request: https://github.com/emqx/mria/pull/142
|
||||||
|
|
||||||
|
- [#10817](https://github.com/emqx/emqx/pull/10817) Fix the error of not being able to configure `auto_restart_interval` as infinity
|
||||||
|
|
||||||
|
- [#10818](https://github.com/emqx/emqx/pull/10818) Fixing `emqx_ctl traces` command.
|
||||||
|
|
||||||
|
- [#10820](https://github.com/emqx/emqx/pull/10820) In case the cluster updated license before the new node join in. The new node will not apply the updated license.
|
||||||
|
After this change, the new joined node will use the cluster's license key.
|
||||||
|
|
||||||
|
Sometimes the new node must start with a outdated license.
|
||||||
|
e.g. use emqx-operator deployed and needed to scale up after license expired.
|
||||||
|
At the time the cluster's license key already updated by API/CLI, but the new node won't use it.
|
||||||
|
|
||||||
|
- [#10833](https://github.com/emqx/emqx/pull/10833) Only include enabled authenticators and authorizers in telemetry report, not all of them.
|
||||||
|
|
||||||
|
- [#10851](https://github.com/emqx/emqx/pull/10851) Obfuscated sensitive data in the bad API logging.
|
|
@ -14,8 +14,8 @@ type: application
|
||||||
|
|
||||||
# This is the chart version. This version number should be incremented each time you make changes
|
# This is the chart version. This version number should be incremented each time you make changes
|
||||||
# to the chart and its templates, including the app version.
|
# to the chart and its templates, including the app version.
|
||||||
version: 5.0.4
|
version: 5.1.0
|
||||||
|
|
||||||
# This is the version number of the application being deployed. This version number should be
|
# This is the version number of the application being deployed. This version number should be
|
||||||
# incremented each time you make changes to the application.
|
# incremented each time you make changes to the application.
|
||||||
appVersion: 5.0.4
|
appVersion: 5.1.0
|
||||||
|
|
|
@ -14,8 +14,8 @@ type: application
|
||||||
|
|
||||||
# This is the chart version. This version number should be incremented each time you make changes
|
# This is the chart version. This version number should be incremented each time you make changes
|
||||||
# to the chart and its templates, including the app version.
|
# to the chart and its templates, including the app version.
|
||||||
version: 5.0.25
|
version: 5.1.0
|
||||||
|
|
||||||
# This is the version number of the application being deployed. This version number should be
|
# This is the version number of the application being deployed. This version number should be
|
||||||
# incremented each time you make changes to the application.
|
# incremented each time you make changes to the application.
|
||||||
appVersion: 5.0.25
|
appVersion: 5.1.0
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_ee_bridge, [
|
{application, emqx_ee_bridge, [
|
||||||
{description, "EMQX Enterprise data bridges"},
|
{description, "EMQX Enterprise data bridges"},
|
||||||
{vsn, "0.1.14"},
|
{vsn, "0.1.15"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{applications, [
|
{applications, [
|
||||||
kernel,
|
kernel,
|
||||||
|
|
|
@ -242,7 +242,8 @@ pulsar_structs() ->
|
||||||
hoconsc:map(name, ref(emqx_bridge_pulsar, pulsar_producer)),
|
hoconsc:map(name, ref(emqx_bridge_pulsar, pulsar_producer)),
|
||||||
#{
|
#{
|
||||||
desc => <<"Pulsar Producer Bridge Config">>,
|
desc => <<"Pulsar Producer Bridge Config">>,
|
||||||
required => false
|
required => false,
|
||||||
|
validator => fun emqx_bridge_pulsar:producer_strategy_key_validator/1
|
||||||
}
|
}
|
||||||
)}
|
)}
|
||||||
].
|
].
|
||||||
|
|
|
@ -82,7 +82,7 @@ check_license_watermark(Conf) ->
|
||||||
%% @doc The default license key.
|
%% @doc The default license key.
|
||||||
%% This default license has 1000 connections limit.
|
%% This default license has 1000 connections limit.
|
||||||
%% It is issued on 2023-01-09 and valid for 5 years (1825 days)
|
%% It is issued on 2023-01-09 and valid for 5 years (1825 days)
|
||||||
%% NOTE: when updating a new key, the schema doc in emqx_license_schema_i18n.conf
|
%% NOTE: when updating a new key, the schema doc in emqx_license_schema.hocon
|
||||||
%% should be updated accordingly
|
%% should be updated accordingly
|
||||||
default_license() ->
|
default_license() ->
|
||||||
<<
|
<<
|
||||||
|
|
10
mix.exs
10
mix.exs
|
@ -54,12 +54,12 @@ defmodule EMQXUmbrella.MixProject do
|
||||||
{:jiffy, github: "emqx/jiffy", tag: "1.0.5", override: true},
|
{:jiffy, github: "emqx/jiffy", tag: "1.0.5", override: true},
|
||||||
{:cowboy, github: "emqx/cowboy", tag: "2.9.0", override: true},
|
{:cowboy, github: "emqx/cowboy", tag: "2.9.0", override: true},
|
||||||
{:esockd, github: "emqx/esockd", tag: "5.9.6", override: true},
|
{:esockd, github: "emqx/esockd", tag: "5.9.6", override: true},
|
||||||
{:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.7.2-emqx-9", override: true},
|
{:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.7.2-emqx-11", override: true},
|
||||||
{:ekka, github: "emqx/ekka", tag: "0.15.2", override: true},
|
{:ekka, github: "emqx/ekka", tag: "0.15.2", override: true},
|
||||||
{:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true},
|
{:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true},
|
||||||
{:grpc, github: "emqx/grpc-erl", tag: "0.6.7", override: true},
|
{:grpc, github: "emqx/grpc-erl", tag: "0.6.7", override: true},
|
||||||
{:minirest, github: "emqx/minirest", tag: "1.3.10", override: true},
|
{:minirest, github: "emqx/minirest", tag: "1.3.10", override: true},
|
||||||
{:ecpool, github: "emqx/ecpool", tag: "0.5.3", override: true},
|
{:ecpool, github: "emqx/ecpool", tag: "0.5.4", override: true},
|
||||||
{:replayq, github: "emqx/replayq", tag: "0.3.7", override: true},
|
{:replayq, github: "emqx/replayq", tag: "0.3.7", override: true},
|
||||||
{:pbkdf2, github: "emqx/erlang-pbkdf2", tag: "2.0.4", override: true},
|
{:pbkdf2, github: "emqx/erlang-pbkdf2", tag: "2.0.4", override: true},
|
||||||
# maybe forbid to fetch quicer
|
# maybe forbid to fetch quicer
|
||||||
|
@ -195,10 +195,10 @@ defmodule EMQXUmbrella.MixProject do
|
||||||
{:hstreamdb_erl, github: "hstreamdb/hstreamdb_erl", tag: "0.2.5"},
|
{:hstreamdb_erl, github: "hstreamdb/hstreamdb_erl", tag: "0.2.5"},
|
||||||
{:influxdb, github: "emqx/influxdb-client-erl", tag: "1.1.9", override: true},
|
{:influxdb, github: "emqx/influxdb-client-erl", tag: "1.1.9", override: true},
|
||||||
{:wolff, github: "kafka4beam/wolff", tag: "1.7.5"},
|
{:wolff, github: "kafka4beam/wolff", tag: "1.7.5"},
|
||||||
{:kafka_protocol, github: "kafka4beam/kafka_protocol", tag: "4.1.2", override: true},
|
{:kafka_protocol, github: "kafka4beam/kafka_protocol", tag: "4.1.3", override: true},
|
||||||
{:brod_gssapi, github: "kafka4beam/brod_gssapi", tag: "v0.1.0"},
|
{:brod_gssapi, github: "kafka4beam/brod_gssapi", tag: "v0.1.0"},
|
||||||
{:brod, github: "kafka4beam/brod", tag: "3.16.8"},
|
{:brod, github: "kafka4beam/brod", tag: "3.16.8"},
|
||||||
{:snappyer, "1.2.8", override: true},
|
{:snappyer, "1.2.9", override: true},
|
||||||
{:crc32cer, "0.1.8", override: true},
|
{:crc32cer, "0.1.8", override: true},
|
||||||
{:supervisor3, "1.1.12", override: true},
|
{:supervisor3, "1.1.12", override: true},
|
||||||
{:opentsdb, github: "emqx/opentsdb-client-erl", tag: "v0.5.1", override: true},
|
{:opentsdb, github: "emqx/opentsdb-client-erl", tag: "v0.5.1", override: true},
|
||||||
|
@ -310,6 +310,7 @@ defmodule EMQXUmbrella.MixProject do
|
||||||
:emqx_connector,
|
:emqx_connector,
|
||||||
:emqx_exhook,
|
:emqx_exhook,
|
||||||
:emqx_bridge,
|
:emqx_bridge,
|
||||||
|
:emqx_bridge_mqtt,
|
||||||
:emqx_modules,
|
:emqx_modules,
|
||||||
:emqx_management,
|
:emqx_management,
|
||||||
:emqx_retainer,
|
:emqx_retainer,
|
||||||
|
@ -372,6 +373,7 @@ defmodule EMQXUmbrella.MixProject do
|
||||||
emqx_gateway_exproto: :permanent,
|
emqx_gateway_exproto: :permanent,
|
||||||
emqx_exhook: :permanent,
|
emqx_exhook: :permanent,
|
||||||
emqx_bridge: :permanent,
|
emqx_bridge: :permanent,
|
||||||
|
emqx_bridge_mqtt: :permanent,
|
||||||
emqx_rule_engine: :permanent,
|
emqx_rule_engine: :permanent,
|
||||||
emqx_modules: :permanent,
|
emqx_modules: :permanent,
|
||||||
emqx_management: :permanent,
|
emqx_management: :permanent,
|
||||||
|
|
|
@ -61,12 +61,12 @@
|
||||||
, {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}}
|
, {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}}
|
||||||
, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}
|
, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}
|
||||||
, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}}
|
, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}}
|
||||||
, {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.7.2-emqx-9"}}}
|
, {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.7.2-emqx-11"}}}
|
||||||
, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.2"}}}
|
, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.2"}}}
|
||||||
, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}
|
, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}
|
||||||
, {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.7"}}}
|
, {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.7"}}}
|
||||||
, {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.10"}}}
|
, {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.10"}}}
|
||||||
, {ecpool, {git, "https://github.com/emqx/ecpool", {tag, "0.5.3"}}}
|
, {ecpool, {git, "https://github.com/emqx/ecpool", {tag, "0.5.4"}}}
|
||||||
, {replayq, {git, "https://github.com/emqx/replayq.git", {tag, "0.3.7"}}}
|
, {replayq, {git, "https://github.com/emqx/replayq.git", {tag, "0.3.7"}}}
|
||||||
, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}
|
, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}
|
||||||
, {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.8.5"}}}
|
, {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.8.5"}}}
|
||||||
|
|
|
@ -91,6 +91,7 @@ is_community_umbrella_app("apps/emqx_bridge_matrix") -> false;
|
||||||
is_community_umbrella_app("apps/emqx_bridge_mongodb") -> false;
|
is_community_umbrella_app("apps/emqx_bridge_mongodb") -> false;
|
||||||
is_community_umbrella_app("apps/emqx_bridge_mysql") -> false;
|
is_community_umbrella_app("apps/emqx_bridge_mysql") -> false;
|
||||||
is_community_umbrella_app("apps/emqx_bridge_pgsql") -> false;
|
is_community_umbrella_app("apps/emqx_bridge_pgsql") -> false;
|
||||||
|
is_community_umbrella_app("apps/emqx_bridge_pulsar") -> false;
|
||||||
is_community_umbrella_app("apps/emqx_bridge_redis") -> false;
|
is_community_umbrella_app("apps/emqx_bridge_redis") -> false;
|
||||||
is_community_umbrella_app("apps/emqx_bridge_rocketmq") -> false;
|
is_community_umbrella_app("apps/emqx_bridge_rocketmq") -> false;
|
||||||
is_community_umbrella_app("apps/emqx_bridge_tdengine") -> false;
|
is_community_umbrella_app("apps/emqx_bridge_tdengine") -> false;
|
||||||
|
@ -426,6 +427,7 @@ relx_apps(ReleaseType, Edition) ->
|
||||||
emqx_gateway_exproto,
|
emqx_gateway_exproto,
|
||||||
emqx_exhook,
|
emqx_exhook,
|
||||||
emqx_bridge,
|
emqx_bridge,
|
||||||
|
emqx_bridge_mqtt,
|
||||||
emqx_rule_engine,
|
emqx_rule_engine,
|
||||||
emqx_modules,
|
emqx_modules,
|
||||||
emqx_management,
|
emqx_management,
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
emqx_connector_mqtt_schema {
|
emqx_bridge_mqtt_connector_schema {
|
||||||
|
|
||||||
bridge_mode.desc:
|
bridge_mode.desc:
|
||||||
"""If enable bridge mode.
|
"""If enable bridge mode.
|
||||||
|
@ -32,6 +32,14 @@ is configured, then both the data got from the rule and the MQTT messages that m
|
||||||
egress_desc.label:
|
egress_desc.label:
|
||||||
"""Egress Configs"""
|
"""Egress Configs"""
|
||||||
|
|
||||||
|
egress_pool_size.desc:
|
||||||
|
"""Size of the pool of MQTT clients that will publish messages to the remote broker.<br/>
|
||||||
|
Each MQTT client will be assigned 'clientid' of the form '${clientid_prefix}:${bridge_name}:egress:${node}:${n}'
|
||||||
|
where 'n' is the number of a client inside the pool."""
|
||||||
|
|
||||||
|
egress_pool_size.label:
|
||||||
|
"""Pool Size"""
|
||||||
|
|
||||||
egress_local.desc:
|
egress_local.desc:
|
||||||
"""The configs about receiving messages from local broker."""
|
"""The configs about receiving messages from local broker."""
|
||||||
|
|
||||||
|
@ -75,6 +83,17 @@ ingress_desc.desc:
|
||||||
ingress_desc.label:
|
ingress_desc.label:
|
||||||
"""Ingress Configs"""
|
"""Ingress Configs"""
|
||||||
|
|
||||||
|
ingress_pool_size.desc:
|
||||||
|
"""Size of the pool of MQTT clients that will ingest messages from the remote broker.<br/>
|
||||||
|
This value will be respected only if 'remote.topic' is a shared subscription topic or topic-filter
|
||||||
|
(for example `$share/name1/topic1` or `$share/name2/topic2/#`), otherwise only a single MQTT client will be used.
|
||||||
|
Each MQTT client will be assigned 'clientid' of the form '${clientid_prefix}:${bridge_name}:ingress:${node}:${n}'
|
||||||
|
where 'n' is the number of a client inside the pool.
|
||||||
|
NOTE: Non-shared subscription will not work well when EMQX is clustered."""
|
||||||
|
|
||||||
|
ingress_pool_size.label:
|
||||||
|
"""Pool Size"""
|
||||||
|
|
||||||
ingress_local.desc:
|
ingress_local.desc:
|
||||||
"""The configs about sending message to the local broker."""
|
"""The configs about sending message to the local broker."""
|
||||||
|
|
|
@ -1,21 +0,0 @@
|
||||||
emqx_connector_mqtt {
|
|
||||||
|
|
||||||
name.desc:
|
|
||||||
"""Connector name, used as a human-readable description of the connector."""
|
|
||||||
|
|
||||||
name.label:
|
|
||||||
"""Connector Name"""
|
|
||||||
|
|
||||||
num_of_bridges.desc:
|
|
||||||
"""The current number of bridges that are using this connector."""
|
|
||||||
|
|
||||||
num_of_bridges.label:
|
|
||||||
"""Num of Bridges"""
|
|
||||||
|
|
||||||
type.desc:
|
|
||||||
"""The Connector Type."""
|
|
||||||
|
|
||||||
type.label:
|
|
||||||
"""Connector Type"""
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,96 +0,0 @@
|
||||||
emqx_authn_api {
|
|
||||||
|
|
||||||
authentication_get.desc:
|
|
||||||
"""列出全局认证链上的认证器。"""
|
|
||||||
|
|
||||||
authentication_id_delete.desc:
|
|
||||||
"""删除全局认证链上的指定认证器。"""
|
|
||||||
|
|
||||||
authentication_id_get.desc:
|
|
||||||
"""获取全局认证链上的指定认证器。"""
|
|
||||||
|
|
||||||
authentication_id_position_put.desc:
|
|
||||||
"""更改全局认证链上指定认证器的顺序。"""
|
|
||||||
|
|
||||||
authentication_id_put.desc:
|
|
||||||
"""更新全局认证链上的指定认证器。"""
|
|
||||||
|
|
||||||
authentication_id_status_get.desc:
|
|
||||||
"""获取全局认证链上指定认证器的状态。"""
|
|
||||||
|
|
||||||
authentication_id_users_get.desc:
|
|
||||||
"""获取全局认证链上指定认证器中的用户数据。"""
|
|
||||||
|
|
||||||
authentication_id_users_post.desc:
|
|
||||||
"""为全局认证链上的指定认证器创建用户数据。"""
|
|
||||||
|
|
||||||
authentication_id_users_user_id_delete.desc:
|
|
||||||
"""删除全局认证链上指定认证器中的指定用户数据。"""
|
|
||||||
|
|
||||||
authentication_id_users_user_id_get.desc:
|
|
||||||
"""获取全局认证链上指定认证器中的指定用户数据。"""
|
|
||||||
|
|
||||||
authentication_id_users_user_id_put.desc:
|
|
||||||
"""更新全局认证链上指定认证器中的指定用户数据。"""
|
|
||||||
|
|
||||||
authentication_post.desc:
|
|
||||||
"""为全局认证链创建认证器。"""
|
|
||||||
|
|
||||||
is_superuser.desc:
|
|
||||||
"""是否是超级用户"""
|
|
||||||
|
|
||||||
like_user_id.desc:
|
|
||||||
"""使用用户 ID (username 或 clientid)模糊查询。"""
|
|
||||||
|
|
||||||
like_user_id.label:
|
|
||||||
"""like_user_id"""
|
|
||||||
|
|
||||||
listeners_listener_id_authentication_get.desc:
|
|
||||||
"""列出监听器认证链上的认证器。"""
|
|
||||||
|
|
||||||
listeners_listener_id_authentication_id_delete.desc:
|
|
||||||
"""删除监听器认证链上的指定认证器。"""
|
|
||||||
|
|
||||||
listeners_listener_id_authentication_id_get.desc:
|
|
||||||
"""获取监听器认证链上的指定认证器。"""
|
|
||||||
|
|
||||||
listeners_listener_id_authentication_id_position_put.desc:
|
|
||||||
"""更改监听器认证链上指定认证器的顺序。"""
|
|
||||||
|
|
||||||
listeners_listener_id_authentication_id_put.desc:
|
|
||||||
"""更新监听器认证链上的指定认证器。"""
|
|
||||||
|
|
||||||
listeners_listener_id_authentication_id_status_get.desc:
|
|
||||||
"""获取监听器认证链上指定认证器的状态。"""
|
|
||||||
|
|
||||||
listeners_listener_id_authentication_id_users_get.desc:
|
|
||||||
"""列出监听器认证链上指定认证器中的用户数据。"""
|
|
||||||
|
|
||||||
listeners_listener_id_authentication_id_users_post.desc:
|
|
||||||
"""为监听器认证链上的指定认证器创建用户数据。"""
|
|
||||||
|
|
||||||
listeners_listener_id_authentication_id_users_user_id_delete.desc:
|
|
||||||
"""删除监听器认证链上指定认证器中的指定用户数据。"""
|
|
||||||
|
|
||||||
listeners_listener_id_authentication_id_users_user_id_get.desc:
|
|
||||||
"""获取监听器认证链上指定认证器中的指定用户数据。"""
|
|
||||||
|
|
||||||
listeners_listener_id_authentication_id_users_user_id_put.desc:
|
|
||||||
"""更新监听器认证链上指定认证器中的指定用户数据。"""
|
|
||||||
|
|
||||||
listeners_listener_id_authentication_post.desc:
|
|
||||||
"""在监听器认证链上创建认证器。"""
|
|
||||||
|
|
||||||
param_auth_id.desc:
|
|
||||||
"""认证器 ID。"""
|
|
||||||
|
|
||||||
param_listener_id.desc:
|
|
||||||
"""监听器 ID。"""
|
|
||||||
|
|
||||||
param_position.desc:
|
|
||||||
"""认证者在链中的位置。可能的值是 'front', 'rear', 'before:{other_authenticator}', 'after:{other_authenticator}'"""
|
|
||||||
|
|
||||||
param_user_id.desc:
|
|
||||||
"""用户 ID。"""
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,45 +0,0 @@
|
||||||
emqx_authn_http {
|
|
||||||
|
|
||||||
body.desc:
|
|
||||||
"""HTTP request body。"""
|
|
||||||
|
|
||||||
body.label:
|
|
||||||
"""Request Body"""
|
|
||||||
|
|
||||||
get.desc:
|
|
||||||
"""使用 HTTP Server 作为认证服务的认证器的配置项 (使用 GET 请求)。"""
|
|
||||||
|
|
||||||
headers.desc:
|
|
||||||
"""HTTP Headers 列表"""
|
|
||||||
|
|
||||||
headers.label:
|
|
||||||
"""请求头"""
|
|
||||||
|
|
||||||
headers_no_content_type.desc:
|
|
||||||
"""HTTP Headers 列表 (无 <code>content-type</code>) 。"""
|
|
||||||
|
|
||||||
headers_no_content_type.label:
|
|
||||||
"""请求头(无 content-type)"""
|
|
||||||
|
|
||||||
method.desc:
|
|
||||||
"""HTTP 请求方法。"""
|
|
||||||
|
|
||||||
method.label:
|
|
||||||
"""请求方法"""
|
|
||||||
|
|
||||||
post.desc:
|
|
||||||
"""使用 HTTP Server 作为认证服务的认证器的配置项 (使用 POST 请求)。"""
|
|
||||||
|
|
||||||
request_timeout.desc:
|
|
||||||
"""HTTP 请求超时时长。"""
|
|
||||||
|
|
||||||
request_timeout.label:
|
|
||||||
"""请求超时时间"""
|
|
||||||
|
|
||||||
url.desc:
|
|
||||||
"""认证 HTTP 服务器地址。"""
|
|
||||||
|
|
||||||
url.label:
|
|
||||||
"""URL"""
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,118 +0,0 @@
|
||||||
emqx_authn_jwt {
|
|
||||||
|
|
||||||
acl_claim_name.desc:
|
|
||||||
"""JWT claim name to use for getting ACL rules."""
|
|
||||||
|
|
||||||
acl_claim_name.label:
|
|
||||||
"""ACL claim name"""
|
|
||||||
|
|
||||||
algorithm.desc:
|
|
||||||
"""JWT 签名算法,支持 HMAC (配置为 <code>hmac-based</code>)和 RSA、ECDSA (配置为 <code>public-key</code>)。"""
|
|
||||||
|
|
||||||
algorithm.label:
|
|
||||||
"""JWT 签名算法"""
|
|
||||||
|
|
||||||
cacertfile.desc:
|
|
||||||
"""包含 PEM 编码的 CA 证书的文件的路径。"""
|
|
||||||
|
|
||||||
cacertfile.label:
|
|
||||||
"""CA 证书文件"""
|
|
||||||
|
|
||||||
certfile.desc:
|
|
||||||
"""包含用户证书的文件的路径。"""
|
|
||||||
|
|
||||||
certfile.label:
|
|
||||||
"""证书文件"""
|
|
||||||
|
|
||||||
enable.desc:
|
|
||||||
"""启用/禁用 SSL。"""
|
|
||||||
|
|
||||||
enable.label:
|
|
||||||
"""启用/禁用 SSL"""
|
|
||||||
|
|
||||||
endpoint.desc:
|
|
||||||
"""JWKS 端点, 它是一个以 JWKS 格式返回服务端的公钥集的只读端点。"""
|
|
||||||
|
|
||||||
endpoint.label:
|
|
||||||
"""JWKS Endpoint"""
|
|
||||||
|
|
||||||
from.desc:
|
|
||||||
"""要从中获取 JWT 的字段。"""
|
|
||||||
|
|
||||||
from.label:
|
|
||||||
"""源字段"""
|
|
||||||
|
|
||||||
jwt_hmac.desc:
|
|
||||||
"""用于认证的 JWT 使用 HMAC 算法签发时的配置。"""
|
|
||||||
|
|
||||||
jwt_jwks.desc:
|
|
||||||
"""用于认证的 JWTs 需要从 JWKS 端点获取时的配置。"""
|
|
||||||
|
|
||||||
keyfile.desc:
|
|
||||||
"""包含 PEM 编码的用户私钥的文件的路径。"""
|
|
||||||
|
|
||||||
keyfile.label:
|
|
||||||
"""私钥文件"""
|
|
||||||
|
|
||||||
jwt_public_key.desc:
|
|
||||||
"""用于认证的 JWT 使用 RSA 或 ECDSA 算法签发时的配置。"""
|
|
||||||
|
|
||||||
public_key.desc:
|
|
||||||
"""用于验证 JWT 的公钥。"""
|
|
||||||
|
|
||||||
public_key.label:
|
|
||||||
"""公钥"""
|
|
||||||
|
|
||||||
refresh_interval.desc:
|
|
||||||
"""JWKS 刷新间隔。"""
|
|
||||||
|
|
||||||
refresh_interval.label:
|
|
||||||
"""JWKS 刷新间隔"""
|
|
||||||
|
|
||||||
secret.desc:
|
|
||||||
"""使用 HMAC 算法时用于验证 JWT 的密钥"""
|
|
||||||
|
|
||||||
secret.label:
|
|
||||||
"""Secret"""
|
|
||||||
|
|
||||||
secret_base64_encoded.desc:
|
|
||||||
"""密钥是否为 Base64 编码。"""
|
|
||||||
|
|
||||||
secret_base64_encoded.label:
|
|
||||||
"""密钥是否为 Base64 编码"""
|
|
||||||
|
|
||||||
server_name_indication.desc:
|
|
||||||
"""服务器名称指示(SNI)。"""
|
|
||||||
|
|
||||||
server_name_indication.label:
|
|
||||||
"""服务器名称指示"""
|
|
||||||
|
|
||||||
ssl.desc:
|
|
||||||
"""SSL 选项。"""
|
|
||||||
|
|
||||||
ssl.label:
|
|
||||||
"""SSL 选项"""
|
|
||||||
|
|
||||||
use_jwks.desc:
|
|
||||||
"""是否使用 JWKS。"""
|
|
||||||
|
|
||||||
use_jwks.label:
|
|
||||||
"""是否使用 JWKS"""
|
|
||||||
|
|
||||||
verify.desc:
|
|
||||||
"""指定握手过程中是否校验对端证书。"""
|
|
||||||
|
|
||||||
verify.label:
|
|
||||||
"""Verify"""
|
|
||||||
|
|
||||||
verify_claims.desc:
|
|
||||||
"""需要验证的自定义声明列表,它是一个名称/值对列表。
|
|
||||||
值可以使用以下占位符:
|
|
||||||
- <code>${username}</code>: 将在运行时被替换为客户端连接时使用的用户名
|
|
||||||
- <code>${clientid}</code>: 将在运行时被替换为客户端连接时使用的客户端标识符
|
|
||||||
认证时将验证 JWT(取自 Password 字段)中 claims 的值是否与 <code>verify_claims</code> 中要求的相匹配。"""
|
|
||||||
|
|
||||||
verify_claims.label:
|
|
||||||
"""Verify Claims"""
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,12 +0,0 @@
|
||||||
emqx_authn_mnesia {
|
|
||||||
|
|
||||||
builtin_db.desc:
|
|
||||||
"""使用内置数据库作为认证数据源的认证器的配置项。"""
|
|
||||||
|
|
||||||
user_id_type.desc:
|
|
||||||
"""指定使用客户端ID `clientid` 还是用户名 `username` 进行认证。"""
|
|
||||||
|
|
||||||
user_id_type.label:
|
|
||||||
"""认证 ID 类型"""
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,45 +0,0 @@
|
||||||
emqx_authn_mongodb {
|
|
||||||
|
|
||||||
collection.desc:
|
|
||||||
"""存储认证数据的集合。"""
|
|
||||||
|
|
||||||
collection.label:
|
|
||||||
"""集合"""
|
|
||||||
|
|
||||||
filter.desc:
|
|
||||||
"""在查询中定义过滤条件的条件表达式。
|
|
||||||
过滤器支持如下占位符:
|
|
||||||
- <code>${username}</code>: 将在运行时被替换为客户端连接时使用的用户名
|
|
||||||
- <code>${clientid}</code>: 将在运行时被替换为客户端连接时使用的客户端标识符"""
|
|
||||||
|
|
||||||
filter.label:
|
|
||||||
"""过滤器"""
|
|
||||||
|
|
||||||
is_superuser_field.desc:
|
|
||||||
"""文档中用于定义用户是否具有超级用户权限的字段。"""
|
|
||||||
|
|
||||||
is_superuser_field.label:
|
|
||||||
"""超级用户字段"""
|
|
||||||
|
|
||||||
password_hash_field.desc:
|
|
||||||
"""文档中用于存放密码散列的字段。"""
|
|
||||||
|
|
||||||
password_hash_field.label:
|
|
||||||
"""密码散列字段"""
|
|
||||||
|
|
||||||
replica-set.desc:
|
|
||||||
"""使用 MongoDB (Replica Set) 作为认证数据源的认证器的配置项。"""
|
|
||||||
|
|
||||||
salt_field.desc:
|
|
||||||
"""文档中用于存放盐值的字段。"""
|
|
||||||
|
|
||||||
salt_field.label:
|
|
||||||
"""盐值字段"""
|
|
||||||
|
|
||||||
sharded-cluster.desc:
|
|
||||||
"""使用 MongoDB (Sharded Cluster) 作为认证数据源的认证器的配置项。"""
|
|
||||||
|
|
||||||
single.desc:
|
|
||||||
"""使用 MongoDB (Standalone) 作为认证数据源的认证器的配置项。"""
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,18 +0,0 @@
|
||||||
emqx_authn_mysql {
|
|
||||||
|
|
||||||
mysql.desc:
|
|
||||||
"""使用 MySQL 作为认证数据源的认证器的配置项。"""
|
|
||||||
|
|
||||||
query.desc:
|
|
||||||
"""用于查询密码散列等用于认证的数据的 SQL 语句。"""
|
|
||||||
|
|
||||||
query.label:
|
|
||||||
"""查询语句"""
|
|
||||||
|
|
||||||
query_timeout.desc:
|
|
||||||
"""SQL 查询的超时时间。"""
|
|
||||||
|
|
||||||
query_timeout.label:
|
|
||||||
"""查询超时"""
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,12 +0,0 @@
|
||||||
emqx_authn_pgsql {
|
|
||||||
|
|
||||||
postgresql.desc:
|
|
||||||
"""使用 PostgreSQL 作为认证数据源的认证器的配置项。"""
|
|
||||||
|
|
||||||
query.desc:
|
|
||||||
"""用于查询密码散列等用于认证的数据的 SQL 语句。"""
|
|
||||||
|
|
||||||
query.label:
|
|
||||||
"""查询语句"""
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,18 +0,0 @@
|
||||||
emqx_authn_redis {
|
|
||||||
|
|
||||||
cluster.desc:
|
|
||||||
"""使用 Redis (Cluster) 作为认证数据源的认证器的配置项。"""
|
|
||||||
|
|
||||||
cmd.desc:
|
|
||||||
"""用于查询密码散列等用于认证的数据的 Redis Command,目前仅支持 <code>HGET</code> 与 <code>HMGET</code>。"""
|
|
||||||
|
|
||||||
cmd.label:
|
|
||||||
"""Command"""
|
|
||||||
|
|
||||||
sentinel.desc:
|
|
||||||
"""使用 Redis (Sentinel) 作为认证数据源的认证器的配置项。"""
|
|
||||||
|
|
||||||
single.desc:
|
|
||||||
"""使用 Redis (Standalone) 作为认证数据源的认证器的配置项。"""
|
|
||||||
|
|
||||||
}
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue