Merge pull request #12215 from thalesmg/sync-r54-m-20231221
sync r54 to master
This commit is contained in:
commit
2d5f7e0a6d
4
Makefile
4
Makefile
|
@ -20,8 +20,8 @@ endif
|
||||||
|
|
||||||
# Dashboard version
|
# Dashboard version
|
||||||
# from https://github.com/emqx/emqx-dashboard5
|
# from https://github.com/emqx/emqx-dashboard5
|
||||||
export EMQX_DASHBOARD_VERSION ?= v1.5.2
|
export EMQX_DASHBOARD_VERSION ?= v1.6.0
|
||||||
export EMQX_EE_DASHBOARD_VERSION ?= e1.4.0-beta.8
|
export EMQX_EE_DASHBOARD_VERSION ?= e1.4.0
|
||||||
|
|
||||||
PROFILE ?= emqx
|
PROFILE ?= emqx
|
||||||
REL_PROFILES := emqx emqx-enterprise
|
REL_PROFILES := emqx emqx-enterprise
|
||||||
|
|
|
@ -83,6 +83,28 @@
|
||||||
end)()
|
end)()
|
||||||
).
|
).
|
||||||
|
|
||||||
|
-define(assertExceptionOneOf(CT1, CT2, EXPR),
|
||||||
|
(fun() ->
|
||||||
|
X__Attrs = [
|
||||||
|
{module, ?MODULE},
|
||||||
|
{line, ?LINE},
|
||||||
|
{expression, (??EXPR)},
|
||||||
|
{pattern, "[ " ++ (??CT1) ++ ", " ++ (??CT2) ++ " ]"}
|
||||||
|
],
|
||||||
|
X__Exc =
|
||||||
|
try (EXPR) of
|
||||||
|
X__V -> erlang:error({assertException, [{unexpected_success, X__V} | X__Attrs]})
|
||||||
|
catch
|
||||||
|
X__C:X__T:X__S -> {X__C, X__T, X__S}
|
||||||
|
end,
|
||||||
|
case {element(1, X__Exc), element(2, X__Exc)} of
|
||||||
|
CT1 -> ok;
|
||||||
|
CT2 -> ok;
|
||||||
|
_ -> erlang:error({assertException, [{unexpected_exception, X__Exc} | X__Attrs]})
|
||||||
|
end
|
||||||
|
end)()
|
||||||
|
).
|
||||||
|
|
||||||
-define(retrying(CONFIG, NUM_RETRIES, TEST_BODY_FN), begin
|
-define(retrying(CONFIG, NUM_RETRIES, TEST_BODY_FN), begin
|
||||||
__TEST_CASE = ?FUNCTION_NAME,
|
__TEST_CASE = ?FUNCTION_NAME,
|
||||||
(fun
|
(fun
|
||||||
|
|
|
@ -32,10 +32,10 @@
|
||||||
%% `apps/emqx/src/bpapi/README.md'
|
%% `apps/emqx/src/bpapi/README.md'
|
||||||
|
|
||||||
%% Opensource edition
|
%% Opensource edition
|
||||||
-define(EMQX_RELEASE_CE, "5.4.0-alpha.2").
|
-define(EMQX_RELEASE_CE, "5.4.0").
|
||||||
|
|
||||||
%% Enterprise edition
|
%% Enterprise edition
|
||||||
-define(EMQX_RELEASE_EE, "5.4.0-alpha.2").
|
-define(EMQX_RELEASE_EE, "5.4.0").
|
||||||
|
|
||||||
%% The HTTP API version
|
%% The HTTP API version
|
||||||
-define(EMQX_API_VERSION, "5.0").
|
-define(EMQX_API_VERSION, "5.0").
|
||||||
|
|
|
@ -27,9 +27,9 @@
|
||||||
{lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}},
|
{lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}},
|
||||||
{gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}},
|
{gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}},
|
||||||
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
|
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
|
||||||
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.9"}}},
|
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.11.1"}}},
|
||||||
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.17.0"}}},
|
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.17.0"}}},
|
||||||
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.0"}}},
|
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.1"}}},
|
||||||
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.40.3"}}},
|
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.40.3"}}},
|
||||||
{emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}},
|
{emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}},
|
||||||
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
|
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
|
||||||
|
|
|
@ -94,6 +94,7 @@
|
||||||
|
|
||||||
-export([ensure_atom_conf_path/2]).
|
-export([ensure_atom_conf_path/2]).
|
||||||
-export([load_config_files/2]).
|
-export([load_config_files/2]).
|
||||||
|
-export([upgrade_raw_conf/2]).
|
||||||
|
|
||||||
-ifdef(TEST).
|
-ifdef(TEST).
|
||||||
-export([erase_all/0, backup_and_write/2]).
|
-export([erase_all/0, backup_and_write/2]).
|
||||||
|
|
|
@ -55,7 +55,6 @@
|
||||||
]).
|
]).
|
||||||
|
|
||||||
-export([pre_config_update/3, post_config_update/5]).
|
-export([pre_config_update/3, post_config_update/5]).
|
||||||
-export([create_listener/3, remove_listener/3, update_listener/3]).
|
|
||||||
|
|
||||||
-export([format_bind/1]).
|
-export([format_bind/1]).
|
||||||
|
|
||||||
|
@ -66,6 +65,11 @@
|
||||||
-export_type([listener_id/0]).
|
-export_type([listener_id/0]).
|
||||||
|
|
||||||
-type listener_id() :: atom() | binary().
|
-type listener_id() :: atom() | binary().
|
||||||
|
-type listener_type() :: tcp | ssl | ws | wss | quic | dtls.
|
||||||
|
|
||||||
|
-define(ESOCKD_LISTENER(T), (T == tcp orelse T == ssl)).
|
||||||
|
-define(COWBOY_LISTENER(T), (T == ws orelse T == wss)).
|
||||||
|
|
||||||
-define(ROOT_KEY, listeners).
|
-define(ROOT_KEY, listeners).
|
||||||
-define(CONF_KEY_PATH, [?ROOT_KEY, '?', '?']).
|
-define(CONF_KEY_PATH, [?ROOT_KEY, '?', '?']).
|
||||||
-define(TYPES_STRING, ["tcp", "ssl", "ws", "wss", "quic"]).
|
-define(TYPES_STRING, ["tcp", "ssl", "ws", "wss", "quic"]).
|
||||||
|
@ -140,15 +144,9 @@ format_raw_listeners({Type0, Conf}) ->
|
||||||
|
|
||||||
-spec is_running(ListenerId :: atom()) -> boolean() | {error, not_found}.
|
-spec is_running(ListenerId :: atom()) -> boolean() | {error, not_found}.
|
||||||
is_running(ListenerId) ->
|
is_running(ListenerId) ->
|
||||||
case
|
case lists:keyfind(ListenerId, 1, list()) of
|
||||||
[
|
{_Id, #{running := Running}} -> Running;
|
||||||
Running
|
false -> {error, not_found}
|
||||||
|| {Id, #{running := Running}} <- list(),
|
|
||||||
Id =:= ListenerId
|
|
||||||
]
|
|
||||||
of
|
|
||||||
[] -> {error, not_found};
|
|
||||||
[IsRunning] -> IsRunning
|
|
||||||
end.
|
end.
|
||||||
|
|
||||||
is_running(Type, ListenerId, Conf) when Type =:= tcp; Type =:= ssl ->
|
is_running(Type, ListenerId, Conf) when Type =:= tcp; Type =:= ssl ->
|
||||||
|
@ -229,24 +227,26 @@ start() ->
|
||||||
start_listener(ListenerId) ->
|
start_listener(ListenerId) ->
|
||||||
apply_on_listener(ListenerId, fun start_listener/3).
|
apply_on_listener(ListenerId, fun start_listener/3).
|
||||||
|
|
||||||
-spec start_listener(atom(), atom(), map()) -> ok | {error, term()}.
|
-spec start_listener(listener_type(), atom(), map()) -> ok | {error, term()}.
|
||||||
start_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
|
start_listener(Type, Name, #{bind := Bind, enable := true} = Conf) ->
|
||||||
case do_start_listener(Type, ListenerName, Conf) of
|
ListenerId = listener_id(Type, Name),
|
||||||
|
Limiter = limiter(Conf),
|
||||||
|
ok = add_limiter_bucket(ListenerId, Limiter),
|
||||||
|
case do_start_listener(Type, Name, ListenerId, Conf) of
|
||||||
{ok, {skipped, Reason}} when
|
{ok, {skipped, Reason}} when
|
||||||
Reason =:= listener_disabled;
|
|
||||||
Reason =:= quic_app_missing
|
Reason =:= quic_app_missing
|
||||||
->
|
->
|
||||||
?tp(listener_not_started, #{type => Type, bind => Bind, status => {skipped, Reason}}),
|
?tp(listener_not_started, #{type => Type, bind => Bind, status => {skipped, Reason}}),
|
||||||
console_print(
|
console_print(
|
||||||
"Listener ~ts is NOT started due to: ~p.~n",
|
"Listener ~ts is NOT started due to: ~p.~n",
|
||||||
[listener_id(Type, ListenerName), Reason]
|
[ListenerId, Reason]
|
||||||
),
|
),
|
||||||
ok;
|
ok;
|
||||||
{ok, _} ->
|
{ok, _} ->
|
||||||
?tp(listener_started, #{type => Type, bind => Bind}),
|
?tp(listener_started, #{type => Type, bind => Bind}),
|
||||||
console_print(
|
console_print(
|
||||||
"Listener ~ts on ~ts started.~n",
|
"Listener ~ts on ~ts started.~n",
|
||||||
[listener_id(Type, ListenerName), format_bind(Bind)]
|
[ListenerId, format_bind(Bind)]
|
||||||
),
|
),
|
||||||
ok;
|
ok;
|
||||||
{error, {already_started, Pid}} ->
|
{error, {already_started, Pid}} ->
|
||||||
|
@ -255,8 +255,8 @@ start_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
|
||||||
}),
|
}),
|
||||||
{error, {already_started, Pid}};
|
{error, {already_started, Pid}};
|
||||||
{error, Reason} ->
|
{error, Reason} ->
|
||||||
|
ok = del_limiter_bucket(ListenerId, Limiter),
|
||||||
?tp(listener_not_started, #{type => Type, bind => Bind, status => {error, Reason}}),
|
?tp(listener_not_started, #{type => Type, bind => Bind, status => {error, Reason}}),
|
||||||
ListenerId = listener_id(Type, ListenerName),
|
|
||||||
BindStr = format_bind(Bind),
|
BindStr = format_bind(Bind),
|
||||||
?ELOG(
|
?ELOG(
|
||||||
"Failed to start listener ~ts on ~ts: ~0p.~n",
|
"Failed to start listener ~ts on ~ts: ~0p.~n",
|
||||||
|
@ -269,7 +269,13 @@ start_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
|
||||||
)
|
)
|
||||||
),
|
),
|
||||||
{error, {failed_to_start, Msg}}
|
{error, {failed_to_start, Msg}}
|
||||||
end.
|
end;
|
||||||
|
start_listener(Type, Name, #{enable := false}) ->
|
||||||
|
console_print(
|
||||||
|
"Listener ~ts is NOT started due to: disabled.~n",
|
||||||
|
[listener_id(Type, Name)]
|
||||||
|
),
|
||||||
|
ok.
|
||||||
|
|
||||||
%% @doc Restart all listeners
|
%% @doc Restart all listeners
|
||||||
-spec restart() -> ok.
|
-spec restart() -> ok.
|
||||||
|
@ -280,16 +286,33 @@ restart() ->
|
||||||
restart_listener(ListenerId) ->
|
restart_listener(ListenerId) ->
|
||||||
apply_on_listener(ListenerId, fun restart_listener/3).
|
apply_on_listener(ListenerId, fun restart_listener/3).
|
||||||
|
|
||||||
-spec restart_listener(atom(), atom(), map() | {map(), map()}) -> ok | {error, term()}.
|
-spec restart_listener(listener_type(), atom(), map()) -> ok | {error, term()}.
|
||||||
restart_listener(Type, ListenerName, {OldConf, NewConf}) ->
|
|
||||||
restart_listener(Type, ListenerName, OldConf, NewConf);
|
|
||||||
restart_listener(Type, ListenerName, Conf) ->
|
restart_listener(Type, ListenerName, Conf) ->
|
||||||
restart_listener(Type, ListenerName, Conf, Conf).
|
restart_listener(Type, ListenerName, Conf, Conf).
|
||||||
|
|
||||||
restart_listener(Type, ListenerName, OldConf, NewConf) ->
|
update_listener(_Type, _Name, #{enable := false}, #{enable := false}) ->
|
||||||
case stop_listener(Type, ListenerName, OldConf) of
|
ok;
|
||||||
ok -> start_listener(Type, ListenerName, NewConf);
|
update_listener(Type, Name, Conf = #{enable := true}, #{enable := false}) ->
|
||||||
{error, Reason} -> {error, Reason}
|
stop_listener(Type, Name, Conf);
|
||||||
|
update_listener(Type, Name, #{enable := false}, Conf = #{enable := true}) ->
|
||||||
|
start_listener(Type, Name, Conf);
|
||||||
|
update_listener(Type, Name, OldConf, NewConf) ->
|
||||||
|
Id = listener_id(Type, Name),
|
||||||
|
ok = update_limiter_bucket(Id, limiter(OldConf), limiter(NewConf)),
|
||||||
|
case do_update_listener(Type, Name, OldConf, NewConf) of
|
||||||
|
ok ->
|
||||||
|
ok = maybe_unregister_ocsp_stapling_refresh(Type, Name, NewConf),
|
||||||
|
ok;
|
||||||
|
{error, _Reason} ->
|
||||||
|
restart_listener(Type, Name, OldConf, NewConf)
|
||||||
|
end.
|
||||||
|
|
||||||
|
restart_listener(Type, Name, OldConf, NewConf) ->
|
||||||
|
case stop_listener(Type, Name, OldConf) of
|
||||||
|
ok ->
|
||||||
|
start_listener(Type, Name, NewConf);
|
||||||
|
{error, Reason} ->
|
||||||
|
{error, Reason}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
%% @doc Stop all listeners.
|
%% @doc Stop all listeners.
|
||||||
|
@ -305,9 +328,10 @@ stop() ->
|
||||||
stop_listener(ListenerId) ->
|
stop_listener(ListenerId) ->
|
||||||
apply_on_listener(ListenerId, fun stop_listener/3).
|
apply_on_listener(ListenerId, fun stop_listener/3).
|
||||||
|
|
||||||
stop_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
|
stop_listener(Type, Name, #{bind := Bind} = Conf) ->
|
||||||
Id = listener_id(Type, ListenerName),
|
Id = listener_id(Type, Name),
|
||||||
ok = del_limiter_bucket(Id, Conf),
|
ok = del_limiter_bucket(Id, limiter(Conf)),
|
||||||
|
ok = unregister_ocsp_stapling_refresh(Type, Name),
|
||||||
case do_stop_listener(Type, Id, Conf) of
|
case do_stop_listener(Type, Id, Conf) of
|
||||||
ok ->
|
ok ->
|
||||||
console_print(
|
console_print(
|
||||||
|
@ -325,11 +349,10 @@ stop_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
|
||||||
{error, Reason}
|
{error, Reason}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
-spec do_stop_listener(atom(), atom(), map()) -> ok | {error, term()}.
|
-spec do_stop_listener(listener_type(), atom(), map()) -> ok | {error, term()}.
|
||||||
|
do_stop_listener(Type, Id, #{bind := ListenOn}) when ?ESOCKD_LISTENER(Type) ->
|
||||||
do_stop_listener(Type, Id, #{bind := ListenOn}) when Type == tcp; Type == ssl ->
|
|
||||||
esockd:close(Id, ListenOn);
|
esockd:close(Id, ListenOn);
|
||||||
do_stop_listener(Type, Id, #{bind := ListenOn}) when Type == ws; Type == wss ->
|
do_stop_listener(Type, Id, #{bind := ListenOn}) when ?COWBOY_LISTENER(Type) ->
|
||||||
case cowboy:stop_listener(Id) of
|
case cowboy:stop_listener(Id) of
|
||||||
ok ->
|
ok ->
|
||||||
wait_listener_stopped(ListenOn);
|
wait_listener_stopped(ListenOn);
|
||||||
|
@ -369,45 +392,25 @@ console_print(Fmt, Args) -> ?ULOG(Fmt, Args).
|
||||||
console_print(_Fmt, _Args) -> ok.
|
console_print(_Fmt, _Args) -> ok.
|
||||||
-endif.
|
-endif.
|
||||||
|
|
||||||
%% Start MQTT/TCP listener
|
-spec do_start_listener(listener_type(), atom(), listener_id(), map()) ->
|
||||||
-spec do_start_listener(atom(), atom(), map()) ->
|
|
||||||
{ok, pid() | {skipped, atom()}} | {error, term()}.
|
{ok, pid() | {skipped, atom()}} | {error, term()}.
|
||||||
do_start_listener(_Type, _ListenerName, #{enable := false}) ->
|
%% Start MQTT/TCP listener
|
||||||
{ok, {skipped, listener_disabled}};
|
do_start_listener(Type, Name, Id, #{bind := ListenOn} = Opts) when ?ESOCKD_LISTENER(Type) ->
|
||||||
do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when
|
|
||||||
Type == tcp; Type == ssl
|
|
||||||
->
|
|
||||||
Id = listener_id(Type, ListenerName),
|
|
||||||
Limiter = limiter(Opts),
|
|
||||||
add_limiter_bucket(Id, Limiter),
|
|
||||||
esockd:open(
|
esockd:open(
|
||||||
Id,
|
Id,
|
||||||
ListenOn,
|
ListenOn,
|
||||||
merge_default(esockd_opts(Id, Type, Opts)),
|
merge_default(esockd_opts(Id, Type, Name, Opts))
|
||||||
{emqx_connection, start_link, [
|
|
||||||
#{
|
|
||||||
listener => {Type, ListenerName},
|
|
||||||
zone => zone(Opts),
|
|
||||||
limiter => Limiter,
|
|
||||||
enable_authn => enable_authn(Opts)
|
|
||||||
}
|
|
||||||
]}
|
|
||||||
);
|
);
|
||||||
%% Start MQTT/WS listener
|
%% Start MQTT/WS listener
|
||||||
do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when
|
do_start_listener(Type, Name, Id, Opts) when ?COWBOY_LISTENER(Type) ->
|
||||||
Type == ws; Type == wss
|
RanchOpts = ranch_opts(Type, Opts),
|
||||||
->
|
WsOpts = ws_opts(Type, Name, Opts),
|
||||||
Id = listener_id(Type, ListenerName),
|
|
||||||
Limiter = limiter(Opts),
|
|
||||||
add_limiter_bucket(Id, Limiter),
|
|
||||||
RanchOpts = ranch_opts(Type, ListenOn, Opts),
|
|
||||||
WsOpts = ws_opts(Type, ListenerName, Opts, Limiter),
|
|
||||||
case Type of
|
case Type of
|
||||||
ws -> cowboy:start_clear(Id, RanchOpts, WsOpts);
|
ws -> cowboy:start_clear(Id, RanchOpts, WsOpts);
|
||||||
wss -> cowboy:start_tls(Id, RanchOpts, WsOpts)
|
wss -> cowboy:start_tls(Id, RanchOpts, WsOpts)
|
||||||
end;
|
end;
|
||||||
%% Start MQTT/QUIC listener
|
%% Start MQTT/QUIC listener
|
||||||
do_start_listener(quic, ListenerName, #{bind := Bind} = Opts) ->
|
do_start_listener(quic, Name, Id, #{bind := Bind} = Opts) ->
|
||||||
ListenOn =
|
ListenOn =
|
||||||
case Bind of
|
case Bind of
|
||||||
{Addr, Port} when tuple_size(Addr) == 4 ->
|
{Addr, Port} when tuple_size(Addr) == 4 ->
|
||||||
|
@ -457,16 +460,13 @@ do_start_listener(quic, ListenerName, #{bind := Bind} = Opts) ->
|
||||||
peer_unidi_stream_count => maps:get(peer_unidi_stream_count, Opts, 1),
|
peer_unidi_stream_count => maps:get(peer_unidi_stream_count, Opts, 1),
|
||||||
peer_bidi_stream_count => maps:get(peer_bidi_stream_count, Opts, 10),
|
peer_bidi_stream_count => maps:get(peer_bidi_stream_count, Opts, 10),
|
||||||
zone => zone(Opts),
|
zone => zone(Opts),
|
||||||
listener => {quic, ListenerName},
|
listener => {quic, Name},
|
||||||
limiter => Limiter
|
limiter => Limiter
|
||||||
},
|
},
|
||||||
StreamOpts = #{
|
StreamOpts = #{
|
||||||
stream_callback => emqx_quic_stream,
|
stream_callback => emqx_quic_stream,
|
||||||
active => 1
|
active => 1
|
||||||
},
|
},
|
||||||
|
|
||||||
Id = listener_id(quic, ListenerName),
|
|
||||||
add_limiter_bucket(Id, Limiter),
|
|
||||||
quicer:spawn_listener(
|
quicer:spawn_listener(
|
||||||
Id,
|
Id,
|
||||||
ListenOn,
|
ListenOn,
|
||||||
|
@ -476,6 +476,39 @@ do_start_listener(quic, ListenerName, #{bind := Bind} = Opts) ->
|
||||||
{ok, {skipped, quic_app_missing}}
|
{ok, {skipped, quic_app_missing}}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
do_update_listener(Type, Name, OldConf, NewConf = #{bind := ListenOn}) when
|
||||||
|
?ESOCKD_LISTENER(Type)
|
||||||
|
->
|
||||||
|
Id = listener_id(Type, Name),
|
||||||
|
case maps:get(bind, OldConf) of
|
||||||
|
ListenOn ->
|
||||||
|
esockd:set_options({Id, ListenOn}, esockd_opts(Id, Type, Name, NewConf));
|
||||||
|
_Different ->
|
||||||
|
%% TODO
|
||||||
|
%% Again, we're not strictly required to drop live connections in this case.
|
||||||
|
{error, not_supported}
|
||||||
|
end;
|
||||||
|
do_update_listener(Type, Name, OldConf, NewConf) when
|
||||||
|
?COWBOY_LISTENER(Type)
|
||||||
|
->
|
||||||
|
Id = listener_id(Type, Name),
|
||||||
|
RanchOpts = ranch_opts(Type, NewConf),
|
||||||
|
WsOpts = ws_opts(Type, Name, NewConf),
|
||||||
|
case ranch_opts(Type, OldConf) of
|
||||||
|
RanchOpts ->
|
||||||
|
%% Transport options did not change, no need to touch the listener.
|
||||||
|
ok;
|
||||||
|
_Different ->
|
||||||
|
%% Transport options changed, we need to tear down the listener.
|
||||||
|
ok = ranch:suspend_listener(Id),
|
||||||
|
ok = ranch:set_transport_options(Id, RanchOpts)
|
||||||
|
end,
|
||||||
|
ok = ranch:set_protocol_options(Id, WsOpts),
|
||||||
|
%% No-op if the listener was not suspended.
|
||||||
|
ranch:resume_listener(Id);
|
||||||
|
do_update_listener(_Type, _Name, _OldConf, _NewConf) ->
|
||||||
|
{error, not_supported}.
|
||||||
|
|
||||||
%% Update the listeners at runtime
|
%% Update the listeners at runtime
|
||||||
pre_config_update([?ROOT_KEY, Type, Name], {create, NewConf}, V) when
|
pre_config_update([?ROOT_KEY, Type, Name], {create, NewConf}, V) when
|
||||||
V =:= undefined orelse V =:= ?TOMBSTONE_VALUE
|
V =:= undefined orelse V =:= ?TOMBSTONE_VALUE
|
||||||
|
@ -501,69 +534,44 @@ pre_config_update([?ROOT_KEY], NewConf, _RawConf) ->
|
||||||
post_config_update([?ROOT_KEY, Type, Name], {create, _Request}, NewConf, OldConf, _AppEnvs) when
|
post_config_update([?ROOT_KEY, Type, Name], {create, _Request}, NewConf, OldConf, _AppEnvs) when
|
||||||
OldConf =:= undefined orelse OldConf =:= ?TOMBSTONE_TYPE
|
OldConf =:= undefined orelse OldConf =:= ?TOMBSTONE_TYPE
|
||||||
->
|
->
|
||||||
create_listener(Type, Name, NewConf);
|
start_listener(Type, Name, NewConf);
|
||||||
post_config_update([?ROOT_KEY, Type, Name], {update, _Request}, NewConf, OldConf, _AppEnvs) ->
|
post_config_update([?ROOT_KEY, Type, Name], {update, _Request}, NewConf, OldConf, _AppEnvs) ->
|
||||||
update_listener(Type, Name, {OldConf, NewConf});
|
update_listener(Type, Name, OldConf, NewConf);
|
||||||
post_config_update([?ROOT_KEY, Type, Name], ?MARK_DEL, _, OldConf = #{}, _AppEnvs) ->
|
post_config_update([?ROOT_KEY, Type, Name], ?MARK_DEL, _, OldConf = #{}, _AppEnvs) ->
|
||||||
remove_listener(Type, Name, OldConf);
|
stop_listener(Type, Name, OldConf);
|
||||||
post_config_update([?ROOT_KEY, Type, Name], {action, _Action, _}, NewConf, OldConf, _AppEnvs) ->
|
post_config_update([?ROOT_KEY, Type, Name], {action, _Action, _}, NewConf, OldConf, _AppEnvs) ->
|
||||||
#{enable := NewEnabled} = NewConf,
|
update_listener(Type, Name, OldConf, NewConf);
|
||||||
#{enable := OldEnabled} = OldConf,
|
|
||||||
case {NewEnabled, OldEnabled} of
|
|
||||||
{true, true} ->
|
|
||||||
ok = maybe_unregister_ocsp_stapling_refresh(Type, Name, NewConf),
|
|
||||||
restart_listener(Type, Name, {OldConf, NewConf});
|
|
||||||
{true, false} ->
|
|
||||||
ok = maybe_unregister_ocsp_stapling_refresh(Type, Name, NewConf),
|
|
||||||
start_listener(Type, Name, NewConf);
|
|
||||||
{false, true} ->
|
|
||||||
ok = unregister_ocsp_stapling_refresh(Type, Name),
|
|
||||||
stop_listener(Type, Name, OldConf);
|
|
||||||
{false, false} ->
|
|
||||||
ok = unregister_ocsp_stapling_refresh(Type, Name),
|
|
||||||
stop_listener(Type, Name, OldConf)
|
|
||||||
end;
|
|
||||||
post_config_update([?ROOT_KEY], _Request, OldConf, OldConf, _AppEnvs) ->
|
post_config_update([?ROOT_KEY], _Request, OldConf, OldConf, _AppEnvs) ->
|
||||||
ok;
|
ok;
|
||||||
post_config_update([?ROOT_KEY], _Request, NewConf, OldConf, _AppEnvs) ->
|
post_config_update([?ROOT_KEY], _Request, NewConf, OldConf, _AppEnvs) ->
|
||||||
#{added := Added, removed := Removed, changed := Changed} = diff_confs(NewConf, OldConf),
|
#{added := Added, removed := Removed, changed := Changed} = diff_confs(NewConf, OldConf),
|
||||||
Updated = lists:map(fun({{{T, N}, Old}, {_, New}}) -> {{T, N}, {Old, New}} end, Changed),
|
%% TODO
|
||||||
perform_listener_changes([
|
%% This currently lacks transactional semantics. If one of the changes fails,
|
||||||
{fun ?MODULE:remove_listener/3, Removed},
|
%% previous changes will not be rolled back.
|
||||||
{fun ?MODULE:update_listener/3, Updated},
|
perform_listener_changes(
|
||||||
{fun ?MODULE:create_listener/3, Added}
|
[{update, L} || L <- Changed] ++
|
||||||
]);
|
[{stop, L} || L <- Removed] ++
|
||||||
|
[{start, L} || L <- Added]
|
||||||
|
);
|
||||||
post_config_update(_Path, _Request, _NewConf, _OldConf, _AppEnvs) ->
|
post_config_update(_Path, _Request, _NewConf, _OldConf, _AppEnvs) ->
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
create_listener(Type, Name, NewConf) ->
|
|
||||||
start_listener(Type, Name, NewConf).
|
|
||||||
|
|
||||||
remove_listener(Type, Name, OldConf) ->
|
|
||||||
ok = unregister_ocsp_stapling_refresh(Type, Name),
|
|
||||||
stop_listener(Type, Name, OldConf).
|
|
||||||
|
|
||||||
update_listener(Type, Name, {OldConf, NewConf}) ->
|
|
||||||
ok = maybe_unregister_ocsp_stapling_refresh(Type, Name, NewConf),
|
|
||||||
restart_listener(Type, Name, {OldConf, NewConf}).
|
|
||||||
|
|
||||||
perform_listener_changes([]) ->
|
perform_listener_changes([]) ->
|
||||||
ok;
|
ok;
|
||||||
perform_listener_changes([{Action, ConfL} | Tasks]) ->
|
perform_listener_changes([{Action, Listener} | Rest]) ->
|
||||||
case perform_listener_changes(Action, ConfL) of
|
case perform_listener_change(Action, Listener) of
|
||||||
ok -> perform_listener_changes(Tasks);
|
ok -> perform_listener_changes(Rest);
|
||||||
{error, Reason} -> {error, Reason}
|
{error, Reason} -> {error, Reason}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
perform_listener_changes(_Action, []) ->
|
perform_listener_change(start, {Type, Name, Conf}) ->
|
||||||
ok;
|
start_listener(Type, Name, Conf);
|
||||||
perform_listener_changes(Action, [{{Type, Name}, Diff} | MapConf]) ->
|
perform_listener_change(update, {{Type, Name, ConfOld}, {_, _, ConfNew}}) ->
|
||||||
case Action(Type, Name, Diff) of
|
update_listener(Type, Name, ConfOld, ConfNew);
|
||||||
ok -> perform_listener_changes(Action, MapConf);
|
perform_listener_change(stop, {Type, Name, Conf}) ->
|
||||||
{error, Reason} -> {error, Reason}
|
stop_listener(Type, Name, Conf).
|
||||||
end.
|
|
||||||
|
|
||||||
esockd_opts(ListenerId, Type, Opts0) ->
|
esockd_opts(ListenerId, Type, Name, Opts0) ->
|
||||||
Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0),
|
Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0),
|
||||||
Limiter = limiter(Opts0),
|
Limiter = limiter(Opts0),
|
||||||
Opts2 =
|
Opts2 =
|
||||||
|
@ -579,7 +587,16 @@ esockd_opts(ListenerId, Type, Opts0) ->
|
||||||
end,
|
end,
|
||||||
Opts3 = Opts2#{
|
Opts3 = Opts2#{
|
||||||
access_rules => esockd_access_rules(maps:get(access_rules, Opts0, [])),
|
access_rules => esockd_access_rules(maps:get(access_rules, Opts0, [])),
|
||||||
tune_fun => {emqx_olp, backoff_new_conn, [zone(Opts0)]}
|
tune_fun => {emqx_olp, backoff_new_conn, [zone(Opts0)]},
|
||||||
|
connection_mfargs =>
|
||||||
|
{emqx_connection, start_link, [
|
||||||
|
#{
|
||||||
|
listener => {Type, Name},
|
||||||
|
zone => zone(Opts0),
|
||||||
|
limiter => Limiter,
|
||||||
|
enable_authn => enable_authn(Opts0)
|
||||||
|
}
|
||||||
|
]}
|
||||||
},
|
},
|
||||||
maps:to_list(
|
maps:to_list(
|
||||||
case Type of
|
case Type of
|
||||||
|
@ -593,20 +610,21 @@ esockd_opts(ListenerId, Type, Opts0) ->
|
||||||
end
|
end
|
||||||
).
|
).
|
||||||
|
|
||||||
ws_opts(Type, ListenerName, Opts, Limiter) ->
|
ws_opts(Type, ListenerName, Opts) ->
|
||||||
WsPaths = [
|
WsPath = emqx_utils_maps:deep_get([websocket, mqtt_path], Opts, "/mqtt"),
|
||||||
{emqx_utils_maps:deep_get([websocket, mqtt_path], Opts, "/mqtt"), emqx_ws_connection, #{
|
WsRoutes = [
|
||||||
|
{WsPath, emqx_ws_connection, #{
|
||||||
zone => zone(Opts),
|
zone => zone(Opts),
|
||||||
listener => {Type, ListenerName},
|
listener => {Type, ListenerName},
|
||||||
limiter => Limiter,
|
limiter => limiter(Opts),
|
||||||
enable_authn => enable_authn(Opts)
|
enable_authn => enable_authn(Opts)
|
||||||
}}
|
}}
|
||||||
],
|
],
|
||||||
Dispatch = cowboy_router:compile([{'_', WsPaths}]),
|
Dispatch = cowboy_router:compile([{'_', WsRoutes}]),
|
||||||
ProxyProto = maps:get(proxy_protocol, Opts, false),
|
ProxyProto = maps:get(proxy_protocol, Opts, false),
|
||||||
#{env => #{dispatch => Dispatch}, proxy_header => ProxyProto}.
|
#{env => #{dispatch => Dispatch}, proxy_header => ProxyProto}.
|
||||||
|
|
||||||
ranch_opts(Type, ListenOn, Opts) ->
|
ranch_opts(Type, Opts = #{bind := ListenOn}) ->
|
||||||
NumAcceptors = maps:get(acceptors, Opts, 4),
|
NumAcceptors = maps:get(acceptors, Opts, 4),
|
||||||
MaxConnections = maps:get(max_connections, Opts, 1024),
|
MaxConnections = maps:get(max_connections, Opts, 1024),
|
||||||
SocketOpts =
|
SocketOpts =
|
||||||
|
@ -725,41 +743,47 @@ add_limiter_bucket(Id, Limiter) ->
|
||||||
maps:without([client], Limiter)
|
maps:without([client], Limiter)
|
||||||
).
|
).
|
||||||
|
|
||||||
del_limiter_bucket(Id, Conf) ->
|
del_limiter_bucket(_Id, undefined) ->
|
||||||
case limiter(Conf) of
|
ok;
|
||||||
undefined ->
|
del_limiter_bucket(Id, Limiter) ->
|
||||||
ok;
|
maps:foreach(
|
||||||
Limiter ->
|
fun(Type, _) ->
|
||||||
lists:foreach(
|
emqx_limiter_server:del_bucket(Id, Type)
|
||||||
fun(Type) ->
|
end,
|
||||||
emqx_limiter_server:del_bucket(Id, Type)
|
Limiter
|
||||||
end,
|
).
|
||||||
maps:keys(Limiter)
|
|
||||||
)
|
update_limiter_bucket(Id, Limiter, undefined) ->
|
||||||
end.
|
del_limiter_bucket(Id, Limiter);
|
||||||
|
update_limiter_bucket(Id, undefined, Limiter) ->
|
||||||
|
add_limiter_bucket(Id, Limiter);
|
||||||
|
update_limiter_bucket(Id, OldLimiter, NewLimiter) ->
|
||||||
|
ok = add_limiter_bucket(Id, NewLimiter),
|
||||||
|
Outdated = maps:without(maps:keys(NewLimiter), OldLimiter),
|
||||||
|
del_limiter_bucket(Id, Outdated).
|
||||||
|
|
||||||
diff_confs(NewConfs, OldConfs) ->
|
diff_confs(NewConfs, OldConfs) ->
|
||||||
emqx_utils:diff_lists(
|
emqx_utils:diff_lists(
|
||||||
flatten_confs(NewConfs),
|
flatten_confs(NewConfs),
|
||||||
flatten_confs(OldConfs),
|
flatten_confs(OldConfs),
|
||||||
fun({Key, _}) -> Key end
|
fun({Type, Name, _}) -> {Type, Name} end
|
||||||
).
|
).
|
||||||
|
|
||||||
flatten_confs(Conf0) ->
|
flatten_confs(Confs) ->
|
||||||
lists:flatmap(
|
lists:flatmap(
|
||||||
fun({Type, Conf}) ->
|
fun({Type, Listeners}) ->
|
||||||
do_flatten_confs(Type, Conf)
|
do_flatten_confs(Type, Listeners)
|
||||||
end,
|
end,
|
||||||
maps:to_list(Conf0)
|
maps:to_list(Confs)
|
||||||
).
|
).
|
||||||
|
|
||||||
do_flatten_confs(Type, Conf0) ->
|
do_flatten_confs(Type, Listeners) ->
|
||||||
FilterFun =
|
FilterFun =
|
||||||
fun
|
fun
|
||||||
({_Name, ?TOMBSTONE_TYPE}) -> false;
|
({_Name, ?TOMBSTONE_TYPE}) -> false;
|
||||||
({Name, Conf}) -> {true, {{Type, Name}, Conf}}
|
({Name, Conf}) -> {true, {Type, Name, Conf}}
|
||||||
end,
|
end,
|
||||||
lists:filtermap(FilterFun, maps:to_list(Conf0)).
|
lists:filtermap(FilterFun, maps:to_list(Listeners)).
|
||||||
|
|
||||||
enable_authn(Opts) ->
|
enable_authn(Opts) ->
|
||||||
maps:get(enable_authn, Opts, true).
|
maps:get(enable_authn, Opts, true).
|
||||||
|
|
|
@ -67,6 +67,11 @@
|
||||||
select_free_port/1
|
select_free_port/1
|
||||||
]).
|
]).
|
||||||
|
|
||||||
|
-export([
|
||||||
|
ssl_verify_fun_allow_any_host/0,
|
||||||
|
ssl_verify_fun_allow_any_host_impl/3
|
||||||
|
]).
|
||||||
|
|
||||||
-export([
|
-export([
|
||||||
emqx_cluster/1,
|
emqx_cluster/1,
|
||||||
emqx_cluster/2,
|
emqx_cluster/2,
|
||||||
|
|
|
@ -58,7 +58,6 @@
|
||||||
-module(emqx_cth_suite).
|
-module(emqx_cth_suite).
|
||||||
|
|
||||||
-include_lib("common_test/include/ct.hrl").
|
-include_lib("common_test/include/ct.hrl").
|
||||||
-include_lib("emqx/include/emqx_access_control.hrl").
|
|
||||||
|
|
||||||
-export([start/2]).
|
-export([start/2]).
|
||||||
-export([stop/1]).
|
-export([stop/1]).
|
||||||
|
|
|
@ -0,0 +1,339 @@
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||||
|
%%
|
||||||
|
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
%% you may not use this file except in compliance with the License.
|
||||||
|
%% You may obtain a copy of the License at
|
||||||
|
%%
|
||||||
|
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
%%
|
||||||
|
%% Unless required by applicable law or agreed to in writing, software
|
||||||
|
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
%% See the License for the specific language governing permissions and
|
||||||
|
%% limitations under the License.
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
|
-module(emqx_cth_tls).
|
||||||
|
|
||||||
|
-include_lib("public_key/include/public_key.hrl").
|
||||||
|
|
||||||
|
-export([gen_cert/1]).
|
||||||
|
-export([write_cert/2]).
|
||||||
|
-export([write_cert/3]).
|
||||||
|
-export([write_pem/2]).
|
||||||
|
|
||||||
|
%% -------------------------------------------------------------------
|
||||||
|
%% Certificate Issuing
|
||||||
|
%% Heavily inspired by: ${ERL_SRC}/lib/public_key/test/erl_make_certs.erl
|
||||||
|
%% -------------------------------------------------------------------
|
||||||
|
|
||||||
|
-type pem_entry() :: public_key:pem_entry().
|
||||||
|
-type certificate() :: pem_entry().
|
||||||
|
-type private_key() :: pem_entry().
|
||||||
|
|
||||||
|
-type cert_subject() :: #{
|
||||||
|
name => string(),
|
||||||
|
email => string(),
|
||||||
|
city => string(),
|
||||||
|
state => string(),
|
||||||
|
org => string(),
|
||||||
|
org_unit => string(),
|
||||||
|
country => string(),
|
||||||
|
serial => string(),
|
||||||
|
title => string(),
|
||||||
|
dnQualifer => string()
|
||||||
|
}.
|
||||||
|
|
||||||
|
-type cert_validity() ::
|
||||||
|
{_From :: calendar:date(), _To :: calendar:date()}.
|
||||||
|
|
||||||
|
-type cert_extensions() :: #{
|
||||||
|
basic_constraints => false | ca | _PathLenContraint :: pos_integer(),
|
||||||
|
key_usage => false | certsign
|
||||||
|
}.
|
||||||
|
|
||||||
|
%% @doc Generate a certificate and a private key.
|
||||||
|
%% If you need root (CA) certificate, use `root` as `issuer` option. By default, the
|
||||||
|
%% generated certificate will have according extensions (constraints, key usage, etc).
|
||||||
|
%% Once root certificate + private key pair is generated, you can use the result
|
||||||
|
%% as `issuer` option to generate other certificates signed by this root.
|
||||||
|
-spec gen_cert(Opts) -> {certificate(), private_key()} when
|
||||||
|
Opts :: #{
|
||||||
|
key := ec | rsa | PrivKeyIn,
|
||||||
|
issuer := root | {CertificateIn, PrivKeyIn},
|
||||||
|
subject => cert_subject(),
|
||||||
|
validity => cert_validity(),
|
||||||
|
extensions => cert_extensions() | false
|
||||||
|
},
|
||||||
|
CertificateIn :: certificate() | public_key:der_encoded() | #'OTPCertificate'{},
|
||||||
|
PrivKeyIn :: private_key() | _PEM :: binary().
|
||||||
|
gen_cert(Opts) ->
|
||||||
|
SubjectPrivateKey = get_privkey(Opts),
|
||||||
|
{TBSCert, IssuerKey} = make_tbs(SubjectPrivateKey, Opts),
|
||||||
|
Cert = public_key:pkix_sign(TBSCert, IssuerKey),
|
||||||
|
true = verify_signature(Cert, IssuerKey),
|
||||||
|
{encode_cert(Cert), encode_privkey(SubjectPrivateKey)}.
|
||||||
|
|
||||||
|
get_privkey(#{key := Algo}) when is_atom(Algo) ->
|
||||||
|
gen_privkey(Algo);
|
||||||
|
get_privkey(#{key := Key}) ->
|
||||||
|
decode_privkey(Key).
|
||||||
|
|
||||||
|
make_tbs(SubjectKey, Opts) ->
|
||||||
|
{Issuer, IssuerKey} = issuer(Opts, SubjectKey),
|
||||||
|
Subject =
|
||||||
|
case Opts of
|
||||||
|
#{issuer := root} ->
|
||||||
|
Issuer;
|
||||||
|
#{} ->
|
||||||
|
subject(Opts)
|
||||||
|
end,
|
||||||
|
{
|
||||||
|
#'OTPTBSCertificate'{
|
||||||
|
version = v3,
|
||||||
|
serialNumber = rand:uniform(1000000000000),
|
||||||
|
signature = sign_algorithm(IssuerKey, Opts),
|
||||||
|
issuer = Issuer,
|
||||||
|
validity = validity(Opts),
|
||||||
|
subject = Subject,
|
||||||
|
subjectPublicKeyInfo = publickey(SubjectKey),
|
||||||
|
extensions = extensions(Opts)
|
||||||
|
},
|
||||||
|
IssuerKey
|
||||||
|
}.
|
||||||
|
|
||||||
|
issuer(Opts = #{issuer := root}, SubjectKey) ->
|
||||||
|
%% Self signed
|
||||||
|
{subject(Opts), SubjectKey};
|
||||||
|
issuer(#{issuer := {Issuer, IssuerKey}}, _SubjectKey) ->
|
||||||
|
{issuer_subject(Issuer), decode_privkey(IssuerKey)}.
|
||||||
|
|
||||||
|
issuer_subject({'Certificate', IssuerDer, _}) when is_binary(IssuerDer) ->
|
||||||
|
issuer_subject(IssuerDer);
|
||||||
|
issuer_subject(IssuerDer) when is_binary(IssuerDer) ->
|
||||||
|
issuer_subject(public_key:pkix_decode_cert(IssuerDer, otp));
|
||||||
|
issuer_subject(#'OTPCertificate'{tbsCertificate = #'OTPTBSCertificate'{subject = Subject}}) ->
|
||||||
|
Subject.
|
||||||
|
|
||||||
|
subject(Opts = #{}) ->
|
||||||
|
Subject = maps:get(subject, Opts, #{}),
|
||||||
|
Entries = maps:map(
|
||||||
|
fun(N, V) -> [subject_entry(N, V)] end,
|
||||||
|
maps:merge(default_subject(Opts), Subject)
|
||||||
|
),
|
||||||
|
{rdnSequence, maps:values(Entries)}.
|
||||||
|
|
||||||
|
subject_entry(name, Name) ->
|
||||||
|
typed_attr(?'id-at-commonName', {printableString, Name});
|
||||||
|
subject_entry(email, Email) ->
|
||||||
|
typed_attr(?'id-emailAddress', Email);
|
||||||
|
subject_entry(city, City) ->
|
||||||
|
typed_attr(?'id-at-localityName', {printableString, City});
|
||||||
|
subject_entry(state, State) ->
|
||||||
|
typed_attr(?'id-at-stateOrProvinceName', {printableString, State});
|
||||||
|
subject_entry(org, Org) ->
|
||||||
|
typed_attr(?'id-at-organizationName', {printableString, Org});
|
||||||
|
subject_entry(org_unit, OrgUnit) ->
|
||||||
|
typed_attr(?'id-at-organizationalUnitName', {printableString, OrgUnit});
|
||||||
|
subject_entry(country, Country) ->
|
||||||
|
typed_attr(?'id-at-countryName', Country);
|
||||||
|
subject_entry(serial, Serial) ->
|
||||||
|
typed_attr(?'id-at-serialNumber', Serial);
|
||||||
|
subject_entry(title, Title) ->
|
||||||
|
typed_attr(?'id-at-title', {printableString, Title});
|
||||||
|
subject_entry(dnQualifer, DnQ) ->
|
||||||
|
typed_attr(?'id-at-dnQualifier', DnQ).
|
||||||
|
|
||||||
|
subject_info(Info, Subject, Default) ->
|
||||||
|
case subject_info(Info, Subject) of
|
||||||
|
undefined -> Default;
|
||||||
|
Value -> Value
|
||||||
|
end.
|
||||||
|
|
||||||
|
subject_info(Info, {rdnSequence, Entries}) ->
|
||||||
|
subject_info(Info, Entries);
|
||||||
|
subject_info(name, Entries) when is_list(Entries) ->
|
||||||
|
get_string(find_subject_entry(?'id-at-commonName', Entries));
|
||||||
|
subject_info(org, Entries) when is_list(Entries) ->
|
||||||
|
get_string(find_subject_entry(?'id-at-organizationName', Entries));
|
||||||
|
subject_info(org_unit, Entries) when is_list(Entries) ->
|
||||||
|
get_string(find_subject_entry(?'id-at-organizationalUnitName', Entries));
|
||||||
|
subject_info(country, Entries) when is_list(Entries) ->
|
||||||
|
find_subject_entry(?'id-at-countryName', Entries).
|
||||||
|
|
||||||
|
find_subject_entry(Oid, Entries) ->
|
||||||
|
emqx_maybe:from_list([
|
||||||
|
Value
|
||||||
|
|| Attrs <- Entries,
|
||||||
|
#'AttributeTypeAndValue'{type = T, value = Value} <- Attrs,
|
||||||
|
T =:= Oid
|
||||||
|
]).
|
||||||
|
|
||||||
|
get_string({printableString, String}) ->
|
||||||
|
String;
|
||||||
|
get_string(undefined) ->
|
||||||
|
undefined.
|
||||||
|
|
||||||
|
typed_attr(Type, Value) ->
|
||||||
|
#'AttributeTypeAndValue'{type = Type, value = Value}.
|
||||||
|
|
||||||
|
sign_algorithm(#'ECPrivateKey'{parameters = Parms}, _Opts) ->
|
||||||
|
#'SignatureAlgorithm'{
|
||||||
|
algorithm = ?'ecdsa-with-SHA256',
|
||||||
|
parameters = Parms
|
||||||
|
}.
|
||||||
|
|
||||||
|
validity(Opts) ->
|
||||||
|
{From, To} = maps:get(validity, Opts, default_validity()),
|
||||||
|
#'Validity'{
|
||||||
|
notBefore = {generalTime, format_date(From)},
|
||||||
|
notAfter = {generalTime, format_date(To)}
|
||||||
|
}.
|
||||||
|
|
||||||
|
publickey(#'ECPrivateKey'{parameters = Params, publicKey = PubKey}) ->
|
||||||
|
#'OTPSubjectPublicKeyInfo'{
|
||||||
|
algorithm = #'PublicKeyAlgorithm'{
|
||||||
|
algorithm = ?'id-ecPublicKey',
|
||||||
|
parameters = Params
|
||||||
|
},
|
||||||
|
subjectPublicKey = #'ECPoint'{point = PubKey}
|
||||||
|
}.
|
||||||
|
|
||||||
|
extensions(#{extensions := false}) ->
|
||||||
|
asn1_NOVALUE;
|
||||||
|
extensions(Opts) ->
|
||||||
|
Exts = maps:get(extensions, Opts, #{}),
|
||||||
|
Default = default_extensions(Opts),
|
||||||
|
maps:fold(
|
||||||
|
fun(Name, Data, Acc) -> Acc ++ extension(Name, Data) end,
|
||||||
|
[],
|
||||||
|
maps:merge(Default, Exts)
|
||||||
|
).
|
||||||
|
|
||||||
|
extension(basic_constraints, false) ->
|
||||||
|
[];
|
||||||
|
extension(basic_constraints, ca) ->
|
||||||
|
[
|
||||||
|
#'Extension'{
|
||||||
|
extnID = ?'id-ce-basicConstraints',
|
||||||
|
extnValue = #'BasicConstraints'{cA = true},
|
||||||
|
critical = true
|
||||||
|
}
|
||||||
|
];
|
||||||
|
extension(basic_constraints, Len) when is_integer(Len) ->
|
||||||
|
[
|
||||||
|
#'Extension'{
|
||||||
|
extnID = ?'id-ce-basicConstraints',
|
||||||
|
extnValue = #'BasicConstraints'{cA = true, pathLenConstraint = Len},
|
||||||
|
critical = true
|
||||||
|
}
|
||||||
|
];
|
||||||
|
extension(key_usage, false) ->
|
||||||
|
[];
|
||||||
|
extension(key_usage, certsign) ->
|
||||||
|
[
|
||||||
|
#'Extension'{
|
||||||
|
extnID = ?'id-ce-keyUsage',
|
||||||
|
extnValue = [keyCertSign],
|
||||||
|
critical = true
|
||||||
|
}
|
||||||
|
].
|
||||||
|
|
||||||
|
default_validity() ->
|
||||||
|
{shift_date(date(), -1), shift_date(date(), +7)}.
|
||||||
|
|
||||||
|
default_subject(#{issuer := root}) ->
|
||||||
|
#{
|
||||||
|
name => "RootCA",
|
||||||
|
org => "EMQ",
|
||||||
|
org_unit => "EMQX",
|
||||||
|
country => "CN"
|
||||||
|
};
|
||||||
|
default_subject(#{}) ->
|
||||||
|
#{
|
||||||
|
name => "Server",
|
||||||
|
org => "EMQ",
|
||||||
|
org_unit => "EMQX",
|
||||||
|
country => "CN"
|
||||||
|
}.
|
||||||
|
|
||||||
|
default_extensions(#{issuer := root}) ->
|
||||||
|
#{
|
||||||
|
basic_constraints => ca,
|
||||||
|
key_usage => certsign
|
||||||
|
};
|
||||||
|
default_extensions(#{}) ->
|
||||||
|
#{}.
|
||||||
|
|
||||||
|
%% -------------------------------------------------------------------
|
||||||
|
|
||||||
|
verify_signature(CertDer, #'ECPrivateKey'{parameters = Params, publicKey = PubKey}) ->
|
||||||
|
public_key:pkix_verify(CertDer, {#'ECPoint'{point = PubKey}, Params});
|
||||||
|
verify_signature(CertDer, KeyPem) ->
|
||||||
|
verify_signature(CertDer, decode_privkey(KeyPem)).
|
||||||
|
|
||||||
|
%% -------------------------------------------------------------------
|
||||||
|
|
||||||
|
gen_privkey(ec) ->
|
||||||
|
public_key:generate_key({namedCurve, secp256k1});
|
||||||
|
gen_privkey(rsa) ->
|
||||||
|
public_key:generate_key({rsa, 2048, 17}).
|
||||||
|
|
||||||
|
decode_privkey(#'ECPrivateKey'{} = Key) ->
|
||||||
|
Key;
|
||||||
|
decode_privkey(#'RSAPrivateKey'{} = Key) ->
|
||||||
|
Key;
|
||||||
|
decode_privkey(PemEntry = {_, _, _}) ->
|
||||||
|
public_key:pem_entry_decode(PemEntry);
|
||||||
|
decode_privkey(PemBinary) when is_binary(PemBinary) ->
|
||||||
|
[KeyInfo] = public_key:pem_decode(PemBinary),
|
||||||
|
decode_privkey(KeyInfo).
|
||||||
|
|
||||||
|
-spec encode_privkey(#'ECPrivateKey'{} | #'RSAPrivateKey'{}) -> private_key().
|
||||||
|
encode_privkey(Key = #'ECPrivateKey'{}) ->
|
||||||
|
{ok, Der} = 'OTP-PUB-KEY':encode('ECPrivateKey', Key),
|
||||||
|
{'ECPrivateKey', Der, not_encrypted};
|
||||||
|
encode_privkey(Key = #'RSAPrivateKey'{}) ->
|
||||||
|
{ok, Der} = 'OTP-PUB-KEY':encode('RSAPrivateKey', Key),
|
||||||
|
{'RSAPrivateKey', Der, not_encrypted}.
|
||||||
|
|
||||||
|
-spec encode_cert(public_key:der_encoded()) -> certificate().
|
||||||
|
encode_cert(Der) ->
|
||||||
|
{'Certificate', Der, not_encrypted}.
|
||||||
|
|
||||||
|
%% -------------------------------------------------------------------
|
||||||
|
|
||||||
|
shift_date(Date, Offset) ->
|
||||||
|
calendar:gregorian_days_to_date(calendar:date_to_gregorian_days(Date) + Offset).
|
||||||
|
|
||||||
|
format_date({Y, M, D}) ->
|
||||||
|
lists:flatten(io_lib:format("~w~2..0w~2..0w000000Z", [Y, M, D])).
|
||||||
|
|
||||||
|
%% -------------------------------------------------------------------
|
||||||
|
|
||||||
|
%% @doc Write certificate + private key pair to respective files.
|
||||||
|
%% Files are created in the given directory. The filenames are derived
|
||||||
|
%% from the subject information in the certificate.
|
||||||
|
-spec write_cert(_Dir :: file:name(), {certificate(), private_key()}) ->
|
||||||
|
{file:name(), file:name()}.
|
||||||
|
write_cert(Dir, {Cert, Key}) ->
|
||||||
|
Subject = issuer_subject(Cert),
|
||||||
|
Filename = subject_info(org, Subject, "ORG") ++ "." ++ subject_info(name, Subject, "XXX"),
|
||||||
|
write_cert(Dir, Filename, {Cert, Key}).
|
||||||
|
|
||||||
|
-spec write_cert(_Dir :: file:name(), _Prefix :: string(), {certificate(), private_key()}) ->
|
||||||
|
{file:name(), file:name()}.
|
||||||
|
write_cert(Dir, Filename, {Cert, Key}) ->
|
||||||
|
Certfile = filename:join(Dir, Filename ++ ".crt"),
|
||||||
|
Keyfile = filename:join(Dir, Filename ++ ".key"),
|
||||||
|
ok = write_pem(Certfile, Cert),
|
||||||
|
ok = write_pem(Keyfile, Key),
|
||||||
|
{Certfile, Keyfile}.
|
||||||
|
|
||||||
|
-spec write_pem(file:name(), pem_entry() | [pem_entry()]) ->
|
||||||
|
ok | {error, file:posix()}.
|
||||||
|
write_pem(Name, Entries = [_ | _]) ->
|
||||||
|
file:write_file(Name, public_key:pem_encode(Entries));
|
||||||
|
write_pem(Name, Entry) ->
|
||||||
|
write_pem(Name, [Entry]).
|
|
@ -20,122 +20,46 @@
|
||||||
-compile(nowarn_export_all).
|
-compile(nowarn_export_all).
|
||||||
|
|
||||||
-include_lib("emqx/include/emqx.hrl").
|
-include_lib("emqx/include/emqx.hrl").
|
||||||
-include_lib("emqx/include/emqx_mqtt.hrl").
|
-include_lib("emqx/include/emqx_schema.hrl").
|
||||||
|
-include_lib("emqx/include/asserts.hrl").
|
||||||
-include_lib("eunit/include/eunit.hrl").
|
-include_lib("eunit/include/eunit.hrl").
|
||||||
-include_lib("common_test/include/ct.hrl").
|
-include_lib("common_test/include/ct.hrl").
|
||||||
|
|
||||||
-define(CERTS_PATH(CertName), filename:join(["../../lib/emqx/etc/certs/", CertName])).
|
|
||||||
|
|
||||||
-define(SERVER_KEY_PASSWORD, "sErve7r8Key$!").
|
-define(SERVER_KEY_PASSWORD, "sErve7r8Key$!").
|
||||||
|
|
||||||
all() -> emqx_common_test_helpers:all(?MODULE).
|
all() -> emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
NewConfig = generate_config(),
|
|
||||||
application:ensure_all_started(esockd),
|
|
||||||
application:ensure_all_started(quicer),
|
|
||||||
application:ensure_all_started(cowboy),
|
|
||||||
generate_tls_certs(Config),
|
generate_tls_certs(Config),
|
||||||
lists:foreach(fun set_app_env/1, NewConfig),
|
WorkDir = emqx_cth_suite:work_dir(Config),
|
||||||
Config.
|
Apps = emqx_cth_suite:start([quicer, emqx], #{work_dir => WorkDir}),
|
||||||
|
[{apps, Apps} | Config].
|
||||||
|
|
||||||
end_per_suite(_Config) ->
|
end_per_suite(Config) ->
|
||||||
application:stop(esockd),
|
ok = emqx_cth_suite:stop(?config(apps, Config)).
|
||||||
application:stop(cowboy).
|
|
||||||
|
|
||||||
init_per_testcase(Case, Config) when
|
init_per_testcase(Case, Config) when
|
||||||
Case =:= t_max_conns_tcp; Case =:= t_current_conns_tcp
|
Case =:= t_start_stop_listeners;
|
||||||
|
Case =:= t_restart_listeners;
|
||||||
|
Case =:= t_restart_listeners_with_hibernate_after_disabled
|
||||||
->
|
->
|
||||||
catch emqx_config_handler:stop(),
|
ok = emqx_listeners:stop(),
|
||||||
Port = emqx_common_test_helpers:select_free_port(tcp),
|
Config;
|
||||||
{ok, _} = emqx_config_handler:start_link(),
|
|
||||||
PrevListeners = emqx_config:get([listeners], #{}),
|
|
||||||
PureListeners = remove_default_limiter(PrevListeners),
|
|
||||||
PureListeners2 = PureListeners#{
|
|
||||||
tcp => #{
|
|
||||||
listener_test => #{
|
|
||||||
bind => {"127.0.0.1", Port},
|
|
||||||
max_connections => 4321,
|
|
||||||
limiter => #{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
emqx_config:put([listeners], PureListeners2),
|
|
||||||
|
|
||||||
ok = emqx_listeners:start(),
|
|
||||||
[
|
|
||||||
{prev_listener_conf, PrevListeners},
|
|
||||||
{tcp_port, Port}
|
|
||||||
| Config
|
|
||||||
];
|
|
||||||
init_per_testcase(t_wss_conn, Config) ->
|
|
||||||
catch emqx_config_handler:stop(),
|
|
||||||
Port = emqx_common_test_helpers:select_free_port(ssl),
|
|
||||||
{ok, _} = emqx_config_handler:start_link(),
|
|
||||||
PrevListeners = emqx_config:get([listeners], #{}),
|
|
||||||
PureListeners = remove_default_limiter(PrevListeners),
|
|
||||||
PureListeners2 = PureListeners#{
|
|
||||||
wss => #{
|
|
||||||
listener_test => #{
|
|
||||||
bind => {{127, 0, 0, 1}, Port},
|
|
||||||
limiter => #{},
|
|
||||||
ssl_options => #{
|
|
||||||
cacertfile => ?CERTS_PATH("cacert.pem"),
|
|
||||||
certfile => ?CERTS_PATH("cert.pem"),
|
|
||||||
keyfile => ?CERTS_PATH("key.pem")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
emqx_config:put([listeners], PureListeners2),
|
|
||||||
|
|
||||||
ok = emqx_listeners:start(),
|
|
||||||
[
|
|
||||||
{prev_listener_conf, PrevListeners},
|
|
||||||
{wss_port, Port}
|
|
||||||
| Config
|
|
||||||
];
|
|
||||||
init_per_testcase(_, Config) ->
|
init_per_testcase(_, Config) ->
|
||||||
catch emqx_config_handler:stop(),
|
ok = emqx_listeners:start(),
|
||||||
{ok, _} = emqx_config_handler:start_link(),
|
Config.
|
||||||
PrevListeners = emqx_config:get([listeners], #{}),
|
|
||||||
PureListeners = remove_default_limiter(PrevListeners),
|
|
||||||
emqx_config:put([listeners], PureListeners),
|
|
||||||
[
|
|
||||||
{prev_listener_conf, PrevListeners}
|
|
||||||
| Config
|
|
||||||
].
|
|
||||||
|
|
||||||
end_per_testcase(Case, Config) when
|
end_per_testcase(_, _Config) ->
|
||||||
Case =:= t_max_conns_tcp; Case =:= t_current_conns_tcp
|
|
||||||
->
|
|
||||||
PrevListener = ?config(prev_listener_conf, Config),
|
|
||||||
emqx_listeners:stop(),
|
|
||||||
emqx_config:put([listeners], PrevListener),
|
|
||||||
_ = emqx_config_handler:stop(),
|
|
||||||
ok;
|
|
||||||
end_per_testcase(t_wss_conn, Config) ->
|
|
||||||
PrevListener = ?config(prev_listener_conf, Config),
|
|
||||||
emqx_listeners:stop(),
|
|
||||||
emqx_config:put([listeners], PrevListener),
|
|
||||||
_ = emqx_config_handler:stop(),
|
|
||||||
ok;
|
|
||||||
end_per_testcase(_, Config) ->
|
|
||||||
PrevListener = ?config(prev_listener_conf, Config),
|
|
||||||
emqx_config:put([listeners], PrevListener),
|
|
||||||
_ = emqx_config_handler:stop(),
|
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
t_start_stop_listeners(_) ->
|
t_start_stop_listeners(_) ->
|
||||||
ok = emqx_listeners:start(),
|
ok = emqx_listeners:start(),
|
||||||
?assertException(error, _, emqx_listeners:start_listener({ws, {"127.0.0.1", 8083}, []})),
|
?assertException(error, _, emqx_listeners:start_listener(ws, {"127.0.0.1", 8083}, #{})),
|
||||||
ok = emqx_listeners:stop().
|
ok = emqx_listeners:stop().
|
||||||
|
|
||||||
t_restart_listeners(_) ->
|
t_restart_listeners(_) ->
|
||||||
ok = emqx_listeners:start(),
|
ok = emqx_listeners:start(),
|
||||||
ok = emqx_listeners:stop(),
|
ok = emqx_listeners:stop(),
|
||||||
%% flakyness: eaddrinuse
|
|
||||||
timer:sleep(timer:seconds(2)),
|
|
||||||
ok = emqx_listeners:restart(),
|
ok = emqx_listeners:restart(),
|
||||||
ok = emqx_listeners:stop().
|
ok = emqx_listeners:stop().
|
||||||
|
|
||||||
|
@ -168,77 +92,315 @@ t_restart_listeners_with_hibernate_after_disabled(_Config) ->
|
||||||
),
|
),
|
||||||
ok = emqx_listeners:start(),
|
ok = emqx_listeners:start(),
|
||||||
ok = emqx_listeners:stop(),
|
ok = emqx_listeners:stop(),
|
||||||
%% flakyness: eaddrinuse
|
|
||||||
timer:sleep(timer:seconds(2)),
|
|
||||||
ok = emqx_listeners:restart(),
|
ok = emqx_listeners:restart(),
|
||||||
ok = emqx_listeners:stop(),
|
ok = emqx_listeners:stop(),
|
||||||
emqx_config:put([listeners], OldLConf).
|
emqx_config:put([listeners], OldLConf).
|
||||||
|
|
||||||
t_max_conns_tcp(Config) ->
|
t_max_conns_tcp(_Config) ->
|
||||||
%% Note: Using a string representation for the bind address like
|
%% Note: Using a string representation for the bind address like
|
||||||
%% "127.0.0.1" does not work
|
%% "127.0.0.1" does not work
|
||||||
?assertEqual(
|
Port = emqx_common_test_helpers:select_free_port(tcp),
|
||||||
4321,
|
Conf = #{
|
||||||
emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, ?config(tcp_port, Config)})
|
<<"bind">> => format_bind({"127.0.0.1", Port}),
|
||||||
).
|
<<"max_connections">> => 4321,
|
||||||
|
<<"limiter">> => #{}
|
||||||
|
},
|
||||||
|
with_listener(tcp, maxconns, Conf, fun() ->
|
||||||
|
?assertEqual(
|
||||||
|
4321,
|
||||||
|
emqx_listeners:max_conns('tcp:maxconns', {{127, 0, 0, 1}, Port})
|
||||||
|
)
|
||||||
|
end).
|
||||||
|
|
||||||
t_current_conns_tcp(Config) ->
|
t_current_conns_tcp(_Config) ->
|
||||||
?assertEqual(
|
Port = emqx_common_test_helpers:select_free_port(tcp),
|
||||||
0,
|
Conf = #{
|
||||||
emqx_listeners:current_conns('tcp:listener_test', {
|
<<"bind">> => format_bind({"127.0.0.1", Port}),
|
||||||
{127, 0, 0, 1}, ?config(tcp_port, Config)
|
<<"max_connections">> => 42,
|
||||||
})
|
<<"limiter">> => #{}
|
||||||
).
|
},
|
||||||
|
with_listener(tcp, curconns, Conf, fun() ->
|
||||||
|
?assertEqual(
|
||||||
|
0,
|
||||||
|
emqx_listeners:current_conns('tcp:curconns', {{127, 0, 0, 1}, Port})
|
||||||
|
)
|
||||||
|
end).
|
||||||
|
|
||||||
t_wss_conn(Config) ->
|
t_wss_conn(Config) ->
|
||||||
{ok, Socket} = ssl:connect(
|
PrivDir = ?config(priv_dir, Config),
|
||||||
{127, 0, 0, 1}, ?config(wss_port, Config), [{verify, verify_none}], 1000
|
Port = emqx_common_test_helpers:select_free_port(ssl),
|
||||||
),
|
Conf = #{
|
||||||
ok = ssl:close(Socket).
|
<<"bind">> => format_bind({"127.0.0.1", Port}),
|
||||||
|
<<"limiter">> => #{},
|
||||||
|
<<"ssl_options">> => #{
|
||||||
|
<<"cacertfile">> => filename:join(PrivDir, "ca.pem"),
|
||||||
|
<<"certfile">> => filename:join(PrivDir, "server.pem"),
|
||||||
|
<<"keyfile">> => filename:join(PrivDir, "server.key")
|
||||||
|
}
|
||||||
|
},
|
||||||
|
with_listener(wss, wssconn, Conf, fun() ->
|
||||||
|
{ok, Socket} = ssl:connect({127, 0, 0, 1}, Port, [{verify, verify_none}], 1000),
|
||||||
|
ok = ssl:close(Socket)
|
||||||
|
end).
|
||||||
|
|
||||||
t_quic_conn(Config) ->
|
t_quic_conn(Config) ->
|
||||||
|
PrivDir = ?config(priv_dir, Config),
|
||||||
Port = emqx_common_test_helpers:select_free_port(quic),
|
Port = emqx_common_test_helpers:select_free_port(quic),
|
||||||
DataDir = ?config(data_dir, Config),
|
Conf = #{
|
||||||
SSLOpts = #{
|
<<"bind">> => format_bind({"127.0.0.1", Port}),
|
||||||
password => ?SERVER_KEY_PASSWORD,
|
<<"ssl_options">> => #{
|
||||||
certfile => filename:join(DataDir, "server-password.pem"),
|
<<"password">> => ?SERVER_KEY_PASSWORD,
|
||||||
cacertfile => filename:join(DataDir, "ca.pem"),
|
<<"certfile">> => filename:join(PrivDir, "server-password.pem"),
|
||||||
keyfile => filename:join(DataDir, "server-password.key")
|
<<"cacertfile">> => filename:join(PrivDir, "ca.pem"),
|
||||||
|
<<"keyfile">> => filename:join(PrivDir, "server-password.key")
|
||||||
|
}
|
||||||
},
|
},
|
||||||
emqx_common_test_helpers:ensure_quic_listener(?FUNCTION_NAME, Port, #{ssl_options => SSLOpts}),
|
with_listener(quic, ?FUNCTION_NAME, Conf, fun() ->
|
||||||
ct:pal("~p", [emqx_listeners:list()]),
|
{ok, Conn} = quicer:connect(
|
||||||
{ok, Conn} = quicer:connect(
|
{127, 0, 0, 1},
|
||||||
{127, 0, 0, 1},
|
Port,
|
||||||
Port,
|
[
|
||||||
[
|
{verify, verify_none},
|
||||||
{verify, verify_none},
|
{alpn, ["mqtt"]}
|
||||||
{alpn, ["mqtt"]}
|
],
|
||||||
],
|
1000
|
||||||
1000
|
),
|
||||||
),
|
ok = quicer:close_connection(Conn)
|
||||||
ok = quicer:close_connection(Conn),
|
end).
|
||||||
emqx_listeners:stop_listener(quic, ?FUNCTION_NAME, #{bind => Port}).
|
|
||||||
|
|
||||||
t_ssl_password_cert(Config) ->
|
t_ssl_password_cert(Config) ->
|
||||||
|
PrivDir = ?config(priv_dir, Config),
|
||||||
Port = emqx_common_test_helpers:select_free_port(ssl),
|
Port = emqx_common_test_helpers:select_free_port(ssl),
|
||||||
DataDir = ?config(data_dir, Config),
|
|
||||||
SSLOptsPWD = #{
|
SSLOptsPWD = #{
|
||||||
password => ?SERVER_KEY_PASSWORD,
|
<<"password">> => ?SERVER_KEY_PASSWORD,
|
||||||
certfile => filename:join(DataDir, "server-password.pem"),
|
<<"certfile">> => filename:join(PrivDir, "server-password.pem"),
|
||||||
cacertfile => filename:join(DataDir, "ca.pem"),
|
<<"cacertfile">> => filename:join(PrivDir, "ca.pem"),
|
||||||
keyfile => filename:join(DataDir, "server-password.key")
|
<<"keyfile">> => filename:join(PrivDir, "server-password.key")
|
||||||
},
|
},
|
||||||
LConf = #{
|
LConf = #{
|
||||||
enable => true,
|
<<"enable">> => true,
|
||||||
bind => {{127, 0, 0, 1}, Port},
|
<<"bind">> => format_bind({{127, 0, 0, 1}, Port}),
|
||||||
mountpoint => <<>>,
|
<<"ssl_options">> => SSLOptsPWD
|
||||||
zone => default,
|
|
||||||
ssl_options => SSLOptsPWD
|
|
||||||
},
|
},
|
||||||
ok = emqx_listeners:start_listener(ssl, ?FUNCTION_NAME, LConf),
|
with_listener(ssl, ?FUNCTION_NAME, LConf, fun() ->
|
||||||
{ok, SSLSocket} = ssl:connect("127.0.0.1", Port, [{verify, verify_none}]),
|
{ok, SSLSocket} = ssl:connect("127.0.0.1", Port, [{verify, verify_none}]),
|
||||||
ssl:close(SSLSocket),
|
ssl:close(SSLSocket)
|
||||||
emqx_listeners:stop_listener(ssl, ?FUNCTION_NAME, LConf).
|
end).
|
||||||
|
|
||||||
|
t_ssl_update_opts(Config) ->
|
||||||
|
PrivDir = ?config(priv_dir, Config),
|
||||||
|
Host = "127.0.0.1",
|
||||||
|
Port = emqx_common_test_helpers:select_free_port(ssl),
|
||||||
|
Conf = #{
|
||||||
|
<<"enable">> => true,
|
||||||
|
<<"bind">> => format_bind({Host, Port}),
|
||||||
|
<<"ssl_options">> => #{
|
||||||
|
<<"cacertfile">> => filename:join(PrivDir, "ca.pem"),
|
||||||
|
<<"password">> => ?SERVER_KEY_PASSWORD,
|
||||||
|
<<"certfile">> => filename:join(PrivDir, "server-password.pem"),
|
||||||
|
<<"keyfile">> => filename:join(PrivDir, "server-password.key"),
|
||||||
|
<<"verify">> => verify_none
|
||||||
|
}
|
||||||
|
},
|
||||||
|
ClientSSLOpts = [
|
||||||
|
{verify, verify_peer},
|
||||||
|
{customize_hostname_check, [{match_fun, fun(_, _) -> true end}]}
|
||||||
|
],
|
||||||
|
with_listener(ssl, updated, Conf, fun() ->
|
||||||
|
%% Client connects successfully.
|
||||||
|
C1 = emqtt_connect_ssl(Host, Port, [
|
||||||
|
{cacertfile, filename:join(PrivDir, "ca.pem")} | ClientSSLOpts
|
||||||
|
]),
|
||||||
|
|
||||||
|
%% Change the listener SSL configuration: another set of cert/key files.
|
||||||
|
{ok, _} = emqx:update_config(
|
||||||
|
[listeners, ssl, updated],
|
||||||
|
{update, #{
|
||||||
|
<<"ssl_options">> => #{
|
||||||
|
<<"cacertfile">> => filename:join(PrivDir, "ca-next.pem"),
|
||||||
|
<<"certfile">> => filename:join(PrivDir, "server.pem"),
|
||||||
|
<<"keyfile">> => filename:join(PrivDir, "server.key")
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
),
|
||||||
|
|
||||||
|
%% Unable to connect with old SSL options, server's cert is signed by another CA.
|
||||||
|
?assertError(
|
||||||
|
{tls_alert, {unknown_ca, _}},
|
||||||
|
emqtt_connect_ssl(Host, Port, [
|
||||||
|
{cacertfile, filename:join(PrivDir, "ca.pem")} | ClientSSLOpts
|
||||||
|
])
|
||||||
|
),
|
||||||
|
|
||||||
|
C2 = emqtt_connect_ssl(Host, Port, [
|
||||||
|
{cacertfile, filename:join(PrivDir, "ca-next.pem")} | ClientSSLOpts
|
||||||
|
]),
|
||||||
|
|
||||||
|
%% Change the listener SSL configuration: require peer certificate.
|
||||||
|
{ok, _} = emqx:update_config(
|
||||||
|
[listeners, ssl, updated],
|
||||||
|
{update, #{
|
||||||
|
<<"ssl_options">> => #{
|
||||||
|
<<"verify">> => verify_peer,
|
||||||
|
<<"fail_if_no_peer_cert">> => true
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
),
|
||||||
|
|
||||||
|
%% Unable to connect with old SSL options, certificate is now required.
|
||||||
|
?assertExceptionOneOf(
|
||||||
|
{error, {ssl_error, _Socket, {tls_alert, {certificate_required, _}}}},
|
||||||
|
{error, closed},
|
||||||
|
emqtt_connect_ssl(Host, Port, [
|
||||||
|
{cacertfile, filename:join(PrivDir, "ca-next.pem")} | ClientSSLOpts
|
||||||
|
])
|
||||||
|
),
|
||||||
|
|
||||||
|
C3 = emqtt_connect_ssl(Host, Port, [
|
||||||
|
{cacertfile, filename:join(PrivDir, "ca-next.pem")},
|
||||||
|
{certfile, filename:join(PrivDir, "client.pem")},
|
||||||
|
{keyfile, filename:join(PrivDir, "client.key")}
|
||||||
|
| ClientSSLOpts
|
||||||
|
]),
|
||||||
|
|
||||||
|
%% Both pre- and post-update clients should be alive.
|
||||||
|
?assertEqual(pong, emqtt:ping(C1)),
|
||||||
|
?assertEqual(pong, emqtt:ping(C2)),
|
||||||
|
?assertEqual(pong, emqtt:ping(C3)),
|
||||||
|
|
||||||
|
ok = emqtt:stop(C1),
|
||||||
|
ok = emqtt:stop(C2),
|
||||||
|
ok = emqtt:stop(C3)
|
||||||
|
end).
|
||||||
|
|
||||||
|
t_wss_update_opts(Config) ->
|
||||||
|
PrivDir = ?config(priv_dir, Config),
|
||||||
|
Host = "127.0.0.1",
|
||||||
|
Port = emqx_common_test_helpers:select_free_port(ssl),
|
||||||
|
Conf = #{
|
||||||
|
<<"enable">> => true,
|
||||||
|
<<"bind">> => format_bind({Host, Port}),
|
||||||
|
<<"ssl_options">> => #{
|
||||||
|
<<"cacertfile">> => filename:join(PrivDir, "ca.pem"),
|
||||||
|
<<"certfile">> => filename:join(PrivDir, "server-password.pem"),
|
||||||
|
<<"keyfile">> => filename:join(PrivDir, "server-password.key"),
|
||||||
|
<<"password">> => ?SERVER_KEY_PASSWORD,
|
||||||
|
<<"verify">> => verify_none
|
||||||
|
}
|
||||||
|
},
|
||||||
|
ClientSSLOpts = [
|
||||||
|
{verify, verify_peer},
|
||||||
|
{customize_hostname_check, [{match_fun, fun(_, _) -> true end}]}
|
||||||
|
],
|
||||||
|
with_listener(wss, updated, Conf, fun() ->
|
||||||
|
%% Start a client.
|
||||||
|
C1 = emqtt_connect_wss(Host, Port, [
|
||||||
|
{cacertfile, filename:join(PrivDir, "ca.pem")}
|
||||||
|
| ClientSSLOpts
|
||||||
|
]),
|
||||||
|
|
||||||
|
%% Change the listener SSL configuration.
|
||||||
|
%% 1. Another set of (password protected) cert/key files.
|
||||||
|
%% 2. Require peer certificate.
|
||||||
|
{ok, _} = emqx:update_config(
|
||||||
|
[listeners, wss, updated],
|
||||||
|
{update, #{
|
||||||
|
<<"ssl_options">> => #{
|
||||||
|
<<"cacertfile">> => filename:join(PrivDir, "ca-next.pem"),
|
||||||
|
<<"certfile">> => filename:join(PrivDir, "server.pem"),
|
||||||
|
<<"keyfile">> => filename:join(PrivDir, "server.key")
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
),
|
||||||
|
|
||||||
|
%% Unable to connect with old SSL options, server's cert is signed by another CA.
|
||||||
|
%% Due to a bug `emqtt` exits with `badmatch` in this case.
|
||||||
|
?assertExit(
|
||||||
|
_Badmatch,
|
||||||
|
emqtt_connect_wss(Host, Port, ClientSSLOpts)
|
||||||
|
),
|
||||||
|
|
||||||
|
C2 = emqtt_connect_wss(Host, Port, [
|
||||||
|
{cacertfile, filename:join(PrivDir, "ca-next.pem")}
|
||||||
|
| ClientSSLOpts
|
||||||
|
]),
|
||||||
|
|
||||||
|
%% Change the listener SSL configuration: require peer certificate.
|
||||||
|
{ok, _} = emqx:update_config(
|
||||||
|
[listeners, wss, updated],
|
||||||
|
{update, #{
|
||||||
|
<<"ssl_options">> => #{
|
||||||
|
<<"verify">> => verify_peer,
|
||||||
|
<<"fail_if_no_peer_cert">> => true
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
),
|
||||||
|
|
||||||
|
%% Unable to connect with old SSL options, certificate is now required.
|
||||||
|
%% Due to a bug `emqtt` does not instantly report that socket was closed.
|
||||||
|
?assertError(
|
||||||
|
timeout,
|
||||||
|
emqtt_connect_wss(Host, Port, [
|
||||||
|
{cacertfile, filename:join(PrivDir, "ca-next.pem")}
|
||||||
|
| ClientSSLOpts
|
||||||
|
])
|
||||||
|
),
|
||||||
|
|
||||||
|
C3 = emqtt_connect_wss(Host, Port, [
|
||||||
|
{cacertfile, filename:join(PrivDir, "ca-next.pem")},
|
||||||
|
{certfile, filename:join(PrivDir, "client.pem")},
|
||||||
|
{keyfile, filename:join(PrivDir, "client.key")}
|
||||||
|
| ClientSSLOpts
|
||||||
|
]),
|
||||||
|
|
||||||
|
%% Both pre- and post-update clients should be alive.
|
||||||
|
?assertEqual(pong, emqtt:ping(C1)),
|
||||||
|
?assertEqual(pong, emqtt:ping(C2)),
|
||||||
|
?assertEqual(pong, emqtt:ping(C3)),
|
||||||
|
|
||||||
|
ok = emqtt:stop(C1),
|
||||||
|
ok = emqtt:stop(C2),
|
||||||
|
ok = emqtt:stop(C3)
|
||||||
|
end).
|
||||||
|
|
||||||
|
with_listener(Type, Name, Config, Then) ->
|
||||||
|
{ok, _} = emqx:update_config([listeners, Type, Name], {create, Config}),
|
||||||
|
try
|
||||||
|
Then()
|
||||||
|
after
|
||||||
|
emqx:update_config([listeners, Type, Name], ?TOMBSTONE_CONFIG_CHANGE_REQ)
|
||||||
|
end.
|
||||||
|
|
||||||
|
emqtt_connect_ssl(Host, Port, SSLOpts) ->
|
||||||
|
emqtt_connect(fun emqtt:connect/1, #{
|
||||||
|
hosts => [{Host, Port}],
|
||||||
|
connect_timeout => 1,
|
||||||
|
ssl => true,
|
||||||
|
ssl_opts => SSLOpts
|
||||||
|
}).
|
||||||
|
|
||||||
|
emqtt_connect_wss(Host, Port, SSLOpts) ->
|
||||||
|
emqtt_connect(fun emqtt:ws_connect/1, #{
|
||||||
|
hosts => [{Host, Port}],
|
||||||
|
connect_timeout => 1,
|
||||||
|
ws_transport_options => [
|
||||||
|
{protocols, [http]},
|
||||||
|
{transport, tls},
|
||||||
|
{tls_opts, SSLOpts}
|
||||||
|
]
|
||||||
|
}).
|
||||||
|
|
||||||
|
emqtt_connect(Connect, Opts) ->
|
||||||
|
case emqtt:start_link(Opts) of
|
||||||
|
{ok, Client} ->
|
||||||
|
true = erlang:unlink(Client),
|
||||||
|
case Connect(Client) of
|
||||||
|
{ok, _} -> Client;
|
||||||
|
{error, Reason} -> error(Reason, [Opts])
|
||||||
|
end;
|
||||||
|
{error, Reason} ->
|
||||||
|
error(Reason, [Opts])
|
||||||
|
end.
|
||||||
|
|
||||||
t_format_bind(_) ->
|
t_format_bind(_) ->
|
||||||
?assertEqual(
|
?assertEqual(
|
||||||
|
@ -266,67 +428,15 @@ t_format_bind(_) ->
|
||||||
lists:flatten(emqx_listeners:format_bind(":1883"))
|
lists:flatten(emqx_listeners:format_bind(":1883"))
|
||||||
).
|
).
|
||||||
|
|
||||||
render_config_file() ->
|
|
||||||
Path = local_path(["etc", "emqx.conf"]),
|
|
||||||
{ok, Temp} = file:read_file(Path),
|
|
||||||
Vars0 = mustache_vars(),
|
|
||||||
Vars = [{atom_to_list(N), iolist_to_binary(V)} || {N, V} <- Vars0],
|
|
||||||
Targ = bbmustache:render(Temp, Vars),
|
|
||||||
NewName = Path ++ ".rendered",
|
|
||||||
ok = file:write_file(NewName, Targ),
|
|
||||||
NewName.
|
|
||||||
|
|
||||||
mustache_vars() ->
|
|
||||||
[
|
|
||||||
{platform_data_dir, local_path(["data"])},
|
|
||||||
{platform_etc_dir, local_path(["etc"])}
|
|
||||||
].
|
|
||||||
|
|
||||||
generate_config() ->
|
|
||||||
ConfFile = render_config_file(),
|
|
||||||
{ok, Conf} = hocon:load(ConfFile, #{format => richmap}),
|
|
||||||
hocon_tconf:generate(emqx_schema, Conf).
|
|
||||||
|
|
||||||
set_app_env({App, Lists}) ->
|
|
||||||
lists:foreach(
|
|
||||||
fun
|
|
||||||
({authz_file, _Var}) ->
|
|
||||||
application:set_env(App, authz_file, local_path(["etc", "authz.conf"]));
|
|
||||||
({Par, Var}) ->
|
|
||||||
application:set_env(App, Par, Var)
|
|
||||||
end,
|
|
||||||
Lists
|
|
||||||
).
|
|
||||||
|
|
||||||
local_path(Components, Module) ->
|
|
||||||
filename:join([get_base_dir(Module) | Components]).
|
|
||||||
|
|
||||||
local_path(Components) ->
|
|
||||||
local_path(Components, ?MODULE).
|
|
||||||
|
|
||||||
get_base_dir(Module) ->
|
|
||||||
{file, Here} = code:is_loaded(Module),
|
|
||||||
filename:dirname(filename:dirname(Here)).
|
|
||||||
|
|
||||||
get_base_dir() ->
|
|
||||||
get_base_dir(?MODULE).
|
|
||||||
|
|
||||||
remove_default_limiter(Listeners) ->
|
|
||||||
maps:map(
|
|
||||||
fun(_, X) ->
|
|
||||||
maps:map(
|
|
||||||
fun(_, E) ->
|
|
||||||
maps:remove(limiter, E)
|
|
||||||
end,
|
|
||||||
X
|
|
||||||
)
|
|
||||||
end,
|
|
||||||
Listeners
|
|
||||||
).
|
|
||||||
|
|
||||||
generate_tls_certs(Config) ->
|
generate_tls_certs(Config) ->
|
||||||
DataDir = ?config(data_dir, Config),
|
PrivDir = ?config(priv_dir, Config),
|
||||||
emqx_common_test_helpers:gen_ca(DataDir, "ca"),
|
emqx_common_test_helpers:gen_ca(PrivDir, "ca"),
|
||||||
emqx_common_test_helpers:gen_host_cert("server-password", "ca", DataDir, #{
|
emqx_common_test_helpers:gen_ca(PrivDir, "ca-next"),
|
||||||
|
emqx_common_test_helpers:gen_host_cert("server", "ca-next", PrivDir, #{}),
|
||||||
|
emqx_common_test_helpers:gen_host_cert("client", "ca-next", PrivDir, #{}),
|
||||||
|
emqx_common_test_helpers:gen_host_cert("server-password", "ca", PrivDir, #{
|
||||||
password => ?SERVER_KEY_PASSWORD
|
password => ?SERVER_KEY_PASSWORD
|
||||||
}).
|
}).
|
||||||
|
|
||||||
|
format_bind(Bind) ->
|
||||||
|
iolist_to_binary(emqx_listeners:format_bind(Bind)).
|
||||||
|
|
|
@ -24,6 +24,7 @@
|
||||||
action_type_to_connector_type/1,
|
action_type_to_connector_type/1,
|
||||||
action_type_to_bridge_v1_type/2,
|
action_type_to_bridge_v1_type/2,
|
||||||
bridge_v1_type_to_action_type/1,
|
bridge_v1_type_to_action_type/1,
|
||||||
|
bridge_v1_type_name/1,
|
||||||
is_action_type/1,
|
is_action_type/1,
|
||||||
registered_schema_modules/0,
|
registered_schema_modules/0,
|
||||||
connector_action_config_to_bridge_v1_config/2,
|
connector_action_config_to_bridge_v1_config/2,
|
||||||
|
@ -144,6 +145,20 @@ get_confs(ActionType, #{<<"connector">> := ConnectorName} = ActionConfig) ->
|
||||||
get_confs(_, _) ->
|
get_confs(_, _) ->
|
||||||
undefined.
|
undefined.
|
||||||
|
|
||||||
|
%% We need this hack because of the bugs introduced by associating v2/action/source types
|
||||||
|
%% with v1 types unconditionally, like `mongodb' being a "valid" V1 bridge type, or
|
||||||
|
%% `confluent_producer', which has no v1 equivalent....
|
||||||
|
bridge_v1_type_name(ActionTypeBin) when is_binary(ActionTypeBin) ->
|
||||||
|
bridge_v1_type_name(binary_to_existing_atom(ActionTypeBin));
|
||||||
|
bridge_v1_type_name(ActionType) ->
|
||||||
|
Module = get_action_info_module(ActionType),
|
||||||
|
case erlang:function_exported(Module, bridge_v1_type_name, 0) of
|
||||||
|
true ->
|
||||||
|
{ok, Module:bridge_v1_type_name()};
|
||||||
|
false ->
|
||||||
|
{error, no_v1_equivalent}
|
||||||
|
end.
|
||||||
|
|
||||||
%% This function should return true for all inputs that are bridge V1 types for
|
%% This function should return true for all inputs that are bridge V1 types for
|
||||||
%% bridges that have been refactored to bridge V2s, and for all all bridge V2
|
%% bridges that have been refactored to bridge V2s, and for all all bridge V2
|
||||||
%% types. For everything else the function should return false.
|
%% types. For everything else the function should return false.
|
||||||
|
|
|
@ -621,6 +621,7 @@ lookup_from_all_nodes(BridgeType, BridgeName, SuccCode) ->
|
||||||
end.
|
end.
|
||||||
|
|
||||||
lookup_from_local_node(ActionType, ActionName) ->
|
lookup_from_local_node(ActionType, ActionName) ->
|
||||||
|
%% TODO: BUG: shouldn't accept an action type here, only V1 types....
|
||||||
case emqx_bridge:lookup(ActionType, ActionName) of
|
case emqx_bridge:lookup(ActionType, ActionName) of
|
||||||
{ok, Res} -> {ok, format_resource(Res, node())};
|
{ok, Res} -> {ok, format_resource(Res, node())};
|
||||||
Error -> Error
|
Error -> Error
|
||||||
|
|
|
@ -416,7 +416,7 @@ uninstall_bridge_v2(
|
||||||
{error, _} ->
|
{error, _} ->
|
||||||
ok;
|
ok;
|
||||||
ok ->
|
ok ->
|
||||||
%% Deinstall from connector
|
%% uninstall from connector
|
||||||
ConnectorId = emqx_connector_resource:resource_id(
|
ConnectorId = emqx_connector_resource:resource_id(
|
||||||
connector_type(BridgeV2Type), ConnectorName
|
connector_type(BridgeV2Type), ConnectorName
|
||||||
),
|
),
|
||||||
|
@ -869,6 +869,8 @@ config_key_path() ->
|
||||||
config_key_path_leaf() ->
|
config_key_path_leaf() ->
|
||||||
[?ROOT_KEY, '?', '?'].
|
[?ROOT_KEY, '?', '?'].
|
||||||
|
|
||||||
|
pre_config_update(_, {force_update, Conf}, _OldConf) ->
|
||||||
|
{ok, Conf};
|
||||||
%% NOTE: We depend on the `emqx_bridge:pre_config_update/3` to restart/stop the
|
%% NOTE: We depend on the `emqx_bridge:pre_config_update/3` to restart/stop the
|
||||||
%% underlying resources.
|
%% underlying resources.
|
||||||
pre_config_update(_, {_Oper, _, _}, undefined) ->
|
pre_config_update(_, {_Oper, _, _}, undefined) ->
|
||||||
|
@ -882,55 +884,15 @@ pre_config_update(_Path, Conf, _OldConfig) when is_map(Conf) ->
|
||||||
operation_to_enable(disable) -> false;
|
operation_to_enable(disable) -> false;
|
||||||
operation_to_enable(enable) -> true.
|
operation_to_enable(enable) -> true.
|
||||||
|
|
||||||
|
%% A public API that can trigger this is:
|
||||||
|
%% bin/emqx ctl conf load data/configs/cluster.hocon
|
||||||
|
post_config_update([?ROOT_KEY], {force_update, _Req}, NewConf, OldConf, _AppEnv) ->
|
||||||
|
do_post_config_update(NewConf, OldConf, #{validate_referenced_connectors => false});
|
||||||
%% This top level handler will be triggered when the actions path is updated
|
%% This top level handler will be triggered when the actions path is updated
|
||||||
%% with calls to emqx_conf:update([actions], BridgesConf, #{}).
|
%% with calls to emqx_conf:update([actions], BridgesConf, #{}).
|
||||||
%%
|
%%
|
||||||
%% A public API that can trigger this is:
|
|
||||||
%% bin/emqx ctl conf load data/configs/cluster.hocon
|
|
||||||
post_config_update([?ROOT_KEY], _Req, NewConf, OldConf, _AppEnv) ->
|
post_config_update([?ROOT_KEY], _Req, NewConf, OldConf, _AppEnv) ->
|
||||||
#{added := Added, removed := Removed, changed := Updated} =
|
do_post_config_update(NewConf, OldConf, #{validate_referenced_connectors => true});
|
||||||
diff_confs(NewConf, OldConf),
|
|
||||||
%% new and updated bridges must have their connector references validated
|
|
||||||
UpdatedConfigs =
|
|
||||||
lists:map(
|
|
||||||
fun({{Type, BridgeName}, {_Old, New}}) ->
|
|
||||||
{Type, BridgeName, New}
|
|
||||||
end,
|
|
||||||
maps:to_list(Updated)
|
|
||||||
),
|
|
||||||
AddedConfigs =
|
|
||||||
lists:map(
|
|
||||||
fun({{Type, BridgeName}, AddedConf}) ->
|
|
||||||
{Type, BridgeName, AddedConf}
|
|
||||||
end,
|
|
||||||
maps:to_list(Added)
|
|
||||||
),
|
|
||||||
ToValidate = UpdatedConfigs ++ AddedConfigs,
|
|
||||||
case multi_validate_referenced_connectors(ToValidate) of
|
|
||||||
ok ->
|
|
||||||
%% The config update will be failed if any task in `perform_bridge_changes` failed.
|
|
||||||
RemoveFun = fun uninstall_bridge_v2/3,
|
|
||||||
CreateFun = fun install_bridge_v2/3,
|
|
||||||
UpdateFun = fun(Type, Name, {OldBridgeConf, Conf}) ->
|
|
||||||
uninstall_bridge_v2(Type, Name, OldBridgeConf),
|
|
||||||
install_bridge_v2(Type, Name, Conf)
|
|
||||||
end,
|
|
||||||
Result = perform_bridge_changes([
|
|
||||||
#{action => RemoveFun, data => Removed},
|
|
||||||
#{
|
|
||||||
action => CreateFun,
|
|
||||||
data => Added,
|
|
||||||
on_exception_fn => fun emqx_bridge_resource:remove/4
|
|
||||||
},
|
|
||||||
#{action => UpdateFun, data => Updated}
|
|
||||||
]),
|
|
||||||
ok = unload_message_publish_hook(),
|
|
||||||
ok = load_message_publish_hook(NewConf),
|
|
||||||
?tp(bridge_post_config_update_done, #{}),
|
|
||||||
Result;
|
|
||||||
{error, Error} ->
|
|
||||||
{error, Error}
|
|
||||||
end;
|
|
||||||
post_config_update([?ROOT_KEY, BridgeType, BridgeName], '$remove', _, _OldConf, _AppEnvs) ->
|
post_config_update([?ROOT_KEY, BridgeType, BridgeName], '$remove', _, _OldConf, _AppEnvs) ->
|
||||||
Conf = emqx:get_config([?ROOT_KEY, BridgeType, BridgeName]),
|
Conf = emqx:get_config([?ROOT_KEY, BridgeType, BridgeName]),
|
||||||
ok = uninstall_bridge_v2(BridgeType, BridgeName, Conf),
|
ok = uninstall_bridge_v2(BridgeType, BridgeName, Conf),
|
||||||
|
@ -970,6 +932,50 @@ post_config_update([?ROOT_KEY, BridgeType, BridgeName], _Req, NewConf, OldConf,
|
||||||
{error, Error}
|
{error, Error}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
do_post_config_update(NewConf, OldConf, #{validate_referenced_connectors := NeedValidate}) ->
|
||||||
|
#{added := Added, removed := Removed, changed := Updated} =
|
||||||
|
diff_confs(NewConf, OldConf),
|
||||||
|
UpdatedConfigs =
|
||||||
|
lists:map(
|
||||||
|
fun({{Type, BridgeName}, {_Old, New}}) ->
|
||||||
|
{Type, BridgeName, New}
|
||||||
|
end,
|
||||||
|
maps:to_list(Updated)
|
||||||
|
),
|
||||||
|
AddedConfigs =
|
||||||
|
lists:map(
|
||||||
|
fun({{Type, BridgeName}, AddedConf}) ->
|
||||||
|
{Type, BridgeName, AddedConf}
|
||||||
|
end,
|
||||||
|
maps:to_list(Added)
|
||||||
|
),
|
||||||
|
ToValidate = UpdatedConfigs ++ AddedConfigs,
|
||||||
|
case multi_validate_referenced_connectors(NeedValidate, ToValidate) of
|
||||||
|
ok ->
|
||||||
|
%% The config update will be failed if any task in `perform_bridge_changes` failed.
|
||||||
|
RemoveFun = fun uninstall_bridge_v2/3,
|
||||||
|
CreateFun = fun install_bridge_v2/3,
|
||||||
|
UpdateFun = fun(Type, Name, {OldBridgeConf, Conf}) ->
|
||||||
|
uninstall_bridge_v2(Type, Name, OldBridgeConf),
|
||||||
|
install_bridge_v2(Type, Name, Conf)
|
||||||
|
end,
|
||||||
|
Result = perform_bridge_changes([
|
||||||
|
#{action => RemoveFun, data => Removed},
|
||||||
|
#{
|
||||||
|
action => CreateFun,
|
||||||
|
data => Added,
|
||||||
|
on_exception_fn => fun emqx_bridge_resource:remove/4
|
||||||
|
},
|
||||||
|
#{action => UpdateFun, data => Updated}
|
||||||
|
]),
|
||||||
|
ok = unload_message_publish_hook(),
|
||||||
|
ok = load_message_publish_hook(NewConf),
|
||||||
|
?tp(bridge_post_config_update_done, #{}),
|
||||||
|
Result;
|
||||||
|
{error, Error} ->
|
||||||
|
{error, Error}
|
||||||
|
end.
|
||||||
|
|
||||||
diff_confs(NewConfs, OldConfs) ->
|
diff_confs(NewConfs, OldConfs) ->
|
||||||
emqx_utils_maps:diff_maps(
|
emqx_utils_maps:diff_maps(
|
||||||
flatten_confs(NewConfs),
|
flatten_confs(NewConfs),
|
||||||
|
@ -1086,7 +1092,8 @@ bridge_v1_lookup_and_transform(ActionType, Name) ->
|
||||||
case lookup(ActionType, Name) of
|
case lookup(ActionType, Name) of
|
||||||
{ok, #{raw_config := #{<<"connector">> := ConnectorName} = RawConfig} = ActionConfig} ->
|
{ok, #{raw_config := #{<<"connector">> := ConnectorName} = RawConfig} = ActionConfig} ->
|
||||||
BridgeV1Type = ?MODULE:bridge_v2_type_to_bridge_v1_type(ActionType, RawConfig),
|
BridgeV1Type = ?MODULE:bridge_v2_type_to_bridge_v1_type(ActionType, RawConfig),
|
||||||
case ?MODULE:bridge_v1_is_valid(BridgeV1Type, Name) of
|
HasBridgeV1Equivalent = has_bridge_v1_equivalent(ActionType),
|
||||||
|
case HasBridgeV1Equivalent andalso ?MODULE:bridge_v1_is_valid(BridgeV1Type, Name) of
|
||||||
true ->
|
true ->
|
||||||
ConnectorType = connector_type(ActionType),
|
ConnectorType = connector_type(ActionType),
|
||||||
case emqx_connector:lookup(ConnectorType, ConnectorName) of
|
case emqx_connector:lookup(ConnectorType, ConnectorName) of
|
||||||
|
@ -1112,6 +1119,12 @@ bridge_v1_lookup_and_transform(ActionType, Name) ->
|
||||||
not_bridge_v1_compatible_error() ->
|
not_bridge_v1_compatible_error() ->
|
||||||
{error, not_bridge_v1_compatible}.
|
{error, not_bridge_v1_compatible}.
|
||||||
|
|
||||||
|
has_bridge_v1_equivalent(ActionType) ->
|
||||||
|
case emqx_action_info:bridge_v1_type_name(ActionType) of
|
||||||
|
{ok, _} -> true;
|
||||||
|
{error, no_v1_equivalent} -> false
|
||||||
|
end.
|
||||||
|
|
||||||
connector_raw_config(Connector, ConnectorType) ->
|
connector_raw_config(Connector, ConnectorType) ->
|
||||||
get_raw_with_defaults(Connector, ConnectorType, <<"connectors">>, emqx_connector_schema).
|
get_raw_with_defaults(Connector, ConnectorType, <<"connectors">>, emqx_connector_schema).
|
||||||
|
|
||||||
|
@ -1593,7 +1606,9 @@ to_connector(ConnectorNameBin, BridgeType) ->
|
||||||
throw(not_found)
|
throw(not_found)
|
||||||
end.
|
end.
|
||||||
|
|
||||||
multi_validate_referenced_connectors(Configs) ->
|
multi_validate_referenced_connectors(false, _Configs) ->
|
||||||
|
ok;
|
||||||
|
multi_validate_referenced_connectors(true, Configs) ->
|
||||||
Pipeline =
|
Pipeline =
|
||||||
lists:map(
|
lists:map(
|
||||||
fun({Type, BridgeName, #{connector := ConnectorName}}) ->
|
fun({Type, BridgeName, #{connector := ConnectorName}}) ->
|
||||||
|
|
|
@ -136,6 +136,9 @@ setup_mocks() ->
|
||||||
end
|
end
|
||||||
),
|
),
|
||||||
|
|
||||||
|
catch meck:new(emqx_action_info, MeckOpts),
|
||||||
|
meck:expect(emqx_action_info, bridge_v1_type_name, 1, {ok, bridge_type()}),
|
||||||
|
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
con_mod() ->
|
con_mod() ->
|
||||||
|
|
|
@ -343,6 +343,42 @@ probe_bridge_api(BridgeType, BridgeName, BridgeConfig) ->
|
||||||
ct:pal("bridge probe result: ~p", [Res]),
|
ct:pal("bridge probe result: ~p", [Res]),
|
||||||
Res.
|
Res.
|
||||||
|
|
||||||
|
list_bridges_http_api_v1() ->
|
||||||
|
Path = emqx_mgmt_api_test_util:api_path(["bridges"]),
|
||||||
|
ct:pal("list bridges (http v1)"),
|
||||||
|
Res = request(get, Path, _Params = []),
|
||||||
|
ct:pal("list bridges (http v1) result:\n ~p", [Res]),
|
||||||
|
Res.
|
||||||
|
|
||||||
|
list_actions_http_api() ->
|
||||||
|
Path = emqx_mgmt_api_test_util:api_path(["actions"]),
|
||||||
|
ct:pal("list actions (http v2)"),
|
||||||
|
Res = request(get, Path, _Params = []),
|
||||||
|
ct:pal("list actions (http v2) result:\n ~p", [Res]),
|
||||||
|
Res.
|
||||||
|
|
||||||
|
list_connectors_http_api() ->
|
||||||
|
Path = emqx_mgmt_api_test_util:api_path(["connectors"]),
|
||||||
|
ct:pal("list connectors"),
|
||||||
|
Res = request(get, Path, _Params = []),
|
||||||
|
ct:pal("list connectors result:\n ~p", [Res]),
|
||||||
|
Res.
|
||||||
|
|
||||||
|
update_rule_http(RuleId, Params) ->
|
||||||
|
Path = emqx_mgmt_api_test_util:api_path(["rules", RuleId]),
|
||||||
|
ct:pal("update rule ~p:\n ~p", [RuleId, Params]),
|
||||||
|
Res = request(put, Path, Params),
|
||||||
|
ct:pal("update rule ~p result:\n ~p", [RuleId, Res]),
|
||||||
|
Res.
|
||||||
|
|
||||||
|
enable_rule_http(RuleId) ->
|
||||||
|
Params = #{<<"enable">> => true},
|
||||||
|
update_rule_http(RuleId, Params).
|
||||||
|
|
||||||
|
is_rule_enabled(RuleId) ->
|
||||||
|
{ok, #{enable := Enable}} = emqx_rule_engine:get_rule(RuleId),
|
||||||
|
Enable.
|
||||||
|
|
||||||
try_decode_error(Body0) ->
|
try_decode_error(Body0) ->
|
||||||
case emqx_utils_json:safe_decode(Body0, [return_maps]) of
|
case emqx_utils_json:safe_decode(Body0, [return_maps]) of
|
||||||
{ok, #{<<"message">> := Msg0} = Body1} ->
|
{ok, #{<<"message">> := Msg0} = Body1} ->
|
||||||
|
|
|
@ -10,8 +10,8 @@
|
||||||
-include_lib("common_test/include/ct.hrl").
|
-include_lib("common_test/include/ct.hrl").
|
||||||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||||
|
|
||||||
-define(BRIDGE_TYPE, confluent_producer).
|
-define(ACTION_TYPE, confluent_producer).
|
||||||
-define(BRIDGE_TYPE_BIN, <<"confluent_producer">>).
|
-define(ACTION_TYPE_BIN, <<"confluent_producer">>).
|
||||||
-define(CONNECTOR_TYPE, confluent_producer).
|
-define(CONNECTOR_TYPE, confluent_producer).
|
||||||
-define(CONNECTOR_TYPE_BIN, <<"confluent_producer">>).
|
-define(CONNECTOR_TYPE_BIN, <<"confluent_producer">>).
|
||||||
-define(KAFKA_BRIDGE_TYPE, kafka_producer).
|
-define(KAFKA_BRIDGE_TYPE, kafka_producer).
|
||||||
|
@ -93,7 +93,7 @@ common_init_per_testcase(TestCase, Config) ->
|
||||||
{connector_type, ?CONNECTOR_TYPE},
|
{connector_type, ?CONNECTOR_TYPE},
|
||||||
{connector_name, Name},
|
{connector_name, Name},
|
||||||
{connector_config, ConnectorConfig},
|
{connector_config, ConnectorConfig},
|
||||||
{bridge_type, ?BRIDGE_TYPE},
|
{bridge_type, ?ACTION_TYPE},
|
||||||
{bridge_name, Name},
|
{bridge_name, Name},
|
||||||
{bridge_config, BridgeConfig}
|
{bridge_config, BridgeConfig}
|
||||||
| Config
|
| Config
|
||||||
|
@ -212,7 +212,7 @@ serde_roundtrip(InnerConfigMap0) ->
|
||||||
InnerConfigMap.
|
InnerConfigMap.
|
||||||
|
|
||||||
parse_and_check_bridge_config(InnerConfigMap, Name) ->
|
parse_and_check_bridge_config(InnerConfigMap, Name) ->
|
||||||
TypeBin = ?BRIDGE_TYPE_BIN,
|
TypeBin = ?ACTION_TYPE_BIN,
|
||||||
RawConf = #{<<"bridges">> => #{TypeBin => #{Name => InnerConfigMap}}},
|
RawConf = #{<<"bridges">> => #{TypeBin => #{Name => InnerConfigMap}}},
|
||||||
hocon_tconf:check_plain(emqx_bridge_v2_schema, RawConf, #{required => false, atom_key => false}),
|
hocon_tconf:check_plain(emqx_bridge_v2_schema, RawConf, #{required => false, atom_key => false}),
|
||||||
InnerConfigMap.
|
InnerConfigMap.
|
||||||
|
@ -341,3 +341,43 @@ t_same_name_confluent_kafka_bridges(Config) ->
|
||||||
end
|
end
|
||||||
),
|
),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
t_list_v1_bridges(Config) ->
|
||||||
|
?check_trace(
|
||||||
|
begin
|
||||||
|
{ok, _} = emqx_bridge_v2_testlib:create_bridge_api(Config),
|
||||||
|
|
||||||
|
?assertMatch(
|
||||||
|
{error, no_v1_equivalent},
|
||||||
|
emqx_action_info:bridge_v1_type_name(confluent_producer)
|
||||||
|
),
|
||||||
|
|
||||||
|
?assertMatch(
|
||||||
|
{ok, {{_, 200, _}, _, []}}, emqx_bridge_v2_testlib:list_bridges_http_api_v1()
|
||||||
|
),
|
||||||
|
?assertMatch(
|
||||||
|
{ok, {{_, 200, _}, _, [_]}}, emqx_bridge_v2_testlib:list_actions_http_api()
|
||||||
|
),
|
||||||
|
?assertMatch(
|
||||||
|
{ok, {{_, 200, _}, _, [_]}}, emqx_bridge_v2_testlib:list_connectors_http_api()
|
||||||
|
),
|
||||||
|
|
||||||
|
RuleTopic = <<"t/c">>,
|
||||||
|
{ok, #{<<"id">> := RuleId0}} =
|
||||||
|
emqx_bridge_v2_testlib:create_rule_and_action_http(
|
||||||
|
?ACTION_TYPE_BIN,
|
||||||
|
RuleTopic,
|
||||||
|
Config,
|
||||||
|
#{overrides => #{enable => true}}
|
||||||
|
),
|
||||||
|
?assert(emqx_bridge_v2_testlib:is_rule_enabled(RuleId0)),
|
||||||
|
?assertMatch(
|
||||||
|
{ok, {{_, 200, _}, _, _}}, emqx_bridge_v2_testlib:enable_rule_http(RuleId0)
|
||||||
|
),
|
||||||
|
?assert(emqx_bridge_v2_testlib:is_rule_enabled(RuleId0)),
|
||||||
|
|
||||||
|
ok
|
||||||
|
end,
|
||||||
|
[]
|
||||||
|
),
|
||||||
|
ok.
|
||||||
|
|
|
@ -566,6 +566,7 @@ do_acknowledge(State0) ->
|
||||||
Path = path(State1, ack),
|
Path = path(State1, ack),
|
||||||
Body = body(State1, ack, #{ack_ids => AckIds}),
|
Body = body(State1, ack, #{ack_ids => AckIds}),
|
||||||
PreparedRequest = {prepared_request, {Method, Path, Body}},
|
PreparedRequest = {prepared_request, {Method, Path, Body}},
|
||||||
|
?tp(gcp_pubsub_consumer_worker_will_acknowledge, #{acks => PendingAcks}),
|
||||||
Res = emqx_bridge_gcp_pubsub_client:query_sync(PreparedRequest, Client),
|
Res = emqx_bridge_gcp_pubsub_client:query_sync(PreparedRequest, Client),
|
||||||
case Res of
|
case Res of
|
||||||
{error, Reason} ->
|
{error, Reason} ->
|
||||||
|
|
|
@ -706,7 +706,9 @@ prop_all_pulled_are_acked(Trace) ->
|
||||||
|| #{messages := Msgs} <- ?of_kind(gcp_pubsub_consumer_worker_decoded_messages, Trace),
|
|| #{messages := Msgs} <- ?of_kind(gcp_pubsub_consumer_worker_decoded_messages, Trace),
|
||||||
#{<<"message">> := #{<<"messageId">> := MsgId}} <- Msgs
|
#{<<"message">> := #{<<"messageId">> := MsgId}} <- Msgs
|
||||||
],
|
],
|
||||||
AckedMsgIds0 = ?projection(acks, ?of_kind(gcp_pubsub_consumer_worker_acknowledged, Trace)),
|
%% we just need to check that it _tries_ to ack each id; the result itself doesn't
|
||||||
|
%% matter, as it might timeout.
|
||||||
|
AckedMsgIds0 = ?projection(acks, ?of_kind(gcp_pubsub_consumer_worker_will_acknowledge, Trace)),
|
||||||
AckedMsgIds1 = [
|
AckedMsgIds1 = [
|
||||||
MsgId
|
MsgId
|
||||||
|| PendingAcks <- AckedMsgIds0, {MsgId, _AckId} <- maps:to_list(PendingAcks)
|
|| PendingAcks <- AckedMsgIds0, {MsgId, _AckId} <- maps:to_list(PendingAcks)
|
||||||
|
@ -1172,7 +1174,12 @@ t_multiple_topic_mappings(Config) ->
|
||||||
?assertMatch(
|
?assertMatch(
|
||||||
{{ok, _}, {ok, _}},
|
{{ok, _}, {ok, _}},
|
||||||
?wait_async_action(
|
?wait_async_action(
|
||||||
create_bridge(Config),
|
create_bridge(
|
||||||
|
Config,
|
||||||
|
#{
|
||||||
|
<<"consumer">> => #{<<"ack_deadline">> => <<"10m">>}
|
||||||
|
}
|
||||||
|
),
|
||||||
#{?snk_kind := "gcp_pubsub_consumer_worker_subscription_ready"},
|
#{?snk_kind := "gcp_pubsub_consumer_worker_subscription_ready"},
|
||||||
40_000
|
40_000
|
||||||
)
|
)
|
||||||
|
@ -1233,7 +1240,7 @@ t_multiple_topic_mappings(Config) ->
|
||||||
],
|
],
|
||||||
Published
|
Published
|
||||||
),
|
),
|
||||||
wait_acked(#{n => 2}),
|
?block_until(#{?snk_kind := gcp_pubsub_consumer_worker_acknowledged}, 20_000),
|
||||||
?retry(
|
?retry(
|
||||||
_Interval = 200,
|
_Interval = 200,
|
||||||
_NAttempts = 20,
|
_NAttempts = 20,
|
||||||
|
@ -1275,10 +1282,6 @@ t_multiple_pull_workers(Config) ->
|
||||||
<<"ack_deadline">> => <<"10m">>,
|
<<"ack_deadline">> => <<"10m">>,
|
||||||
<<"ack_retry_interval">> => <<"1s">>,
|
<<"ack_retry_interval">> => <<"1s">>,
|
||||||
<<"consumer_workers_per_topic">> => NConsumers
|
<<"consumer_workers_per_topic">> => NConsumers
|
||||||
},
|
|
||||||
<<"resource_opts">> => #{
|
|
||||||
%% reduce flakiness
|
|
||||||
<<"request_ttl">> => <<"20s">>
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
|
|
|
@ -101,7 +101,14 @@ namespace() -> "bridge_redis".
|
||||||
roots() -> [].
|
roots() -> [].
|
||||||
|
|
||||||
fields(action_parameters) ->
|
fields(action_parameters) ->
|
||||||
[{command_template, fun command_template/1}];
|
[
|
||||||
|
command_template(),
|
||||||
|
{redis_type,
|
||||||
|
?HOCON(
|
||||||
|
?ENUM([single, sentinel, cluster]),
|
||||||
|
#{required => true, desc => ?DESC(redis_type)}
|
||||||
|
)}
|
||||||
|
];
|
||||||
fields("post_single") ->
|
fields("post_single") ->
|
||||||
method_fields(post, redis_single);
|
method_fields(post, redis_single);
|
||||||
fields("post_sentinel") ->
|
fields("post_sentinel") ->
|
||||||
|
@ -147,8 +154,8 @@ method_fields(put, ConnectorType) ->
|
||||||
redis_bridge_common_fields(Type) ->
|
redis_bridge_common_fields(Type) ->
|
||||||
emqx_bridge_schema:common_bridge_fields() ++
|
emqx_bridge_schema:common_bridge_fields() ++
|
||||||
[
|
[
|
||||||
{local_topic, mk(binary(), #{required => false, desc => ?DESC("desc_local_topic")})}
|
{local_topic, mk(binary(), #{required => false, desc => ?DESC("desc_local_topic")})},
|
||||||
| fields(action_parameters)
|
command_template()
|
||||||
] ++
|
] ++
|
||||||
v1_resource_fields(Type).
|
v1_resource_fields(Type).
|
||||||
|
|
||||||
|
@ -222,3 +229,6 @@ is_command_template_valid(CommandSegments) ->
|
||||||
"the value of the field 'command_template' should be a nonempty "
|
"the value of the field 'command_template' should be a nonempty "
|
||||||
"list of strings (templates for Redis command and arguments)"}
|
"list of strings (templates for Redis command and arguments)"}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
command_template() ->
|
||||||
|
{command_template, fun command_template/1}.
|
||||||
|
|
|
@ -11,8 +11,8 @@
|
||||||
action_type_name/0,
|
action_type_name/0,
|
||||||
connector_type_name/0,
|
connector_type_name/0,
|
||||||
schema_module/0,
|
schema_module/0,
|
||||||
bridge_v1_config_to_action_config/2,
|
|
||||||
connector_action_config_to_bridge_v1_config/2,
|
connector_action_config_to_bridge_v1_config/2,
|
||||||
|
bridge_v1_config_to_action_config/2,
|
||||||
bridge_v1_config_to_connector_config/1,
|
bridge_v1_config_to_connector_config/1,
|
||||||
bridge_v1_type_name_fun/1
|
bridge_v1_type_name_fun/1
|
||||||
]).
|
]).
|
||||||
|
@ -28,14 +28,25 @@ connector_type_name() -> redis.
|
||||||
|
|
||||||
schema_module() -> ?SCHEMA_MODULE.
|
schema_module() -> ?SCHEMA_MODULE.
|
||||||
|
|
||||||
|
%% redis_cluster don't have batch options
|
||||||
connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) ->
|
connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) ->
|
||||||
maps:merge(
|
Config0 = emqx_utils_maps:deep_merge(
|
||||||
maps:without(
|
maps:without(
|
||||||
[<<"connector">>],
|
[<<"connector">>],
|
||||||
map_unindent(<<"parameters">>, ActionConfig)
|
emqx_utils_maps:unindent(<<"parameters">>, ActionConfig)
|
||||||
),
|
),
|
||||||
map_unindent(<<"parameters">>, ConnectorConfig)
|
emqx_utils_maps:unindent(<<"parameters">>, ConnectorConfig)
|
||||||
).
|
),
|
||||||
|
Config1 =
|
||||||
|
case Config0 of
|
||||||
|
#{<<"resource_opts">> := ResOpts0, <<"redis_type">> := Type} ->
|
||||||
|
Schema = emqx_bridge_redis:fields("creation_opts_redis_" ++ binary_to_list(Type)),
|
||||||
|
ResOpts = maps:with(schema_keys(Schema), ResOpts0),
|
||||||
|
Config0#{<<"resource_opts">> => ResOpts};
|
||||||
|
_ ->
|
||||||
|
Config0
|
||||||
|
end,
|
||||||
|
maps:without([<<"description">>], Config1).
|
||||||
|
|
||||||
bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName) ->
|
bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName) ->
|
||||||
ActionTopLevelKeys = schema_keys(?SCHEMA_MODULE:fields(redis_action)),
|
ActionTopLevelKeys = schema_keys(?SCHEMA_MODULE:fields(redis_action)),
|
||||||
|
@ -81,22 +92,9 @@ v1_type(<<"cluster">>) -> redis_cluster.
|
||||||
|
|
||||||
bridge_v1_type_names() -> [redis_single, redis_sentinel, redis_cluster].
|
bridge_v1_type_names() -> [redis_single, redis_sentinel, redis_cluster].
|
||||||
|
|
||||||
map_unindent(Key, Map) ->
|
|
||||||
maps:merge(
|
|
||||||
maps:get(Key, Map),
|
|
||||||
maps:remove(Key, Map)
|
|
||||||
).
|
|
||||||
|
|
||||||
map_indent(IndentKey, PickKeys, Map) ->
|
|
||||||
maps:put(
|
|
||||||
IndentKey,
|
|
||||||
maps:with(PickKeys, Map),
|
|
||||||
maps:without(PickKeys, Map)
|
|
||||||
).
|
|
||||||
|
|
||||||
schema_keys(Schema) ->
|
schema_keys(Schema) ->
|
||||||
[bin(Key) || {Key, _} <- Schema].
|
[bin(Key) || {Key, _} <- Schema].
|
||||||
|
|
||||||
make_config_map(PickKeys, IndentKeys, Config) ->
|
make_config_map(PickKeys, IndentKeys, Config) ->
|
||||||
Conf0 = maps:with(PickKeys, Config),
|
Conf0 = maps:with(PickKeys, Config),
|
||||||
map_indent(<<"parameters">>, IndentKeys, Conf0).
|
emqx_utils_maps:indent(<<"parameters">>, IndentKeys, Conf0).
|
||||||
|
|
|
@ -76,13 +76,7 @@ fields(redis_action) ->
|
||||||
)
|
)
|
||||||
),
|
),
|
||||||
[ResOpts] = emqx_connector_schema:resource_opts_ref(?MODULE, action_resource_opts),
|
[ResOpts] = emqx_connector_schema:resource_opts_ref(?MODULE, action_resource_opts),
|
||||||
RedisType =
|
lists:keyreplace(resource_opts, 1, Schema, ResOpts);
|
||||||
{redis_type,
|
|
||||||
?HOCON(
|
|
||||||
?ENUM([single, sentinel, cluster]),
|
|
||||||
#{required => true, desc => ?DESC(redis_type)}
|
|
||||||
)},
|
|
||||||
[RedisType | lists:keyreplace(resource_opts, 1, Schema, ResOpts)];
|
|
||||||
fields(action_resource_opts) ->
|
fields(action_resource_opts) ->
|
||||||
emqx_bridge_v2_schema:resource_opts_fields([
|
emqx_bridge_v2_schema:resource_opts_fields([
|
||||||
{batch_size, #{desc => ?DESC(batch_size)}},
|
{batch_size, #{desc => ?DESC(batch_size)}},
|
||||||
|
@ -130,7 +124,7 @@ resource_opts_converter(Conf, _Opts) ->
|
||||||
maps:map(
|
maps:map(
|
||||||
fun(_Name, SubConf) ->
|
fun(_Name, SubConf) ->
|
||||||
case SubConf of
|
case SubConf of
|
||||||
#{<<"redis_type">> := <<"cluster">>} ->
|
#{<<"parameters">> := #{<<"redis_type">> := <<"cluster">>}} ->
|
||||||
ResOpts = maps:get(<<"resource_opts">>, SubConf, #{}),
|
ResOpts = maps:get(<<"resource_opts">>, SubConf, #{}),
|
||||||
%% cluster don't support batch
|
%% cluster don't support batch
|
||||||
SubConf#{
|
SubConf#{
|
||||||
|
@ -218,12 +212,12 @@ action_example(RedisType, get) ->
|
||||||
);
|
);
|
||||||
action_example(RedisType, put) ->
|
action_example(RedisType, put) ->
|
||||||
#{
|
#{
|
||||||
redis_type => RedisType,
|
|
||||||
enable => true,
|
enable => true,
|
||||||
connector => <<"my_connector_name">>,
|
connector => <<"my_connector_name">>,
|
||||||
description => <<"My action">>,
|
description => <<"My action">>,
|
||||||
parameters => #{
|
parameters => #{
|
||||||
command_template => [<<"LPUSH">>, <<"MSGS">>, <<"${payload}">>]
|
command_template => [<<"LPUSH">>, <<"MSGS">>, <<"${payload}">>],
|
||||||
|
redis_type => RedisType
|
||||||
},
|
},
|
||||||
resource_opts => #{batch_size => 1}
|
resource_opts => #{batch_size => 1}
|
||||||
}.
|
}.
|
||||||
|
|
|
@ -229,7 +229,10 @@ action_config(Name, Path, ConnectorId) ->
|
||||||
<<"enable">> => true,
|
<<"enable">> => true,
|
||||||
<<"connector">> => ConnectorId,
|
<<"connector">> => ConnectorId,
|
||||||
<<"parameters">> =>
|
<<"parameters">> =>
|
||||||
#{<<"command_template">> => [<<"RPUSH">>, <<"MSGS/${topic}">>, <<"${payload}">>]},
|
#{
|
||||||
|
<<"command_template">> => [<<"RPUSH">>, <<"MSGS/${topic}">>, <<"${payload}">>],
|
||||||
|
<<"redis_type">> => atom_to_binary(RedisType)
|
||||||
|
},
|
||||||
<<"local_topic">> => <<"t/redis">>,
|
<<"local_topic">> => <<"t/redis">>,
|
||||||
<<"resource_opts">> => #{
|
<<"resource_opts">> => #{
|
||||||
<<"batch_size">> => 1,
|
<<"batch_size">> => 1,
|
||||||
|
@ -246,18 +249,9 @@ action_config(Name, Path, ConnectorId) ->
|
||||||
<<"worker_pool_size">> => <<"1">>
|
<<"worker_pool_size">> => <<"1">>
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
PerTypeCfg = per_type_action_config(RedisType),
|
InnerConfigMap = serde_roundtrip(CommonCfg),
|
||||||
InnerConfigMap0 = emqx_utils_maps:deep_merge(CommonCfg, PerTypeCfg),
|
|
||||||
InnerConfigMap = serde_roundtrip(InnerConfigMap0),
|
|
||||||
parse_and_check_bridge_config(InnerConfigMap, Name).
|
parse_and_check_bridge_config(InnerConfigMap, Name).
|
||||||
|
|
||||||
per_type_action_config(single) ->
|
|
||||||
#{<<"redis_type">> => <<"single">>};
|
|
||||||
per_type_action_config(sentinel) ->
|
|
||||||
#{<<"redis_type">> => <<"sentinel">>};
|
|
||||||
per_type_action_config(cluster) ->
|
|
||||||
#{<<"redis_type">> => <<"cluster">>}.
|
|
||||||
|
|
||||||
%% check it serializes correctly
|
%% check it serializes correctly
|
||||||
serde_roundtrip(InnerConfigMap0) ->
|
serde_roundtrip(InnerConfigMap0) ->
|
||||||
IOList = hocon_pp:do(InnerConfigMap0, #{}),
|
IOList = hocon_pp:do(InnerConfigMap0, #{}),
|
||||||
|
|
|
@ -28,7 +28,7 @@
|
||||||
-export([remove/2, remove/3]).
|
-export([remove/2, remove/3]).
|
||||||
-export([tombstone/2]).
|
-export([tombstone/2]).
|
||||||
-export([reset/2, reset/3]).
|
-export([reset/2, reset/3]).
|
||||||
-export([dump_schema/2, reformat_schema_dump/1]).
|
-export([dump_schema/2, reformat_schema_dump/2]).
|
||||||
-export([schema_module/0]).
|
-export([schema_module/0]).
|
||||||
|
|
||||||
%% TODO: move to emqx_dashboard when we stop building api schema at build time
|
%% TODO: move to emqx_dashboard when we stop building api schema at build time
|
||||||
|
@ -186,9 +186,9 @@ gen_schema_json(Dir, SchemaModule, Lang) ->
|
||||||
ok = gen_preformat_md_json_files(Dir, StructsJsonArray, Lang).
|
ok = gen_preformat_md_json_files(Dir, StructsJsonArray, Lang).
|
||||||
|
|
||||||
gen_preformat_md_json_files(Dir, StructsJsonArray, Lang) ->
|
gen_preformat_md_json_files(Dir, StructsJsonArray, Lang) ->
|
||||||
NestedStruct = reformat_schema_dump(StructsJsonArray),
|
NestedStruct = reformat_schema_dump(StructsJsonArray, Lang),
|
||||||
%% write to files
|
%% write to files
|
||||||
NestedJsonFile = filename:join([Dir, "schmea-v2-" ++ Lang ++ ".json"]),
|
NestedJsonFile = filename:join([Dir, "schema-v2-" ++ Lang ++ ".json"]),
|
||||||
io:format(user, "===< Generating: ~s~n", [NestedJsonFile]),
|
io:format(user, "===< Generating: ~s~n", [NestedJsonFile]),
|
||||||
ok = file:write_file(
|
ok = file:write_file(
|
||||||
NestedJsonFile, emqx_utils_json:encode(NestedStruct, [pretty, force_utf8])
|
NestedJsonFile, emqx_utils_json:encode(NestedStruct, [pretty, force_utf8])
|
||||||
|
@ -196,15 +196,17 @@ gen_preformat_md_json_files(Dir, StructsJsonArray, Lang) ->
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
%% @doc This function is exported for scripts/schema-dump-reformat.escript
|
%% @doc This function is exported for scripts/schema-dump-reformat.escript
|
||||||
reformat_schema_dump(StructsJsonArray0) ->
|
reformat_schema_dump(StructsJsonArray0, Lang) ->
|
||||||
%% prepare
|
%% prepare
|
||||||
|
DescResolver = make_desc_resolver(Lang),
|
||||||
StructsJsonArray = deduplicate_by_full_name(StructsJsonArray0),
|
StructsJsonArray = deduplicate_by_full_name(StructsJsonArray0),
|
||||||
#{fields := RootFields} = hd(StructsJsonArray),
|
#{fields := RootFields} = hd(StructsJsonArray),
|
||||||
RootNames0 = lists:map(fun(#{name := RootName}) -> RootName end, RootFields),
|
RootNames0 = lists:map(fun(#{name := RootName}) -> RootName end, RootFields),
|
||||||
RootNames = lists:map(fun to_bin/1, RootNames0),
|
RootNames = lists:map(fun to_bin/1, RootNames0),
|
||||||
%% reformat
|
%% reformat
|
||||||
[Root | FlatStructs0] = lists:map(
|
[Root | FlatStructs0] = lists:map(
|
||||||
fun(Struct) -> gen_flat_doc(RootNames, Struct) end, StructsJsonArray
|
fun(Struct) -> gen_flat_doc(RootNames, Struct, DescResolver) end,
|
||||||
|
StructsJsonArray
|
||||||
),
|
),
|
||||||
FlatStructs = [Root#{text => <<"root">>, hash => <<"root">>} | FlatStructs0],
|
FlatStructs = [Root#{text => <<"root">>, hash => <<"root">>} | FlatStructs0],
|
||||||
gen_nested_doc(FlatStructs).
|
gen_nested_doc(FlatStructs).
|
||||||
|
@ -302,7 +304,7 @@ expand_ref(#{hash := FullName}, FindFn, Path) ->
|
||||||
|
|
||||||
%% generate flat docs for each struct.
|
%% generate flat docs for each struct.
|
||||||
%% using references to link to other structs.
|
%% using references to link to other structs.
|
||||||
gen_flat_doc(RootNames, #{full_name := FullName, fields := Fields} = S) ->
|
gen_flat_doc(RootNames, #{full_name := FullName, fields := Fields} = S, DescResolver) ->
|
||||||
ShortName = short_name(FullName),
|
ShortName = short_name(FullName),
|
||||||
case is_missing_namespace(ShortName, to_bin(FullName), RootNames) of
|
case is_missing_namespace(ShortName, to_bin(FullName), RootNames) of
|
||||||
true ->
|
true ->
|
||||||
|
@ -314,18 +316,19 @@ gen_flat_doc(RootNames, #{full_name := FullName, fields := Fields} = S) ->
|
||||||
text => short_name(FullName),
|
text => short_name(FullName),
|
||||||
hash => format_hash(FullName),
|
hash => format_hash(FullName),
|
||||||
doc => maps:get(desc, S, <<"">>),
|
doc => maps:get(desc, S, <<"">>),
|
||||||
fields => format_fields(Fields)
|
fields => format_fields(Fields, DescResolver)
|
||||||
}.
|
}.
|
||||||
|
|
||||||
format_fields([]) ->
|
format_fields(Fields, DescResolver) ->
|
||||||
[];
|
[format_field(F, DescResolver) || F <- Fields].
|
||||||
format_fields([Field | Fields]) ->
|
|
||||||
[format_field(Field) | format_fields(Fields)].
|
|
||||||
|
|
||||||
format_field(#{name := Name, aliases := Aliases, type := Type} = F) ->
|
format_field(#{name := Name, aliases := Aliases, type := Type} = F, DescResolver) ->
|
||||||
|
TypeDoc = format_type_desc(Type, DescResolver),
|
||||||
L = [
|
L = [
|
||||||
{text, Name},
|
{text, Name},
|
||||||
{type, format_type(Type)},
|
{type, format_type(Type)},
|
||||||
|
%% TODO: Make it into a separate field.
|
||||||
|
%% {typedoc, format_type_desc(Type, DescResolver)},
|
||||||
{refs, format_refs(Type)},
|
{refs, format_refs(Type)},
|
||||||
{aliases,
|
{aliases,
|
||||||
case Aliases of
|
case Aliases of
|
||||||
|
@ -333,7 +336,7 @@ format_field(#{name := Name, aliases := Aliases, type := Type} = F) ->
|
||||||
_ -> Aliases
|
_ -> Aliases
|
||||||
end},
|
end},
|
||||||
{default, maps:get(hocon, maps:get(default, F, #{}), undefined)},
|
{default, maps:get(hocon, maps:get(default, F, #{}), undefined)},
|
||||||
{doc, maps:get(desc, F, undefined)}
|
{doc, join_format([maps:get(desc, F, undefined), TypeDoc])}
|
||||||
],
|
],
|
||||||
maps:from_list([{K, V} || {K, V} <- L, V =/= undefined]).
|
maps:from_list([{K, V} || {K, V} <- L, V =/= undefined]).
|
||||||
|
|
||||||
|
@ -393,10 +396,26 @@ format_union_members([Member | Members], Acc) ->
|
||||||
NewAcc = [format_type(Member) | Acc],
|
NewAcc = [format_type(Member) | Acc],
|
||||||
format_union_members(Members, NewAcc).
|
format_union_members(Members, NewAcc).
|
||||||
|
|
||||||
|
format_type_desc(#{kind := primitive, name := Name}, DescResolver) ->
|
||||||
|
format_primitive_type_desc(Name, DescResolver);
|
||||||
|
format_type_desc(#{}, _DescResolver) ->
|
||||||
|
undefined.
|
||||||
|
|
||||||
format_primitive_type(TypeStr) ->
|
format_primitive_type(TypeStr) ->
|
||||||
Spec = emqx_conf_schema_types:readable_docgen(?MODULE, TypeStr),
|
Spec = get_primitive_typespec(TypeStr),
|
||||||
to_bin(maps:get(type, Spec)).
|
to_bin(maps:get(type, Spec)).
|
||||||
|
|
||||||
|
format_primitive_type_desc(TypeStr, DescResolver) ->
|
||||||
|
case get_primitive_typespec(TypeStr) of
|
||||||
|
#{desc := Desc} ->
|
||||||
|
DescResolver(Desc);
|
||||||
|
#{} ->
|
||||||
|
undefined
|
||||||
|
end.
|
||||||
|
|
||||||
|
get_primitive_typespec(TypeStr) ->
|
||||||
|
emqx_conf_schema_types:readable_docgen(?MODULE, TypeStr).
|
||||||
|
|
||||||
%% All types should have a namespace to avlid name clashing.
|
%% All types should have a namespace to avlid name clashing.
|
||||||
is_missing_namespace(ShortName, FullName, RootNames) ->
|
is_missing_namespace(ShortName, FullName, RootNames) ->
|
||||||
case lists:member(ShortName, RootNames) of
|
case lists:member(ShortName, RootNames) of
|
||||||
|
@ -560,6 +579,14 @@ hocon_schema_to_spec(Atom, _LocalModule) when is_atom(Atom) ->
|
||||||
typename_to_spec(TypeStr, Module) ->
|
typename_to_spec(TypeStr, Module) ->
|
||||||
emqx_conf_schema_types:readable_dashboard(Module, TypeStr).
|
emqx_conf_schema_types:readable_dashboard(Module, TypeStr).
|
||||||
|
|
||||||
|
join_format(Snippets) ->
|
||||||
|
case [S || S <- Snippets, S =/= undefined] of
|
||||||
|
[] ->
|
||||||
|
undefined;
|
||||||
|
NonEmpty ->
|
||||||
|
to_bin(lists:join("<br/>", NonEmpty))
|
||||||
|
end.
|
||||||
|
|
||||||
to_bin(List) when is_list(List) -> iolist_to_binary(List);
|
to_bin(List) when is_list(List) -> iolist_to_binary(List);
|
||||||
to_bin(Boolean) when is_boolean(Boolean) -> Boolean;
|
to_bin(Boolean) when is_boolean(Boolean) -> Boolean;
|
||||||
to_bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8);
|
to_bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8);
|
||||||
|
|
|
@ -233,7 +233,10 @@ load_config(Bin, Opts) when is_binary(Bin) ->
|
||||||
{error, Reason}
|
{error, Reason}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
load_config_from_raw(RawConf, Opts) ->
|
load_config_from_raw(RawConf0, Opts) ->
|
||||||
|
SchemaMod = emqx_conf:schema_module(),
|
||||||
|
RawConf1 = emqx_config:upgrade_raw_conf(SchemaMod, RawConf0),
|
||||||
|
RawConf = emqx_config:fill_defaults(RawConf1),
|
||||||
case check_config(RawConf) of
|
case check_config(RawConf) of
|
||||||
ok ->
|
ok ->
|
||||||
Error =
|
Error =
|
||||||
|
@ -283,9 +286,16 @@ update_config_cluster(
|
||||||
check_res(Key, emqx_authn:merge_config(Conf), Conf, Opts);
|
check_res(Key, emqx_authn:merge_config(Conf), Conf, Opts);
|
||||||
update_config_cluster(Key, NewConf, #{mode := merge} = Opts) ->
|
update_config_cluster(Key, NewConf, #{mode := merge} = Opts) ->
|
||||||
Merged = merge_conf(Key, NewConf),
|
Merged = merge_conf(Key, NewConf),
|
||||||
check_res(Key, emqx_conf:update([Key], Merged, ?OPTIONS), NewConf, Opts);
|
Request = make_request(Key, Merged),
|
||||||
|
check_res(Key, emqx_conf:update([Key], Request, ?OPTIONS), NewConf, Opts);
|
||||||
update_config_cluster(Key, Value, #{mode := replace} = Opts) ->
|
update_config_cluster(Key, Value, #{mode := replace} = Opts) ->
|
||||||
check_res(Key, emqx_conf:update([Key], Value, ?OPTIONS), Value, Opts).
|
Request = make_request(Key, Value),
|
||||||
|
check_res(Key, emqx_conf:update([Key], Request, ?OPTIONS), Value, Opts).
|
||||||
|
|
||||||
|
make_request(Key, Value) when Key =:= <<"connectors">> orelse Key =:= <<"actions">> ->
|
||||||
|
{force_update, Value};
|
||||||
|
make_request(_Key, Value) ->
|
||||||
|
Value.
|
||||||
|
|
||||||
-define(LOCAL_OPTIONS, #{rawconf_with_defaults => true, persistent => false}).
|
-define(LOCAL_OPTIONS, #{rawconf_with_defaults => true, persistent => false}).
|
||||||
update_config_local(
|
update_config_local(
|
||||||
|
@ -302,9 +312,11 @@ update_config_local(
|
||||||
check_res(node(), Key, emqx_authn:merge_config_local(Conf, ?LOCAL_OPTIONS), Conf, Opts);
|
check_res(node(), Key, emqx_authn:merge_config_local(Conf, ?LOCAL_OPTIONS), Conf, Opts);
|
||||||
update_config_local(Key, NewConf, #{mode := merge} = Opts) ->
|
update_config_local(Key, NewConf, #{mode := merge} = Opts) ->
|
||||||
Merged = merge_conf(Key, NewConf),
|
Merged = merge_conf(Key, NewConf),
|
||||||
check_res(node(), Key, emqx:update_config([Key], Merged, ?LOCAL_OPTIONS), NewConf, Opts);
|
Request = make_request(Key, Merged),
|
||||||
|
check_res(node(), Key, emqx:update_config([Key], Request, ?LOCAL_OPTIONS), NewConf, Opts);
|
||||||
update_config_local(Key, Value, #{mode := replace} = Opts) ->
|
update_config_local(Key, Value, #{mode := replace} = Opts) ->
|
||||||
check_res(node(), Key, emqx:update_config([Key], Value, ?LOCAL_OPTIONS), Value, Opts).
|
Request = make_request(Key, Value),
|
||||||
|
check_res(node(), Key, emqx:update_config([Key], Request, ?LOCAL_OPTIONS), Value, Opts).
|
||||||
|
|
||||||
check_res(Key, Res, Conf, Opts) -> check_res(cluster, Key, Res, Conf, Opts).
|
check_res(Key, Res, Conf, Opts) -> check_res(cluster, Key, Res, Conf, Opts).
|
||||||
check_res(Node, Key, {ok, _}, _Conf, Opts) ->
|
check_res(Node, Key, {ok, _}, _Conf, Opts) ->
|
||||||
|
@ -452,8 +464,21 @@ sorted_fold(Func, Conf) ->
|
||||||
Error -> {error, Error}
|
Error -> {error, Error}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
to_sorted_list(Conf) ->
|
to_sorted_list(Conf0) ->
|
||||||
lists:keysort(1, maps:to_list(Conf)).
|
%% connectors > actions/bridges > rule_engine
|
||||||
|
Keys = [<<"connectors">>, <<"actions">>, <<"bridges">>, <<"rule_engine">>],
|
||||||
|
{HighPriorities, Conf1} = split_high_priority_conf(Keys, Conf0, []),
|
||||||
|
HighPriorities ++ lists:keysort(1, maps:to_list(Conf1)).
|
||||||
|
|
||||||
|
split_high_priority_conf([], Conf0, Acc) ->
|
||||||
|
{lists:reverse(Acc), Conf0};
|
||||||
|
split_high_priority_conf([Key | Keys], Conf0, Acc) ->
|
||||||
|
case maps:take(Key, Conf0) of
|
||||||
|
error ->
|
||||||
|
split_high_priority_conf(Keys, Conf0, Acc);
|
||||||
|
{Value, Conf1} ->
|
||||||
|
split_high_priority_conf(Keys, Conf1, [{Key, Value} | Acc])
|
||||||
|
end.
|
||||||
|
|
||||||
merge_conf(Key, NewConf) ->
|
merge_conf(Key, NewConf) ->
|
||||||
OldConf = emqx_conf:get_raw([Key]),
|
OldConf = emqx_conf:get_raw([Key]),
|
||||||
|
|
|
@ -596,7 +596,7 @@ fields("node") ->
|
||||||
#{
|
#{
|
||||||
mapping => "mria.shard_transport",
|
mapping => "mria.shard_transport",
|
||||||
importance => ?IMPORTANCE_HIDDEN,
|
importance => ?IMPORTANCE_HIDDEN,
|
||||||
default => gen_rpc,
|
default => distr,
|
||||||
desc => ?DESC(db_default_shard_transport)
|
desc => ?DESC(db_default_shard_transport)
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
|
|
|
@ -16,6 +16,8 @@
|
||||||
|
|
||||||
-module(emqx_conf_schema_types).
|
-module(emqx_conf_schema_types).
|
||||||
|
|
||||||
|
-include_lib("hocon/include/hocon_types.hrl").
|
||||||
|
|
||||||
-export([readable/2]).
|
-export([readable/2]).
|
||||||
-export([readable_swagger/2, readable_dashboard/2, readable_docgen/2]).
|
-export([readable_swagger/2, readable_dashboard/2, readable_docgen/2]).
|
||||||
|
|
||||||
|
@ -165,37 +167,37 @@ readable("duration()") ->
|
||||||
#{
|
#{
|
||||||
swagger => #{type => string, example => <<"12m">>},
|
swagger => #{type => string, example => <<"12m">>},
|
||||||
dashboard => #{type => duration},
|
dashboard => #{type => duration},
|
||||||
docgen => #{type => "String", example => <<"12m">>}
|
docgen => #{type => "Duration", example => <<"12m">>, desc => ?DESC(duration)}
|
||||||
};
|
};
|
||||||
readable("duration_s()") ->
|
readable("duration_s()") ->
|
||||||
#{
|
#{
|
||||||
swagger => #{type => string, example => <<"1h">>},
|
swagger => #{type => string, example => <<"1h">>},
|
||||||
dashboard => #{type => duration},
|
dashboard => #{type => duration},
|
||||||
docgen => #{type => "String", example => <<"1h">>}
|
docgen => #{type => "Duration(s)", example => <<"1h">>, desc => ?DESC(duration)}
|
||||||
};
|
};
|
||||||
readable("duration_ms()") ->
|
readable("duration_ms()") ->
|
||||||
#{
|
#{
|
||||||
swagger => #{type => string, example => <<"32s">>},
|
swagger => #{type => string, example => <<"32s">>},
|
||||||
dashboard => #{type => duration},
|
dashboard => #{type => duration},
|
||||||
docgen => #{type => "String", example => <<"32s">>}
|
docgen => #{type => "Duration", example => <<"32s">>, desc => ?DESC(duration)}
|
||||||
};
|
};
|
||||||
readable("timeout_duration()") ->
|
readable("timeout_duration()") ->
|
||||||
#{
|
#{
|
||||||
swagger => #{type => string, example => <<"12m">>},
|
swagger => #{type => string, example => <<"12m">>},
|
||||||
dashboard => #{type => duration},
|
dashboard => #{type => duration},
|
||||||
docgen => #{type => "String", example => <<"12m">>}
|
docgen => #{type => "Duration", example => <<"12m">>, desc => ?DESC(duration)}
|
||||||
};
|
};
|
||||||
readable("timeout_duration_s()") ->
|
readable("timeout_duration_s()") ->
|
||||||
#{
|
#{
|
||||||
swagger => #{type => string, example => <<"1h">>},
|
swagger => #{type => string, example => <<"1h">>},
|
||||||
dashboard => #{type => duration},
|
dashboard => #{type => duration},
|
||||||
docgen => #{type => "String", example => <<"1h">>}
|
docgen => #{type => "Duration(s)", example => <<"1h">>, desc => ?DESC(duration)}
|
||||||
};
|
};
|
||||||
readable("timeout_duration_ms()") ->
|
readable("timeout_duration_ms()") ->
|
||||||
#{
|
#{
|
||||||
swagger => #{type => string, example => <<"32s">>},
|
swagger => #{type => string, example => <<"32s">>},
|
||||||
dashboard => #{type => duration},
|
dashboard => #{type => duration},
|
||||||
docgen => #{type => "String", example => <<"32s">>}
|
docgen => #{type => "Duration", example => <<"32s">>, desc => ?DESC(duration)}
|
||||||
};
|
};
|
||||||
readable("percent()") ->
|
readable("percent()") ->
|
||||||
#{
|
#{
|
||||||
|
@ -219,13 +221,13 @@ readable("bytesize()") ->
|
||||||
#{
|
#{
|
||||||
swagger => #{type => string, example => <<"32MB">>},
|
swagger => #{type => string, example => <<"32MB">>},
|
||||||
dashboard => #{type => 'byteSize'},
|
dashboard => #{type => 'byteSize'},
|
||||||
docgen => #{type => "String", example => <<"32MB">>}
|
docgen => #{type => "Bytesize", example => <<"32MB">>, desc => ?DESC(bytesize)}
|
||||||
};
|
};
|
||||||
readable("wordsize()") ->
|
readable("wordsize()") ->
|
||||||
#{
|
#{
|
||||||
swagger => #{type => string, example => <<"1024KB">>},
|
swagger => #{type => string, example => <<"1024KB">>},
|
||||||
dashboard => #{type => 'wordSize'},
|
dashboard => #{type => 'wordSize'},
|
||||||
docgen => #{type => "String", example => <<"1024KB">>}
|
docgen => #{type => "Bytesize", example => <<"1024KB">>, desc => ?DESC(bytesize)}
|
||||||
};
|
};
|
||||||
readable("map(" ++ Map) ->
|
readable("map(" ++ Map) ->
|
||||||
[$) | _MapArgs] = lists:reverse(Map),
|
[$) | _MapArgs] = lists:reverse(Map),
|
||||||
|
@ -287,7 +289,11 @@ readable("secret()") ->
|
||||||
#{
|
#{
|
||||||
swagger => #{type => string, example => <<"R4ND0M/S∃CЯ∃T"/utf8>>},
|
swagger => #{type => string, example => <<"R4ND0M/S∃CЯ∃T"/utf8>>},
|
||||||
dashboard => #{type => string},
|
dashboard => #{type => string},
|
||||||
docgen => #{type => "String", example => <<"R4ND0M/S∃CЯ∃T"/utf8>>}
|
docgen => #{
|
||||||
|
type => "Secret",
|
||||||
|
example => <<"R4ND0M/S∃CЯ∃T"/utf8>>,
|
||||||
|
desc => ?DESC(secret)
|
||||||
|
}
|
||||||
};
|
};
|
||||||
readable(TypeStr0) ->
|
readable(TypeStr0) ->
|
||||||
case string:split(TypeStr0, ":") of
|
case string:split(TypeStr0, ":") of
|
||||||
|
|
|
@ -40,7 +40,7 @@ t_load_config(Config) ->
|
||||||
ConfBin = hocon_pp:do(#{<<"authorization">> => #{<<"sources">> => []}}, #{}),
|
ConfBin = hocon_pp:do(#{<<"authorization">> => #{<<"sources">> => []}}, #{}),
|
||||||
ConfFile = prepare_conf_file(?FUNCTION_NAME, ConfBin, Config),
|
ConfFile = prepare_conf_file(?FUNCTION_NAME, ConfBin, Config),
|
||||||
ok = emqx_conf_cli:conf(["load", "--replace", ConfFile]),
|
ok = emqx_conf_cli:conf(["load", "--replace", ConfFile]),
|
||||||
?assertEqual(#{<<"sources">> => []}, emqx_conf:get_raw([Authz])),
|
?assertMatch(#{<<"sources">> := []}, emqx_conf:get_raw([Authz])),
|
||||||
|
|
||||||
ConfBin0 = hocon_pp:do(#{<<"authorization">> => Conf#{<<"sources">> => []}}, #{}),
|
ConfBin0 = hocon_pp:do(#{<<"authorization">> => Conf#{<<"sources">> => []}}, #{}),
|
||||||
ConfFile0 = prepare_conf_file(?FUNCTION_NAME, ConfBin0, Config),
|
ConfFile0 = prepare_conf_file(?FUNCTION_NAME, ConfBin0, Config),
|
||||||
|
@ -73,6 +73,10 @@ t_conflict_mix_conf(Config) ->
|
||||||
AuthNInit = emqx_conf:get_raw([authentication]),
|
AuthNInit = emqx_conf:get_raw([authentication]),
|
||||||
Redis = #{
|
Redis = #{
|
||||||
<<"backend">> => <<"redis">>,
|
<<"backend">> => <<"redis">>,
|
||||||
|
<<"database">> => 0,
|
||||||
|
<<"password_hash_algorithm">> =>
|
||||||
|
#{<<"name">> => <<"sha256">>, <<"salt_position">> => <<"prefix">>},
|
||||||
|
<<"pool_size">> => 8,
|
||||||
<<"cmd">> => <<"HMGET mqtt_user:${username} password_hash salt">>,
|
<<"cmd">> => <<"HMGET mqtt_user:${username} password_hash salt">>,
|
||||||
<<"enable">> => false,
|
<<"enable">> => false,
|
||||||
<<"mechanism">> => <<"password_based">>,
|
<<"mechanism">> => <<"password_based">>,
|
||||||
|
@ -85,10 +89,15 @@ t_conflict_mix_conf(Config) ->
|
||||||
ConfFile = prepare_conf_file(?FUNCTION_NAME, ConfBin, Config),
|
ConfFile = prepare_conf_file(?FUNCTION_NAME, ConfBin, Config),
|
||||||
%% init with redis sources
|
%% init with redis sources
|
||||||
ok = emqx_conf_cli:conf(["load", "--replace", ConfFile]),
|
ok = emqx_conf_cli:conf(["load", "--replace", ConfFile]),
|
||||||
?assertMatch([Redis], emqx_conf:get_raw([authentication])),
|
[RedisRaw] = emqx_conf:get_raw([authentication]),
|
||||||
|
?assertEqual(
|
||||||
|
maps:to_list(Redis),
|
||||||
|
maps:to_list(maps:remove(<<"ssl">>, RedisRaw)),
|
||||||
|
{Redis, RedisRaw}
|
||||||
|
),
|
||||||
%% change redis type from single to cluster
|
%% change redis type from single to cluster
|
||||||
%% the server field will become servers field
|
%% the server field will become servers field
|
||||||
RedisCluster = maps:remove(<<"server">>, Redis#{
|
RedisCluster = maps:without([<<"server">>, <<"database">>], Redis#{
|
||||||
<<"redis_type">> => cluster,
|
<<"redis_type">> => cluster,
|
||||||
<<"servers">> => [<<"127.0.0.1:6379">>]
|
<<"servers">> => [<<"127.0.0.1:6379">>]
|
||||||
}),
|
}),
|
||||||
|
|
|
@ -107,6 +107,8 @@ config_key_path() ->
|
||||||
|
|
||||||
pre_config_update([?ROOT_KEY], RawConf, RawConf) ->
|
pre_config_update([?ROOT_KEY], RawConf, RawConf) ->
|
||||||
{ok, RawConf};
|
{ok, RawConf};
|
||||||
|
pre_config_update([?ROOT_KEY], {force_update, NewConf}, RawConf) ->
|
||||||
|
pre_config_update([?ROOT_KEY], NewConf, RawConf);
|
||||||
pre_config_update([?ROOT_KEY], NewConf, _RawConf) ->
|
pre_config_update([?ROOT_KEY], NewConf, _RawConf) ->
|
||||||
case multi_validate_connector_names(NewConf) of
|
case multi_validate_connector_names(NewConf) of
|
||||||
ok ->
|
ok ->
|
||||||
|
@ -135,23 +137,16 @@ pre_config_update(Path, Conf, _OldConfig) when is_map(Conf) ->
|
||||||
operation_to_enable(disable) -> false;
|
operation_to_enable(disable) -> false;
|
||||||
operation_to_enable(enable) -> true.
|
operation_to_enable(enable) -> true.
|
||||||
|
|
||||||
|
post_config_update([?ROOT_KEY], {force_update, _}, NewConf, OldConf, _AppEnv) ->
|
||||||
|
#{added := Added, removed := Removed, changed := Updated} =
|
||||||
|
diff_confs(NewConf, OldConf),
|
||||||
|
perform_connector_changes(Removed, Added, Updated);
|
||||||
post_config_update([?ROOT_KEY], _Req, NewConf, OldConf, _AppEnv) ->
|
post_config_update([?ROOT_KEY], _Req, NewConf, OldConf, _AppEnv) ->
|
||||||
#{added := Added, removed := Removed, changed := Updated} =
|
#{added := Added, removed := Removed, changed := Updated} =
|
||||||
diff_confs(NewConf, OldConf),
|
diff_confs(NewConf, OldConf),
|
||||||
case ensure_no_channels(Removed) of
|
case ensure_no_channels(Removed) of
|
||||||
ok ->
|
ok ->
|
||||||
%% The config update will be failed if any task in `perform_connector_changes` failed.
|
perform_connector_changes(Removed, Added, Updated);
|
||||||
Result = perform_connector_changes([
|
|
||||||
#{action => fun emqx_connector_resource:remove/4, data => Removed},
|
|
||||||
#{
|
|
||||||
action => fun emqx_connector_resource:create/4,
|
|
||||||
data => Added,
|
|
||||||
on_exception_fn => fun emqx_connector_resource:remove/4
|
|
||||||
},
|
|
||||||
#{action => fun emqx_connector_resource:update/4, data => Updated}
|
|
||||||
]),
|
|
||||||
?tp(connector_post_config_update_done, #{}),
|
|
||||||
Result;
|
|
||||||
{error, Error} ->
|
{error, Error} ->
|
||||||
{error, Error}
|
{error, Error}
|
||||||
end;
|
end;
|
||||||
|
@ -175,6 +170,20 @@ post_config_update([?ROOT_KEY, Type, Name], _Req, NewConf, OldConf, _AppEnvs) ->
|
||||||
?tp(connector_post_config_update_done, #{}),
|
?tp(connector_post_config_update_done, #{}),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
%% The config update will be failed if any task in `perform_connector_changes` failed.
|
||||||
|
perform_connector_changes(Removed, Added, Updated) ->
|
||||||
|
Result = perform_connector_changes([
|
||||||
|
#{action => fun emqx_connector_resource:remove/4, data => Removed},
|
||||||
|
#{
|
||||||
|
action => fun emqx_connector_resource:create/4,
|
||||||
|
data => Added,
|
||||||
|
on_exception_fn => fun emqx_connector_resource:remove/4
|
||||||
|
},
|
||||||
|
#{action => fun emqx_connector_resource:update/4, data => Updated}
|
||||||
|
]),
|
||||||
|
?tp(connector_post_config_update_done, #{}),
|
||||||
|
Result.
|
||||||
|
|
||||||
list() ->
|
list() ->
|
||||||
maps:fold(
|
maps:fold(
|
||||||
fun(Type, NameAndConf, Connectors) ->
|
fun(Type, NameAndConf, Connectors) ->
|
||||||
|
|
|
@ -315,8 +315,6 @@ t_none_ref(_Config) ->
|
||||||
),
|
),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
namespace() -> undefined.
|
|
||||||
|
|
||||||
t_sub_fields(_Config) ->
|
t_sub_fields(_Config) ->
|
||||||
Spec = #{
|
Spec = #{
|
||||||
post => #{
|
post => #{
|
||||||
|
@ -815,6 +813,9 @@ to_schema(Body) ->
|
||||||
post => #{requestBody => Body, responses => #{200 => <<"ok">>}}
|
post => #{requestBody => Body, responses => #{200 => <<"ok">>}}
|
||||||
}.
|
}.
|
||||||
|
|
||||||
|
%% Don't warning hocon callback namespace/0 undef.
|
||||||
|
namespace() -> atom_to_list(?MODULE).
|
||||||
|
|
||||||
fields(good_ref) ->
|
fields(good_ref) ->
|
||||||
[
|
[
|
||||||
{'webhook-host', mk(emqx_schema:ip_port(), #{default => <<"127.0.0.1:80">>})},
|
{'webhook-host', mk(emqx_schema:ip_port(), #{default => <<"127.0.0.1:80">>})},
|
||||||
|
|
|
@ -197,6 +197,10 @@ subscriptions(get, #{
|
||||||
case emqx_gateway_http:list_client_subscriptions(GwName, ClientId) of
|
case emqx_gateway_http:list_client_subscriptions(GwName, ClientId) of
|
||||||
{error, not_found} ->
|
{error, not_found} ->
|
||||||
return_http_error(404, "client process not found");
|
return_http_error(404, "client process not found");
|
||||||
|
{error, ignored} ->
|
||||||
|
return_http_error(
|
||||||
|
400, "get subscriptions failed: unsupported"
|
||||||
|
);
|
||||||
{error, Reason} ->
|
{error, Reason} ->
|
||||||
return_http_error(400, Reason);
|
return_http_error(400, Reason);
|
||||||
{ok, Subs} ->
|
{ok, Subs} ->
|
||||||
|
@ -222,7 +226,13 @@ subscriptions(post, #{
|
||||||
)
|
)
|
||||||
of
|
of
|
||||||
{error, not_found} ->
|
{error, not_found} ->
|
||||||
return_http_error(404, "client process not found");
|
return_http_error(
|
||||||
|
404, "client process not found"
|
||||||
|
);
|
||||||
|
{error, ignored} ->
|
||||||
|
return_http_error(
|
||||||
|
400, "subscribe failed: unsupported"
|
||||||
|
);
|
||||||
{error, Reason} ->
|
{error, Reason} ->
|
||||||
return_http_error(400, Reason);
|
return_http_error(400, Reason);
|
||||||
{ok, {NTopic, NSubOpts}} ->
|
{ok, {NTopic, NSubOpts}} ->
|
||||||
|
@ -241,8 +251,14 @@ subscriptions(delete, #{
|
||||||
with_gateway(Name0, fun(GwName, _) ->
|
with_gateway(Name0, fun(GwName, _) ->
|
||||||
case lookup_topic(GwName, ClientId, Topic) of
|
case lookup_topic(GwName, ClientId, Topic) of
|
||||||
{ok, _} ->
|
{ok, _} ->
|
||||||
_ = emqx_gateway_http:client_unsubscribe(GwName, ClientId, Topic),
|
case emqx_gateway_http:client_unsubscribe(GwName, ClientId, Topic) of
|
||||||
{204};
|
{error, ignored} ->
|
||||||
|
return_http_error(
|
||||||
|
400, "unsubscribe failed: unsupported"
|
||||||
|
);
|
||||||
|
_ ->
|
||||||
|
{204}
|
||||||
|
end;
|
||||||
{error, not_found} ->
|
{error, not_found} ->
|
||||||
return_http_error(404, "Resource not found")
|
return_http_error(404, "Resource not found")
|
||||||
end
|
end
|
||||||
|
|
|
@ -378,6 +378,8 @@ client_call(GwName, ClientId, Req) ->
|
||||||
of
|
of
|
||||||
undefined ->
|
undefined ->
|
||||||
{error, not_found};
|
{error, not_found};
|
||||||
|
ignored ->
|
||||||
|
{error, ignored};
|
||||||
Res ->
|
Res ->
|
||||||
Res
|
Res
|
||||||
catch
|
catch
|
||||||
|
|
|
@ -174,7 +174,7 @@ fields(dtls_opts) ->
|
||||||
reuse_sessions => true,
|
reuse_sessions => true,
|
||||||
versions => dtls_all_available
|
versions => dtls_all_available
|
||||||
},
|
},
|
||||||
false
|
_IsRanchListener = false
|
||||||
).
|
).
|
||||||
|
|
||||||
desc(gateway) ->
|
desc(gateway) ->
|
||||||
|
|
|
@ -273,7 +273,7 @@ merge_default(Udp, Options) ->
|
||||||
udp ->
|
udp ->
|
||||||
{udp_options, default_udp_options()};
|
{udp_options, default_udp_options()};
|
||||||
dtls ->
|
dtls ->
|
||||||
{udp_options, default_udp_options()};
|
{dtls_options, default_udp_options()};
|
||||||
tcp ->
|
tcp ->
|
||||||
{tcp_options, default_tcp_options()};
|
{tcp_options, default_tcp_options()};
|
||||||
ssl ->
|
ssl ->
|
||||||
|
@ -525,9 +525,11 @@ esockd_opts(Type, Opts0) when ?IS_ESOCKD_LISTENER(Type) ->
|
||||||
udp ->
|
udp ->
|
||||||
Opts2#{udp_options => sock_opts(udp_options, Opts0)};
|
Opts2#{udp_options => sock_opts(udp_options, Opts0)};
|
||||||
dtls ->
|
dtls ->
|
||||||
|
UDPOpts = sock_opts(udp_options, Opts0),
|
||||||
|
DTLSOpts = ssl_opts(dtls_options, Opts0),
|
||||||
Opts2#{
|
Opts2#{
|
||||||
udp_options => sock_opts(udp_options, Opts0),
|
udp_options => UDPOpts,
|
||||||
dtls_options => ssl_opts(dtls_options, Opts0)
|
dtls_options => DTLSOpts
|
||||||
}
|
}
|
||||||
end
|
end
|
||||||
).
|
).
|
||||||
|
@ -541,12 +543,37 @@ sock_opts(Name, Opts) ->
|
||||||
).
|
).
|
||||||
|
|
||||||
ssl_opts(Name, Opts) ->
|
ssl_opts(Name, Opts) ->
|
||||||
Type =
|
SSLOpts = maps:get(Name, Opts, #{}),
|
||||||
case Name of
|
emqx_utils:run_fold(
|
||||||
ssl_options -> tls;
|
[
|
||||||
dtls_options -> dtls
|
fun ssl_opts_crl_config/2,
|
||||||
end,
|
fun ssl_opts_drop_unsupported/2,
|
||||||
emqx_tls_lib:to_server_opts(Type, maps:get(Name, Opts, #{})).
|
fun ssl_server_opts/2
|
||||||
|
],
|
||||||
|
SSLOpts,
|
||||||
|
Name
|
||||||
|
).
|
||||||
|
|
||||||
|
ssl_opts_crl_config(#{enable_crl_check := true} = SSLOpts, _Name) ->
|
||||||
|
HTTPTimeout = emqx_config:get([crl_cache, http_timeout], timer:seconds(15)),
|
||||||
|
NSSLOpts = maps:remove(enable_crl_check, SSLOpts),
|
||||||
|
NSSLOpts#{
|
||||||
|
%% `crl_check => true' doesn't work
|
||||||
|
crl_check => peer,
|
||||||
|
crl_cache => {emqx_ssl_crl_cache, {internal, [{http, HTTPTimeout}]}}
|
||||||
|
};
|
||||||
|
ssl_opts_crl_config(SSLOpts, _Name) ->
|
||||||
|
%% NOTE: Removing this because DTLS doesn't like any unknown options.
|
||||||
|
maps:remove(enable_crl_check, SSLOpts).
|
||||||
|
|
||||||
|
ssl_opts_drop_unsupported(SSLOpts, _Name) ->
|
||||||
|
%% TODO: Support OCSP stapling
|
||||||
|
maps:without([ocsp], SSLOpts).
|
||||||
|
|
||||||
|
ssl_server_opts(SSLOpts, ssl_options) ->
|
||||||
|
emqx_tls_lib:to_server_opts(tls, SSLOpts);
|
||||||
|
ssl_server_opts(SSLOpts, dtls_options) ->
|
||||||
|
emqx_tls_lib:to_server_opts(dtls, SSLOpts).
|
||||||
|
|
||||||
ranch_opts(Type, ListenOn, Opts) ->
|
ranch_opts(Type, ListenOn, Opts) ->
|
||||||
NumAcceptors = maps:get(acceptors, Opts, 4),
|
NumAcceptors = maps:get(acceptors, Opts, 4),
|
||||||
|
@ -635,7 +662,7 @@ default_tcp_options() ->
|
||||||
].
|
].
|
||||||
|
|
||||||
default_udp_options() ->
|
default_udp_options() ->
|
||||||
[binary].
|
[].
|
||||||
|
|
||||||
default_subopts() ->
|
default_subopts() ->
|
||||||
%% Retain Handling
|
%% Retain Handling
|
||||||
|
|
|
@ -238,9 +238,12 @@ http_authz_config() ->
|
||||||
init_gateway_conf() ->
|
init_gateway_conf() ->
|
||||||
ok = emqx_common_test_helpers:load_config(
|
ok = emqx_common_test_helpers:load_config(
|
||||||
emqx_gateway_schema,
|
emqx_gateway_schema,
|
||||||
merge_conf([X:default_config() || X <- ?CONFS], [])
|
merge_conf(list_gateway_conf(), [])
|
||||||
).
|
).
|
||||||
|
|
||||||
|
list_gateway_conf() ->
|
||||||
|
[X:default_config() || X <- ?CONFS].
|
||||||
|
|
||||||
merge_conf([Conf | T], Acc) ->
|
merge_conf([Conf | T], Acc) ->
|
||||||
case re:run(Conf, "\s*gateway\\.(.*)", [global, {capture, all_but_first, list}, dotall]) of
|
case re:run(Conf, "\s*gateway\\.(.*)", [global, {capture, all_but_first, list}, dotall]) of
|
||||||
{match, [[Content]]} ->
|
{match, [[Content]]} ->
|
||||||
|
|
|
@ -22,7 +22,7 @@
|
||||||
-include_lib("eunit/include/eunit.hrl").
|
-include_lib("eunit/include/eunit.hrl").
|
||||||
-include_lib("common_test/include/ct.hrl").
|
-include_lib("common_test/include/ct.hrl").
|
||||||
|
|
||||||
-import(emqx_gateway_auth_ct, [init_gateway_conf/0, with_resource/3]).
|
-import(emqx_gateway_auth_ct, [with_resource/3]).
|
||||||
|
|
||||||
-define(checkMatch(Guard),
|
-define(checkMatch(Guard),
|
||||||
(fun(Expr) ->
|
(fun(Expr) ->
|
||||||
|
@ -54,40 +54,37 @@ groups() ->
|
||||||
emqx_gateway_auth_ct:init_groups(?MODULE, ?AUTHNS).
|
emqx_gateway_auth_ct:init_groups(?MODULE, ?AUTHNS).
|
||||||
|
|
||||||
init_per_group(AuthName, Conf) ->
|
init_per_group(AuthName, Conf) ->
|
||||||
ct:pal("on group start:~p~n", [AuthName]),
|
Apps = emqx_cth_suite:start(
|
||||||
{ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000),
|
[
|
||||||
emqx_gateway_auth_ct:start_auth(AuthName),
|
emqx_conf,
|
||||||
timer:sleep(500),
|
emqx_auth,
|
||||||
Conf.
|
emqx_auth_http,
|
||||||
|
emqx_management,
|
||||||
|
{emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"},
|
||||||
|
{emqx_gateway, emqx_gateway_auth_ct:list_gateway_conf()}
|
||||||
|
| emqx_gateway_test_utils:all_gateway_apps()
|
||||||
|
],
|
||||||
|
#{work_dir => emqx_cth_suite:work_dir(Conf)}
|
||||||
|
),
|
||||||
|
_ = emqx_common_test_http:create_default_app(),
|
||||||
|
ok = emqx_gateway_auth_ct:start_auth(AuthName),
|
||||||
|
[{group_apps, Apps} | Conf].
|
||||||
|
|
||||||
end_per_group(AuthName, Conf) ->
|
end_per_group(AuthName, Conf) ->
|
||||||
ct:pal("on group stop:~p~n", [AuthName]),
|
ok = emqx_gateway_auth_ct:stop_auth(AuthName),
|
||||||
emqx_gateway_auth_ct:stop_auth(AuthName),
|
_ = emqx_common_test_http:delete_default_app(),
|
||||||
|
ok = emqx_cth_suite:stop(?config(group_apps, Conf)),
|
||||||
Conf.
|
Conf.
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
emqx_gateway_test_utils:load_all_gateway_apps(),
|
{ok, Apps1} = application:ensure_all_started(grpc),
|
||||||
emqx_config:erase(gateway),
|
{ok, Apps2} = application:ensure_all_started(cowboy),
|
||||||
init_gateway_conf(),
|
{ok, _} = emqx_gateway_auth_ct:start(),
|
||||||
emqx_mgmt_api_test_util:init_suite([grpc, emqx_conf, emqx_auth, emqx_auth_http, emqx_gateway]),
|
[{suite_apps, Apps1 ++ Apps2} | Config].
|
||||||
application:ensure_all_started(cowboy),
|
|
||||||
emqx_gateway_auth_ct:start(),
|
|
||||||
timer:sleep(500),
|
|
||||||
Config.
|
|
||||||
|
|
||||||
end_per_suite(Config) ->
|
end_per_suite(Config) ->
|
||||||
emqx_gateway_auth_ct:stop(),
|
ok = emqx_gateway_auth_ct:stop(),
|
||||||
emqx_config:erase(gateway),
|
ok = emqx_cth_suite:stop_apps(?config(suite_apps, Config)),
|
||||||
emqx_mgmt_api_test_util:end_suite([
|
|
||||||
cowboy, emqx_conf, emqx_auth, emqx_auth_http, emqx_gateway, grpc
|
|
||||||
]),
|
|
||||||
Config.
|
|
||||||
|
|
||||||
init_per_testcase(_Case, Config) ->
|
|
||||||
{ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000),
|
|
||||||
Config.
|
|
||||||
|
|
||||||
end_per_testcase(_Case, Config) ->
|
|
||||||
Config.
|
Config.
|
||||||
|
|
||||||
%%------------------------------------------------------------------------------
|
%%------------------------------------------------------------------------------
|
||||||
|
|
|
@ -22,7 +22,7 @@
|
||||||
-include_lib("eunit/include/eunit.hrl").
|
-include_lib("eunit/include/eunit.hrl").
|
||||||
-include_lib("common_test/include/ct.hrl").
|
-include_lib("common_test/include/ct.hrl").
|
||||||
|
|
||||||
-import(emqx_gateway_auth_ct, [init_gateway_conf/0, with_resource/3]).
|
-import(emqx_gateway_auth_ct, [with_resource/3]).
|
||||||
|
|
||||||
-define(checkMatch(Guard),
|
-define(checkMatch(Guard),
|
||||||
(fun(Expr) ->
|
(fun(Expr) ->
|
||||||
|
@ -54,44 +54,33 @@ groups() ->
|
||||||
emqx_gateway_auth_ct:init_groups(?MODULE, ?AUTHNS).
|
emqx_gateway_auth_ct:init_groups(?MODULE, ?AUTHNS).
|
||||||
|
|
||||||
init_per_group(AuthName, Conf) ->
|
init_per_group(AuthName, Conf) ->
|
||||||
{ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000),
|
Apps = emqx_cth_suite:start(
|
||||||
ok = emqx_authz_test_lib:reset_authorizers(),
|
[
|
||||||
emqx_gateway_auth_ct:start_auth(AuthName),
|
{emqx_conf, "authorization { no_match = deny, cache { enable = false } }"},
|
||||||
timer:sleep(500),
|
emqx_auth,
|
||||||
Conf.
|
emqx_auth_http,
|
||||||
|
{emqx_gateway, emqx_gateway_auth_ct:list_gateway_conf()}
|
||||||
|
| emqx_gateway_test_utils:all_gateway_apps()
|
||||||
|
],
|
||||||
|
#{work_dir => emqx_cth_suite:work_dir(Conf)}
|
||||||
|
),
|
||||||
|
ok = emqx_gateway_auth_ct:start_auth(AuthName),
|
||||||
|
[{group_apps, Apps} | Conf].
|
||||||
|
|
||||||
end_per_group(AuthName, Conf) ->
|
end_per_group(AuthName, Conf) ->
|
||||||
emqx_gateway_auth_ct:stop_auth(AuthName),
|
ok = emqx_gateway_auth_ct:stop_auth(AuthName),
|
||||||
|
ok = emqx_cth_suite:stop(?config(group_apps, Conf)),
|
||||||
Conf.
|
Conf.
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
emqx_config:erase(gateway),
|
{ok, Apps1} = application:ensure_all_started(grpc),
|
||||||
emqx_gateway_test_utils:load_all_gateway_apps(),
|
{ok, Apps2} = application:ensure_all_started(cowboy),
|
||||||
init_gateway_conf(),
|
{ok, _} = emqx_gateway_auth_ct:start(),
|
||||||
emqx_mgmt_api_test_util:init_suite([
|
[{suite_apps, Apps1 ++ Apps2} | Config].
|
||||||
grpc, emqx_conf, emqx_auth, emqx_auth_http, emqx_gateway
|
|
||||||
]),
|
|
||||||
meck:new(emqx_authz_file, [non_strict, passthrough, no_history, no_link]),
|
|
||||||
meck:expect(emqx_authz_file, create, fun(S) -> S end),
|
|
||||||
application:ensure_all_started(cowboy),
|
|
||||||
emqx_gateway_auth_ct:start(),
|
|
||||||
Config.
|
|
||||||
|
|
||||||
end_per_suite(Config) ->
|
end_per_suite(Config) ->
|
||||||
meck:unload(emqx_authz_file),
|
ok = emqx_gateway_auth_ct:stop(),
|
||||||
emqx_gateway_auth_ct:stop(),
|
ok = emqx_cth_suite:stop_apps(?config(suite_apps, Config)),
|
||||||
ok = emqx_authz_test_lib:restore_authorizers(),
|
|
||||||
emqx_config:erase(gateway),
|
|
||||||
emqx_mgmt_api_test_util:end_suite([
|
|
||||||
emqx_gateway, emqx_auth_http, emqx_auth, emqx_conf, grpc
|
|
||||||
]),
|
|
||||||
Config.
|
|
||||||
|
|
||||||
init_per_testcase(_Case, Config) ->
|
|
||||||
{ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000),
|
|
||||||
Config.
|
|
||||||
|
|
||||||
end_per_testcase(_Case, Config) ->
|
|
||||||
Config.
|
Config.
|
||||||
|
|
||||||
%%------------------------------------------------------------------------------
|
%%------------------------------------------------------------------------------
|
||||||
|
|
|
@ -103,12 +103,18 @@ assert_fields_exist(Ks, Map) ->
|
||||||
end,
|
end,
|
||||||
Ks
|
Ks
|
||||||
).
|
).
|
||||||
|
|
||||||
load_all_gateway_apps() ->
|
load_all_gateway_apps() ->
|
||||||
application:load(emqx_gateway_stomp),
|
emqx_cth_suite:load_apps(all_gateway_apps()).
|
||||||
application:load(emqx_gateway_mqttsn),
|
|
||||||
application:load(emqx_gateway_coap),
|
all_gateway_apps() ->
|
||||||
application:load(emqx_gateway_lwm2m),
|
[
|
||||||
application:load(emqx_gateway_exproto).
|
emqx_gateway_stomp,
|
||||||
|
emqx_gateway_mqttsn,
|
||||||
|
emqx_gateway_coap,
|
||||||
|
emqx_gateway_lwm2m,
|
||||||
|
emqx_gateway_exproto
|
||||||
|
].
|
||||||
|
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
%% http
|
%% http
|
||||||
|
|
|
@ -20,7 +20,6 @@
|
||||||
-compile(nowarn_export_all).
|
-compile(nowarn_export_all).
|
||||||
|
|
||||||
-include_lib("eunit/include/eunit.hrl").
|
-include_lib("eunit/include/eunit.hrl").
|
||||||
-include_lib("emqx/include/emqx_hooks.hrl").
|
|
||||||
-include_lib("emqx/include/emqx.hrl").
|
-include_lib("emqx/include/emqx.hrl").
|
||||||
-include_lib("emqx/include/emqx_mqtt.hrl").
|
-include_lib("emqx/include/emqx_mqtt.hrl").
|
||||||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||||
|
@ -44,14 +43,6 @@
|
||||||
-define(TCPOPTS, [binary, {active, false}]).
|
-define(TCPOPTS, [binary, {active, false}]).
|
||||||
-define(DTLSOPTS, [binary, {active, false}, {protocol, dtls}]).
|
-define(DTLSOPTS, [binary, {active, false}, {protocol, dtls}]).
|
||||||
|
|
||||||
-define(PORT, 7993).
|
|
||||||
|
|
||||||
-define(DEFAULT_CLIENT, #{
|
|
||||||
proto_name => <<"demo">>,
|
|
||||||
proto_ver => <<"v0.1">>,
|
|
||||||
clientid => <<"test_client_1">>
|
|
||||||
}).
|
|
||||||
|
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
-define(CONF_DEFAULT, <<
|
-define(CONF_DEFAULT, <<
|
||||||
"\n"
|
"\n"
|
||||||
|
@ -126,15 +117,33 @@ init_per_group(_, Cfg) ->
|
||||||
|
|
||||||
init_per_group(LisType, ServiceName, Scheme, Cfg) ->
|
init_per_group(LisType, ServiceName, Scheme, Cfg) ->
|
||||||
Svrs = emqx_exproto_echo_svr:start(Scheme),
|
Svrs = emqx_exproto_echo_svr:start(Scheme),
|
||||||
application:load(emqx_gateway_exproto),
|
Addrs = lists:flatten(io_lib:format("~s://127.0.0.1:9001", [Scheme])),
|
||||||
emqx_common_test_helpers:start_apps(
|
GWConfig = #{
|
||||||
[emqx_conf, emqx_auth, emqx_gateway],
|
server => #{bind => 9100},
|
||||||
fun(App) ->
|
idle_timeout => 5000,
|
||||||
set_special_cfg(App, LisType, ServiceName, Scheme)
|
mountpoint => <<"ct/">>,
|
||||||
end
|
handler => #{
|
||||||
|
address => Addrs,
|
||||||
|
service_name => ServiceName,
|
||||||
|
ssl_options => #{enable => Scheme == https}
|
||||||
|
},
|
||||||
|
listeners => listener_confs(LisType)
|
||||||
|
},
|
||||||
|
Apps = emqx_cth_suite:start(
|
||||||
|
[
|
||||||
|
emqx_conf,
|
||||||
|
emqx_auth,
|
||||||
|
{emqx_gateway, #{
|
||||||
|
config =>
|
||||||
|
#{gateway => #{exproto => GWConfig}}
|
||||||
|
}},
|
||||||
|
emqx_gateway_exproto
|
||||||
|
],
|
||||||
|
#{work_dir => emqx_cth_suite:work_dir(Cfg)}
|
||||||
),
|
),
|
||||||
[
|
[
|
||||||
{servers, Svrs},
|
{servers, Svrs},
|
||||||
|
{apps, Apps},
|
||||||
{listener_type, LisType},
|
{listener_type, LisType},
|
||||||
{service_name, ServiceName},
|
{service_name, ServiceName},
|
||||||
{grpc_client_scheme, Scheme}
|
{grpc_client_scheme, Scheme}
|
||||||
|
@ -142,8 +151,7 @@ init_per_group(LisType, ServiceName, Scheme, Cfg) ->
|
||||||
].
|
].
|
||||||
|
|
||||||
end_per_group(_, Cfg) ->
|
end_per_group(_, Cfg) ->
|
||||||
emqx_config:erase(gateway),
|
ok = emqx_cth_suite:stop(proplists:get_value(apps, Cfg)),
|
||||||
emqx_common_test_helpers:stop_apps([emqx_gateway, emqx_auth, emqx_conf]),
|
|
||||||
emqx_exproto_echo_svr:stop(proplists:get_value(servers, Cfg)).
|
emqx_exproto_echo_svr:stop(proplists:get_value(servers, Cfg)).
|
||||||
|
|
||||||
init_per_testcase(TestCase, Cfg) when
|
init_per_testcase(TestCase, Cfg) when
|
||||||
|
@ -159,28 +167,13 @@ init_per_testcase(_TestCase, Cfg) ->
|
||||||
end_per_testcase(_TestCase, _Cfg) ->
|
end_per_testcase(_TestCase, _Cfg) ->
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
set_special_cfg(emqx_gateway, LisType, ServiceName, Scheme) ->
|
|
||||||
Addrs = lists:flatten(io_lib:format("~s://127.0.0.1:9001", [Scheme])),
|
|
||||||
emqx_config:put(
|
|
||||||
[gateway, exproto],
|
|
||||||
#{
|
|
||||||
server => #{bind => 9100},
|
|
||||||
idle_timeout => 5000,
|
|
||||||
mountpoint => <<"ct/">>,
|
|
||||||
handler => #{
|
|
||||||
address => Addrs,
|
|
||||||
service_name => ServiceName,
|
|
||||||
ssl_options => #{enable => Scheme == https}
|
|
||||||
},
|
|
||||||
listeners => listener_confs(LisType)
|
|
||||||
}
|
|
||||||
);
|
|
||||||
set_special_cfg(_, _, _, _) ->
|
|
||||||
ok.
|
|
||||||
|
|
||||||
listener_confs(Type) ->
|
listener_confs(Type) ->
|
||||||
Default = #{bind => 7993, acceptors => 8},
|
Default = #{
|
||||||
#{Type => #{'default' => maps:merge(Default, server_socketopts(Type))}}.
|
bind => 7993,
|
||||||
|
max_connections => 64,
|
||||||
|
access_rules => ["allow all"]
|
||||||
|
},
|
||||||
|
#{Type => #{'default' => maps:merge(Default, socketopts(Type))}}.
|
||||||
|
|
||||||
default_config() ->
|
default_config() ->
|
||||||
?CONF_DEFAULT.
|
?CONF_DEFAULT.
|
||||||
|
@ -635,24 +628,29 @@ close({dtls, Sock}) ->
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
%% Server-Opts
|
%% Server-Opts
|
||||||
|
|
||||||
server_socketopts(tcp) ->
|
socketopts(tcp) ->
|
||||||
#{tcp_options => server_tcp_opts()};
|
|
||||||
server_socketopts(ssl) ->
|
|
||||||
#{
|
#{
|
||||||
tcp_options => server_tcp_opts(),
|
acceptors => 8,
|
||||||
ssl_options => server_ssl_opts()
|
tcp_options => tcp_opts()
|
||||||
};
|
};
|
||||||
server_socketopts(udp) ->
|
socketopts(ssl) ->
|
||||||
#{udp_options => server_udp_opts()};
|
|
||||||
server_socketopts(dtls) ->
|
|
||||||
#{
|
#{
|
||||||
udp_options => server_udp_opts(),
|
acceptors => 8,
|
||||||
dtls_options => server_dtls_opts()
|
tcp_options => tcp_opts(),
|
||||||
|
ssl_options => ssl_opts()
|
||||||
|
};
|
||||||
|
socketopts(udp) ->
|
||||||
|
#{udp_options => udp_opts()};
|
||||||
|
socketopts(dtls) ->
|
||||||
|
#{
|
||||||
|
acceptors => 8,
|
||||||
|
udp_options => udp_opts(),
|
||||||
|
dtls_options => dtls_opts()
|
||||||
}.
|
}.
|
||||||
|
|
||||||
server_tcp_opts() ->
|
tcp_opts() ->
|
||||||
maps:merge(
|
maps:merge(
|
||||||
server_udp_opts(),
|
udp_opts(),
|
||||||
#{
|
#{
|
||||||
send_timeout => 15000,
|
send_timeout => 15000,
|
||||||
send_timeout_close => true,
|
send_timeout_close => true,
|
||||||
|
@ -661,15 +659,17 @@ server_tcp_opts() ->
|
||||||
}
|
}
|
||||||
).
|
).
|
||||||
|
|
||||||
server_udp_opts() ->
|
udp_opts() ->
|
||||||
#{
|
#{
|
||||||
recbuf => 1024,
|
%% NOTE
|
||||||
sndbuf => 1024,
|
%% Making those too small will lead to inability to accept connections.
|
||||||
buffer => 1024,
|
recbuf => 2048,
|
||||||
|
sndbuf => 2048,
|
||||||
|
buffer => 2048,
|
||||||
reuseaddr => true
|
reuseaddr => true
|
||||||
}.
|
}.
|
||||||
|
|
||||||
server_ssl_opts() ->
|
ssl_opts() ->
|
||||||
Certs = certs("key.pem", "cert.pem", "cacert.pem"),
|
Certs = certs("key.pem", "cert.pem", "cacert.pem"),
|
||||||
maps:merge(
|
maps:merge(
|
||||||
Certs,
|
Certs,
|
||||||
|
@ -684,8 +684,8 @@ server_ssl_opts() ->
|
||||||
}
|
}
|
||||||
).
|
).
|
||||||
|
|
||||||
server_dtls_opts() ->
|
dtls_opts() ->
|
||||||
maps:merge(server_ssl_opts(), #{versions => ['dtlsv1.2', 'dtlsv1']}).
|
maps:merge(ssl_opts(), #{versions => ['dtlsv1.2', 'dtlsv1']}).
|
||||||
|
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
%% Client-Opts
|
%% Client-Opts
|
||||||
|
|
|
@ -66,7 +66,6 @@
|
||||||
-elvis([{elvis_style, dont_repeat_yourself, disable}]).
|
-elvis([{elvis_style, dont_repeat_yourself, disable}]).
|
||||||
|
|
||||||
-define(CONF_DEFAULT, <<
|
-define(CONF_DEFAULT, <<
|
||||||
"\n"
|
|
||||||
"gateway.mqttsn {\n"
|
"gateway.mqttsn {\n"
|
||||||
" gateway_id = 1\n"
|
" gateway_id = 1\n"
|
||||||
" broadcast = true\n"
|
" broadcast = true\n"
|
||||||
|
@ -89,6 +88,20 @@
|
||||||
"}\n"
|
"}\n"
|
||||||
>>).
|
>>).
|
||||||
|
|
||||||
|
-define(CONF_DTLS, <<
|
||||||
|
"\n"
|
||||||
|
"gateway.mqttsn {"
|
||||||
|
" listeners.dtls.default {\n"
|
||||||
|
" bind = 1885\n"
|
||||||
|
" dtls_options {\n"
|
||||||
|
" cacertfile = \"${cacertfile}\"\n"
|
||||||
|
" certfile = \"${certfile}\"\n"
|
||||||
|
" keyfile = \"${keyfile}\"\n"
|
||||||
|
" }\n"
|
||||||
|
" }\n"
|
||||||
|
"}\n"
|
||||||
|
>>).
|
||||||
|
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
%% Setups
|
%% Setups
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
@ -97,9 +110,22 @@ all() ->
|
||||||
emqx_common_test_helpers:all(?MODULE).
|
emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
|
PrivDir = ?config(priv_dir, Config),
|
||||||
|
Root = emqx_cth_tls:gen_cert(#{key => ec, issuer => root}),
|
||||||
|
Server = emqx_cth_tls:gen_cert(#{key => ec, issuer => Root}),
|
||||||
|
{CACertfile, _} = emqx_cth_tls:write_cert(PrivDir, Root),
|
||||||
|
{Certfile, Keyfile} = emqx_cth_tls:write_cert(PrivDir, Server),
|
||||||
|
Conf = emqx_template:render_strict(
|
||||||
|
emqx_template:parse([?CONF_DEFAULT, ?CONF_DTLS]),
|
||||||
|
#{
|
||||||
|
cacertfile => CACertfile,
|
||||||
|
certfile => Certfile,
|
||||||
|
keyfile => Keyfile
|
||||||
|
}
|
||||||
|
),
|
||||||
Apps = emqx_cth_suite:start(
|
Apps = emqx_cth_suite:start(
|
||||||
[
|
[
|
||||||
{emqx_conf, ?CONF_DEFAULT},
|
{emqx_conf, Conf},
|
||||||
emqx_gateway,
|
emqx_gateway,
|
||||||
emqx_auth,
|
emqx_auth,
|
||||||
emqx_management,
|
emqx_management,
|
||||||
|
@ -108,7 +134,7 @@ init_per_suite(Config) ->
|
||||||
#{work_dir => emqx_cth_suite:work_dir(Config)}
|
#{work_dir => emqx_cth_suite:work_dir(Config)}
|
||||||
),
|
),
|
||||||
emqx_common_test_http:create_default_app(),
|
emqx_common_test_http:create_default_app(),
|
||||||
[{suite_apps, Apps} | Config].
|
[{suite_apps, Apps}, {cacertfile, CACertfile} | Config].
|
||||||
|
|
||||||
end_per_suite(Config) ->
|
end_per_suite(Config) ->
|
||||||
{ok, _} = emqx:remove_config([gateway, mqttsn]),
|
{ok, _} = emqx:remove_config([gateway, mqttsn]),
|
||||||
|
@ -191,6 +217,25 @@ t_first_disconnect(_) ->
|
||||||
?assertEqual(<<2, ?SN_DISCONNECT>>, receive_response(Socket)),
|
?assertEqual(<<2, ?SN_DISCONNECT>>, receive_response(Socket)),
|
||||||
gen_udp:close(Socket).
|
gen_udp:close(Socket).
|
||||||
|
|
||||||
|
t_connect_dtls(Config) ->
|
||||||
|
SockName = {'mqttsn:dtls:default', 1885},
|
||||||
|
?assertEqual(true, lists:keymember(SockName, 1, esockd:listeners())),
|
||||||
|
|
||||||
|
ClientOpts = [
|
||||||
|
binary,
|
||||||
|
{active, false},
|
||||||
|
{protocol, dtls},
|
||||||
|
{cacertfile, ?config(cacertfile, Config)}
|
||||||
|
| emqx_common_test_helpers:ssl_verify_fun_allow_any_host()
|
||||||
|
],
|
||||||
|
{ok, Socket} = ssl:connect(?HOST, 1885, ClientOpts, 1000),
|
||||||
|
ok = ssl:send(Socket, make_connect_msg(<<"client_id_test1">>, 1)),
|
||||||
|
?assertEqual({ok, <<3, ?SN_CONNACK, 0>>}, ssl:recv(Socket, 0, 1000)),
|
||||||
|
|
||||||
|
ok = ssl:send(Socket, make_disconnect_msg(undefined)),
|
||||||
|
?assertEqual({ok, <<2, ?SN_DISCONNECT>>}, ssl:recv(Socket, 0, 1000)),
|
||||||
|
ssl:close(Socket).
|
||||||
|
|
||||||
t_subscribe(_) ->
|
t_subscribe(_) ->
|
||||||
Dup = 0,
|
Dup = 0,
|
||||||
QoS = 0,
|
QoS = 0,
|
||||||
|
@ -2444,10 +2489,7 @@ send_searchgw_msg(Socket) ->
|
||||||
Radius = 0,
|
Radius = 0,
|
||||||
ok = gen_udp:send(Socket, ?HOST, ?PORT, <<Length:8, MsgType:8, Radius:8>>).
|
ok = gen_udp:send(Socket, ?HOST, ?PORT, <<Length:8, MsgType:8, Radius:8>>).
|
||||||
|
|
||||||
send_connect_msg(Socket, ClientId) ->
|
make_connect_msg(ClientId, CleanSession) when
|
||||||
send_connect_msg(Socket, ClientId, 1).
|
|
||||||
|
|
||||||
send_connect_msg(Socket, ClientId, CleanSession) when
|
|
||||||
CleanSession == 0;
|
CleanSession == 0;
|
||||||
CleanSession == 1
|
CleanSession == 1
|
||||||
->
|
->
|
||||||
|
@ -2460,9 +2502,14 @@ send_connect_msg(Socket, ClientId, CleanSession) when
|
||||||
TopicIdType = 0,
|
TopicIdType = 0,
|
||||||
ProtocolId = 1,
|
ProtocolId = 1,
|
||||||
Duration = 10,
|
Duration = 10,
|
||||||
Packet =
|
<<Length:8, MsgType:8, Dup:1, QoS:2, Retain:1, Will:1, CleanSession:1, TopicIdType:2,
|
||||||
<<Length:8, MsgType:8, Dup:1, QoS:2, Retain:1, Will:1, CleanSession:1, TopicIdType:2,
|
ProtocolId:8, Duration:16, ClientId/binary>>.
|
||||||
ProtocolId:8, Duration:16, ClientId/binary>>,
|
|
||||||
|
send_connect_msg(Socket, ClientId) ->
|
||||||
|
send_connect_msg(Socket, ClientId, 1).
|
||||||
|
|
||||||
|
send_connect_msg(Socket, ClientId, CleanSession) ->
|
||||||
|
Packet = make_connect_msg(ClientId, CleanSession),
|
||||||
ok = gen_udp:send(Socket, ?HOST, ?PORT, Packet).
|
ok = gen_udp:send(Socket, ?HOST, ?PORT, Packet).
|
||||||
|
|
||||||
send_connect_msg_with_will(Socket, Duration, ClientId) ->
|
send_connect_msg_with_will(Socket, Duration, ClientId) ->
|
||||||
|
@ -2724,15 +2771,17 @@ send_pingreq_msg(Socket, ClientId) ->
|
||||||
?LOG("send_pingreq_msg ClientId=~p", [ClientId]),
|
?LOG("send_pingreq_msg ClientId=~p", [ClientId]),
|
||||||
ok = gen_udp:send(Socket, ?HOST, ?PORT, PingReqPacket).
|
ok = gen_udp:send(Socket, ?HOST, ?PORT, PingReqPacket).
|
||||||
|
|
||||||
send_disconnect_msg(Socket, Duration) ->
|
make_disconnect_msg(Duration) ->
|
||||||
Length = 2,
|
Length = 2,
|
||||||
Length2 = 4,
|
Length2 = 4,
|
||||||
MsgType = ?SN_DISCONNECT,
|
MsgType = ?SN_DISCONNECT,
|
||||||
DisConnectPacket =
|
case Duration of
|
||||||
case Duration of
|
undefined -> <<Length:8, MsgType:8>>;
|
||||||
undefined -> <<Length:8, MsgType:8>>;
|
Other -> <<Length2:8, MsgType:8, Other:16>>
|
||||||
Other -> <<Length2:8, MsgType:8, Other:16>>
|
end.
|
||||||
end,
|
|
||||||
|
send_disconnect_msg(Socket, Duration) ->
|
||||||
|
DisConnectPacket = make_disconnect_msg(Duration),
|
||||||
?LOG("send_disconnect_msg Duration=~p", [Duration]),
|
?LOG("send_disconnect_msg Duration=~p", [Duration]),
|
||||||
ok = gen_udp:send(Socket, ?HOST, ?PORT, DisConnectPacket).
|
ok = gen_udp:send(Socket, ?HOST, ?PORT, DisConnectPacket).
|
||||||
|
|
||||||
|
|
|
@ -186,10 +186,10 @@ info(timers, #channel{timers = Timers}) ->
|
||||||
|
|
||||||
-spec stats(channel()) -> emqx_types:stats().
|
-spec stats(channel()) -> emqx_types:stats().
|
||||||
stats(#channel{mqueue = MQueue}) ->
|
stats(#channel{mqueue = MQueue}) ->
|
||||||
%% XXX:
|
%% XXX: A fake stats for managed by emqx_management
|
||||||
SessionStats = [
|
SessionStats = [
|
||||||
{subscriptions_cnt, 0},
|
{subscriptions_cnt, 1},
|
||||||
{subscriptions_max, 0},
|
{subscriptions_max, 1},
|
||||||
{inflight_cnt, 0},
|
{inflight_cnt, 0},
|
||||||
{inflight_max, 0},
|
{inflight_max, 0},
|
||||||
{mqueue_len, queue:len(MQueue)},
|
{mqueue_len, queue:len(MQueue)},
|
||||||
|
@ -524,9 +524,13 @@ handle_out(Type, Data, Channel) ->
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
apply_frame(Frames, Channel) when is_list(Frames) ->
|
apply_frame(Frames, Channel) when is_list(Frames) ->
|
||||||
{Outgoings, NChannel} = lists:foldl(fun apply_frame/2, {[], Channel}, Frames),
|
{Outgoings, NChannel} = lists:foldl(fun do_apply_frame/2, {[], Channel}, Frames),
|
||||||
{lists:reverse(Outgoings), NChannel};
|
{lists:reverse(Outgoings), NChannel};
|
||||||
apply_frame(?IS_BootNotification_RESP(Payload), {Outgoings, Channel}) ->
|
apply_frame(Frames, Channel) ->
|
||||||
|
?SLOG(error, #{msg => "unexpected_frame_list", frames => Frames, channel => Channel}),
|
||||||
|
Channel.
|
||||||
|
|
||||||
|
do_apply_frame(?IS_BootNotification_RESP(Payload), {Outgoings, Channel}) ->
|
||||||
case maps:get(<<"status">>, Payload) of
|
case maps:get(<<"status">>, Payload) of
|
||||||
<<"Accepted">> ->
|
<<"Accepted">> ->
|
||||||
Intv = maps:get(<<"interval">>, Payload),
|
Intv = maps:get(<<"interval">>, Payload),
|
||||||
|
@ -535,8 +539,9 @@ apply_frame(?IS_BootNotification_RESP(Payload), {Outgoings, Channel}) ->
|
||||||
_ ->
|
_ ->
|
||||||
{Outgoings, Channel}
|
{Outgoings, Channel}
|
||||||
end;
|
end;
|
||||||
apply_frame(_, Channel) ->
|
do_apply_frame(Frame, Acc = {_Outgoings, Channel}) ->
|
||||||
Channel.
|
?SLOG(error, #{msg => "unexpected_frame", frame => Frame, channel => Channel}),
|
||||||
|
Acc.
|
||||||
|
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
%% Handle call
|
%% Handle call
|
||||||
|
|
|
@ -33,27 +33,27 @@
|
||||||
|
|
||||||
-define(HEARTBEAT, <<$\n>>).
|
-define(HEARTBEAT, <<$\n>>).
|
||||||
|
|
||||||
-define(CONF_DEFAULT, <<
|
%% erlfmt-ignore
|
||||||
"\n"
|
-define(CONF_DEFAULT, <<"
|
||||||
"gateway.ocpp {\n"
|
gateway.ocpp {
|
||||||
" mountpoint = \"ocpp/\"\n"
|
mountpoint = \"ocpp/\"
|
||||||
" default_heartbeat_interval = \"60s\"\n"
|
default_heartbeat_interval = \"60s\"
|
||||||
" heartbeat_checking_times_backoff = 1\n"
|
heartbeat_checking_times_backoff = 1
|
||||||
" message_format_checking = disable\n"
|
message_format_checking = disable
|
||||||
" upstream {\n"
|
upstream {
|
||||||
" topic = \"cp/${clientid}\"\n"
|
topic = \"cp/${clientid}\"
|
||||||
" reply_topic = \"cp/${clientid}/Reply\"\n"
|
reply_topic = \"cp/${clientid}/Reply\"
|
||||||
" error_topic = \"cp/${clientid}/Reply\"\n"
|
error_topic = \"cp/${clientid}/Reply\"
|
||||||
" }\n"
|
}
|
||||||
" dnstream {\n"
|
dnstream {
|
||||||
" topic = \"cs/${clientid}\"\n"
|
topic = \"cs/${clientid}\"
|
||||||
" }\n"
|
}
|
||||||
" listeners.ws.default {\n"
|
listeners.ws.default {
|
||||||
" bind = \"0.0.0.0:33033\"\n"
|
bind = \"0.0.0.0:33033\"
|
||||||
" websocket.path = \"/ocpp\"\n"
|
websocket.path = \"/ocpp\"
|
||||||
" }\n"
|
}
|
||||||
"}\n"
|
}
|
||||||
>>).
|
">>).
|
||||||
|
|
||||||
all() -> emqx_common_test_helpers:all(?MODULE).
|
all() -> emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
|
|
|
@ -356,10 +356,13 @@ configs(put, #{body := Conf, query_string := #{<<"mode">> := Mode}}, _Req) ->
|
||||||
case emqx_conf_cli:load_config(Conf, #{mode => Mode, log => none}) of
|
case emqx_conf_cli:load_config(Conf, #{mode => Mode, log => none}) of
|
||||||
ok ->
|
ok ->
|
||||||
{200};
|
{200};
|
||||||
{error, MsgList} ->
|
%% bad hocon format
|
||||||
|
{error, MsgList = [{_, _} | _]} ->
|
||||||
JsonFun = fun(K, V) -> {K, emqx_utils_maps:binary_string(V)} end,
|
JsonFun = fun(K, V) -> {K, emqx_utils_maps:binary_string(V)} end,
|
||||||
JsonMap = emqx_utils_maps:jsonable_map(maps:from_list(MsgList), JsonFun),
|
JsonMap = emqx_utils_maps:jsonable_map(maps:from_list(MsgList), JsonFun),
|
||||||
{400, #{<<"content-type">> => <<"text/plain">>}, JsonMap}
|
{400, #{<<"content-type">> => <<"text/plain">>}, JsonMap};
|
||||||
|
{error, Msg} ->
|
||||||
|
{400, #{<<"content-type">> => <<"text/plain">>}, Msg}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
find_suitable_accept(Headers, Preferences) when is_list(Preferences), length(Preferences) > 0 ->
|
find_suitable_accept(Headers, Preferences) when is_list(Preferences), length(Preferences) > 0 ->
|
||||||
|
|
|
@ -19,6 +19,7 @@
|
||||||
-compile(nowarn_export_all).
|
-compile(nowarn_export_all).
|
||||||
|
|
||||||
-include_lib("eunit/include/eunit.hrl").
|
-include_lib("eunit/include/eunit.hrl").
|
||||||
|
-include_lib("common_test/include/ct.hrl").
|
||||||
|
|
||||||
all() ->
|
all() ->
|
||||||
emqx_common_test_helpers:all(?MODULE).
|
emqx_common_test_helpers:all(?MODULE).
|
||||||
|
@ -32,11 +33,15 @@ end_per_suite(_) ->
|
||||||
|
|
||||||
init_per_testcase(TestCase = t_configs_node, Config) ->
|
init_per_testcase(TestCase = t_configs_node, Config) ->
|
||||||
?MODULE:TestCase({'init', Config});
|
?MODULE:TestCase({'init', Config});
|
||||||
|
init_per_testcase(TestCase = t_create_webhook_v1_bridges_api, Config) ->
|
||||||
|
?MODULE:TestCase({'init', Config});
|
||||||
init_per_testcase(_TestCase, Config) ->
|
init_per_testcase(_TestCase, Config) ->
|
||||||
Config.
|
Config.
|
||||||
|
|
||||||
end_per_testcase(TestCase = t_configs_node, Config) ->
|
end_per_testcase(TestCase = t_configs_node, Config) ->
|
||||||
?MODULE:TestCase({'end', Config});
|
?MODULE:TestCase({'end', Config});
|
||||||
|
end_per_testcase(TestCase = t_create_webhook_v1_bridges_api, Config) ->
|
||||||
|
?MODULE:TestCase({'end', Config});
|
||||||
end_per_testcase(_TestCase, Config) ->
|
end_per_testcase(_TestCase, Config) ->
|
||||||
Config.
|
Config.
|
||||||
|
|
||||||
|
@ -372,6 +377,105 @@ t_get_configs_in_different_accept(_Config) ->
|
||||||
%% returns error if it set to other type
|
%% returns error if it set to other type
|
||||||
?assertMatch({400, "application/json", _}, Request(<<"application/xml">>)).
|
?assertMatch({400, "application/json", _}, Request(<<"application/xml">>)).
|
||||||
|
|
||||||
|
t_create_webhook_v1_bridges_api({'init', Config}) ->
|
||||||
|
lists:foreach(
|
||||||
|
fun(App) ->
|
||||||
|
_ = application:stop(App),
|
||||||
|
{ok, [App]} = application:ensure_all_started(App)
|
||||||
|
end,
|
||||||
|
[emqx_connector, emqx_bridge]
|
||||||
|
),
|
||||||
|
Config;
|
||||||
|
t_create_webhook_v1_bridges_api({'end', _}) ->
|
||||||
|
application:stop(emqx_bridge),
|
||||||
|
application:stop(emqx_connector),
|
||||||
|
ok;
|
||||||
|
t_create_webhook_v1_bridges_api(Config) ->
|
||||||
|
WebHookFile = filename:join(?config(data_dir, Config), "webhook_v1.conf"),
|
||||||
|
?assertMatch({ok, _}, hocon:files([WebHookFile])),
|
||||||
|
{ok, WebHookBin} = file:read_file(WebHookFile),
|
||||||
|
?assertEqual([], update_configs_with_binary(WebHookBin)),
|
||||||
|
Actions =
|
||||||
|
#{
|
||||||
|
<<"http">> =>
|
||||||
|
#{
|
||||||
|
<<"webhook_name">> =>
|
||||||
|
#{
|
||||||
|
<<"connector">> => <<"connector_webhook_name">>,
|
||||||
|
<<"description">> => <<>>,
|
||||||
|
<<"enable">> => true,
|
||||||
|
<<"parameters">> =>
|
||||||
|
#{
|
||||||
|
<<"body">> => <<"{\"value\": \"${value}\"}">>,
|
||||||
|
<<"headers">> => #{},
|
||||||
|
<<"max_retries">> => 3,
|
||||||
|
<<"method">> => <<"post">>,
|
||||||
|
<<"path">> => <<>>
|
||||||
|
},
|
||||||
|
<<"resource_opts">> =>
|
||||||
|
#{
|
||||||
|
<<"health_check_interval">> => <<"15s">>,
|
||||||
|
<<"inflight_window">> => 100,
|
||||||
|
<<"max_buffer_bytes">> => <<"256MB">>,
|
||||||
|
<<"query_mode">> => <<"async">>,
|
||||||
|
<<"request_ttl">> => <<"45s">>,
|
||||||
|
<<"worker_pool_size">> => 4
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
?assertEqual(Actions, emqx_conf:get_raw([<<"actions">>])),
|
||||||
|
Connectors =
|
||||||
|
#{
|
||||||
|
<<"http">> =>
|
||||||
|
#{
|
||||||
|
<<"connector_webhook_name">> =>
|
||||||
|
#{
|
||||||
|
<<"connect_timeout">> => <<"15s">>,
|
||||||
|
<<"description">> => <<>>,
|
||||||
|
<<"enable">> => true,
|
||||||
|
<<"enable_pipelining">> => 100,
|
||||||
|
<<"headers">> =>
|
||||||
|
#{
|
||||||
|
<<"Authorization">> => <<"Bearer redacted">>,
|
||||||
|
<<"content-type">> => <<"application/json">>
|
||||||
|
},
|
||||||
|
<<"pool_size">> => 4,
|
||||||
|
<<"pool_type">> => <<"random">>,
|
||||||
|
<<"resource_opts">> =>
|
||||||
|
#{
|
||||||
|
<<"health_check_interval">> => <<"15s">>,
|
||||||
|
<<"start_after_created">> => true,
|
||||||
|
<<"start_timeout">> => <<"5s">>
|
||||||
|
},
|
||||||
|
<<"ssl">> =>
|
||||||
|
#{
|
||||||
|
<<"ciphers">> => [],
|
||||||
|
<<"depth">> => 10,
|
||||||
|
<<"enable">> => true,
|
||||||
|
<<"hibernate_after">> => <<"5s">>,
|
||||||
|
<<"log_level">> => <<"notice">>,
|
||||||
|
<<"reuse_sessions">> => true,
|
||||||
|
<<"secure_renegotiate">> => true,
|
||||||
|
<<"user_lookup_fun">> =>
|
||||||
|
<<"emqx_tls_psk:lookup">>,
|
||||||
|
<<"verify">> => <<"verify_none">>,
|
||||||
|
<<"versions">> =>
|
||||||
|
[
|
||||||
|
<<"tlsv1.3">>,
|
||||||
|
<<"tlsv1.2">>,
|
||||||
|
<<"tlsv1.1">>,
|
||||||
|
<<"tlsv1">>
|
||||||
|
]
|
||||||
|
},
|
||||||
|
<<"url">> => <<"https://127.0.0.1:18083">>
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
?assertEqual(Connectors, emqx_conf:get_raw([<<"connectors">>])),
|
||||||
|
?assertEqual(#{<<"webhook">> => #{}}, emqx_conf:get_raw([<<"bridges">>])),
|
||||||
|
ok.
|
||||||
|
|
||||||
%% Helpers
|
%% Helpers
|
||||||
|
|
||||||
get_config(Name) ->
|
get_config(Name) ->
|
||||||
|
|
|
@ -0,0 +1,36 @@
|
||||||
|
bridges {
|
||||||
|
webhook {
|
||||||
|
webhook_name {
|
||||||
|
body = "{\"value\": \"${value}\"}"
|
||||||
|
connect_timeout = "15s"
|
||||||
|
enable = true
|
||||||
|
enable_pipelining = 100
|
||||||
|
headers {Authorization = "Bearer redacted", "content-type" = "application/json"}
|
||||||
|
max_retries = 3
|
||||||
|
method = "post"
|
||||||
|
pool_size = 4
|
||||||
|
pool_type = "random"
|
||||||
|
request_timeout = "15s"
|
||||||
|
resource_opts {
|
||||||
|
async_inflight_window = 100
|
||||||
|
auto_restart_interval = "60s"
|
||||||
|
enable_queue = false
|
||||||
|
health_check_interval = "15s"
|
||||||
|
max_queue_bytes = "1GB"
|
||||||
|
query_mode = "async"
|
||||||
|
worker_pool_size = 4
|
||||||
|
}
|
||||||
|
ssl {
|
||||||
|
ciphers = []
|
||||||
|
depth = 10
|
||||||
|
enable = true
|
||||||
|
reuse_sessions = true
|
||||||
|
secure_renegotiate = true
|
||||||
|
user_lookup_fun = "emqx_tls_psk:lookup"
|
||||||
|
verify = "verify_none"
|
||||||
|
versions = ["tlsv1.3", "tlsv1.2", "tlsv1.1", "tlsv1"]
|
||||||
|
}
|
||||||
|
url = "https://127.0.0.1:18083"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -621,24 +621,36 @@ validate_bridge_existence_in_actions(#{actions := Actions, from := Froms} = _Rul
|
||||||
BridgeIDs0 =
|
BridgeIDs0 =
|
||||||
lists:map(
|
lists:map(
|
||||||
fun(BridgeID) ->
|
fun(BridgeID) ->
|
||||||
emqx_bridge_resource:parse_bridge_id(BridgeID, #{atom_name => false})
|
%% FIXME: this supposedly returns an upgraded type, but it's fuzzy: it
|
||||||
|
%% returns v1 types when attempting to "upgrade".....
|
||||||
|
{Type, Name} =
|
||||||
|
emqx_bridge_resource:parse_bridge_id(BridgeID, #{atom_name => false}),
|
||||||
|
case emqx_action_info:is_action_type(Type) of
|
||||||
|
true -> {action, Type, Name};
|
||||||
|
false -> {bridge_v1, Type, Name}
|
||||||
|
end
|
||||||
end,
|
end,
|
||||||
get_referenced_hookpoints(Froms)
|
get_referenced_hookpoints(Froms)
|
||||||
),
|
),
|
||||||
BridgeIDs1 =
|
BridgeIDs1 =
|
||||||
lists:filtermap(
|
lists:filtermap(
|
||||||
fun
|
fun
|
||||||
({bridge_v2, Type, Name}) -> {true, {Type, Name}};
|
({bridge_v2, Type, Name}) -> {true, {action, Type, Name}};
|
||||||
({bridge, Type, Name, _ResId}) -> {true, {Type, Name}};
|
({bridge, Type, Name, _ResId}) -> {true, {bridge_v1, Type, Name}};
|
||||||
(_) -> false
|
(_) -> false
|
||||||
end,
|
end,
|
||||||
Actions
|
Actions
|
||||||
),
|
),
|
||||||
NonExistentBridgeIDs =
|
NonExistentBridgeIDs =
|
||||||
lists:filter(
|
lists:filter(
|
||||||
fun({Type, Name}) ->
|
fun({Kind, Type, Name}) ->
|
||||||
|
LookupFn =
|
||||||
|
case Kind of
|
||||||
|
action -> fun emqx_bridge_v2:lookup/2;
|
||||||
|
bridge_v1 -> fun emqx_bridge:lookup/2
|
||||||
|
end,
|
||||||
try
|
try
|
||||||
case emqx_bridge:lookup(Type, Name) of
|
case LookupFn(Type, Name) of
|
||||||
{ok, _} -> false;
|
{ok, _} -> false;
|
||||||
{error, _} -> true
|
{error, _} -> true
|
||||||
end
|
end
|
||||||
|
|
|
@ -0,0 +1,10 @@
|
||||||
|
Support hot update of TCP/SSL/WS/WSS MQTT listeners configuration, which allows changing most of the configuration parameters without restarting the listener and disconnecting the clients.
|
||||||
|
|
||||||
|
In case of TCP/SSL listeners, changes to the following parameters still require full listener restart:
|
||||||
|
* `bind`
|
||||||
|
* `tcp_options.backlog`
|
||||||
|
|
||||||
|
In case of WS/WSS listeners, any parameter can be freely changed without losing the connected clients. However, changing transport related parameters will cause listening socket to be re-opened, namely:
|
||||||
|
* `bind`
|
||||||
|
* `tcp_options.*`
|
||||||
|
* `ssl_options.*`
|
|
@ -1,7 +1,9 @@
|
||||||
Updated `gen_rpc` library to version 3.3.0. The new version includes
|
Updated `gen_rpc` library to version 3.3.1. The new version includes
|
||||||
several performance improvements:
|
several performance improvements:
|
||||||
|
|
||||||
- Avoid allocating extra memory for the packets before they are sent
|
- Avoid allocating extra memory for the packets before they are sent
|
||||||
to the wire in some cases
|
to the wire in some cases
|
||||||
|
|
||||||
- Bypass network for the local calls
|
- Bypass network for the local calls
|
||||||
|
|
||||||
|
- Avoid sensitive data leaking in debug logs [#12202](https://github.com/emqx/emqx/pull/12202)
|
||||||
|
|
|
@ -0,0 +1 @@
|
||||||
|
Fix an issue where DTLS enabled MQTT-SN gateways could not be started, caused by incompatibility of default listener configuration with the DTLS implementation.
|
|
@ -0,0 +1,120 @@
|
||||||
|
# e5.4.0
|
||||||
|
|
||||||
|
## Breaking Changes
|
||||||
|
|
||||||
|
- [#11994](https://github.com/emqx/emqx/pull/11994) Stop releasing packages for Windows.
|
||||||
|
|
||||||
|
- [#11998](https://github.com/emqx/emqx/pull/11998) Stop releasing packages for MacOS 11 (BigSur).
|
||||||
|
|
||||||
|
- [#12112](https://github.com/emqx/emqx/pull/12112) Stop supporting UDP multicast based clustering strategy.
|
||||||
|
|
||||||
|
- [#10976](https://github.com/emqx/emqx/pull/10976) Fix topic-filter overlapping handling in shared subscription.
|
||||||
|
* Hook callback `session.subscribed` and `client.subscribe` will now receive shared subscription in its full representation, e.g. `$share/group1/topic1/#`, and the `share` property is deleted from `subopts`.
|
||||||
|
* Hook callback `session.unsubscribed` and `client.unsubscribe` will now receive shared subscription in its full representation, e.g. `$share/group1/topic1/#` instead of just `topic1/#`.
|
||||||
|
* ExHook Proto changed. The `share` field in message `SubOpts` was deprecated.
|
||||||
|
ExHook Server will now receive shared subscription in its full representation, e.g. `$share/group1/topic1/#`, and the `share` property is deleted from message `SubOpts`.
|
||||||
|
* `session.subscribed` and `session.unsubscribed` rule-engine events will have shared subscriptions in their full representation for `topic`, e.g. `$share/group1/topic1/#` instead of just `topic1/#`.
|
||||||
|
|
||||||
|
## Enhancements
|
||||||
|
|
||||||
|
- [#11884](https://github.com/emqx/emqx/pull/11884) Modified the Prometheus API and configuration to implement the following improvements:
|
||||||
|
|
||||||
|
- Restructured configuration sections to group related settings, improving readability and maintainability.
|
||||||
|
- Introduced `enable_basic_auth` configuration for basic authentication on the scrape API endpoint, enhancing security.
|
||||||
|
- Maintained backwards compatibility while refactoring code, avoiding breaking changes.
|
||||||
|
|
||||||
|
- [#11896](https://github.com/emqx/emqx/pull/11896) Introduced an enhancement for configuring sensitive authentication fields in bridges, such as passwords, tokens, and secret keys. This improvement allows the use of secrets stored as files in the file system. These secrets can be securely referenced in configuration files using the special `file://` prefix, enhancing the security of sensitive data handling in bridge configurations.
|
||||||
|
|
||||||
|
- [#11921](https://github.com/emqx/emqx/pull/11921) Introduced Open Telemetry Logs Handler that allows formatting log events in alignment with the Open Telemetry log data model. This handler facilitates the exportation of formatted log events to a configured Open Telemetry collector or back-end, thereby enhancing log management and integration capabilities.
|
||||||
|
|
||||||
|
- [#11935](https://github.com/emqx/emqx/pull/11935) Switched to the new `v2` routing store schema by default. New schema improves both subscription and routing performance, especially in scenarios with concurrent subscriptions to topic filters sharing common wildcard prefixes. However, it does come with a minor increase in memory usage. This schema also eliminates the need for a separate index, thus inconsistencies in the routing state rarely encountered in previous versions should no longer be possible.
|
||||||
|
|
||||||
|
If a cluster is rolling-upgraded from an older version, the cluster will continue to use `v1` store until a full cluster (non-rolling) restart happens.
|
||||||
|
|
||||||
|
Users can still opt for the previous schema by configuring the `broker.routing.storage_schema` option to `v1`. However, this also requires a complete, non-rolling restart of the cluster to take effect.
|
||||||
|
|
||||||
|
- [#11984](https://github.com/emqx/emqx/pull/11984) Implemented Open Telemetry distributed tracing feature.
|
||||||
|
|
||||||
|
- [#12017](https://github.com/emqx/emqx/pull/12017) Implemented a dedicated HTTP API for the import and export of configuration and user data.
|
||||||
|
|
||||||
|
- [#12040](https://github.com/emqx/emqx/pull/12040) Upgraded QUIC protocol stack.
|
||||||
|
|
||||||
|
- [#11766](https://github.com/emqx/emqx/pull/11766) Implemented a preliminary Role-Based Access Control for the REST API. In this version, there are three predefined roles:
|
||||||
|
- Administrator: This role can access all resources.
|
||||||
|
- Viewer: This role can only view resources and data, corresponding to all GET requests in the REST API.
|
||||||
|
- Publisher: Specifically tailored for MQTT message publishing, this role is confined to accessing endpoints related to message publication.
|
||||||
|
|
||||||
|
- [#12201](https://github.com/emqx/emqx/pull/12201) Support hot update of TCP/SSL/WS/WSS MQTT listeners configuration.
|
||||||
|
This allows changing most of the configuration parameters without restarting the listener and disconnecting the clients. The limitations are:
|
||||||
|
- For TCP/SSL listeners, changes to the following parameters still require listener restart and clients reconnect:
|
||||||
|
* `bind`
|
||||||
|
* `tcp_options.backlog`
|
||||||
|
- For WS/WSS (WebSocket) listeners, changing transport related parameters (listed below) will cause listening socket to be re-opened, but established connections will stay uninterrupted.
|
||||||
|
* `bind`
|
||||||
|
* `tcp_options.*`
|
||||||
|
* `ssl_options.*`
|
||||||
|
|
||||||
|
- [#11608](https://github.com/emqx/emqx/pull/11608) Integrated LDAP bind operation as a new authenticator, providing a more flexible and secure method for user authentication.
|
||||||
|
|
||||||
|
- [#11773](https://github.com/emqx/emqx/pull/11773) Implemented Dashboard support for audit log management. Users can utilize this page to view all change operations performed on EMQX devices and data, such as kicking out devices, creating/deleting rules, etc.
|
||||||
|
|
||||||
|
- [#11778](https://github.com/emqx/emqx/pull/11778) Integrated Microsoft Entra Identity (formerly known as Azure Active Directory) support into the SAML single sign-on (SSO) process.
|
||||||
|
|
||||||
|
|
||||||
|
- [#11811](https://github.com/emqx/emqx/pull/11811) Improved the format for the REST API key bootstrap file to support initializing key with a role.
|
||||||
|
|
||||||
|
The new form is:`api_key:api_secret:role`.
|
||||||
|
|
||||||
|
`role` is optional and its default value is `administrator`.
|
||||||
|
|
||||||
|
- [#11852](https://github.com/emqx/emqx/pull/11852) Introduced a new GB/T 32960 gateway, enabling vehicles to connect with EMQX via the GBT32960 vehicular networking protocol.
|
||||||
|
|
||||||
|
- [#11883](https://github.com/emqx/emqx/pull/11883) Introduced a new JT/T808 gateway, enabling vehicles to connect with EMQX via the JT/T 808 vehicular networking protocol.
|
||||||
|
|
||||||
|
- [#11885](https://github.com/emqx/emqx/pull/11885) Introduced a new OCPP gateway for Electric vehicle (EV) charging stations to access EMQX through the OCPP (Open Charge Point Protocol).
|
||||||
|
|
||||||
|
- [#11971](https://github.com/emqx/emqx/pull/11971) Made `/api/v5/load_rebalance/availability_check` public, meaning it no longer requires authentication. This change simplifies the setup of load balancers.
|
||||||
|
|
||||||
|
It improved the gracefulness of the rebalance/evacuation process during the wait health check phase. The connections to nodes marked for eviction are now not prohibited during this phase.
|
||||||
|
During this phase it is unknown whether these nodes are all marked unhealthy by the load balancer, so prohibiting connections to them may cause multiple unsuccessful reconnection attempts.
|
||||||
|
|
||||||
|
- [#12013](https://github.com/emqx/emqx/pull/12013) The data bridging design has been adjusted to split it into connectors and actions (Sinks). Connectors are used to manage the integration of data with external systems and can be reused across multiple actions, while actions are used to configure how data is processed. This design provides greater flexibility and scalability, resulting in clearer data integration configuration and management.
|
||||||
|
|
||||||
|
The adjusted data bridges include PostgreSQL, Timescale, and Matrix, which have now been split into connectors and actions APIs, but they remain backward compatible with the old data bridge API.
|
||||||
|
|
||||||
|
- [#12016](https://github.com/emqx/emqx/pull/12016) Enhanced license key management.
|
||||||
|
|
||||||
|
EMQX can now load the license key from a specified file. This is enabled by setting the `license.key` configuration to a file path, which should be prefixed with `"file://"`.
|
||||||
|
Also added the ability to revert to the default trial license by setting `license.key = default`. This option simplifies the process of returning to the trial license if needed.
|
||||||
|
|
||||||
|
- [#12129](https://github.com/emqx/emqx/pull/12129) Default license renewal. Replaced old license issued in Jan 2023. New license supports up to 25 concurrent connections.
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- [#10976](https://github.com/emqx/emqx/pull/10976) Fixed topic-filter overlapping handling in shared subscription.
|
||||||
|
In the previous implementation, the storage method for subscription options did not provide adequate support for shared subscriptions. This resulted in message routing failures and leakage of routing tables between nodes during the "subscribe-unsubscribe" process with specific order and topics.
|
||||||
|
|
||||||
|
- [#12048](https://github.com/emqx/emqx/pull/12048) Fixed COAP gateway bug that caused it to ignore subscription options.
|
||||||
|
|
||||||
|
- [#12078](https://github.com/emqx/emqx/pull/12078) Upgraded grpc-erl to 0.6.12. This update addresses a potential deadlock issue where the grpc client started dependent apps lazily.
|
||||||
|
|
||||||
|
- [#12081](https://github.com/emqx/emqx/pull/12081) Updated `gen_rpc` library to version 3.3.1. The new version includes several performance improvements:
|
||||||
|
|
||||||
|
- Avoiding allocating extra memory for the packets before they are sent to the wire in some cases.
|
||||||
|
|
||||||
|
- Bypassing network for the local calls.
|
||||||
|
|
||||||
|
- Avoid sensitive data leaking in debug logs [#12202](https://github.com/emqx/emqx/pull/12202)
|
||||||
|
|
||||||
|
- [#12111](https://github.com/emqx/emqx/pull/12111) Fixed an issue when API tokens were sometimes unavailable immediately after login due to race condition.
|
||||||
|
|
||||||
|
- [#12121](https://github.com/emqx/emqx/pull/12121) Fixed an issue where nodes in the cluster would occasionally return a stale view when updating configurations on different nodes concurrently.
|
||||||
|
|
||||||
|
- [#12158](https://github.com/emqx/emqx/pull/12158) Fixed an issue when the rule engine cannot connect to Redis hosted by Upstash.
|
||||||
|
|
||||||
|
Before the fix, after establishing a TCP connection with the Redis service, the Redis driver of EMQX used [Inline Commands](https://redis.io/docs/reference/protocol-spec/#inline-commands) to send AUTH and SELECT commands. However, the `upstash` Redis service does not support Inline Commands, which causes the rule engine to fail to connect to the `upstash` Redis service.
|
||||||
|
After the fix, the Redis driver of EMQX uses RESP (REdis Serialization Protocol) to send AUTH and SELECT commands.
|
||||||
|
|
||||||
|
- [#12176](https://github.com/emqx/emqx/pull/12176) Always acknowledge `DISCONNECT` packet to MQTT-SN client regardless of whether the connection has been successfully established before.
|
||||||
|
|
||||||
|
- [#12180](https://github.com/emqx/emqx/pull/12180) Fix an issue where DTLS enabled MQTT-SN gateways could not be started, caused by incompatibility of default listener configuration with the DTLS implementation.
|
|
@ -0,0 +1,82 @@
|
||||||
|
# v5.4.0
|
||||||
|
|
||||||
|
## Breaking Changes
|
||||||
|
|
||||||
|
- [#11994](https://github.com/emqx/emqx/pull/11994) Stop releasing packages for Windows.
|
||||||
|
|
||||||
|
- [#11998](https://github.com/emqx/emqx/pull/11998) Stop releasing packages for MacOS 11 (BigSur).
|
||||||
|
|
||||||
|
- [#12112](https://github.com/emqx/emqx/pull/12112) Stop supporting UDP multicast based clustering strategy.
|
||||||
|
|
||||||
|
- [#10976](https://github.com/emqx/emqx/pull/10976) Fix topic-filter overlapping handling in shared subscription.
|
||||||
|
* Hook callback `session.subscribed` and `client.subscribe` will now receive shared subscription in its full representation, e.g. `$share/group1/topic1/#`, and the `share` property is deleted from `subopts`.
|
||||||
|
* Hook callback `session.unsubscribed` and `client.unsubscribe` will now receive shared subscription in its full representation, e.g. `$share/group1/topic1/#` instead of just `topic1/#`.
|
||||||
|
* ExHook Proto changed. The `share` field in message `SubOpts` was deprecated.
|
||||||
|
ExHook Server will now receive shared subscription in its full representation, e.g. `$share/group1/topic1/#`, and the `share` property is deleted from message `SubOpts`.
|
||||||
|
* `session.subscribed` and `session.unsubscribed` rule-engine events will have shared subscriptions in their full representation for `topic`, e.g. `$share/group1/topic1/#` instead of just `topic1/#`.
|
||||||
|
|
||||||
|
## Enhancements
|
||||||
|
|
||||||
|
- [#11884](https://github.com/emqx/emqx/pull/11884) Modified the Prometheus API and configuration to implement the following improvements:
|
||||||
|
|
||||||
|
- Restructured configuration sections to group related settings, improving readability and maintainability.
|
||||||
|
- Introduced `enable_basic_auth` configuration for basic authentication on the scrape API endpoint, enhancing security.
|
||||||
|
- Maintained backwards compatibility while refactoring code, avoiding breaking changes.
|
||||||
|
|
||||||
|
- [#11896](https://github.com/emqx/emqx/pull/11896) Introduced an enhancement for configuring sensitive authentication fields in bridges, such as passwords, tokens, and secret keys. This improvement allows the use of secrets stored as files in the file system. These secrets can be securely referenced in configuration files using the special `file://` prefix, enhancing the security of sensitive data handling in bridge configurations.
|
||||||
|
|
||||||
|
- [#11921](https://github.com/emqx/emqx/pull/11921) Introduced Open Telemetry Logs Handler that allows formatting log events in alignment with the Open Telemetry log data model. This handler facilitates the exportation of formatted log events to a configured Open Telemetry collector or back-end, thereby enhancing log management and integration capabilities.
|
||||||
|
|
||||||
|
- [#11935](https://github.com/emqx/emqx/pull/11935) Switched to the new `v2` routing store schema by default. New schema improves both subscription and routing performance, especially in scenarios with concurrent subscriptions to topic filters sharing common wildcard prefixes. However, it does come with a minor increase in memory usage. This schema also eliminates the need for a separate index, thus inconsistencies in the routing state rarely encountered in previous versions should no longer be possible.
|
||||||
|
|
||||||
|
If a cluster is rolling-upgraded from an older version, the cluster will continue to use `v1` store until a full cluster (non-rolling) restart happens.
|
||||||
|
|
||||||
|
Users can still opt for the previous schema by configuring the `broker.routing.storage_schema` option to `v1`. However, this also requires a complete, non-rolling restart of the cluster to take effect.
|
||||||
|
|
||||||
|
- [#11984](https://github.com/emqx/emqx/pull/11984) Implemented Open Telemetry distributed tracing feature.
|
||||||
|
|
||||||
|
- [#12017](https://github.com/emqx/emqx/pull/12017) Implemented a dedicated HTTP API for the import and export of configuration and user data.
|
||||||
|
|
||||||
|
- [#12040](https://github.com/emqx/emqx/pull/12040) Upgraded QUIC protocol stack.
|
||||||
|
|
||||||
|
- [#11766](https://github.com/emqx/emqx/pull/11766) Implemented a preliminary Role-Based Access Control for the REST API. In this version, there are three predefined roles:
|
||||||
|
- Administrator: This role can access all resources.
|
||||||
|
- Viewer: This role can only view resources and data, corresponding to all GET requests in the REST API.
|
||||||
|
- Publisher: Specifically tailored for MQTT message publishing, this role is confined to accessing endpoints related to message publication.
|
||||||
|
|
||||||
|
- [#12201](https://github.com/emqx/emqx/pull/12201) Support hot update of TCP/SSL/WS/WSS MQTT listeners configuration.
|
||||||
|
This allows changing most of the configuration parameters without restarting the listener and disconnecting the clients. The limitations are:
|
||||||
|
- For TCP/SSL listeners, changes to the following parameters still require listener restart and clients reconnect:
|
||||||
|
* `bind`
|
||||||
|
* `tcp_options.backlog`
|
||||||
|
- For WS/WSS (WebSocket) listeners, changing transport related parameters (listed below) will cause listening socket to be re-opened, but established connections will stay uninterrupted.
|
||||||
|
* `bind`
|
||||||
|
* `tcp_options.*`
|
||||||
|
* `ssl_options.*`
|
||||||
|
|
||||||
|
## Bug Fixes
|
||||||
|
|
||||||
|
- [#12048](https://github.com/emqx/emqx/pull/12048) Fixed COAP gateway bug that caused it to ignore subscription options.
|
||||||
|
|
||||||
|
- [#12078](https://github.com/emqx/emqx/pull/12078) Upgraded grpc-erl to 0.6.12. This update addresses a potential deadlock issue where the grpc client started dependent apps lazily.
|
||||||
|
|
||||||
|
- [#12081](https://github.com/emqx/emqx/pull/12081) Updated `gen_rpc` library to version 3.3.1. The new version includes several performance improvements:
|
||||||
|
|
||||||
|
- Avoiding allocating extra memory for the packets before they are sent to the wire in some cases.
|
||||||
|
|
||||||
|
- Bypassing network for the local calls.
|
||||||
|
|
||||||
|
- Avoid sensitive data leaking in debug logs [#12202](https://github.com/emqx/emqx/pull/12202)
|
||||||
|
|
||||||
|
- [#12111](https://github.com/emqx/emqx/pull/12111) Fixed an issue when API tokens were sometimes unavailable immediately after login due to race condition.
|
||||||
|
|
||||||
|
- [#12121](https://github.com/emqx/emqx/pull/12121) Fixed an issue where nodes in the cluster would occasionally return a stale view when updating configurations on different nodes concurrently.
|
||||||
|
|
||||||
|
- [#12158](https://github.com/emqx/emqx/pull/12158) Fixed an issue when the rule engine cannot connect to Redis hosted by Upstash.
|
||||||
|
|
||||||
|
Before the fix, after establishing a TCP connection with the Redis service, the Redis driver of EMQX used [Inline Commands](https://redis.io/docs/reference/protocol-spec/#inline-commands) to send AUTH and SELECT commands. However, the `upstash` Redis service does not support Inline Commands, which causes the rule engine to fail to connect to the `upstash` Redis service.
|
||||||
|
After the fix, the Redis driver of EMQX uses RESP (REdis Serialization Protocol) to send AUTH and SELECT commands.
|
||||||
|
|
||||||
|
- [#12176](https://github.com/emqx/emqx/pull/12176) Always acknowledge `DISCONNECT` packet to MQTT-SN client regardless of whether the connection has been successfully established before.
|
||||||
|
|
||||||
|
- [#12180](https://github.com/emqx/emqx/pull/12180) Fix an issue where DTLS enabled MQTT-SN gateways could not be started, caused by incompatibility of default listener configuration with the DTLS implementation.
|
|
@ -14,8 +14,8 @@ type: application
|
||||||
|
|
||||||
# This is the chart version. This version number should be incremented each time you make changes
|
# This is the chart version. This version number should be incremented each time you make changes
|
||||||
# to the chart and its templates, including the app version.
|
# to the chart and its templates, including the app version.
|
||||||
version: 5.4.0-alpha.2
|
version: 5.4.0
|
||||||
|
|
||||||
# This is the version number of the application being deployed. This version number should be
|
# This is the version number of the application being deployed. This version number should be
|
||||||
# incremented each time you make changes to the application.
|
# incremented each time you make changes to the application.
|
||||||
appVersion: 5.4.0-alpha.2
|
appVersion: 5.4.0
|
||||||
|
|
|
@ -14,8 +14,8 @@ type: application
|
||||||
|
|
||||||
# This is the chart version. This version number should be incremented each time you make changes
|
# This is the chart version. This version number should be incremented each time you make changes
|
||||||
# to the chart and its templates, including the app version.
|
# to the chart and its templates, including the app version.
|
||||||
version: 5.4.0-alpha.2
|
version: 5.4.0
|
||||||
|
|
||||||
# This is the version number of the application being deployed. This version number should be
|
# This is the version number of the application being deployed. This version number should be
|
||||||
# incremented each time you make changes to the application.
|
# incremented each time you make changes to the application.
|
||||||
appVersion: 5.4.0-alpha.2
|
appVersion: 5.4.0
|
||||||
|
|
4
mix.exs
4
mix.exs
|
@ -53,10 +53,10 @@ defmodule EMQXUmbrella.MixProject do
|
||||||
{:gproc, github: "emqx/gproc", tag: "0.9.0.1", override: true},
|
{:gproc, github: "emqx/gproc", tag: "0.9.0.1", override: true},
|
||||||
{:jiffy, github: "emqx/jiffy", tag: "1.0.6", override: true},
|
{:jiffy, github: "emqx/jiffy", tag: "1.0.6", override: true},
|
||||||
{:cowboy, github: "emqx/cowboy", tag: "2.9.2", override: true},
|
{:cowboy, github: "emqx/cowboy", tag: "2.9.2", override: true},
|
||||||
{:esockd, github: "emqx/esockd", tag: "5.9.9", override: true},
|
{:esockd, github: "emqx/esockd", tag: "5.11.1", override: true},
|
||||||
{:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.8.0-emqx-2", override: true},
|
{:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.8.0-emqx-2", override: true},
|
||||||
{:ekka, github: "emqx/ekka", tag: "0.17.0", override: true},
|
{:ekka, github: "emqx/ekka", tag: "0.17.0", override: true},
|
||||||
{:gen_rpc, github: "emqx/gen_rpc", tag: "3.3.0", override: true},
|
{:gen_rpc, github: "emqx/gen_rpc", tag: "3.3.1", override: true},
|
||||||
{:grpc, github: "emqx/grpc-erl", tag: "0.6.12", override: true},
|
{:grpc, github: "emqx/grpc-erl", tag: "0.6.12", override: true},
|
||||||
{:minirest, github: "emqx/minirest", tag: "1.3.15", override: true},
|
{:minirest, github: "emqx/minirest", tag: "1.3.15", override: true},
|
||||||
{:ecpool, github: "emqx/ecpool", tag: "0.5.7", override: true},
|
{:ecpool, github: "emqx/ecpool", tag: "0.5.7", override: true},
|
||||||
|
|
|
@ -69,10 +69,10 @@
|
||||||
, {gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}}
|
, {gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}}
|
||||||
, {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.6"}}}
|
, {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.6"}}}
|
||||||
, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}}
|
, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}}
|
||||||
, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.9"}}}
|
, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.11.1"}}}
|
||||||
, {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.8.0-emqx-2"}}}
|
, {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.8.0-emqx-2"}}}
|
||||||
, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.17.0"}}}
|
, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.17.0"}}}
|
||||||
, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.0"}}}
|
, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.1"}}}
|
||||||
, {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.12"}}}
|
, {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.12"}}}
|
||||||
, {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.15"}}}
|
, {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.15"}}}
|
||||||
, {ecpool, {git, "https://github.com/emqx/ecpool", {tag, "0.5.7"}}}
|
, {ecpool, {git, "https://github.com/emqx/ecpool", {tag, "0.5.7"}}}
|
||||||
|
|
|
@ -1,5 +1,12 @@
|
||||||
emqx_bridge_redis {
|
emqx_bridge_redis {
|
||||||
|
|
||||||
|
redis_type.label:
|
||||||
|
"""Redis Type"""
|
||||||
|
redis_type.desc:
|
||||||
|
"""Single mode. Must be set to 'single' when Redis server is running in single mode.
|
||||||
|
Sentinel mode. Must be set to 'sentinel' when Redis server is running in sentinel mode.
|
||||||
|
Cluster mode. Must be set to 'cluster' when Redis server is running in clustered mode."""
|
||||||
|
|
||||||
command_template.desc:
|
command_template.desc:
|
||||||
"""Redis command template used to export messages. Each list element stands for a command name or its argument.
|
"""Redis command template used to export messages. Each list element stands for a command name or its argument.
|
||||||
For example, to push payloads in a Redis list by key `msgs`, the elements should be the following:
|
For example, to push payloads in a Redis list by key `msgs`, the elements should be the following:
|
||||||
|
|
|
@ -10,13 +10,6 @@ producer_action.desc:
|
||||||
producer_action.label:
|
producer_action.label:
|
||||||
"""Action Parameters"""
|
"""Action Parameters"""
|
||||||
|
|
||||||
redis_type.label:
|
|
||||||
"""Redis Type"""
|
|
||||||
redis_type.desc:
|
|
||||||
"""Single mode. Must be set to 'single' when Redis server is running in single mode.
|
|
||||||
Sentinel mode. Must be set to 'sentinel' when Redis server is running in sentinel mode.
|
|
||||||
Cluster mode. Must be set to 'cluster' when Redis server is running in clustered mode."""
|
|
||||||
|
|
||||||
batch_size.label:
|
batch_size.label:
|
||||||
"""Batch Size"""
|
"""Batch Size"""
|
||||||
batch_size.desc:
|
batch_size.desc:
|
||||||
|
|
|
@ -0,0 +1,12 @@
|
||||||
|
emqx_conf_schema_types {
|
||||||
|
|
||||||
|
duration.desc:
|
||||||
|
"""A string that represents a time duration, for example: <code>10s</code>, <code>2.5m</code>, <code>1h30m</code>, <code>1W2D</code>, or <code>2345ms</code>, which is the smallest unit. When precision is specified, finer portions of the duration may be ignored: writing <code>1200ms</code> for <code>Duration(s)</code> is equivalent to writing <code>1s</code>. The unit part is case-insensitive."""
|
||||||
|
|
||||||
|
bytesize.desc:
|
||||||
|
"""A string that represents a number of bytes, for example: <code>10B</code>, <code>640kb</code>, <code>4MB</code>, <code>1GB</code>. Units are interpreted as powers of 1024, and the unit part is case-insensitive."""
|
||||||
|
|
||||||
|
secret.desc:
|
||||||
|
"""A string holding some sensitive information, such as a password. When secret starts with <code>file://</code>, the rest of the string is interpreted as a path to a file containing the secret itself: whole content of the file except any trailing whitespace characters is considered a secret value. Note: when clustered, all EMQX nodes should have the same file present before using <code>file://</code> secrets."""
|
||||||
|
|
||||||
|
}
|
|
@ -1,132 +0,0 @@
|
||||||
#!/usr/bin/env escript
|
|
||||||
|
|
||||||
%% This script translates the hocon_schema_json's schema dump to a new format.
|
|
||||||
%% It is used to convert older version EMQX's schema dumps to the new format
|
|
||||||
%% after all files are upgraded to the new format, this script can be removed.
|
|
||||||
|
|
||||||
-mode(compile).
|
|
||||||
|
|
||||||
%% Entry point: read a JSON schema dump from InputFile, normalize it to
%% the new format, and print the re-encoded JSON to stdout.
%% Any other argument list prints usage and exits with status 1.
main([InputFile]) ->
    ok = add_libs(),
    %% Referencing the legacy atoms ensures they exist before decoding.
    _ = atoms(),
    {ok, Raw} = file:read_file(InputFile),
    Decoded = jsx:decode(Raw),
    Reformatted = reformat(Decoded),
    io:format("~s~n", [jsx:encode(Reformatted)]);
main(_Args) ->
    io:format("Usage: schema-dump-reformat.escript <input.json>~n"),
    halt(1).
|
|
||||||
|
|
||||||
%% Reformat a decoded schema dump: first normalize legacy type specs,
%% then delegate to the current reformatting logic in emqx_conf.
reformat(Json) ->
    Fixed = fix(Json),
    emqx_conf:reformat_schema_dump(Fixed).
|
|
||||||
|
|
||||||
%% fix old type specs to make them compatible with new type specs
|
|
||||||
%% Normalize a legacy schema-dump term so it is compatible with the new
%% format:
%%   - a few known legacy type specs are rewritten (head clauses below),
%%   - binary map keys become atoms, "kind" values become atoms,
%%   - "name" values are translated via fix_type/1,
%%   - containers are traversed recursively; scalars pass through.
fix(#{
    <<"kind">> := <<"union">>,
    <<"members">> := [#{<<"name">> := <<"string()">>}, #{<<"name">> := <<"function()">>}]
}) ->
    %% s3_exporter.secret_access_key
    #{kind => primitive, name => <<"string()">>};
fix(#{<<"kind">> := <<"primitive">>, <<"name">> := <<"emqx_conf_schema:log_level()">>}) ->
    #{
        kind => enum,
        symbols => [emergency, alert, critical, error, warning, notice, info, debug, none, all]
    };
%% Both the old and the renamed HTTP connector module expose the same
%% pool type; handle them in one clause.
fix(#{<<"kind">> := <<"primitive">>, <<"name">> := Name}) when
    Name =:= <<"emqx_connector_http:pool_type()">>;
    Name =:= <<"emqx_bridge_http_connector:pool_type()">>
->
    #{kind => enum, symbols => [random, hash]};
fix(Map) when is_map(Map) ->
    maps:from_list(fix(maps:to_list(Map)));
fix(List) when is_list(List) ->
    [fix(Elem) || Elem <- List];
fix({<<"kind">>, Kind}) ->
    {kind, binary_to_atom(Kind, utf8)};
fix({<<"name">>, Type}) ->
    {name, fix_type(Type)};
fix({Key, Value}) ->
    {binary_to_atom(Key, utf8), fix(Value)};
fix(Scalar) when is_number(Scalar); is_atom(Scalar); is_binary(Scalar) ->
    Scalar.
|
|
||||||
|
|
||||||
%% ensure below ebin dirs are added to code path:
|
|
||||||
%% _build/default/lib/*/ebin
|
|
||||||
%% _build/emqx/lib/*/ebin
|
|
||||||
%% _build/emqx-enterprise/lib/*/ebin
|
|
||||||
%% Put the compiled ebin directories on the code path so that schema
%% modules (and the atoms they define) are resolvable.
%% Requires the PROFILE environment variable to be "emqx" or
%% "emqx-enterprise"; prints a message and exits otherwise.
add_libs() ->
    Profile = os:getenv("PROFILE"),
    case lists:member(Profile, ["emqx", "emqx-enterprise"]) of
        true ->
            ok;
        false ->
            %% Also covers os:getenv/1 returning 'false' (unset variable).
            io:format("PROFILE is not set~n"),
            halt(1)
    end,
    EbinDirs =
        filelib:wildcard("_build/default/lib/*/ebin") ++
            filelib:wildcard("_build/" ++ Profile ++ "/lib/*/ebin"),
    lists:foreach(fun add_lib/1, EbinDirs).
|
|
||||||
|
|
||||||
%% Add one ebin directory to the code path and load every module in it,
%% so the atoms those modules reference are registered before the dump
%% is processed.
%%
%% Fix: the original fired off the loading in a bare, unlinked spawn and
%% returned immediately, so main/1 could start decoding before the atoms
%% were registered, and a crash in the loader went entirely unnoticed.
%% Loading synchronously removes that race; the beam set is small, so
%% the extra latency is irrelevant for a one-off script.
add_lib(Dir) ->
    _ = code:add_patha(Dir),
    Beams = filelib:wildcard(Dir ++ "/*.beam"),
    lists:foreach(fun load_beam/1, Beams),
    ok.
|
|
||||||
|
|
||||||
%% Ensure the module contained in BeamFile is loaded, purely so that the
%% atoms referenced by its code end up in the atom table. The result of
%% code:ensure_loaded/1 is deliberately ignored: a module that fails to
%% load simply contributes no atoms.
load_beam(BeamFile) ->
    Module = list_to_atom(filename:basename(BeamFile, ".beam")),
    _ = code:ensure_loaded(Module),
    ok.
|
|
||||||
|
|
||||||
%% Translate legacy type names into their modern equivalents; any type
%% not in the translation table passes through unchanged.
fix_type(Type) ->
    Legacy = #{
        <<"[{string(), string()}]">> => <<"map()">>,
        <<"[{binary(), binary()}]">> => <<"map()">>,
        <<"emqx_limiter_schema:rate()">> => <<"string()">>,
        <<"emqx_limiter_schema:burst_rate()">> => <<"string()">>,
        <<"emqx_limiter_schema:capacity()">> => <<"string()">>,
        <<"emqx_limiter_schema:initial()">> => <<"string()">>,
        <<"emqx_limiter_schema:failure_strategy()">> => <<"string()">>,
        <<"emqx_conf_schema:file()">> => <<"string()">>,
        <<"#{term() => binary()}">> => <<"map()">>,
        %% jwt claims
        <<"[term()]">> => <<"map()">>,
        <<"emqx_ee_bridge_influxdb:write_syntax()">> => <<"string()">>,
        <<"emqx_bridge_influxdb:write_syntax()">> => <<"string()">>,
        <<"emqx_schema:mqtt_max_packet_size()">> => <<"non_neg_integer()">>,
        <<"emqx_s3_schema:secret_access_key()">> => <<"string()">>
    },
    maps:get(Type, Legacy, Type).
|
|
||||||
|
|
||||||
%% Return a list of atoms that are referenced by older-version EMQX
%% schema dumps but may not exist in the currently loaded code.
%% Merely listing them literally in source guarantees they are present
%% in the atom table when this module is loaded; the return value itself
%% is ignored by the caller (main/1 does `_ = atoms()`).
atoms() ->
    [
        emqx_ee_connector_clickhouse,
        emqx_ee_bridge_gcp_pubsub,
        emqx_ee_bridge_influxdb,
        emqx_connector_http
    ].
|
|
Loading…
Reference in New Issue