Merge remote-tracking branch 'origin/master' into sync-m-r54-20231205
commit deb3fcd606
@@ -29,7 +29,7 @@
     {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
     {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.8"}}},
     {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.16"}}},
-    {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.2.2"}}},
+    {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.0"}}},
    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.40.0"}}},
    {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}},
    {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
@@ -1211,7 +1211,7 @@ handle_info(
 ) when
     ConnState =:= connected orelse ConnState =:= reauthenticating
 ->
-    {Intent, Session1} = emqx_session:disconnect(ClientInfo, ConnInfo, Session),
+    {Intent, Session1} = session_disconnect(ClientInfo, ConnInfo, Session),
     Channel1 = ensure_disconnected(Reason, maybe_publish_will_msg(Channel)),
     Channel2 = Channel1#channel{session = Session1},
     case maybe_shutdown(Reason, Intent, Channel2) of
@@ -2191,6 +2191,11 @@ ensure_disconnected(
     emqx_cm:mark_channel_disconnected(ChanPid),
     Channel#channel{conninfo = NConnInfo, conn_state = disconnected}.

+session_disconnect(ClientInfo, ConnInfo, Session) when Session /= undefined ->
+    emqx_session:disconnect(ClientInfo, ConnInfo, Session);
+session_disconnect(_ClientInfo, _ConnInfo, undefined) ->
+    {shutdown, undefined}.
+
 %%--------------------------------------------------------------------
 %% Maybe Publish will msg
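
The `session_disconnect/3` wrapper added here guards the disconnect path in `handle_info/2` above: a channel that never established a session carries `undefined`, and the second clause short-circuits to a shutdown intent instead of calling `emqx_session:disconnect/3` on a non-session. An illustrative check of the two shapes (variable bindings assumed):

    %% Both call sites in handle_info/2 now go through the same function:
    {shutdown, undefined} = session_disconnect(ClientInfo, ConnInfo, undefined),
    {_Intent, _Session1} = session_disconnect(ClientInfo, ConnInfo, Session).
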
@@ -169,7 +169,8 @@ commit_offset(
 -spec poll(reply_fun(), emqx_persistent_session_ds:id(), inflight(), pos_integer()) ->
     {emqx_session:replies(), inflight()}.
 poll(ReplyFun, SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < ?EPOCH_SIZE ->
-    FetchThreshold = max(1, WindowSize div 2),
+    MinBatchSize = emqx_config:get([session_persistence, min_batch_size]),
+    FetchThreshold = min(MinBatchSize, ceil(WindowSize / 2)),
     FreeSpace = WindowSize - n_inflight(Inflight0),
     case FreeSpace >= FetchThreshold of
         false ->
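
The rewritten threshold means a fetch is attempted only once at least `min(MinBatchSize, ceil(WindowSize / 2))` slots are free: small receive windows still poll at half capacity, while larger windows wait until a `min_batch_size`-sized batch is worthwhile. A standalone sketch of the decision (the module name is illustrative; the default of 100 comes from the schema change further down):

    -module(poll_threshold_sketch).
    -export([should_fetch/3]).

    %% Mirrors the FetchThreshold computation in poll/4.
    should_fetch(WindowSize, NInflight, MinBatchSize) ->
        FetchThreshold = min(MinBatchSize, ceil(WindowSize / 2)),
        WindowSize - NInflight >= FetchThreshold.

    %% should_fetch(10, 7, 100)    -> false: 3 free < min(100, 5) = 5
    %% should_fetch(10, 4, 100)    -> true:  6 free >= 5
    %% should_fetch(500, 350, 100) -> true:  150 free >= min(100, 250) = 100
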
@@ -96,6 +96,12 @@
     props := map(),
     extra := map()
 }.

+-define(TIMER_PULL, timer_pull).
+-define(TIMER_GET_STREAMS, timer_get_streams).
+-define(TIMER_BUMP_LAST_ALIVE_AT, timer_bump_last_alive_at).
+-type timer() :: ?TIMER_PULL | ?TIMER_GET_STREAMS | ?TIMER_BUMP_LAST_ALIVE_AT.
+
 -type session() :: #{
     %% Client ID
     id := id(),
@@ -111,6 +117,8 @@
     receive_maximum := pos_integer(),
     %% Connection Info
     conninfo := emqx_types:conninfo(),
+    %% Timers
+    timer() => reference(),
     %%
     props := map()
 }.
@@ -120,7 +128,6 @@
 -type clientinfo() :: emqx_types:clientinfo().
 -type conninfo() :: emqx_session:conninfo().
 -type replies() :: emqx_session:replies().
--type timer() :: pull | get_streams | bump_last_alive_at.

 -define(STATS_KEYS, [
     subscriptions_cnt,
@@ -144,8 +151,7 @@
     session().
 create(#{clientid := ClientID}, ConnInfo, Conf) ->
     % TODO: expiration
-    ensure_timers(),
-    ensure_session(ClientID, ConnInfo, Conf).
+    ensure_timers(ensure_session(ClientID, ConnInfo, Conf)).

 -spec open(clientinfo(), conninfo()) ->
     {_IsPresent :: true, session(), []} | false.
@@ -159,10 +165,9 @@ open(#{clientid := ClientID} = _ClientInfo, ConnInfo) ->
     ok = emqx_cm:discard_session(ClientID),
     case session_open(ClientID, ConnInfo) of
         Session0 = #{} ->
-            ensure_timers(),
             ReceiveMaximum = receive_maximum(ConnInfo),
             Session = Session0#{receive_maximum => ReceiveMaximum},
-            {true, Session, []};
+            {true, ensure_timers(Session), []};
         false ->
             false
     end.
@@ -333,9 +338,9 @@ publish(_PacketId, Msg, Session) ->
 puback(_ClientInfo, PacketId, Session = #{id := Id, inflight := Inflight0}) ->
     case emqx_persistent_message_ds_replayer:commit_offset(Id, ack, PacketId, Inflight0) of
         {true, Inflight} ->
-            %% TODO
+            %% TODO: we pass a bogus message into the hook:
             Msg = emqx_message:make(Id, <<>>, <<>>),
-            {ok, Msg, [], Session#{inflight => Inflight}};
+            {ok, Msg, [], pull_now(Session#{inflight => Inflight})};
         {false, _} ->
             %% Invalid Packet Id
             {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND}
@@ -351,9 +356,9 @@ puback(_ClientInfo, PacketId, Session = #{id := Id, inflight := Inflight0}) ->
 pubrec(PacketId, Session = #{id := Id, inflight := Inflight0}) ->
     case emqx_persistent_message_ds_replayer:commit_offset(Id, rec, PacketId, Inflight0) of
         {true, Inflight} ->
-            %% TODO
+            %% TODO: we pass a bogus message into the hook:
             Msg = emqx_message:make(Id, <<>>, <<>>),
-            {ok, Msg, Session#{inflight => Inflight}};
+            {ok, Msg, pull_now(Session#{inflight => Inflight})};
         {false, _} ->
             %% Invalid Packet Id
             {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND}
@@ -399,9 +404,11 @@ deliver(_ClientInfo, _Delivers, Session) ->
     {ok, replies(), session()} | {ok, replies(), timeout(), session()}.
 handle_timeout(
     _ClientInfo,
-    pull,
-    Session = #{id := Id, inflight := Inflight0, receive_maximum := ReceiveMaximum}
+    ?TIMER_PULL,
+    Session0 = #{id := Id, inflight := Inflight0, receive_maximum := ReceiveMaximum}
 ) ->
+    MaxBatchSize = emqx_config:get([session_persistence, max_batch_size]),
+    BatchSize = min(ReceiveMaximum, MaxBatchSize),
     {Publishes, Inflight} = emqx_persistent_message_ds_replayer:poll(
         fun
             (_Seqno, Message = #message{qos = ?QOS_0}) ->
@@ -412,7 +419,7 @@ handle_timeout(
         end,
         Id,
         Inflight0,
-        ReceiveMaximum
+        BatchSize
     ),
     IdlePollInterval = emqx_config:get([session_persistence, idle_poll_interval]),
     Timeout =
@@ -422,13 +429,12 @@ handle_timeout(
             [_ | _] ->
                 0
         end,
-    ensure_timer(pull, Timeout),
-    {ok, Publishes, Session#{inflight := Inflight}};
-handle_timeout(_ClientInfo, get_streams, Session) ->
+    Session = emqx_session:ensure_timer(?TIMER_PULL, Timeout, Session0#{inflight := Inflight}),
+    {ok, Publishes, Session};
+handle_timeout(_ClientInfo, ?TIMER_GET_STREAMS, Session) ->
     renew_streams(Session),
-    ensure_timer(get_streams),
-    {ok, [], Session};
-handle_timeout(_ClientInfo, bump_last_alive_at, Session0) ->
+    {ok, [], emqx_session:ensure_timer(?TIMER_GET_STREAMS, 100, Session)};
+handle_timeout(_ClientInfo, ?TIMER_BUMP_LAST_ALIVE_AT, Session0) ->
     %% Note: we take a pessimistic approach here and assume that the client will be alive
     %% until the next bump timeout. With this, we avoid garbage collecting this session
     %% too early in case the session/connection/node crashes earlier without having time
@@ -436,8 +442,8 @@ handle_timeout(_ClientInfo, bump_last_alive_at, Session0) ->
     BumpInterval = emqx_config:get([session_persistence, last_alive_update_interval]),
     EstimatedLastAliveAt = now_ms() + BumpInterval,
     Session = session_set_last_alive_at_trans(Session0, EstimatedLastAliveAt),
-    ensure_timer(bump_last_alive_at),
-    {ok, [], Session}.
+    BumpInterval = emqx_config:get([session_persistence, last_alive_update_interval]),
+    {ok, [], emqx_session:ensure_timer(?TIMER_BUMP_LAST_ALIVE_AT, BumpInterval, Session)}.

 -spec replay(clientinfo(), [], session()) ->
     {ok, replies(), session()}.
@@ -957,22 +963,15 @@ export_record(_, _, [], Acc) ->

 %% TODO: find a more reliable way to perform actions that have side
 %% effects. Add `CBM:init' callback to the session behavior?
-ensure_timers() ->
-    ensure_timer(pull),
-    ensure_timer(get_streams),
-    ensure_timer(bump_last_alive_at).
+-spec ensure_timers(session()) -> session().
+ensure_timers(Session0) ->
+    Session1 = emqx_session:ensure_timer(?TIMER_PULL, 100, Session0),
+    Session2 = emqx_session:ensure_timer(?TIMER_GET_STREAMS, 100, Session1),
+    emqx_session:ensure_timer(?TIMER_BUMP_LAST_ALIVE_AT, 100, Session2).

--spec ensure_timer(timer()) -> ok.
-ensure_timer(bump_last_alive_at = Type) ->
-    BumpInterval = emqx_config:get([session_persistence, last_alive_update_interval]),
-    ensure_timer(Type, BumpInterval);
-ensure_timer(Type) ->
-    ensure_timer(Type, 100).
-
--spec ensure_timer(timer(), non_neg_integer()) -> ok.
-ensure_timer(Type, Timeout) ->
-    _ = emqx_utils:start_timer(Timeout, {emqx_session, Type}),
-    ok.
+-spec pull_now(session()) -> session().
+pull_now(Session) ->
+    emqx_session:reset_timer(?TIMER_PULL, 0, Session).

 -spec receive_maximum(conninfo()) -> pos_integer().
 receive_maximum(ConnInfo) ->
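
After this refactoring the timers are owned by the session map itself (the `timer() => reference()` field added to the `session()` type earlier), so `ensure_timers/1` and `pull_now/1` thread the session through `emqx_session:ensure_timer/3` and `emqx_session:reset_timer/3` instead of firing bare side-effecting timers. A hedged sketch of the flow, with made-up field values and assuming `emqx_utils:start_timer/2` wraps `erlang:start_timer/3`:

    Session0 = #{id => <<"client1">>, inflight => Inflight, receive_maximum => 32},
    Session1 = emqx_session:ensure_timer(timer_pull, 100, Session0),
    %% Session1 now holds timer_pull => TRef; when it fires, the channel
    %% process receives {timeout, TRef, {emqx_session, timer_pull}} and
    %% dispatches back into handle_timeout/3 with ?TIMER_PULL.
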
@@ -1773,6 +1773,22 @@ fields("session_persistence") ->
                 }
             }
         )},
+        {"max_batch_size",
+            sc(
+                pos_integer(),
+                #{
+                    default => 1000,
+                    desc => ?DESC(session_ds_max_batch_size)
+                }
+            )},
+        {"min_batch_size",
+            sc(
+                pos_integer(),
+                #{
+                    default => 100,
+                    desc => ?DESC(session_ds_min_batch_size)
+                }
+            )},
         {"idle_poll_interval",
             sc(
                 timeout_duration(),
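
The two new options pair with the code changes above: `max_batch_size` caps the poll batch in `handle_timeout/3`, and `min_batch_size` feeds the replayer's fetch threshold in `poll/4`. Reading them follows the config paths already used there:

    MaxBatchSize = emqx_config:get([session_persistence, max_batch_size]),  %% default 1000
    MinBatchSize = emqx_config:get([session_persistence, min_batch_size]),  %% default 100
    BatchSize = min(ReceiveMaximum, MaxBatchSize).
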
@@ -111,8 +111,7 @@
     reply/0,
     replies/0,
     common_timer_name/0,
-    custom_timer_name/0,
-    timerset/0
+    custom_timer_name/0
 ]).

 -type session_id() :: _TODO.
@@ -154,8 +153,6 @@
     emqx_session_mem:session()
     | emqx_persistent_session_ds:session().

--type timerset() :: #{custom_timer_name() => _TimerRef :: reference()}.
-
 -define(INFO_KEYS, [
     id,
     created_at,
@@ -477,28 +474,26 @@ handle_timeout(ClientInfo, Timer, Session) ->

 %%--------------------------------------------------------------------

--spec ensure_timer(custom_timer_name(), timeout(), timerset()) ->
-    timerset().
-ensure_timer(Name, _Time, Timers = #{}) when is_map_key(Name, Timers) ->
-    Timers;
-ensure_timer(Name, Time, Timers = #{}) when Time > 0 ->
+-spec ensure_timer(custom_timer_name(), timeout(), map()) ->
+    map().
+ensure_timer(Name, Time, Timers = #{}) when Time >= 0 ->
     TRef = emqx_utils:start_timer(Time, {?MODULE, Name}),
     Timers#{Name => TRef}.

--spec reset_timer(custom_timer_name(), timeout(), timerset()) ->
-    timerset().
-reset_timer(Name, Time, Channel) ->
-    ensure_timer(Name, Time, cancel_timer(Name, Channel)).
+-spec reset_timer(custom_timer_name(), timeout(), map()) ->
+    map().
+reset_timer(Name, Time, Timers) ->
+    ensure_timer(Name, Time, cancel_timer(Name, Timers)).

--spec cancel_timer(custom_timer_name(), timerset()) ->
-    timerset().
-cancel_timer(Name, Timers) ->
-    case maps:take(Name, Timers) of
-        {TRef, NTimers} ->
+-spec cancel_timer(custom_timer_name(), map()) ->
+    map().
+cancel_timer(Name, Timers0) ->
+    case maps:take(Name, Timers0) of
+        {TRef, Timers} ->
             ok = emqx_utils:cancel_timer(TRef),
-            NTimers;
+            Timers;
         error ->
-            Timers
+            Timers0
     end.

 %%--------------------------------------------------------------------
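
Two behavioural changes ride along with the `timerset()`-to-`map()` loosening: the `is_map_key/2` clause that made `ensure_timer/3` a no-op when the timer was already set is gone, and the guard moved from `Time > 0` to `Time >= 0`, so the zero-delay `reset_timer(?TIMER_PULL, 0, Session)` in `pull_now/1` actually schedules. Illustrative usage:

    %% reset_timer/3 cancels any pending reference before re-arming, so at
    %% most one timer per name is outstanding even with a 0 timeout:
    Timers0 = emqx_session:ensure_timer(timer_pull, 100, #{}),
    Timers1 = emqx_session:reset_timer(timer_pull, 0, Timers0).
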
@@ -611,8 +606,8 @@ maybe_mock_impl_mod({Mock, _State}) when is_atom(Mock) ->
     Mock.
 -else.
 -spec maybe_mock_impl_mod(_Session) -> no_return().
-maybe_mock_impl_mod(_) ->
-    error(noimpl).
+maybe_mock_impl_mod(Session) ->
+    error(noimpl, [Session]).
 -endif.

 -spec choose_impl_mod(conninfo()) -> module().
@@ -91,13 +91,11 @@ match([H | T1], [H | T2]) ->
     match(T1, T2);
 match([_H | T1], ['+' | T2]) ->
     match(T1, T2);
+match([<<>> | T1], ['' | T2]) ->
+    match(T1, T2);
 match(_, ['#']) ->
     true;
-match([_H1 | _], [_H2 | _]) ->
-    false;
-match([_H1 | _], []) ->
-    false;
-match([], [_H | _T2]) ->
+match(_, _) ->
     false.

 -spec match_share(Name, Filter) -> boolean() when
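
The new clause lets an empty binary level (`<<>>`) in a tokenized topic match the empty-level atom `''` that filter parsing produces for patterns such as `t//+//#`, and the four removed fall-through clauses collapse into the single `match(_, _) -> false.`. An illustration of the tokenized shapes this enables (assuming `emqx_topic:tokens/1` and `emqx_topic:words/1` behave as sketched):

    %% tokens(<<"t//1/">>)  -> [<<"t">>, <<>>, <<"1">>, <<>>]
    %% words(<<"t//+//#">>) -> [<<"t">>, '', '+', '', '#']
    true = emqx_topic:match(emqx_topic:tokens(<<"t//1/">>), emqx_topic:words(<<"t//+//#">>)).
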
@@ -70,8 +70,8 @@
     emqx_cluster/2,
     start_ekka/0,
     start_epmd/0,
-    start_slave/2,
-    stop_slave/1,
+    start_peer/2,
+    stop_peer/1,
     listener_port/2
 ]).
@@ -734,13 +734,11 @@ emqx_cluster(Specs0, CommonOpts) ->

 %% Lower level starting API

--spec start_slave(shortname(), node_opts()) -> nodename().
-start_slave(Name, Opts) when is_list(Opts) ->
-    start_slave(Name, maps:from_list(Opts));
-start_slave(Name, Opts) when is_map(Opts) ->
-    SlaveMod = maps:get(peer_mod, Opts, ct_slave),
+-spec start_peer(shortname(), node_opts()) -> nodename().
+start_peer(Name, Opts) when is_list(Opts) ->
+    start_peer(Name, maps:from_list(Opts));
+start_peer(Name, Opts) when is_map(Opts) ->
     Node = node_name(Name),
-    put_peer_mod(Node, SlaveMod),
     Cookie = atom_to_list(erlang:get_cookie()),
     PrivDataDir = maps:get(priv_data_dir, Opts, "/tmp"),
     NodeDataDir = filename:join([
@@ -750,19 +748,13 @@ start_slave(Name, Opts) when is_map(Opts) ->
     ]),
     DoStart =
         fun() ->
-            case SlaveMod of
-                ct_slave ->
-                    ct:pal("~p: node data dir: ~s", [Node, NodeDataDir]),
-                    Envs = [
-                        {"HOCON_ENV_OVERRIDE_PREFIX", "EMQX_"},
-                        {"EMQX_NODE__COOKIE", Cookie},
-                        {"EMQX_NODE__DATA_DIR", NodeDataDir}
-                    ],
-                    emqx_cth_peer:start(Node, erl_flags(), Envs);
-                slave ->
-                    Envs = [{"HOCON_ENV_OVERRIDE_PREFIX", "EMQX_"}],
-                    emqx_cth_peer:start(Node, ebin_path(), Envs)
-            end
+            ct:pal("~p: node data dir: ~s", [Node, NodeDataDir]),
+            Envs = [
+                {"HOCON_ENV_OVERRIDE_PREFIX", "EMQX_"},
+                {"EMQX_NODE__COOKIE", Cookie},
+                {"EMQX_NODE__DATA_DIR", NodeDataDir}
+            ],
+            emqx_cth_peer:start(Node, erl_flags(), Envs)
         end,
     case DoStart() of
         {ok, _} ->
@@ -778,7 +770,7 @@ start_slave(Name, Opts) when is_map(Opts) ->
     Node.

 %% Node stopping
-stop_slave(Node0) ->
+stop_peer(Node0) ->
     Node = node_name(Node0),
     emqx_cth_peer:stop(Node).
|
@ -939,7 +931,7 @@ setup_node(Node, Opts) when is_map(Opts) ->
|
||||||
ignore ->
|
ignore ->
|
||||||
ok;
|
ok;
|
||||||
Err ->
|
Err ->
|
||||||
stop_slave(Node),
|
stop_peer(Node),
|
||||||
error({failed_to_join_cluster, #{node => Node, error => Err}})
|
error({failed_to_join_cluster, #{node => Node, error => Err}})
|
||||||
end
|
end
|
||||||
end,
|
end,
|
||||||
|
@@ -956,19 +948,6 @@ set_env_once(Var, Value) ->
     end,
     ok.

-put_peer_mod(Node, SlaveMod) ->
-    put({?MODULE, Node}, SlaveMod),
-    ok.
-
-get_peer_mod(Node) ->
-    case get({?MODULE, Node}) of
-        undefined -> ct_slave;
-        SlaveMod -> SlaveMod
-    end.
-
-erase_peer_mod(Node) ->
-    erase({?MODULE, Node}).
-
 node_name(Name) ->
     case string:tokens(atom_to_list(Name), "@") of
         [_Name, _Host] ->
@@ -52,6 +52,7 @@
 -define(TIMEOUT_NODE_START_MS, 15000).
 -define(TIMEOUT_APPS_START_MS, 30000).
 -define(TIMEOUT_NODE_STOP_S, 15).
+-define(TIMEOUT_CLUSTER_WAIT_MS, timer:seconds(10)).

 %%
@@ -91,11 +92,7 @@
     %% Working directory
     %% If this directory is not empty, starting up the node applications will fail
     %% Default: "${ClusterOpts.work_dir}/${nodename}"
-    work_dir => file:name(),
-
-    % Tooling to manage nodes
-    % Default: `ct_slave`.
-    driver => ct_slave | slave
+    work_dir => file:name()
 }}.

 -spec start([nodespec()], ClusterOpts) ->
@@ -118,11 +115,52 @@ start(NodeSpecs) ->
     % 2. Start applications needed to enable clustering
     % Generally, this causes some applications to restart, but we deliberately don't
     % start them yet.
-    _ = lists:foreach(fun run_node_phase_cluster/1, NodeSpecs),
+    ShouldAppearInRunningNodes = lists:map(fun run_node_phase_cluster/1, NodeSpecs),
+    IsClustered = lists:member(true, ShouldAppearInRunningNodes),
     % 3. Start applications after cluster is formed
     % Cluster-joins are complete, so they shouldn't restart in the background anymore.
     _ = emqx_utils:pmap(fun run_node_phase_apps/1, NodeSpecs, ?TIMEOUT_APPS_START_MS),
-    [Node || #{name := Node} <- NodeSpecs].
+    Nodes = [Node || #{name := Node} <- NodeSpecs],
+    %% 4. Wait for the nodes to cluster
+    case IsClustered of
+        true ->
+            ok = wait_clustered(Nodes, ?TIMEOUT_CLUSTER_WAIT_MS);
+        false ->
+            ok
+    end,
+    Nodes.
+
+%% Wait until all nodes see all nodes as mria running nodes
+wait_clustered(Nodes, Timeout) ->
+    Check = fun(Node) ->
+        Running = erpc:call(Node, mria, running_nodes, []),
+        case Nodes -- Running of
+            [] ->
+                true;
+            NotRunning ->
+                {false, NotRunning}
+        end
+    end,
+    wait_clustered(Nodes, Check, deadline(Timeout)).
+
+wait_clustered([], _Check, _Deadline) ->
+    ok;
+wait_clustered([Node | Nodes] = All, Check, Deadline) ->
+    IsOverdue = is_overdue(Deadline),
+    case Check(Node) of
+        true ->
+            wait_clustered(Nodes, Check, Deadline);
+        {false, NodesNotRunnging} when IsOverdue ->
+            error(
+                {timeout, #{
+                    checking_from_node => Node,
+                    nodes_not_running => NodesNotRunnging
+                }}
+            );
+        {false, Nodes} ->
+            timer:sleep(100),
+            wait_clustered(All, Check, Deadline)
+    end.

 restart(Node, Spec) ->
     ct:pal("Stopping peer node ~p", [Node]),
|
||||||
role => core,
|
role => core,
|
||||||
apps => [],
|
apps => [],
|
||||||
base_port => BasePort,
|
base_port => BasePort,
|
||||||
work_dir => filename:join([WorkDir, Node]),
|
work_dir => filename:join([WorkDir, Node])
|
||||||
driver => ct_slave
|
|
||||||
},
|
},
|
||||||
maps:merge(Defaults, NodeOpts).
|
maps:merge(Defaults, NodeOpts).
|
||||||
|
|
||||||
|
@@ -309,15 +346,21 @@ start_bare_nodes(Names, Timeout) ->
         end,
         Names
     ),
-    Deadline = erlang:monotonic_time() + erlang:convert_time_unit(Timeout, millisecond, nanosecond),
+    Deadline = deadline(Timeout),
     Nodes = wait_boot_complete(Waits, Deadline),
     lists:foreach(fun(Node) -> pong = net_adm:ping(Node) end, Nodes),
     Nodes.

+deadline(Timeout) ->
+    erlang:monotonic_time() + erlang:convert_time_unit(Timeout, millisecond, nanosecond).
+
+is_overdue(Deadline) ->
+    erlang:monotonic_time() > Deadline.
+
 wait_boot_complete([], _) ->
     [];
 wait_boot_complete(Waits, Deadline) ->
-    case erlang:monotonic_time() > Deadline of
+    case is_overdue(Deadline) of
         true ->
             error({timeout, Waits});
         false ->
@@ -340,11 +383,11 @@ node_init(Node) ->
     ok = snabbkaffe:forward_trace(Node),
     ok.

+%% Returns 'true' if this node should appear in running nodes list.
 run_node_phase_cluster(Spec = #{name := Node}) ->
     ok = load_apps(Node, Spec),
     ok = start_apps_clustering(Node, Spec),
-    ok = maybe_join_cluster(Node, Spec),
-    ok.
+    maybe_join_cluster(Node, Spec).

 run_node_phase_apps(Spec = #{name := Node}) ->
     ok = start_apps(Node, Spec),
@@ -368,18 +411,20 @@ start_apps(Node, #{apps := Apps} = Spec) ->
 suite_opts(Spec) ->
     maps:with([work_dir, boot_type], Spec).

+%% Returns 'true' if this node should appear in the cluster.
 maybe_join_cluster(_Node, #{boot_type := restart}) ->
     %% when restart, the node should already be in the cluster
     %% hence no need to (re)join
-    ok;
+    true;
 maybe_join_cluster(_Node, #{role := replicant}) ->
-    ok;
+    true;
 maybe_join_cluster(Node, Spec) ->
     case get_cluster_seeds(Spec) of
         [JoinTo | _] ->
-            ok = join_cluster(Node, JoinTo);
+            ok = join_cluster(Node, JoinTo),
+            true;
         [] ->
-            ok
+            false
     end.

 join_cluster(Node, JoinTo) ->
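
Returning a boolean from `maybe_join_cluster/2` is what feeds the `IsClustered` aggregation earlier: `run_node_phase_cluster/1` passes it through, `start/1` collects one flag per spec, and `lists:member(true, ...)` decides whether `wait_clustered/2` runs at all. A replicant-only or seedless single-node spec yields all-`false` and skips the wait:

    %% e.g. one core that joined a seed plus one replicant:
    ShouldAppear = [true, false],
    true = lists:member(true, ShouldAppear).
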
@@ -282,6 +282,34 @@ t_publish_as_persistent(_Config) ->
         emqtt:stop(Pub)
     end.

+t_publish_empty_topic_levels(_Config) ->
+    Sub = connect(<<?MODULE_STRING "1">>, true, 30),
+    Pub = connect(<<?MODULE_STRING "2">>, true, 30),
+    try
+        {ok, _, [?RC_GRANTED_QOS_1]} = emqtt:subscribe(Sub, <<"t//+//#">>, qos1),
+        Messages = [
+            {<<"t//1">>, <<"1">>},
+            {<<"t//1/">>, <<"2">>},
+            {<<"t//2//">>, <<"3">>},
+            {<<"t//2//foo">>, <<"4">>},
+            {<<"t//2/foo">>, <<"5">>},
+            {<<"t/3/bar">>, <<"6">>}
+        ],
+        [emqtt:publish(Pub, Topic, Payload, ?QOS_1) || {Topic, Payload} <- Messages],
+        Received = receive_messages(length(Messages), 1_500),
+        ?assertMatch(
+            [
+                #{topic := <<"t//1/">>, payload := <<"2">>},
+                #{topic := <<"t//2//">>, payload := <<"3">>},
+                #{topic := <<"t//2//foo">>, payload := <<"4">>}
+            ],
+            lists:sort(emqx_utils_maps:key_comparer(payload), Received)
+        )
+    after
+        emqtt:stop(Sub),
+        emqtt:stop(Pub)
+    end.
+
 %%

 connect(ClientId, CleanStart, EI) ->
@@ -322,15 +350,18 @@ consume(It) ->
     end.

 receive_messages(Count) ->
-    lists:reverse(receive_messages(Count, [])).
+    receive_messages(Count, 5_000).
+
+receive_messages(Count, Timeout) ->
+    lists:reverse(receive_messages(Count, [], Timeout)).

-receive_messages(0, Msgs) ->
+receive_messages(0, Msgs, _Timeout) ->
     Msgs;
-receive_messages(Count, Msgs) ->
+receive_messages(Count, Msgs, Timeout) ->
     receive
         {publish, Msg} ->
-            receive_messages(Count - 1, [Msg | Msgs])
-    after 5_000 ->
+            receive_messages(Count - 1, [Msg | Msgs], Timeout)
+    after Timeout ->
         Msgs
     end.
@@ -63,7 +63,6 @@ init_per_suite(Config) ->
     end,
     emqx_common_test_helpers:boot_modules(all),
     emqx_common_test_helpers:start_apps([]),
-    emqx_logger:set_log_level(debug),
     [{dist_pid, DistPid} | Config].

 end_per_suite(Config) ->
@@ -575,7 +574,7 @@ t_local(Config) when is_list(Config) ->
         <<"sticky_group">> => sticky
     },

-    Node = start_slave('local_shared_sub_local_1', 21999),
+    Node = start_peer('local_shared_sub_local_1', 21999),
     ok = ensure_group_config(GroupConfig),
     ok = ensure_group_config(Node, GroupConfig),
@@ -606,7 +605,7 @@ t_local(Config) when is_list(Config) ->

     emqtt:stop(ConnPid1),
     emqtt:stop(ConnPid2),
-    stop_slave(Node),
+    stop_peer(Node),

     ?assertEqual(local, emqx_shared_sub:strategy(<<"local_group">>)),
     ?assertEqual(local, RemoteLocalGroupStrategy),
@@ -628,7 +627,7 @@ t_remote(Config) when is_list(Config) ->
         <<"sticky_group">> => sticky
     },

-    Node = start_slave('remote_shared_sub_remote_1', 21999),
+    Node = start_peer('remote_shared_sub_remote_1', 21999),
     ok = ensure_group_config(GroupConfig),
     ok = ensure_group_config(Node, GroupConfig),
@@ -664,7 +663,7 @@ t_remote(Config) when is_list(Config) ->
     after
         emqtt:stop(ConnPidLocal),
         emqtt:stop(ConnPidRemote),
-        stop_slave(Node)
+        stop_peer(Node)
     end.

 t_local_fallback(Config) when is_list(Config) ->
|
@ -677,7 +676,7 @@ t_local_fallback(Config) when is_list(Config) ->
|
||||||
Topic = <<"local_foo/bar">>,
|
Topic = <<"local_foo/bar">>,
|
||||||
ClientId1 = <<"ClientId1">>,
|
ClientId1 = <<"ClientId1">>,
|
||||||
ClientId2 = <<"ClientId2">>,
|
ClientId2 = <<"ClientId2">>,
|
||||||
Node = start_slave('local_fallback_shared_sub_1', 11888),
|
Node = start_peer('local_fallback_shared_sub_1', 11888),
|
||||||
|
|
||||||
{ok, ConnPid1} = emqtt:start_link([{clientid, ClientId1}]),
|
{ok, ConnPid1} = emqtt:start_link([{clientid, ClientId1}]),
|
||||||
{ok, _} = emqtt:connect(ConnPid1),
|
{ok, _} = emqtt:connect(ConnPid1),
|
||||||
|
@@ -693,7 +692,7 @@ t_local_fallback(Config) when is_list(Config) ->
     {true, UsedSubPid2} = last_message(<<"hello2">>, [ConnPid1], 2_000),

     emqtt:stop(ConnPid1),
-    stop_slave(Node),
+    stop_peer(Node),

     ?assertEqual(UsedSubPid1, UsedSubPid2),
     ok.
@@ -1253,7 +1252,7 @@ recv_msgs(Count, Msgs) ->
         Msgs
     end.

-start_slave(Name, Port) ->
+start_peer(Name, Port) ->
     {ok, Node} = emqx_cth_peer:start_link(
         Name,
         ebin_path()
@@ -1262,7 +1261,7 @@ start_slave(Name, Port) ->
     setup_node(Node, Port),
     Node.

-stop_slave(Node) ->
+stop_peer(Node) ->
     rpc:call(Node, mria, leave, []),
     emqx_cth_peer:stop(Node).
|
@ -145,15 +145,16 @@ assert_messages_missed(Ls1, Ls2) ->
|
||||||
|
|
||||||
assert_messages_order([], []) ->
|
assert_messages_order([], []) ->
|
||||||
ok;
|
ok;
|
||||||
assert_messages_order([Msg | Ls1], [#{payload := No} | Ls2]) ->
|
assert_messages_order([Msg | Expected], Received) ->
|
||||||
case emqx_message:payload(Msg) == No of
|
%% Account for duplicate messages:
|
||||||
false ->
|
case lists:splitwith(fun(#{payload := P}) -> emqx_message:payload(Msg) == P end, Received) of
|
||||||
|
{[], [#{payload := Mismatch} | _]} ->
|
||||||
ct:fail("Message order is not correct, expected: ~p, received: ~p", [
|
ct:fail("Message order is not correct, expected: ~p, received: ~p", [
|
||||||
emqx_message:payload(Msg), No
|
emqx_message:payload(Msg), Mismatch
|
||||||
]),
|
]),
|
||||||
error;
|
error;
|
||||||
true ->
|
{_Matching, Rest} ->
|
||||||
assert_messages_order(Ls1, Ls2)
|
assert_messages_order(Expected, Rest)
|
||||||
end.
|
end.
|
||||||
|
|
||||||
messages(Offset, Cnt) ->
|
messages(Offset, Cnt) ->
|
||||||
|
|
|
@@ -115,6 +115,12 @@ t_sys_match(_) ->
     true = match(<<"a/b/$c">>, <<"a/b/#">>),
     true = match(<<"a/b/$c">>, <<"a/#">>).

+t_match_tokens(_) ->
+    true = match(emqx_topic:tokens(<<"a/b/c">>), words(<<"a/+/c">>)),
+    true = match(emqx_topic:tokens(<<"a//c">>), words(<<"a/+/c">>)),
+    false = match(emqx_topic:tokens(<<"a//c/">>), words(<<"a/+/c">>)),
+    true = match(emqx_topic:tokens(<<"a//c/">>), words(<<"a/+/c/#">>)).
+
 t_match_perf(_) ->
     true = match(<<"a/b/ccc">>, <<"a/#">>),
     Name = <<"/abkc/19383/192939/akakdkkdkak/xxxyyuya/akakak">>,
@@ -260,11 +260,10 @@ create(BridgeType, BridgeName, RawConf) ->
         #{override_to => cluster}
     ).

-%% NOTE: This function can cause broken references from rules but it is only
-%% called directly from test cases.
-
 -spec remove(bridge_v2_type(), bridge_v2_name()) -> ok | {error, any()}.
 remove(BridgeType, BridgeName) ->
+    %% NOTE: This function can cause broken references from rules but it is only
+    %% called directly from test cases.
     ?SLOG(debug, #{
         brige_action => remove,
         bridge_version => 2,
@@ -16,6 +16,32 @@
 -module(emqx_bridge_v2_tests).

 -include_lib("eunit/include/eunit.hrl").
+-include_lib("hocon/include/hoconsc.hrl").
+
+%%------------------------------------------------------------------------------
+%% Helper fns
+%%------------------------------------------------------------------------------
+
+non_deprecated_fields(Fields) ->
+    [K || {K, Schema} <- Fields, not hocon_schema:is_deprecated(Schema)].
+
+find_resource_opts_fields(SchemaMod, FieldName) ->
+    Fields = hocon_schema:fields(SchemaMod, FieldName),
+    case lists:keyfind(resource_opts, 1, Fields) of
+        false ->
+            undefined;
+        {resource_opts, ROSc} ->
+            get_resource_opts_subfields(ROSc)
+    end.
+
+get_resource_opts_subfields(Sc) ->
+    ?R_REF(SchemaModRO, FieldNameRO) = hocon_schema:field_schema(Sc, type),
+    ROFields = non_deprecated_fields(hocon_schema:fields(SchemaModRO, FieldNameRO)),
+    proplists:get_keys(ROFields).
+
+%%------------------------------------------------------------------------------
+%% Testcases
+%%------------------------------------------------------------------------------

 resource_opts_union_connector_actions_test() ->
     %% The purpose of this test is to ensure we have split `resource_opts' fields
@@ -37,5 +63,47 @@ resource_opts_union_connector_actions_test() ->
     ),
     ok.

-non_deprecated_fields(Fields) ->
-    [K || {K, Schema} <- Fields, not hocon_schema:is_deprecated(Schema)].
+connector_resource_opts_test() ->
+    %% The purpose of this test is to ensure that all connectors have the `resource_opts'
+    %% field with at least some sub-fields that should always be present.
+    %% These are used by `emqx_resource_manager' itself to manage the resource lifecycle.
+    MinimumROFields = [
+        health_check_interval,
+        query_mode,
+        start_after_created,
+        start_timeout
+    ],
+    ConnectorSchemasRefs =
+        lists:map(
+            fun({Type, #{type := ?MAP(_, ?R_REF(SchemaMod, FieldName))}}) ->
+                {Type, find_resource_opts_fields(SchemaMod, FieldName)}
+            end,
+            emqx_connector_schema:fields(connectors)
+        ),
+    ConnectorsMissingRO = [Type || {Type, undefined} <- ConnectorSchemasRefs],
+    ConnectorsMissingROSubfields =
+        lists:filtermap(
+            fun
+                ({_Type, undefined}) ->
+                    false;
+                ({Type, Fs}) ->
+                    case MinimumROFields -- Fs of
+                        [] ->
+                            false;
+                        MissingFields ->
+                            {true, {Type, MissingFields}}
+                    end
+            end,
+            ConnectorSchemasRefs
+        ),
+    ?assertEqual(
+        #{
+            missing_resource_opts_field => #{},
+            missing_subfields => #{}
+        },
+        #{
+            missing_resource_opts_field => maps:from_keys(ConnectorsMissingRO, true),
+            missing_subfields => maps:from_list(ConnectorsMissingROSubfields)
+        }
+    ),
+    ok.
@@ -588,7 +588,6 @@ cluster(Config) ->
         [
             {apps, [emqx_conf, emqx_rule_engine, emqx_bridge]},
             {listener_ports, []},
-            {peer_mod, slave},
            {priv_data_dir, PrivDataDir},
            {load_schema, true},
            {start_autocluster, true},
@@ -611,7 +610,7 @@ start_cluster(Cluster) ->
     Nodes = lists:map(
         fun({Name, Opts}) ->
             ct:pal("starting ~p", [Name]),
-            emqx_common_test_helpers:start_slave(Name, Opts)
+            emqx_common_test_helpers:start_peer(Name, Opts)
         end,
         Cluster
     ),
@@ -620,7 +619,7 @@ start_cluster(Cluster) ->
         emqx_utils:pmap(
             fun(N) ->
                 ct:pal("stopping ~p", [N]),
-                emqx_common_test_helpers:stop_slave(N)
+                emqx_common_test_helpers:stop_peer(N)
             end,
             Nodes
         )
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_bridge_http, [
|
{application, emqx_bridge_http, [
|
||||||
{description, "EMQX HTTP Bridge and Connector Application"},
|
{description, "EMQX HTTP Bridge and Connector Application"},
|
||||||
{vsn, "0.1.5"},
|
{vsn, "0.1.6"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{applications, [kernel, stdlib, emqx_connector, emqx_resource, ehttpc]},
|
{applications, [kernel, stdlib, emqx_connector, emqx_resource, ehttpc]},
|
||||||
{env, [{emqx_action_info_modules, [emqx_bridge_http_action_info]}]},
|
{env, [{emqx_action_info_modules, [emqx_bridge_http_action_info]}]},
|
||||||
|
|
|
@@ -24,7 +24,6 @@

 -export([
     bridge_v2_examples/1,
-    %%conn_bridge_examples/1,
     connector_examples/1
 ]).
|
@ -169,7 +168,7 @@ basic_config() ->
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
{description, emqx_schema:description_schema()}
|
{description, emqx_schema:description_schema()}
|
||||||
] ++ http_resource_opts() ++ connector_opts().
|
] ++ connector_opts().
|
||||||
|
|
||||||
request_config() ->
|
request_config() ->
|
||||||
[
|
[
|
||||||
|
@@ -321,7 +320,7 @@ http_resource_opts() ->
 connector_opts() ->
     mark_request_field_deperecated(
         proplists:delete(max_retries, emqx_bridge_http_connector:fields(config))
-    ).
+    ) ++ http_resource_opts().

 mark_request_field_deperecated(Fields) ->
     lists:map(
@@ -548,6 +548,8 @@ fields(consumer_kafka_opts) ->
             #{default => <<"5s">>, desc => ?DESC(consumer_offset_commit_interval_seconds)}
         )}
     ];
+fields(connector_resource_opts) ->
+    emqx_connector_schema:resource_opts_fields();
 fields(resource_opts) ->
     SupportedFields = [health_check_interval],
     CreationOpts = emqx_bridge_v2_schema:resource_opts_fields(),
@@ -568,6 +570,8 @@ desc("config_connector") ->
     ?DESC("desc_config");
 desc(resource_opts) ->
     ?DESC(emqx_resource_schema, "resource_opts");
+desc(connector_resource_opts) ->
+    ?DESC(emqx_resource_schema, "resource_opts");
 desc("get_" ++ Type) when
     Type =:= "consumer"; Type =:= "producer"; Type =:= "connector"; Type =:= "bridge_v2"
 ->
@@ -626,7 +630,7 @@ kafka_connector_config_fields() ->
         })},
         {socket_opts, mk(ref(socket_opts), #{required => false, desc => ?DESC(socket_opts)})},
         {ssl, mk(ref(ssl_client_opts), #{})}
-    ] ++ [resource_opts()].
+    ] ++ emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts).

 producer_opts(ActionOrBridgeV1) ->
     [
@@ -1069,20 +1069,12 @@ setup_and_start_listeners(Node, NodeOpts) ->

 cluster(Config) ->
     PrivDataDir = ?config(priv_dir, Config),
-    PeerModule =
-        case os:getenv("IS_CI") of
-            false ->
-                slave;
-            _ ->
-                ct_slave
-        end,
     ExtraEnvHandlerHook = setup_group_subscriber_spy_fn(),
     Cluster = emqx_common_test_helpers:emqx_cluster(
         [core, core],
         [
             {apps, [emqx_conf, emqx_rule_engine, emqx_bridge_kafka, emqx_bridge]},
             {listener_ports, []},
-            {peer_mod, PeerModule},
             {priv_data_dir, PrivDataDir},
             {load_schema, true},
             {start_autocluster, true},
@@ -1744,14 +1736,14 @@ t_cluster_group(Config) ->
     begin
         Nodes =
             [_N1, N2 | _] = [
-                emqx_common_test_helpers:start_slave(Name, Opts)
+                emqx_common_test_helpers:start_peer(Name, Opts)
              || {Name, Opts} <- Cluster
             ],
         on_exit(fun() ->
             emqx_utils:pmap(
                 fun(N) ->
                     ct:pal("stopping ~p", [N]),
-                    ok = emqx_common_test_helpers:stop_slave(N)
+                    ok = emqx_common_test_helpers:stop_peer(N)
                 end,
                 Nodes
             )
@@ -1827,10 +1819,10 @@ t_node_joins_existing_cluster(Config) ->
     begin
         [{Name1, Opts1}, {Name2, Opts2} | _] = Cluster,
         ct:pal("starting ~p", [Name1]),
-        N1 = emqx_common_test_helpers:start_slave(Name1, Opts1),
+        N1 = emqx_common_test_helpers:start_peer(Name1, Opts1),
         on_exit(fun() ->
             ct:pal("stopping ~p", [N1]),
-            ok = emqx_common_test_helpers:stop_slave(N1)
+            ok = emqx_common_test_helpers:stop_peer(N1)
         end),
         {{ok, _}, {ok, _}} =
             ?wait_async_action(
@@ -1870,10 +1862,10 @@ t_node_joins_existing_cluster(Config) ->
             30_000
         ),
         ct:pal("starting ~p", [Name2]),
-        N2 = emqx_common_test_helpers:start_slave(Name2, Opts2),
+        N2 = emqx_common_test_helpers:start_peer(Name2, Opts2),
         on_exit(fun() ->
             ct:pal("stopping ~p", [N2]),
-            ok = emqx_common_test_helpers:stop_slave(N2)
+            ok = emqx_common_test_helpers:stop_peer(N2)
         end),
         Nodes = [N1, N2],
         wait_for_cluster_rpc(N2),
@@ -1963,7 +1955,7 @@ t_cluster_node_down(Config) ->
         lists:map(
             fun({Name, Opts}) ->
                 ct:pal("starting ~p", [Name]),
-                emqx_common_test_helpers:start_slave(Name, Opts)
+                emqx_common_test_helpers:start_peer(Name, Opts)
             end,
             Cluster
         ),
@@ -1971,7 +1963,7 @@ t_cluster_node_down(Config) ->
         emqx_utils:pmap(
             fun(N) ->
                 ct:pal("stopping ~p", [N]),
-                ok = emqx_common_test_helpers:stop_slave(N)
+                ok = emqx_common_test_helpers:stop_peer(N)
             end,
             Nodes
         )
@@ -2016,7 +2008,7 @@ t_cluster_node_down(Config) ->
         {TId, Pid} = start_async_publisher(Config, KafkaTopic),

         ct:pal("stopping node ~p", [N1]),
-        ok = emqx_common_test_helpers:stop_slave(N1),
+        ok = emqx_common_test_helpers:stop_peer(N1),

         %% Give some time for the consumers in remaining node to
         %% rebalance.
@@ -53,7 +53,8 @@ fields("config") ->
     ];
 fields("config_connector") ->
     emqx_connector_schema:common_fields() ++
-        fields("connection_fields");
+        fields("connection_fields") ++
+        emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts);
 fields("connection_fields") ->
     [
         {parameters,
@@ -93,6 +94,8 @@ fields(action_parameters) ->
         {collection, mk(binary(), #{desc => ?DESC("collection"), default => <<"mqtt">>})},
         {payload_template, mk(binary(), #{required => false, desc => ?DESC("payload_template")})}
     ];
+fields(connector_resource_opts) ->
+    emqx_connector_schema:resource_opts_fields();
 fields(resource_opts) ->
     fields("creation_opts");
 fields(mongodb_rs) ->
@@ -202,6 +205,8 @@ desc("creation_opts") ->
     ?DESC(emqx_resource_schema, "creation_opts");
 desc(resource_opts) ->
     ?DESC(emqx_resource_schema, "resource_opts");
+desc(connector_resource_opts) ->
+    ?DESC(emqx_resource_schema, "resource_opts");
 desc(mongodb_rs) ->
     ?DESC(mongodb_rs_conf);
 desc(mongodb_sharded) ->
@@ -54,7 +54,15 @@ bridge_v1_config_to_connector_config(BridgeV1Config) ->
     ConnectorTopLevelKeys = schema_keys("config_connector"),
     ConnectorKeys = maps:keys(BridgeV1Config) -- (ActionKeys -- ConnectorTopLevelKeys),
     ConnectorParametersKeys = ConnectorKeys -- ConnectorTopLevelKeys,
-    make_config_map(ConnectorKeys, ConnectorParametersKeys, BridgeV1Config).
+    ConnConfig0 = make_config_map(ConnectorKeys, ConnectorParametersKeys, BridgeV1Config),
+    emqx_utils_maps:update_if_present(
+        <<"resource_opts">>,
+        fun(ResourceOpts) ->
+            CommonROSubfields = emqx_connector_schema:common_resource_opts_subfields_bin(),
+            maps:with(CommonROSubfields, ResourceOpts)
+        end,
+        ConnConfig0
+    ).

 make_config_map(PickKeys, IndentKeys, Config) ->
     Conf0 = maps:with(PickKeys, Config),
@@ -517,19 +517,11 @@ try_decode_json(Payload) ->

 cluster(Config) ->
     PrivDataDir = ?config(priv_dir, Config),
-    PeerModule =
-        case os:getenv("IS_CI") of
-            false ->
-                slave;
-            _ ->
-                ct_slave
-        end,
     Cluster = emqx_common_test_helpers:emqx_cluster(
         [core, core],
         [
             {apps, [emqx_conf] ++ ?APPS ++ [pulsar]},
             {listener_ports, []},
-            {peer_mod, PeerModule},
             {priv_data_dir, PrivDataDir},
             {load_schema, true},
             {start_autocluster, true},
@@ -551,7 +543,7 @@ cluster(Config) ->
 start_cluster(Cluster) ->
     Nodes =
         [
-            emqx_common_test_helpers:start_slave(Name, Opts)
+            emqx_common_test_helpers:start_peer(Name, Opts)
          || {Name, Opts} <- Cluster
         ],
     NumNodes = length(Nodes),
@@ -559,7 +551,7 @@ start_cluster(Cluster) ->
         emqx_utils:pmap(
             fun(N) ->
                 ct:pal("stopping ~p", [N]),
-                ok = emqx_common_test_helpers:stop_slave(N)
+                ok = emqx_common_test_helpers:stop_peer(N)
             end,
             Nodes
         )
@@ -51,6 +51,7 @@ fields("config_connector") ->
         )}
     ] ++
         emqx_redis:redis_fields() ++
+        emqx_connector_schema:resource_opts_ref(?MODULE, resource_opts) ++
         emqx_connector_schema_lib:ssl_fields();
 fields(action) ->
     {?TYPE,
@@ -93,7 +93,9 @@ roots() ->
     [{config, #{type => hoconsc:ref(?MODULE, config)}}].

 fields(config) ->
-    emqx_connector_schema:common_fields() ++ fields("connection_fields");
+    emqx_connector_schema:common_fields() ++
+        fields("connection_fields") ++
+        emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts);
 fields("connection_fields") ->
     [
         {server, server()},
@@ -114,6 +116,8 @@ fields("connection_fields") ->
             emqx_connector_schema_lib:pool_size(Other)
         end}
     ];
+fields(connector_resource_opts) ->
+    emqx_connector_schema:resource_opts_fields();
 fields(Field) when
     Field == "get";
     Field == "post";
@ -125,6 +129,8 @@ fields(Field) when
|
||||||
|
|
||||||
desc(config) ->
|
desc(config) ->
|
||||||
?DESC("desc_config");
|
?DESC("desc_config");
|
||||||
|
desc(connector_resource_opts) ->
|
||||||
|
?DESC(emqx_resource_schema, "resource_opts");
|
||||||
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
|
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
|
||||||
["Configuration for Syskeeper Proxy using `", string:to_upper(Method), "` method."];
|
["Configuration for Syskeeper Proxy using `", string:to_upper(Method), "` method."];
|
||||||
desc(_) ->
|
desc(_) ->
|
||||||
|
|
|
@@ -77,7 +77,9 @@ namespace() -> "connector_syskeeper_proxy".
 roots() -> [].
 
 fields(config) ->
-    emqx_connector_schema:common_fields() ++ fields("connection_fields");
+    emqx_connector_schema:common_fields() ++
+        fields("connection_fields") ++
+        emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts);
 fields("connection_fields") ->
     [
         {listen, listen()},
@@ -92,6 +94,8 @@ fields("connection_fields") ->
             #{desc => ?DESC(handshake_timeout), default => <<"10s">>}
         )}
     ];
+fields(connector_resource_opts) ->
+    emqx_connector_schema:resource_opts_fields();
 fields(Field) when
     Field == "get";
     Field == "post";
@@ -103,6 +107,8 @@ fields(Field) when
 
 desc(config) ->
     ?DESC("desc_config");
+desc(connector_resource_opts) ->
+    ?DESC(emqx_resource_schema, "resource_opts");
 desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
     ["Configuration for Syskeeper Proxy using `", string:to_upper(Method), "` method."];
 desc(_) ->
@@ -222,16 +222,16 @@ assert_config_load_done(Nodes) ->
     ).
 
 stop_cluster(Nodes) ->
-    emqx_utils:pmap(fun emqx_common_test_helpers:stop_slave/1, Nodes).
+    emqx_utils:pmap(fun emqx_common_test_helpers:stop_peer/1, Nodes).
 
 start_cluster(Specs) ->
-    [emqx_common_test_helpers:start_slave(Name, Opts) || {Name, Opts} <- Specs].
+    [emqx_common_test_helpers:start_peer(Name, Opts) || {Name, Opts} <- Specs].
 
 start_cluster_async(Specs) ->
     [
         begin
             Opts1 = maps:remove(join_to, Opts),
-            spawn_link(fun() -> emqx_common_test_helpers:start_slave(Name, Opts1) end),
+            spawn_link(fun() -> emqx_common_test_helpers:start_peer(Name, Opts1) end),
             timer:sleep(7_000)
         end
     || {Name, Opts} <- Specs
@@ -382,9 +382,13 @@ safe_atom(Bin) when is_binary(Bin) -> binary_to_existing_atom(Bin, utf8);
 safe_atom(Atom) when is_atom(Atom) -> Atom.
 
 parse_opts(Conf, Opts0) ->
-    override_start_after_created(Conf, Opts0).
+    Opts1 = override_start_after_created(Conf, Opts0),
+    set_no_buffer_workers(Opts1).
 
 override_start_after_created(Config, Opts) ->
     Enabled = maps:get(enable, Config, true),
     StartAfterCreated = Enabled andalso maps:get(start_after_created, Opts, Enabled),
     Opts#{start_after_created => StartAfterCreated}.
+
+set_no_buffer_workers(Opts) ->
+    Opts#{spawn_buffer_workers => false}.
@@ -40,7 +40,13 @@
     type_and_name_fields/1
 ]).
 
--export([resource_opts_fields/0, resource_opts_fields/1]).
+-export([
+    common_resource_opts_subfields/0,
+    common_resource_opts_subfields_bin/0,
+    resource_opts_fields/0,
+    resource_opts_fields/1,
+    resource_opts_ref/2
+]).
 
 -export([examples/1]).
 
@@ -178,14 +184,19 @@ split_bridge_to_connector_and_action(
     %% Get connector fields from bridge config
     lists:foldl(
         fun({ConnectorFieldName, _Spec}, ToTransformSoFar) ->
-            case maps:is_key(to_bin(ConnectorFieldName), BridgeV1Conf) of
+            ConnectorFieldNameBin = to_bin(ConnectorFieldName),
+            case maps:is_key(ConnectorFieldNameBin, BridgeV1Conf) of
                 true ->
-                    NewToTransform = maps:put(
-                        to_bin(ConnectorFieldName),
-                        maps:get(to_bin(ConnectorFieldName), BridgeV1Conf),
-                        ToTransformSoFar
-                    ),
-                    NewToTransform;
+                    PrevFieldConfig =
+                        project_to_connector_resource_opts(
+                            ConnectorFieldNameBin,
+                            maps:get(ConnectorFieldNameBin, BridgeV1Conf)
+                        ),
+                    maps:put(
+                        ConnectorFieldNameBin,
+                        PrevFieldConfig,
+                        ToTransformSoFar
+                    );
                 false ->
                     ToTransformSoFar
             end
@@ -213,6 +224,12 @@ split_bridge_to_connector_and_action(
         end,
     {BridgeType, BridgeName, ActionMap, ConnectorName, ConnectorMap}.
 
+project_to_connector_resource_opts(<<"resource_opts">>, OldResourceOpts) ->
+    Subfields = common_resource_opts_subfields_bin(),
+    maps:with(Subfields, OldResourceOpts);
+project_to_connector_resource_opts(_, OldConfig) ->
+    OldConfig.
+
 transform_bridge_v1_config_to_action_config(
     BridgeV1Conf, ConnectorName, ConnectorConfSchemaMod, ConnectorConfSchemaName
 ) ->
@@ -497,19 +514,33 @@ status_and_actions_fields() ->
         )}
     ].
 
+resource_opts_ref(Module, RefName) ->
+    [
+        {resource_opts,
+            mk(
+                ref(Module, RefName),
+                emqx_resource_schema:resource_opts_meta()
+            )}
+    ].
+
+common_resource_opts_subfields() ->
+    [
+        health_check_interval,
+        query_mode,
+        start_after_created,
+        start_timeout
+    ].
+
+common_resource_opts_subfields_bin() ->
+    lists:map(fun atom_to_binary/1, common_resource_opts_subfields()).
+
 resource_opts_fields() ->
     resource_opts_fields(_Overrides = []).
 
 resource_opts_fields(Overrides) ->
     %% Note: these don't include buffer-related configurations because buffer workers are
     %% tied to the action.
-    ConnectorROFields = [
-        health_check_interval,
-        query_mode,
-        request_ttl,
-        start_after_created,
-        start_timeout
-    ],
+    ConnectorROFields = common_resource_opts_subfields(),
     lists:filter(
         fun({Key, _Sc}) -> lists:member(Key, ConnectorROFields) end,
         emqx_resource_schema:create_opts(Overrides)
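The projection used by the bridge-to-connector split above is plain `maps:with/2` over the binary subfield names. A quick sketch of the effect, with field values invented for illustration:

```erlang
%% Only the connector-level resource_opts survive the projection; buffer-related
%% fields (worker_pool_size is invented here for illustration) stay behind
%% with the action.
BridgeV1ResourceOpts = #{
    <<"health_check_interval">> => <<"15s">>,
    <<"query_mode">> => <<"async">>,
    <<"worker_pool_size">> => 16
},
Subfields = [
    <<"health_check_interval">>,
    <<"query_mode">>,
    <<"start_after_created">>,
    <<"start_timeout">>
],
#{<<"health_check_interval">> := <<"15s">>, <<"query_mode">> := <<"async">>} =
    maps:with(Subfields, BridgeV1ResourceOpts).
```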
@@ -163,11 +163,11 @@ t_remove_fail({'init', Config}) ->
     meck:expect(?CONNECTOR, on_add_channel, 4, {ok, connector_state}),
     meck:expect(?CONNECTOR, on_stop, 2, ok),
     meck:expect(?CONNECTOR, on_get_status, 2, connected),
-    [{mocked_mods, [?CONNECTOR, emqx_connector_ee_schema]} | Config];
-t_remove_fail({'end', Config}) ->
-    MockedMods = ?config(mocked_mods, Config),
-    meck:unload(MockedMods),
+    meck:expect(?CONNECTOR, query_mode, 1, simple_async_internal_buffer),
     Config;
+t_remove_fail({'end', _Config}) ->
+    meck:unload(),
+    ok;
 t_remove_fail(_Config) ->
     ?assertEqual(
         [],
@@ -200,7 +200,20 @@ t_remove_fail(_Config) ->
             {_, {?CONNECTOR, on_add_channel, _}, {ok, connector_state}},
             {_, {?CONNECTOR, on_get_channels, [_]}, _}
         ],
+        lists:filter(
+            fun({_, {?CONNECTOR, Fun, _Args}, _}) ->
+                lists:member(
+                    Fun, [
+                        callback_mode,
+                        on_start,
+                        on_get_channels,
+                        on_get_status,
+                        on_add_channel
+                    ]
+                )
+            end,
             meck:history(?CONNECTOR)
+        )
     ),
     ok.
 
@@ -269,6 +282,33 @@ t_create_with_bad_name_root_path(_Config) ->
     ),
     ok.
 
+t_no_buffer_workers({'init', Config}) ->
+    meck:new(emqx_connector_ee_schema, [passthrough]),
+    meck:expect(emqx_connector_ee_schema, resource_type, 1, ?CONNECTOR),
+    meck:new(?CONNECTOR, [non_strict]),
+    meck:expect(?CONNECTOR, callback_mode, 0, async_if_possible),
+    meck:expect(?CONNECTOR, on_start, 2, {ok, connector_state}),
+    meck:expect(?CONNECTOR, on_get_channels, 1, []),
+    meck:expect(?CONNECTOR, on_add_channel, 4, {ok, connector_state}),
+    meck:expect(?CONNECTOR, on_stop, 2, ok),
+    meck:expect(?CONNECTOR, on_get_status, 2, connected),
+    meck:expect(?CONNECTOR, query_mode, 1, sync),
+    [
+        {path, [connectors, kafka_producer, no_bws]}
+        | Config
+    ];
+t_no_buffer_workers({'end', Config}) ->
+    Path = ?config(path, Config),
+    {ok, _} = emqx:remove_config(Path),
+    meck:unload(),
+    ok;
+t_no_buffer_workers(Config) ->
+    Path = ?config(path, Config),
+    ConnConfig = connector_config(),
+    ?assertMatch({ok, _}, emqx:update_config(Path, ConnConfig)),
+    ?assertEqual([], supervisor:which_children(emqx_resource_buffer_worker_sup)),
+    ok.
+
 %% helpers
 
 connector_config() ->
@@ -17,6 +17,7 @@
 -module(emqx_connector_dummy_impl).
 
 -export([
+    query_mode/1,
     callback_mode/0,
     on_start/2,
     on_stop/2,
@@ -24,6 +25,7 @@
     on_get_channel_status/3
 ]).
 
+query_mode(_) -> error(unexpected).
 callback_mode() -> error(unexpected).
 on_start(_, _) -> error(unexpected).
 on_stop(_, _) -> error(unexpected).
@@ -1,36 +1,50 @@
 # EMQX Replay
 
-`emqx_ds` is a generic durable storage for MQTT messages within EMQX.
+`emqx_ds` is an application implementing durable storage for MQTT messages within EMQX.
 
-Concepts:
-
-> 0. App overview introduction
-> 1. let people know what your project can do specifically. Is it a base
-> library dependency, or what kind of functionality is provided to the user?
-> 2. Provide context and add a link to any reference visitors might be
-> unfamiliar with.
-> 3. Design details, implementation technology architecture, Roadmap, etc.
-
-# [Features] - [Optional]
-> A List of features your application provided. If the feature is quite simple, just
-> list in the previous section.
+# Features
+
+- Streams. A stream is an abstraction that encompasses topics, shards, different data layouts, etc.
+  The client application only needs to be aware of streams.
+
+- Batching. All the API functions are batch-oriented.
+
+- Iterators. Iterators can be stored durably or transferred over the network.
+  They take up relatively little space.
+
+- Support for various backends. Almost any DBMS that supports range
+  queries can serve as an `emqx_durable_storage` backend.
+
+- Builtin backend based on RocksDB.
+
+- Changing the storage layout on the fly: achieved by creating a
+  new set of tables (known as a "generation") with the new schema.
+
+- Sharding based on the publisher's client ID.
 
 # Limitation
-TBD
+
+- The builtin backend currently doesn't replicate data across different sites.
+- There is no local cache of messages, which may result in transferring the same data multiple times.
 
 # Documentation links
 TBD
 
 # Usage
-TBD
+
+Currently it's only used to implement persistent sessions.
+
+In the future it can serve as a storage for retained messages or as a generic message buffering layer for the bridges.
 
 # Configurations
-TBD
+
+`emqx_durable_storage` doesn't have any configurable parameters.
+Instead, it relies on the upper-level business applications to create
+a correct configuration and pass it to the `emqx_ds:open_db(DBName, Config)`
+function according to their needs.
 
 # HTTP APIs
 
+None
+
 # Other
 TBD
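A rough sketch of the stream/iterator workflow described above, pieced together from the API calls visible elsewhere in this commit (`open_db`, `make_iterator`, `next`); the exact argument shapes are assumptions, not the definitive interface:

```erlang
%% Hedged sketch: names and signatures are assumptions based on the calls above.
ok = emqx_ds:open_db(my_db, #{backend => builtin}),
TopicFilter = emqx_topic:words(<<"t/#">>),
StartTime = 0,
%% Streams hide topics/shards/layouts; each one is iterated independently.
Streams = emqx_ds:get_streams(my_db, TopicFilter, StartTime),
lists:foreach(
    fun({_Rank, Stream}) ->
        {ok, It0} = emqx_ds:make_iterator(my_db, Stream, TopicFilter, StartTime),
        %% Batch-oriented: next/3 returns the batch plus the advanced iterator,
        %% a small term that can be persisted and resumed after a restart.
        {ok, _It1, _Batch} = emqx_ds:next(my_db, It0, _BatchSize = 100)
    end,
    Streams
).
```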
@@ -368,7 +368,7 @@ check_message(
     #{?tag := ?IT, ?start_time := StartTime, ?topic_filter := TopicFilter},
     #message{timestamp = Timestamp, topic = Topic}
 ) when Timestamp >= StartTime ->
-    emqx_topic:match(emqx_topic:words(Topic), TopicFilter);
+    emqx_topic:match(emqx_topic:tokens(Topic), TopicFilter);
 check_message(_Cutoff, _It, _Msg) ->
     false.
 
@@ -378,7 +378,7 @@ format_key(KeyMapper, Key) ->
 
 -spec make_key(s(), emqx_types:message()) -> {binary(), [binary()]}.
 make_key(#s{keymappers = KeyMappers, trie = Trie}, #message{timestamp = Timestamp, topic = TopicBin}) ->
-    Tokens = emqx_topic:tokens(TopicBin),
+    Tokens = emqx_topic:words(TopicBin),
     {TopicIndex, Varying} = emqx_ds_lts:topic_key(Trie, fun threshold_fun/1, Tokens),
     VaryingHashes = [hash_topic_level(I) || I <- Varying],
     KeyMapper = array:get(length(Varying), KeyMappers),
@@ -69,7 +69,7 @@ make_iterator(Node, DB, Shard, Stream, TopicFilter, StartTime) ->
     | {ok, end_of_stream}
     | {error, _}.
 next(Node, DB, Shard, Iter, BatchSize) ->
-    erpc:call(Node, emqx_ds_replication_layer, do_next_v1, [DB, Shard, Iter, BatchSize]).
+    emqx_rpc:call(Shard, Node, emqx_ds_replication_layer, do_next_v1, [DB, Shard, Iter, BatchSize]).
 
 -spec store_batch(
     node(),
@@ -80,7 +80,9 @@ next(Node, DB, Shard, Iter, BatchSize) ->
 ) ->
     emqx_ds:store_batch_result().
 store_batch(Node, DB, Shard, Batch, Options) ->
-    erpc:call(Node, emqx_ds_replication_layer, do_store_batch_v1, [DB, Shard, Batch, Options]).
+    emqx_rpc:call(Shard, Node, emqx_ds_replication_layer, do_store_batch_v1, [
+        DB, Shard, Batch, Options
+    ]).
 
 %%================================================================================
 %% behavior callbacks
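Threading the shard through as the first argument turns it into a dispatch key. Assuming `emqx_rpc:call/5` forwards to gen_rpc's keyed destinations (an assumption, not verified here), the effect is roughly:

```erlang
%% Sketch: a `{Node, Key}' destination pins all calls for one shard to one
%% gen_rpc client channel, preserving per-shard ordering instead of funneling
%% everything through a single shared erpc connection.
call(Key, Node, Mod, Fun, Args) ->
    gen_rpc:call({Node, Key}, Mod, Fun, Args).
```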
@@ -50,9 +50,9 @@ end_per_suite(Config) ->
 init_per_testcase(Case, Config) ->
     _ = emqx_eviction_agent:disable(test_eviction),
     ok = snabbkaffe:start_trace(),
-    start_slave(Case, Config).
+    start_peer(Case, Config).
 
-start_slave(t_explicit_session_takeover, Config) ->
+start_peer(t_explicit_session_takeover, Config) ->
     NodeNames =
         [
             t_explicit_session_takeover_donor,
@@ -65,19 +65,19 @@ start_slave(t_explicit_session_takeover, Config) ->
     ),
     ok = snabbkaffe:start_trace(),
     [{evacuate_nodes, ClusterNodes} | Config];
-start_slave(_Case, Config) ->
+start_peer(_Case, Config) ->
     Config.
 
 end_per_testcase(TestCase, Config) ->
     emqx_eviction_agent:disable(test_eviction),
     ok = snabbkaffe:stop(),
-    stop_slave(TestCase, Config).
+    stop_peer(TestCase, Config).
 
-stop_slave(t_explicit_session_takeover, Config) ->
+stop_peer(t_explicit_session_takeover, Config) ->
     emqx_eviction_agent_test_helpers:stop_cluster(
         ?config(evacuate_nodes, Config)
     );
-stop_slave(_Case, _Config) ->
+stop_peer(_Case, _Config) ->
     ok.
 
 %%--------------------------------------------------------------------
@@ -74,14 +74,16 @@ check_topic([]) ->
 check_topic(Path) ->
     {ok, emqx_http_lib:uri_decode(iolist_to_binary(lists:join(<<"/">>, Path)))}.
 
-get_sub_opts(#coap_message{options = Opts} = Msg) ->
-    SubOpts = maps:fold(fun parse_sub_opts/3, #{}, Opts),
+get_sub_opts(Msg) ->
+    SubOpts = maps:fold(
+        fun parse_sub_opts/3, #{}, emqx_coap_message:get_option(uri_query, Msg, #{})
+    ),
     case SubOpts of
         #{qos := _} ->
-            maps:merge(SubOpts, ?SUBOPTS);
+            maps:merge(?SUBOPTS, SubOpts);
         _ ->
             CfgType = emqx_conf:get([gateway, coap, subscribe_qos], ?QOS_0),
-            maps:merge(SubOpts, ?SUBOPTS#{qos => type_to_qos(CfgType, Msg)})
+            maps:merge(?SUBOPTS#{qos => type_to_qos(CfgType, Msg)}, SubOpts)
     end.
 
 parse_sub_opts(<<"qos">>, V, Opts) ->
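The swapped argument order matters because `maps:merge/2` gives precedence to the second map on conflicting keys; with the defaults first, options parsed from the request now override them instead of being clobbered. A shell sketch with invented defaults:

```erlang
1> Defaults = #{qos => 0, nl => 0, rap => 0}.
2> maps:merge(Defaults, #{qos => 2}).   %% new order: the requested qos wins
#{nl => 0,qos => 2,rap => 0}
3> maps:merge(#{qos => 2}, Defaults).   %% old order: the default clobbered it
#{nl => 0,qos => 0,rap => 0}
```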
@@ -345,6 +345,45 @@ t_subscribe(_) ->
         Topics
     ).
 
+t_subscribe_with_qos_opt(_) ->
+    Topics = [
+        {<<"abc">>, 0},
+        {<<"/abc">>, 1},
+        {<<"abc/d">>, 2}
+    ],
+    Fun = fun({Topic, Qos}, Channel, Token) ->
+        Payload = <<"123">>,
+        URI = pubsub_uri(binary_to_list(Topic), Token) ++ "&qos=" ++ integer_to_list(Qos),
+        Req = make_req(get, Payload, [{observe, 0}]),
+        {ok, content, _} = do_request(Channel, URI, Req),
+        ?LOGT("observer topic:~ts~n", [Topic]),
+
+        %% ensure the subscribe succeeds
+        timer:sleep(100),
+        [SubPid] = emqx:subscribers(Topic),
+        ?assert(is_pid(SubPid)),
+        ?assertEqual(Qos, maps:get(qos, emqx_broker:get_subopts(SubPid, Topic))),
+        %% publish a message
+        emqx:publish(emqx_message:make(Topic, Payload)),
+        {ok, content, Notify} = with_response(Channel),
+        ?LOGT("observer get Notif=~p", [Notify]),
+
+        #coap_content{payload = PayloadRecv} = Notify,
+
+        ?assertEqual(Payload, PayloadRecv)
+    end,
+
+    with_connection(Topics, Fun),
+
+    %% subscriptions are removed once the coap client disconnects
+    timer:sleep(100),
+    lists:foreach(
+        fun({Topic, _Qos}) ->
+            ?assertEqual([], emqx:subscribers(Topic))
+        end,
+        Topics
+    ).
+
 t_un_subscribe(_) ->
     %% can unsubscribe to a normal topic
     Topics = [
@@ -57,14 +57,22 @@ all() ->
 
 init_per_suite(Config) ->
     application:load(emqx_gateway_coap),
-    ok = emqx_common_test_helpers:load_config(emqx_gateway_schema, ?CONF_DEFAULT),
-    emqx_mgmt_api_test_util:init_suite([emqx_auth, emqx_gateway]),
-    Config.
+    Apps = emqx_cth_suite:start(
+        [
+            {emqx_conf, ?CONF_DEFAULT},
+            emqx_gateway,
+            emqx_auth,
+            emqx_management,
+            {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
+        ],
+        #{work_dir => emqx_cth_suite:work_dir(Config)}
+    ),
+    _ = emqx_common_test_http:create_default_app(),
+    [{suite_apps, Apps} | Config].
 
 end_per_suite(Config) ->
-    {ok, _} = emqx:remove_config([<<"gateway">>, <<"coap">>]),
-    emqx_mgmt_api_test_util:end_suite([emqx_gateway, emqx_auth]),
-    Config.
+    emqx_cth_suite:stop(?config(suite_apps, Config)),
+    emqx_config:delete_override_conf_files().
 
 %%--------------------------------------------------------------------
 %% Cases
@@ -112,7 +112,7 @@ setup_test(TestCase, Config) when
         end}
     ]
     ),
-    Nodes = [emqx_common_test_helpers:start_slave(Name, Opts) || {Name, Opts} <- Cluster],
+    Nodes = [emqx_common_test_helpers:start_peer(Name, Opts) || {Name, Opts} <- Cluster],
     [{nodes, Nodes}, {cluster, Cluster}, {old_license, LicenseKey}];
 setup_test(_TestCase, _Config) ->
     [].
@@ -42,8 +42,8 @@ t_cluster_query(_Config) ->
     ct:timetrap({seconds, 120}),
     snabbkaffe:fix_ct_logging(),
     [{Name, Opts}, {Name1, Opts1}] = cluster_specs(),
-    Node1 = emqx_common_test_helpers:start_slave(Name, Opts),
-    Node2 = emqx_common_test_helpers:start_slave(Name1, Opts1),
+    Node1 = emqx_common_test_helpers:start_peer(Name, Opts),
+    Node2 = emqx_common_test_helpers:start_peer(Name1, Opts1),
     try
         process_flag(trap_exit, true),
         ClientLs1 = [start_emqtt_client(Node1, I, 2883) || I <- lists:seq(1, 10)],
@@ -168,8 +168,8 @@ t_cluster_query(_Config) ->
         _ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs1),
         _ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs2)
     after
-        emqx_common_test_helpers:stop_slave(Node1),
-        emqx_common_test_helpers:stop_slave(Node2)
+        emqx_common_test_helpers:stop_peer(Node1),
+        emqx_common_test_helpers:stop_peer(Node2)
     end,
     ok.
 
@@ -54,8 +54,6 @@ t_cluster_topology_api_empty_resp(_) ->
     ).
 
 t_cluster_topology_api_replicants(Config) ->
-    %% some time to stabilize
-    timer:sleep(3000),
     [Core1, Core2, Replicant] = _NodesList = ?config(cluster, Config),
     {200, Core1Resp} = rpc:call(Core1, emqx_mgmt_api_cluster, cluster_topology, [get, #{}]),
     {200, Core2Resp} = rpc:call(Core2, emqx_mgmt_api_cluster, cluster_topology, [get, #{}]),
@@ -194,8 +194,8 @@ t_api_listeners_list_not_ready(Config) when is_list(Config) ->
     snabbkaffe:fix_ct_logging(),
     Cluster = [{Name, Opts}, {Name1, Opts1}] = cluster([core, core]),
     ct:pal("Starting ~p", [Cluster]),
-    Node1 = emqx_common_test_helpers:start_slave(Name, Opts),
-    Node2 = emqx_common_test_helpers:start_slave(Name1, Opts1),
+    Node1 = emqx_common_test_helpers:start_peer(Name, Opts),
+    Node2 = emqx_common_test_helpers:start_peer(Name1, Opts1),
     try
         L1 = get_tcp_listeners(Node1),
 
@@ -214,8 +214,8 @@ t_api_listeners_list_not_ready(Config) when is_list(Config) ->
     ?assert(length(L1) > length(L2), Comment),
     ?assertEqual(length(L2), length(L3), Comment)
     after
-        emqx_common_test_helpers:stop_slave(Node1),
-        emqx_common_test_helpers:stop_slave(Node2)
+        emqx_common_test_helpers:stop_peer(Node1),
+        emqx_common_test_helpers:stop_peer(Node2)
     end.
 
 t_clear_certs(Config) when is_list(Config) ->
@@ -129,8 +129,8 @@ t_multiple_nodes_api(_) ->
     Seq2 = list_to_atom(atom_to_list(?MODULE) ++ "2"),
     Cluster = [{Name, Opts}, {Name1, Opts1}] = cluster([{core, Seq1}, {core, Seq2}]),
     ct:pal("Starting ~p", [Cluster]),
-    Node1 = emqx_common_test_helpers:start_slave(Name, Opts),
-    Node2 = emqx_common_test_helpers:start_slave(Name1, Opts1),
+    Node1 = emqx_common_test_helpers:start_peer(Name, Opts),
+    Node2 = emqx_common_test_helpers:start_peer(Name1, Opts1),
     try
         {200, NodesList} = rpc:call(Node1, emqx_mgmt_api_nodes, nodes, [get, #{}]),
         All = [Node1, Node2],
@@ -148,8 +148,8 @@ t_multiple_nodes_api(_) ->
         ]),
         ?assertMatch(#{node := Node1}, Node11)
     after
-        emqx_common_test_helpers:stop_slave(Node1),
-        emqx_common_test_helpers:stop_slave(Node2)
+        emqx_common_test_helpers:stop_peer(Node1),
+        emqx_common_test_helpers:stop_peer(Node2)
     end,
     ok.
@@ -27,12 +27,12 @@ all() ->
 
 init_per_suite(Config) ->
     emqx_mgmt_api_test_util:init_suite(),
-    Slave = emqx_common_test_helpers:start_slave(some_node, []),
-    [{slave, Slave} | Config].
+    Peer = emqx_common_test_helpers:start_peer(node1, []),
+    [{peer, Peer} | Config].
 
 end_per_suite(Config) ->
-    Slave = ?config(slave, Config),
-    emqx_common_test_helpers:stop_slave(Slave),
+    Peer = ?config(peer, Config),
+    emqx_common_test_helpers:stop_peer(Peer),
     mria:clear_table(?ROUTE_TAB),
     emqx_mgmt_api_test_util:end_suite().
 
@@ -80,18 +80,18 @@ t_nodes_api(Config) ->
     %% get topics/:topic
     %% We add another route here to ensure that the response handles
     %% multiple routes for a single topic
-    Slave = ?config(slave, Config),
-    ok = emqx_router:add_route(Topic, Slave),
+    Peer = ?config(peer, Config),
+    ok = emqx_router:add_route(Topic, Peer),
     RoutePath = emqx_mgmt_api_test_util:api_path(["topics", Topic]),
     {ok, RouteResponse} = emqx_mgmt_api_test_util:request_api(get, RoutePath),
-    ok = emqx_router:delete_route(Topic, Slave),
+    ok = emqx_router:delete_route(Topic, Peer),
 
     [
         #{<<"topic">> := Topic, <<"node">> := Node1},
        #{<<"topic">> := Topic, <<"node">> := Node2}
     ] = emqx_utils_json:decode(RouteResponse, [return_maps]),
 
-    ?assertEqual(lists:usort([Node, atom_to_binary(Slave)]), lists:usort([Node1, Node2])),
+    ?assertEqual(lists:usort([Node, atom_to_binary(Peer)]), lists:usort([Node1, Node2])),
 
     ok = emqtt:stop(Client).
@@ -136,7 +136,7 @@ t_rebalance_node_crash(Config) ->
     ?assertWaitEvent(
         begin
             ok = rpc:call(DonorNode, emqx_node_rebalance, start, [Opts]),
-            emqx_common_test_helpers:stop_slave(RecipientNode)
+            emqx_common_test_helpers:stop_peer(RecipientNode)
         end,
         #{?snk_kind := emqx_node_rebalance_started},
         1000
@@ -628,11 +628,11 @@ group_t_copy_plugin_to_a_new_node({init, Config}) ->
             load_schema => false
         }
     ),
-    CopyFromNode = emqx_common_test_helpers:start_slave(
+    CopyFromNode = emqx_common_test_helpers:start_peer(
         CopyFrom, maps:remove(join_to, CopyFromOpts)
     ),
     ok = rpc:call(CopyFromNode, emqx_plugins, put_config, [install_dir, FromInstallDir]),
-    CopyToNode = emqx_common_test_helpers:start_slave(CopyTo, maps:remove(join_to, CopyToOpts)),
+    CopyToNode = emqx_common_test_helpers:start_peer(CopyTo, maps:remove(join_to, CopyToOpts)),
     ok = rpc:call(CopyToNode, emqx_plugins, put_config, [install_dir, ToInstallDir]),
     NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX),
     ok = rpc:call(CopyFromNode, emqx_plugins, ensure_installed, [NameVsn]),
@@ -662,8 +662,8 @@ group_t_copy_plugin_to_a_new_node({'end', Config}) ->
     ok = rpc:call(CopyToNode, emqx_config, delete_override_conf_files, []),
     rpc:call(CopyToNode, ekka, leave, []),
     rpc:call(CopyFromNode, ekka, leave, []),
-    ok = emqx_common_test_helpers:stop_slave(CopyToNode),
-    ok = emqx_common_test_helpers:stop_slave(CopyFromNode),
+    ok = emqx_common_test_helpers:stop_peer(CopyToNode),
+    ok = emqx_common_test_helpers:stop_peer(CopyFromNode),
     ok = file:del_dir_r(proplists:get_value(to_install_dir, Config)),
     ok = file:del_dir_r(proplists:get_value(from_install_dir, Config));
 group_t_copy_plugin_to_a_new_node(Config) ->
@@ -737,7 +737,6 @@ group_t_copy_plugin_to_a_new_node_single_node({init, Config}) ->
             end,
             priv_data_dir => PrivDataDir,
             schema_mod => emqx_conf_schema,
-            peer_mod => slave,
             load_schema => true
         }
     ),
@@ -751,7 +750,7 @@ group_t_copy_plugin_to_a_new_node_single_node({init, Config}) ->
     ];
 group_t_copy_plugin_to_a_new_node_single_node({'end', Config}) ->
     CopyToNode = proplists:get_value(copy_to_node_name, Config),
-    ok = emqx_common_test_helpers:stop_slave(CopyToNode),
+    ok = emqx_common_test_helpers:stop_peer(CopyToNode),
     ok = file:del_dir_r(proplists:get_value(to_install_dir, Config)),
     ok;
 group_t_copy_plugin_to_a_new_node_single_node(Config) ->
@@ -762,7 +761,7 @@ group_t_copy_plugin_to_a_new_node_single_node(Config) ->
     %% Start the node for the first time. The plugin should start
     %% successfully even if it's not extracted yet. Simply starting
     %% the node would crash if not working properly.
-    CopyToNode = emqx_common_test_helpers:start_slave(CopyTo, CopyToOpts),
+    CopyToNode = emqx_common_test_helpers:start_peer(CopyTo, CopyToOpts),
     ct:pal("~p config:\n ~p", [
         CopyToNode, erpc:call(CopyToNode, emqx_plugins, get_config, [[], #{}])
     ]),
@@ -805,11 +804,10 @@ group_t_cluster_leave({init, Config}) ->
             end,
             priv_data_dir => PrivDataDir,
             schema_mod => emqx_conf_schema,
-            peer_mod => slave,
             load_schema => true
         }
     ),
-    Nodes = [emqx_common_test_helpers:start_slave(Name, Opts) || {Name, Opts} <- Cluster],
+    Nodes = [emqx_common_test_helpers:start_peer(Name, Opts) || {Name, Opts} <- Cluster],
     [
         {to_install_dir, ToInstallDir},
         {cluster, Cluster},
@@ -820,7 +818,7 @@ group_t_cluster_leave({init, Config}) ->
     ];
 group_t_cluster_leave({'end', Config}) ->
     Nodes = proplists:get_value(nodes, Config),
-    [ok = emqx_common_test_helpers:stop_slave(N) || N <- Nodes],
+    [ok = emqx_common_test_helpers:stop_peer(N) || N <- Nodes],
     ok = file:del_dir_r(proplists:get_value(to_install_dir, Config)),
     ok;
 group_t_cluster_leave(Config) ->
@@ -49,7 +49,11 @@ fields("connection_fields") ->
     adjust_fields(emqx_connector_schema_lib:relational_db_fields()) ++
         emqx_connector_schema_lib:ssl_fields();
 fields("config_connector") ->
-    fields("connection_fields") ++ emqx_connector_schema:common_fields();
+    fields("connection_fields") ++
+        emqx_connector_schema:common_fields() ++
+        emqx_connector_schema:resource_opts_ref(?MODULE, resource_opts);
+fields(resource_opts) ->
+    emqx_connector_schema:resource_opts_fields();
 fields(config) ->
     fields("config_connector") ++
         fields(action);
@@ -159,5 +163,7 @@ values(common) ->
 
 desc("config_connector") ->
     ?DESC("config_connector");
+desc(resource_opts) ->
+    ?DESC(emqx_resource_schema, "resource_opts");
 desc(_) ->
     undefined.
@@ -101,7 +101,10 @@
     max_buffer_bytes => pos_integer(),
     query_mode => query_mode(),
     resume_interval => pos_integer(),
-    inflight_window => pos_integer()
+    inflight_window => pos_integer(),
+    %% Only for `emqx_resource_manager' usage.  If false, prevents spawning buffer
+    %% workers, regardless of resource query mode.
+    spawn_buffer_workers => boolean()
 }.
 -type query_result() ::
     ok
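A sketch of a call site for the new flag (the option map follows the type above; the create call and values are illustrative, not the exact usage in this commit):

```erlang
%% Connectors do not own buffer workers, so they opt out at creation time;
%% buffer-related options stay with the action that needs them.
CreationOpts = #{
    health_check_interval => 15000,
    start_after_created => true,
    spawn_buffer_workers => false
},
{ok, _} = emqx_resource:create_local(ResId, Group, ResourceType, Config, CreationOpts).
```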
@@ -201,9 +201,9 @@
 
 %% when calling emqx_resource:health_check/2
 -callback on_get_status(resource_id(), resource_state()) ->
-    resource_status()
-    | {resource_status(), resource_state()}
-    | {resource_status(), resource_state(), term()}.
+    health_check_status()
+    | {health_check_status(), resource_state()}
+    | {health_check_status(), resource_state(), term()}.
 
 -callback on_get_channel_status(resource_id(), channel_id(), resource_state()) ->
     channel_status()
@@ -248,7 +248,7 @@
             {error, Reason};
         C:E:S ->
             {error, #{
-                execption => C,
+                exception => C,
                 reason => emqx_utils:redact(E),
                 stacktrace => emqx_utils:redact(S)
             }}
@@ -1077,9 +1077,11 @@ handle_async_worker_down(Data0, Pid) ->
 call_query(QM, Id, Index, Ref, Query, QueryOpts) ->
     ?tp(call_query_enter, #{id => Id, query => Query, query_mode => QM}),
     case emqx_resource_manager:lookup_cached(extract_connector_id(Id)) of
-        {ok, _Group, #{status := stopped}} ->
+        %% This seems to be the only place where the `rm_status_stopped' status matters,
+        %% to distinguish from the `disconnected' status.
+        {ok, _Group, #{status := ?rm_status_stopped}} ->
             ?RESOURCE_ERROR(stopped, "resource stopped or disabled");
-        {ok, _Group, #{status := connecting, error := unhealthy_target}} ->
+        {ok, _Group, #{status := ?status_connecting, error := unhealthy_target}} ->
             {error, {unrecoverable_error, unhealthy_target}};
         {ok, _Group, Resource} ->
             do_call_query(QM, Id, Index, Ref, Query, QueryOpts, Resource);
@@ -85,7 +85,19 @@
 -define(T_OPERATION, 5000).
 -define(T_LOOKUP, 1000).
 
--define(IS_STATUS(ST), ST =:= connecting; ST =:= connected; ST =:= disconnected).
+%% `gen_statem' states
+%% Note: most of them coincide with resource _status_.  We use a different set of macros
+%% to avoid mixing those concepts up.
+%% Also note: the `stopped' _status_ can only be emitted by `emqx_resource_manager'...
+%% Modules implementing `emqx_resource' behavior should not return it.
+-define(state_connected, connected).
+-define(state_connecting, connecting).
+-define(state_disconnected, disconnected).
+-define(state_stopped, stopped).
+
+-define(IS_STATUS(ST),
+    ST =:= ?status_connecting; ST =:= ?status_connected; ST =:= ?status_disconnected
+).
 
 %%------------------------------------------------------------------------------
 %% API
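Since `?IS_STATUS/1` expands to a semicolon-separated guard sequence, it can be used directly in guard position. A self-contained demo (the macros are copied locally so the module compiles on its own):

```erlang
-module(status_macros_demo).
-export([is_status/1]).

%% Local copies of the macros above, so this sketch is standalone.
-define(status_connected, connected).
-define(status_connecting, connecting).
-define(status_disconnected, disconnected).
-define(IS_STATUS(ST),
    ST =:= ?status_connecting; ST =:= ?status_connected; ST =:= ?status_disconnected
).

%% The macro expands into the full `A; B; C' guard sequence:
is_status(ST) when ?IS_STATUS(ST) -> true;
is_status(_) -> false.
```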
@@ -136,16 +148,9 @@ create(ResId, Group, ResourceType, Config, Opts) ->
     % Create metrics for the resource
     ok = emqx_resource:create_metrics(ResId),
     QueryMode = emqx_resource:query_mode(ResourceType, Config, Opts),
-    case QueryMode of
-        %% the resource has built-in buffer, so there is no need for resource workers
-        simple_sync_internal_buffer ->
-            ok;
-        simple_async_internal_buffer ->
-            ok;
-        %% The resource is a consumer resource, so there is no need for resource workers
-        no_queries ->
-            ok;
-        _ ->
+    SpawnBufferWorkers = maps:get(spawn_buffer_workers, Opts, true),
+    case SpawnBufferWorkers andalso lists:member(QueryMode, [sync, async]) of
+        true ->
             %% start resource workers as the query type requires them
             ok = emqx_resource_buffer_worker_sup:start_workers(ResId, Opts),
             case maps:get(start_after_created, Opts, ?START_AFTER_CREATED) of
@@ -153,7 +158,9 @@ create(ResId, Group, ResourceType, Config, Opts) ->
                 wait_for_ready(ResId, maps:get(start_timeout, Opts, ?START_TIMEOUT));
             false ->
                 ok
-            end
+            end;
+        false ->
+            ok
     end.
 
 %% @doc Called from `emqx_resource` when doing a dry run for creating a resource instance.
@@ -397,12 +404,12 @@ init({DataIn, Opts}) ->
     case maps:get(start_after_created, Opts, ?START_AFTER_CREATED) of
         true ->
             %% init the cache so that lookup/1 will always return something
-            UpdatedData = update_state(Data#data{status = connecting}),
-            {ok, connecting, UpdatedData, {next_event, internal, start_resource}};
+            UpdatedData = update_state(Data#data{status = ?status_connecting}),
+            {ok, ?state_connecting, UpdatedData, {next_event, internal, start_resource}};
         false ->
             %% init the cache so that lookup/1 will always return something
-            UpdatedData = update_state(Data#data{status = stopped}),
-            {ok, stopped, UpdatedData}
+            UpdatedData = update_state(Data#data{status = ?rm_status_stopped}),
+            {ok, ?state_stopped, UpdatedData}
     end.
 
 terminate({shutdown, removed}, _State, _Data) ->
@@ -420,26 +427,26 @@ callback_mode() -> [handle_event_function, state_enter].
 
 % Called during testing to force a specific state
 handle_event({call, From}, set_resource_status_connecting, _State, Data) ->
-    UpdatedData = update_state(Data#data{status = connecting}, Data),
-    {next_state, connecting, UpdatedData, [{reply, From, ok}]};
+    UpdatedData = update_state(Data#data{status = ?status_connecting}, Data),
+    {next_state, ?state_connecting, UpdatedData, [{reply, From, ok}]};
 % Called when the resource is to be restarted
 handle_event({call, From}, restart, _State, Data) ->
     DataNext = stop_resource(Data),
     start_resource(DataNext, From);
 % Called when the resource is to be started (also used for manual reconnect)
 handle_event({call, From}, start, State, Data) when
-    State =:= stopped orelse
-        State =:= disconnected
+    State =:= ?state_stopped orelse
+        State =:= ?state_disconnected
 ->
     start_resource(Data, From);
 handle_event({call, From}, start, _State, _Data) ->
     {keep_state_and_data, [{reply, From, ok}]};
 % Called when the resource is to be stopped
-handle_event({call, From}, stop, stopped, _Data) ->
+handle_event({call, From}, stop, ?state_stopped, _Data) ->
     {keep_state_and_data, [{reply, From, ok}]};
 handle_event({call, From}, stop, _State, Data) ->
     UpdatedData = stop_resource(Data),
-    {next_state, stopped, update_state(UpdatedData, Data), [{reply, From, ok}]};
+    {next_state, ?state_stopped, update_state(UpdatedData, Data), [{reply, From, ok}]};
 % Called when a resource is to be stopped and removed.
 handle_event({call, From}, {remove, ClearMetrics}, _State, Data) ->
     handle_remove_event(From, ClearMetrics, Data);
@@ -448,10 +455,10 @@ handle_event({call, From}, lookup, _State, #data{group = Group} = Data) ->
     Reply = {ok, Group, data_record_to_external_map(Data)},
     {keep_state_and_data, [{reply, From, Reply}]};
 % Called when doing a manual health check.
-handle_event({call, From}, health_check, stopped, _Data) ->
+handle_event({call, From}, health_check, ?state_stopped, _Data) ->
     Actions = [{reply, From, {error, resource_is_stopped}}],
     {keep_state_and_data, Actions};
-handle_event({call, From}, {channel_health_check, _}, stopped, _Data) ->
+handle_event({call, From}, {channel_health_check, _}, ?state_stopped, _Data) ->
     Actions = [{reply, From, {error, resource_is_stopped}}],
     {keep_state_and_data, Actions};
 handle_event({call, From}, health_check, _State, Data) ->
@@ -459,47 +466,47 @@ handle_event({call, From}, health_check, _State, Data) ->
 handle_event({call, From}, {channel_health_check, ChannelId}, _State, Data) ->
     handle_manually_channel_health_check(From, Data, ChannelId);
 % State: CONNECTING
-handle_event(enter, _OldState, connecting = State, Data) ->
-    ok = log_state_consistency(State, Data),
+handle_event(enter, _OldState, ?state_connecting = State, Data) ->
+    ok = log_status_consistency(State, Data),
     {keep_state_and_data, [{state_timeout, 0, health_check}]};
-handle_event(internal, start_resource, connecting, Data) ->
+handle_event(internal, start_resource, ?state_connecting, Data) ->
     start_resource(Data, undefined);
-handle_event(state_timeout, health_check, connecting, Data) ->
+handle_event(state_timeout, health_check, ?state_connecting, Data) ->
     handle_connecting_health_check(Data);
 handle_event(
-    {call, From}, {remove_channel, ChannelId}, connecting = _State, Data
+    {call, From}, {remove_channel, ChannelId}, ?state_connecting = _State, Data
 ) ->
     handle_remove_channel(From, ChannelId, Data);
 %% State: CONNECTED
 %% The connected state is entered after a successful on_start/2 of the callback mod
 %% and successful health_checks
-handle_event(enter, _OldState, connected = State, Data) ->
-    ok = log_state_consistency(State, Data),
+handle_event(enter, _OldState, ?state_connected = State, Data) ->
+    ok = log_status_consistency(State, Data),
     _ = emqx_alarm:safe_deactivate(Data#data.id),
     ?tp(resource_connected_enter, #{}),
     {keep_state_and_data, health_check_actions(Data)};
-handle_event(state_timeout, health_check, connected, Data) ->
+handle_event(state_timeout, health_check, ?state_connected, Data) ->
     handle_connected_health_check(Data);
 handle_event(
-    {call, From}, {add_channel, ChannelId, Config}, connected = _State, Data
+    {call, From}, {add_channel, ChannelId, Config}, ?state_connected = _State, Data
 ) ->
     handle_add_channel(From, Data, ChannelId, Config);
 handle_event(
-    {call, From}, {remove_channel, ChannelId}, connected = _State, Data
+    {call, From}, {remove_channel, ChannelId}, ?state_connected = _State, Data
 ) ->
     handle_remove_channel(From, ChannelId, Data);
 %% State: DISCONNECTED
-handle_event(enter, _OldState, disconnected = State, Data) ->
-    ok = log_state_consistency(State, Data),
+handle_event(enter, _OldState, ?state_disconnected = State, Data) ->
+    ok = log_status_consistency(State, Data),
     ?tp(resource_disconnected_enter, #{}),
     {keep_state_and_data, retry_actions(Data)};
-handle_event(state_timeout, auto_retry, disconnected, Data) ->
+handle_event(state_timeout, auto_retry, ?state_disconnected, Data) ->
     ?tp(resource_auto_reconnect, #{}),
     start_resource(Data, undefined);
 %% State: STOPPED
 %% The stopped state is entered after the resource has been explicitly stopped
-handle_event(enter, _OldState, stopped = State, Data) ->
-    ok = log_state_consistency(State, Data),
+handle_event(enter, _OldState, ?state_stopped = State, Data) ->
+    ok = log_status_consistency(State, Data),
     {keep_state_and_data, []};
 %% The following events can be handled in any other state
 handle_event(
@@ -529,11 +536,11 @@ handle_event(EventType, EventData, State, Data) ->
     ),
     keep_state_and_data.
 
-log_state_consistency(State, #data{status = State} = Data) ->
+log_status_consistency(Status, #data{status = Status} = Data) ->
     log_cache_consistency(read_cache(Data#data.id), Data);
-log_state_consistency(State, Data) ->
-    ?tp(warning, "inconsistent_state", #{
-        state => State,
+log_status_consistency(Status, Data) ->
+    ?tp(warning, "inconsistent_status", #{
+        status => Status,
         data => emqx_utils:redact(Data)
     }).
@@ -591,25 +598,25 @@ start_resource(Data, From) ->
     %% in case the emqx_resource:call_start/2 hangs, the lookup/1 can read status from the cache
     case emqx_resource:call_start(Data#data.id, Data#data.mod, Data#data.config) of
         {ok, ResourceState} ->
-            UpdatedData1 = Data#data{status = connecting, state = ResourceState},
+            UpdatedData1 = Data#data{status = ?status_connecting, state = ResourceState},
             %% Perform an initial health_check immediately before transitioning into a connected state
             UpdatedData2 = add_channels(UpdatedData1),
             Actions = maybe_reply([{state_timeout, 0, health_check}], From, ok),
-            {next_state, connecting, update_state(UpdatedData2, Data), Actions};
+            {next_state, ?state_connecting, update_state(UpdatedData2, Data), Actions};
         {error, Reason} = Err ->
             ?SLOG(warning, #{
                 msg => "start_resource_failed",
                 id => Data#data.id,
                 reason => Reason
            }),
-            _ = maybe_alarm(disconnected, Data#data.id, Err, Data#data.error),
+            _ = maybe_alarm(?status_disconnected, Data#data.id, Err, Data#data.error),
             %% Add channels and raise alarms
-            NewData1 = channels_health_check(disconnected, add_channels(Data)),
+            NewData1 = channels_health_check(?status_disconnected, add_channels(Data)),
             %% Keep track of the error reason why the connection did not work
             %% so that the Reason can be returned when the verification call is made.
-            NewData2 = NewData1#data{status = disconnected, error = Err},
+            NewData2 = NewData1#data{status = ?status_disconnected, error = Err},
             Actions = maybe_reply(retry_actions(NewData2), From, Err),
-            {next_state, disconnected, update_state(NewData2, Data), Actions}
+            {next_state, ?state_disconnected, update_state(NewData2, Data), Actions}
     end.
 
 add_channels(Data) ->
@@ -666,13 +673,13 @@ add_channels_in_list([{ChannelID, ChannelConfig} | Rest], Data) ->
                 added_channels = NewAddedChannelsMap
             },
             %% Raise an alarm since the channel could not be added
-            _ = maybe_alarm(disconnected, ChannelID, Error, no_prev_error),
+            _ = maybe_alarm(?status_disconnected, ChannelID, Error, no_prev_error),
             add_channels_in_list(Rest, NewData)
     end.
 
-maybe_stop_resource(#data{status = Status} = Data) when Status /= stopped ->
+maybe_stop_resource(#data{status = Status} = Data) when Status =/= ?rm_status_stopped ->
     stop_resource(Data);
-maybe_stop_resource(#data{status = stopped} = Data) ->
+maybe_stop_resource(#data{status = ?rm_status_stopped} = Data) ->
     Data.
 
 stop_resource(#data{state = ResState, id = ResId} = Data) ->
@@ -691,7 +698,7 @@ stop_resource(#data{state = ResState, id = ResId} = Data) ->
     end,
     _ = maybe_clear_alarm(ResId),
     ok = emqx_metrics_worker:reset_metrics(?RES_METRICS, ResId),
-    NewData#data{status = stopped}.
+    NewData#data{status = ?rm_status_stopped}.
|
NewData#data{status = ?rm_status_stopped}.
|
||||||
|
|
||||||
remove_channels(Data) ->
|
remove_channels(Data) ->
|
||||||
Channels = maps:keys(Data#data.added_channels),
|
Channels = maps:keys(Data#data.added_channels),
|
||||||
|
@ -706,7 +713,7 @@ remove_channels_in_list([ChannelID | Rest], Data, KeepInChannelMap) ->
|
||||||
true ->
|
true ->
|
||||||
AddedChannelsMap;
|
AddedChannelsMap;
|
||||||
false ->
|
false ->
|
||||||
maybe_clear_alarm(ChannelID),
|
_ = maybe_clear_alarm(ChannelID),
|
||||||
maps:remove(ChannelID, AddedChannelsMap)
|
maps:remove(ChannelID, AddedChannelsMap)
|
||||||
end,
|
end,
|
||||||
case safe_call_remove_channel(Data#data.id, Data#data.mod, Data#data.state, ChannelID) of
|
case safe_call_remove_channel(Data#data.id, Data#data.mod, Data#data.state, ChannelID) of
|
||||||
|
@ -858,13 +865,15 @@ handle_connecting_health_check(Data) ->
|
||||||
with_health_check(
|
with_health_check(
|
||||||
Data,
|
Data,
|
||||||
fun
|
fun
|
||||||
(connected, UpdatedData) ->
|
(?status_connected, UpdatedData) ->
|
||||||
{next_state, connected, channels_health_check(connected, UpdatedData)};
|
{next_state, ?state_connected,
|
||||||
(connecting, UpdatedData) ->
|
channels_health_check(?status_connected, UpdatedData)};
|
||||||
{keep_state, channels_health_check(connecting, UpdatedData),
|
(?status_connecting, UpdatedData) ->
|
||||||
|
{keep_state, channels_health_check(?status_connecting, UpdatedData),
|
||||||
health_check_actions(UpdatedData)};
|
health_check_actions(UpdatedData)};
|
||||||
(disconnected, UpdatedData) ->
|
(?status_disconnected, UpdatedData) ->
|
||||||
{next_state, disconnected, channels_health_check(disconnected, UpdatedData)}
|
{next_state, ?state_disconnected,
|
||||||
|
channels_health_check(?status_disconnected, UpdatedData)}
|
||||||
end
|
end
|
||||||
).
|
).
|
||||||
|
|
||||||
|
@ -872,8 +881,8 @@ handle_connected_health_check(Data) ->
|
||||||
with_health_check(
|
with_health_check(
|
||||||
Data,
|
Data,
|
||||||
fun
|
fun
|
||||||
(connected, UpdatedData0) ->
|
(?status_connected, UpdatedData0) ->
|
||||||
UpdatedData1 = channels_health_check(connected, UpdatedData0),
|
UpdatedData1 = channels_health_check(?status_connected, UpdatedData0),
|
||||||
{keep_state, UpdatedData1, health_check_actions(UpdatedData1)};
|
{keep_state, UpdatedData1, health_check_actions(UpdatedData1)};
|
||||||
(Status, UpdatedData) ->
|
(Status, UpdatedData) ->
|
||||||
?SLOG(warning, #{
|
?SLOG(warning, #{
|
||||||
|
@ -881,6 +890,10 @@ handle_connected_health_check(Data) ->
|
||||||
id => Data#data.id,
|
id => Data#data.id,
|
||||||
status => Status
|
status => Status
|
||||||
}),
|
}),
|
||||||
|
%% Note: works because, coincidentally, channel/resource status is a
|
||||||
|
%% subset of resource manager state... But there should be a conversion
|
||||||
|
%% between the two here, as resource manager also has `stopped', which is
|
||||||
|
%% not a valid status at the time of writing.
|
||||||
{next_state, Status, channels_health_check(Status, UpdatedData)}
|
{next_state, Status, channels_health_check(Status, UpdatedData)}
|
||||||
end
|
end
|
||||||
).
|
).
|
||||||
|
@ -898,7 +911,8 @@ with_health_check(#data{error = PrevError} = Data, Func) ->
|
||||||
},
|
},
|
||||||
Func(Status, update_state(UpdatedData, Data)).
|
Func(Status, update_state(UpdatedData, Data)).
|
||||||
|
|
||||||
channels_health_check(connected = _ResourceStatus, Data0) ->
|
-spec channels_health_check(resource_status(), data()) -> data().
|
||||||
|
channels_health_check(?status_connected = _ConnectorStatus, Data0) ->
|
||||||
Channels = maps:to_list(Data0#data.added_channels),
|
Channels = maps:to_list(Data0#data.added_channels),
|
||||||
%% All channels with a stutus different from connected or connecting are
|
%% All channels with a stutus different from connected or connecting are
|
||||||
%% not added
|
%% not added
|
||||||
|
@ -914,7 +928,7 @@ channels_health_check(connected = _ResourceStatus, Data0) ->
|
||||||
%% Now that we have done the adding, we can get the status of all channels
|
%% Now that we have done the adding, we can get the status of all channels
|
||||||
Data2 = channel_status_for_all_channels(Data1),
|
Data2 = channel_status_for_all_channels(Data1),
|
||||||
update_state(Data2, Data0);
|
update_state(Data2, Data0);
|
||||||
channels_health_check(connecting, Data0) ->
|
channels_health_check(?status_connecting = _ConnectorStatus, Data0) ->
|
||||||
%% Whenever the resource is connecting:
|
%% Whenever the resource is connecting:
|
||||||
%% 1. Change the status of all added channels to connecting
|
%% 1. Change the status of all added channels to connecting
|
||||||
%% 2. Raise alarms (TODO: if it is a probe we should not raise alarms)
|
%% 2. Raise alarms (TODO: if it is a probe we should not raise alarms)
|
||||||
|
@ -926,7 +940,7 @@ channels_health_check(connecting, Data0) ->
|
||||||
],
|
],
|
||||||
ChannelsWithNewStatuses =
|
ChannelsWithNewStatuses =
|
||||||
[
|
[
|
||||||
{ChannelId, channel_status({connecting, resource_is_connecting})}
|
{ChannelId, channel_status({?status_connecting, resource_is_connecting})}
|
||||||
|| ChannelId <- ChannelsToChangeStatusFor
|
|| ChannelId <- ChannelsToChangeStatusFor
|
||||||
],
|
],
|
||||||
%% Update the channels map
|
%% Update the channels map
|
||||||
|
@ -945,13 +959,13 @@ channels_health_check(connecting, Data0) ->
|
||||||
%% Raise alarms for all channels
|
%% Raise alarms for all channels
|
||||||
lists:foreach(
|
lists:foreach(
|
||||||
fun({ChannelId, Status, PrevStatus}) ->
|
fun({ChannelId, Status, PrevStatus}) ->
|
||||||
maybe_alarm(connecting, ChannelId, Status, PrevStatus)
|
maybe_alarm(?status_connecting, ChannelId, Status, PrevStatus)
|
||||||
end,
|
end,
|
||||||
ChannelsWithNewAndPrevErrorStatuses
|
ChannelsWithNewAndPrevErrorStatuses
|
||||||
),
|
),
|
||||||
Data1 = Data0#data{added_channels = NewChannels},
|
Data1 = Data0#data{added_channels = NewChannels},
|
||||||
update_state(Data1, Data0);
|
update_state(Data1, Data0);
|
||||||
channels_health_check(ResourceStatus, Data0) ->
|
channels_health_check(ConnectorStatus, Data0) ->
|
||||||
%% Whenever the resource is not connected and not connecting:
|
%% Whenever the resource is not connected and not connecting:
|
||||||
%% 1. Remove all added channels
|
%% 1. Remove all added channels
|
||||||
%% 2. Change the status to an error status
|
%% 2. Change the status to an error status
|
||||||
|
@ -969,7 +983,7 @@ channels_health_check(ResourceStatus, Data0) ->
|
||||||
channel_status(
|
channel_status(
|
||||||
{error,
|
{error,
|
||||||
resource_not_connected_channel_error_msg(
|
resource_not_connected_channel_error_msg(
|
||||||
ResourceStatus,
|
ConnectorStatus,
|
||||||
ChannelId,
|
ChannelId,
|
||||||
Data1
|
Data1
|
||||||
)}
|
)}
|
||||||
|
@ -1025,7 +1039,7 @@ channel_status_for_all_channels(Data) ->
|
||||||
%% Raise/clear alarms
|
%% Raise/clear alarms
|
||||||
lists:foreach(
|
lists:foreach(
|
||||||
fun
|
fun
|
||||||
({ID, _OldStatus, #{status := connected}}) ->
|
({ID, _OldStatus, #{status := ?status_connected}}) ->
|
||||||
_ = maybe_clear_alarm(ID);
|
_ = maybe_clear_alarm(ID);
|
||||||
({ID, OldStatus, NewStatus}) ->
|
({ID, OldStatus, NewStatus}) ->
|
||||||
_ = maybe_alarm(NewStatus, ID, NewStatus, OldStatus)
|
_ = maybe_alarm(NewStatus, ID, NewStatus, OldStatus)
|
||||||
|
@ -1071,9 +1085,11 @@ get_config_from_map_or_channel_status(ChannelId, ChannelIdToConfig, ChannelStatu
|
||||||
Config
|
Config
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
-spec update_state(data()) -> data().
|
||||||
update_state(Data) ->
|
update_state(Data) ->
|
||||||
update_state(Data, undefined).
|
update_state(Data, undefined).
|
||||||
|
|
||||||
|
-spec update_state(data(), data() | undefined) -> data().
|
||||||
update_state(DataWas, DataWas) ->
|
update_state(DataWas, DataWas) ->
|
||||||
DataWas;
|
DataWas;
|
||||||
update_state(Data, _DataWas) ->
|
update_state(Data, _DataWas) ->
|
||||||
|
@ -1083,7 +1099,8 @@ update_state(Data, _DataWas) ->
|
||||||
health_check_interval(Opts) ->
|
health_check_interval(Opts) ->
|
||||||
maps:get(health_check_interval, Opts, ?HEALTHCHECK_INTERVAL).
|
maps:get(health_check_interval, Opts, ?HEALTHCHECK_INTERVAL).
|
||||||
|
|
||||||
maybe_alarm(connected, _ResId, _Error, _PrevError) ->
|
-spec maybe_alarm(resource_status(), resource_id(), _Error :: term(), _PrevError :: term()) -> ok.
|
||||||
|
maybe_alarm(?status_connected, _ResId, _Error, _PrevError) ->
|
||||||
ok;
|
ok;
|
||||||
maybe_alarm(_Status, <<?TEST_ID_PREFIX, _/binary>>, _Error, _PrevError) ->
|
maybe_alarm(_Status, <<?TEST_ID_PREFIX, _/binary>>, _Error, _PrevError) ->
|
||||||
ok;
|
ok;
|
||||||
|
@ -1095,7 +1112,7 @@ maybe_alarm(_Status, ResId, Error, _PrevError) ->
|
||||||
case Error of
|
case Error of
|
||||||
{error, undefined} -> <<"Unknown reason">>;
|
{error, undefined} -> <<"Unknown reason">>;
|
||||||
{error, Reason} -> emqx_utils:readable_error_msg(Reason);
|
{error, Reason} -> emqx_utils:readable_error_msg(Reason);
|
||||||
Error -> emqx_utils:readable_error_msg(Error)
|
_ -> emqx_utils:readable_error_msg(Error)
|
||||||
end,
|
end,
|
||||||
emqx_alarm:safe_activate(
|
emqx_alarm:safe_activate(
|
||||||
ResId,
|
ResId,
|
||||||
|
@ -1104,7 +1121,8 @@ maybe_alarm(_Status, ResId, Error, _PrevError) ->
|
||||||
),
|
),
|
||||||
?tp(resource_activate_alarm, #{resource_id => ResId}).
|
?tp(resource_activate_alarm, #{resource_id => ResId}).
|
||||||
|
|
||||||
maybe_resume_resource_workers(ResId, connected) ->
|
-spec maybe_resume_resource_workers(resource_id(), resource_status()) -> ok.
|
||||||
|
maybe_resume_resource_workers(ResId, ?status_connected) ->
|
||||||
lists:foreach(
|
lists:foreach(
|
||||||
fun emqx_resource_buffer_worker:resume/1,
|
fun emqx_resource_buffer_worker:resume/1,
|
||||||
emqx_resource_buffer_worker_sup:worker_pids(ResId)
|
emqx_resource_buffer_worker_sup:worker_pids(ResId)
|
||||||
|
@ -1112,6 +1130,7 @@ maybe_resume_resource_workers(ResId, connected) ->
|
||||||
maybe_resume_resource_workers(_, _) ->
|
maybe_resume_resource_workers(_, _) ->
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
-spec maybe_clear_alarm(resource_id()) -> ok | {error, not_found}.
|
||||||
maybe_clear_alarm(<<?TEST_ID_PREFIX, _/binary>>) ->
|
maybe_clear_alarm(<<?TEST_ID_PREFIX, _/binary>>) ->
|
||||||
ok;
|
ok;
|
||||||
maybe_clear_alarm(ResId) ->
|
maybe_clear_alarm(ResId) ->
|
||||||
|
@ -1132,9 +1151,9 @@ parse_health_check_result({error, Error}, Data) ->
|
||||||
reason => Error
|
reason => Error
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
{disconnected, Data#data.state, {error, Error}}.
|
{?status_disconnected, Data#data.state, {error, Error}}.
|
||||||
|
|
||||||
status_to_error(connected) ->
|
status_to_error(?status_connected) ->
|
||||||
undefined;
|
undefined;
|
||||||
status_to_error(_) ->
|
status_to_error(_) ->
|
||||||
{error, undefined}.
|
{error, undefined}.
|
||||||
|
@ -1170,9 +1189,9 @@ do_wait_for_ready(_ResId, 0) ->
|
||||||
timeout;
|
timeout;
|
||||||
do_wait_for_ready(ResId, Retry) ->
|
do_wait_for_ready(ResId, Retry) ->
|
||||||
case try_read_cache(ResId) of
|
case try_read_cache(ResId) of
|
||||||
#data{status = connected} ->
|
#data{status = ?status_connected} ->
|
||||||
ok;
|
ok;
|
||||||
#data{status = disconnected, error = Err} ->
|
#data{status = ?status_disconnected, error = Err} ->
|
||||||
{error, external_error(Err)};
|
{error, external_error(Err)};
|
||||||
_ ->
|
_ ->
|
||||||
timer:sleep(?WAIT_FOR_RESOURCE_DELAY),
|
timer:sleep(?WAIT_FOR_RESOURCE_DELAY),
|
||||||
|
@ -1203,7 +1222,7 @@ channel_status() ->
|
||||||
%% - connected: the channel is added to the resource, the resource is
|
%% - connected: the channel is added to the resource, the resource is
|
||||||
%% connected and the on_channel_get_status callback has returned
|
%% connected and the on_channel_get_status callback has returned
|
||||||
%% connected. The error field should be undefined.
|
%% connected. The error field should be undefined.
|
||||||
status => disconnected,
|
status => ?status_disconnected,
|
||||||
error => not_added_yet
|
error => not_added_yet
|
||||||
}.
|
}.
|
||||||
|
|
||||||
|
@ -1212,20 +1231,20 @@ channel_status() ->
|
||||||
%% anywhere else in that case.
|
%% anywhere else in that case.
|
||||||
channel_status_new_with_config(Config) ->
|
channel_status_new_with_config(Config) ->
|
||||||
#{
|
#{
|
||||||
status => disconnected,
|
status => ?status_disconnected,
|
||||||
error => not_added_yet,
|
error => not_added_yet,
|
||||||
config => Config
|
config => Config
|
||||||
}.
|
}.
|
||||||
|
|
||||||
channel_status_new_waiting_for_health_check() ->
|
channel_status_new_waiting_for_health_check() ->
|
||||||
#{
|
#{
|
||||||
status => connecting,
|
status => ?status_connecting,
|
||||||
error => no_health_check_yet
|
error => no_health_check_yet
|
||||||
}.
|
}.
|
||||||
|
|
||||||
channel_status({connecting, Error}) ->
|
channel_status({?status_connecting, Error}) ->
|
||||||
#{
|
#{
|
||||||
status => connecting,
|
status => ?status_connecting,
|
||||||
error => Error
|
error => Error
|
||||||
};
|
};
|
||||||
channel_status(?status_disconnected) ->
|
channel_status(?status_disconnected) ->
|
||||||
|
@ -1233,40 +1252,41 @@ channel_status(?status_disconnected) ->
|
||||||
status => ?status_disconnected,
|
status => ?status_disconnected,
|
||||||
error => <<"Disconnected for unknown reason">>
|
error => <<"Disconnected for unknown reason">>
|
||||||
};
|
};
|
||||||
channel_status(connecting) ->
|
channel_status(?status_connecting) ->
|
||||||
#{
|
#{
|
||||||
status => connecting,
|
status => ?status_connecting,
|
||||||
error => <<"Not connected for unknown reason">>
|
error => <<"Not connected for unknown reason">>
|
||||||
};
|
};
|
||||||
channel_status(connected) ->
|
channel_status(?status_connected) ->
|
||||||
#{
|
#{
|
||||||
status => connected,
|
status => ?status_connected,
|
||||||
error => undefined
|
error => undefined
|
||||||
};
|
};
|
||||||
%% Probably not so useful but it is permitted to set an error even when the
|
%% Probably not so useful but it is permitted to set an error even when the
|
||||||
%% status is connected
|
%% status is connected
|
||||||
channel_status({connected, Error}) ->
|
channel_status({?status_connected, Error}) ->
|
||||||
#{
|
#{
|
||||||
status => connected,
|
status => ?status_connected,
|
||||||
error => Error
|
error => Error
|
||||||
};
|
};
|
||||||
channel_status({error, Reason}) ->
|
channel_status({error, Reason}) ->
|
||||||
#{
|
#{
|
||||||
status => disconnected,
|
status => ?status_disconnected,
|
||||||
error => Reason
|
error => Reason
|
||||||
}.
|
}.
|
||||||
|
|
||||||
channel_status_is_channel_added(#{
|
channel_status_is_channel_added(#{
|
||||||
status := connected
|
status := ?status_connected
|
||||||
}) ->
|
}) ->
|
||||||
true;
|
true;
|
||||||
channel_status_is_channel_added(#{
|
channel_status_is_channel_added(#{
|
||||||
status := connecting
|
status := ?status_connecting
|
||||||
}) ->
|
}) ->
|
||||||
true;
|
true;
|
||||||
channel_status_is_channel_added(_Status) ->
|
channel_status_is_channel_added(_Status) ->
|
||||||
false.
|
false.
|
||||||
|
|
||||||
|
-spec add_channel_status_if_not_exists(data(), channel_id(), resource_state()) -> data().
|
||||||
add_channel_status_if_not_exists(Data, ChannelId, State) ->
|
add_channel_status_if_not_exists(Data, ChannelId, State) ->
|
||||||
Channels = Data#data.added_channels,
|
Channels = Data#data.added_channels,
|
||||||
case maps:is_key(ChannelId, Channels) of
|
case maps:is_key(ChannelId, Channels) of
|
||||||
|
@ -1275,6 +1295,12 @@ add_channel_status_if_not_exists(Data, ChannelId, State) ->
|
||||||
false ->
|
false ->
|
||||||
ChannelStatus = channel_status({error, resource_not_operational}),
|
ChannelStatus = channel_status({error, resource_not_operational}),
|
||||||
NewChannels = maps:put(ChannelId, ChannelStatus, Channels),
|
NewChannels = maps:put(ChannelId, ChannelStatus, Channels),
|
||||||
maybe_alarm(State, ChannelId, ChannelStatus, no_prev),
|
ResStatus = state_to_status(State),
|
||||||
|
maybe_alarm(ResStatus, ChannelId, ChannelStatus, no_prev),
|
||||||
Data#data{added_channels = NewChannels}
|
Data#data{added_channels = NewChannels}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
state_to_status(?state_stopped) -> ?rm_status_stopped;
|
||||||
|
state_to_status(?state_connected) -> ?status_connected;
|
||||||
|
state_to_status(?state_connecting) -> ?status_connecting;
|
||||||
|
state_to_status(?state_disconnected) -> ?status_disconnected.
|
||||||
|
|
|
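
The core of this refactor replaces bare atoms with `?state_*` and `?status_*` macros so that gen_statem state names and health-check statuses can be told apart at a glance, and the `state_to_status/1` bridge above covers the one place a state must be rendered as a status. The macro header itself is outside this diff; the following is a hedged sketch of what the definitions presumably look like, inferred from the literal atoms each macro replaces in the hunks above:

    %% Hypothetical reconstruction -- not part of this commit's diff. Each
    %% expansion is inferred from the atom the macro replaced above.
    -define(state_connecting, connecting).
    -define(state_connected, connected).
    -define(state_disconnected, disconnected).
    -define(state_stopped, stopped).

    %% Statuses reuse the same atoms today, which is why state_to_status/1
    %% is near-identity; `stopped' exists only on the resource-manager side.
    -define(status_connecting, connecting).
    -define(status_connected, connected).
    -define(status_disconnected, disconnected).
    -define(rm_status_stopped, stopped).

Because both macro families currently expand to the same atoms, the conversion works "coincidentally" today, exactly as the in-code note above warns; the explicit `state_to_status/1` mapping makes the dependency visible.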
@@ -23,7 +23,7 @@

 -export([namespace/0, roots/0, fields/1, desc/1]).

--export([create_opts/1]).
+-export([create_opts/1, resource_opts_meta/0]).

 %% range interval in ms
 -define(HEALTH_CHECK_INTERVAL_RANGE_MIN, 1).

@@ -115,7 +115,7 @@ t_create_remove(_) ->
             ?assertNot(is_process_alive(Pid))
         end,
         fun(Trace) ->
-            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
+            ?assertEqual([], ?of_kind("inconsistent_status", Trace)),
             ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
         end
     ).
@@ -174,7 +174,7 @@ t_create_remove_local(_) ->
             ?assertNot(is_process_alive(Pid))
         end,
         fun(Trace) ->
-            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
+            ?assertEqual([], ?of_kind("inconsistent_status", Trace)),
             ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
         end
     ).
@@ -219,7 +219,7 @@ t_do_not_start_after_created(_) ->
             ?assertNot(is_process_alive(Pid2))
         end,
         fun(Trace) ->
-            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
+            ?assertEqual([], ?of_kind("inconsistent_status", Trace)),
             ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
         end
     ).
@@ -855,7 +855,7 @@ t_healthy_timeout(_) ->
             ?assertEqual(ok, emqx_resource:remove_local(?ID))
         end,
         fun(Trace) ->
-            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
+            ?assertEqual([], ?of_kind("inconsistent_status", Trace)),
             ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
         end
     ).
@@ -894,7 +894,7 @@ t_healthy(_) ->
             ?assertEqual(ok, emqx_resource:remove_local(?ID))
         end,
         fun(Trace) ->
-            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
+            ?assertEqual([], ?of_kind("inconsistent_status", Trace)),
             ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
         end
     ).
@@ -1006,7 +1006,7 @@ t_stop_start(_) ->
         end,

         fun(Trace) ->
-            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
+            ?assertEqual([], ?of_kind("inconsistent_status", Trace)),
             ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
         end
     ).
@@ -1064,7 +1064,7 @@ t_stop_start_local(_) ->
             ?assert(is_process_alive(Pid1))
         end,
         fun(Trace) ->
-            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
+            ?assertEqual([], ?of_kind("inconsistent_status", Trace)),
             ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
         end
     ).
@@ -1269,7 +1269,7 @@ t_health_check_disconnected(_) ->
             )
         end,
         fun(Trace) ->
-            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
+            ?assertEqual([], ?of_kind("inconsistent_status", Trace)),
             ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
         end
     ).

@@ -172,44 +172,58 @@ fields("node_metrics") ->
     [{"node", sc(binary(), #{desc => ?DESC("node_node"), example => "emqx@127.0.0.1"})}] ++
         fields("metrics");
 fields("ctx_pub") ->
+    Event = 'message.publish',
     [
-        {"event_type", event_type_sc(message_publish)},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)},
         {"id", sc(binary(), #{desc => ?DESC("event_id")})}
         | msg_event_common_fields()
     ];
 fields("ctx_sub") ->
+    Event = 'session.subscribed',
     [
-        {"event_type", event_type_sc(session_subscribed)}
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)}
         | msg_event_common_fields()
     ];
 fields("ctx_unsub") ->
+    Event = 'session.unsubscribed',
     [
-        {"event_type", event_type_sc(session_unsubscribed)}
-        | proplists:delete("event_type", fields("ctx_sub"))
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)}
+        | without(["event_type", "event_topic", "event"], fields("ctx_sub"))
     ];
 fields("ctx_delivered") ->
+    Event = 'message.delivered',
     [
-        {"event_type", event_type_sc(message_delivered)},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)},
         {"id", sc(binary(), #{desc => ?DESC("event_id")})},
         {"from_clientid", sc(binary(), #{desc => ?DESC("event_from_clientid")})},
         {"from_username", sc(binary(), #{desc => ?DESC("event_from_username")})}
         | msg_event_common_fields()
     ];
 fields("ctx_acked") ->
+    Event = 'message.acked',
     [
-        {"event_type", event_type_sc(message_acked)}
-        | proplists:delete("event_type", fields("ctx_delivered"))
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)}
+        | without(["event_type", "event_topic", "event"], fields("ctx_delivered"))
     ];
 fields("ctx_dropped") ->
+    Event = 'message.dropped',
     [
-        {"event_type", event_type_sc(message_dropped)},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)},
         {"id", sc(binary(), #{desc => ?DESC("event_id")})},
         {"reason", sc(binary(), #{desc => ?DESC("event_ctx_dropped")})}
         | msg_event_common_fields()
     ];
 fields("ctx_connected") ->
+    Event = 'client.connected',
     [
-        {"event_type", event_type_sc(client_connected)},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)},
         {"clientid", sc(binary(), #{desc => ?DESC("event_clientid")})},
         {"username", sc(binary(), #{desc => ?DESC("event_username")})},
         {"mountpoint", sc(binary(), #{desc => ?DESC("event_mountpoint")})},
@@ -227,8 +241,10 @@ fields("ctx_connected") ->
         })}
     ];
 fields("ctx_disconnected") ->
+    Event = 'client.disconnected',
     [
-        {"event_type", event_type_sc(client_disconnected)},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)},
         {"clientid", sc(binary(), #{desc => ?DESC("event_clientid")})},
         {"username", sc(binary(), #{desc => ?DESC("event_username")})},
         {"reason", sc(binary(), #{desc => ?DESC("event_ctx_disconnected_reason")})},
@@ -240,8 +256,10 @@ fields("ctx_disconnected") ->
         })}
     ];
 fields("ctx_connack") ->
+    Event = 'client.connack',
     [
-        {"event_type", event_type_sc(client_connack)},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)},
         {"reason_code", sc(binary(), #{desc => ?DESC("event_ctx_connack_reason_code")})},
         {"clientid", sc(binary(), #{desc => ?DESC("event_clientid")})},
         {"clean_start", sc(boolean(), #{desc => ?DESC("event_clean_start"), default => true})},
@@ -258,8 +276,10 @@ fields("ctx_connack") ->
         })}
     ];
 fields("ctx_check_authz_complete") ->
+    Event = 'client.check_authz_complete',
     [
-        {"event_type", event_type_sc(client_check_authz_complete)},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)},
         {"clientid", sc(binary(), #{desc => ?DESC("event_clientid")})},
         {"username", sc(binary(), #{desc => ?DESC("event_username")})},
         {"peerhost", sc(binary(), #{desc => ?DESC("event_peerhost")})},
@@ -269,8 +289,11 @@ fields("ctx_check_authz_complete") ->
         {"result", sc(binary(), #{desc => ?DESC("event_result")})}
     ];
 fields("ctx_bridge_mqtt") ->
+    Event = '$bridges/mqtt:*',
+    EventBin = atom_to_binary(Event),
     [
-        {"event_type", event_type_sc('$bridges/mqtt:*')},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(EventBin)},
         {"id", sc(binary(), #{desc => ?DESC("event_id")})},
         {"payload", sc(binary(), #{desc => ?DESC("event_payload")})},
         {"topic", sc(binary(), #{desc => ?DESC("event_topic")})},
@@ -281,8 +304,10 @@ fields("ctx_bridge_mqtt") ->
         qos()
     ];
 fields("ctx_delivery_dropped") ->
+    Event = 'delivery.dropped',
     [
-        {"event_type", event_type_sc(delivery_dropped)},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)},
         {"id", sc(binary(), #{desc => ?DESC("event_id")})},
         {"reason", sc(binary(), #{desc => ?DESC("event_ctx_dropped")})},
         {"from_clientid", sc(binary(), #{desc => ?DESC("event_from_clientid")})},
@@ -309,7 +334,21 @@ sc(Type, Meta) -> hoconsc:mk(Type, Meta).
 ref(Field) -> hoconsc:ref(?MODULE, Field).

 event_type_sc(Event) ->
-    sc(Event, #{desc => ?DESC("event_event_type"), required => true}).
+    EventType = event_to_event_type(Event),
+    sc(EventType, #{desc => ?DESC("event_event_type"), required => true}).
+
+-spec event_to_event_type(atom()) -> atom().
+event_to_event_type(Event) ->
+    binary_to_atom(binary:replace(atom_to_binary(Event), <<".">>, <<"_">>)).
+
+event_sc(Event) when is_binary(Event) ->
+    %% only exception is `$bridges/...'.
+    sc(binary(), #{default => Event, importance => ?IMPORTANCE_HIDDEN});
+event_sc(Event) ->
+    sc(Event, #{default => Event, importance => ?IMPORTANCE_HIDDEN}).
+
+without(FieldNames, Fields) ->
+    lists:foldl(fun proplists:delete/2, Fields, FieldNames).
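
`event_to_event_type/1` derives the legacy `event_type` schema atom from the canonical dotted event name by swapping the dot for an underscore; `binary:replace/3` replaces only the first match by default, which suffices since these event names carry a single dot. A self-checking sketch of the conversion (the fun is a stand-in for the private helper, since the module name is not shown in this excerpt):

    %% Re-implementation for illustration only; mirrors the helper above.
    EventToType = fun(E) ->
        binary_to_atom(binary:replace(atom_to_binary(E), <<".">>, <<"_">>))
    end,
    message_publish = EventToType('message.publish'),
    client_check_authz_complete = EventToType('client.check_authz_complete'),
    %% '$bridges/mqtt:*' contains no dot, so it passes through unchanged,
    %% which is why event_sc/1 special-cases it via the binary clause:
    '$bridges/mqtt:*' = EventToType('$bridges/mqtt:*').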
 publish_received_at_sc() ->
     sc(integer(), #{desc => ?DESC("event_publish_received_at")}).

@@ -27,7 +27,7 @@
 test(#{sql := Sql, context := Context}) ->
     case emqx_rule_sqlparser:parse(Sql) of
         {ok, Select} ->
-            InTopic = maps:get(topic, Context, <<>>),
+            InTopic = get_in_topic(Context),
             EventTopics = emqx_rule_sqlparser:select_from(Select),
             case lists:all(fun is_publish_topic/1, EventTopics) of
                 true ->
@@ -37,8 +37,13 @@ test(#{sql := Sql, context := Context}) ->
                         false -> {error, nomatch}
                     end;
                 false ->
-                    %% the rule is for both publish and events, test it directly
-                    test_rule(Sql, Select, Context, EventTopics)
+                    case lists:member(InTopic, EventTopics) of
+                        true ->
+                            %% the rule is for both publish and events, test it directly
+                            test_rule(Sql, Select, Context, EventTopics);
+                        false ->
+                            {error, nomatch}
+                    end
             end;
         {error, Reason} ->
             ?SLOG(debug, #{
@@ -92,15 +97,12 @@ flatten([D | L]) when is_list(D) ->
     [D0 || {ok, D0} <- D] ++ flatten(L).

 fill_default_values(Event, Context) ->
-    maps:merge(envs_examp(Event), Context).
+    maps:merge(envs_examp(Event, Context), Context).

-envs_examp(EventTopic) ->
-    EventName = emqx_rule_events:event_name(EventTopic),
-    emqx_rule_maps:atom_key_map(
-        maps:from_list(
-            emqx_rule_events:columns_with_exam(EventName)
-        )
-    ).
+envs_examp(EventTopic, Context) ->
+    EventName = maps:get(event, Context, emqx_rule_events:event_name(EventTopic)),
+    Env = maps:from_list(emqx_rule_events:columns_with_exam(EventName)),
+    emqx_rule_maps:atom_key_map(Env).

 is_test_runtime_env_atom() ->
     'emqx_rule_sqltester:is_test_runtime_env'.
@@ -118,3 +120,26 @@ is_test_runtime_env() ->
         true -> true;
         _ -> false
     end.
+
+%% Most events have the original `topic' input, but their own topic (i.e.: `$events/...')
+%% is different from `topic'.
+get_in_topic(Context) ->
+    case maps:find(event_topic, Context) of
+        {ok, EventTopic} ->
+            EventTopic;
+        error ->
+            case maps:find(event, Context) of
+                {ok, Event} ->
+                    maybe_infer_in_topic(Context, Event);
+                error ->
+                    maps:get(topic, Context, <<>>)
+            end
+    end.
+
+maybe_infer_in_topic(Context, 'message.publish') ->
+    %% This is special because the common use in the frontend is to select this event, but
+    %% test the input `topic' field against MQTT topic filters in the `FROM' clause rather
+    %% than the corresponding `$events/message_publish'.
+    maps:get(topic, Context, <<>>);
+maybe_infer_in_topic(_Context, Event) ->
+    emqx_rule_events:event_topic(Event).
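
How `get_in_topic/1` resolves the topic to test against is easiest to see on concrete contexts. A hedged illustration follows; the `$events/...` result assumes `emqx_rule_events:event_topic/1` maps dotted event names onto their `$events/` topics, which this excerpt does not itself show:

    %% Illustrative only; get_in_topic/1 is a private helper, so these are
    %% expected results rather than a runnable shell session.
    %%
    %% 1. An explicit event_topic override always wins:
    %%      get_in_topic(#{event_topic => <<"$events/session_subscribed">>})
    %%      -> <<"$events/session_subscribed">>
    %% 2. Any event other than 'message.publish' infers its own event topic
    %%    (assumed emqx_rule_events:event_topic/1 mapping):
    %%      get_in_topic(#{event => 'client.connected', topic => <<"t/a">>})
    %%      -> <<"$events/client_connected">>
    %% 3. 'message.publish' (or no event key at all) keeps the plain input topic:
    %%      get_in_topic(#{event => 'message.publish', topic => <<"t/a">>})
    %%      -> <<"t/a">>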
@@ -1990,7 +1990,10 @@ t_sqlparse_event_1(_Config) ->
         emqx_rule_sqltester:test(
             #{
                 sql => Sql,
-                context => #{topic => <<"t/tt">>}
+                context => #{
+                    topic => <<"t/tt">>,
+                    event => 'session.subscribed'
+                }
             }
         )
     ).
@@ -2004,7 +2007,10 @@ t_sqlparse_event_2(_Config) ->
         emqx_rule_sqltester:test(
             #{
                 sql => Sql,
-                context => #{clientid => <<"abc">>}
+                context => #{
+                    clientid => <<"abc">>,
+                    event => 'client.connected'
+                }
             }
         )
     ).

@@ -0,0 +1,385 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%     http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+-module(emqx_rule_engine_api_2_SUITE).
+
+-compile(nowarn_export_all).
+-compile(export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+
+%%------------------------------------------------------------------------------
+%% CT boilerplate
+%%------------------------------------------------------------------------------
+
+all() ->
+    emqx_common_test_helpers:all(?MODULE).
+
+init_per_suite(Config) ->
+    Apps = emqx_cth_suite:start(
+        app_specs(),
+        #{work_dir => emqx_cth_suite:work_dir(Config)}
+    ),
+    emqx_common_test_http:create_default_app(),
+    [{apps, Apps} | Config].
+
+end_per_suite(Config) ->
+    Apps = ?config(apps, Config),
+    ok = emqx_cth_suite:stop(Apps),
+    ok.
+
+app_specs() ->
+    [
+        emqx_conf,
+        emqx_rule_engine,
+        emqx_management,
+        {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
+    ].
+
+%%------------------------------------------------------------------------------
+%% Helper fns
+%%------------------------------------------------------------------------------
+
+maybe_json_decode(X) ->
+    case emqx_utils_json:safe_decode(X, [return_maps]) of
+        {ok, Decoded} -> Decoded;
+        {error, _} -> X
+    end.
+
+request(Method, Path, Params) ->
+    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
+    Opts = #{return_all => true},
+    case emqx_mgmt_api_test_util:request_api(Method, Path, "", AuthHeader, Params, Opts) of
+        {ok, {Status, Headers, Body0}} ->
+            Body = maybe_json_decode(Body0),
+            {ok, {Status, Headers, Body}};
+        {error, {Status, Headers, Body0}} ->
+            Body =
+                case emqx_utils_json:safe_decode(Body0, [return_maps]) of
+                    {ok, Decoded0 = #{<<"message">> := Msg0}} ->
+                        Msg = maybe_json_decode(Msg0),
+                        Decoded0#{<<"message">> := Msg};
+                    {ok, Decoded0} ->
+                        Decoded0;
+                    {error, _} ->
+                        Body0
+                end,
+            {error, {Status, Headers, Body}};
+        Error ->
+            Error
+    end.
+
+sql_test_api(Params) ->
+    Method = post,
+    Path = emqx_mgmt_api_test_util:api_path(["rule_test"]),
+    ct:pal("sql test (http):\n ~p", [Params]),
+    Res = request(Method, Path, Params),
+    ct:pal("sql test (http) result:\n ~p", [Res]),
+    Res.
+
+%%------------------------------------------------------------------------------
+%% Test cases
+%%------------------------------------------------------------------------------
+
+t_rule_test_smoke(_Config) ->
+    %% Example inputs recorded from frontend on 2023-12-04
+    Publish = [
+        #{
+            expected => #{code => 200},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"message_publish">>,
+                            <<"payload">> => <<"{\"msg\": \"hello\"}">>,
+                            <<"qos">> => 1,
+                            <<"topic">> => <<"t/a">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            hint => <<"wrong topic">>,
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"message_publish">>,
+                            <<"payload">> => <<"{\"msg\": \"hello\"}">>,
+                            <<"qos">> => 1,
+                            <<"topic">> => <<"a">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            hint => <<
+                "Currently, the frontend doesn't try to match against "
+                "$events/message_published, but it may start sending "
+                "the event topic in the future."
+            >>,
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"message_publish">>,
+                            <<"payload">> => <<"{\"msg\": \"hello\"}">>,
+                            <<"qos">> => 1,
+                            <<"topic">> => <<"t/a">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"$events/message_published\"">>
+                }
+        }
+    ],
+    %% Default input SQL doesn't match any event topic
+    DefaultNoMatch = [
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx_2">>,
+                            <<"event_type">> => <<"message_delivered">>,
+                            <<"from_clientid">> => <<"c_emqx_1">>,
+                            <<"from_username">> => <<"u_emqx_1">>,
+                            <<"payload">> => <<"{\"msg\": \"hello\"}">>,
+                            <<"qos">> => 1,
+                            <<"topic">> => <<"t/a">>,
+                            <<"username">> => <<"u_emqx_2">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx_2">>,
+                            <<"event_type">> => <<"message_acked">>,
+                            <<"from_clientid">> => <<"c_emqx_1">>,
+                            <<"from_username">> => <<"u_emqx_1">>,
+                            <<"payload">> => <<"{\"msg\": \"hello\"}">>,
+                            <<"qos">> => 1,
+                            <<"topic">> => <<"t/a">>,
+                            <<"username">> => <<"u_emqx_2">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"message_dropped">>,
+                            <<"payload">> => <<"{\"msg\": \"hello\"}">>,
+                            <<"qos">> => 1,
+                            <<"reason">> => <<"no_subscribers">>,
+                            <<"topic">> => <<"t/a">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"client_connected">>,
+                            <<"peername">> => <<"127.0.0.1:52918">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"client_disconnected">>,
+                            <<"reason">> => <<"normal">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"client_connack">>,
+                            <<"reason_code">> => <<"sucess">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"action">> => <<"publish">>,
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"client_check_authz_complete">>,
+                            <<"result">> => <<"allow">>,
+                            <<"topic">> => <<"t/1">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"session_subscribed">>,
+                            <<"qos">> => 1,
+                            <<"topic">> => <<"t/a">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"session_unsubscribed">>,
+                            <<"qos">> => 1,
+                            <<"topic">> => <<"t/a">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx_2">>,
+                            <<"event_type">> => <<"delivery_dropped">>,
+                            <<"from_clientid">> => <<"c_emqx_1">>,
+                            <<"from_username">> => <<"u_emqx_1">>,
+                            <<"payload">> => <<"{\"msg\": \"hello\"}">>,
+                            <<"qos">> => 1,
+                            <<"reason">> => <<"queue_full">>,
+                            <<"topic">> => <<"t/a">>,
+                            <<"username">> => <<"u_emqx_2">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        }
+    ],
+    MultipleFrom = [
+        #{
+            expected => #{code => 200},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"session_unsubscribed">>,
+                            <<"qos">> => 1,
+                            <<"topic">> => <<"t/a">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> =>
+                        <<"SELECT\n *\nFROM\n \"t/#\", \"$events/session_unsubscribed\" ">>
+                }
+        },
+        #{
+            expected => #{code => 200},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"session_unsubscribed">>,
+                            <<"qos">> => 1,
+                            <<"topic">> => <<"t/a">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> =>
+                        <<"SELECT\n *\nFROM\n \"$events/message_dropped\", \"$events/session_unsubscribed\" ">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"session_unsubscribed">>,
+                            <<"qos">> => 1,
+                            <<"topic">> => <<"t/a">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> =>
+                        <<"SELECT\n *\nFROM\n \"$events/message_dropped\", \"$events/client_connected\" ">>
+                }
+        }
+    ],
+    Cases = Publish ++ DefaultNoMatch ++ MultipleFrom,
+    FailedCases = lists:filtermap(fun do_t_rule_test_smoke/1, Cases),
+    ?assertEqual([], FailedCases),
+    ok.
+
+do_t_rule_test_smoke(#{input := Input, expected := #{code := ExpectedCode}} = Case) ->
+    {_ErrOrOk, {{_, Code, _}, _, Body}} = sql_test_api(Input),
+    case Code =:= ExpectedCode of
+        true ->
+            false;
+        false ->
+            {true, #{
+                expected => ExpectedCode,
+                hint => maps:get(hint, Case, <<>>),
+                got => Code,
+                resp_body => Body
+            }}
+    end.

@@ -216,7 +216,7 @@ t_ctx_delivery_dropped(_) ->

 t_mongo_date_function_should_return_string_in_test_env(_) ->
     SQL =
-        <<"SELECT mongo_date() as mongo_date FROM \"t/1\"">>,
+        <<"SELECT mongo_date() as mongo_date FROM \"$events/client_check_authz_complete\"">>,
     Context =
         #{
             action => <<"publish">>,
@@ -348,19 +348,11 @@ receive_published(Line) ->

 cluster(Config) ->
     PrivDataDir = ?config(priv_dir, Config),
-    PeerModule =
-        case os:getenv("IS_CI") of
-            false ->
-                slave;
-            _ ->
-                ct_slave
-        end,
     Cluster = emqx_common_test_helpers:emqx_cluster(
         [core, core],
         [
             {apps, ?APPS},
             {listener_ports, []},
-            {peer_mod, PeerModule},
             {priv_data_dir, PrivDataDir},
             {load_schema, true},
             {start_autocluster, true},
@@ -382,7 +374,7 @@ cluster(Config) ->

 start_cluster(Cluster) ->
     Nodes = [
-        emqx_common_test_helpers:start_slave(Name, Opts)
+        emqx_common_test_helpers:start_peer(Name, Opts)
     || {Name, Opts} <- Cluster
     ],
     NumNodes = length(Nodes),
@@ -390,7 +382,7 @@ start_cluster(Cluster) ->
     emqx_utils:pmap(
         fun(N) ->
             ct:pal("stopping ~p", [N]),
-            ok = emqx_common_test_helpers:stop_slave(N)
+            ok = emqx_common_test_helpers:stop_peer(N)
         end,
         Nodes
     )

@@ -154,7 +154,7 @@ init_per_testcase(t_exhook_info, Config) ->
     emqx_common_test_helpers:start_apps([emqx_exhook]),
     Config;
 init_per_testcase(t_cluster_uuid, Config) ->
-    Node = start_slave(n1),
+    Node = start_peer(n1),
     [{n1, Node} | Config];
 init_per_testcase(t_uuid_restored_from_file, Config) ->
     Config;
@@ -210,7 +210,7 @@ end_per_testcase(t_exhook_info, _Config) ->
     ok;
 end_per_testcase(t_cluster_uuid, Config) ->
     Node = proplists:get_value(n1, Config),
-    ok = stop_slave(Node);
+    ok = stop_peer(Node);
 end_per_testcase(t_num_clients, Config) ->
     ok = snabbkaffe:stop(),
     Config;
@@ -782,7 +782,7 @@ find_gen_rpc_port() ->
     {ok, {_, Port}} = inet:sockname(EPort),
     Port.

-start_slave(Name) ->
+start_peer(Name) ->
     Port = find_gen_rpc_port(),
     TestNode = node(),
     Handler =
@@ -811,11 +811,9 @@ start_slave(Name) ->
         apps => [emqx, emqx_conf, emqx_retainer, emqx_modules, emqx_telemetry]
     },
-
-    emqx_common_test_helpers:start_slave(Name, Opts).
+    emqx_common_test_helpers:start_peer(Name, Opts).

-stop_slave(Node) ->
-    % This line don't work!!
-    %emqx_cluster_rpc:fast_forward_to_commit(Node, 100),
+stop_peer(Node) ->
     rpc:call(Node, ?MODULE, leave_cluster, []),
     ok = emqx_cth_peer:stop(Node),
     ?assertEqual([node()], mria:running_nodes()),

@@ -35,7 +35,8 @@
     if_only_to_toggle_enable/2,
     update_if_present/3,
     put_if/4,
-    rename/3
+    rename/3,
+    key_comparer/1
 ]).

 -export_type([config_key/0, config_key_path/0]).
@@ -318,3 +319,16 @@ rename(OldKey, NewKey, Map) ->
         error ->
             Map
     end.
+
+-spec key_comparer(K) -> fun((M, M) -> boolean()) when M :: #{K => _V}.
+key_comparer(K) ->
+    fun
+        (#{K := V1}, #{K := V2}) ->
+            V1 < V2;
+        (#{K := _}, _) ->
+            false;
+        (_, #{K := _}) ->
+            true;
+        (M1, M2) ->
+            M1 < M2
+    end.
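
`key_comparer/1` yields a `lists:sort/2`-compatible total order: maps lacking the key sort first, maps carrying it are ordered by its value, and ties among keyless maps fall back to plain term order. A small usage sketch with illustrative data (the `key_comparer_test` added below exercises the same ordering):

    %% Illustrative data only. Maps without `status' sort ahead of maps
    %% that have it; among the rest, the values order the result.
    Comp = emqx_utils_maps:key_comparer(status),
    [#{error := not_added_yet}, #{status := connected}, #{status := connecting}] =
        lists:sort(Comp, [
            #{status => connecting},
            #{error => not_added_yet},
            #{status => connected}
        ]).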
@ -110,3 +110,22 @@ best_effort_recursive_sum_test_() ->
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
].
|
].
|
||||||
|
|
||||||
|
key_comparer_test() ->
|
||||||
|
Comp = emqx_utils_maps:key_comparer(foo),
|
||||||
|
?assertEqual(
|
||||||
|
[
|
||||||
|
#{},
|
||||||
|
#{baz => 42},
|
||||||
|
#{foo => 1},
|
||||||
|
#{foo => 42},
|
||||||
|
#{foo => bar, baz => 42}
|
||||||
|
],
|
||||||
|
lists:sort(Comp, [
|
||||||
|
#{foo => 42},
|
||||||
|
#{baz => 42},
|
||||||
|
#{foo => bar, baz => 42},
|
||||||
|
#{foo => 1},
|
||||||
|
#{}
|
||||||
|
])
|
||||||
|
).
|
||||||
|
|
|
@@ -0,0 +1,10 @@
+Added a technical preview of the new persistent session implementation based on RocksDB.
+Please note that this feature is in the alpha stage and must not be enabled in production systems.
+
+Features missing in the early preview version of the new persistent session implementation:
+
+- Shard failover
+- Retained messages
+- Will message handling
+- Shared subscriptions
+- Subscription IDs
@@ -0,0 +1 @@
+Fix a CoAP gateway bug that caused it to ignore subscription options.
@@ -0,0 +1,7 @@
+Updated the `gen_rpc` library to version 3.3.0. The new version includes
+several performance improvements:
+
+- Avoid allocating extra memory for packets before they are sent
+  to the wire in some cases
+
+- Bypass the network for local calls
mix.exs

@@ -56,7 +56,7 @@ defmodule EMQXUmbrella.MixProject do
       {:esockd, github: "emqx/esockd", tag: "5.9.8", override: true},
       {:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.8.0-emqx-1", override: true},
       {:ekka, github: "emqx/ekka", tag: "0.15.16", override: true},
-      {:gen_rpc, github: "emqx/gen_rpc", tag: "3.2.2", override: true},
+      {:gen_rpc, github: "emqx/gen_rpc", tag: "3.3.0", override: true},
       {:grpc, github: "emqx/grpc-erl", tag: "0.6.12", override: true},
       {:minirest, github: "emqx/minirest", tag: "1.3.14", override: true},
       {:ecpool, github: "emqx/ecpool", tag: "0.5.4", override: true},
@@ -63,7 +63,7 @@
     , {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.8"}}}
     , {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.8.0-emqx-1"}}}
     , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.16"}}}
-    , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.2.2"}}}
+    , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.0"}}}
     , {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.12"}}}
     , {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.14"}}}
     , {ecpool, {git, "https://github.com/emqx/ecpool", {tag, "0.5.4"}}}
@@ -7,27 +7,27 @@ connect_timeout.label:
 """Connect Timeout"""

 producer_opts.desc:
-"""Local MQTT data source and Azure Event Hub bridge configs."""
+"""Local MQTT data source and Azure Event Hubs bridge configs."""

 producer_opts.label:
-"""MQTT to Azure Event Hub"""
+"""MQTT to Azure Event Hubs"""

 min_metadata_refresh_interval.desc:
-"""Minimum time interval the client has to wait before refreshing Azure Event Hub Kafka broker and topic metadata. Setting too small value may add extra load on Azure Event Hub."""
+"""Minimum time interval the client has to wait before refreshing Azure Event Hubs Kafka broker and topic metadata. Setting this value too small may add extra load on Azure Event Hubs."""

 min_metadata_refresh_interval.label:
 """Min Metadata Refresh Interval"""

 kafka_producer.desc:
-"""Azure Event Hub Producer configuration."""
+"""Azure Event Hubs Producer configuration."""

 kafka_producer.label:
-"""Azure Event Hub Producer"""
+"""Azure Event Hubs Producer"""

 producer_buffer.desc:
 """Configure producer message buffer.

-Tell Azure Event Hub producer how to buffer messages when EMQX has more messages to send than Azure Event Hub can keep up, or when Azure Event Hub is down."""
+Tell the Azure Event Hubs producer how to buffer messages when EMQX has more messages to send than Azure Event Hubs can keep up with, or when Azure Event Hubs is down."""

 producer_buffer.label:
 """Message Buffer"""
@@ -45,7 +45,7 @@ socket_receive_buffer.label:
 """Socket Receive Buffer Size"""

 socket_tcp_keepalive.desc:
-"""Enable TCP keepalive for Azure Event Hub bridge connections.
+"""Enable TCP keepalive for Azure Event Hubs bridge connections.
 The value is three comma separated numbers in the format of 'Idle,Interval,Probes'
 - Idle: The number of seconds a connection needs to be idle before the server begins to send out keep-alive probes (Linux default 7200).
 - Interval: The number of seconds between TCP keep-alive probes (Linux default 75).
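For intuition, a value such as "240,30,5" would mean a 240-second idle time, a 30-second probe interval, and 5 probes. A hypothetical parser for the 'Idle,Interval,Probes' format (illustrative only, not EMQX code):

%% Split an 'Idle,Interval,Probes' string such as <<"240,30,5">>.
parse_keepalive(Value) ->
    [Idle, Interval, Probes] =
        [binary_to_integer(B) || B <- binary:split(Value, <<",">>, [global])],
    #{idle => Idle, interval => Interval, probes => Probes}.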
@@ -63,16 +63,16 @@ desc_name.label:
 """Bridge Name"""

 producer_kafka_opts.desc:
-"""Azure Event Hub producer configs."""
+"""Azure Event Hubs producer configs."""

 producer_kafka_opts.label:
-"""Azure Event Hub Producer"""
+"""Azure Event Hubs Producer"""

 kafka_topic.desc:
-"""Event Hub name"""
+"""Event Hubs name"""

 kafka_topic.label:
-"""Event Hub Name"""
+"""Event Hubs Name"""

 kafka_message_timestamp.desc:
 """Which timestamp to use. The timestamp is expected to be a millisecond precision Unix epoch which can be in string format, e.g. <code>1661326462115</code> or <code>'1661326462115'</code>. When the desired data field for this template is not found, or if the found data is not a valid integer, the current system timestamp will be used."""
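The timestamp fallback described in kafka_message_timestamp.desc restates naturally as code; this is an illustrative sketch with a hypothetical resolve_timestamp/1 helper, not the bridge's actual implementation:

%% Use the rendered field when it is a valid integer millisecond
%% timestamp; otherwise fall back to the current system time.
resolve_timestamp(Rendered) when is_integer(Rendered) ->
    Rendered;
resolve_timestamp(Rendered) when is_binary(Rendered) ->
    case string:to_integer(Rendered) of
        {Ts, <<>>} when is_integer(Ts) -> Ts;
        _ -> erlang:system_time(millisecond)
    end;
resolve_timestamp(_) ->
    erlang:system_time(millisecond).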
@@ -97,21 +97,21 @@ socket_opts.label:
 """Socket Options"""

 partition_count_refresh_interval.desc:
-"""The time interval for Azure Event Hub producer to discover increased number of partitions.
-After the number of partitions is increased in Azure Event Hub, EMQX will start taking the
+"""The time interval for Azure Event Hubs producer to discover increased number of partitions.
+After the number of partitions is increased in Azure Event Hubs, EMQX will start taking the
 discovered partitions into account when dispatching messages per <code>partition_strategy</code>."""

 partition_count_refresh_interval.label:
 """Partition Count Refresh Interval"""

 max_batch_bytes.desc:
-"""Maximum bytes to collect in an Azure Event Hub message batch. Most of the Kafka brokers default to a limit of 1 MB batch size. EMQX's default value is less than 1 MB in order to compensate Kafka message encoding overheads (especially when each individual message is very small). When a single message is over the limit, it is still sent (as a single element batch)."""
+"""Maximum bytes to collect in an Azure Event Hubs message batch."""

 max_batch_bytes.label:
 """Max Batch Bytes"""

 required_acks.desc:
-"""Required acknowledgements for Azure Event Hub partition leader to wait for its followers before it sends back the acknowledgement to EMQX Azure Event Hub producer
+"""Required acknowledgements for Azure Event Hubs partition leader to wait for its followers before it sends back the acknowledgement to EMQX Azure Event Hubs producer

 <code>all_isr</code>: Require all in-sync replicas to acknowledge.
 <code>leader_only</code>: Require only the partition-leader's acknowledgement."""
@@ -120,7 +120,7 @@ required_acks.label:
 """Required Acks"""

 kafka_headers.desc:
-"""Please provide a placeholder to be used as Azure Event Hub Headers<br/>
+"""Please provide a placeholder to be used as Azure Event Hubs Headers<br/>
 e.g. <code>${pub_props}</code><br/>
 Notice that the value of the placeholder must either be an object:
 <code>{\"foo\": \"bar\"}</code>
@@ -128,39 +128,39 @@ or an array of key-value pairs:
 <code>[{\"key\": \"foo\", \"value\": \"bar\"}]</code>"""

 kafka_headers.label:
-"""Azure Event Hub Headers"""
+"""Azure Event Hubs Headers"""

 producer_kafka_ext_headers.desc:
-"""Please provide more key-value pairs for Azure Event Hub headers<br/>
+"""Please provide more key-value pairs for Azure Event Hubs headers<br/>
 The key-value pairs here will be combined with the
-value of <code>kafka_headers</code> field before sending to Azure Event Hub."""
+value of <code>kafka_headers</code> field before sending to Azure Event Hubs."""

 producer_kafka_ext_headers.label:
-"""Extra Azure Event Hub headers"""
+"""Extra Azure Event Hubs headers"""

 producer_kafka_ext_header_key.desc:
-"""Key of the Azure Event Hub header. Placeholders in format of ${var} are supported."""
+"""Key of the Azure Event Hubs header. Placeholders in format of ${var} are supported."""

 producer_kafka_ext_header_key.label:
-"""Azure Event Hub extra header key."""
+"""Azure Event Hubs extra header key."""

 producer_kafka_ext_header_value.desc:
-"""Value of the Azure Event Hub header. Placeholders in format of ${var} are supported."""
+"""Value of the Azure Event Hubs header. Placeholders in format of ${var} are supported."""

 producer_kafka_ext_header_value.label:
 """Value"""

 kafka_header_value_encode_mode.desc:
-"""Azure Event Hub headers value encode mode<br/>
-- NONE: only add binary values to Azure Event Hub headers;<br/>
-- JSON: only add JSON values to Azure Event Hub headers,
+"""Azure Event Hubs headers value encode mode<br/>
+- NONE: only add binary values to Azure Event Hubs headers;<br/>
+- JSON: only add JSON values to Azure Event Hubs headers,
 and encode it to JSON strings before sending."""

 kafka_header_value_encode_mode.label:
-"""Azure Event Hub headers value encode mode"""
+"""Azure Event Hubs headers value encode mode"""

 metadata_request_timeout.desc:
-"""Maximum wait time when fetching metadata from Azure Event Hub."""
+"""Maximum wait time when fetching metadata from Azure Event Hubs."""

 metadata_request_timeout.label:
 """Metadata Request Timeout"""
@@ -220,52 +220,52 @@ config_enable.label:
 """Enable or Disable"""

 desc_config.desc:
-"""Configuration for an Azure Event Hub bridge."""
+"""Configuration for an Azure Event Hubs bridge."""

 desc_config.label:
-"""Azure Event Hub Bridge Configuration"""
+"""Azure Event Hubs Bridge Configuration"""

 buffer_per_partition_limit.desc:
-"""Number of bytes allowed to buffer for each Azure Event Hub partition. When this limit is exceeded, old messages will be dropped in a trade for credits for new messages to be buffered."""
+"""Number of bytes allowed to buffer for each Azure Event Hubs partition. When this limit is exceeded, old messages will be dropped to make room for new messages to be buffered."""

 buffer_per_partition_limit.label:
 """Per-partition Buffer Limit"""

 bootstrap_hosts.desc:
-"""A comma separated list of Azure Event Hub Kafka <code>host[:port]</code> namespace endpoints to bootstrap the client. Default port number is 9093."""
+"""A comma separated list of Azure Event Hubs Kafka <code>host[:port]</code> namespace endpoints to bootstrap the client. The default port number is 9093."""

 bootstrap_hosts.label:
 """Bootstrap Hosts"""

 kafka_message_key.desc:
-"""Template to render Azure Event Hub message key. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then Azure Event Hub's <code>NULL</code> (but not empty string) is used."""
+"""Template to render the Azure Event Hubs message key. If the template is rendered into a NULL value (i.e. there is no such data field in the Rule Engine context) then Azure Event Hubs' <code>NULL</code> (but not an empty string) is used."""

 kafka_message_key.label:
 """Message Key"""

 kafka_message.desc:
-"""Template to render an Azure Event Hub message."""
+"""Template to render an Azure Event Hubs message."""

 kafka_message.label:
-"""Azure Event Hub Message Template"""
+"""Azure Event Hubs Message Template"""

 mqtt_topic.desc:
-"""MQTT topic or topic filter as data source (bridge input). If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in Azure Event Hub."""
+"""MQTT topic or topic filter as data source (bridge input). If a rule action is used as the data source, this config should be left empty, otherwise messages will be duplicated in Azure Event Hubs."""

 mqtt_topic.label:
 """Source MQTT Topic"""

 kafka_message_value.desc:
-"""Template to render Azure Event Hub message value. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then Azure Event Hub's <code>NULL</code> (but not empty string) is used."""
+"""Template to render the Azure Event Hubs message value. If the template is rendered into a NULL value (i.e. there is no such data field in the Rule Engine context) then Azure Event Hubs' <code>NULL</code> (but not an empty string) is used."""

 kafka_message_value.label:
 """Message Value"""

 partition_strategy.desc:
-"""Partition strategy is to tell the producer how to dispatch messages to Azure Event Hub partitions.
+"""Partition strategy tells the producer how to dispatch messages to Azure Event Hubs partitions.

 <code>random</code>: Randomly pick a partition for each message
-<code>key_dispatch</code>: Hash Azure Event Hub message key to a partition number"""
+<code>key_dispatch</code>: Hash the Azure Event Hubs message key to a partition number"""

 partition_strategy.label:
 """Partition Strategy"""
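The two partition strategies above read naturally as a two-clause function; a sketch with hypothetical names, not the producer's real dispatch code:

%% `random` picks any of the PartitionCount partitions (0-based result);
%% `key_dispatch` hashes the key so equal keys hit the same partition.
pick_partition(random, _Key, PartitionCount) ->
    rand:uniform(PartitionCount) - 1;
pick_partition(key_dispatch, Key, PartitionCount) ->
    erlang:phash2(Key, PartitionCount).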
@@ -278,7 +278,7 @@ buffer_segment_bytes.label:
 """Segment File Bytes"""

 max_inflight.desc:
-"""Maximum number of batches allowed for Azure Event Hub producer (per-partition) to send before receiving acknowledgement from Azure Event Hub. Greater value typically means better throughput. However, there can be a risk of message reordering when this value is greater than 1."""
+"""Maximum number of batches allowed for Azure Event Hubs producer (per-partition) to send before receiving acknowledgement from Azure Event Hubs. Greater value typically means better throughput. However, there can be a risk of message reordering when this value is greater than 1."""

 max_inflight.label:
 """Max Inflight"""
@@ -308,25 +308,25 @@ auth_username_password.label:
 """Username/password Auth"""

 auth_sasl_password.desc:
-"""The Connection String for connecting to Azure Event Hub. Should be the "connection string-primary key" of a Namespace shared access policy."""
+"""The Connection String for connecting to Azure Event Hubs. Should be the "connection string-primary key" of a Namespace shared access policy."""

 auth_sasl_password.label:
 """Connection String"""

 producer_kafka_opts.desc:
-"""Azure Event Hub producer configs."""
+"""Azure Event Hubs producer configs."""

 producer_kafka_opts.label:
-"""Azure Event Hub Producer"""
+"""Azure Event Hubs Producer"""

 desc_config.desc:
-"""Configuration for an Azure Event Hub bridge."""
+"""Configuration for an Azure Event Hubs bridge."""

 desc_config.label:
-"""Azure Event Hub Bridge Configuration"""
+"""Azure Event Hubs Bridge Configuration"""

 ssl_client_opts.desc:
-"""TLS/SSL options for Azure Event Hub client."""
+"""TLS/SSL options for Azure Event Hubs client."""
 ssl_client_opts.label:
 """TLS/SSL options"""
@@ -12,7 +12,8 @@ desc_connectors.label:
 """Connectors"""

 connector_field.desc:
-"""Name of connector used to connect to the resource where the action is to be performed."""
+"""Name of the connector specified by the action, used for external resource selection."""

 connector_field.label:
 """Connector"""
@@ -6,7 +6,7 @@ This is used to limit the connection rate for this node.
 Once the limit is reached, new connections will be deferred or refused.<br/>
 For example:<br/>
 - <code>1000/s</code> :: Only accepts 1000 connections per second<br/>
-- <code>1000/10s</code> :: Only accepts 1000 connections every 10 seconds"""
+- <code>1000/10s</code> :: Only accepts 1000 connections every 10 seconds."""
 max_conn_rate.label:
 """Maximum Connection Rate"""
@@ -12,13 +12,6 @@ batch_time.desc:
 batch_time.label:
 """Max batch wait time"""

-buffer_mode.desc:
-"""Buffer operation mode.
-<code>memory_only</mode>: Buffer all messages in memory.<code>volatile_offload</code>: Buffer message in memory first, when up to certain limit (see <code>buffer_seg_bytes</code> config for more information), then start offloading messages to disk"""
-
-buffer_mode.label:
-"""Buffer Mode"""
-
 buffer_seg_bytes.desc:
 """Applicable when buffer mode is set to <code>volatile_offload</code>.
 This value is to specify the size of each on-disk buffer file."""
@@ -573,7 +573,7 @@ fields_tcp_opts_buffer.label:
 """TCP user-space buffer"""

 server_ssl_opts_schema_honor_cipher_order.desc:
-"""An important security setting, it forces the cipher to be set based
+"""An important security setting. It forces the cipher to be set based
 on the server-specified order instead of the client-specified order,
 hence enforcing the (usually more properly configured) security
 ordering of the server administrator."""
@@ -1012,13 +1012,13 @@ fields_ws_opts_supported_subprotocols.label:

 broker_shared_subscription_strategy.desc:
 """Dispatch strategy for shared subscription.
-- `random`: dispatch the message to a random selected subscriber
-- `round_robin`: select the subscribers in a round-robin manner
-- `round_robin_per_group`: select the subscribers in round-robin fashion within each shared subscriber group
-- `local`: select random local subscriber otherwise select random cluster-wide
-- `sticky`: always use the last selected subscriber to dispatch, until the subscriber disconnects.
-- `hash_clientid`: select the subscribers by hashing the `clientIds`
-- `hash_topic`: select the subscribers by hashing the source topic"""
+- `random`: Randomly select a subscriber for dispatch;
+- `round_robin`: Messages from a single publisher are dispatched to subscribers in turn;
+- `round_robin_per_group`: All messages are dispatched to subscribers in turn;
+- `local`: Randomly select a subscriber on the current node; if there are no subscribers on the current node, randomly select within the cluster;
+- `sticky`: Continuously dispatch messages to the initially selected subscriber until their session ends;
+- `hash_clientid`: Hash the publisher's client ID to select a subscriber;
+- `hash_topic`: Hash the publishing topic to select a subscriber."""

 fields_deflate_opts_mem_level.desc:
 """Specifies the size of the compression state.<br/>
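Among the shared-subscription strategies above, the hashing variants are the least obvious; a sketch of how `hash_clientid` could pick a group member (hypothetical helper, not the broker's actual code):

%% Hash the publisher's client ID onto one of the group's subscribers,
%% so a given publisher always reaches the same subscriber.
pick_subscriber(ClientId, Subscribers) ->
    N = length(Subscribers),
    lists:nth(1 + erlang:phash2(ClientId, N), Subscribers).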
@@ -1386,7 +1386,7 @@ However it's no longer useful because the shared-subscription messages in an expir
 base_listener_enable_authn.desc:
 """Set <code>true</code> (default) to enable client authentication on this listener, the authentication
 process goes through the configured authentication chain.
-When set to <code>false</code> to allow any clients with or without authentication information such as username or password to log in.
+When set to <code>false</code>, any client (with or without username/password) is allowed to connect.
 When set to <code>quick_deny_anonymous</code>, it behaves like when set to <code>true</code>, but clients will be
 denied immediately without going through any authenticators if <code>username</code> is not provided. This is useful to fence off
 anonymous clients early."""
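The three settings above amount to a small decision table; an illustrative sketch (hypothetical function, not the listener's actual gate):

%% true  -> run the configured authentication chain;
%% false -> let everyone in;
%% quick_deny_anonymous -> reject clients without a username up front.
authn_gate(true, _ClientInfo) -> run_authn_chain;
authn_gate(false, _ClientInfo) -> allow;
authn_gate(quick_deny_anonymous, #{username := undefined}) -> deny;
authn_gate(quick_deny_anonymous, _ClientInfo) -> run_authn_chain.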
@@ -1577,4 +1577,16 @@ session_ds_session_gc_interval.desc:
 session_ds_session_gc_batch_size.desc:
 """The size of each batch of expired persistent sessions to be garbage collected per iteration."""

+session_ds_max_batch_size.desc:
+"""This value affects the flow control for the persistent sessions.
+The session queries the DB for new messages in batches.
+The size of the batch doesn't exceed this value or `ReceiveMaximum`, whichever is smaller."""
+
+session_ds_min_batch_size.desc:
+"""This value affects the flow control for the persistent sessions.
+The session will query the DB for new messages when the value of the `FreeSpace` variable is larger than this value or `ReceiveMaximum` / 2, whichever is smaller.
+
+`FreeSpace` is calculated as `ReceiveMaximum` for the session minus the number of inflight messages."""
+
+
 }
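Taken together, the two new descriptions define a simple fetch threshold; a sketch with hypothetical names, not the session implementation itself:

%% FreeSpace = ReceiveMaximum - inflight messages; a new DB query is
%% issued once FreeSpace exceeds min(MinBatchSize, ReceiveMaximum div 2).
should_fetch(ReceiveMaximum, Inflight, MinBatchSize) ->
    FreeSpace = ReceiveMaximum - Inflight,
    FreeSpace > min(MinBatchSize, ReceiveMaximum div 2).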