Merge remote-tracking branch 'origin/master' into sync-m-r54-20231205
commit deb3fcd606
@@ -29,7 +29,7 @@
     {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
     {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.8"}}},
     {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.16"}}},
-    {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.2.2"}}},
+    {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.0"}}},
     {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.40.0"}}},
     {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}},
     {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},

@@ -1211,7 +1211,7 @@ handle_info(
 ) when
     ConnState =:= connected orelse ConnState =:= reauthenticating
 ->
-    {Intent, Session1} = emqx_session:disconnect(ClientInfo, ConnInfo, Session),
+    {Intent, Session1} = session_disconnect(ClientInfo, ConnInfo, Session),
     Channel1 = ensure_disconnected(Reason, maybe_publish_will_msg(Channel)),
     Channel2 = Channel1#channel{session = Session1},
     case maybe_shutdown(Reason, Intent, Channel2) of

@@ -2191,6 +2191,11 @@ ensure_disconnected(
     emqx_cm:mark_channel_disconnected(ChanPid),
     Channel#channel{conninfo = NConnInfo, conn_state = disconnected}.

+session_disconnect(ClientInfo, ConnInfo, Session) when Session /= undefined ->
+    emqx_session:disconnect(ClientInfo, ConnInfo, Session);
+session_disconnect(_ClientInfo, _ConnInfo, undefined) ->
+    {shutdown, undefined}.
+
 %%--------------------------------------------------------------------
 %% Maybe Publish will msg

@@ -169,7 +169,8 @@ commit_offset(
 -spec poll(reply_fun(), emqx_persistent_session_ds:id(), inflight(), pos_integer()) ->
     {emqx_session:replies(), inflight()}.
 poll(ReplyFun, SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < ?EPOCH_SIZE ->
-    FetchThreshold = max(1, WindowSize div 2),
+    MinBatchSize = emqx_config:get([session_persistence, min_batch_size]),
+    FetchThreshold = min(MinBatchSize, ceil(WindowSize / 2)),
     FreeSpace = WindowSize - n_inflight(Inflight0),
     case FreeSpace >= FetchThreshold of
         false ->

@@ -96,6 +96,12 @@
     props := map(),
     extra := map()
 }.

+-define(TIMER_PULL, timer_pull).
+-define(TIMER_GET_STREAMS, timer_get_streams).
+-define(TIMER_BUMP_LAST_ALIVE_AT, timer_bump_last_alive_at).
+-type timer() :: ?TIMER_PULL | ?TIMER_GET_STREAMS | ?TIMER_BUMP_LAST_ALIVE_AT.
+
 -type session() :: #{
     %% Client ID
     id := id(),

@@ -111,6 +117,8 @@
     receive_maximum := pos_integer(),
     %% Connection Info
     conninfo := emqx_types:conninfo(),
+    %% Timers
+    timer() => reference(),
     %%
     props := map()
 }.

@@ -120,7 +128,6 @@
 -type clientinfo() :: emqx_types:clientinfo().
 -type conninfo() :: emqx_session:conninfo().
 -type replies() :: emqx_session:replies().
--type timer() :: pull | get_streams | bump_last_alive_at.

 -define(STATS_KEYS, [
     subscriptions_cnt,

@@ -144,8 +151,7 @@
     session().
 create(#{clientid := ClientID}, ConnInfo, Conf) ->
     % TODO: expiration
-    ensure_timers(),
-    ensure_session(ClientID, ConnInfo, Conf).
+    ensure_timers(ensure_session(ClientID, ConnInfo, Conf)).

 -spec open(clientinfo(), conninfo()) ->
     {_IsPresent :: true, session(), []} | false.

@@ -159,10 +165,9 @@ open(#{clientid := ClientID} = _ClientInfo, ConnInfo) ->
     ok = emqx_cm:discard_session(ClientID),
     case session_open(ClientID, ConnInfo) of
         Session0 = #{} ->
-            ensure_timers(),
             ReceiveMaximum = receive_maximum(ConnInfo),
             Session = Session0#{receive_maximum => ReceiveMaximum},
-            {true, Session, []};
+            {true, ensure_timers(Session), []};
         false ->
             false
     end.

@@ -333,9 +338,9 @@ publish(_PacketId, Msg, Session) ->
 puback(_ClientInfo, PacketId, Session = #{id := Id, inflight := Inflight0}) ->
     case emqx_persistent_message_ds_replayer:commit_offset(Id, ack, PacketId, Inflight0) of
         {true, Inflight} ->
-            %% TODO
+            %% TODO: we pass a bogus message into the hook:
             Msg = emqx_message:make(Id, <<>>, <<>>),
-            {ok, Msg, [], Session#{inflight => Inflight}};
+            {ok, Msg, [], pull_now(Session#{inflight => Inflight})};
         {false, _} ->
             %% Invalid Packet Id
             {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND}

@@ -351,9 +356,9 @@ puback(_ClientInfo, PacketId, Session = #{id := Id, inflight := Inflight0}) ->
 pubrec(PacketId, Session = #{id := Id, inflight := Inflight0}) ->
     case emqx_persistent_message_ds_replayer:commit_offset(Id, rec, PacketId, Inflight0) of
         {true, Inflight} ->
-            %% TODO
+            %% TODO: we pass a bogus message into the hook:
             Msg = emqx_message:make(Id, <<>>, <<>>),
-            {ok, Msg, Session#{inflight => Inflight}};
+            {ok, Msg, pull_now(Session#{inflight => Inflight})};
         {false, _} ->
             %% Invalid Packet Id
             {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND}

@@ -399,9 +404,11 @@ deliver(_ClientInfo, _Delivers, Session) ->
     {ok, replies(), session()} | {ok, replies(), timeout(), session()}.
 handle_timeout(
     _ClientInfo,
-    pull,
-    Session = #{id := Id, inflight := Inflight0, receive_maximum := ReceiveMaximum}
+    ?TIMER_PULL,
+    Session0 = #{id := Id, inflight := Inflight0, receive_maximum := ReceiveMaximum}
 ) ->
+    MaxBatchSize = emqx_config:get([session_persistence, max_batch_size]),
+    BatchSize = min(ReceiveMaximum, MaxBatchSize),
     {Publishes, Inflight} = emqx_persistent_message_ds_replayer:poll(
         fun
             (_Seqno, Message = #message{qos = ?QOS_0}) ->

@@ -412,7 +419,7 @@ handle_timeout(
         end,
         Id,
         Inflight0,
-        ReceiveMaximum
+        BatchSize
     ),
     IdlePollInterval = emqx_config:get([session_persistence, idle_poll_interval]),
     Timeout =

@@ -422,13 +429,12 @@ handle_timeout(
             [_ | _] ->
                 0
         end,
-    ensure_timer(pull, Timeout),
-    {ok, Publishes, Session#{inflight := Inflight}};
-handle_timeout(_ClientInfo, get_streams, Session) ->
+    Session = emqx_session:ensure_timer(?TIMER_PULL, Timeout, Session0#{inflight := Inflight}),
+    {ok, Publishes, Session};
+handle_timeout(_ClientInfo, ?TIMER_GET_STREAMS, Session) ->
     renew_streams(Session),
-    ensure_timer(get_streams),
-    {ok, [], Session};
-handle_timeout(_ClientInfo, bump_last_alive_at, Session0) ->
+    {ok, [], emqx_session:ensure_timer(?TIMER_GET_STREAMS, 100, Session)};
+handle_timeout(_ClientInfo, ?TIMER_BUMP_LAST_ALIVE_AT, Session0) ->
     %% Note: we take a pessimistic approach here and assume that the client will be alive
     %% until the next bump timeout. With this, we avoid garbage collecting this session
     %% too early in case the session/connection/node crashes earlier without having time

@@ -436,8 +442,8 @@ handle_timeout(_ClientInfo, bump_last_alive_at, Session0) ->
     BumpInterval = emqx_config:get([session_persistence, last_alive_update_interval]),
     EstimatedLastAliveAt = now_ms() + BumpInterval,
     Session = session_set_last_alive_at_trans(Session0, EstimatedLastAliveAt),
-    ensure_timer(bump_last_alive_at),
-    {ok, [], Session}.
+    {ok, [], emqx_session:ensure_timer(?TIMER_BUMP_LAST_ALIVE_AT, BumpInterval, Session)}.

 -spec replay(clientinfo(), [], session()) ->
     {ok, replies(), session()}.

@@ -957,22 +963,15 @@ export_record(_, _, [], Acc) ->

 %% TODO: find a more reliable way to perform actions that have side
 %% effects. Add `CBM:init' callback to the session behavior?
-ensure_timers() ->
-    ensure_timer(pull),
-    ensure_timer(get_streams),
-    ensure_timer(bump_last_alive_at).
+-spec ensure_timers(session()) -> session().
+ensure_timers(Session0) ->
+    Session1 = emqx_session:ensure_timer(?TIMER_PULL, 100, Session0),
+    Session2 = emqx_session:ensure_timer(?TIMER_GET_STREAMS, 100, Session1),
+    emqx_session:ensure_timer(?TIMER_BUMP_LAST_ALIVE_AT, 100, Session2).

--spec ensure_timer(timer()) -> ok.
-ensure_timer(bump_last_alive_at = Type) ->
-    BumpInterval = emqx_config:get([session_persistence, last_alive_update_interval]),
-    ensure_timer(Type, BumpInterval);
-ensure_timer(Type) ->
-    ensure_timer(Type, 100).
-
--spec ensure_timer(timer(), non_neg_integer()) -> ok.
-ensure_timer(Type, Timeout) ->
-    _ = emqx_utils:start_timer(Timeout, {emqx_session, Type}),
-    ok.
+-spec pull_now(session()) -> session().
+pull_now(Session) ->
+    emqx_session:reset_timer(?TIMER_PULL, 0, Session).

 -spec receive_maximum(conninfo()) -> pos_integer().
 receive_maximum(ConnInfo) ->

@ -1773,6 +1773,22 @@ fields("session_persistence") ->
|
|||
}
|
||||
}
|
||||
)},
|
||||
{"max_batch_size",
|
||||
sc(
|
||||
pos_integer(),
|
||||
#{
|
||||
default => 1000,
|
||||
desc => ?DESC(session_ds_max_batch_size)
|
||||
}
|
||||
)},
|
||||
{"min_batch_size",
|
||||
sc(
|
||||
pos_integer(),
|
||||
#{
|
||||
default => 100,
|
||||
desc => ?DESC(session_ds_min_batch_size)
|
||||
}
|
||||
)},
|
||||
{"idle_poll_interval",
|
||||
sc(
|
||||
timeout_duration(),
|
||||
|
|
|
@@ -111,8 +111,7 @@
     reply/0,
     replies/0,
     common_timer_name/0,
-    custom_timer_name/0,
-    timerset/0
+    custom_timer_name/0
 ]).

 -type session_id() :: _TODO.

@@ -154,8 +153,6 @@
     emqx_session_mem:session()
     | emqx_persistent_session_ds:session().

--type timerset() :: #{custom_timer_name() => _TimerRef :: reference()}.
-
 -define(INFO_KEYS, [
     id,
     created_at,

@@ -477,28 +474,26 @@ handle_timeout(ClientInfo, Timer, Session) ->

 %%--------------------------------------------------------------------

--spec ensure_timer(custom_timer_name(), timeout(), timerset()) ->
-    timerset().
-ensure_timer(Name, _Time, Timers = #{}) when is_map_key(Name, Timers) ->
-    Timers;
-ensure_timer(Name, Time, Timers = #{}) when Time > 0 ->
+-spec ensure_timer(custom_timer_name(), timeout(), map()) ->
+    map().
+ensure_timer(Name, Time, Timers = #{}) when Time >= 0 ->
     TRef = emqx_utils:start_timer(Time, {?MODULE, Name}),
     Timers#{Name => TRef}.

--spec reset_timer(custom_timer_name(), timeout(), timerset()) ->
-    timerset().
-reset_timer(Name, Time, Channel) ->
-    ensure_timer(Name, Time, cancel_timer(Name, Channel)).
+-spec reset_timer(custom_timer_name(), timeout(), map()) ->
+    map().
+reset_timer(Name, Time, Timers) ->
+    ensure_timer(Name, Time, cancel_timer(Name, Timers)).

--spec cancel_timer(custom_timer_name(), timerset()) ->
-    timerset().
-cancel_timer(Name, Timers) ->
-    case maps:take(Name, Timers) of
-        {TRef, NTimers} ->
+-spec cancel_timer(custom_timer_name(), map()) ->
+    map().
+cancel_timer(Name, Timers0) ->
+    case maps:take(Name, Timers0) of
+        {TRef, Timers} ->
             ok = emqx_utils:cancel_timer(TRef),
-            NTimers;
+            Timers;
         error ->
-            Timers
+            Timers0
     end.

 %%--------------------------------------------------------------------

@@ -611,8 +606,8 @@ maybe_mock_impl_mod({Mock, _State}) when is_atom(Mock) ->
     Mock.
 -else.
-maybe_mock_impl_mod(_) ->
-    error(noimpl).
+-spec maybe_mock_impl_mod(_Session) -> no_return().
+maybe_mock_impl_mod(Session) ->
+    error(noimpl, [Session]).
 -endif.

 -spec choose_impl_mod(conninfo()) -> module().

@@ -91,13 +91,11 @@ match([H | T1], [H | T2]) ->
     match(T1, T2);
 match([_H | T1], ['+' | T2]) ->
     match(T1, T2);
+match([<<>> | T1], ['' | T2]) ->
+    match(T1, T2);
 match(_, ['#']) ->
     true;
-match([_H1 | _], [_H2 | _]) ->
-    false;
-match([_H1 | _], []) ->
-    false;
-match([], [_H | _T2]) ->
+match(_, _) ->
     false.

 -spec match_share(Name, Filter) -> boolean() when

@@ -70,8 +70,8 @@
     emqx_cluster/2,
     start_ekka/0,
     start_epmd/0,
-    start_slave/2,
-    stop_slave/1,
+    start_peer/2,
+    stop_peer/1,
     listener_port/2
 ]).

@@ -734,13 +734,11 @@ emqx_cluster(Specs0, CommonOpts) ->

 %% Lower level starting API

--spec start_slave(shortname(), node_opts()) -> nodename().
-start_slave(Name, Opts) when is_list(Opts) ->
-    start_slave(Name, maps:from_list(Opts));
-start_slave(Name, Opts) when is_map(Opts) ->
-    SlaveMod = maps:get(peer_mod, Opts, ct_slave),
+-spec start_peer(shortname(), node_opts()) -> nodename().
+start_peer(Name, Opts) when is_list(Opts) ->
+    start_peer(Name, maps:from_list(Opts));
+start_peer(Name, Opts) when is_map(Opts) ->
     Node = node_name(Name),
-    put_peer_mod(Node, SlaveMod),
     Cookie = atom_to_list(erlang:get_cookie()),
     PrivDataDir = maps:get(priv_data_dir, Opts, "/tmp"),
     NodeDataDir = filename:join([

@@ -750,19 +748,13 @@ start_slave(Name, Opts) when is_map(Opts) ->
     ]),
     DoStart =
         fun() ->
-            case SlaveMod of
-                ct_slave ->
             ct:pal("~p: node data dir: ~s", [Node, NodeDataDir]),
             Envs = [
                 {"HOCON_ENV_OVERRIDE_PREFIX", "EMQX_"},
                 {"EMQX_NODE__COOKIE", Cookie},
                 {"EMQX_NODE__DATA_DIR", NodeDataDir}
             ],
-                    emqx_cth_peer:start(Node, erl_flags(), Envs);
-                slave ->
-                    Envs = [{"HOCON_ENV_OVERRIDE_PREFIX", "EMQX_"}],
-                    emqx_cth_peer:start(Node, ebin_path(), Envs)
-            end
+            emqx_cth_peer:start(Node, erl_flags(), Envs)
         end,
     case DoStart() of
         {ok, _} ->

@@ -778,7 +770,7 @@ start_slave(Name, Opts) when is_map(Opts) ->
     Node.

 %% Node stopping
-stop_slave(Node0) ->
+stop_peer(Node0) ->
     Node = node_name(Node0),
     emqx_cth_peer:stop(Node).

@@ -939,7 +931,7 @@ setup_node(Node, Opts) when is_map(Opts) ->
                 ignore ->
                     ok;
                 Err ->
-                    stop_slave(Node),
+                    stop_peer(Node),
                     error({failed_to_join_cluster, #{node => Node, error => Err}})
             end
     end,

@@ -956,19 +948,6 @@ set_env_once(Var, Value) ->
     end,
     ok.

-put_peer_mod(Node, SlaveMod) ->
-    put({?MODULE, Node}, SlaveMod),
-    ok.
-
-get_peer_mod(Node) ->
-    case get({?MODULE, Node}) of
-        undefined -> ct_slave;
-        SlaveMod -> SlaveMod
-    end.
-
-erase_peer_mod(Node) ->
-    erase({?MODULE, Node}).
-
 node_name(Name) ->
     case string:tokens(atom_to_list(Name), "@") of
         [_Name, _Host] ->

@@ -52,6 +52,7 @@
 -define(TIMEOUT_NODE_START_MS, 15000).
 -define(TIMEOUT_APPS_START_MS, 30000).
 -define(TIMEOUT_NODE_STOP_S, 15).
+-define(TIMEOUT_CLUSTER_WAIT_MS, timer:seconds(10)).

 %%

@@ -91,11 +92,7 @@
     %% Working directory
     %% If this directory is not empty, starting up the node applications will fail
     %% Default: "${ClusterOpts.work_dir}/${nodename}"
-    work_dir => file:name(),
-
-    % Tooling to manage nodes
-    % Default: `ct_slave`.
-    driver => ct_slave | slave
+    work_dir => file:name()
 }}.

 -spec start([nodespec()], ClusterOpts) ->

@@ -118,11 +115,52 @@ start(NodeSpecs) ->
     % 2. Start applications needed to enable clustering
     % Generally, this causes some applications to restart, but we deliberately don't
     % start them yet.
-    _ = lists:foreach(fun run_node_phase_cluster/1, NodeSpecs),
+    ShouldAppearInRunningNodes = lists:map(fun run_node_phase_cluster/1, NodeSpecs),
+    IsClustered = lists:member(true, ShouldAppearInRunningNodes),
     % 3. Start applications after cluster is formed
     % Cluster-joins are complete, so they shouldn't restart in the background anymore.
     _ = emqx_utils:pmap(fun run_node_phase_apps/1, NodeSpecs, ?TIMEOUT_APPS_START_MS),
-    [Node || #{name := Node} <- NodeSpecs].
+    Nodes = [Node || #{name := Node} <- NodeSpecs],
+    %% 4. Wait for the nodes to cluster
+    case IsClustered of
+        true ->
+            ok = wait_clustered(Nodes, ?TIMEOUT_CLUSTER_WAIT_MS);
+        false ->
+            ok
+    end,
+    Nodes.
+
+%% Wait until all nodes see all nodes as mria running nodes
+wait_clustered(Nodes, Timeout) ->
+    Check = fun(Node) ->
+        Running = erpc:call(Node, mria, running_nodes, []),
+        case Nodes -- Running of
+            [] ->
+                true;
+            NotRunning ->
+                {false, NotRunning}
+        end
+    end,
+    wait_clustered(Nodes, Check, deadline(Timeout)).
+
+wait_clustered([], _Check, _Deadline) ->
+    ok;
+wait_clustered([Node | Nodes] = All, Check, Deadline) ->
+    IsOverdue = is_overdue(Deadline),
+    case Check(Node) of
+        true ->
+            wait_clustered(Nodes, Check, Deadline);
+        {false, NodesNotRunnging} when IsOverdue ->
+            error(
+                {timeout, #{
+                    checking_from_node => Node,
+                    nodes_not_running => NodesNotRunnging
+                }}
+            );
+        {false, Nodes} ->
+            timer:sleep(100),
+            wait_clustered(All, Check, Deadline)
+    end.

 restart(Node, Spec) ->
     ct:pal("Stopping peer node ~p", [Node]),

@@ -162,8 +200,7 @@ mk_init_nodespec(N, Name, NodeOpts, ClusterOpts) ->
         role => core,
         apps => [],
         base_port => BasePort,
-        work_dir => filename:join([WorkDir, Node]),
-        driver => ct_slave
+        work_dir => filename:join([WorkDir, Node])
     },
     maps:merge(Defaults, NodeOpts).

@@ -309,15 +346,21 @@ start_bare_nodes(Names, Timeout) ->
         end,
         Names
     ),
-    Deadline = erlang:monotonic_time() + erlang:convert_time_unit(Timeout, millisecond, nanosecond),
+    Deadline = deadline(Timeout),
     Nodes = wait_boot_complete(Waits, Deadline),
     lists:foreach(fun(Node) -> pong = net_adm:ping(Node) end, Nodes),
     Nodes.

+deadline(Timeout) ->
+    erlang:monotonic_time() + erlang:convert_time_unit(Timeout, millisecond, nanosecond).
+
+is_overdue(Deadline) ->
+    erlang:monotonic_time() > Deadline.
+
 wait_boot_complete([], _) ->
     [];
 wait_boot_complete(Waits, Deadline) ->
-    case erlang:monotonic_time() > Deadline of
+    case is_overdue(Deadline) of
         true ->
             error({timeout, Waits});
         false ->

@@ -340,11 +383,11 @@ node_init(Node) ->
     ok = snabbkaffe:forward_trace(Node),
     ok.

+%% Returns 'true' if this node should appear in running nodes list.
 run_node_phase_cluster(Spec = #{name := Node}) ->
     ok = load_apps(Node, Spec),
     ok = start_apps_clustering(Node, Spec),
-    ok = maybe_join_cluster(Node, Spec),
-    ok.
+    maybe_join_cluster(Node, Spec).

 run_node_phase_apps(Spec = #{name := Node}) ->
     ok = start_apps(Node, Spec),

@@ -368,18 +411,20 @@ start_apps(Node, #{apps := Apps} = Spec) ->
 suite_opts(Spec) ->
     maps:with([work_dir, boot_type], Spec).

+%% Returns 'true' if this node should appear in the cluster.
 maybe_join_cluster(_Node, #{boot_type := restart}) ->
     %% when restart, the node should already be in the cluster
     %% hence no need to (re)join
-    ok;
+    true;
 maybe_join_cluster(_Node, #{role := replicant}) ->
-    ok;
+    true;
 maybe_join_cluster(Node, Spec) ->
     case get_cluster_seeds(Spec) of
         [JoinTo | _] ->
-            ok = join_cluster(Node, JoinTo);
+            ok = join_cluster(Node, JoinTo),
+            true;
         [] ->
-            ok
+            false
     end.

 join_cluster(Node, JoinTo) ->

@@ -282,6 +282,34 @@ t_publish_as_persistent(_Config) ->
         emqtt:stop(Pub)
     end.

+t_publish_empty_topic_levels(_Config) ->
+    Sub = connect(<<?MODULE_STRING "1">>, true, 30),
+    Pub = connect(<<?MODULE_STRING "2">>, true, 30),
+    try
+        {ok, _, [?RC_GRANTED_QOS_1]} = emqtt:subscribe(Sub, <<"t//+//#">>, qos1),
+        Messages = [
+            {<<"t//1">>, <<"1">>},
+            {<<"t//1/">>, <<"2">>},
+            {<<"t//2//">>, <<"3">>},
+            {<<"t//2//foo">>, <<"4">>},
+            {<<"t//2/foo">>, <<"5">>},
+            {<<"t/3/bar">>, <<"6">>}
+        ],
+        [emqtt:publish(Pub, Topic, Payload, ?QOS_1) || {Topic, Payload} <- Messages],
+        Received = receive_messages(length(Messages), 1_500),
+        ?assertMatch(
+            [
+                #{topic := <<"t//1/">>, payload := <<"2">>},
+                #{topic := <<"t//2//">>, payload := <<"3">>},
+                #{topic := <<"t//2//foo">>, payload := <<"4">>}
+            ],
+            lists:sort(emqx_utils_maps:key_comparer(payload), Received)
+        )
+    after
+        emqtt:stop(Sub),
+        emqtt:stop(Pub)
+    end.
+
 %%

 connect(ClientId, CleanStart, EI) ->

@@ -322,15 +350,18 @@ consume(It) ->
     end.

 receive_messages(Count) ->
-    lists:reverse(receive_messages(Count, [])).
+    receive_messages(Count, 5_000).

-receive_messages(0, Msgs) ->
+receive_messages(Count, Timeout) ->
+    lists:reverse(receive_messages(Count, [], Timeout)).
+
+receive_messages(0, Msgs, _Timeout) ->
     Msgs;
-receive_messages(Count, Msgs) ->
+receive_messages(Count, Msgs, Timeout) ->
     receive
         {publish, Msg} ->
-            receive_messages(Count - 1, [Msg | Msgs])
-    after 5_000 ->
+            receive_messages(Count - 1, [Msg | Msgs], Timeout)
+    after Timeout ->
         Msgs
     end.

@@ -63,7 +63,6 @@ init_per_suite(Config) ->
     end,
     emqx_common_test_helpers:boot_modules(all),
     emqx_common_test_helpers:start_apps([]),
-    emqx_logger:set_log_level(debug),
     [{dist_pid, DistPid} | Config].

 end_per_suite(Config) ->

@@ -575,7 +574,7 @@ t_local(Config) when is_list(Config) ->
         <<"sticky_group">> => sticky
     },

-    Node = start_slave('local_shared_sub_local_1', 21999),
+    Node = start_peer('local_shared_sub_local_1', 21999),
     ok = ensure_group_config(GroupConfig),
     ok = ensure_group_config(Node, GroupConfig),

@@ -606,7 +605,7 @@

     emqtt:stop(ConnPid1),
     emqtt:stop(ConnPid2),
-    stop_slave(Node),
+    stop_peer(Node),

     ?assertEqual(local, emqx_shared_sub:strategy(<<"local_group">>)),
     ?assertEqual(local, RemoteLocalGroupStrategy),

@@ -628,7 +627,7 @@ t_remote(Config) when is_list(Config) ->
         <<"sticky_group">> => sticky
     },

-    Node = start_slave('remote_shared_sub_remote_1', 21999),
+    Node = start_peer('remote_shared_sub_remote_1', 21999),
     ok = ensure_group_config(GroupConfig),
     ok = ensure_group_config(Node, GroupConfig),

@@ -664,7 +663,7 @@ t_remote(Config) when is_list(Config) ->
     after
         emqtt:stop(ConnPidLocal),
         emqtt:stop(ConnPidRemote),
-        stop_slave(Node)
+        stop_peer(Node)
     end.

 t_local_fallback(Config) when is_list(Config) ->

@@ -677,7 +676,7 @@ t_local_fallback(Config) when is_list(Config) ->
     Topic = <<"local_foo/bar">>,
     ClientId1 = <<"ClientId1">>,
     ClientId2 = <<"ClientId2">>,
-    Node = start_slave('local_fallback_shared_sub_1', 11888),
+    Node = start_peer('local_fallback_shared_sub_1', 11888),

     {ok, ConnPid1} = emqtt:start_link([{clientid, ClientId1}]),
     {ok, _} = emqtt:connect(ConnPid1),

@@ -693,7 +692,7 @@ t_local_fallback(Config) when is_list(Config) ->
     {true, UsedSubPid2} = last_message(<<"hello2">>, [ConnPid1], 2_000),

     emqtt:stop(ConnPid1),
-    stop_slave(Node),
+    stop_peer(Node),

     ?assertEqual(UsedSubPid1, UsedSubPid2),
     ok.

@@ -1253,7 +1252,7 @@ recv_msgs(Count, Msgs) ->
         Msgs
     end.

-start_slave(Name, Port) ->
+start_peer(Name, Port) ->
     {ok, Node} = emqx_cth_peer:start_link(
         Name,
         ebin_path()

@@ -1262,7 +1261,7 @@ start_slave(Name, Port) ->
     setup_node(Node, Port),
     Node.

-stop_slave(Node) ->
+stop_peer(Node) ->
     rpc:call(Node, mria, leave, []),
     emqx_cth_peer:stop(Node).

@@ -145,15 +145,16 @@ assert_messages_missed(Ls1, Ls2) ->

 assert_messages_order([], []) ->
     ok;
-assert_messages_order([Msg | Ls1], [#{payload := No} | Ls2]) ->
-    case emqx_message:payload(Msg) == No of
-        false ->
+assert_messages_order([Msg | Expected], Received) ->
+    %% Account for duplicate messages:
+    case lists:splitwith(fun(#{payload := P}) -> emqx_message:payload(Msg) == P end, Received) of
+        {[], [#{payload := Mismatch} | _]} ->
             ct:fail("Message order is not correct, expected: ~p, received: ~p", [
-                emqx_message:payload(Msg), No
+                emqx_message:payload(Msg), Mismatch
             ]),
             error;
-        true ->
-            assert_messages_order(Ls1, Ls2)
+        {_Matching, Rest} ->
+            assert_messages_order(Expected, Rest)
     end.

 messages(Offset, Cnt) ->

@@ -115,6 +115,12 @@ t_sys_match(_) ->
     true = match(<<"a/b/$c">>, <<"a/b/#">>),
     true = match(<<"a/b/$c">>, <<"a/#">>).

+t_match_tokens(_) ->
+    true = match(emqx_topic:tokens(<<"a/b/c">>), words(<<"a/+/c">>)),
+    true = match(emqx_topic:tokens(<<"a//c">>), words(<<"a/+/c">>)),
+    false = match(emqx_topic:tokens(<<"a//c/">>), words(<<"a/+/c">>)),
+    true = match(emqx_topic:tokens(<<"a//c/">>), words(<<"a/+/c/#">>)).
+
 t_match_perf(_) ->
     true = match(<<"a/b/ccc">>, <<"a/#">>),
     Name = <<"/abkc/19383/192939/akakdkkdkak/xxxyyuya/akakak">>,

@@ -260,11 +260,10 @@ create(BridgeType, BridgeName, RawConf) ->
         #{override_to => cluster}
     ).

-%% NOTE: This function can cause broken references from rules but it is only
-%% called directly from test cases.
-
 -spec remove(bridge_v2_type(), bridge_v2_name()) -> ok | {error, any()}.
 remove(BridgeType, BridgeName) ->
+    %% NOTE: This function can cause broken references from rules but it is only
+    %% called directly from test cases.
     ?SLOG(debug, #{
         brige_action => remove,
         bridge_version => 2,

@@ -16,6 +16,32 @@
 -module(emqx_bridge_v2_tests).

 -include_lib("eunit/include/eunit.hrl").
+-include_lib("hocon/include/hoconsc.hrl").
+
+%%------------------------------------------------------------------------------
+%% Helper fns
+%%------------------------------------------------------------------------------
+
+non_deprecated_fields(Fields) ->
+    [K || {K, Schema} <- Fields, not hocon_schema:is_deprecated(Schema)].
+
+find_resource_opts_fields(SchemaMod, FieldName) ->
+    Fields = hocon_schema:fields(SchemaMod, FieldName),
+    case lists:keyfind(resource_opts, 1, Fields) of
+        false ->
+            undefined;
+        {resource_opts, ROSc} ->
+            get_resource_opts_subfields(ROSc)
+    end.
+
+get_resource_opts_subfields(Sc) ->
+    ?R_REF(SchemaModRO, FieldNameRO) = hocon_schema:field_schema(Sc, type),
+    ROFields = non_deprecated_fields(hocon_schema:fields(SchemaModRO, FieldNameRO)),
+    proplists:get_keys(ROFields).
+
+%%------------------------------------------------------------------------------
+%% Testcases
+%%------------------------------------------------------------------------------
+
 resource_opts_union_connector_actions_test() ->
     %% The purpose of this test is to ensure we have split `resource_opts' fields

@@ -37,5 +63,47 @@ resource_opts_union_connector_actions_test() ->
     ),
     ok.

-non_deprecated_fields(Fields) ->
-    [K || {K, Schema} <- Fields, not hocon_schema:is_deprecated(Schema)].
+connector_resource_opts_test() ->
+    %% The purpose of this test is to ensure that all connectors have the `resource_opts'
+    %% field with at least some sub-fields that should always be present.
+    %% These are used by `emqx_resource_manager' itself to manage the resource lifecycle.
+    MinimumROFields = [
+        health_check_interval,
+        query_mode,
+        start_after_created,
+        start_timeout
+    ],
+    ConnectorSchemasRefs =
+        lists:map(
+            fun({Type, #{type := ?MAP(_, ?R_REF(SchemaMod, FieldName))}}) ->
+                {Type, find_resource_opts_fields(SchemaMod, FieldName)}
+            end,
+            emqx_connector_schema:fields(connectors)
+        ),
+    ConnectorsMissingRO = [Type || {Type, undefined} <- ConnectorSchemasRefs],
+    ConnectorsMissingROSubfields =
+        lists:filtermap(
+            fun
+                ({_Type, undefined}) ->
+                    false;
+                ({Type, Fs}) ->
+                    case MinimumROFields -- Fs of
+                        [] ->
+                            false;
+                        MissingFields ->
+                            {true, {Type, MissingFields}}
+                    end
+            end,
+            ConnectorSchemasRefs
+        ),
+    ?assertEqual(
+        #{
+            missing_resource_opts_field => #{},
+            missing_subfields => #{}
+        },
+        #{
+            missing_resource_opts_field => maps:from_keys(ConnectorsMissingRO, true),
+            missing_subfields => maps:from_list(ConnectorsMissingROSubfields)
+        }
+    ),
+    ok.

@@ -588,7 +588,6 @@ cluster(Config) ->
         [
             {apps, [emqx_conf, emqx_rule_engine, emqx_bridge]},
             {listener_ports, []},
-            {peer_mod, slave},
             {priv_data_dir, PrivDataDir},
             {load_schema, true},
             {start_autocluster, true},

@@ -611,7 +610,7 @@ start_cluster(Cluster) ->
     Nodes = lists:map(
         fun({Name, Opts}) ->
             ct:pal("starting ~p", [Name]),
-            emqx_common_test_helpers:start_slave(Name, Opts)
+            emqx_common_test_helpers:start_peer(Name, Opts)
         end,
         Cluster
     ),

@@ -620,7 +619,7 @@ start_cluster(Cluster) ->
         emqx_utils:pmap(
             fun(N) ->
                 ct:pal("stopping ~p", [N]),
-                emqx_common_test_helpers:stop_slave(N)
+                emqx_common_test_helpers:stop_peer(N)
             end,
             Nodes
         )

@@ -1,6 +1,6 @@
 {application, emqx_bridge_http, [
     {description, "EMQX HTTP Bridge and Connector Application"},
-    {vsn, "0.1.5"},
+    {vsn, "0.1.6"},
     {registered, []},
     {applications, [kernel, stdlib, emqx_connector, emqx_resource, ehttpc]},
     {env, [{emqx_action_info_modules, [emqx_bridge_http_action_info]}]},

@@ -24,7 +24,6 @@

 -export([
     bridge_v2_examples/1,
-    %%conn_bridge_examples/1,
     connector_examples/1
 ]).

@@ -169,7 +168,7 @@ basic_config() ->
             }
         )},
     {description, emqx_schema:description_schema()}
-    ] ++ http_resource_opts() ++ connector_opts().
+    ] ++ connector_opts().

 request_config() ->
     [

@@ -321,7 +320,7 @@ http_resource_opts() ->
 connector_opts() ->
     mark_request_field_deperecated(
         proplists:delete(max_retries, emqx_bridge_http_connector:fields(config))
-    ).
+    ) ++ http_resource_opts().

 mark_request_field_deperecated(Fields) ->
     lists:map(

@@ -548,6 +548,8 @@ fields(consumer_kafka_opts) ->
             #{default => <<"5s">>, desc => ?DESC(consumer_offset_commit_interval_seconds)}
         )}
     ];
+fields(connector_resource_opts) ->
+    emqx_connector_schema:resource_opts_fields();
 fields(resource_opts) ->
     SupportedFields = [health_check_interval],
     CreationOpts = emqx_bridge_v2_schema:resource_opts_fields(),

@@ -568,6 +570,8 @@ desc("config_connector") ->
     ?DESC("desc_config");
 desc(resource_opts) ->
     ?DESC(emqx_resource_schema, "resource_opts");
+desc(connector_resource_opts) ->
+    ?DESC(emqx_resource_schema, "resource_opts");
 desc("get_" ++ Type) when
     Type =:= "consumer"; Type =:= "producer"; Type =:= "connector"; Type =:= "bridge_v2"
 ->

@@ -626,7 +630,7 @@ kafka_connector_config_fields() ->
         })},
     {socket_opts, mk(ref(socket_opts), #{required => false, desc => ?DESC(socket_opts)})},
     {ssl, mk(ref(ssl_client_opts), #{})}
-    ] ++ [resource_opts()].
+    ] ++ emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts).

 producer_opts(ActionOrBridgeV1) ->
     [

@@ -1069,20 +1069,12 @@ setup_and_start_listeners(Node, NodeOpts) ->

 cluster(Config) ->
     PrivDataDir = ?config(priv_dir, Config),
-    PeerModule =
-        case os:getenv("IS_CI") of
-            false ->
-                slave;
-            _ ->
-                ct_slave
-        end,
     ExtraEnvHandlerHook = setup_group_subscriber_spy_fn(),
     Cluster = emqx_common_test_helpers:emqx_cluster(
         [core, core],
         [
             {apps, [emqx_conf, emqx_rule_engine, emqx_bridge_kafka, emqx_bridge]},
             {listener_ports, []},
-            {peer_mod, PeerModule},
             {priv_data_dir, PrivDataDir},
             {load_schema, true},
             {start_autocluster, true},

@@ -1744,14 +1736,14 @@ t_cluster_group(Config) ->
     begin
         Nodes =
             [_N1, N2 | _] = [
-                emqx_common_test_helpers:start_slave(Name, Opts)
+                emqx_common_test_helpers:start_peer(Name, Opts)
                 || {Name, Opts} <- Cluster
             ],
         on_exit(fun() ->
             emqx_utils:pmap(
                 fun(N) ->
                     ct:pal("stopping ~p", [N]),
-                    ok = emqx_common_test_helpers:stop_slave(N)
+                    ok = emqx_common_test_helpers:stop_peer(N)
                 end,
                 Nodes
             )

@@ -1827,10 +1819,10 @@ t_node_joins_existing_cluster(Config) ->
     begin
         [{Name1, Opts1}, {Name2, Opts2} | _] = Cluster,
         ct:pal("starting ~p", [Name1]),
-        N1 = emqx_common_test_helpers:start_slave(Name1, Opts1),
+        N1 = emqx_common_test_helpers:start_peer(Name1, Opts1),
         on_exit(fun() ->
             ct:pal("stopping ~p", [N1]),
-            ok = emqx_common_test_helpers:stop_slave(N1)
+            ok = emqx_common_test_helpers:stop_peer(N1)
         end),
         {{ok, _}, {ok, _}} =
             ?wait_async_action(

@@ -1870,10 +1862,10 @@ t_node_joins_existing_cluster(Config) ->
             30_000
         ),
         ct:pal("starting ~p", [Name2]),
-        N2 = emqx_common_test_helpers:start_slave(Name2, Opts2),
+        N2 = emqx_common_test_helpers:start_peer(Name2, Opts2),
         on_exit(fun() ->
             ct:pal("stopping ~p", [N2]),
-            ok = emqx_common_test_helpers:stop_slave(N2)
+            ok = emqx_common_test_helpers:stop_peer(N2)
         end),
         Nodes = [N1, N2],
         wait_for_cluster_rpc(N2),

@@ -1963,7 +1955,7 @@ t_cluster_node_down(Config) ->
         lists:map(
             fun({Name, Opts}) ->
                 ct:pal("starting ~p", [Name]),
-                emqx_common_test_helpers:start_slave(Name, Opts)
+                emqx_common_test_helpers:start_peer(Name, Opts)
             end,
             Cluster
         ),

@@ -1971,7 +1963,7 @@ t_cluster_node_down(Config) ->
         emqx_utils:pmap(
             fun(N) ->
                 ct:pal("stopping ~p", [N]),
-                ok = emqx_common_test_helpers:stop_slave(N)
+                ok = emqx_common_test_helpers:stop_peer(N)
             end,
             Nodes
         )

@@ -2016,7 +2008,7 @@ t_cluster_node_down(Config) ->
     {TId, Pid} = start_async_publisher(Config, KafkaTopic),

     ct:pal("stopping node ~p", [N1]),
-    ok = emqx_common_test_helpers:stop_slave(N1),
+    ok = emqx_common_test_helpers:stop_peer(N1),

     %% Give some time for the consumers in remaining node to
     %% rebalance.

@ -53,7 +53,8 @@ fields("config") ->
|
|||
];
|
||||
fields("config_connector") ->
|
||||
emqx_connector_schema:common_fields() ++
|
||||
fields("connection_fields");
|
||||
fields("connection_fields") ++
|
||||
emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts);
|
||||
fields("connection_fields") ->
|
||||
[
|
||||
{parameters,
|
||||
|
@ -93,6 +94,8 @@ fields(action_parameters) ->
|
|||
{collection, mk(binary(), #{desc => ?DESC("collection"), default => <<"mqtt">>})},
|
||||
{payload_template, mk(binary(), #{required => false, desc => ?DESC("payload_template")})}
|
||||
];
|
||||
fields(connector_resource_opts) ->
|
||||
emqx_connector_schema:resource_opts_fields();
|
||||
fields(resource_opts) ->
|
||||
fields("creation_opts");
|
||||
fields(mongodb_rs) ->
|
||||
|
@ -202,6 +205,8 @@ desc("creation_opts") ->
|
|||
?DESC(emqx_resource_schema, "creation_opts");
|
||||
desc(resource_opts) ->
|
||||
?DESC(emqx_resource_schema, "resource_opts");
|
||||
desc(connector_resource_opts) ->
|
||||
?DESC(emqx_resource_schema, "resource_opts");
|
||||
desc(mongodb_rs) ->
|
||||
?DESC(mongodb_rs_conf);
|
||||
desc(mongodb_sharded) ->
|
||||
|
|
|
@@ -54,7 +54,15 @@ bridge_v1_config_to_connector_config(BridgeV1Config) ->
     ConnectorTopLevelKeys = schema_keys("config_connector"),
     ConnectorKeys = maps:keys(BridgeV1Config) -- (ActionKeys -- ConnectorTopLevelKeys),
     ConnectorParametersKeys = ConnectorKeys -- ConnectorTopLevelKeys,
-    make_config_map(ConnectorKeys, ConnectorParametersKeys, BridgeV1Config).
+    ConnConfig0 = make_config_map(ConnectorKeys, ConnectorParametersKeys, BridgeV1Config),
+    emqx_utils_maps:update_if_present(
+        <<"resource_opts">>,
+        fun(ResourceOpts) ->
+            CommonROSubfields = emqx_connector_schema:common_resource_opts_subfields_bin(),
+            maps:with(CommonROSubfields, ResourceOpts)
+        end,
+        ConnConfig0
+    ).

 make_config_map(PickKeys, IndentKeys, Config) ->
     Conf0 = maps:with(PickKeys, Config),

@@ -517,19 +517,11 @@ try_decode_json(Payload) ->

 cluster(Config) ->
     PrivDataDir = ?config(priv_dir, Config),
-    PeerModule =
-        case os:getenv("IS_CI") of
-            false ->
-                slave;
-            _ ->
-                ct_slave
-        end,
     Cluster = emqx_common_test_helpers:emqx_cluster(
         [core, core],
         [
             {apps, [emqx_conf] ++ ?APPS ++ [pulsar]},
             {listener_ports, []},
-            {peer_mod, PeerModule},
             {priv_data_dir, PrivDataDir},
             {load_schema, true},
             {start_autocluster, true},

@@ -551,7 +543,7 @@ cluster(Config) ->
 start_cluster(Cluster) ->
     Nodes =
         [
-            emqx_common_test_helpers:start_slave(Name, Opts)
+            emqx_common_test_helpers:start_peer(Name, Opts)
             || {Name, Opts} <- Cluster
         ],
     NumNodes = length(Nodes),

@@ -559,7 +551,7 @@ start_cluster(Cluster) ->
         emqx_utils:pmap(
             fun(N) ->
                 ct:pal("stopping ~p", [N]),
-                ok = emqx_common_test_helpers:stop_slave(N)
+                ok = emqx_common_test_helpers:stop_peer(N)
             end,
             Nodes
         )

@ -51,6 +51,7 @@ fields("config_connector") ->
|
|||
)}
|
||||
] ++
|
||||
emqx_redis:redis_fields() ++
|
||||
emqx_connector_schema:resource_opts_ref(?MODULE, resource_opts) ++
|
||||
emqx_connector_schema_lib:ssl_fields();
|
||||
fields(action) ->
|
||||
{?TYPE,
|
||||
|
|
|
@@ -93,7 +93,9 @@ roots() ->
     [{config, #{type => hoconsc:ref(?MODULE, config)}}].

 fields(config) ->
-    emqx_connector_schema:common_fields() ++ fields("connection_fields");
+    emqx_connector_schema:common_fields() ++
+        fields("connection_fields") ++
+        emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts);
 fields("connection_fields") ->
     [
         {server, server()},

@@ -114,6 +116,8 @@ fields("connection_fields") ->
             emqx_connector_schema_lib:pool_size(Other)
         end}
     ];
+fields(connector_resource_opts) ->
+    emqx_connector_schema:resource_opts_fields();
 fields(Field) when
     Field == "get";
     Field == "post";

@@ -125,6 +129,8 @@ fields(Field) when

 desc(config) ->
     ?DESC("desc_config");
+desc(connector_resource_opts) ->
+    ?DESC(emqx_resource_schema, "resource_opts");
 desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
     ["Configuration for Syskeeper Proxy using `", string:to_upper(Method), "` method."];
 desc(_) ->

@ -77,7 +77,9 @@ namespace() -> "connector_syskeeper_proxy".
|
|||
roots() -> [].
|
||||
|
||||
fields(config) ->
|
||||
emqx_connector_schema:common_fields() ++ fields("connection_fields");
|
||||
emqx_connector_schema:common_fields() ++
|
||||
fields("connection_fields") ++
|
||||
emqx_connector_schema:resource_opts_ref(?MODULE, connector_resource_opts);
|
||||
fields("connection_fields") ->
|
||||
[
|
||||
{listen, listen()},
|
||||
|
@ -92,6 +94,8 @@ fields("connection_fields") ->
|
|||
#{desc => ?DESC(handshake_timeout), default => <<"10s">>}
|
||||
)}
|
||||
];
|
||||
fields(connector_resource_opts) ->
|
||||
emqx_connector_schema:resource_opts_fields();
|
||||
fields(Field) when
|
||||
Field == "get";
|
||||
Field == "post";
|
||||
|
@ -103,6 +107,8 @@ fields(Field) when
|
|||
|
||||
desc(config) ->
|
||||
?DESC("desc_config");
|
||||
desc(connector_resource_opts) ->
|
||||
?DESC(emqx_resource_schema, "resource_opts");
|
||||
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
|
||||
["Configuration for Syskeeper Proxy using `", string:to_upper(Method), "` method."];
|
||||
desc(_) ->
|
||||
|
|
|
@@ -222,16 +222,16 @@ assert_config_load_done(Nodes) ->
     ).

 stop_cluster(Nodes) ->
-    emqx_utils:pmap(fun emqx_common_test_helpers:stop_slave/1, Nodes).
+    emqx_utils:pmap(fun emqx_common_test_helpers:stop_peer/1, Nodes).

 start_cluster(Specs) ->
-    [emqx_common_test_helpers:start_slave(Name, Opts) || {Name, Opts} <- Specs].
+    [emqx_common_test_helpers:start_peer(Name, Opts) || {Name, Opts} <- Specs].

 start_cluster_async(Specs) ->
     [
         begin
             Opts1 = maps:remove(join_to, Opts),
-            spawn_link(fun() -> emqx_common_test_helpers:start_slave(Name, Opts1) end),
+            spawn_link(fun() -> emqx_common_test_helpers:start_peer(Name, Opts1) end),
             timer:sleep(7_000)
         end
         || {Name, Opts} <- Specs

@@ -382,9 +382,13 @@ safe_atom(Bin) when is_binary(Bin) -> binary_to_existing_atom(Bin, utf8);
 safe_atom(Atom) when is_atom(Atom) -> Atom.

 parse_opts(Conf, Opts0) ->
-    override_start_after_created(Conf, Opts0).
+    Opts1 = override_start_after_created(Conf, Opts0),
+    set_no_buffer_workers(Opts1).

 override_start_after_created(Config, Opts) ->
     Enabled = maps:get(enable, Config, true),
     StartAfterCreated = Enabled andalso maps:get(start_after_created, Opts, Enabled),
     Opts#{start_after_created => StartAfterCreated}.
+
+set_no_buffer_workers(Opts) ->
+    Opts#{spawn_buffer_workers => false}.

@@ -40,7 +40,13 @@
     type_and_name_fields/1
 ]).

--export([resource_opts_fields/0, resource_opts_fields/1]).
+-export([
+    common_resource_opts_subfields/0,
+    common_resource_opts_subfields_bin/0,
+    resource_opts_fields/0,
+    resource_opts_fields/1,
+    resource_opts_ref/2
+]).

 -export([examples/1]).

@@ -178,14 +184,19 @@ split_bridge_to_connector_and_action(
     %% Get connector fields from bridge config
     lists:foldl(
         fun({ConnectorFieldName, _Spec}, ToTransformSoFar) ->
-            case maps:is_key(to_bin(ConnectorFieldName), BridgeV1Conf) of
+            ConnectorFieldNameBin = to_bin(ConnectorFieldName),
+            case maps:is_key(ConnectorFieldNameBin, BridgeV1Conf) of
                 true ->
-                    NewToTransform = maps:put(
-                        to_bin(ConnectorFieldName),
-                        maps:get(to_bin(ConnectorFieldName), BridgeV1Conf),
-                        ToTransformSoFar
+                    PrevFieldConfig =
+                        project_to_connector_resource_opts(
+                            ConnectorFieldNameBin,
+                            maps:get(ConnectorFieldNameBin, BridgeV1Conf)
                         ),
-                    NewToTransform;
+                    maps:put(
+                        ConnectorFieldNameBin,
+                        PrevFieldConfig,
+                        ToTransformSoFar
+                    );
                 false ->
                     ToTransformSoFar
             end

@@ -213,6 +224,12 @@ split_bridge_to_connector_and_action(
     end,
     {BridgeType, BridgeName, ActionMap, ConnectorName, ConnectorMap}.

+project_to_connector_resource_opts(<<"resource_opts">>, OldResourceOpts) ->
+    Subfields = common_resource_opts_subfields_bin(),
+    maps:with(Subfields, OldResourceOpts);
+project_to_connector_resource_opts(_, OldConfig) ->
+    OldConfig.
+
 transform_bridge_v1_config_to_action_config(
     BridgeV1Conf, ConnectorName, ConnectorConfSchemaMod, ConnectorConfSchemaName
 ) ->

@@ -497,19 +514,33 @@ status_and_actions_fields() ->
         )}
     ].

+resource_opts_ref(Module, RefName) ->
+    [
+        {resource_opts,
+            mk(
+                ref(Module, RefName),
+                emqx_resource_schema:resource_opts_meta()
+            )}
+    ].
+
+common_resource_opts_subfields() ->
+    [
+        health_check_interval,
+        query_mode,
+        start_after_created,
+        start_timeout
+    ].
+
+common_resource_opts_subfields_bin() ->
+    lists:map(fun atom_to_binary/1, common_resource_opts_subfields()).
+
 resource_opts_fields() ->
     resource_opts_fields(_Overrides = []).

 resource_opts_fields(Overrides) ->
     %% Note: these don't include buffer-related configurations because buffer workers are
     %% tied to the action.
-    ConnectorROFields = [
-        health_check_interval,
-        query_mode,
-        request_ttl,
-        start_after_created,
-        start_timeout
-    ],
+    ConnectorROFields = common_resource_opts_subfields(),
     lists:filter(
         fun({Key, _Sc}) -> lists:member(Key, ConnectorROFields) end,
         emqx_resource_schema:create_opts(Overrides)

@@ -163,11 +163,11 @@ t_remove_fail({'init', Config}) ->
     meck:expect(?CONNECTOR, on_add_channel, 4, {ok, connector_state}),
     meck:expect(?CONNECTOR, on_stop, 2, ok),
     meck:expect(?CONNECTOR, on_get_status, 2, connected),
-    [{mocked_mods, [?CONNECTOR, emqx_connector_ee_schema]} | Config];
-t_remove_fail({'end', Config}) ->
-    MockedMods = ?config(mocked_mods, Config),
-    meck:unload(MockedMods),
+    meck:expect(?CONNECTOR, query_mode, 1, simple_async_internal_buffer),
+    Config;
+t_remove_fail({'end', _Config}) ->
+    meck:unload(),
     ok;
 t_remove_fail(_Config) ->
     ?assertEqual(
         [],

@@ -200,7 +200,20 @@ t_remove_fail(_Config) ->
             {_, {?CONNECTOR, on_add_channel, _}, {ok, connector_state}},
             {_, {?CONNECTOR, on_get_channels, [_]}, _}
         ],
+        lists:filter(
+            fun({_, {?CONNECTOR, Fun, _Args}, _}) ->
+                lists:member(
+                    Fun, [
+                        callback_mode,
+                        on_start,
+                        on_get_channels,
+                        on_get_status,
+                        on_add_channel
+                    ]
+                )
+            end,
             meck:history(?CONNECTOR)
+        )
     ),
     ok.

@@ -269,6 +282,33 @@ t_create_with_bad_name_root_path(_Config) ->
     ),
     ok.

+t_no_buffer_workers({'init', Config}) ->
+    meck:new(emqx_connector_ee_schema, [passthrough]),
+    meck:expect(emqx_connector_ee_schema, resource_type, 1, ?CONNECTOR),
+    meck:new(?CONNECTOR, [non_strict]),
+    meck:expect(?CONNECTOR, callback_mode, 0, async_if_possible),
+    meck:expect(?CONNECTOR, on_start, 2, {ok, connector_state}),
+    meck:expect(?CONNECTOR, on_get_channels, 1, []),
+    meck:expect(?CONNECTOR, on_add_channel, 4, {ok, connector_state}),
+    meck:expect(?CONNECTOR, on_stop, 2, ok),
+    meck:expect(?CONNECTOR, on_get_status, 2, connected),
+    meck:expect(?CONNECTOR, query_mode, 1, sync),
+    [
+        {path, [connectors, kafka_producer, no_bws]}
+        | Config
+    ];
+t_no_buffer_workers({'end', Config}) ->
+    Path = ?config(path, Config),
+    {ok, _} = emqx:remove_config(Path),
+    meck:unload(),
+    ok;
+t_no_buffer_workers(Config) ->
+    Path = ?config(path, Config),
+    ConnConfig = connector_config(),
+    ?assertMatch({ok, _}, emqx:update_config(Path, ConnConfig)),
+    ?assertEqual([], supervisor:which_children(emqx_resource_buffer_worker_sup)),
+    ok.
+
 %% helpers

 connector_config() ->

@@ -17,6 +17,7 @@
 -module(emqx_connector_dummy_impl).

 -export([
+    query_mode/1,
     callback_mode/0,
     on_start/2,
     on_stop/2,

@@ -24,6 +25,7 @@
     on_get_channel_status/3
 ]).

+query_mode(_) -> error(unexpected).
 callback_mode() -> error(unexpected).
 on_start(_, _) -> error(unexpected).
 on_stop(_, _) -> error(unexpected).

@@ -1,36 +1,50 @@
 # EMQX Replay

-`emqx_ds` is a generic durable storage for MQTT messages within EMQX.
+`emqx_ds` is an application implementing durable storage for MQTT messages within EMQX.

-Concepts:
+# Features

+- Streams. Stream is an abstraction that encompasses topics, shards, different data layouts, etc.
+  The client application must only aware of the streams.
+
+- Batching. All the API functions are batch-oriented.
+
->  0. App overview introduction
->  1. let people know what your project can do specifically. Is it a base
->  library dependency, or what kind of functionality is provided to the user?
->  2. Provide context and add a link to any reference visitors might be
->  unfamiliar with.
->  3. Design details, implementation technology architecture, Roadmap, etc.
+- Iterators. Iterators can be stored durably or transferred over network.
+  They take relatively small space.

-# [Features] - [Optional]
-> A List of features your application provided. If the feature is quite simple, just
-> list in the previous section.
+- Support for various backends. Almost any DBMS that supports range
+  queries can serve as a `emqx_durable_storage` backend.
+
+- Builtin backend based on RocksDB.
+  - Changing storage layout on the fly: it's achieved by creating a
+    new set of tables (known as "generation") and the schema.
+  - Sharding based on publisher's client ID

 # Limitation
-TBD
+
+- Builtin backend currently doesn't replicate data across different sites
+- There is no local cache of messages, which may result in transferring the same data multiple times

 # Documentation links
 TBD

 # Usage
-TBD
+
+Currently it's only used to implement persistent sessions.
+
+In the future it can serve as a storage for retained messages or as a generic message buffering layer for the bridges.

 # Configurations
-TBD
+
+`emqx_durable_storage` doesn't have any configurable parameters.
+Instead, it relies on the upper-level business applications to create
+a correct configuration and pass it to `emqx_ds:open_db(DBName, Config)`
+function according to its needs.

 # HTTP APIs
+
+None

 # Other
 TBD
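
As a usage sketch of the `emqx_ds:open_db(DBName, Config)` call named in the README above: the DB name and the option map below are illustrative assumptions (a builtin RocksDB-backed DB with a default storage layout), not a documented configuration — the real option keys are chosen by the calling application.

```erlang
%% Hypothetical sketch: how an upper-level application might open a
%% durable-storage DB. `emqx_ds:open_db/2` is the entry point mentioned
%% in the README; the option keys here are assumptions for illustration.
open_example_db() ->
    Config = #{
        %% assumed: use the builtin RocksDB-based backend
        backend => builtin,
        %% assumed: default storage layout module and its options
        storage => {emqx_ds_storage_bitfield_lts, #{}}
    },
    ok = emqx_ds:open_db(example_db, Config).
```

This matches the README's design note that the application, not `emqx_durable_storage` itself, owns the configuration: nothing is read from a global config; everything arrives through the `Config` argument.
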
@@ -368,7 +368,7 @@ check_message(
     #{?tag := ?IT, ?start_time := StartTime, ?topic_filter := TopicFilter},
     #message{timestamp = Timestamp, topic = Topic}
 ) when Timestamp >= StartTime ->
-    emqx_topic:match(emqx_topic:words(Topic), TopicFilter);
+    emqx_topic:match(emqx_topic:tokens(Topic), TopicFilter);
 check_message(_Cutoff, _It, _Msg) ->
     false.

@@ -378,7 +378,7 @@ format_key(KeyMapper, Key) ->

 -spec make_key(s(), emqx_types:message()) -> {binary(), [binary()]}.
 make_key(#s{keymappers = KeyMappers, trie = Trie}, #message{timestamp = Timestamp, topic = TopicBin}) ->
-    Tokens = emqx_topic:tokens(TopicBin),
+    Tokens = emqx_topic:words(TopicBin),
     {TopicIndex, Varying} = emqx_ds_lts:topic_key(Trie, fun threshold_fun/1, Tokens),
     VaryingHashes = [hash_topic_level(I) || I <- Varying],
     KeyMapper = array:get(length(Varying), KeyMappers),

@@ -69,7 +69,7 @@ make_iterator(Node, DB, Shard, Stream, TopicFilter, StartTime) ->
     | {ok, end_of_stream}
     | {error, _}.
 next(Node, DB, Shard, Iter, BatchSize) ->
-    erpc:call(Node, emqx_ds_replication_layer, do_next_v1, [DB, Shard, Iter, BatchSize]).
+    emqx_rpc:call(Shard, Node, emqx_ds_replication_layer, do_next_v1, [DB, Shard, Iter, BatchSize]).

 -spec store_batch(
     node(),

@@ -80,7 +80,9 @@ next(Node, DB, Shard, Iter, BatchSize) ->
 ) ->
     emqx_ds:store_batch_result().
 store_batch(Node, DB, Shard, Batch, Options) ->
-    erpc:call(Node, emqx_ds_replication_layer, do_store_batch_v1, [DB, Shard, Batch, Options]).
+    emqx_rpc:call(Shard, Node, emqx_ds_replication_layer, do_store_batch_v1, [
+        DB, Shard, Batch, Options
+    ]).

 %%================================================================================
 %% behavior callbacks

@@ -50,9 +50,9 @@ end_per_suite(Config) ->
 init_per_testcase(Case, Config) ->
     _ = emqx_eviction_agent:disable(test_eviction),
     ok = snabbkaffe:start_trace(),
-    start_slave(Case, Config).
+    start_peer(Case, Config).

-start_slave(t_explicit_session_takeover, Config) ->
+start_peer(t_explicit_session_takeover, Config) ->
     NodeNames =
         [
             t_explicit_session_takeover_donor,

@@ -65,19 +65,19 @@ start_slave(t_explicit_session_takeover, Config) ->
     ),
     ok = snabbkaffe:start_trace(),
     [{evacuate_nodes, ClusterNodes} | Config];
-start_slave(_Case, Config) ->
+start_peer(_Case, Config) ->
     Config.

 end_per_testcase(TestCase, Config) ->
     emqx_eviction_agent:disable(test_eviction),
     ok = snabbkaffe:stop(),
-    stop_slave(TestCase, Config).
+    stop_peer(TestCase, Config).

-stop_slave(t_explicit_session_takeover, Config) ->
+stop_peer(t_explicit_session_takeover, Config) ->
     emqx_eviction_agent_test_helpers:stop_cluster(
         ?config(evacuate_nodes, Config)
     );
-stop_slave(_Case, _Config) ->
+stop_peer(_Case, _Config) ->
     ok.

 %%--------------------------------------------------------------------

@@ -74,14 +74,16 @@ check_topic([]) ->
 check_topic(Path) ->
     {ok, emqx_http_lib:uri_decode(iolist_to_binary(lists:join(<<"/">>, Path)))}.

-get_sub_opts(#coap_message{options = Opts} = Msg) ->
-    SubOpts = maps:fold(fun parse_sub_opts/3, #{}, Opts),
+get_sub_opts(Msg) ->
+    SubOpts = maps:fold(
+        fun parse_sub_opts/3, #{}, emqx_coap_message:get_option(uri_query, Msg, #{})
+    ),
     case SubOpts of
         #{qos := _} ->
-            maps:merge(SubOpts, ?SUBOPTS);
+            maps:merge(?SUBOPTS, SubOpts);
         _ ->
             CfgType = emqx_conf:get([gateway, coap, subscribe_qos], ?QOS_0),
-            maps:merge(SubOpts, ?SUBOPTS#{qos => type_to_qos(CfgType, Msg)})
+            maps:merge(?SUBOPTS#{qos => type_to_qos(CfgType, Msg)}, SubOpts)
     end.

 parse_sub_opts(<<"qos">>, V, Opts) ->

@@ -345,6 +345,45 @@ t_subscribe(_) ->
         Topics
     ).

+t_subscribe_with_qos_opt(_) ->
+    Topics = [
+        {<<"abc">>, 0},
+        {<<"/abc">>, 1},
+        {<<"abc/d">>, 2}
+    ],
+    Fun = fun({Topic, Qos}, Channel, Token) ->
+        Payload = <<"123">>,
+        URI = pubsub_uri(binary_to_list(Topic), Token) ++ "&qos=" ++ integer_to_list(Qos),
+        Req = make_req(get, Payload, [{observe, 0}]),
+        {ok, content, _} = do_request(Channel, URI, Req),
+        ?LOGT("observer topic:~ts~n", [Topic]),
+
+        %% ensure subscribe succeed
+        timer:sleep(100),
+        [SubPid] = emqx:subscribers(Topic),
+        ?assert(is_pid(SubPid)),
+        ?assertEqual(Qos, maps:get(qos, emqx_broker:get_subopts(SubPid, Topic))),
+        %% publish a message
+        emqx:publish(emqx_message:make(Topic, Payload)),
+        {ok, content, Notify} = with_response(Channel),
+        ?LOGT("observer get Notif=~p", [Notify]),
+
+        #coap_content{payload = PayloadRecv} = Notify,
+
+        ?assertEqual(Payload, PayloadRecv)
+    end,
+
+    with_connection(Topics, Fun),
+
+    %% subscription removed if coap client disconnected
+    timer:sleep(100),
+    lists:foreach(
+        fun({Topic, _Qos}) ->
+            ?assertEqual([], emqx:subscribers(Topic))
+        end,
+        Topics
+    ).
+
 t_un_subscribe(_) ->
     %% can unsubscribe to a normal topic
     Topics = [

@@ -57,14 +57,22 @@ all() ->

init_per_suite(Config) ->
    application:load(emqx_gateway_coap),
-    ok = emqx_common_test_helpers:load_config(emqx_gateway_schema, ?CONF_DEFAULT),
-    emqx_mgmt_api_test_util:init_suite([emqx_auth, emqx_gateway]),
-    Config.
+    Apps = emqx_cth_suite:start(
+        [
+            {emqx_conf, ?CONF_DEFAULT},
+            emqx_gateway,
+            emqx_auth,
+            emqx_management,
+            {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
+        ],
+        #{work_dir => emqx_cth_suite:work_dir(Config)}
+    ),
+    _ = emqx_common_test_http:create_default_app(),
+    [{suite_apps, Apps} | Config].

end_per_suite(Config) ->
-    {ok, _} = emqx:remove_config([<<"gateway">>, <<"coap">>]),
-    emqx_mgmt_api_test_util:end_suite([emqx_gateway, emqx_auth]),
-    Config.
+    emqx_cth_suite:stop(?config(suite_apps, Config)),
+    emqx_config:delete_override_conf_files().

%%--------------------------------------------------------------------
%% Cases
@@ -112,7 +112,7 @@ setup_test(TestCase, Config) when
            end}
        ]
    ),
-    Nodes = [emqx_common_test_helpers:start_slave(Name, Opts) || {Name, Opts} <- Cluster],
+    Nodes = [emqx_common_test_helpers:start_peer(Name, Opts) || {Name, Opts} <- Cluster],
    [{nodes, Nodes}, {cluster, Cluster}, {old_license, LicenseKey}];
setup_test(_TestCase, _Config) ->
    [].

@@ -42,8 +42,8 @@ t_cluster_query(_Config) ->
    ct:timetrap({seconds, 120}),
    snabbkaffe:fix_ct_logging(),
    [{Name, Opts}, {Name1, Opts1}] = cluster_specs(),
-    Node1 = emqx_common_test_helpers:start_slave(Name, Opts),
-    Node2 = emqx_common_test_helpers:start_slave(Name1, Opts1),
+    Node1 = emqx_common_test_helpers:start_peer(Name, Opts),
+    Node2 = emqx_common_test_helpers:start_peer(Name1, Opts1),
    try
        process_flag(trap_exit, true),
        ClientLs1 = [start_emqtt_client(Node1, I, 2883) || I <- lists:seq(1, 10)],
@@ -168,8 +168,8 @@ t_cluster_query(_Config) ->
        _ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs1),
        _ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs2)
    after
-        emqx_common_test_helpers:stop_slave(Node1),
-        emqx_common_test_helpers:stop_slave(Node2)
+        emqx_common_test_helpers:stop_peer(Node1),
+        emqx_common_test_helpers:stop_peer(Node2)
    end,
    ok.
@@ -54,8 +54,6 @@ t_cluster_topology_api_empty_resp(_) ->
    ).

t_cluster_topology_api_replicants(Config) ->
-    %% some time to stabilize
-    timer:sleep(3000),
    [Core1, Core2, Replicant] = _NodesList = ?config(cluster, Config),
    {200, Core1Resp} = rpc:call(Core1, emqx_mgmt_api_cluster, cluster_topology, [get, #{}]),
    {200, Core2Resp} = rpc:call(Core2, emqx_mgmt_api_cluster, cluster_topology, [get, #{}]),

@@ -194,8 +194,8 @@ t_api_listeners_list_not_ready(Config) when is_list(Config) ->
    snabbkaffe:fix_ct_logging(),
    Cluster = [{Name, Opts}, {Name1, Opts1}] = cluster([core, core]),
    ct:pal("Starting ~p", [Cluster]),
-    Node1 = emqx_common_test_helpers:start_slave(Name, Opts),
-    Node2 = emqx_common_test_helpers:start_slave(Name1, Opts1),
+    Node1 = emqx_common_test_helpers:start_peer(Name, Opts),
+    Node2 = emqx_common_test_helpers:start_peer(Name1, Opts1),
    try
        L1 = get_tcp_listeners(Node1),

@@ -214,8 +214,8 @@ t_api_listeners_list_not_ready(Config) when is_list(Config) ->
        ?assert(length(L1) > length(L2), Comment),
        ?assertEqual(length(L2), length(L3), Comment)
    after
-        emqx_common_test_helpers:stop_slave(Node1),
-        emqx_common_test_helpers:stop_slave(Node2)
+        emqx_common_test_helpers:stop_peer(Node1),
+        emqx_common_test_helpers:stop_peer(Node2)
    end.

t_clear_certs(Config) when is_list(Config) ->
@@ -129,8 +129,8 @@ t_multiple_nodes_api(_) ->
    Seq2 = list_to_atom(atom_to_list(?MODULE) ++ "2"),
    Cluster = [{Name, Opts}, {Name1, Opts1}] = cluster([{core, Seq1}, {core, Seq2}]),
    ct:pal("Starting ~p", [Cluster]),
-    Node1 = emqx_common_test_helpers:start_slave(Name, Opts),
-    Node2 = emqx_common_test_helpers:start_slave(Name1, Opts1),
+    Node1 = emqx_common_test_helpers:start_peer(Name, Opts),
+    Node2 = emqx_common_test_helpers:start_peer(Name1, Opts1),
    try
        {200, NodesList} = rpc:call(Node1, emqx_mgmt_api_nodes, nodes, [get, #{}]),
        All = [Node1, Node2],
@@ -148,8 +148,8 @@ t_multiple_nodes_api(_) ->
        ]),
        ?assertMatch(#{node := Node1}, Node11)
    after
-        emqx_common_test_helpers:stop_slave(Node1),
-        emqx_common_test_helpers:stop_slave(Node2)
+        emqx_common_test_helpers:stop_peer(Node1),
+        emqx_common_test_helpers:stop_peer(Node2)
    end,
    ok.

@@ -27,12 +27,12 @@ all() ->

init_per_suite(Config) ->
    emqx_mgmt_api_test_util:init_suite(),
-    Slave = emqx_common_test_helpers:start_slave(some_node, []),
-    [{slave, Slave} | Config].
+    Peer = emqx_common_test_helpers:start_peer(node1, []),
+    [{peer, Peer} | Config].

end_per_suite(Config) ->
-    Slave = ?config(slave, Config),
-    emqx_common_test_helpers:stop_slave(Slave),
+    Peer = ?config(peer, Config),
+    emqx_common_test_helpers:stop_peer(Peer),
    mria:clear_table(?ROUTE_TAB),
    emqx_mgmt_api_test_util:end_suite().
@@ -80,18 +80,18 @@ t_nodes_api(Config) ->
    %% get topics/:topic
    %% We add another route here to ensure that the response handles
    %% multiple routes for a single topic
-    Slave = ?config(slave, Config),
-    ok = emqx_router:add_route(Topic, Slave),
+    Peer = ?config(peer, Config),
+    ok = emqx_router:add_route(Topic, Peer),
    RoutePath = emqx_mgmt_api_test_util:api_path(["topics", Topic]),
    {ok, RouteResponse} = emqx_mgmt_api_test_util:request_api(get, RoutePath),
-    ok = emqx_router:delete_route(Topic, Slave),
+    ok = emqx_router:delete_route(Topic, Peer),

    [
        #{<<"topic">> := Topic, <<"node">> := Node1},
        #{<<"topic">> := Topic, <<"node">> := Node2}
    ] = emqx_utils_json:decode(RouteResponse, [return_maps]),

-    ?assertEqual(lists:usort([Node, atom_to_binary(Slave)]), lists:usort([Node1, Node2])),
+    ?assertEqual(lists:usort([Node, atom_to_binary(Peer)]), lists:usort([Node1, Node2])),

    ok = emqtt:stop(Client).

@@ -136,7 +136,7 @@ t_rebalance_node_crash(Config) ->
    ?assertWaitEvent(
        begin
            ok = rpc:call(DonorNode, emqx_node_rebalance, start, [Opts]),
-            emqx_common_test_helpers:stop_slave(RecipientNode)
+            emqx_common_test_helpers:stop_peer(RecipientNode)
        end,
        #{?snk_kind := emqx_node_rebalance_started},
        1000
@@ -628,11 +628,11 @@ group_t_copy_plugin_to_a_new_node({init, Config}) ->
            load_schema => false
        }
    ),
-    CopyFromNode = emqx_common_test_helpers:start_slave(
+    CopyFromNode = emqx_common_test_helpers:start_peer(
        CopyFrom, maps:remove(join_to, CopyFromOpts)
    ),
    ok = rpc:call(CopyFromNode, emqx_plugins, put_config, [install_dir, FromInstallDir]),
-    CopyToNode = emqx_common_test_helpers:start_slave(CopyTo, maps:remove(join_to, CopyToOpts)),
+    CopyToNode = emqx_common_test_helpers:start_peer(CopyTo, maps:remove(join_to, CopyToOpts)),
    ok = rpc:call(CopyToNode, emqx_plugins, put_config, [install_dir, ToInstallDir]),
    NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX),
    ok = rpc:call(CopyFromNode, emqx_plugins, ensure_installed, [NameVsn]),
@@ -662,8 +662,8 @@ group_t_copy_plugin_to_a_new_node({'end', Config}) ->
    ok = rpc:call(CopyToNode, emqx_config, delete_override_conf_files, []),
    rpc:call(CopyToNode, ekka, leave, []),
    rpc:call(CopyFromNode, ekka, leave, []),
-    ok = emqx_common_test_helpers:stop_slave(CopyToNode),
-    ok = emqx_common_test_helpers:stop_slave(CopyFromNode),
+    ok = emqx_common_test_helpers:stop_peer(CopyToNode),
+    ok = emqx_common_test_helpers:stop_peer(CopyFromNode),
    ok = file:del_dir_r(proplists:get_value(to_install_dir, Config)),
    ok = file:del_dir_r(proplists:get_value(from_install_dir, Config));
group_t_copy_plugin_to_a_new_node(Config) ->
@@ -737,7 +737,6 @@ group_t_copy_plugin_to_a_new_node_single_node({init, Config}) ->
            end,
        priv_data_dir => PrivDataDir,
        schema_mod => emqx_conf_schema,
-        peer_mod => slave,
        load_schema => true
        }
    ),
@@ -751,7 +750,7 @@ group_t_copy_plugin_to_a_new_node_single_node({init, Config}) ->
    ];
group_t_copy_plugin_to_a_new_node_single_node({'end', Config}) ->
    CopyToNode = proplists:get_value(copy_to_node_name, Config),
-    ok = emqx_common_test_helpers:stop_slave(CopyToNode),
+    ok = emqx_common_test_helpers:stop_peer(CopyToNode),
    ok = file:del_dir_r(proplists:get_value(to_install_dir, Config)),
    ok;
group_t_copy_plugin_to_a_new_node_single_node(Config) ->
@@ -762,7 +761,7 @@ group_t_copy_plugin_to_a_new_node_single_node(Config) ->
    %% Start the node for the first time. The plugin should start
    %% successfully even if it's not extracted yet. Simply starting
    %% the node would crash if not working properly.
-    CopyToNode = emqx_common_test_helpers:start_slave(CopyTo, CopyToOpts),
+    CopyToNode = emqx_common_test_helpers:start_peer(CopyTo, CopyToOpts),
    ct:pal("~p config:\n ~p", [
        CopyToNode, erpc:call(CopyToNode, emqx_plugins, get_config, [[], #{}])
    ]),
@@ -805,11 +804,10 @@ group_t_cluster_leave({init, Config}) ->
            end,
        priv_data_dir => PrivDataDir,
        schema_mod => emqx_conf_schema,
-        peer_mod => slave,
        load_schema => true
        }
    ),
-    Nodes = [emqx_common_test_helpers:start_slave(Name, Opts) || {Name, Opts} <- Cluster],
+    Nodes = [emqx_common_test_helpers:start_peer(Name, Opts) || {Name, Opts} <- Cluster],
    [
        {to_install_dir, ToInstallDir},
        {cluster, Cluster},
@@ -820,7 +818,7 @@ group_t_cluster_leave({init, Config}) ->
    ];
group_t_cluster_leave({'end', Config}) ->
    Nodes = proplists:get_value(nodes, Config),
-    [ok = emqx_common_test_helpers:stop_slave(N) || N <- Nodes],
+    [ok = emqx_common_test_helpers:stop_peer(N) || N <- Nodes],
    ok = file:del_dir_r(proplists:get_value(to_install_dir, Config)),
    ok;
group_t_cluster_leave(Config) ->
@@ -49,7 +49,11 @@ fields("connection_fields") ->
    adjust_fields(emqx_connector_schema_lib:relational_db_fields()) ++
        emqx_connector_schema_lib:ssl_fields();
fields("config_connector") ->
-    fields("connection_fields") ++ emqx_connector_schema:common_fields();
+    fields("connection_fields") ++
+        emqx_connector_schema:common_fields() ++
+        emqx_connector_schema:resource_opts_ref(?MODULE, resource_opts);
+fields(resource_opts) ->
+    emqx_connector_schema:resource_opts_fields();
fields(config) ->
    fields("config_connector") ++
        fields(action);
@@ -159,5 +163,7 @@ values(common) ->

desc("config_connector") ->
    ?DESC("config_connector");
+desc(resource_opts) ->
+    ?DESC(emqx_resource_schema, "resource_opts");
desc(_) ->
    undefined.
@@ -101,7 +101,10 @@
    max_buffer_bytes => pos_integer(),
    query_mode => query_mode(),
    resume_interval => pos_integer(),
-    inflight_window => pos_integer()
+    inflight_window => pos_integer(),
+    %% Only for `emqx_resource_manager' usage. If false, prevents spawning buffer
+    %% workers, regardless of resource query mode.
+    spawn_buffer_workers => boolean()
}.
 -type query_result() ::
    ok

@@ -201,9 +201,9 @@

%% when calling emqx_resource:health_check/2
 -callback on_get_status(resource_id(), resource_state()) ->
-    resource_status()
-    | {resource_status(), resource_state()}
-    | {resource_status(), resource_state(), term()}.
+    health_check_status()
+    | {health_check_status(), resource_state()}
+    | {health_check_status(), resource_state(), term()}.

 -callback on_get_channel_status(resource_id(), channel_id(), resource_state()) ->
    channel_status()
@@ -248,7 +248,7 @@
            {error, Reason};
        C:E:S ->
            {error, #{
-                execption => C,
+                exception => C,
                reason => emqx_utils:redact(E),
                stacktrace => emqx_utils:redact(S)
            }}
@@ -1077,9 +1077,11 @@ handle_async_worker_down(Data0, Pid) ->
call_query(QM, Id, Index, Ref, Query, QueryOpts) ->
    ?tp(call_query_enter, #{id => Id, query => Query, query_mode => QM}),
    case emqx_resource_manager:lookup_cached(extract_connector_id(Id)) of
-        {ok, _Group, #{status := stopped}} ->
+        %% This seems to be the only place where the `rm_status_stopped' status matters,
+        %% to distinguish from the `disconnected' status.
+        {ok, _Group, #{status := ?rm_status_stopped}} ->
            ?RESOURCE_ERROR(stopped, "resource stopped or disabled");
-        {ok, _Group, #{status := connecting, error := unhealthy_target}} ->
+        {ok, _Group, #{status := ?status_connecting, error := unhealthy_target}} ->
            {error, {unrecoverable_error, unhealthy_target}};
        {ok, _Group, Resource} ->
            do_call_query(QM, Id, Index, Ref, Query, QueryOpts, Resource);
@@ -85,7 +85,19 @@
 -define(T_OPERATION, 5000).
 -define(T_LOOKUP, 1000).

--define(IS_STATUS(ST), ST =:= connecting; ST =:= connected; ST =:= disconnected).
+%% `gen_statem' states
+%% Note: most of them coincide with resource _status_. We use a different set of macros
+%% to avoid mixing those concepts up.
+%% Also note: the `stopped' _status_ can only be emitted by `emqx_resource_manager'...
+%% Modules implementing `emqx_resource' behavior should not return it.
+-define(state_connected, connected).
+-define(state_connecting, connecting).
+-define(state_disconnected, disconnected).
+-define(state_stopped, stopped).
+
+-define(IS_STATUS(ST),
+    ST =:= ?status_connecting; ST =:= ?status_connected; ST =:= ?status_disconnected
+).

%%------------------------------------------------------------------------------
%% API
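%% [Annotation, not part of the commit] A minimal sketch of how a guard
%% macro like the rewritten ?IS_STATUS/1 reads at a call site, assuming the
%% ?status_* macros expand to the plain atoms of the same name (which is
%% what the surrounding renames suggest):

handle_status(Status) when ?IS_STATUS(Status) ->
    %% expands to: Status =:= connecting; Status =:= connected; ...
    {ok, Status};
handle_status(Other) ->
    {error, {invalid_status, Other}}.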
@@ -136,16 +148,9 @@ create(ResId, Group, ResourceType, Config, Opts) ->
    % Create metrics for the resource
    ok = emqx_resource:create_metrics(ResId),
    QueryMode = emqx_resource:query_mode(ResourceType, Config, Opts),
-    case QueryMode of
-        %% the resource has built-in buffer, so there is no need for resource workers
-        simple_sync_internal_buffer ->
-            ok;
-        simple_async_internal_buffer ->
-            ok;
-        %% The resource is a consumer resource, so there is no need for resource workers
-        no_queries ->
-            ok;
-        _ ->
+    SpawnBufferWorkers = maps:get(spawn_buffer_workers, Opts, true),
+    case SpawnBufferWorkers andalso lists:member(QueryMode, [sync, async]) of
+        true ->
            %% start resource workers as the query type requires them
            ok = emqx_resource_buffer_worker_sup:start_workers(ResId, Opts),
            case maps:get(start_after_created, Opts, ?START_AFTER_CREATED) of
@@ -153,7 +158,9 @@ create(ResId, Group, ResourceType, Config, Opts) ->
                    wait_for_ready(ResId, maps:get(start_timeout, Opts, ?START_TIMEOUT));
                false ->
                    ok
-            end
+            end;
+        false ->
+            ok
    end.

%% @doc Called from `emqx_resource` when doing a dry run for creating a resource instance.
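%% [Annotation, not part of the commit] The rewritten create/5 above folds
%% four query-mode clauses into a single boolean test: buffer workers are
%% spawned only when the caller did not opt out via the new
%% spawn_buffer_workers option and the query mode actually goes through the
%% buffering layer. The decision, extracted as a standalone predicate:

needs_buffer_workers(QueryMode, Opts) ->
    maps:get(spawn_buffer_workers, Opts, true) andalso
        lists:member(QueryMode, [sync, async]).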
@@ -397,12 +404,12 @@ init({DataIn, Opts}) ->
    case maps:get(start_after_created, Opts, ?START_AFTER_CREATED) of
        true ->
            %% init the cache so that lookup/1 will always return something
-            UpdatedData = update_state(Data#data{status = connecting}),
-            {ok, connecting, UpdatedData, {next_event, internal, start_resource}};
+            UpdatedData = update_state(Data#data{status = ?status_connecting}),
+            {ok, ?state_connecting, UpdatedData, {next_event, internal, start_resource}};
        false ->
            %% init the cache so that lookup/1 will always return something
-            UpdatedData = update_state(Data#data{status = stopped}),
-            {ok, stopped, UpdatedData}
+            UpdatedData = update_state(Data#data{status = ?rm_status_stopped}),
+            {ok, ?state_stopped, UpdatedData}
    end.

terminate({shutdown, removed}, _State, _Data) ->
@@ -420,26 +427,26 @@ callback_mode() -> [handle_event_function, state_enter].

% Called during testing to force a specific state
handle_event({call, From}, set_resource_status_connecting, _State, Data) ->
-    UpdatedData = update_state(Data#data{status = connecting}, Data),
-    {next_state, connecting, UpdatedData, [{reply, From, ok}]};
+    UpdatedData = update_state(Data#data{status = ?status_connecting}, Data),
+    {next_state, ?state_connecting, UpdatedData, [{reply, From, ok}]};
% Called when the resource is to be restarted
handle_event({call, From}, restart, _State, Data) ->
    DataNext = stop_resource(Data),
    start_resource(DataNext, From);
% Called when the resource is to be started (also used for manual reconnect)
handle_event({call, From}, start, State, Data) when
-    State =:= stopped orelse
-        State =:= disconnected
+    State =:= ?state_stopped orelse
+        State =:= ?state_disconnected
->
    start_resource(Data, From);
handle_event({call, From}, start, _State, _Data) ->
    {keep_state_and_data, [{reply, From, ok}]};
% Called when the resource is to be stopped
-handle_event({call, From}, stop, stopped, _Data) ->
+handle_event({call, From}, stop, ?state_stopped, _Data) ->
    {keep_state_and_data, [{reply, From, ok}]};
handle_event({call, From}, stop, _State, Data) ->
    UpdatedData = stop_resource(Data),
-    {next_state, stopped, update_state(UpdatedData, Data), [{reply, From, ok}]};
+    {next_state, ?state_stopped, update_state(UpdatedData, Data), [{reply, From, ok}]};
% Called when a resource is to be stopped and removed.
handle_event({call, From}, {remove, ClearMetrics}, _State, Data) ->
    handle_remove_event(From, ClearMetrics, Data);
@@ -448,10 +455,10 @@ handle_event({call, From}, lookup, _State, #data{group = Group} = Data) ->
    Reply = {ok, Group, data_record_to_external_map(Data)},
    {keep_state_and_data, [{reply, From, Reply}]};
% Called when doing a manually health check.
-handle_event({call, From}, health_check, stopped, _Data) ->
+handle_event({call, From}, health_check, ?state_stopped, _Data) ->
    Actions = [{reply, From, {error, resource_is_stopped}}],
    {keep_state_and_data, Actions};
-handle_event({call, From}, {channel_health_check, _}, stopped, _Data) ->
+handle_event({call, From}, {channel_health_check, _}, ?state_stopped, _Data) ->
    Actions = [{reply, From, {error, resource_is_stopped}}],
    {keep_state_and_data, Actions};
handle_event({call, From}, health_check, _State, Data) ->
@@ -459,47 +466,47 @@ handle_event({call, From}, health_check, _State, Data) ->
handle_event({call, From}, {channel_health_check, ChannelId}, _State, Data) ->
    handle_manually_channel_health_check(From, Data, ChannelId);
% State: CONNECTING
-handle_event(enter, _OldState, connecting = State, Data) ->
-    ok = log_state_consistency(State, Data),
+handle_event(enter, _OldState, ?state_connecting = State, Data) ->
+    ok = log_status_consistency(State, Data),
    {keep_state_and_data, [{state_timeout, 0, health_check}]};
-handle_event(internal, start_resource, connecting, Data) ->
+handle_event(internal, start_resource, ?state_connecting, Data) ->
    start_resource(Data, undefined);
-handle_event(state_timeout, health_check, connecting, Data) ->
+handle_event(state_timeout, health_check, ?state_connecting, Data) ->
    handle_connecting_health_check(Data);
handle_event(
-    {call, From}, {remove_channel, ChannelId}, connecting = _State, Data
+    {call, From}, {remove_channel, ChannelId}, ?state_connecting = _State, Data
) ->
    handle_remove_channel(From, ChannelId, Data);
%% State: CONNECTED
%% The connected state is entered after a successful on_start/2 of the callback mod
%% and successful health_checks
-handle_event(enter, _OldState, connected = State, Data) ->
-    ok = log_state_consistency(State, Data),
+handle_event(enter, _OldState, ?state_connected = State, Data) ->
+    ok = log_status_consistency(State, Data),
    _ = emqx_alarm:safe_deactivate(Data#data.id),
    ?tp(resource_connected_enter, #{}),
    {keep_state_and_data, health_check_actions(Data)};
-handle_event(state_timeout, health_check, connected, Data) ->
+handle_event(state_timeout, health_check, ?state_connected, Data) ->
    handle_connected_health_check(Data);
handle_event(
-    {call, From}, {add_channel, ChannelId, Config}, connected = _State, Data
+    {call, From}, {add_channel, ChannelId, Config}, ?state_connected = _State, Data
) ->
    handle_add_channel(From, Data, ChannelId, Config);
handle_event(
-    {call, From}, {remove_channel, ChannelId}, connected = _State, Data
+    {call, From}, {remove_channel, ChannelId}, ?state_connected = _State, Data
) ->
    handle_remove_channel(From, ChannelId, Data);
%% State: DISCONNECTED
-handle_event(enter, _OldState, disconnected = State, Data) ->
-    ok = log_state_consistency(State, Data),
+handle_event(enter, _OldState, ?state_disconnected = State, Data) ->
+    ok = log_status_consistency(State, Data),
    ?tp(resource_disconnected_enter, #{}),
    {keep_state_and_data, retry_actions(Data)};
-handle_event(state_timeout, auto_retry, disconnected, Data) ->
+handle_event(state_timeout, auto_retry, ?state_disconnected, Data) ->
    ?tp(resource_auto_reconnect, #{}),
    start_resource(Data, undefined);
%% State: STOPPED
%% The stopped state is entered after the resource has been explicitly stopped
-handle_event(enter, _OldState, stopped = State, Data) ->
-    ok = log_state_consistency(State, Data),
+handle_event(enter, _OldState, ?state_stopped = State, Data) ->
+    ok = log_status_consistency(State, Data),
    {keep_state_and_data, []};
%% The following events can be handled in any other state
handle_event(
@@ -529,11 +536,11 @@ handle_event(EventType, EventData, State, Data) ->
    ),
    keep_state_and_data.

-log_state_consistency(State, #data{status = State} = Data) ->
+log_status_consistency(Status, #data{status = Status} = Data) ->
    log_cache_consistency(read_cache(Data#data.id), Data);
-log_state_consistency(State, Data) ->
-    ?tp(warning, "inconsistent_state", #{
-        state => State,
+log_status_consistency(Status, Data) ->
+    ?tp(warning, "inconsistent_status", #{
+        status => Status,
        data => emqx_utils:redact(Data)
    }).

@@ -591,25 +598,25 @@ start_resource(Data, From) ->
    %% in case the emqx_resource:call_start/2 hangs, the lookup/1 can read status from the cache
    case emqx_resource:call_start(Data#data.id, Data#data.mod, Data#data.config) of
        {ok, ResourceState} ->
-            UpdatedData1 = Data#data{status = connecting, state = ResourceState},
+            UpdatedData1 = Data#data{status = ?status_connecting, state = ResourceState},
            %% Perform an initial health_check immediately before transitioning into a connected state
            UpdatedData2 = add_channels(UpdatedData1),
            Actions = maybe_reply([{state_timeout, 0, health_check}], From, ok),
-            {next_state, connecting, update_state(UpdatedData2, Data), Actions};
+            {next_state, ?state_connecting, update_state(UpdatedData2, Data), Actions};
        {error, Reason} = Err ->
            ?SLOG(warning, #{
                msg => "start_resource_failed",
                id => Data#data.id,
                reason => Reason
            }),
-            _ = maybe_alarm(disconnected, Data#data.id, Err, Data#data.error),
+            _ = maybe_alarm(?status_disconnected, Data#data.id, Err, Data#data.error),
            %% Add channels and raise alarms
-            NewData1 = channels_health_check(disconnected, add_channels(Data)),
+            NewData1 = channels_health_check(?status_disconnected, add_channels(Data)),
            %% Keep track of the error reason why the connection did not work
            %% so that the Reason can be returned when the verification call is made.
-            NewData2 = NewData1#data{status = disconnected, error = Err},
+            NewData2 = NewData1#data{status = ?status_disconnected, error = Err},
            Actions = maybe_reply(retry_actions(NewData2), From, Err),
-            {next_state, disconnected, update_state(NewData2, Data), Actions}
+            {next_state, ?state_disconnected, update_state(NewData2, Data), Actions}
    end.

add_channels(Data) ->
@@ -666,13 +673,13 @@ add_channels_in_list([{ChannelID, ChannelConfig} | Rest], Data) ->
                added_channels = NewAddedChannelsMap
            },
            %% Raise an alarm since the channel could not be added
-            _ = maybe_alarm(disconnected, ChannelID, Error, no_prev_error),
+            _ = maybe_alarm(?status_disconnected, ChannelID, Error, no_prev_error),
            add_channels_in_list(Rest, NewData)
    end.

-maybe_stop_resource(#data{status = Status} = Data) when Status /= stopped ->
+maybe_stop_resource(#data{status = Status} = Data) when Status =/= ?rm_status_stopped ->
    stop_resource(Data);
-maybe_stop_resource(#data{status = stopped} = Data) ->
+maybe_stop_resource(#data{status = ?rm_status_stopped} = Data) ->
    Data.

stop_resource(#data{state = ResState, id = ResId} = Data) ->
@@ -691,7 +698,7 @@ stop_resource(#data{state = ResState, id = ResId} = Data) ->
    end,
    _ = maybe_clear_alarm(ResId),
    ok = emqx_metrics_worker:reset_metrics(?RES_METRICS, ResId),
-    NewData#data{status = stopped}.
+    NewData#data{status = ?rm_status_stopped}.

remove_channels(Data) ->
    Channels = maps:keys(Data#data.added_channels),
@@ -706,7 +713,7 @@ remove_channels_in_list([ChannelID | Rest], Data, KeepInChannelMap) ->
            true ->
                AddedChannelsMap;
            false ->
-                maybe_clear_alarm(ChannelID),
+                _ = maybe_clear_alarm(ChannelID),
                maps:remove(ChannelID, AddedChannelsMap)
        end,
    case safe_call_remove_channel(Data#data.id, Data#data.mod, Data#data.state, ChannelID) of
@@ -858,13 +865,15 @@ handle_connecting_health_check(Data) ->
    with_health_check(
        Data,
        fun
-            (connected, UpdatedData) ->
-                {next_state, connected, channels_health_check(connected, UpdatedData)};
-            (connecting, UpdatedData) ->
-                {keep_state, channels_health_check(connecting, UpdatedData),
+            (?status_connected, UpdatedData) ->
+                {next_state, ?state_connected,
+                    channels_health_check(?status_connected, UpdatedData)};
+            (?status_connecting, UpdatedData) ->
+                {keep_state, channels_health_check(?status_connecting, UpdatedData),
                    health_check_actions(UpdatedData)};
-            (disconnected, UpdatedData) ->
-                {next_state, disconnected, channels_health_check(disconnected, UpdatedData)}
+            (?status_disconnected, UpdatedData) ->
+                {next_state, ?state_disconnected,
+                    channels_health_check(?status_disconnected, UpdatedData)}
        end
    ).
@@ -872,8 +881,8 @@ handle_connected_health_check(Data) ->
    with_health_check(
        Data,
        fun
-            (connected, UpdatedData0) ->
-                UpdatedData1 = channels_health_check(connected, UpdatedData0),
+            (?status_connected, UpdatedData0) ->
+                UpdatedData1 = channels_health_check(?status_connected, UpdatedData0),
                {keep_state, UpdatedData1, health_check_actions(UpdatedData1)};
            (Status, UpdatedData) ->
                ?SLOG(warning, #{
@@ -881,6 +890,10 @@ handle_connected_health_check(Data) ->
                    id => Data#data.id,
                    status => Status
                }),
+                %% Note: works because, coincidentally, channel/resource status is a
+                %% subset of resource manager state... But there should be a conversion
+                %% between the two here, as resource manager also has `stopped', which is
+                %% not a valid status at the time of writing.
                {next_state, Status, channels_health_check(Status, UpdatedData)}
        end
    ).
@@ -898,7 +911,8 @@ with_health_check(#data{error = PrevError} = Data, Func) ->
    },
    Func(Status, update_state(UpdatedData, Data)).

-channels_health_check(connected = _ResourceStatus, Data0) ->
+-spec channels_health_check(resource_status(), data()) -> data().
+channels_health_check(?status_connected = _ConnectorStatus, Data0) ->
    Channels = maps:to_list(Data0#data.added_channels),
    %% All channels with a stutus different from connected or connecting are
    %% not added
@@ -914,7 +928,7 @@ channels_health_check(connected = _ResourceStatus, Data0) ->
    %% Now that we have done the adding, we can get the status of all channels
    Data2 = channel_status_for_all_channels(Data1),
    update_state(Data2, Data0);
-channels_health_check(connecting, Data0) ->
+channels_health_check(?status_connecting = _ConnectorStatus, Data0) ->
    %% Whenever the resource is connecting:
    %% 1. Change the status of all added channels to connecting
    %% 2. Raise alarms (TODO: if it is a probe we should not raise alarms)
@@ -926,7 +940,7 @@ channels_health_check(connecting, Data0) ->
    ],
    ChannelsWithNewStatuses =
        [
-            {ChannelId, channel_status({connecting, resource_is_connecting})}
+            {ChannelId, channel_status({?status_connecting, resource_is_connecting})}
         || ChannelId <- ChannelsToChangeStatusFor
        ],
    %% Update the channels map
@@ -945,13 +959,13 @@ channels_health_check(connecting, Data0) ->
    %% Raise alarms for all channels
    lists:foreach(
        fun({ChannelId, Status, PrevStatus}) ->
-            maybe_alarm(connecting, ChannelId, Status, PrevStatus)
+            maybe_alarm(?status_connecting, ChannelId, Status, PrevStatus)
        end,
        ChannelsWithNewAndPrevErrorStatuses
    ),
    Data1 = Data0#data{added_channels = NewChannels},
    update_state(Data1, Data0);
-channels_health_check(ResourceStatus, Data0) ->
+channels_health_check(ConnectorStatus, Data0) ->
    %% Whenever the resource is not connected and not connecting:
    %% 1. Remove all added channels
    %% 2. Change the status to an error status
@@ -969,7 +983,7 @@ channels_health_check(ResourceStatus, Data0) ->
            channel_status(
                {error,
                    resource_not_connected_channel_error_msg(
-                        ResourceStatus,
+                        ConnectorStatus,
                        ChannelId,
                        Data1
                    )}
@@ -1025,7 +1039,7 @@ channel_status_for_all_channels(Data) ->
    %% Raise/clear alarms
    lists:foreach(
        fun
-            ({ID, _OldStatus, #{status := connected}}) ->
+            ({ID, _OldStatus, #{status := ?status_connected}}) ->
                _ = maybe_clear_alarm(ID);
            ({ID, OldStatus, NewStatus}) ->
                _ = maybe_alarm(NewStatus, ID, NewStatus, OldStatus)
@@ -1071,9 +1085,11 @@ get_config_from_map_or_channel_status(ChannelId, ChannelIdToConfig, ChannelStatu
            Config
    end.

+-spec update_state(data()) -> data().
update_state(Data) ->
    update_state(Data, undefined).

+-spec update_state(data(), data() | undefined) -> data().
update_state(DataWas, DataWas) ->
    DataWas;
update_state(Data, _DataWas) ->
@@ -1083,7 +1099,8 @@ update_state(Data, _DataWas) ->
health_check_interval(Opts) ->
    maps:get(health_check_interval, Opts, ?HEALTHCHECK_INTERVAL).

-maybe_alarm(connected, _ResId, _Error, _PrevError) ->
+-spec maybe_alarm(resource_status(), resource_id(), _Error :: term(), _PrevError :: term()) -> ok.
+maybe_alarm(?status_connected, _ResId, _Error, _PrevError) ->
    ok;
maybe_alarm(_Status, <<?TEST_ID_PREFIX, _/binary>>, _Error, _PrevError) ->
    ok;
@@ -1095,7 +1112,7 @@ maybe_alarm(_Status, ResId, Error, _PrevError) ->
        case Error of
            {error, undefined} -> <<"Unknown reason">>;
            {error, Reason} -> emqx_utils:readable_error_msg(Reason);
-            Error -> emqx_utils:readable_error_msg(Error)
+            _ -> emqx_utils:readable_error_msg(Error)
        end,
    emqx_alarm:safe_activate(
        ResId,
@@ -1104,7 +1121,8 @@ maybe_alarm(_Status, ResId, Error, _PrevError) ->
    ),
    ?tp(resource_activate_alarm, #{resource_id => ResId}).

-maybe_resume_resource_workers(ResId, connected) ->
+-spec maybe_resume_resource_workers(resource_id(), resource_status()) -> ok.
+maybe_resume_resource_workers(ResId, ?status_connected) ->
    lists:foreach(
        fun emqx_resource_buffer_worker:resume/1,
        emqx_resource_buffer_worker_sup:worker_pids(ResId)
@@ -1112,6 +1130,7 @@ maybe_resume_resource_workers(ResId, connected) ->
maybe_resume_resource_workers(_, _) ->
    ok.

+-spec maybe_clear_alarm(resource_id()) -> ok | {error, not_found}.
maybe_clear_alarm(<<?TEST_ID_PREFIX, _/binary>>) ->
    ok;
maybe_clear_alarm(ResId) ->
@@ -1132,9 +1151,9 @@ parse_health_check_result({error, Error}, Data) ->
            reason => Error
        }
    ),
-    {disconnected, Data#data.state, {error, Error}}.
+    {?status_disconnected, Data#data.state, {error, Error}}.

-status_to_error(connected) ->
+status_to_error(?status_connected) ->
    undefined;
status_to_error(_) ->
    {error, undefined}.
@@ -1170,9 +1189,9 @@ do_wait_for_ready(_ResId, 0) ->
    timeout;
do_wait_for_ready(ResId, Retry) ->
    case try_read_cache(ResId) of
-        #data{status = connected} ->
+        #data{status = ?status_connected} ->
            ok;
-        #data{status = disconnected, error = Err} ->
+        #data{status = ?status_disconnected, error = Err} ->
            {error, external_error(Err)};
        _ ->
            timer:sleep(?WAIT_FOR_RESOURCE_DELAY),
@@ -1203,7 +1222,7 @@ channel_status() ->
    %% - connected: the channel is added to the resource, the resource is
    %%   connected and the on_channel_get_status callback has returned
    %%   connected. The error field should be undefined.
-    status => disconnected,
+    status => ?status_disconnected,
    error => not_added_yet
}.

@@ -1212,20 +1231,20 @@ channel_status() ->
%% anywhere else in that case.
channel_status_new_with_config(Config) ->
    #{
-        status => disconnected,
+        status => ?status_disconnected,
        error => not_added_yet,
        config => Config
    }.

channel_status_new_waiting_for_health_check() ->
    #{
-        status => connecting,
+        status => ?status_connecting,
        error => no_health_check_yet
    }.

-channel_status({connecting, Error}) ->
+channel_status({?status_connecting, Error}) ->
    #{
-        status => connecting,
+        status => ?status_connecting,
        error => Error
    };
channel_status(?status_disconnected) ->
@@ -1233,40 +1252,41 @@ channel_status(?status_disconnected) ->
        status => ?status_disconnected,
        error => <<"Disconnected for unknown reason">>
    };
-channel_status(connecting) ->
+channel_status(?status_connecting) ->
    #{
-        status => connecting,
+        status => ?status_connecting,
        error => <<"Not connected for unknown reason">>
    };
-channel_status(connected) ->
+channel_status(?status_connected) ->
    #{
-        status => connected,
+        status => ?status_connected,
        error => undefined
    };
%% Probably not so useful but it is permitted to set an error even when the
%% status is connected
-channel_status({connected, Error}) ->
+channel_status({?status_connected, Error}) ->
    #{
-        status => connected,
+        status => ?status_connected,
        error => Error
    };
channel_status({error, Reason}) ->
    #{
-        status => disconnected,
+        status => ?status_disconnected,
        error => Reason
    }.

channel_status_is_channel_added(#{
-    status := connected
+    status := ?status_connected
}) ->
    true;
channel_status_is_channel_added(#{
-    status := connecting
+    status := ?status_connecting
}) ->
    true;
channel_status_is_channel_added(_Status) ->
    false.

+-spec add_channel_status_if_not_exists(data(), channel_id(), resource_state()) -> data().
add_channel_status_if_not_exists(Data, ChannelId, State) ->
    Channels = Data#data.added_channels,
    case maps:is_key(ChannelId, Channels) of
@@ -1275,6 +1295,12 @@ add_channel_status_if_not_exists(Data, ChannelId, State) ->
        false ->
            ChannelStatus = channel_status({error, resource_not_operational}),
            NewChannels = maps:put(ChannelId, ChannelStatus, Channels),
-            maybe_alarm(State, ChannelId, ChannelStatus, no_prev),
+            ResStatus = state_to_status(State),
+            maybe_alarm(ResStatus, ChannelId, ChannelStatus, no_prev),
            Data#data{added_channels = NewChannels}
    end.

+state_to_status(?state_stopped) -> ?rm_status_stopped;
+state_to_status(?state_connected) -> ?status_connected;
+state_to_status(?state_connecting) -> ?status_connecting;
+state_to_status(?state_disconnected) -> ?status_disconnected.
@@ -23,7 +23,7 @@

 -export([namespace/0, roots/0, fields/1, desc/1]).

--export([create_opts/1]).
+-export([create_opts/1, resource_opts_meta/0]).

%% range interval in ms
 -define(HEALTH_CHECK_INTERVAL_RANGE_MIN, 1).
@@ -115,7 +115,7 @@ t_create_remove(_) ->
            ?assertNot(is_process_alive(Pid))
        end,
        fun(Trace) ->
-            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
+            ?assertEqual([], ?of_kind("inconsistent_status", Trace)),
            ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
        end
    ).
@@ -174,7 +174,7 @@ t_create_remove_local(_) ->
            ?assertNot(is_process_alive(Pid))
        end,
        fun(Trace) ->
-            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
+            ?assertEqual([], ?of_kind("inconsistent_status", Trace)),
            ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
        end
    ).
@@ -219,7 +219,7 @@ t_do_not_start_after_created(_) ->
            ?assertNot(is_process_alive(Pid2))
        end,
        fun(Trace) ->
-            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
+            ?assertEqual([], ?of_kind("inconsistent_status", Trace)),
            ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
        end
    ).
@@ -855,7 +855,7 @@ t_healthy_timeout(_) ->
            ?assertEqual(ok, emqx_resource:remove_local(?ID))
        end,
        fun(Trace) ->
-            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
+            ?assertEqual([], ?of_kind("inconsistent_status", Trace)),
            ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
        end
    ).
@@ -894,7 +894,7 @@ t_healthy(_) ->
            ?assertEqual(ok, emqx_resource:remove_local(?ID))
        end,
        fun(Trace) ->
-            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
+            ?assertEqual([], ?of_kind("inconsistent_status", Trace)),
            ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
        end
    ).
@@ -1006,7 +1006,7 @@ t_stop_start(_) ->
        end,

        fun(Trace) ->
-            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
+            ?assertEqual([], ?of_kind("inconsistent_status", Trace)),
            ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
        end
    ).
@@ -1064,7 +1064,7 @@ t_stop_start_local(_) ->
            ?assert(is_process_alive(Pid1))
        end,
        fun(Trace) ->
-            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
+            ?assertEqual([], ?of_kind("inconsistent_status", Trace)),
            ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
        end
    ).
@@ -1269,7 +1269,7 @@ t_health_check_disconnected(_) ->
            )
        end,
        fun(Trace) ->
-            ?assertEqual([], ?of_kind("inconsistent_state", Trace)),
+            ?assertEqual([], ?of_kind("inconsistent_status", Trace)),
            ?assertEqual([], ?of_kind("inconsistent_cache", Trace))
        end
    ).
@@ -172,44 +172,58 @@ fields("node_metrics") ->
    [{"node", sc(binary(), #{desc => ?DESC("node_node"), example => "emqx@127.0.0.1"})}] ++
        fields("metrics");
fields("ctx_pub") ->
+    Event = 'message.publish',
    [
-        {"event_type", event_type_sc(message_publish)},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)},
        {"id", sc(binary(), #{desc => ?DESC("event_id")})}
        | msg_event_common_fields()
    ];
fields("ctx_sub") ->
+    Event = 'session.subscribed',
    [
-        {"event_type", event_type_sc(session_subscribed)}
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)}
        | msg_event_common_fields()
    ];
fields("ctx_unsub") ->
+    Event = 'session.unsubscribed',
    [
-        {"event_type", event_type_sc(session_unsubscribed)}
-        | proplists:delete("event_type", fields("ctx_sub"))
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)}
+        | without(["event_type", "event_topic", "event"], fields("ctx_sub"))
    ];
fields("ctx_delivered") ->
+    Event = 'message.delivered',
    [
-        {"event_type", event_type_sc(message_delivered)},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)},
        {"id", sc(binary(), #{desc => ?DESC("event_id")})},
        {"from_clientid", sc(binary(), #{desc => ?DESC("event_from_clientid")})},
        {"from_username", sc(binary(), #{desc => ?DESC("event_from_username")})}
        | msg_event_common_fields()
    ];
fields("ctx_acked") ->
+    Event = 'message.acked',
    [
-        {"event_type", event_type_sc(message_acked)}
-        | proplists:delete("event_type", fields("ctx_delivered"))
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)}
+        | without(["event_type", "event_topic", "event"], fields("ctx_delivered"))
    ];
fields("ctx_dropped") ->
+    Event = 'message.dropped',
    [
-        {"event_type", event_type_sc(message_dropped)},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)},
        {"id", sc(binary(), #{desc => ?DESC("event_id")})},
        {"reason", sc(binary(), #{desc => ?DESC("event_ctx_dropped")})}
        | msg_event_common_fields()
    ];
fields("ctx_connected") ->
+    Event = 'client.connected',
    [
-        {"event_type", event_type_sc(client_connected)},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)},
        {"clientid", sc(binary(), #{desc => ?DESC("event_clientid")})},
        {"username", sc(binary(), #{desc => ?DESC("event_username")})},
        {"mountpoint", sc(binary(), #{desc => ?DESC("event_mountpoint")})},
@@ -227,8 +241,10 @@ fields("ctx_connected") ->
        })}
    ];
fields("ctx_disconnected") ->
+    Event = 'client.disconnected',
    [
-        {"event_type", event_type_sc(client_disconnected)},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)},
        {"clientid", sc(binary(), #{desc => ?DESC("event_clientid")})},
        {"username", sc(binary(), #{desc => ?DESC("event_username")})},
        {"reason", sc(binary(), #{desc => ?DESC("event_ctx_disconnected_reason")})},
@@ -240,8 +256,10 @@ fields("ctx_disconnected") ->
        })}
    ];
fields("ctx_connack") ->
+    Event = 'client.connack',
    [
-        {"event_type", event_type_sc(client_connack)},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)},
        {"reason_code", sc(binary(), #{desc => ?DESC("event_ctx_connack_reason_code")})},
        {"clientid", sc(binary(), #{desc => ?DESC("event_clientid")})},
        {"clean_start", sc(boolean(), #{desc => ?DESC("event_clean_start"), default => true})},
@@ -258,8 +276,10 @@ fields("ctx_connack") ->
        })}
    ];
fields("ctx_check_authz_complete") ->
+    Event = 'client.check_authz_complete',
    [
-        {"event_type", event_type_sc(client_check_authz_complete)},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)},
        {"clientid", sc(binary(), #{desc => ?DESC("event_clientid")})},
        {"username", sc(binary(), #{desc => ?DESC("event_username")})},
        {"peerhost", sc(binary(), #{desc => ?DESC("event_peerhost")})},
@@ -269,8 +289,11 @@ fields("ctx_check_authz_complete") ->
        {"result", sc(binary(), #{desc => ?DESC("event_result")})}
    ];
fields("ctx_bridge_mqtt") ->
+    Event = '$bridges/mqtt:*',
+    EventBin = atom_to_binary(Event),
    [
-        {"event_type", event_type_sc('$bridges/mqtt:*')},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(EventBin)},
        {"id", sc(binary(), #{desc => ?DESC("event_id")})},
        {"payload", sc(binary(), #{desc => ?DESC("event_payload")})},
        {"topic", sc(binary(), #{desc => ?DESC("event_topic")})},
@@ -281,8 +304,10 @@ fields("ctx_bridge_mqtt") ->
        qos()
    ];
fields("ctx_delivery_dropped") ->
+    Event = 'delivery.dropped',
    [
-        {"event_type", event_type_sc(delivery_dropped)},
+        {"event_type", event_type_sc(Event)},
+        {"event", event_sc(Event)},
        {"id", sc(binary(), #{desc => ?DESC("event_id")})},
        {"reason", sc(binary(), #{desc => ?DESC("event_ctx_dropped")})},
        {"from_clientid", sc(binary(), #{desc => ?DESC("event_from_clientid")})},
@@ -309,7 +334,21 @@ sc(Type, Meta) -> hoconsc:mk(Type, Meta).
ref(Field) -> hoconsc:ref(?MODULE, Field).

event_type_sc(Event) ->
-    sc(Event, #{desc => ?DESC("event_event_type"), required => true}).
+    EventType = event_to_event_type(Event),
+    sc(EventType, #{desc => ?DESC("event_event_type"), required => true}).
+
+-spec event_to_event_type(atom()) -> atom().
+event_to_event_type(Event) ->
+    binary_to_atom(binary:replace(atom_to_binary(Event), <<".">>, <<"_">>)).
+
+event_sc(Event) when is_binary(Event) ->
+    %% only exception is `$bridges/...'.
+    sc(binary(), #{default => Event, importance => ?IMPORTANCE_HIDDEN});
+event_sc(Event) ->
+    sc(Event, #{default => Event, importance => ?IMPORTANCE_HIDDEN}).
+
+without(FieldNames, Fields) ->
+    lists:foldl(fun proplists:delete/2, Fields, FieldNames).

publish_received_at_sc() ->
    sc(integer(), #{desc => ?DESC("event_publish_received_at")}).
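%% [Annotation, not part of the commit] The new event_to_event_type/1 helper
%% above derives the schema's event_type atom from the hook-point name by
%% swapping the dot for an underscore; a small self-check of that mapping:

event_type_demo() ->
    message_publish = event_to_event_type('message.publish'),
    client_connected = event_to_event_type('client.connected'),
    ok.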
@@ -27,7 +27,7 @@
test(#{sql := Sql, context := Context}) ->
    case emqx_rule_sqlparser:parse(Sql) of
        {ok, Select} ->
-            InTopic = maps:get(topic, Context, <<>>),
+            InTopic = get_in_topic(Context),
            EventTopics = emqx_rule_sqlparser:select_from(Select),
            case lists:all(fun is_publish_topic/1, EventTopics) of
                true ->
@@ -37,8 +37,13 @@ test(#{sql := Sql, context := Context}) ->
                        false -> {error, nomatch}
                    end;
                false ->
-                    %% the rule is for both publish and events, test it directly
-                    test_rule(Sql, Select, Context, EventTopics)
+                    case lists:member(InTopic, EventTopics) of
+                        true ->
+                            %% the rule is for both publish and events, test it directly
+                            test_rule(Sql, Select, Context, EventTopics);
+                        false ->
+                            {error, nomatch}
+                    end
            end;
        {error, Reason} ->
            ?SLOG(debug, #{
@@ -92,15 +97,12 @@ flatten([D | L]) when is_list(D) ->
    [D0 || {ok, D0} <- D] ++ flatten(L).

fill_default_values(Event, Context) ->
-    maps:merge(envs_examp(Event), Context).
+    maps:merge(envs_examp(Event, Context), Context).

-envs_examp(EventTopic) ->
-    EventName = emqx_rule_events:event_name(EventTopic),
-    emqx_rule_maps:atom_key_map(
-        maps:from_list(
-            emqx_rule_events:columns_with_exam(EventName)
-        )
-    ).
+envs_examp(EventTopic, Context) ->
+    EventName = maps:get(event, Context, emqx_rule_events:event_name(EventTopic)),
+    Env = maps:from_list(emqx_rule_events:columns_with_exam(EventName)),
+    emqx_rule_maps:atom_key_map(Env).

is_test_runtime_env_atom() ->
    'emqx_rule_sqltester:is_test_runtime_env'.
@@ -118,3 +120,26 @@ is_test_runtime_env() ->
        true -> true;
        _ -> false
    end.
+
+%% Most events have the original `topic' input, but their own topic (i.e.: `$events/...')
+%% is different from `topic'.
+get_in_topic(Context) ->
+    case maps:find(event_topic, Context) of
+        {ok, EventTopic} ->
+            EventTopic;
+        error ->
+            case maps:find(event, Context) of
+                {ok, Event} ->
+                    maybe_infer_in_topic(Context, Event);
+                error ->
+                    maps:get(topic, Context, <<>>)
+            end
+    end.
+
+maybe_infer_in_topic(Context, 'message.publish') ->
+    %% This is special because the common use in the frontend is to select this event, but
+    %% test the input `topic' field against MQTT topic filters in the `FROM' clause rather
+    %% than the corresponding `$events/message_publish'.
+    maps:get(topic, Context, <<>>);
+maybe_infer_in_topic(_Context, Event) ->
+    emqx_rule_events:event_topic(Event).
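%% [Annotation, not part of the commit] Worked examples of the new
%% get_in_topic/1 resolution order: an explicit event_topic wins, then a
%% known event is mapped to its '$events/...' topic, and 'message.publish'
%% deliberately keeps matching the raw input topic so the frontend's
%% plain-topic tests still work. The exact '$events/client_connected'
%% value assumes the usual emqx_rule_events:event_topic/1 mapping:
%%
%%   get_in_topic(#{event_topic => <<"$events/x">>})  -> <<"$events/x">>
%%   get_in_topic(#{event => 'client.connected'})     -> <<"$events/client_connected">>
%%   get_in_topic(#{event => 'message.publish',
%%                  topic => <<"t/1">>})              -> <<"t/1">>
%%   get_in_topic(#{topic => <<"t/1">>})              -> <<"t/1">>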
@@ -1990,7 +1990,10 @@ t_sqlparse_event_1(_Config) ->
        emqx_rule_sqltester:test(
            #{
                sql => Sql,
-                context => #{topic => <<"t/tt">>}
+                context => #{
+                    topic => <<"t/tt">>,
+                    event => 'session.subscribed'
+                }
            }
        )
    ).
@@ -2004,7 +2007,10 @@ t_sqlparse_event_2(_Config) ->
        emqx_rule_sqltester:test(
            #{
                sql => Sql,
-                context => #{clientid => <<"abc">>}
+                context => #{
+                    clientid => <<"abc">>,
+                    event => 'client.connected'
+                }
            }
        )
    ).
@@ -0,0 +1,385 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%     http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+-module(emqx_rule_engine_api_2_SUITE).
+
+-compile(nowarn_export_all).
+-compile(export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+
+%%------------------------------------------------------------------------------
+%% CT boilerplate
+%%------------------------------------------------------------------------------
+
+all() ->
+    emqx_common_test_helpers:all(?MODULE).
+
+init_per_suite(Config) ->
+    Apps = emqx_cth_suite:start(
+        app_specs(),
+        #{work_dir => emqx_cth_suite:work_dir(Config)}
+    ),
+    emqx_common_test_http:create_default_app(),
+    [{apps, Apps} | Config].
+
+end_per_suite(Config) ->
+    Apps = ?config(apps, Config),
+    ok = emqx_cth_suite:stop(Apps),
+    ok.
+
+app_specs() ->
+    [
+        emqx_conf,
+        emqx_rule_engine,
+        emqx_management,
+        {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
+    ].
+
+%%------------------------------------------------------------------------------
+%% Helper fns
+%%------------------------------------------------------------------------------
+
+maybe_json_decode(X) ->
+    case emqx_utils_json:safe_decode(X, [return_maps]) of
+        {ok, Decoded} -> Decoded;
+        {error, _} -> X
+    end.
+
+request(Method, Path, Params) ->
+    AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
+    Opts = #{return_all => true},
+    case emqx_mgmt_api_test_util:request_api(Method, Path, "", AuthHeader, Params, Opts) of
+        {ok, {Status, Headers, Body0}} ->
+            Body = maybe_json_decode(Body0),
+            {ok, {Status, Headers, Body}};
+        {error, {Status, Headers, Body0}} ->
+            Body =
+                case emqx_utils_json:safe_decode(Body0, [return_maps]) of
+                    {ok, Decoded0 = #{<<"message">> := Msg0}} ->
+                        Msg = maybe_json_decode(Msg0),
+                        Decoded0#{<<"message">> := Msg};
+                    {ok, Decoded0} ->
+                        Decoded0;
+                    {error, _} ->
+                        Body0
+                end,
+            {error, {Status, Headers, Body}};
+        Error ->
+            Error
+    end.
+
+sql_test_api(Params) ->
+    Method = post,
+    Path = emqx_mgmt_api_test_util:api_path(["rule_test"]),
+    ct:pal("sql test (http):\n ~p", [Params]),
+    Res = request(Method, Path, Params),
+    ct:pal("sql test (http) result:\n ~p", [Res]),
+    Res.
+
+%%------------------------------------------------------------------------------
+%% Test cases
+%%------------------------------------------------------------------------------
+
+t_rule_test_smoke(_Config) ->
+    %% Example inputs recorded from frontend on 2023-12-04
+    Publish = [
+        #{
+            expected => #{code => 200},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"message_publish">>,
+                            <<"payload">> => <<"{\"msg\": \"hello\"}">>,
+                            <<"qos">> => 1,
+                            <<"topic">> => <<"t/a">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            hint => <<"wrong topic">>,
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"message_publish">>,
+                            <<"payload">> => <<"{\"msg\": \"hello\"}">>,
+                            <<"qos">> => 1,
+                            <<"topic">> => <<"a">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            hint => <<
+                "Currently, the frontend doesn't try to match against "
+                "$events/message_published, but it may start sending "
+                "the event topic in the future."
+            >>,
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"message_publish">>,
+                            <<"payload">> => <<"{\"msg\": \"hello\"}">>,
+                            <<"qos">> => 1,
+                            <<"topic">> => <<"t/a">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"$events/message_published\"">>
+                }
+        }
+    ],
+    %% Default input SQL doesn't match any event topic
+    DefaultNoMatch = [
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx_2">>,
+                            <<"event_type">> => <<"message_delivered">>,
+                            <<"from_clientid">> => <<"c_emqx_1">>,
+                            <<"from_username">> => <<"u_emqx_1">>,
+                            <<"payload">> => <<"{\"msg\": \"hello\"}">>,
+                            <<"qos">> => 1,
+                            <<"topic">> => <<"t/a">>,
+                            <<"username">> => <<"u_emqx_2">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx_2">>,
+                            <<"event_type">> => <<"message_acked">>,
+                            <<"from_clientid">> => <<"c_emqx_1">>,
+                            <<"from_username">> => <<"u_emqx_1">>,
+                            <<"payload">> => <<"{\"msg\": \"hello\"}">>,
+                            <<"qos">> => 1,
+                            <<"topic">> => <<"t/a">>,
+                            <<"username">> => <<"u_emqx_2">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"message_dropped">>,
+                            <<"payload">> => <<"{\"msg\": \"hello\"}">>,
+                            <<"qos">> => 1,
+                            <<"reason">> => <<"no_subscribers">>,
+                            <<"topic">> => <<"t/a">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"client_connected">>,
+                            <<"peername">> => <<"127.0.0.1:52918">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"client_disconnected">>,
+                            <<"reason">> => <<"normal">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"client_connack">>,
+                            <<"reason_code">> => <<"sucess">>,
+                            <<"username">> => <<"u_emqx">>
+                        },
+                    <<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
+                }
+        },
+        #{
+            expected => #{code => 412},
+            input =>
+                #{
+                    <<"context">> =>
+                        #{
+                            <<"action">> => <<"publish">>,
+                            <<"clientid">> => <<"c_emqx">>,
+                            <<"event_type">> => <<"client_check_authz_complete">>,
|
||||
<<"result">> => <<"allow">>,
|
||||
<<"topic">> => <<"t/1">>,
|
||||
<<"username">> => <<"u_emqx">>
|
||||
},
|
||||
<<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
|
||||
}
|
||||
},
|
||||
#{
|
||||
expected => #{code => 412},
|
||||
input =>
|
||||
#{
|
||||
<<"context">> =>
|
||||
#{
|
||||
<<"clientid">> => <<"c_emqx">>,
|
||||
<<"event_type">> => <<"session_subscribed">>,
|
||||
<<"qos">> => 1,
|
||||
<<"topic">> => <<"t/a">>,
|
||||
<<"username">> => <<"u_emqx">>
|
||||
},
|
||||
<<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
|
||||
}
|
||||
},
|
||||
#{
|
||||
expected => #{code => 412},
|
||||
input =>
|
||||
#{
|
||||
<<"context">> =>
|
||||
#{
|
||||
<<"clientid">> => <<"c_emqx">>,
|
||||
<<"event_type">> => <<"session_unsubscribed">>,
|
||||
<<"qos">> => 1,
|
||||
<<"topic">> => <<"t/a">>,
|
||||
<<"username">> => <<"u_emqx">>
|
||||
},
|
||||
<<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
|
||||
}
|
||||
},
|
||||
#{
|
||||
expected => #{code => 412},
|
||||
input =>
|
||||
#{
|
||||
<<"context">> =>
|
||||
#{
|
||||
<<"clientid">> => <<"c_emqx_2">>,
|
||||
<<"event_type">> => <<"delivery_dropped">>,
|
||||
<<"from_clientid">> => <<"c_emqx_1">>,
|
||||
<<"from_username">> => <<"u_emqx_1">>,
|
||||
<<"payload">> => <<"{\"msg\": \"hello\"}">>,
|
||||
<<"qos">> => 1,
|
||||
<<"reason">> => <<"queue_full">>,
|
||||
<<"topic">> => <<"t/a">>,
|
||||
<<"username">> => <<"u_emqx_2">>
|
||||
},
|
||||
<<"sql">> => <<"SELECT\n *\nFROM\n \"t/#\"">>
|
||||
}
|
||||
}
|
||||
],
|
||||
MultipleFrom = [
|
||||
#{
|
||||
expected => #{code => 200},
|
||||
input =>
|
||||
#{
|
||||
<<"context">> =>
|
||||
#{
|
||||
<<"clientid">> => <<"c_emqx">>,
|
||||
<<"event_type">> => <<"session_unsubscribed">>,
|
||||
<<"qos">> => 1,
|
||||
<<"topic">> => <<"t/a">>,
|
||||
<<"username">> => <<"u_emqx">>
|
||||
},
|
||||
<<"sql">> =>
|
||||
<<"SELECT\n *\nFROM\n \"t/#\", \"$events/session_unsubscribed\" ">>
|
||||
}
|
||||
},
|
||||
#{
|
||||
expected => #{code => 200},
|
||||
input =>
|
||||
#{
|
||||
<<"context">> =>
|
||||
#{
|
||||
<<"clientid">> => <<"c_emqx">>,
|
||||
<<"event_type">> => <<"session_unsubscribed">>,
|
||||
<<"qos">> => 1,
|
||||
<<"topic">> => <<"t/a">>,
|
||||
<<"username">> => <<"u_emqx">>
|
||||
},
|
||||
<<"sql">> =>
|
||||
<<"SELECT\n *\nFROM\n \"$events/message_dropped\", \"$events/session_unsubscribed\" ">>
|
||||
}
|
||||
},
|
||||
#{
|
||||
expected => #{code => 412},
|
||||
input =>
|
||||
#{
|
||||
<<"context">> =>
|
||||
#{
|
||||
<<"clientid">> => <<"c_emqx">>,
|
||||
<<"event_type">> => <<"session_unsubscribed">>,
|
||||
<<"qos">> => 1,
|
||||
<<"topic">> => <<"t/a">>,
|
||||
<<"username">> => <<"u_emqx">>
|
||||
},
|
||||
<<"sql">> =>
|
||||
<<"SELECT\n *\nFROM\n \"$events/message_dropped\", \"$events/client_connected\" ">>
|
||||
}
|
||||
}
|
||||
],
|
||||
Cases = Publish ++ DefaultNoMatch ++ MultipleFrom,
|
||||
FailedCases = lists:filtermap(fun do_t_rule_test_smoke/1, Cases),
|
||||
?assertEqual([], FailedCases),
|
||||
ok.
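
%% A sketch of why the 412 cases above fail, assuming (as the hints suggest)
%% the check boils down to topic-filter matching: the event topic implied by
%% `event_type' must match some filter in the SQL FROM clause, e.g.
%%   emqx_topic:match(<<"t/a">>, <<"t/#">>) -> true   %% accepted (200)
%%   emqx_topic:match(<<"a">>, <<"t/#">>)   -> false  %% rejected (412)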

do_t_rule_test_smoke(#{input := Input, expected := #{code := ExpectedCode}} = Case) ->
    {_ErrOrOk, {{_, Code, _}, _, Body}} = sql_test_api(Input),
    case Code =:= ExpectedCode of
        true ->
            false;
        false ->
            {true, #{
                expected => ExpectedCode,
                hint => maps:get(hint, Case, <<>>),
                got => Code,
                resp_body => Body
            }}
    end.

@ -216,7 +216,7 @@ t_ctx_delivery_dropped(_) ->

t_mongo_date_function_should_return_string_in_test_env(_) ->
    SQL =
        <<"SELECT mongo_date() as mongo_date FROM \"t/1\"">>,
        <<"SELECT mongo_date() as mongo_date FROM \"$events/client_check_authz_complete\"">>,
    Context =
        #{
            action => <<"publish">>,

@ -348,19 +348,11 @@ receive_published(Line) ->

cluster(Config) ->
    PrivDataDir = ?config(priv_dir, Config),
    PeerModule =
        case os:getenv("IS_CI") of
            false ->
                slave;
            _ ->
                ct_slave
        end,
    Cluster = emqx_common_test_helpers:emqx_cluster(
        [core, core],
        [
            {apps, ?APPS},
            {listener_ports, []},
            {peer_mod, PeerModule},
            {priv_data_dir, PrivDataDir},
            {load_schema, true},
            {start_autocluster, true},

@ -382,7 +374,7 @@ cluster(Config) ->

start_cluster(Cluster) ->
    Nodes = [
        emqx_common_test_helpers:start_slave(Name, Opts)
        emqx_common_test_helpers:start_peer(Name, Opts)
     || {Name, Opts} <- Cluster
    ],
    NumNodes = length(Nodes),

@ -390,7 +382,7 @@ start_cluster(Cluster) ->
    emqx_utils:pmap(
        fun(N) ->
            ct:pal("stopping ~p", [N]),
            ok = emqx_common_test_helpers:stop_slave(N)
            ok = emqx_common_test_helpers:stop_peer(N)
        end,
        Nodes
    )

@ -154,7 +154,7 @@ init_per_testcase(t_exhook_info, Config) ->
    emqx_common_test_helpers:start_apps([emqx_exhook]),
    Config;
init_per_testcase(t_cluster_uuid, Config) ->
    Node = start_slave(n1),
    Node = start_peer(n1),
    [{n1, Node} | Config];
init_per_testcase(t_uuid_restored_from_file, Config) ->
    Config;

@ -210,7 +210,7 @@ end_per_testcase(t_exhook_info, _Config) ->
    ok;
end_per_testcase(t_cluster_uuid, Config) ->
    Node = proplists:get_value(n1, Config),
    ok = stop_slave(Node);
    ok = stop_peer(Node);
end_per_testcase(t_num_clients, Config) ->
    ok = snabbkaffe:stop(),
    Config;

@ -782,7 +782,7 @@ find_gen_rpc_port() ->
    {ok, {_, Port}} = inet:sockname(EPort),
    Port.

start_slave(Name) ->
start_peer(Name) ->
    Port = find_gen_rpc_port(),
    TestNode = node(),
    Handler =

@ -811,11 +811,9 @@ start_slave(Name) ->
        apps => [emqx, emqx_conf, emqx_retainer, emqx_modules, emqx_telemetry]
    },

    emqx_common_test_helpers:start_slave(Name, Opts).
    emqx_common_test_helpers:start_peer(Name, Opts).

stop_slave(Node) ->
    % This line don't work!!
    %emqx_cluster_rpc:fast_forward_to_commit(Node, 100),
stop_peer(Node) ->
    rpc:call(Node, ?MODULE, leave_cluster, []),
    ok = emqx_cth_peer:stop(Node),
    ?assertEqual([node()], mria:running_nodes()),

@ -35,7 +35,8 @@
    if_only_to_toggle_enable/2,
    update_if_present/3,
    put_if/4,
    rename/3
    rename/3,
    key_comparer/1
]).

-export_type([config_key/0, config_key_path/0]).

@ -318,3 +319,16 @@ rename(OldKey, NewKey, Map) ->
        error ->
            Map
    end.

-spec key_comparer(K) -> fun((M, M) -> boolean()) when M :: #{K => _V}.
key_comparer(K) ->
    fun
        (#{K := V1}, #{K := V2}) ->
            V1 < V2;
        (#{K := _}, _) ->
            false;
        (_, #{K := _}) ->
            true;
        (M1, M2) ->
            M1 < M2
    end.
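
%% Ordering implied by the clauses above: maps that lack `K' sort ahead of
%% maps that carry it; two maps that both carry `K' compare by its value;
%% all other pairs fall back to plain term order. See `key_comparer_test/0'
%% below for a worked example.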

@ -110,3 +110,22 @@ best_effort_recursive_sum_test_() ->
            )
        )
    ].

key_comparer_test() ->
    Comp = emqx_utils_maps:key_comparer(foo),
    ?assertEqual(
        [
            #{},
            #{baz => 42},
            #{foo => 1},
            #{foo => 42},
            #{foo => bar, baz => 42}
        ],
        lists:sort(Comp, [
            #{foo => 42},
            #{baz => 42},
            #{foo => bar, baz => 42},
            #{foo => 1},
            #{}
        ])
    ).
@ -0,0 +1,10 @@
Added a technical preview of the new persistent session implementation based on RocksDB.
Please note that this feature is in the alpha stage and must not be enabled in production systems.

Features missing in the early preview version of the new persistent session implementation:

- Shard failover
- Retained messages
- Will message handling
- Shared subscriptions
- Subscription IDs

@ -0,0 +1 @@
Fixed a CoAP gateway bug that caused it to ignore subscription options.

@ -0,0 +1,7 @@
Updated the `gen_rpc` library to version 3.3.0. The new version includes
several performance improvements:

- Avoids allocating extra memory for packets before they are sent
  to the wire in some cases

- Bypasses the network for local calls

mix.exs

@ -56,7 +56,7 @@ defmodule EMQXUmbrella.MixProject do
      {:esockd, github: "emqx/esockd", tag: "5.9.8", override: true},
      {:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.8.0-emqx-1", override: true},
      {:ekka, github: "emqx/ekka", tag: "0.15.16", override: true},
      {:gen_rpc, github: "emqx/gen_rpc", tag: "3.2.2", override: true},
      {:gen_rpc, github: "emqx/gen_rpc", tag: "3.3.0", override: true},
      {:grpc, github: "emqx/grpc-erl", tag: "0.6.12", override: true},
      {:minirest, github: "emqx/minirest", tag: "1.3.14", override: true},
      {:ecpool, github: "emqx/ecpool", tag: "0.5.4", override: true},

@ -63,7 +63,7 @@
    , {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.8"}}}
    , {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.8.0-emqx-1"}}}
    , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.16"}}}
    , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.2.2"}}}
    , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.0"}}}
    , {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.12"}}}
    , {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.14"}}}
    , {ecpool, {git, "https://github.com/emqx/ecpool", {tag, "0.5.4"}}}
@ -7,27 +7,27 @@ connect_timeout.label:
"""Connect Timeout"""

producer_opts.desc:
"""Local MQTT data source and Azure Event Hub bridge configs."""
"""Local MQTT data source and Azure Event Hubs bridge configs."""

producer_opts.label:
"""MQTT to Azure Event Hub"""
"""MQTT to Azure Event Hubs"""

min_metadata_refresh_interval.desc:
"""Minimum time interval the client has to wait before refreshing Azure Event Hub Kafka broker and topic metadata. Setting too small value may add extra load on Azure Event Hub."""
"""Minimum time interval the client has to wait before refreshing Azure Event Hubs Kafka broker and topic metadata. Setting too small a value may add extra load on Azure Event Hubs."""

min_metadata_refresh_interval.label:
"""Min Metadata Refresh Interval"""

kafka_producer.desc:
"""Azure Event Hub Producer configuration."""
"""Azure Event Hubs Producer configuration."""

kafka_producer.label:
"""Azure Event Hub Producer"""
"""Azure Event Hubs Producer"""

producer_buffer.desc:
"""Configure producer message buffer.

Tell Azure Event Hub producer how to buffer messages when EMQX has more messages to send than Azure Event Hub can keep up, or when Azure Event Hub is down."""
Tell Azure Event Hubs producer how to buffer messages when EMQX has more messages to send than Azure Event Hubs can keep up with, or when Azure Event Hubs is down."""

producer_buffer.label:
"""Message Buffer"""

@ -45,7 +45,7 @@ socket_receive_buffer.label:
"""Socket Receive Buffer Size"""

socket_tcp_keepalive.desc:
"""Enable TCP keepalive for Azure Event Hub bridge connections.
"""Enable TCP keepalive for Azure Event Hubs bridge connections.
The value is three comma separated numbers in the format of 'Idle,Interval,Probes'
- Idle: The number of seconds a connection needs to be idle before the server begins to send out keep-alive probes (Linux default 7200).
- Interval: The number of seconds between TCP keep-alive probes (Linux default 75).

@ -63,16 +63,16 @@ desc_name.label:
"""Bridge Name"""

producer_kafka_opts.desc:
"""Azure Event Hub producer configs."""
"""Azure Event Hubs producer configs."""

producer_kafka_opts.label:
"""Azure Event Hub Producer"""
"""Azure Event Hubs Producer"""

kafka_topic.desc:
"""Event Hub name"""
"""Event Hubs name"""

kafka_topic.label:
"""Event Hub Name"""
"""Event Hubs Name"""

kafka_message_timestamp.desc:
"""Which timestamp to use. The timestamp is expected to be a millisecond precision Unix epoch which can be in string format, e.g. <code>1661326462115</code> or <code>'1661326462115'</code>. When the desired data field for this template is not found, or if the found data is not a valid integer, the current system timestamp will be used."""

@ -97,21 +97,21 @@ socket_opts.label:
"""Socket Options"""

partition_count_refresh_interval.desc:
"""The time interval for Azure Event Hub producer to discover increased number of partitions.
After the number of partitions is increased in Azure Event Hub, EMQX will start taking the
"""The time interval for Azure Event Hubs producer to discover increased number of partitions.
After the number of partitions is increased in Azure Event Hubs, EMQX will start taking the
discovered partitions into account when dispatching messages per <code>partition_strategy</code>."""

partition_count_refresh_interval.label:
"""Partition Count Refresh Interval"""

max_batch_bytes.desc:
"""Maximum bytes to collect in an Azure Event Hub message batch. Most of the Kafka brokers default to a limit of 1 MB batch size. EMQX's default value is less than 1 MB in order to compensate Kafka message encoding overheads (especially when each individual message is very small). When a single message is over the limit, it is still sent (as a single element batch)."""
"""Maximum bytes to collect in an Azure Event Hubs message batch."""

max_batch_bytes.label:
"""Max Batch Bytes"""

required_acks.desc:
"""Required acknowledgements for Azure Event Hub partition leader to wait for its followers before it sends back the acknowledgement to EMQX Azure Event Hub producer
"""Required acknowledgements for Azure Event Hubs partition leader to wait for its followers before it sends back the acknowledgement to EMQX Azure Event Hubs producer

<code>all_isr</code>: Require all in-sync replicas to acknowledge.
<code>leader_only</code>: Require only the partition-leader's acknowledgement."""

@ -120,7 +120,7 @@ required_acks.label:
"""Required Acks"""

kafka_headers.desc:
"""Please provide a placeholder to be used as Azure Event Hub Headers<br/>
"""Please provide a placeholder to be used as Azure Event Hubs Headers<br/>
e.g. <code>${pub_props}</code><br/>
Notice that the value of the placeholder must either be an object:
<code>{\"foo\": \"bar\"}</code>

@ -128,39 +128,39 @@ or an array of key-value pairs:
<code>[{\"key\": \"foo\", \"value\": \"bar\"}]</code>"""

kafka_headers.label:
"""Azure Event Hub Headers"""
"""Azure Event Hubs Headers"""

producer_kafka_ext_headers.desc:
"""Please provide more key-value pairs for Azure Event Hub headers<br/>
"""Please provide more key-value pairs for Azure Event Hubs headers<br/>
The key-value pairs here will be combined with the
value of <code>kafka_headers</code> field before sending to Azure Event Hub."""
value of <code>kafka_headers</code> field before sending to Azure Event Hubs."""

producer_kafka_ext_headers.label:
"""Extra Azure Event Hub headers"""
"""Extra Azure Event Hubs headers"""

producer_kafka_ext_header_key.desc:
"""Key of the Azure Event Hub header. Placeholders in format of ${var} are supported."""
"""Key of the Azure Event Hubs header. Placeholders in format of ${var} are supported."""

producer_kafka_ext_header_key.label:
"""Azure Event Hub extra header key."""
"""Azure Event Hubs extra header key."""

producer_kafka_ext_header_value.desc:
"""Value of the Azure Event Hub header. Placeholders in format of ${var} are supported."""
"""Value of the Azure Event Hubs header. Placeholders in format of ${var} are supported."""

producer_kafka_ext_header_value.label:
"""Value"""

kafka_header_value_encode_mode.desc:
"""Azure Event Hub headers value encode mode<br/>
- NONE: only add binary values to Azure Event Hub headers;<br/>
- JSON: only add JSON values to Azure Event Hub headers,
"""Azure Event Hubs headers value encode mode<br/>
- NONE: only add binary values to Azure Event Hubs headers;<br/>
- JSON: only add JSON values to Azure Event Hubs headers,
and encode it to JSON strings before sending."""

kafka_header_value_encode_mode.label:
"""Azure Event Hub headers value encode mode"""
"""Azure Event Hubs headers value encode mode"""

metadata_request_timeout.desc:
"""Maximum wait time when fetching metadata from Azure Event Hub."""
"""Maximum wait time when fetching metadata from Azure Event Hubs."""

metadata_request_timeout.label:
"""Metadata Request Timeout"""

@ -220,52 +220,52 @@ config_enable.label:
"""Enable or Disable"""

desc_config.desc:
"""Configuration for an Azure Event Hub bridge."""
"""Configuration for an Azure Event Hubs bridge."""

desc_config.label:
"""Azure Event Hub Bridge Configuration"""
"""Azure Event Hubs Bridge Configuration"""

buffer_per_partition_limit.desc:
"""Number of bytes allowed to buffer for each Azure Event Hub partition. When this limit is exceeded, old messages will be dropped in a trade for credits for new messages to be buffered."""
"""Number of bytes allowed to buffer for each Azure Event Hubs partition. When this limit is exceeded, old messages will be dropped in a trade for credits for new messages to be buffered."""

buffer_per_partition_limit.label:
"""Per-partition Buffer Limit"""

bootstrap_hosts.desc:
"""A comma separated list of Azure Event Hub Kafka <code>host[:port]</code> namespace endpoints to bootstrap the client. Default port number is 9093."""
"""A comma separated list of Azure Event Hubs Kafka <code>host[:port]</code> namespace endpoints to bootstrap the client. Default port number is 9093."""

bootstrap_hosts.label:
"""Bootstrap Hosts"""

kafka_message_key.desc:
"""Template to render Azure Event Hub message key. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then Azure Event Hub's <code>NULL</code> (but not empty string) is used."""
"""Template to render Azure Event Hubs message key. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then Azure Event Hubs' <code>NULL</code> (but not empty string) is used."""

kafka_message_key.label:
"""Message Key"""

kafka_message.desc:
"""Template to render an Azure Event Hub message."""
"""Template to render an Azure Event Hubs message."""

kafka_message.label:
"""Azure Event Hub Message Template"""
"""Azure Event Hubs Message Template"""

mqtt_topic.desc:
"""MQTT topic or topic filter as data source (bridge input). If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in Azure Event Hub."""
"""MQTT topic or topic filter as data source (bridge input). If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in Azure Event Hubs."""

mqtt_topic.label:
"""Source MQTT Topic"""

kafka_message_value.desc:
"""Template to render Azure Event Hub message value. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then Azure Event Hub's <code>NULL</code> (but not empty string) is used."""
"""Template to render Azure Event Hubs message value. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then Azure Event Hubs' <code>NULL</code> (but not empty string) is used."""

kafka_message_value.label:
"""Message Value"""

partition_strategy.desc:
"""Partition strategy is to tell the producer how to dispatch messages to Azure Event Hub partitions.
"""Partition strategy is to tell the producer how to dispatch messages to Azure Event Hubs partitions.

<code>random</code>: Randomly pick a partition for each message
<code>key_dispatch</code>: Hash Azure Event Hub message key to a partition number"""
<code>key_dispatch</code>: Hash Azure Event Hubs message key to a partition number"""

partition_strategy.label:
"""Partition Strategy"""

@ -278,7 +278,7 @@ buffer_segment_bytes.label:
"""Segment File Bytes"""

max_inflight.desc:
"""Maximum number of batches allowed for Azure Event Hub producer (per-partition) to send before receiving acknowledgement from Azure Event Hub. Greater value typically means better throughput. However, there can be a risk of message reordering when this value is greater than 1."""
"""Maximum number of batches allowed for Azure Event Hubs producer (per-partition) to send before receiving acknowledgement from Azure Event Hubs. Greater value typically means better throughput. However, there can be a risk of message reordering when this value is greater than 1."""

max_inflight.label:
"""Max Inflight"""

@ -308,25 +308,25 @@ auth_username_password.label:
"""Username/password Auth"""

auth_sasl_password.desc:
"""The Connection String for connecting to Azure Event Hub. Should be the "connection string-primary key" of a Namespace shared access policy."""
"""The Connection String for connecting to Azure Event Hubs. Should be the "connection string-primary key" of a Namespace shared access policy."""

auth_sasl_password.label:
"""Connection String"""

producer_kafka_opts.desc:
"""Azure Event Hub producer configs."""
"""Azure Event Hubs producer configs."""

producer_kafka_opts.label:
"""Azure Event Hub Producer"""
"""Azure Event Hubs Producer"""

desc_config.desc:
"""Configuration for an Azure Event Hub bridge."""
"""Configuration for an Azure Event Hubs bridge."""

desc_config.label:
"""Azure Event Hub Bridge Configuration"""
"""Azure Event Hubs Bridge Configuration"""

ssl_client_opts.desc:
"""TLS/SSL options for Azure Event Hub client."""
"""TLS/SSL options for Azure Event Hubs client."""
ssl_client_opts.label:
"""TLS/SSL options"""
@ -12,7 +12,8 @@ desc_connectors.label:
"""Connectors"""

connector_field.desc:
"""Name of connector used to connect to the resource where the action is to be performed."""
"""Name of the connector specified by the action, used for external resource selection."""

connector_field.label:
"""Connector"""

@ -6,7 +6,7 @@ This is used to limit the connection rate for this node.
Once the limit is reached, new connections will be deferred or refused.<br/>
For example:<br/>
- <code>1000/s</code> :: Only accepts 1000 connections per second<br/>
- <code>1000/10s</code> :: Only accepts 1000 connections every 10 seconds"""
- <code>1000/10s</code> :: Only accepts 1000 connections every 10 seconds."""
max_conn_rate.label:
"""Maximum Connection Rate"""

@ -12,13 +12,6 @@ batch_time.desc:
batch_time.label:
"""Max batch wait time"""

buffer_mode.desc:
"""Buffer operation mode.
<code>memory_only</mode>: Buffer all messages in memory.<code>volatile_offload</code>: Buffer message in memory first, when up to certain limit (see <code>buffer_seg_bytes</code> config for more information), then start offloading messages to disk"""

buffer_mode.label:
"""Buffer Mode"""

buffer_seg_bytes.desc:
"""Applicable when buffer mode is set to <code>volatile_offload</code>.
This value is to specify the size of each on-disk buffer file."""

@ -573,7 +573,7 @@ fields_tcp_opts_buffer.label:
"""TCP user-space buffer"""

server_ssl_opts_schema_honor_cipher_order.desc:
"""An important security setting, it forces the cipher to be set based
"""An important security setting. It forces the cipher to be set based
on the server-specified order instead of the client-specified order,
hence enforcing the (usually more properly configured) security
ordering of the server administrator."""

@ -1012,13 +1012,13 @@ fields_ws_opts_supported_subprotocols.label:

broker_shared_subscription_strategy.desc:
"""Dispatch strategy for shared subscription.
- `random`: dispatch the message to a random selected subscriber
- `round_robin`: select the subscribers in a round-robin manner
- `round_robin_per_group`: select the subscribers in round-robin fashion within each shared subscriber group
- `local`: select random local subscriber otherwise select random cluster-wide
- `sticky`: always use the last selected subscriber to dispatch, until the subscriber disconnects.
- `hash_clientid`: select the subscribers by hashing the `clientIds`
- `hash_topic`: select the subscribers by hashing the source topic"""
- `random`: Randomly select a subscriber for dispatch;
- `round_robin`: Messages from a single publisher are dispatched to subscribers in turn;
- `round_robin_per_group`: All messages are dispatched to subscribers in turn;
- `local`: Randomly select a subscriber on the current node; if there are no subscribers on the current node, then randomly select within the cluster;
- `sticky`: Continuously dispatch messages to the initially selected subscriber until their session ends;
- `hash_clientid`: Hash the publisher's client ID to select a subscriber;
- `hash_topic`: Hash the publishing topic to select a subscriber."""

fields_deflate_opts_mem_level.desc:
"""Specifies the size of the compression state.<br/>

@ -1386,7 +1386,7 @@ However it's no longer useful because the shared-subscrption messages in a expir
base_listener_enable_authn.desc:
"""Set <code>true</code> (default) to enable client authentication on this listener, the authentication
process goes through the configured authentication chain.
When set to <code>false</code> to allow any clients with or without authentication information such as username or password to log in.
When set to <code>false</code>, any client (with or without username/password) is allowed to connect.
When set to <code>quick_deny_anonymous</code>, it behaves like when set to <code>true</code>, but clients will be
denied immediately without going through any authenticators if <code>username</code> is not provided. This is useful to fence off
anonymous clients early."""

@ -1577,4 +1577,16 @@ session_ds_session_gc_interval.desc:
session_ds_session_gc_batch_size.desc:
"""The size of each batch of expired persistent sessions to be garbage collected per iteration."""

session_ds_max_batch_size.desc:
"""This value affects the flow control for the persistent sessions.
The session queries the DB for the new messages in batches.
Size of the batch doesn't exceed this value or `ReceiveMaximum`, whichever is smaller."""

session_ds_min_batch_size.desc:
"""This value affects the flow control for the persistent sessions.
The session will query the DB for the new messages when the value of `FreeSpace` variable is larger than this value or `ReceiveMaximum` / 2, whichever is smaller.

`FreeSpace` is calculated as `ReceiveMaximum` for the session - number of inflight messages."""

}
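
Taken together, the two batch-size settings above describe a simple flow-control
check. A minimal sketch in Erlang of the rule exactly as documented (illustrative
names only, not the actual session code):

%% Fetch a new batch only when enough of the inflight window is free:
%% FreeSpace is ReceiveMaximum minus the inflight count, compared against
%% min(MinBatchSize, ReceiveMaximum div 2). Batches themselves are capped
%% at min(MaxBatchSize, ReceiveMaximum).
should_fetch(ReceiveMaximum, NInflight, MinBatchSize) ->
    FreeSpace = ReceiveMaximum - NInflight,
    FreeSpace > min(MinBatchSize, ReceiveMaximum div 2).

batch_size_limit(ReceiveMaximum, MaxBatchSize) ->
    min(MaxBatchSize, ReceiveMaximum).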