Merge remote-tracking branch 'upstream/release-56' into 0328-sync-release-56

Ivan Dyachkov 2024-03-28 09:59:54 +01:00
commit f4446ec680
79 changed files with 2135 additions and 592 deletions

View File

@@ -24,7 +24,7 @@ jobs:
       matrix:
         profile:
           - ['emqx', 'master', '5.3-2:1.15.7-26.2.1-2']
-          - ['emqx-enterprise', 'release-56', '5.3-2:1.15.7-25.3.2-2']
+          - ['emqx-enterprise', 'release-56', '5.3-2:1.15.7-26.2.1-2']
       os:
         - debian10
         - ubuntu22.04

View File

@@ -20,8 +20,8 @@ endif
 # Dashboard version
 # from https://github.com/emqx/emqx-dashboard5
-export EMQX_DASHBOARD_VERSION ?= v1.7.0
-export EMQX_EE_DASHBOARD_VERSION ?= e1.6.0-beta.5
+export EMQX_DASHBOARD_VERSION ?= v1.8.0
+export EMQX_EE_DASHBOARD_VERSION ?= e1.6.0

 PROFILE ?= emqx
 REL_PROFILES := emqx emqx-enterprise

View File

@@ -32,7 +32,7 @@
 %% `apps/emqx/src/bpapi/README.md'
 %% Opensource edition
--define(EMQX_RELEASE_CE, "5.6.0-rc.1").
+-define(EMQX_RELEASE_CE, "5.6.0").

 %% Enterprise edition
--define(EMQX_RELEASE_EE, "5.6.0-rc.1").
+-define(EMQX_RELEASE_EE, "5.6.0").

View File

@@ -116,9 +116,10 @@ app_specs() ->
     app_specs(_Opts = #{}).

 app_specs(Opts) ->
+    DefaultEMQXConf = "session_persistence {enable = true, renew_streams_interval = 1s}",
     ExtraEMQXConf = maps:get(extra_emqx_conf, Opts, ""),
     [
-        {emqx, "session_persistence = {enable = true}" ++ ExtraEMQXConf}
+        {emqx, DefaultEMQXConf ++ ExtraEMQXConf}
     ].

 get_mqtt_port(Node, Type) ->
@@ -132,15 +133,6 @@ wait_nodeup(Node) ->
         pong = net_adm:ping(Node)
     ).

-wait_gen_rpc_down(_NodeSpec = #{apps := Apps}) ->
-    #{override_env := Env} = proplists:get_value(gen_rpc, Apps),
-    Port = proplists:get_value(tcp_server_port, Env),
-    ?retry(
-        _Sleep0 = 500,
-        _Attempts0 = 50,
-        false = emqx_common_test_helpers:is_tcp_server_available("127.0.0.1", Port)
-    ).

 start_client(Opts0 = #{}) ->
     Defaults = #{
         port => 1883,

View File

@@ -69,6 +69,7 @@
 {emqx_resource,2}.
 {emqx_retainer,1}.
 {emqx_retainer,2}.
+{emqx_router,1}.
 {emqx_rule_engine,1}.
 {emqx_shared_sub,1}.
 {emqx_slow_subs,1}.

View File

@@ -28,7 +28,7 @@
     {gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}},
     {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
     {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.11.1"}}},
-    {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.19.0"}}},
+    {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.19.1"}}},
     {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.1"}}},
     {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.42.1"}}},
     {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}},

View File

@@ -86,8 +86,35 @@ supported_version(API) ->
 -spec announce(node(), atom()) -> ok.
 announce(Node, App) ->
     {ok, Data} = file:consult(?MODULE:versions_file(App)),
-    {atomic, ok} = mria:transaction(?COMMON_SHARD, fun ?MODULE:announce_fun/2, [Node, Data]),
-    ok.
+    %% A replicant on 5.6.0 may run this transaction on an old core (< 5.6.0)
+    %% where announce_fun/2 is undefined, so fall back to an anonymous function.
+    case mria:transaction(?COMMON_SHARD, fun ?MODULE:announce_fun/2, [Node, Data]) of
+        {atomic, ok} ->
+            ok;
+        {aborted, {undef, [{?MODULE, announce_fun, _, _} | _]}} ->
+            {atomic, ok} = mria:transaction(
+                ?COMMON_SHARD,
+                fun() ->
+                    MS = ets:fun2ms(fun(#?TAB{key = {N, API}}) when N =:= Node ->
+                        {N, API}
+                    end),
+                    OldKeys = mnesia:select(?TAB, MS, write),
+                    _ = [
+                        mnesia:delete({?TAB, Key})
+                     || Key <- OldKeys
+                    ],
+                    %% Insert new records:
+                    _ = [
+                        mnesia:write(#?TAB{key = {Node, API}, version = Version})
+                     || {API, Version} <- Data
+                    ],
+                    %% Update the minimum supported version:
+                    _ = [update_minimum(API) || {API, _} <- Data],
+                    ok
+                end
+            ),
+            ok
+    end.

 -spec versions_file(atom()) -> file:filename_all().
 versions_file(App) ->

View File

@@ -237,25 +237,32 @@ log_formatter(HandlerName, Conf) ->
             _ ->
                 conf_get("formatter", Conf)
         end,
+    TsFormat = timestamp_format(Conf),
     do_formatter(
-        Format, CharsLimit, SingleLine, TimeOffSet, Depth
+        Format, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat
     ).

+%% auto | epoch | rfc3339
+timestamp_format(Conf) ->
+    conf_get("timestamp_format", Conf).
+
 %% helpers
-do_formatter(json, CharsLimit, SingleLine, TimeOffSet, Depth) ->
+do_formatter(json, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat) ->
     {emqx_logger_jsonfmt, #{
         chars_limit => CharsLimit,
         single_line => SingleLine,
         time_offset => TimeOffSet,
-        depth => Depth
+        depth => Depth,
+        timestamp_format => TsFormat
     }};
-do_formatter(text, CharsLimit, SingleLine, TimeOffSet, Depth) ->
+do_formatter(text, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat) ->
     {emqx_logger_textfmt, #{
-        template => [time, " [", level, "] ", msg, "\n"],
+        template => ["[", level, "] ", msg, "\n"],
         chars_limit => CharsLimit,
         single_line => SingleLine,
         time_offset => TimeOffSet,
-        depth => Depth
+        depth => Depth,
+        timestamp_format => TsFormat
     }}.

 %% Don't record all logger message

View File

@@ -154,7 +154,7 @@ do_authorize(ClientInfo, Action, Topic) ->
     case run_hooks('client.authorize', [ClientInfo, Action, Topic], Default) of
         AuthzResult = #{result := Result} when Result == allow; Result == deny ->
             From = maps:get(from, AuthzResult, unknown),
-            ok = log_result(ClientInfo, Topic, Action, From, Result),
+            ok = log_result(Topic, Action, From, Result),
             emqx_hooks:run(
                 'client.check_authz_complete',
                 [ClientInfo, Action, Topic, Result, From]
@@ -173,24 +173,28 @@ do_authorize(ClientInfo, Action, Topic) ->
             deny
     end.

-log_result(#{username := Username}, Topic, Action, From, Result) ->
+log_result(Topic, Action, From, Result) ->
     LogMeta = fun() ->
         #{
-            username => Username,
             topic => Topic,
            action => format_action(Action),
            source => format_from(From)
         }
     end,
-    case Result of
-        allow ->
-            ?SLOG(info, (LogMeta())#{msg => "authorization_permission_allowed"});
-        deny ->
-            ?SLOG_THROTTLE(
-                warning,
-                (LogMeta())#{msg => authorization_permission_denied}
-            )
-    end.
+    do_log_result(Action, Result, LogMeta).
+
+do_log_result(_Action, allow, LogMeta) ->
+    ?SLOG(info, (LogMeta())#{msg => "authorization_permission_allowed"}, #{tag => "AUTHZ"});
+do_log_result(?AUTHZ_PUBLISH_MATCH_MAP(_, _), deny, LogMeta) ->
+    %% For the publish action, permission denial is not logged at warning level here,
+    %% because it is already logged as cannot_publish_to_topic_due_to_not_authorized.
+    ?SLOG(info, (LogMeta())#{msg => "authorization_permission_denied"}, #{tag => "AUTHZ"});
+do_log_result(_, deny, LogMeta) ->
+    ?SLOG_THROTTLE(
+        warning,
+        (LogMeta())#{msg => authorization_permission_denied},
+        #{tag => "AUTHZ"}
+    ).

 %% @private Format authorization rules source.
 format_from(default) ->

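Note: ?SLOG_THROTTLE/3 takes the same arguments as ?SLOG/3 and, judging by the call sites in this changeset, rate-limits repeated events sharing the same msg key (that keying is an assumption). A minimal sketch of a call, mirroring the clause above (the topic value is made up):

    ?SLOG_THROTTLE(
        warning,
        #{msg => authorization_permission_denied, topic => <<"t/1">>},
        #{tag => "AUTHZ"}
    )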
View File

@@ -642,7 +642,7 @@ process_publish(Packet = ?PUBLISH_PACKET(QoS, Topic, PacketId), Channel) ->
                     msg => cannot_publish_to_topic_due_to_not_authorized,
                     reason => emqx_reason_codes:name(Rc)
                 },
-                #{topic => Topic}
+                #{topic => Topic, tag => "AUTHZ"}
             ),
             case emqx:get_config([authorization, deny_action], ignore) of
                 ignore ->
@@ -661,7 +661,7 @@ process_publish(Packet = ?PUBLISH_PACKET(QoS, Topic, PacketId), Channel) ->
                     msg => cannot_publish_to_topic_due_to_quota_exceeded,
                     reason => emqx_reason_codes:name(Rc)
                 },
-                #{topic => Topic}
+                #{topic => Topic, tag => "AUTHZ"}
             ),
             case QoS of
                 ?QOS_0 ->
@@ -1166,9 +1166,11 @@ handle_call(
     kick,
     Channel = #channel{
         conn_state = ConnState,
-        conninfo = #{proto_ver := ProtoVer}
+        conninfo = #{proto_ver := ProtoVer},
+        session = Session
     }
 ) ->
+    emqx_session:destroy(Session),
     Channel0 = maybe_publish_will_msg(kicked, Channel),
     Channel1 =
         case ConnState of
@@ -1745,8 +1747,10 @@ fix_mountpoint(ClientInfo = #{mountpoint := MountPoint}) ->
 %%--------------------------------------------------------------------
 %% Set log metadata

-set_log_meta(_ConnPkt, #channel{clientinfo = #{clientid := ClientId}}) ->
-    emqx_logger:set_metadata_clientid(ClientId).
+set_log_meta(_ConnPkt, #channel{clientinfo = #{clientid := ClientId} = ClientInfo}) ->
+    Username = maps:get(username, ClientInfo, undefined),
+    emqx_logger:set_metadata_clientid(ClientId),
+    emqx_logger:set_metadata_username(Username).

 %%--------------------------------------------------------------------
 %% Check banned
@@ -1813,6 +1817,7 @@ authenticate(
                 Channel
             );
         _ ->
+            log_auth_failure("bad_authentication_method"),
             {error, ?RC_BAD_AUTHENTICATION_METHOD}
     end.
@@ -1839,6 +1844,7 @@ do_authenticate(
                 auth_cache = AuthCache
             }};
         {error, Reason} ->
+            log_auth_failure(Reason),
             {error, emqx_reason_codes:connack_error(Reason)}
     end;
 do_authenticate(Credential, #channel{clientinfo = ClientInfo} = Channel) ->
@@ -1846,9 +1852,20 @@ do_authenticate(Credential, #channel{clientinfo = ClientInfo} = Channel) ->
         {ok, AuthResult} ->
             {ok, #{}, Channel#channel{clientinfo = merge_auth_result(ClientInfo, AuthResult)}};
         {error, Reason} ->
+            log_auth_failure(Reason),
             {error, emqx_reason_codes:connack_error(Reason)}
     end.

+log_auth_failure(Reason) ->
+    ?SLOG_THROTTLE(
+        warning,
+        #{
+            msg => authentication_failure,
+            reason => Reason
+        },
+        #{tag => "AUTHN"}
+    ).
+
 %% Merge authentication result into ClientInfo
 %% Authentication result may include:
 %% 1. `is_superuser': The superuser flag from various backends

View File

@@ -36,8 +36,7 @@
     max_size/1,
     is_full/1,
     is_empty/1,
-    window/1,
-    query/2
+    window/1
 ]).

 -export_type([inflight/0]).
@@ -139,47 +138,3 @@ size(?INFLIGHT(Tree)) ->
 -spec max_size(inflight()) -> non_neg_integer().
 max_size(?INFLIGHT(MaxSize, _Tree)) ->
     MaxSize.
-
--spec query(inflight(), #{continuation => Cont, limit := L}) ->
-    {[{key(), term()}], #{continuation := Cont, count := C}}
-when
-    Cont :: none | end_of_data | key(),
-    L :: non_neg_integer(),
-    C :: non_neg_integer().
-query(?INFLIGHT(Tree), #{limit := Limit} = Pager) ->
-    Count = gb_trees:size(Tree),
-    ContKey = maps:get(continuation, Pager, none),
-    {List, NextCont} = sublist(iterator_from(ContKey, Tree), Limit),
-    {List, #{continuation => NextCont, count => Count}}.
-
-iterator_from(none, Tree) ->
-    gb_trees:iterator(Tree);
-iterator_from(ContKey, Tree) ->
-    It = gb_trees:iterator_from(ContKey, Tree),
-    case gb_trees:next(It) of
-        {ContKey, _Val, ItNext} -> ItNext;
-        _ -> It
-    end.
-
-sublist(_It, 0) ->
-    {[], none};
-sublist(It, Len) ->
-    {ListAcc, HasNext} = sublist(It, Len, []),
-    {lists:reverse(ListAcc), next_cont(ListAcc, HasNext)}.
-
-sublist(It, 0, Acc) ->
-    {Acc, gb_trees:next(It) =/= none};
-sublist(It, Len, Acc) ->
-    case gb_trees:next(It) of
-        none ->
-            {Acc, false};
-        {Key, Val, ItNext} ->
-            sublist(ItNext, Len - 1, [{Key, Val} | Acc])
-    end.
-
-next_cont(_Acc, false) ->
-    end_of_data;
-next_cont([{LastKey, _LastVal} | _Acc], _HasNext) ->
-    LastKey;
-next_cont([], _HasNext) ->
-    end_of_data.

View File

@@ -43,6 +43,7 @@
 -export([
     set_metadata_peername/1,
     set_metadata_clientid/1,
+    set_metadata_username/1,
     set_proc_metadata/1,
     set_primary_log_level/1,
     set_log_handler_level/2,
@@ -142,6 +143,12 @@ set_metadata_clientid(<<>>) ->
 set_metadata_clientid(ClientId) ->
     set_proc_metadata(#{clientid => ClientId}).

+-spec set_metadata_username(emqx_types:username()) -> ok.
+set_metadata_username(Username) when Username =:= undefined orelse Username =:= <<>> ->
+    ok;
+set_metadata_username(Username) ->
+    set_proc_metadata(#{username => Username}).
+
 -spec set_metadata_peername(peername_str()) -> ok.
 set_metadata_peername(Peername) ->
     set_proc_metadata(#{peername => Peername}).

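Note: logger metadata is per-process, so once a connection process runs both setters, every subsequent log line it emits carries the clientid and username fields. A usage sketch (the values are made up):

    emqx_logger:set_metadata_clientid(<<"c1">>),
    emqx_logger:set_metadata_username(<<"alice">>)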
View File

@@ -285,9 +285,21 @@ json_obj_root(Data0, Config) ->
     ),
     lists:filter(
         fun({_, V}) -> V =/= undefined end,
-        [{time, Time}, {level, Level}, {msg, Msg}]
+        [{time, format_ts(Time, Config)}, {level, Level}, {msg, Msg}]
     ) ++ Data.

+format_ts(Ts, #{timestamp_format := rfc3339, time_offset := Offset}) when is_integer(Ts) ->
+    iolist_to_binary(
+        calendar:system_time_to_rfc3339(Ts, [
+            {unit, microsecond},
+            {offset, Offset},
+            {time_designator, $T}
+        ])
+    );
+format_ts(Ts, _Config) ->
+    %% auto | epoch
+    Ts.
+
 json_obj(Data, Config) ->
     maps:fold(
         fun(K, V, D) ->

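Note: a quick shell sketch of what the rfc3339 branch of format_ts/2 produces (the timestamp happens to match this commit's date; the offset is an example). With epoch the raw integer is kept, and auto falls through to the formatter default:

    1> Ts = 1711616394000000.  %% microseconds
    2> calendar:system_time_to_rfc3339(Ts, [{unit, microsecond}, {offset, "+01:00"}, {time_designator, $T}]).
    "2024-03-28T09:59:54.000000+01:00"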
View File

@@ -20,7 +20,7 @@
 -export([check_config/1]).
 -export([try_format_unicode/1]).

-check_config(X) -> logger_formatter:check_config(X).
+check_config(X) -> logger_formatter:check_config(maps:without([timestamp_format], X)).

 %% Principle here is to delegate the formatting to logger_formatter:format/2
 %% as much as possible, and only enrich the report with clientid, peername, topic, username
@@ -35,7 +35,7 @@ format(#{msg := {report, ReportMap}, meta := Meta} = Event, Config) when is_map(
             false ->
                 maps:from_list(ReportList)
         end,
-    logger_formatter:format(Event#{msg := {report, Report}}, Config);
+    fmt(Event#{msg := {report, Report}}, Config);
 format(#{msg := {string, String}} = Event, Config) ->
     %% copied from logger_formatter:format/2
     %% unsure how this case is triggered
@@ -45,7 +45,23 @@ format(#{msg := Msg0, meta := Meta} = Event, Config) ->
     %% and logger:log(Level, "message", #{key => value})
     Msg1 = enrich_client_info(Msg0, Meta),
     Msg2 = enrich_topic(Msg1, Meta),
-    logger_formatter:format(Event#{msg := Msg2}, Config).
+    fmt(Event#{msg := Msg2}, Config).
+
+fmt(#{meta := #{time := Ts}} = Data, Config) ->
+    Timestamp =
+        case Config of
+            #{timestamp_format := epoch} ->
+                integer_to_list(Ts);
+            _ ->
+                %% auto | rfc3339
+                TimeOffset = maps:get(time_offset, Config, ""),
+                calendar:system_time_to_rfc3339(Ts, [
+                    {unit, microsecond},
+                    {offset, TimeOffset},
+                    {time_designator, $T}
+                ])
+        end,
+    [Timestamp, " ", logger_formatter:format(Data, Config)].

 %% Other report callbacks may only accept map() reports such as gen_server formatter
 is_list_report_acceptable(#{report_cb := Cb}) ->
@@ -69,7 +85,9 @@ enrich_report(ReportRaw, Meta) ->
     ClientId = maps:get(clientid, Meta, undefined),
     Peer = maps:get(peername, Meta, undefined),
     Msg = maps:get(msg, ReportRaw, undefined),
-    Tag = maps:get(tag, ReportRaw, undefined),
+    %% TODO: move all tags to Meta so we can filter traces
+    %% based on tags (currently not supported)
+    Tag = maps:get(tag, ReportRaw, maps:get(tag, Meta, undefined)),
     %% turn it into a list so that the order of the fields is determined
     lists:foldl(
         fun

View File

@@ -98,6 +98,7 @@
 -define(HIGHEST_PRIORITY, infinity).
 -define(MAX_LEN_INFINITY, 0).
 -define(INFO_KEYS, [store_qos0, max_len, len, dropped]).
+-define(INSERT_TS, mqueue_insert_ts).

 -record(shift_opts, {
     multiplier :: non_neg_integer(),
@@ -172,54 +173,82 @@ filter(Pred, #mqueue{q = Q, len = Len, dropped = Droppend} = MQ) ->
             MQ#mqueue{q = Q2, len = Len2, dropped = Droppend + Diff}
     end.

--spec query(mqueue(), #{continuation => ContMsgId, limit := L}) ->
-    {[message()], #{continuation := ContMsgId, count := C}}
-when
-    ContMsgId :: none | end_of_data | binary(),
-    C :: non_neg_integer(),
-    L :: non_neg_integer().
-query(MQ, #{limit := Limit} = Pager) ->
-    ContMsgId = maps:get(continuation, Pager, none),
-    {List, NextCont} = sublist(skip_until(MQ, ContMsgId), Limit),
-    {List, #{continuation => NextCont, count => len(MQ)}}.
-
-skip_until(MQ, none = _MsgId) ->
-    MQ;
-skip_until(MQ, MsgId) ->
-    do_skip_until(MQ, MsgId).
-
-do_skip_until(MQ, MsgId) ->
-    case out(MQ) of
-        {empty, MQ} ->
-            MQ;
-        {{value, #message{id = MsgId}}, Q1} ->
-            Q1;
-        {{value, _Msg}, Q1} ->
-            do_skip_until(Q1, MsgId)
-    end.
-
-sublist(_MQ, 0) ->
-    {[], none};
-sublist(MQ, Len) ->
-    {ListAcc, HasNext} = sublist(MQ, Len, []),
-    {lists:reverse(ListAcc), next_cont(ListAcc, HasNext)}.
-
-sublist(MQ, 0, Acc) ->
-    {Acc, element(1, out(MQ)) =/= empty};
-sublist(MQ, Len, Acc) ->
-    case out(MQ) of
-        {empty, _MQ} ->
-            {Acc, false};
-        {{value, Msg}, Q1} ->
-            sublist(Q1, Len - 1, [Msg | Acc])
-    end.
-
-next_cont(_Acc, false) ->
-    end_of_data;
-next_cont([#message{id = Id} | _Acc], _HasNext) ->
-    Id;
-next_cont([], _HasNext) ->
-    end_of_data.
+-spec query(mqueue(), #{position => Pos, limit := Limit}) ->
+    {[message()], #{position := Pos, start := Pos}}
+when
+    Pos :: none | {integer(), priority()},
+    Limit :: non_neg_integer().
+query(MQ, #{limit := Limit} = PagerParams) ->
+    Pos = maps:get(position, PagerParams, none),
+    PQsList = ?PQUEUE:to_queues_list(MQ#mqueue.q),
+    {Msgs, NxtPos} = sublist(skip_until(PQsList, Pos), Limit, [], Pos),
+    {Msgs, #{position => NxtPos, start => first_msg_pos(PQsList)}}.
+
+first_msg_pos([]) ->
+    none;
+first_msg_pos([{Prio, PQ} | T]) ->
+    case ?PQUEUE:out(PQ) of
+        {empty, _PQ} ->
+            first_msg_pos(T);
+        {{value, Msg}, _Q} ->
+            {insert_ts(Msg), Prio}
+    end.
+
+skip_until(PQsList, none = _Pos) ->
+    PQsList;
+skip_until(PQsList, {MsgPos, PrioPos}) ->
+    case skip_until_prio(PQsList, PrioPos) of
+        [{Prio, PQ} | T] ->
+            PQ1 = skip_until_msg(PQ, MsgPos),
+            [{Prio, PQ1} | T];
+        [] ->
+            []
+    end.
+
+skip_until_prio(PQsList, PrioPos) ->
+    lists:dropwhile(fun({Prio, _PQ}) -> Prio > PrioPos end, PQsList).
+
+skip_until_msg(PQ, MsgPos) ->
+    case ?PQUEUE:out(PQ) of
+        {empty, PQ1} ->
+            PQ1;
+        {{value, Msg}, PQ1} ->
+            case insert_ts(Msg) > MsgPos of
+                true -> PQ;
+                false -> skip_until_msg(PQ1, MsgPos)
+            end
+    end.
+
+sublist(PQs, Len, Acc, LastPosPrio) when PQs =:= []; Len =:= 0 ->
+    {Acc, LastPosPrio};
+sublist([{Prio, PQ} | T], Len, Acc, LastPosPrio) ->
+    {SingleQAcc, SingleQSize} = sublist_single_pq(Prio, PQ, Len, [], 0),
+    Acc1 = Acc ++ lists:reverse(SingleQAcc),
+    NxtPosPrio =
+        case SingleQAcc of
+            [H | _] -> {insert_ts(H), Prio};
+            [] -> LastPosPrio
+        end,
+    case SingleQSize =:= Len of
+        true ->
+            {Acc1, NxtPosPrio};
+        false ->
+            sublist(T, Len - SingleQSize, Acc1, NxtPosPrio)
+    end.
+
+sublist_single_pq(_Prio, _PQ, 0, Acc, AccSize) ->
+    {Acc, AccSize};
+sublist_single_pq(Prio, PQ, Len, Acc, AccSize) ->
+    case ?PQUEUE:out(0, PQ) of
+        {empty, _PQ} ->
+            {Acc, AccSize};
+        {{value, Msg}, PQ1} ->
+            Msg1 = with_prio(Msg, Prio),
+            sublist_single_pq(Prio, PQ1, Len - 1, [Msg1 | Acc], AccSize + 1)
+    end.
+
+with_prio(#message{extra = Extra} = Msg, Prio) ->
+    Msg#message{extra = Extra#{mqueue_priority => Prio}}.

 to_list(MQ, Acc) ->
     case out(MQ) of
@@ -256,14 +285,15 @@ in(
 ) ->
     Priority = get_priority(Topic, PTab, Dp),
     PLen = ?PQUEUE:plen(Priority, Q),
+    Msg1 = with_ts(Msg),
     case MaxLen =/= ?MAX_LEN_INFINITY andalso PLen =:= MaxLen of
         true ->
             %% reached max length, drop the oldest message
             {{value, DroppedMsg}, Q1} = ?PQUEUE:out(Priority, Q),
-            Q2 = ?PQUEUE:in(Msg, Priority, Q1),
-            {DroppedMsg, MQ#mqueue{q = Q2, dropped = Dropped + 1}};
+            Q2 = ?PQUEUE:in(Msg1, Priority, Q1),
+            {without_ts(DroppedMsg), MQ#mqueue{q = Q2, dropped = Dropped + 1}};
         false ->
-            {_DroppedMsg = undefined, MQ#mqueue{len = Len + 1, q = ?PQUEUE:in(Msg, Priority, Q)}}
+            {_DroppedMsg = undefined, MQ#mqueue{len = Len + 1, q = ?PQUEUE:in(Msg1, Priority, Q)}}
     end.

 -spec out(mqueue()) -> {empty | {value, message()}, mqueue()}.
@@ -280,7 +310,7 @@ out(MQ = #mqueue{q = Q, len = Len, last_prio = undefined, shift_opts = ShiftOpts
         last_prio = Prio,
         p_credit = get_credits(Prio, ShiftOpts)
     },
-    {{value, Val}, MQ1};
+    {{value, without_ts(Val)}, MQ1};
 out(MQ = #mqueue{q = Q, p_credit = 0}) ->
     MQ1 = MQ#mqueue{
         q = ?PQUEUE:shift(Q),
@@ -288,8 +318,12 @@ out(MQ = #mqueue{q = Q, p_credit = 0}) ->
     },
     out(MQ1);
 out(MQ = #mqueue{q = Q, len = Len, p_credit = Cnt}) ->
-    {R, Q1} = ?PQUEUE:out(Q),
-    {R, MQ#mqueue{q = Q1, len = Len - 1, p_credit = Cnt - 1}}.
+    {R, Q2} =
+        case ?PQUEUE:out(Q) of
+            {{value, Val}, Q1} -> {{value, without_ts(Val)}, Q1};
+            Other -> Other
+        end,
+    {R, MQ#mqueue{q = Q2, len = Len - 1, p_credit = Cnt - 1}}.

 get_opt(Key, Opts, Default) ->
     case maps:get(Key, Opts, Default) of
@@ -359,3 +393,23 @@ p_table(PTab = #{}) ->
     );
 p_table(PTab) ->
     PTab.
+
+%% This is used to sort/traverse messages in query/2
+with_ts(#message{extra = Extra} = Msg) ->
+    TsNano = erlang:system_time(nanosecond),
+    Extra1 =
+        case is_map(Extra) of
+            true -> Extra;
+            %% The extra field was not used before EMQX 5.4.0 and defaulted to
+            %% an empty list; if it's not a map, it's safe to overwrite it.
+            false -> #{}
+        end,
+    Msg#message{extra = Extra1#{?INSERT_TS => TsNano}}.
+
+without_ts(#message{extra = Extra} = Msg) when is_map(Extra) ->
+    Msg#message{extra = maps:remove(?INSERT_TS, Extra)};
+without_ts(Msg) ->
+    Msg.
+
+insert_ts(#message{extra = #{?INSERT_TS := Ts}}) -> Ts.

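Note: query/2 now pages by message insertion timestamp rather than an opaque continuation. A hypothetical loop draining a queue page by page (the module name and page size are assumptions; the contract is taken from the diff above):

    drain(MQ) ->
        drain(MQ, none, []).

    drain(MQ, Pos, Acc) ->
        case emqx_mqueue:query(MQ, #{position => Pos, limit => 100}) of
            {[], _Meta} ->
                lists:append(lists:reverse(Acc));
            {Msgs, #{position := NextPos}} ->
                drain(MQ, NextPos, [Msgs | Acc])
        end.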
View File

@@ -36,7 +36,8 @@
 -export([
     create/4,
     open/4,
-    destroy/1
+    destroy/1,
+    kick_offline_session/1
 ]).

 -export([
@@ -220,6 +221,15 @@ destroy(#{clientid := ClientID}) ->
 destroy_session(ClientID) ->
     session_drop(ClientID, destroy).

+-spec kick_offline_session(emqx_types:clientid()) -> ok.
+kick_offline_session(ClientID) ->
+    case emqx_persistent_message:is_persistence_enabled() of
+        true ->
+            session_drop(ClientID, kicked);
+        false ->
+            ok
+    end.
+
 %%--------------------------------------------------------------------
 %% Info, Stats
 %%--------------------------------------------------------------------
@@ -292,7 +302,9 @@ info(awaiting_rel_max, #{props := Conf}) ->
 info(await_rel_timeout, #{props := _Conf}) ->
     %% TODO: currently this setting is ignored:
     %% maps:get(await_rel_timeout, Conf).
-    0.
+    0;
+info({MsgsQ, _PagerParams}, _Session) when MsgsQ =:= mqueue_msgs; MsgsQ =:= inflight_msgs ->
+    {error, not_implemented}.

 -spec stats(session()) -> emqx_types:stats().
 stats(Session) ->

View File

@@ -21,11 +21,12 @@
 -record(ps_route, {
     topic :: binary(),
-    dest :: emqx_persistent_session_ds:id()
+    dest :: emqx_persistent_session_ds:id() | '_'
 }).

 -record(ps_routeidx, {
     entry :: '$1' | emqx_topic_index:key(emqx_persistent_session_ds_router:dest()),
-    unused = [] :: nil()
+    unused = [] :: nil() | '_'
 }).

 -endif.

View File

@@ -32,6 +32,12 @@
     foldl_routes/2
 ]).

+%% Topics API
+-export([
+    stream/1,
+    stats/1
+]).
+
 -export([cleanup_routes/1]).
 -export([print_routes/1]).
 -export([topics/0]).
@@ -196,6 +202,26 @@ foldl_routes(FoldFun, AccIn) ->
 foldr_routes(FoldFun, AccIn) ->
     fold_routes(foldr, FoldFun, AccIn).

+%%--------------------------------------------------------------------
+%% Topic API
+%%--------------------------------------------------------------------
+
+%% @doc Create a `emqx_utils_stream:stream(#route{})` out of the router state,
+%% potentially filtered by a topic or topic filter. The stream emits `#route{}`
+%% records since this is what `emqx_mgmt_api_topics` knows how to deal with.
+-spec stream(_MTopic :: '_' | emqx_types:topic()) ->
+    emqx_utils_stream:stream(emqx_types:route()).
+stream(MTopic) ->
+    emqx_utils_stream:chain(stream(?PS_ROUTER_TAB, MTopic), stream(?PS_FILTERS_TAB, MTopic)).
+
+%% @doc Retrieve router stats.
+%% n_routes: total number of routes, should be equal to the length of `stream('_')`.
+-spec stats(n_routes) -> non_neg_integer().
+stats(n_routes) ->
+    NTopics = ets:info(?PS_ROUTER_TAB, size),
+    NFilters = ets:info(?PS_FILTERS_TAB, size),
+    emqx_maybe:define(NTopics, 0) + emqx_maybe:define(NFilters, 0).
+
 %%--------------------------------------------------------------------
 %% Internal fns
 %%--------------------------------------------------------------------
@@ -225,6 +251,12 @@ get_dest_session_id({_, DSSessionId}) ->
 get_dest_session_id(DSSessionId) ->
     DSSessionId.

+export_route(#ps_route{topic = Topic, dest = Dest}) ->
+    #route{topic = Topic, dest = Dest}.
+
+export_routeidx(#ps_routeidx{entry = M}) ->
+    #route{topic = emqx_topic_index:get_topic(M), dest = emqx_topic_index:get_id(M)}.
+
 match_to_route(M) ->
     #ps_route{topic = emqx_topic_index:get_topic(M), dest = emqx_topic_index:get_id(M)}.
@@ -242,3 +274,35 @@ list_route_tab_topics() ->
 mria_route_tab_delete(Route) ->
     mria:dirty_delete_object(?PS_ROUTER_TAB, Route).
+
+%% @doc Create a `emqx_utils_stream:stream(#route{})` out of contents of either of
+%% the 2 route tables, optionally filtered by a topic or topic filter. If the latter
+%% is specified, then it doesn't make sense to scan through `?PS_ROUTER_TAB` if it's
+%% a wildcard topic, and vice versa for `?PS_FILTERS_TAB` if it's not, so we optimize
+%% it away by returning an empty stream in those cases.
+stream(Tab = ?PS_ROUTER_TAB, MTopic) ->
+    case MTopic == '_' orelse not emqx_topic:wildcard(MTopic) of
+        true ->
+            MatchSpec = #ps_route{topic = MTopic, _ = '_'},
+            mk_tab_stream(Tab, MatchSpec, fun export_route/1);
+        false ->
+            emqx_utils_stream:empty()
+    end;
+stream(Tab = ?PS_FILTERS_TAB, MTopic) ->
+    case MTopic == '_' orelse emqx_topic:wildcard(MTopic) of
+        true ->
+            MatchSpec = #ps_routeidx{entry = emqx_trie_search:make_pat(MTopic, '_'), _ = '_'},
+            mk_tab_stream(Tab, MatchSpec, fun export_routeidx/1);
+        false ->
+            emqx_utils_stream:empty()
+    end.
+
+mk_tab_stream(Tab, MatchSpec, Mapper) ->
+    %% NOTE: Currently relying on the fact that tables are backed by ETSes.
+    emqx_utils_stream:map(
+        Mapper,
+        emqx_utils_stream:ets(fun
+            (undefined) -> ets:match_object(Tab, MatchSpec, 1);
+            (Cont) -> ets:match_object(Cont)
+        end)
+    ).

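Note: the returned stream follows the emqx_utils_stream:consume/2 contract visible in the removed emqx_router:select_next/2 further down: {Items, Rest} while more data remains, or a plain list at the end. A sketch of paging through it (the page size is an assumption):

    page_routes(MTopic) ->
        page_routes(emqx_persistent_session_ds_router:stream(MTopic), []).

    page_routes(Stream, Acc) ->
        case emqx_utils_stream:consume(1000, Stream) of
            {Routes, Rest} -> page_routes(Rest, [Routes | Acc]);
            Routes -> lists:append(lists:reverse([Routes | Acc]))
        end.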
View File

@@ -46,6 +46,7 @@
     len/1,
     plen/2,
     to_list/1,
+    to_queues_list/1,
     from_list/1,
     in/2,
     in/3,
@@ -121,6 +122,18 @@ to_list({pqueue, Queues}) ->
         {0, V} <- to_list(Q)
     ].

+-spec to_queues_list(pqueue()) -> [{priority(), squeue()}].
+to_queues_list({queue, _In, _Out, _Len} = Squeue) ->
+    [{0, Squeue}];
+to_queues_list({pqueue, Queues}) ->
+    lists:sort(
+        fun
+            ({infinity = _P1, _}, {_P2, _}) -> true;
+            ({P1, _}, {P2, _}) -> P1 >= P2
+        end,
+        [{maybe_negate_priority(P), Q} || {P, Q} <- Queues]
+    ).
+
 -spec from_list([{priority(), any()}]) -> pqueue().
 from_list(L) ->
     lists:foldl(fun({P, E}, Q) -> in(E, P, Q) end, new(), L).

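Note: non-zero priorities are stored negated inside the pqueue (hence maybe_negate_priority/1 in the comprehension), so to_queues_list/1 yields per-priority queues sorted by descending user-facing priority, with infinity first. A worked sketch, assuming ?PQUEUE expands to emqx_pqueue and with made-up values:

    Q = emqx_pqueue:from_list([{0, a}, {2, b}, {1, c}]),
    [P || {P, _SQ} <- emqx_pqueue:to_queues_list(Q)].
    %% -> [2, 1, 0]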
View File

@@ -58,7 +58,7 @@
 ]).

 %% Topics API
--export([select/3]).
+-export([stream/1]).

 -export([print_routes/1]).
@@ -92,9 +92,11 @@
 ]).

 -export_type([dest/0]).
+-export_type([schemavsn/0]).

 -type group() :: binary().
 -type dest() :: node() | {group(), node()}.
+-type schemavsn() :: v1 | v2.

 %% Operation :: {add, ...} | {delete, ...}.
 -type batch() :: #{batch_route() => _Operation :: tuple()}.
@@ -266,18 +268,15 @@ mria_batch_v1(Batch) ->
 batch_get_action(Op) ->
     element(1, Op).

--spec select(Spec, _Limit :: pos_integer(), Continuation) ->
-    {[emqx_types:route()], Continuation} | '$end_of_table'
-when
-    Spec :: {_TopicPat, _DestPat},
-    Continuation :: term() | '$end_of_table'.
-select(MatchSpec, Limit, Cont) ->
-    select(get_schema_vsn(), MatchSpec, Limit, Cont).
-
-select(v2, MatchSpec, Limit, Cont) ->
-    select_v2(MatchSpec, Limit, Cont);
-select(v1, MatchSpec, Limit, Cont) ->
-    select_v1(MatchSpec, Limit, Cont).
+-spec stream(_Spec :: {_TopicPat, _DestPat}) ->
+    emqx_utils_stream:stream(emqx_types:route()).
+stream(MatchSpec) ->
+    stream(get_schema_vsn(), MatchSpec).
+
+stream(v2, MatchSpec) ->
+    stream_v2(MatchSpec);
+stream(v1, MatchSpec) ->
+    stream_v1(MatchSpec).

 -spec topics() -> list(emqx_types:topic()).
 topics() ->
@@ -452,10 +451,8 @@ cleanup_routes_v1_fallback(Node) ->
         ]
     end).

-select_v1({MTopic, MDest}, Limit, undefined) ->
-    ets:match_object(?ROUTE_TAB, #route{topic = MTopic, dest = MDest}, Limit);
-select_v1(_Spec, _Limit, Cont) ->
-    ets:select(Cont).
+stream_v1(Spec) ->
+    mk_route_stream(?ROUTE_TAB, Spec).

 list_topics_v1() ->
     list_route_tab_topics().
@@ -591,36 +588,27 @@ make_route_rec_pat(DestPattern) ->
         [{1, route}, {#route.dest, DestPattern}]
     ).

-select_v2(Spec, Limit, undefined) ->
-    Stream = mk_route_stream(Spec),
-    select_next(Limit, Stream);
-select_v2(_Spec, Limit, Stream) ->
-    select_next(Limit, Stream).
-
-select_next(N, Stream) ->
-    case emqx_utils_stream:consume(N, Stream) of
-        {Routes, SRest} ->
-            {Routes, SRest};
-        Routes ->
-            {Routes, '$end_of_table'}
-    end.
-
-mk_route_stream(Spec) ->
+stream_v2(Spec) ->
     emqx_utils_stream:chain(
-        mk_route_stream(route, Spec),
-        mk_route_stream(filter, Spec)
+        mk_route_stream(?ROUTE_TAB, Spec),
+        mk_route_stream(?ROUTE_TAB_FILTERS, Spec)
     ).

-mk_route_stream(route, Spec) ->
-    emqx_utils_stream:ets(fun(Cont) -> select_v1(Spec, 1, Cont) end);
-mk_route_stream(filter, {MTopic, MDest}) ->
+mk_route_stream(Tab = ?ROUTE_TAB, {MTopic, MDest}) ->
+    emqx_utils_stream:ets(fun
+        (undefined) ->
+            ets:match_object(Tab, #route{topic = MTopic, dest = MDest}, 1);
+        (Cont) ->
+            ets:match_object(Cont)
+    end);
+mk_route_stream(Tab = ?ROUTE_TAB_FILTERS, {MTopic, MDest}) ->
     emqx_utils_stream:map(
         fun routeidx_to_route/1,
         emqx_utils_stream:ets(
             fun
                 (undefined) ->
                     MatchSpec = #routeidx{entry = emqx_trie_search:make_pat(MTopic, MDest)},
-                    ets:match_object(?ROUTE_TAB_FILTERS, MatchSpec, 1);
+                    ets:match_object(Tab, MatchSpec, 1);
                 (Cont) ->
                     ets:match_object(Cont)
             end
@@ -657,8 +645,8 @@ match_to_route(M) ->
 -define(PT_SCHEMA_VSN, {?MODULE, schemavsn}).

--type schemavsn() :: v1 | v2.
-
+%% @doc Get the schema version in use.
+%% BPAPI RPC Target @ emqx_router_proto
 -spec get_schema_vsn() -> schemavsn().
 get_schema_vsn() ->
     persistent_term:get(?PT_SCHEMA_VSN).
@@ -668,23 +656,23 @@ init_schema() ->
     ok = mria:wait_for_tables([?ROUTE_TAB, ?ROUTE_TAB_FILTERS]),
     ok = emqx_trie:wait_for_tables(),
     ConfSchema = emqx_config:get([broker, routing, storage_schema]),
-    Schema = choose_schema_vsn(ConfSchema),
+    {ClusterSchema, ClusterState} = discover_cluster_schema_vsn(),
+    Schema = choose_schema_vsn(ConfSchema, ClusterSchema, ClusterState),
     ok = persistent_term:put(?PT_SCHEMA_VSN, Schema),
-    case Schema of
-        ConfSchema ->
+    case Schema =:= ConfSchema of
+        true ->
             ?SLOG(info, #{
                 msg => "routing_schema_used",
                 schema => Schema
             });
-        _ ->
+        false ->
             ?SLOG(notice, #{
                 msg => "configured_routing_schema_ignored",
                 schema_in_use => Schema,
                 configured => ConfSchema,
                 reason =>
                     "Could not use configured routing storage schema because "
-                    "there are already non-empty routing tables pertaining to "
-                    "another schema."
+                    "the cluster is already running with a different schema."
             })
     end.
@@ -693,34 +681,147 @@ deinit_schema() ->
     _ = persistent_term:erase(?PT_SCHEMA_VSN),
     ok.

--spec choose_schema_vsn(schemavsn()) -> schemavsn().
-choose_schema_vsn(ConfType) ->
-    IsEmptyIndex = emqx_trie:empty(),
-    IsEmptyFilters = is_empty(?ROUTE_TAB_FILTERS),
-    case {IsEmptyIndex, IsEmptyFilters} of
-        {true, true} ->
-            ConfType;
-        {false, true} ->
-            v1;
-        {true, false} ->
-            v2;
-        {false, false} ->
-            ?SLOG(critical, #{
-                msg => "conflicting_routing_schemas_detected_in_cluster",
-                configured => ConfType,
-                reason =>
-                    "There are records in the routing tables related to both v1 "
-                    "and v2 storage schemas. This probably means that some nodes "
-                    "in the cluster use v1 schema and some use v2, independently "
-                    "of each other. The routing is likely broken. Manual intervention "
-                    "and full cluster restart is required. This node will shut down."
-            }),
-            error(conflicting_routing_schemas_detected_in_cluster)
-    end.
+-spec discover_cluster_schema_vsn() ->
+    {schemavsn() | undefined, _State :: [{node(), schemavsn() | undefined, _Details}]}.
+discover_cluster_schema_vsn() ->
+    discover_cluster_schema_vsn(emqx:running_nodes() -- [node()]).
+
+-spec discover_cluster_schema_vsn([node()]) ->
+    {schemavsn() | undefined, _State :: [{node(), schemavsn() | undefined, _Details}]}.
+discover_cluster_schema_vsn([]) ->
+    %% single node
+    {undefined, []};
+discover_cluster_schema_vsn(Nodes) ->
+    Responses = lists:zipwith(
+        fun
+            (Node, {ok, Schema}) ->
+                {Node, Schema, configured};
+            (Node, {error, {exception, undef, _Stacktrace}}) ->
+                %% No such function on the remote node, assuming it doesn't know about v2 routing.
+                {Node, v1, legacy};
+            (Node, {error, {exception, badarg, _Stacktrace}}) ->
+                %% Likely, the persistent term is not defined yet.
+                {Node, unknown, starting};
+            (Node, Error) ->
+                {Node, unknown, Error}
+        end,
+        Nodes,
+        emqx_router_proto_v1:get_routing_schema_vsn(Nodes)
+    ),
+    case lists:usort([Vsn || {_Node, Vsn, _} <- Responses, Vsn /= unknown]) of
+        [Vsn] when Vsn =:= v1; Vsn =:= v2 ->
+            {Vsn, Responses};
+        [] ->
+            ?SLOG(warning, #{
+                msg => "cluster_routing_schema_discovery_failed",
+                responses => Responses,
+                reason =>
+                    "Could not determine the configured routing storage schema on peer nodes."
+            }),
+            {undefined, Responses};
+        [_ | _] ->
+            Desc = schema_conflict_reason(config, Responses),
+            io:format(standard_error, "Error: ~ts~n", [Desc]),
+            ?SLOG(critical, #{
+                msg => "conflicting_routing_schemas_in_cluster",
+                responses => Responses,
+                description => Desc
+            }),
+            error(conflicting_routing_schemas_configured_in_cluster)
+    end.
+
+-spec choose_schema_vsn(
+    schemavsn(),
+    _ClusterSchema :: schemavsn() | undefined,
+    _ClusterState :: [{node(), schemavsn() | undefined, _Details}]
+) -> schemavsn().
+choose_schema_vsn(ConfSchema, ClusterSchema, State) ->
+    case detect_table_schema_vsn() of
+        [] ->
+            %% No records in the tables: use the schema configured in the cluster
+            %% if any, otherwise use the configured one.
+            emqx_maybe:define(ClusterSchema, ConfSchema);
+        [Schema] when Schema =:= ClusterSchema ->
+            %% Table contents match the schema configured in the cluster.
+            Schema;
+        [Schema] when ClusterSchema =:= undefined ->
+            %% There are existing records following some schema, we have to use it.
+            Schema;
+        _Conflicting when ClusterSchema =/= undefined ->
+            %% There are existing records in both the v1 and v2 schema,
+            %% so we have to use what the peer nodes agreed on,
+            %% because it could be THIS node which caused the conflict.
+            %%
+            %% The stale records will be left over, but they are harmless.
+            Desc =
+                "Conflicting schema version detected for routing records, but "
+                "all the peer nodes are running the same version, so this node "
+                "will use the same schema but discard the harmless stale records. "
+                "This warning will go away after the next full cluster (non-rolling) restart.",
+            ?SLOG(warning, #{
+                msg => "conflicting_routing_storage_detected",
+                resolved => ClusterSchema,
+                description => Desc
+            }),
+            ClusterSchema;
+        _Conflicting ->
+            Desc = schema_conflict_reason(records, State),
+            io:format(standard_error, "Error: ~ts~n", [Desc]),
+            ?SLOG(critical, #{
+                msg => "conflicting_routing_storage_in_cluster",
+                description => Desc
+            }),
+            error(conflicting_routing_schemas_detected_in_cluster)
+    end.
+
+schema_conflict_reason(Type, State) ->
+    Observe =
+        case Type of
+            config ->
+                "Peer nodes have route storage schema resolved into conflicting versions.\n";
+            records ->
+                "There are conflicting routing records found.\n"
+        end,
+    Cause =
+        "\nThis was caused by a race condition when the cluster was rolling-upgraded "
+        "from an older version to 5.4.0, 5.4.1, 5.5.0 or 5.5.1."
+        "\nThis node cannot boot before the conflicts are resolved.\n",
+    Observe ++ Cause ++ mk_conflict_resolution_action(State).
+
+detect_table_schema_vsn() ->
+    lists:flatten([
+        [v1 || _NonEmptyTrieIndex = not emqx_trie:empty()],
+        [v2 || _NonEmptyFilterTab = not is_empty(?ROUTE_TAB_FILTERS)]
+    ]).

 is_empty(Tab) ->
     ets:first(Tab) =:= '$end_of_table'.

+mk_conflict_resolution_action(State) ->
+    NodesV1 = [Node || {Node, v1, _} <- State],
+    NodesUnknown = [Node || {Node, unknown, _} <- State],
+    Format =
+        "There are two ways to resolve the conflict:"
+        "\n"
+        "\nA: Full cluster restart: stop ALL running nodes one by one "
+        "and restart them in the reversed order."
+        "\n"
+        "\nB: Force v1 nodes to clean up their routes."
+        "\n   The following EMQX nodes are running with the v1 schema: ~0p."
+        "\n   1. Stop the listeners with the command \"emqx eval 'emqx_listener:stop()'\" on all v1 nodes."
+        "\n   2. Wait until they are safe to restart."
+        "\n      This could take some time, depending on the number of clients and their subscriptions."
+        "\n      The conditions below should be true for each of the nodes in order to proceed:"
+        "\n      a) The command 'ets:info(emqx_subscriber, size)' prints '0'."
+        "\n      b) The command 'emqx ctl topics list' prints 'No topics.'"
+        "\n   3. Upgrade the nodes to 5.6.0 or newer.",
+    FormatUnknown =
+        "Additionally, the following nodes were unreachable during startup: ~0p. "
+        "It is strongly advised to include them in the manual resolution procedure as well.",
+    Message = io_lib:format(Format, [NodesV1]),
+    MessageUnknown = [io_lib:format(FormatUnknown, [NodesUnknown]) || NodesUnknown =/= []],
+    unicode:characters_to_list([Message, "\n", MessageUnknown]).
+
 %%--------------------------------------------------------------------
 %% gen_server callbacks
 %%--------------------------------------------------------------------

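Note: the discovery verdict reduces to the usort over known versions; a worked example of the conflicting case (node names and details are made up):

    Responses = [{n1, v2, configured}, {n2, v1, legacy}, {n3, unknown, starting}],
    lists:usort([Vsn || {_Node, Vsn, _} <- Responses, Vsn /= unknown]).
    %% -> [v1, v2]: more than one version survives, so the node prints the
    %% conflict description to stderr and refuses to boot.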
View File

@@ -154,6 +154,8 @@
 -define(DEFAULT_BATCH_N, 1000).

+-define(INFLIGHT_INSERT_TS, inflight_insert_ts).
+
 %%--------------------------------------------------------------------
 %% Init a Session
 %%--------------------------------------------------------------------
@@ -280,8 +282,7 @@ info(inflight_cnt, #session{inflight = Inflight}) ->
 info(inflight_max, #session{inflight = Inflight}) ->
     emqx_inflight:max_size(Inflight);
 info({inflight_msgs, PagerParams}, #session{inflight = Inflight}) ->
-    {InflightList, Meta} = emqx_inflight:query(Inflight, PagerParams),
-    {[I#inflight_data.message || {_, I} <- InflightList], Meta};
+    inflight_query(Inflight, PagerParams);
 info(retry_interval, #session{retry_interval = Interval}) ->
     Interval;
 info(mqueue, #session{mqueue = MQueue}) ->
@@ -407,7 +408,7 @@ puback(ClientInfo, PacketId, Session = #session{inflight = Inflight}) ->
             Inflight1 = emqx_inflight:delete(PacketId, Inflight),
             Session1 = Session#session{inflight = Inflight1},
             {ok, Replies, Session2} = dequeue(ClientInfo, Session1),
-            {ok, Msg, Replies, Session2};
+            {ok, without_inflight_insert_ts(Msg), Replies, Session2};
         {value, _} ->
             {error, ?RC_PACKET_IDENTIFIER_IN_USE};
         none ->
@@ -426,7 +427,7 @@ pubrec(PacketId, Session = #session{inflight = Inflight}) ->
         {value, #inflight_data{phase = wait_ack, message = Msg} = Data} ->
             Update = Data#inflight_data{phase = wait_comp},
             Inflight1 = emqx_inflight:update(PacketId, Update, Inflight),
-            {ok, Msg, Session#session{inflight = Inflight1}};
+            {ok, without_inflight_insert_ts(Msg), Session#session{inflight = Inflight1}};
         {value, _} ->
             {error, ?RC_PACKET_IDENTIFIER_IN_USE};
         none ->
@@ -462,7 +463,7 @@ pubcomp(ClientInfo, PacketId, Session = #session{inflight = Inflight}) ->
             Inflight1 = emqx_inflight:delete(PacketId, Inflight),
             Session1 = Session#session{inflight = Inflight1},
             {ok, Replies, Session2} = dequeue(ClientInfo, Session1),
-            {ok, Msg, Replies, Session2};
+            {ok, without_inflight_insert_ts(Msg), Replies, Session2};
         {value, _Other} ->
             {error, ?RC_PACKET_IDENTIFIER_IN_USE};
         none ->
@@ -650,7 +651,7 @@ do_retry_delivery(
             Msg1 = emqx_message:set_flag(dup, true, Msg),
             Update = Data#inflight_data{message = Msg1, timestamp = Now},
             Inflight1 = emqx_inflight:update(PacketId, Update, Inflight),
-            {[{PacketId, Msg1} | Acc], Inflight1}
+            {[{PacketId, without_inflight_insert_ts(Msg1)} | Acc], Inflight1}
     end;
 do_retry_delivery(_ClientInfo, PacketId, Data, Now, Acc, Inflight) ->
     Update = Data#inflight_data{timestamp = Now},
@@ -739,7 +740,7 @@ replay(ClientInfo, Session) ->
            ({PacketId, #inflight_data{phase = wait_comp}}) ->
                {pubrel, PacketId};
            ({PacketId, #inflight_data{message = Msg}}) ->
-               {PacketId, emqx_message:set_flag(dup, true, Msg)}
+               {PacketId, without_inflight_insert_ts(emqx_message:set_flag(dup, true, Msg))}
         end,
         emqx_inflight:to_list(Session#session.inflight)
     ),
@@ -786,7 +787,7 @@ redispatch_shared_messages(#session{inflight = Inflight, mqueue = Q}) ->
                 %% If the Client's Session terminates before the Client reconnects,
                 %% the Server MUST NOT send the Application Message to any other
                 %% subscribed Client [MQTT-4.8.2-5].
-                {true, Msg};
+                {true, without_inflight_insert_ts(Msg)};
             ({_PacketId, #inflight_data{}}) ->
                 false
         end,
@@ -822,22 +823,83 @@ publish_will_message_now(#session{} = Session, #message{} = WillMsg) ->
 %% Helper functions
 %%--------------------------------------------------------------------

--compile({inline, [sort_fun/2, batch_n/1, with_ts/1, age/2]}).
+-compile(
+    {inline, [
+        sort_fun/2, batch_n/1, inflight_insert_ts/1, without_inflight_insert_ts/1, with_ts/1, age/2
+    ]}
+).

 sort_fun({_, A}, {_, B}) ->
     A#inflight_data.timestamp =< B#inflight_data.timestamp.

+query_sort_fun({_, #inflight_data{message = A}}, {_, #inflight_data{message = B}}) ->
+    inflight_insert_ts(A) =< inflight_insert_ts(B).
+
+-spec inflight_query(emqx_inflight:inflight(), #{
+    position => integer() | none, limit := pos_integer()
+}) ->
+    {[emqx_types:message()], #{position := integer() | none, start := integer() | none}}.
+inflight_query(Inflight, #{limit := Limit} = PagerParams) ->
+    InflightL = emqx_inflight:to_list(fun query_sort_fun/2, Inflight),
+    StartPos =
+        case InflightL of
+            [{_, #inflight_data{message = FirstM}} | _] -> inflight_insert_ts(FirstM);
+            [] -> none
+        end,
+    Position = maps:get(position, PagerParams, none),
+    InflightMsgs = sublist_from_pos(InflightL, Position, Limit),
+    NextPos =
+        case InflightMsgs of
+            [_ | _] = L ->
+                inflight_insert_ts(lists:last(L));
+            [] ->
+                Position
+        end,
+    {InflightMsgs, #{start => StartPos, position => NextPos}}.
+
+sublist_from_pos(InflightList, none = _Position, Limit) ->
+    inflight_msgs_sublist(InflightList, Limit);
+sublist_from_pos(InflightList, Position, Limit) ->
+    Inflight = lists:dropwhile(
+        fun({_, #inflight_data{message = M}}) ->
+            inflight_insert_ts(M) =< Position
+        end,
+        InflightList
+    ),
+    inflight_msgs_sublist(Inflight, Limit).
+
+%% Small optimization to get the sublist and drop the keys in one traversal
+inflight_msgs_sublist([{_Key, #inflight_data{message = Msg}} | T], Limit) when Limit > 0 ->
+    [Msg | inflight_msgs_sublist(T, Limit - 1)];
+inflight_msgs_sublist(_, _) ->
+    [].
+
+inflight_insert_ts(#message{extra = #{?INFLIGHT_INSERT_TS := Ts}}) -> Ts.
+
+without_inflight_insert_ts(#message{extra = Extra} = Msg) ->
+    Msg#message{extra = maps:remove(?INFLIGHT_INSERT_TS, Extra)}.
+
 batch_n(Inflight) ->
     case emqx_inflight:max_size(Inflight) of
         0 -> ?DEFAULT_BATCH_N;
         Sz -> Sz - emqx_inflight:size(Inflight)
     end.

-with_ts(Msg) ->
+with_ts(#message{extra = Extra} = Msg) ->
+    InsertTsNano = erlang:system_time(nanosecond),
+    %% This is used to sort/traverse messages in inflight_query/2
+    Extra1 =
+        case is_map(Extra) of
+            true -> Extra;
+            %% The extra field was not used before EMQX 5.4.0 and defaulted to
+            %% an empty list; if it's not a map, it's safe to overwrite it.
+            false -> #{}
+        end,
+    Msg1 = Msg#message{extra = Extra1#{?INFLIGHT_INSERT_TS => InsertTsNano}},
     #inflight_data{
         phase = wait_ack,
-        message = Msg,
-        timestamp = erlang:system_time(millisecond)
+        message = Msg1,
+        timestamp = erlang:convert_time_unit(InsertTsNano, nanosecond, millisecond)
     }.

 age(Now, Ts) -> Now - Ts.

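Note: the inflight pager mirrors the mqueue one: position is the insert timestamp of the last message on the previous page. A hypothetical two-page fetch through the session info API (module name, call shape, and page size inferred from the diff):

    {Page1, #{position := Next, start := _First}} =
        emqx_session_mem:info({inflight_msgs, #{limit => 100}}, Session),
    {Page2, _Meta} =
        emqx_session_mem:info({inflight_msgs, #{position => Next, limit => 100}}, Session)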
View File

@@ -169,7 +169,9 @@ filters(#{type := ip_address, filter := Filter, name := Name}) ->
 formatter(#{type := _Type, payload_encode := PayloadEncode}) ->
     {emqx_trace_formatter, #{
         %% template is for ?SLOG messages, not ?TRACE.
-        template => [time, " [", level, "] ", msg, "\n"],
+        %% XXX: no `time' in the template, since emqx_logger_textfmt:fmt/2
+        %% prepends the timestamp manually.
+        template => ["[", level, "] ", msg, "\n"],
         single_line => true,
         max_size => unlimited,
         depth => unlimited,

View File

@@ -0,0 +1,37 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%     http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+
+-module(emqx_router_proto_v1).
+
+-behaviour(emqx_bpapi).
+
+-export([introduced_in/0]).
+
+-export([
+    get_routing_schema_vsn/1
+]).
+
+-include_lib("emqx/include/bpapi.hrl").
+
+-define(TIMEOUT, 3_000).
+
+introduced_in() ->
+    "5.6.0".
+
+-spec get_routing_schema_vsn([node()]) ->
+    [emqx_rpc:erpc(emqx_router:schemavsn())].
+get_routing_schema_vsn(Nodes) ->
+    erpc:multicall(Nodes, emqx_router, get_schema_vsn, [], ?TIMEOUT).

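Note: a sketch of how this BPAPI is driven from emqx_router:discover_cluster_schema_vsn/1 above; erpc:multicall/5 returns one result per node, in the same order as Nodes:

    Nodes = emqx:running_nodes() -- [node()],
    %% each entry is {ok, v1 | v2} or {error, {exception, _, _} | _}:
    Results = emqx_router_proto_v1:get_routing_schema_vsn(Nodes)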
View File

@@ -59,7 +59,8 @@
 -define(FORCE_DELETED_APIS, [
     {emqx_statsd, 1},
     {emqx_plugin_libs, 1},
-    {emqx_persistent_session, 1}
+    {emqx_persistent_session, 1},
+    {emqx_ds, 3}
 ]).
 %% List of known RPC backend modules:
 -define(RPC_MODULES, "gen_rpc, erpc, rpc, emqx_rpc").
@@ -119,17 +120,16 @@ check_compat(DumpFilenames) ->

 %% Note: sets nok flag
 -spec check_compat(fulldump(), fulldump()) -> ok.
-check_compat(Dump1 = #{release := Rel1}, Dump2 = #{release := Rel2}) ->
+check_compat(Dump1 = #{release := Rel1}, Dump2 = #{release := Rel2}) when Rel2 >= Rel1 ->
     check_api_immutability(Dump1, Dump2),
-    Rel2 >= Rel1 andalso
-        typecheck_apis(Dump1, Dump2).
+    typecheck_apis(Dump1, Dump2);
+check_compat(_, _) ->
+    ok.

 %% It's not allowed to change BPAPI modules. Check that no changes
 %% have been made. (sets nok flag)
 -spec check_api_immutability(fulldump(), fulldump()) -> ok.
-check_api_immutability(#{release := Rel1, api := APIs1}, #{release := Rel2, api := APIs2}) when
-    Rel2 >= Rel1
-->
+check_api_immutability(#{release := Rel1, api := APIs1}, #{release := Rel2, api := APIs2}) ->
     %% TODO: Handle API deprecation
     _ = maps:map(
         fun(Key = {API, Version}, Val) ->
@@ -137,14 +137,15 @@ check_api_immutability(#{release := Rel1, api := APIs1}, #{release := Rel2, api
                 Val ->
                     ok;
                 undefined ->
-                    case lists:member({API, Version}, ?FORCE_DELETED_APIS) of
+                    case lists:member(Key, ?FORCE_DELETED_APIS) of
                         true ->
                             ok;
                         false ->
                             setnok(),
                             logger:error(
-                                "API ~p v~p was removed in release ~p without being deprecated.",
-                                [API, Version, Rel2]
+                                "API ~p v~p was removed in release ~p without being deprecated. "
+                                "Old release: ~p",
+                                [API, Version, Rel2, Rel1]
                             )
                     end;
                 _Val ->
@@ -157,8 +158,6 @@ check_api_immutability(#{release := Rel1, api := APIs1}, #{release := Rel2, api
         end,
         APIs1
     ),
-    ok;
-check_api_immutability(_, _) ->
     ok.

 filter_calls(Calls) ->
@@ -181,8 +180,8 @@ typecheck_apis(
     AllCalls = filter_calls(AllCalls0),
     lists:foreach(
         fun({From, To}) ->
-            Caller = get_param_types(CallerSigs, From),
-            Callee = get_param_types(CalleeSigs, To),
+            Caller = get_param_types(CallerSigs, From, From),
+            Callee = get_param_types(CalleeSigs, From, To),
             %% TODO: check return types
             case typecheck_rpc(Caller, Callee) of
                 [] ->
@@ -226,8 +225,8 @@ typecheck_rpc(Caller, Callee) ->
         Callee
     ).

--spec get_param_types(dialyzer_dump(), emqx_bpapi:call()) -> param_types().
-get_param_types(Signatures, {M, F, A}) ->
+%%-spec get_param_types(dialyzer_dump(), emqx_bpapi:call()) -> param_types().
+get_param_types(Signatures, From, {M, F, A}) ->
     Arity = length(A),
     case Signatures of
         #{{M, F, Arity} := {_RetType, AttrTypes}} ->
@@ -235,7 +234,7 @@ get_param_types(Signatures, {M, F, A}) ->
             Arity = length(AttrTypes),
             maps:from_list(lists:zip(A, AttrTypes));
         _ ->
-            logger:critical("Call ~p:~p/~p is not found in PLT~n", [M, F, Arity]),
+            logger:critical("Call ~p:~p/~p from ~p is not found in PLT~n", [M, F, Arity, From]),
             error({badkey, {M, F, A}})
     end.

View File

@ -126,73 +126,3 @@ t_to_list(_) ->
), ),
ExpList = [{Seq, integer_to_binary(Seq)} || Seq <- lists:seq(1, 10)], ExpList = [{Seq, integer_to_binary(Seq)} || Seq <- lists:seq(1, 10)],
?assertEqual(ExpList, emqx_inflight:to_list(Inflight)). ?assertEqual(ExpList, emqx_inflight:to_list(Inflight)).
t_query(_) ->
EmptyInflight = emqx_inflight:new(500),
?assertMatch(
{[], #{continuation := end_of_data}}, emqx_inflight:query(EmptyInflight, #{limit => 50})
),
?assertMatch(
{[], #{continuation := end_of_data}},
emqx_inflight:query(EmptyInflight, #{continuation => <<"empty">>, limit => 50})
),
?assertMatch(
{[], #{continuation := end_of_data}},
emqx_inflight:query(EmptyInflight, #{continuation => none, limit => 50})
),
Inflight = lists:foldl(
fun(Seq, QAcc) ->
emqx_inflight:insert(Seq, integer_to_binary(Seq), QAcc)
end,
EmptyInflight,
lists:reverse(lists:seq(1, 114))
),
LastCont = lists:foldl(
fun(PageSeq, Cont) ->
Limit = 10,
PagerParams = #{continuation => Cont, limit => Limit},
{Page, #{continuation := NextCont} = Meta} = emqx_inflight:query(Inflight, PagerParams),
?assertEqual(10, length(Page)),
ExpFirst = PageSeq * Limit - Limit + 1,
ExpLast = PageSeq * Limit,
?assertEqual({ExpFirst, integer_to_binary(ExpFirst)}, lists:nth(1, Page)),
?assertEqual({ExpLast, integer_to_binary(ExpLast)}, lists:nth(10, Page)),
?assertMatch(
#{count := 114, continuation := IntCont} when is_integer(IntCont),
Meta
),
NextCont
end,
none,
lists:seq(1, 11)
),
{LastPartialPage, LastMeta} = emqx_inflight:query(Inflight, #{
continuation => LastCont, limit => 10
}),
?assertEqual(4, length(LastPartialPage)),
?assertEqual({111, <<"111">>}, lists:nth(1, LastPartialPage)),
?assertEqual({114, <<"114">>}, lists:nth(4, LastPartialPage)),
?assertMatch(#{continuation := end_of_data, count := 114}, LastMeta),
?assertMatch(
{[], #{continuation := end_of_data}},
emqx_inflight:query(Inflight, #{continuation => <<"not-existing-cont-id">>, limit => 10})
),
{LargePage, LargeMeta} = emqx_inflight:query(Inflight, #{limit => 1000}),
?assertEqual(114, length(LargePage)),
?assertEqual({1, <<"1">>}, hd(LargePage)),
?assertEqual({114, <<"114">>}, lists:last(LargePage)),
?assertMatch(#{continuation := end_of_data}, LargeMeta),
{FullPage, FullMeta} = emqx_inflight:query(Inflight, #{limit => 114}),
?assertEqual(114, length(FullPage)),
?assertEqual({1, <<"1">>}, hd(FullPage)),
?assertEqual({114, <<"114">>}, lists:last(FullPage)),
?assertMatch(#{continuation := end_of_data}, FullMeta),
{EmptyPage, EmptyMeta} = emqx_inflight:query(Inflight, #{limit => 0}),
?assertEqual([], EmptyPage),
?assertMatch(#{continuation := none, count := 114}, EmptyMeta).

View File

@ -284,13 +284,15 @@ t_dropped(_) ->
t_query(_) -> t_query(_) ->
EmptyQ = ?Q:init(#{max_len => 500, store_qos0 => true}), EmptyQ = ?Q:init(#{max_len => 500, store_qos0 => true}),
?assertMatch({[], #{continuation := end_of_data}}, ?Q:query(EmptyQ, #{limit => 50})), ?assertEqual({[], #{position => none, start => none}}, ?Q:query(EmptyQ, #{limit => 50})),
?assertMatch( RandPos = {erlang:system_time(nanosecond), 0},
{[], #{continuation := end_of_data}}, ?assertEqual(
?Q:query(EmptyQ, #{continuation => <<"empty">>, limit => 50}) {[], #{position => RandPos, start => none}},
?Q:query(EmptyQ, #{position => RandPos, limit => 50})
), ),
?assertMatch( ?assertEqual(
{[], #{continuation := end_of_data}}, ?Q:query(EmptyQ, #{continuation => none, limit => 50}) {[], #{position => none, start => none}},
?Q:query(EmptyQ, #{continuation => none, limit => 50})
), ),
Q = lists:foldl( Q = lists:foldl(
@ -303,52 +305,146 @@ t_query(_) ->
lists:seq(1, 114) lists:seq(1, 114)
), ),
LastCont = lists:foldl( {LastPos, LastStart} = lists:foldl(
fun(PageSeq, Cont) -> fun(PageSeq, {Pos, PrevStart}) ->
Limit = 10, Limit = 10,
PagerParams = #{continuation => Cont, limit => Limit}, PagerParams = #{position => Pos, limit => Limit},
{Page, #{continuation := NextCont} = Meta} = ?Q:query(Q, PagerParams), {Page, #{position := NextPos, start := Start}} = ?Q:query(Q, PagerParams),
?assertEqual(10, length(Page)), ?assertEqual(10, length(Page)),
ExpFirstPayload = integer_to_binary(PageSeq * Limit - Limit + 1), ExpFirstPayload = integer_to_binary(PageSeq * Limit - Limit + 1),
ExpLastPayload = integer_to_binary(PageSeq * Limit), ExpLastPayload = integer_to_binary(PageSeq * Limit),
?assertEqual( FirstMsg = lists:nth(1, Page),
ExpFirstPayload, LastMsg = lists:nth(10, Page),
emqx_message:payload(lists:nth(1, Page)), ?assertEqual(ExpFirstPayload, emqx_message:payload(FirstMsg)),
#{page_seq => PageSeq, page => Page, meta => Meta} ?assertEqual(ExpLastPayload, emqx_message:payload(LastMsg)),
), %% start value must not change as Mqueue is not modified during traversal
?assertEqual(ExpLastPayload, emqx_message:payload(lists:nth(10, Page))), NextStart =
?assertMatch(#{count := 114, continuation := <<_/binary>>}, Meta), case PageSeq of
NextCont 1 ->
?assertEqual({mqueue_ts(FirstMsg), 0}, Start),
Start;
_ ->
?assertEqual(PrevStart, Start),
PrevStart
end,
{NextPos, NextStart}
end, end,
none, {none, none},
lists:seq(1, 11) lists:seq(1, 11)
), ),
{LastPartialPage, LastMeta} = ?Q:query(Q, #{continuation => LastCont, limit => 10}),
{LastPartialPage, #{position := FinalPos} = LastMeta} = ?Q:query(Q, #{
position => LastPos, limit => 10
}),
LastMsg = lists:nth(4, LastPartialPage),
?assertEqual(4, length(LastPartialPage)), ?assertEqual(4, length(LastPartialPage)),
?assertEqual(<<"111">>, emqx_message:payload(lists:nth(1, LastPartialPage))), ?assertEqual(<<"111">>, emqx_message:payload(lists:nth(1, LastPartialPage))),
?assertEqual(<<"114">>, emqx_message:payload(lists:nth(4, LastPartialPage))), ?assertEqual(<<"114">>, emqx_message:payload(LastMsg)),
?assertMatch(#{continuation := end_of_data, count := 114}, LastMeta), ?assertEqual(#{position => {mqueue_ts(LastMsg), 0}, start => LastStart}, LastMeta),
?assertEqual(
?assertMatch( {[], #{start => LastStart, position => FinalPos}},
{[], #{continuation := end_of_data}}, ?Q:query(Q, #{position => FinalPos, limit => 10})
?Q:query(Q, #{continuation => <<"not-existing-cont-id">>, limit => 10})
), ),
{LargePage, LargeMeta} = ?Q:query(Q, #{limit => 1000}), {LargePage, LargeMeta} = ?Q:query(Q, #{position => none, limit => 1000}),
?assertEqual(114, length(LargePage)), ?assertEqual(114, length(LargePage)),
?assertEqual(<<"1">>, emqx_message:payload(hd(LargePage))), ?assertEqual(<<"1">>, emqx_message:payload(hd(LargePage))),
?assertEqual(<<"114">>, emqx_message:payload(lists:last(LargePage))), ?assertEqual(<<"114">>, emqx_message:payload(lists:last(LargePage))),
?assertMatch(#{continuation := end_of_data}, LargeMeta), ?assertEqual(#{start => LastStart, position => FinalPos}, LargeMeta),
{FullPage, FullMeta} = ?Q:query(Q, #{limit => 114}), {FullPage, FullMeta} = ?Q:query(Q, #{position => none, limit => 114}),
?assertEqual(114, length(FullPage)), ?assertEqual(LargePage, FullPage),
?assertEqual(<<"1">>, emqx_message:payload(hd(FullPage))), ?assertEqual(LargeMeta, FullMeta),
?assertEqual(<<"114">>, emqx_message:payload(lists:last(FullPage))),
?assertMatch(#{continuation := end_of_data}, FullMeta),
{EmptyPage, EmptyMeta} = ?Q:query(Q, #{limit => 0}), {_, Q1} = emqx_mqueue:out(Q),
?assertEqual([], EmptyPage), {PageAfterRemove, #{start := StartAfterRemove}} = ?Q:query(Q1, #{position => none, limit => 10}),
?assertMatch(#{continuation := none, count := 114}, EmptyMeta). ?assertEqual(<<"2">>, emqx_message:payload(hd(PageAfterRemove))),
?assertEqual(StartAfterRemove, {mqueue_ts(hd(PageAfterRemove)), 0}).
t_query_with_priorities(_) ->
Priorities = #{<<"t/infinity">> => infinity, <<"t/10">> => 10, <<"t/5">> => 5},
EmptyQ = ?Q:init(#{max_len => 500, store_qos0 => true, priorities => Priorities}),
?assertEqual({[], #{position => none, start => none}}, ?Q:query(EmptyQ, #{limit => 50})),
RandPos = {erlang:system_time(nanosecond), 0},
?assertEqual(
{[], #{position => RandPos, start => none}},
?Q:query(EmptyQ, #{position => RandPos, limit => 50})
),
?assertEqual(
{[], #{position => none, start => none}},
?Q:query(EmptyQ, #{continuation => none, limit => 50})
),
{Q, ExpMsgsAcc} = lists:foldl(
fun(Topic, {QAcc, MsgsAcc}) ->
{TopicQ, TopicMsgs} =
lists:foldl(
fun(Seq, {TopicQAcc, TopicMsgsAcc}) ->
Payload = <<Topic/binary, "_", (integer_to_binary(Seq))/binary>>,
Msg = emqx_message:make(Topic, Payload),
{_, TopicQAcc1} = ?Q:in(Msg, TopicQAcc),
{TopicQAcc1, [Msg | TopicMsgsAcc]}
end,
{QAcc, []},
lists:seq(1, 10)
),
{TopicQ, [lists:reverse(TopicMsgs) | MsgsAcc]}
end,
{EmptyQ, []},
[<<"t/test">>, <<"t/5">>, <<"t/infinity">>, <<"t/10">>]
),
%% Manual resorting from the highest to the lowest priority
[ExpMsgsPrio0, ExpMsgsPrio5, ExpMsgsPrioInf, ExpMsgsPrio10] = lists:reverse(ExpMsgsAcc),
ExpMsgs = ExpMsgsPrioInf ++ ExpMsgsPrio10 ++ ExpMsgsPrio5 ++ ExpMsgsPrio0,
{AllMsgs, #{start := StartPos, position := Pos}} = ?Q:query(Q, #{position => none, limit => 40}),
?assertEqual(40, length(AllMsgs)),
?assertEqual(ExpMsgs, with_empty_extra(AllMsgs)),
FirstMsg = hd(AllMsgs),
LastMsg = lists:last(AllMsgs),
?assertEqual(<<"t/infinity_1">>, emqx_message:payload(FirstMsg)),
?assertEqual(StartPos, {mqueue_ts(FirstMsg), infinity}),
?assertEqual(<<"t/test_10">>, emqx_message:payload(LastMsg)),
?assertMatch({_, 0}, Pos),
?assertEqual(Pos, {mqueue_ts(LastMsg), mqueue_prio(LastMsg)}),
Pos5 = {mqueue_ts(lists:nth(5, AllMsgs)), mqueue_prio(lists:nth(5, AllMsgs))},
LastInfPos = {mqueue_ts(lists:nth(10, AllMsgs)), mqueue_prio(lists:nth(5, AllMsgs))},
{MsgsPrioInfTo10, #{start := StartPos, position := PosPrio10Msg5}} = ?Q:query(Q, #{
position => Pos5, limit => 10
}),
?assertEqual(10, length(MsgsPrioInfTo10)),
?assertEqual(<<"t/infinity_6">>, emqx_message:payload(hd(MsgsPrioInfTo10))),
?assertEqual(<<"t/10_5">>, emqx_message:payload(lists:last(MsgsPrioInfTo10))),
?assertEqual(PosPrio10Msg5, {
mqueue_ts(lists:last(MsgsPrioInfTo10)), mqueue_prio(lists:last(MsgsPrioInfTo10))
}),
{MsgsPrioInfTo5, #{start := StartPos, position := PosPrio5Msg5}} = ?Q:query(Q, #{
position => Pos5, limit => 20
}),
?assertEqual(20, length(MsgsPrioInfTo5)),
?assertEqual(<<"t/infinity_6">>, emqx_message:payload(hd(MsgsPrioInfTo5))),
?assertEqual(<<"t/5_5">>, emqx_message:payload(lists:last(MsgsPrioInfTo5))),
?assertEqual(PosPrio5Msg5, {
mqueue_ts(lists:last(MsgsPrioInfTo5)), mqueue_prio(lists:last(MsgsPrioInfTo5))
}),
{MsgsPrio10, #{start := StartPos, position := PosPrio10}} = ?Q:query(Q, #{
position => LastInfPos, limit => 10
}),
?assertEqual(ExpMsgsPrio10, with_empty_extra(MsgsPrio10)),
?assertEqual(10, length(MsgsPrio10)),
?assertEqual(<<"t/10_1">>, emqx_message:payload(hd(MsgsPrio10))),
?assertEqual(<<"t/10_10">>, emqx_message:payload(lists:last(MsgsPrio10))),
?assertEqual(PosPrio10, {mqueue_ts(lists:last(MsgsPrio10)), mqueue_prio(lists:last(MsgsPrio10))}),
{MsgsPrio10To5, #{start := StartPos, position := _}} = ?Q:query(Q, #{
position => LastInfPos, limit => 20
}),
?assertEqual(ExpMsgsPrio10 ++ ExpMsgsPrio5, with_empty_extra(MsgsPrio10To5)).
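%% A cursor-style traversal sketch over the position API exercised above.
%% `drain_pages/3` is a hypothetical helper (not part of emqx_mqueue),
%% assuming a populated queue `Q` built as in t_query/1:
drain_pages(Q, Pos, Acc) ->
    case ?Q:query(Q, #{position => Pos, limit => 10}) of
        {[], _Meta} ->
            %% An empty page signals the end of the queue
            lists:append(lists:reverse(Acc));
        {Page, #{position := NextPos}} ->
            %% Feed the returned position back as the next cursor
            drain_pages(Q, NextPos, [Page | Acc])
    end.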
conservation_prop() -> conservation_prop() ->
?FORALL( ?FORALL(
@ -413,3 +509,9 @@ drain(Q) ->
{{value, #message{topic = T, payload = P}}, Q1} -> {{value, #message{topic = T, payload = P}}, Q1} ->
[{T, P} | drain(Q1)] [{T, P} | drain(Q1)]
end. end.
mqueue_ts(#message{extra = #{mqueue_insert_ts := Ts}}) -> Ts.
mqueue_prio(#message{extra = #{mqueue_priority := Prio}}) -> Prio.
with_empty_extra(Msgs) ->
[M#message{extra = #{}} || M <- Msgs].

View File

@ -30,7 +30,8 @@ all() ->
{group, routing_schema_v1}, {group, routing_schema_v1},
{group, routing_schema_v2}, {group, routing_schema_v2},
t_routing_schema_switch_v1, t_routing_schema_switch_v1,
t_routing_schema_switch_v2 t_routing_schema_switch_v2,
t_routing_schema_consistent_clean_cluster
]. ].
groups() -> groups() ->
@ -477,6 +478,60 @@ t_routing_schema_switch(VFrom, VTo, WorkDir) ->
ok = emqx_cth_cluster:stop(Nodes) ok = emqx_cth_cluster:stop(Nodes)
end. end.
t_routing_schema_consistent_clean_cluster(Config) ->
WorkDir = emqx_cth_suite:work_dir(?FUNCTION_NAME, Config),
% Start first node with routing schema v1
[Node1] = emqx_cth_cluster:start(
[
{routing_schema_consistent1, #{
apps => [mk_genrpc_appspec(), mk_emqx_appspec(1, v1)]
}}
],
#{work_dir => WorkDir}
),
% Start the rest of the nodes with routing schema v2
NodesRest = emqx_cth_cluster:start(
[
{routing_schema_consistent2, #{
apps => [mk_genrpc_appspec(), mk_emqx_appspec(2, v2)],
base_port => 20000,
join_to => Node1
}},
{routing_schema_consistent3, #{
apps => [mk_genrpc_appspec(), mk_emqx_appspec(3, v2)],
base_port => 20100,
join_to => Node1
}}
],
#{work_dir => WorkDir}
),
Nodes = [Node1 | NodesRest],
try
% Verify that cluster is still on v1
?assertEqual(
[{ok, v1} || _ <- Nodes],
erpc:multicall(Nodes, emqx_router, get_schema_vsn, [])
),
% Wait for all nodes to agree on cluster state
?retry(
500,
10,
?assertEqual(
[{ok, Nodes} || _ <- Nodes],
erpc:multicall(Nodes, emqx, running_nodes, [])
)
),
C1 = start_client(Node1),
C2 = start_client(hd(NodesRest)),
ok = subscribe(C2, <<"t/#">>),
{ok, _} = publish(C1, <<"t/a/b/c">>, <<"yayconsistency">>),
?assertReceive({pub, C2, #{topic := <<"t/a/b/c">>, payload := <<"yayconsistency">>}}),
ok = emqtt:stop(C1),
ok = emqtt:stop(C2)
after
ok = emqx_cth_cluster:stop(Nodes)
end.
t_slow_rlog_routing_consistency(init, Config) -> t_slow_rlog_routing_consistency(init, Config) ->
[Core1, _Core2, _Replicant] = ?config(cluster, Config), [Core1, _Core2, _Replicant] = ?config(cluster, Config),
MnesiaHook = rpc:call(Core1, persistent_term, get, [{mnesia_hook, post_commit}]), MnesiaHook = rpc:call(Core1, persistent_term, get, [{mnesia_hook, post_commit}]),

View File

@ -19,6 +19,7 @@
-compile(export_all). -compile(export_all).
-compile(nowarn_export_all). -compile(nowarn_export_all).
-include_lib("emqx/include/emqx.hrl").
-include_lib("emqx/include/emqx_mqtt.hrl"). -include_lib("emqx/include/emqx_mqtt.hrl").
-include_lib("eunit/include/eunit.hrl"). -include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl"). -include_lib("common_test/include/ct.hrl").
@ -116,6 +117,80 @@ t_session_stats(_) ->
maps:from_list(Stats) maps:from_list(Stats)
). ).
t_session_inflight_query(_) ->
EmptyInflight = emqx_inflight:new(500),
Session = session(#{inflight => EmptyInflight}),
EmptyQueryResMeta = {[], #{position => none, start => none}},
?assertEqual(EmptyQueryResMeta, inflight_query(Session, none, 10)),
?assertEqual(EmptyQueryResMeta, inflight_query(Session, none, 10)),
RandPos = erlang:system_time(nanosecond),
?assertEqual({[], #{position => RandPos, start => none}}, inflight_query(Session, RandPos, 10)),
Inflight = lists:foldl(
fun(Seq, Acc) ->
Msg = emqx_message:make(clientid, ?QOS_2, <<"t">>, integer_to_binary(Seq)),
emqx_inflight:insert(Seq, emqx_session_mem:with_ts(Msg), Acc)
end,
EmptyInflight,
lists:seq(1, 114)
),
Session1 = session(#{inflight => Inflight}),
{LastPos, LastStart} = lists:foldl(
fun(PageSeq, {Pos, PrevStart}) ->
Limit = 10,
{Page, #{position := NextPos, start := Start}} = inflight_query(Session1, Pos, Limit),
?assertEqual(10, length(Page)),
ExpFirst = PageSeq * Limit - Limit + 1,
ExpLast = PageSeq * Limit,
FirstMsg = lists:nth(1, Page),
LastMsg = lists:nth(10, Page),
?assertEqual(integer_to_binary(ExpFirst), emqx_message:payload(FirstMsg)),
?assertEqual(integer_to_binary(ExpLast), emqx_message:payload(LastMsg)),
%% start value must not change as Inflight is not modified during traversal
NextStart =
case PageSeq of
1 ->
?assertEqual(inflight_ts(FirstMsg), Start),
Start;
_ ->
?assertEqual(PrevStart, Start),
PrevStart
end,
?assertEqual(inflight_ts(LastMsg), NextPos),
{NextPos, NextStart}
end,
{none, none},
lists:seq(1, 11)
),
{LastPartialPage, #{position := FinalPos} = LastMeta} = inflight_query(
Session1, LastPos, 10
),
LastMsg = lists:nth(4, LastPartialPage),
?assertEqual(4, length(LastPartialPage)),
?assertEqual(<<"111">>, emqx_message:payload(lists:nth(1, LastPartialPage))),
?assertEqual(<<"114">>, emqx_message:payload(LastMsg)),
?assertEqual(#{position => inflight_ts(LastMsg), start => LastStart}, LastMeta),
?assertEqual(
{[], #{start => LastStart, position => FinalPos}},
inflight_query(Session1, FinalPos, 10)
),
{LargePage, LargeMeta} = inflight_query(Session1, none, 1000),
?assertEqual(114, length(LargePage)),
?assertEqual(<<"1">>, emqx_message:payload(hd(LargePage))),
?assertEqual(<<"114">>, emqx_message:payload(lists:last(LargePage))),
?assertEqual(#{start => LastStart, position => FinalPos}, LargeMeta),
{FullPage, FullMeta} = inflight_query(Session1, none, 114),
?assertEqual(LargePage, FullPage),
?assertEqual(LargeMeta, FullMeta),
Session2 = session(#{inflight => emqx_inflight:delete(1, Inflight)}),
{PageAfterRemove, #{start := StartAfterRemove}} = inflight_query(Session2, none, 10),
?assertEqual(<<"2">>, emqx_message:payload(hd(PageAfterRemove))),
?assertEqual(StartAfterRemove, inflight_ts(hd(PageAfterRemove))).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Test cases for sub/unsub %% Test cases for sub/unsub
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -275,9 +350,10 @@ t_pubrel_error_packetid_not_found(_) ->
{error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} = emqx_session_mem:pubrel(1, session()). {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} = emqx_session_mem:pubrel(1, session()).
t_pubcomp(_) -> t_pubcomp(_) ->
Inflight = emqx_inflight:insert(1, with_ts(wait_comp, undefined), emqx_inflight:new()), Msg = emqx_message:make(test, ?QOS_2, <<"t">>, <<>>),
Inflight = emqx_inflight:insert(1, with_ts(wait_comp, Msg), emqx_inflight:new()),
Session = session(#{inflight => Inflight}), Session = session(#{inflight => Inflight}),
{ok, undefined, [], Session1} = emqx_session_mem:pubcomp(clientinfo(), 1, Session), {ok, Msg, [], Session1} = emqx_session_mem:pubcomp(clientinfo(), 1, Session),
?assertEqual(0, emqx_session_mem:info(inflight_cnt, Session1)). ?assertEqual(0, emqx_session_mem:info(inflight_cnt, Session1)).
t_pubcomp_error_packetid_in_use(_) -> t_pubcomp_error_packetid_in_use(_) ->
@ -600,3 +676,8 @@ set_duplicate_pub({Id, Msg}) ->
get_packet_id({Id, _}) -> get_packet_id({Id, _}) ->
Id. Id.
inflight_query(Session, Pos, Limit) ->
emqx_session_mem:info({inflight_msgs, #{position => Pos, limit => Limit}}, Session).
inflight_ts(#message{extra = #{inflight_insert_ts := Ts}}) -> Ts.
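%% The same cursor pattern applies to the inflight API tested above
%% (a sketch; `S` is a session as returned by session/1):
%% {Page1, #{position := P1}} = inflight_query(S, none, 10),
%% {Page2, #{position := _P2}} = inflight_query(S, P1, 10),
%% ...repeating until a page comes back empty.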

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -3,7 +3,8 @@
{erl_opts, [debug_info]}. {erl_opts, [debug_info]}.
{deps, [ {deps, [
{hstreamdb_erl, {hstreamdb_erl,
{git, "https://github.com/hstreamdb/hstreamdb_erl.git", {tag, "0.5.18+v0.18.1"}}}, {git, "https://github.com/hstreamdb/hstreamdb_erl.git",
{tag, "0.5.18+v0.18.1+ezstd-v1.0.5-emqx1"}}},
{emqx, {path, "../../apps/emqx"}}, {emqx, {path, "../../apps/emqx"}},
{emqx_utils, {path, "../../apps/emqx_utils"}} {emqx_utils, {path, "../../apps/emqx_utils"}}
]}. ]}.

View File

@ -28,6 +28,17 @@ connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) ->
BridgeV1Config2 = emqx_utils_maps:deep_merge(ConnectorConfig, BridgeV1Config1), BridgeV1Config2 = emqx_utils_maps:deep_merge(ConnectorConfig, BridgeV1Config1),
emqx_utils_maps:rename(<<"parameters">>, <<"kafka">>, BridgeV1Config2). emqx_utils_maps:rename(<<"parameters">>, <<"kafka">>, BridgeV1Config2).
bridge_v1_config_to_action_config(BridgeV1Conf0 = #{<<"producer">> := _}, ConnectorName) ->
%% Ancient v1 config, when `kafka' key was wrapped by `producer'
BridgeV1Conf1 = emqx_utils_maps:unindent(<<"producer">>, BridgeV1Conf0),
BridgeV1Conf =
case maps:take(<<"mqtt">>, BridgeV1Conf1) of
{#{<<"topic">> := Topic}, BridgeV1Conf2} when is_binary(Topic) ->
BridgeV1Conf2#{<<"local_topic">> => Topic};
_ ->
maps:remove(<<"mqtt">>, BridgeV1Conf1)
end,
bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName);
bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName) -> bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName) ->
Config0 = emqx_action_info:transform_bridge_v1_config_to_action_config( Config0 = emqx_action_info:transform_bridge_v1_config_to_action_config(
BridgeV1Conf, ConnectorName, schema_module(), kafka_producer BridgeV1Conf, ConnectorName, schema_module(), kafka_producer
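%% Shape of the "ancient" v1 config unwrapped by the new clause above
%% (a sketch with hypothetical values; real configs carry more keys):
%% #{<<"producer">> =>
%%       #{<<"kafka">> => #{<<"topic">> => <<"kafka-topic">>},
%%         <<"mqtt">> => #{<<"topic">> => <<"t/#">>}}}
%% becomes, after unindenting `producer` and hoisting `mqtt.topic`:
%% #{<<"kafka">> => #{<<"topic">> => <<"kafka-topic">>},
%%   <<"local_topic">> => <<"t/#">>}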

View File

@ -6,7 +6,8 @@
-include_lib("eunit/include/eunit.hrl"). -include_lib("eunit/include/eunit.hrl").
-export([atoms/0]). -export([atoms/0, kafka_producer_old_hocon/1]).
%% ensure atoms exist %% ensure atoms exist
atoms() -> [myproducer, my_consumer]. atoms() -> [myproducer, my_consumer].

View File

@ -36,6 +36,7 @@ all() ->
emqx_common_test_helpers:all(?MODULE). emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) -> init_per_suite(Config) ->
emqx_common_test_helpers:clear_screen(),
ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
KafkaHost = os:getenv("KAFKA_PLAIN_HOST", "toxiproxy.emqx.net"), KafkaHost = os:getenv("KAFKA_PLAIN_HOST", "toxiproxy.emqx.net"),
@ -79,9 +80,22 @@ end_per_suite(Config) ->
emqx_cth_suite:stop(Apps), emqx_cth_suite:stop(Apps),
ok. ok.
init_per_testcase(t_ancient_v1_config_migration_with_local_topic = TestCase, Config) ->
Cluster = setup_cluster_ancient_config(TestCase, Config, #{with_local_topic => true}),
[{cluster, Cluster} | Config];
init_per_testcase(t_ancient_v1_config_migration_without_local_topic = TestCase, Config) ->
Cluster = setup_cluster_ancient_config(TestCase, Config, #{with_local_topic => false}),
[{cluster, Cluster} | Config];
init_per_testcase(_TestCase, Config) -> init_per_testcase(_TestCase, Config) ->
Config. Config.
end_per_testcase(TestCase, Config) when
TestCase =:= t_ancient_v1_config_migration_with_local_topic;
TestCase =:= t_ancient_v1_config_migration_without_local_topic
->
Cluster = ?config(cluster, Config),
emqx_cth_cluster:stop(Cluster),
ok;
end_per_testcase(_TestCase, Config) -> end_per_testcase(_TestCase, Config) ->
ProxyHost = ?config(proxy_host, Config), ProxyHost = ?config(proxy_host, Config),
ProxyPort = ?config(proxy_port, Config), ProxyPort = ?config(proxy_port, Config),
@ -94,6 +108,32 @@ end_per_testcase(_TestCase, Config) ->
%% Helper fns %% Helper fns
%%------------------------------------------------------------------------------------- %%-------------------------------------------------------------------------------------
basic_node_conf(WorkDir) ->
#{
<<"node">> => #{
<<"cookie">> => erlang:get_cookie(),
<<"data_dir">> => unicode:characters_to_binary(WorkDir)
}
}.
setup_cluster_ancient_config(TestCase, Config, #{with_local_topic := WithLocalTopic}) ->
AncientIOList = emqx_bridge_kafka_tests:kafka_producer_old_hocon(WithLocalTopic),
{ok, AncientCfg0} = hocon:binary(AncientIOList),
WorkDir = emqx_cth_suite:work_dir(TestCase, Config),
BasicConf = basic_node_conf(WorkDir),
AncientCfg = emqx_utils_maps:deep_merge(BasicConf, AncientCfg0),
Apps = [
emqx,
emqx_conf,
emqx_connector,
emqx_bridge_kafka,
{emqx_bridge, #{schema_mod => emqx_enterprise_schema, config => AncientCfg}}
],
emqx_cth_cluster:start(
[{kafka_producer_ancient_cfg1, #{apps => Apps}}],
#{work_dir => WorkDir}
).
check_send_message_with_bridge(BridgeName) -> check_send_message_with_bridge(BridgeName) ->
#{offset := Offset, payload := Payload} = send_message(BridgeName), #{offset := Offset, payload := Payload} = send_message(BridgeName),
%% ###################################### %% ######################################
@ -578,3 +618,23 @@ t_create_connector_while_connection_is_down(Config) ->
[] []
), ),
ok. ok.
t_ancient_v1_config_migration_with_local_topic(Config) ->
%% Simply starting this test case is enough: the core of the test is
%% that the node starts successfully with the ancient config.
[Node] = ?config(cluster, Config),
?assertMatch(
[#{type := <<"kafka_producer">>}],
erpc:call(Node, fun emqx_bridge_v2:list/0)
),
ok.
t_ancient_v1_config_migration_without_local_topic(Config) ->
%% Simply starting this test case is enough: the core of the test is
%% that the node starts successfully with the ancient config.
[Node] = ?config(cluster, Config),
?assertMatch(
[#{type := <<"kafka_producer">>}],
erpc:call(Node, fun emqx_bridge_v2:list/0)
),
ok.

View File

@ -79,6 +79,7 @@
-define(DEFAULT_MAX_PORTS, 1024 * 1024). -define(DEFAULT_MAX_PORTS, 1024 * 1024).
-define(LOG_THROTTLING_MSGS, [ -define(LOG_THROTTLING_MSGS, [
authentication_failure,
authorization_permission_denied, authorization_permission_denied,
cannot_publish_to_topic_due_to_not_authorized, cannot_publish_to_topic_due_to_not_authorized,
cannot_publish_to_topic_due_to_quota_exceeded, cannot_publish_to_topic_due_to_quota_exceeded,
@ -1277,6 +1278,15 @@ log_handler_common_confs(Handler, Default) ->
importance => ?IMPORTANCE_MEDIUM importance => ?IMPORTANCE_MEDIUM
} }
)}, )},
{"timestamp_format",
sc(
hoconsc:enum([auto, epoch, rfc3339]),
#{
default => auto,
desc => ?DESC("common_handler_timestamp_format"),
importance => ?IMPORTANCE_MEDIUM
}
)},
{"time_offset", {"time_offset",
sc( sc(
string(), string(),
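%% A hedged HOCON sketch of the new knob (handler paths assumed from the
%% test expectations below; `epoch` and `rfc3339` are the non-default values):
%% log.console { timestamp_format = rfc3339 }
%% log.file.default { timestamp_format = epoch }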

View File

@ -77,7 +77,8 @@ t_log_conf(_Conf) ->
<<"rotation_count">> => 10, <<"rotation_count">> => 10,
<<"rotation_size">> => <<"50MB">>, <<"rotation_size">> => <<"50MB">>,
<<"time_offset">> => <<"system">>, <<"time_offset">> => <<"system">>,
<<"path">> => <<"log/emqx.log">> <<"path">> => <<"log/emqx.log">>,
<<"timestamp_format">> => <<"auto">>
}, },
ExpectLog1 = #{ ExpectLog1 = #{
<<"console">> => <<"console">> =>
@ -85,7 +86,8 @@ t_log_conf(_Conf) ->
<<"enable">> => true, <<"enable">> => true,
<<"formatter">> => <<"text">>, <<"formatter">> => <<"text">>,
<<"level">> => <<"debug">>, <<"level">> => <<"debug">>,
<<"time_offset">> => <<"system">> <<"time_offset">> => <<"system">>,
<<"timestamp_format">> => <<"auto">>
}, },
<<"file">> => <<"file">> =>
#{<<"default">> => FileExpect}, #{<<"default">> => FileExpect},

View File

@ -131,8 +131,9 @@ log.file_handlers {
chars_limit => unlimited, chars_limit => unlimited,
depth => 100, depth => 100,
single_line => true, single_line => true,
template => [time, " [", level, "] ", msg, "\n"], template => ["[", level, "] ", msg, "\n"],
time_offset => TimeOffset time_offset => TimeOffset,
timestamp_format => auto
}} }}
). ).

View File

@ -238,7 +238,7 @@ transform_bridge_v1_config_to_action_config(
ActionMap0 = lists:foldl( ActionMap0 = lists:foldl(
fun fun
({enable, _Spec}, ToTransformSoFar) -> ({enable, _Spec}, ToTransformSoFar) ->
%% Enable filed is used in both %% Enable field is used in both
ToTransformSoFar; ToTransformSoFar;
({ConnectorFieldName, _Spec}, ToTransformSoFar) -> ({ConnectorFieldName, _Spec}, ToTransformSoFar) ->
ConnectorFieldNameBin = to_bin(ConnectorFieldName), ConnectorFieldNameBin = to_bin(ConnectorFieldName),

View File

@ -178,36 +178,27 @@ fields(hasnext) ->
>>, >>,
Meta = #{desc => Desc, required => true}, Meta = #{desc => Desc, required => true},
[{hasnext, hoconsc:mk(boolean(), Meta)}]; [{hasnext, hoconsc:mk(boolean(), Meta)}];
fields('after') -> fields(position) ->
Desc = <<
"The value of \"last\" field returned in the previous response. It can then be used"
" in subsequent requests to get the next chunk of results.<br/>"
"It is used instead of \"page\" parameter to traverse volatile data.<br/>"
"Can be omitted or set to \"none\" to get the first chunk of data.<br/>"
"\last\" = end_of_data\" is returned, if there is no more data.<br/>"
"Sending \"after=end_of_table\" back to the server will result in \"400 Bad Request\""
" error response."
>>,
Meta = #{
in => query, desc => Desc, required => false, example => <<"AAYS53qRa0n07AAABFIACg">>
},
[{'after', hoconsc:mk(hoconsc:union([none, end_of_data, binary()]), Meta)}];
fields(last) ->
Desc = << Desc = <<
"An opaque token that can then be in subsequent requests to get " "An opaque token that can then be in subsequent requests to get "
" the next chunk of results: \"?after={last}\"<br/>" " the next chunk of results: \"?position={prev_response.meta.position}\"<br/>"
"if there is no more data, \"last\" = end_of_data\" is returned.<br/>" "It is used instead of \"page\" parameter to traverse highly volatile data.<br/>"
"Sending \"after=end_of_table\" back to the server will result in \"400 Bad Request\"" "Can be omitted or set to \"none\" to get the first chunk of data."
" error response."
>>, >>,
Meta = #{ Meta = #{
desc => Desc, required => true, example => <<"AAYS53qRa0n07AAABFIACg">> in => query, desc => Desc, required => false, example => <<"none">>
}, },
[{last, hoconsc:mk(hoconsc:union([none, end_of_data, binary()]), Meta)}]; [{position, hoconsc:mk(hoconsc:union([none, end_of_data, binary()]), Meta)}];
fields(start) ->
Desc = <<"The position of the current first element of the data collection.">>,
Meta = #{
desc => Desc, required => true, example => <<"none">>
},
[{start, hoconsc:mk(hoconsc:union([none, binary()]), Meta)}];
fields(meta) -> fields(meta) ->
fields(page) ++ fields(limit) ++ fields(count) ++ fields(hasnext); fields(page) ++ fields(limit) ++ fields(count) ++ fields(hasnext);
fields(continuation_meta) -> fields(continuation_meta) ->
fields(last) ++ fields(count). fields(start) ++ fields(position).
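%% Usage sketch for the cursor fields documented above (hypothetical flow):
%% the first request omits `position` (or sends "?position=none"); each
%% response's `meta.position` is echoed back as
%% "?position={prev_response.meta.position}", and `meta.start` should stay
%% constant as long as the underlying collection is not modified.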
-spec schema_with_example(hocon_schema:type(), term()) -> hocon_schema:field_schema(). -spec schema_with_example(hocon_schema:type(), term()) -> hocon_schema:field_schema().
schema_with_example(Type, Example) -> schema_with_example(Type, Example) ->

View File

@ -78,7 +78,8 @@ t_audit_log_conf(_Config) ->
<<"rotation_count">> => 10, <<"rotation_count">> => 10,
<<"rotation_size">> => <<"50MB">>, <<"rotation_size">> => <<"50MB">>,
<<"time_offset">> => <<"system">>, <<"time_offset">> => <<"system">>,
<<"path">> => <<"log/emqx.log">> <<"path">> => <<"log/emqx.log">>,
<<"timestamp_format">> => <<"auto">>
}, },
ExpectLog1 = #{ ExpectLog1 = #{
<<"console">> => <<"console">> =>
@ -86,7 +87,8 @@ t_audit_log_conf(_Config) ->
<<"enable">> => false, <<"enable">> => false,
<<"formatter">> => <<"text">>, <<"formatter">> => <<"text">>,
<<"level">> => <<"warning">>, <<"level">> => <<"warning">>,
<<"time_offset">> => <<"system">> <<"time_offset">> => <<"system">>,
<<"timestamp_format">> => <<"auto">>
}, },
<<"file">> => <<"file">> =>
#{<<"default">> => FileExpect}, #{<<"default">> => FileExpect},
@ -99,7 +101,8 @@ t_audit_log_conf(_Config) ->
<<"max_filter_size">> => 5000, <<"max_filter_size">> => 5000,
<<"rotation_count">> => 10, <<"rotation_count">> => 10,
<<"rotation_size">> => <<"50MB">>, <<"rotation_size">> => <<"50MB">>,
<<"time_offset">> => <<"system">> <<"time_offset">> => <<"system">>,
<<"timestamp_format">> => <<"auto">>
} }
}, },
%% The default value of throttling.msgs can be frequently updated, %% The default value of throttling.msgs can be frequently updated,

View File

@ -234,7 +234,7 @@ parse_data(
<<Year:?BYTE, Month:?BYTE, Day:?BYTE, Hour:?BYTE, Minute:?BYTE, Second:?BYTE, Total:?BYTE, <<Year:?BYTE, Month:?BYTE, Day:?BYTE, Hour:?BYTE, Minute:?BYTE, Second:?BYTE, Total:?BYTE,
Rest/binary>> Rest/binary>>
) -> ) ->
%% XXX: need check ACK filed? %% XXX: need check ACK field?
#{ #{
<<"Time">> => #{ <<"Time">> => #{
<<"Year">> => Year, <<"Year">> => Year,

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang; -*- %% -*- mode: erlang; -*-
{deps, [ {deps, [
{jesse, {git, "https://github.com/emqx/jesse.git", {tag, "1.7.12"}}}, {jesse, {git, "https://github.com/emqx/jesse.git", {tag, "1.8.0"}}},
{emqx, {path, "../../apps/emqx"}}, {emqx, {path, "../../apps/emqx"}},
{emqx_utils, {path, "../emqx_utils"}}, {emqx_utils, {path, "../emqx_utils"}},
{emqx_gateway, {path, "../../apps/emqx_gateway"}} {emqx_gateway, {path, "../../apps/emqx_gateway"}}

View File

@ -80,24 +80,29 @@ update_setting(Setting) when is_map(Setting) ->
check(_ConnInfo, AckProps) -> check(_ConnInfo, AckProps) ->
case emqx_license_checker:limits() of case emqx_license_checker:limits() of
{ok, #{max_connections := ?ERR_EXPIRED}} -> {ok, #{max_connections := ?ERR_EXPIRED}} ->
?SLOG(error, #{msg => "connection_rejected_due_to_license_expired"}), ?SLOG(error, #{msg => "connection_rejected_due_to_license_expired"}, #{tag => "LICENSE"}),
{stop, {error, ?RC_QUOTA_EXCEEDED}}; {stop, {error, ?RC_QUOTA_EXCEEDED}};
{ok, #{max_connections := MaxClients}} -> {ok, #{max_connections := MaxClients}} ->
case check_max_clients_exceeded(MaxClients) of case check_max_clients_exceeded(MaxClients) of
true -> true ->
?SLOG_THROTTLE( ?SLOG_THROTTLE(
error, error,
#{msg => connection_rejected_due_to_license_limit_reached} #{msg => connection_rejected_due_to_license_limit_reached},
#{tag => "LICENSE"}
), ),
{stop, {error, ?RC_QUOTA_EXCEEDED}}; {stop, {error, ?RC_QUOTA_EXCEEDED}};
false -> false ->
{ok, AckProps} {ok, AckProps}
end; end;
{error, Reason} -> {error, Reason} ->
?SLOG(error, #{ ?SLOG(
msg => "connection_rejected_due_to_license_not_loaded", error,
reason => Reason #{
}), msg => "connection_rejected_due_to_license_not_loaded",
reason => Reason
},
#{tag => "LICENSE"}
),
{stop, {error, ?RC_QUOTA_EXCEEDED}} {stop, {error, ?RC_QUOTA_EXCEEDED}}
end. end.

View File

@ -172,11 +172,15 @@ refresh(State) ->
State. State.
log_new_license(Old, New) -> log_new_license(Old, New) ->
?SLOG(info, #{ ?SLOG(
msg => "new_license_loaded", info,
old_license => emqx_license_parser:summary(Old), #{
new_license => emqx_license_parser:summary(New) msg => "new_license_loaded",
}). old_license => emqx_license_parser:summary(Old),
new_license => emqx_license_parser:summary(New)
},
#{tag => "LICENSE"}
).
ensure_check_license_timer(#{check_license_interval := CheckInterval} = State) -> ensure_check_license_timer(#{check_license_interval := CheckInterval} = State) ->
ok = cancel_timer(State, check_timer), ok = cancel_timer(State, check_timer),

View File

@ -129,13 +129,17 @@ error_msg(Code, Msg) ->
'/license'(post, #{body := #{<<"key">> := Key}}) -> '/license'(post, #{body := #{<<"key">> := Key}}) ->
case emqx_license:update_key(Key) of case emqx_license:update_key(Key) of
{error, Error} -> {error, Error} ->
?SLOG(error, #{ ?SLOG(
msg => "bad_license_key", error,
reason => Error #{
}), msg => "bad_license_key",
reason => Error
},
#{tag => "LICENSE"}
),
{400, error_msg(?BAD_REQUEST, <<"Bad license key">>)}; {400, error_msg(?BAD_REQUEST, <<"Bad license key">>)};
{ok, _} -> {ok, _} ->
?SLOG(info, #{msg => "updated_license_key"}), ?SLOG(info, #{msg => "updated_license_key"}, #{tag => "LICENSE"}),
License = maps:from_list(emqx_license_checker:dump()), License = maps:from_list(emqx_license_checker:dump()),
{200, License} {200, License}
end; end;
@ -147,13 +151,17 @@ error_msg(Code, Msg) ->
'/license/setting'(put, #{body := Setting}) -> '/license/setting'(put, #{body := Setting}) ->
case emqx_license:update_setting(Setting) of case emqx_license:update_setting(Setting) of
{error, Error} -> {error, Error} ->
?SLOG(error, #{ ?SLOG(
msg => "bad_license_setting", error,
reason => Error #{
}), msg => "bad_license_setting",
reason => Error
},
#{tag => "LICENSE"}
),
{400, error_msg(?BAD_REQUEST, <<"Bad license setting">>)}; {400, error_msg(?BAD_REQUEST, <<"Bad license setting">>)};
{ok, _} -> {ok, _} ->
?SLOG(info, #{msg => "updated_license_setting"}), ?SLOG(info, #{msg => "updated_license_setting"}, #{tag => "LICENSE"}),
'/license/setting'(get, undefined) '/license/setting'(get, undefined)
end. end.

View File

@ -27,6 +27,7 @@
]). ]).
-export([open_ports_check/0]). -export([open_ports_check/0]).
-export([mria_lb_custom_info/0, mria_lb_custom_info_check/1]).
-ifdef(TEST). -ifdef(TEST).
-export([create_plan/0]). -export([create_plan/0]).
@ -51,6 +52,13 @@ start() ->
configure_shard_transports(), configure_shard_transports(),
set_mnesia_extra_diagnostic_checks(), set_mnesia_extra_diagnostic_checks(),
emqx_otel_app:configure_otel_deps(), emqx_otel_app:configure_otel_deps(),
%% Register mria callbacks that help to check compatibility of the
%% replicant with the core node. Currently they rely on the exact
%% match of the version of EMQX OTP application:
_ = application:load(mria),
_ = application:load(emqx),
mria_config:register_callback(lb_custom_info, fun ?MODULE:mria_lb_custom_info/0),
mria_config:register_callback(lb_custom_info_check, fun ?MODULE:mria_lb_custom_info_check/1),
ekka:start(), ekka:start(),
ok. ok.
@ -227,3 +235,21 @@ resolve_dist_address_type() ->
_ -> _ ->
inet inet
end. end.
%% Note: this function is stored in the Mria's application environment
mria_lb_custom_info() ->
get_emqx_vsn().
%% Note: this function is stored in the Mria's application environment
mria_lb_custom_info_check(undefined) ->
false;
mria_lb_custom_info_check(OtherVsn) ->
get_emqx_vsn() =:= OtherVsn.
get_emqx_vsn() ->
case application:get_key(emqx, vsn) of
{ok, Vsn} ->
Vsn;
undefined ->
undefined
end.
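%% A hedged illustration of the callback contract (assumes the `emqx`
%% application is loaded, so get_emqx_vsn/0 returns a version string):
%% Vsn = mria_lb_custom_info(),             %% e.g. "5.6.0"
%% true = mria_lb_custom_info_check(Vsn),   %% same version: compatible
%% false = mria_lb_custom_info_check(undefined),
%% false = mria_lb_custom_info_check("some-other-vsn").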

View File

@ -15,6 +15,3 @@
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-define(DEFAULT_ROW_LIMIT, 100). -define(DEFAULT_ROW_LIMIT, 100).
-define(URL_PARAM_INTEGER, url_param_integer).
-define(URL_PARAM_BINARY, url_param_binary).

View File

@ -359,6 +359,10 @@ kickout_client(ClientId) ->
case lookup_client({clientid, ClientId}, undefined) of case lookup_client({clientid, ClientId}, undefined) of
[] -> [] ->
{error, not_found}; {error, not_found};
[{ClientId, _}] ->
%% Offline durable session (client ID is a plain binary
%% without channel pid):
emqx_persistent_session_ds:kick_offline_session(ClientId);
_ -> _ ->
Results = [kickout_client(Node, ClientId) || Node <- emqx:running_nodes()], Results = [kickout_client(Node, ClientId) || Node <- emqx:running_nodes()],
check_results(Results) check_results(Results)
@ -372,6 +376,7 @@ kickout_clients(ClientIds) when is_list(ClientIds) ->
emqx_management_proto_v5:kickout_clients(Node, ClientIds) emqx_management_proto_v5:kickout_clients(Node, ClientIds)
end, end,
Results = lists:map(F, emqx:running_nodes()), Results = lists:map(F, emqx:running_nodes()),
lists:foreach(fun emqx_persistent_session_ds:kick_offline_session/1, ClientIds),
case lists:filter(fun(Res) -> Res =/= ok end, Results) of case lists:filter(fun(Res) -> Res =/= ok end, Results) of
[] -> [] ->
ok; ok;
@ -509,7 +514,7 @@ do_call_client(ClientId, Req) ->
Pid = lists:last(Pids), Pid = lists:last(Pids),
case emqx_cm:get_chan_info(ClientId, Pid) of case emqx_cm:get_chan_info(ClientId, Pid) of
#{conninfo := #{conn_mod := ConnMod}} -> #{conninfo := #{conn_mod := ConnMod}} ->
erlang:apply(ConnMod, call, [Pid, Req]); call_conn(ConnMod, Pid, Req);
undefined -> undefined ->
{error, not_found} {error, not_found}
end end
@ -698,3 +703,13 @@ check_results(Results) ->
default_row_limit() -> default_row_limit() ->
?DEFAULT_ROW_LIMIT. ?DEFAULT_ROW_LIMIT.
call_conn(ConnMod, Pid, Req) ->
try
erlang:apply(ConnMod, call, [Pid, Req])
catch
exit:R when R =:= shutdown; R =:= normal ->
{error, shutdown};
exit:{R, _} when R =:= shutdown; R =:= noproc ->
{error, shutdown}
end.

View File

@ -39,7 +39,6 @@
-export([ -export([
parse_pager_params/1, parse_pager_params/1,
parse_cont_pager_params/2, parse_cont_pager_params/2,
encode_cont_pager_params/2,
parse_qstring/2, parse_qstring/2,
init_query_result/0, init_query_result/0,
init_query_state/5, init_query_state/5,
@ -138,32 +137,18 @@ page(Params) ->
limit(Params) when is_map(Params) -> limit(Params) when is_map(Params) ->
maps:get(<<"limit">>, Params, emqx_mgmt:default_row_limit()). maps:get(<<"limit">>, Params, emqx_mgmt:default_row_limit()).
continuation(Params, Encoding) -> position(Params, Decoder) ->
try try
decode_continuation(maps:get(<<"after">>, Params, none), Encoding) decode_position(maps:get(<<"position">>, Params, none), Decoder)
catch catch
_:_ -> _:_ ->
error error
end. end.
decode_continuation(none, _Encoding) -> decode_position(none, _Decoder) ->
none; none;
decode_continuation(end_of_data, _Encoding) -> decode_position(Pos, Decoder) ->
%% Clients should not send "after=end_of_data" back to the server Decoder(Pos).
error;
decode_continuation(Cont, ?URL_PARAM_INTEGER) ->
binary_to_integer(Cont);
decode_continuation(Cont, ?URL_PARAM_BINARY) ->
emqx_utils:hexstr_to_bin(Cont).
encode_continuation(none, _Encoding) ->
none;
encode_continuation(end_of_data, _Encoding) ->
end_of_data;
encode_continuation(Cont, ?URL_PARAM_INTEGER) ->
integer_to_binary(Cont);
encode_continuation(Cont, ?URL_PARAM_BINARY) ->
emqx_utils:bin_to_hexstr(Cont, lower).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Node Query %% Node Query
@ -670,25 +655,18 @@ parse_pager_params(Params) ->
false false
end. end.
-spec parse_cont_pager_params(map(), ?URL_PARAM_INTEGER | ?URL_PARAM_BINARY) -> -spec parse_cont_pager_params(map(), fun((binary()) -> term())) ->
#{limit := pos_integer(), continuation := none | end_of_table | binary()} | false. #{limit := pos_integer(), position := none | term()} | false.
parse_cont_pager_params(Params, Encoding) -> parse_cont_pager_params(Params, PositionDecoder) ->
Cont = continuation(Params, Encoding), Pos = position(Params, PositionDecoder),
Limit = b2i(limit(Params)), Limit = b2i(limit(Params)),
case Limit > 0 andalso Cont =/= error of case Limit > 0 andalso Pos =/= error of
true -> true ->
#{continuation => Cont, limit => Limit}; #{position => Pos, limit => Limit};
false -> false ->
false false
end. end.
-spec encode_cont_pager_params(map(), ?URL_PARAM_INTEGER | ?URL_PARAM_BINARY) -> map().
encode_cont_pager_params(#{continuation := Cont} = Meta, ContEncoding) ->
Meta1 = maps:remove(continuation, Meta),
Meta1#{last => encode_continuation(Cont, ContEncoding)};
encode_cont_pager_params(Meta, _ContEncoding) ->
Meta.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Types %% Types
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------

View File

@ -90,6 +90,11 @@
message => <<"Client ID not found">> message => <<"Client ID not found">>
}). }).
-define(CLIENT_SHUTDOWN, #{
code => 'CLIENT_SHUTDOWN',
message => <<"Client connection has been shutdown">>
}).
namespace() -> undefined. namespace() -> undefined.
api_spec() -> api_spec() ->
@ -413,11 +418,11 @@ schema("/clients/:clientid/keepalive") ->
} }
}; };
schema("/clients/:clientid/mqueue_messages") -> schema("/clients/:clientid/mqueue_messages") ->
ContExample = <<"AAYS53qRa0n07AAABFIACg">>, ContExample = <<"1710785444656449826_10">>,
RespSchema = ?R_REF(mqueue_messages), RespSchema = ?R_REF(mqueue_messages),
client_msgs_schema(mqueue_msgs, ?DESC(get_client_mqueue_msgs), ContExample, RespSchema); client_msgs_schema(mqueue_msgs, ?DESC(get_client_mqueue_msgs), ContExample, RespSchema);
schema("/clients/:clientid/inflight_messages") -> schema("/clients/:clientid/inflight_messages") ->
ContExample = <<"10">>, ContExample = <<"1710785444656449826">>,
RespSchema = ?R_REF(inflight_messages), RespSchema = ?R_REF(inflight_messages),
client_msgs_schema(inflight_msgs, ?DESC(get_client_inflight_msgs), ContExample, RespSchema); client_msgs_schema(inflight_msgs, ?DESC(get_client_inflight_msgs), ContExample, RespSchema);
schema("/sessions_count") -> schema("/sessions_count") ->
@ -716,7 +721,7 @@ fields(unsubscribe) ->
]; ];
fields(mqueue_messages) -> fields(mqueue_messages) ->
[ [
{data, hoconsc:mk(hoconsc:array(?REF(message)), #{desc => ?DESC(mqueue_msgs_list)})}, {data, hoconsc:mk(hoconsc:array(?REF(mqueue_message)), #{desc => ?DESC(mqueue_msgs_list)})},
{meta, hoconsc:mk(hoconsc:ref(emqx_dashboard_swagger, continuation_meta), #{})} {meta, hoconsc:mk(hoconsc:ref(emqx_dashboard_swagger, continuation_meta), #{})}
]; ];
fields(inflight_messages) -> fields(inflight_messages) ->
@ -732,8 +737,18 @@ fields(message) ->
{publish_at, hoconsc:mk(integer(), #{desc => ?DESC(msg_publish_at)})}, {publish_at, hoconsc:mk(integer(), #{desc => ?DESC(msg_publish_at)})},
{from_clientid, hoconsc:mk(binary(), #{desc => ?DESC(msg_from_clientid)})}, {from_clientid, hoconsc:mk(binary(), #{desc => ?DESC(msg_from_clientid)})},
{from_username, hoconsc:mk(binary(), #{desc => ?DESC(msg_from_username)})}, {from_username, hoconsc:mk(binary(), #{desc => ?DESC(msg_from_username)})},
{payload, hoconsc:mk(binary(), #{desc => ?DESC(msg_payload)})} {payload, hoconsc:mk(binary(), #{desc => ?DESC(msg_payload)})},
{inserted_at, hoconsc:mk(binary(), #{desc => ?DESC(msg_inserted_at)})}
]; ];
fields(mqueue_message) ->
fields(message) ++
[
{mqueue_priority,
hoconsc:mk(
hoconsc:union([integer(), infinity]),
#{desc => ?DESC(msg_mqueue_priority)}
)}
];
fields(requested_client_fields) -> fields(requested_client_fields) ->
%% NOTE: some Client fields actually returned in response are missing in schema: %% NOTE: some Client fields actually returned in response are missing in schema:
%% enable_authn, is_persistent, listener, peerport %% enable_authn, is_persistent, listener, peerport
@ -980,7 +995,7 @@ client_msgs_schema(OpId, Desc, ContExample, RespSchema) ->
responses => #{ responses => #{
200 => 200 =>
emqx_dashboard_swagger:schema_with_example(RespSchema, #{ emqx_dashboard_swagger:schema_with_example(RespSchema, #{
<<"data">> => [message_example()], <<"data">> => [message_example(OpId)],
<<"meta">> => #{ <<"meta">> => #{
<<"count">> => 100, <<"count">> => 100,
<<"last">> => ContExample <<"last">> => ContExample
@ -991,7 +1006,10 @@ client_msgs_schema(OpId, Desc, ContExample, RespSchema) ->
['INVALID_PARAMETER'], <<"Invalid parameters">> ['INVALID_PARAMETER'], <<"Invalid parameters">>
), ),
404 => emqx_dashboard_swagger:error_codes( 404 => emqx_dashboard_swagger:error_codes(
['CLIENTID_NOT_FOUND'], <<"Client ID not found">> ['CLIENTID_NOT_FOUND', 'CLIENT_SHUTDOWN'], <<"Client ID not found">>
),
?NOT_IMPLEMENTED => emqx_dashboard_swagger:error_codes(
['NOT_IMPLEMENTED'], <<"API not implemented">>
) )
} }
} }
@ -1023,7 +1041,7 @@ client_msgs_params() ->
>>, >>,
validator => fun max_bytes_validator/1 validator => fun max_bytes_validator/1
})}, })},
hoconsc:ref(emqx_dashboard_swagger, 'after'), hoconsc:ref(emqx_dashboard_swagger, position),
hoconsc:ref(emqx_dashboard_swagger, limit) hoconsc:ref(emqx_dashboard_swagger, limit)
]. ].
@ -1260,22 +1278,53 @@ is_live_session(ClientId) ->
[] =/= emqx_cm_registry:lookup_channels(ClientId). [] =/= emqx_cm_registry:lookup_channels(ClientId).
list_client_msgs(MsgType, ClientID, QString) -> list_client_msgs(MsgType, ClientID, QString) ->
case emqx_mgmt_api:parse_cont_pager_params(QString, cont_encoding(MsgType)) of case emqx_mgmt_api:parse_cont_pager_params(QString, pos_decoder(MsgType)) of
false -> false ->
{400, #{code => <<"INVALID_PARAMETER">>, message => <<"after_limit_invalid">>}}; {400, #{code => <<"INVALID_PARAMETER">>, message => <<"position_limit_invalid">>}};
PagerParams = #{} -> PagerParams = #{} ->
case emqx_mgmt:list_client_msgs(MsgType, ClientID, PagerParams) of case emqx_mgmt:list_client_msgs(MsgType, ClientID, PagerParams) of
{error, not_found} -> {error, not_found} ->
{404, ?CLIENTID_NOT_FOUND}; {404, ?CLIENTID_NOT_FOUND};
{error, shutdown} ->
{404, ?CLIENT_SHUTDOWN};
{error, not_implemented} ->
{?NOT_IMPLEMENTED, #{
code => 'NOT_IMPLEMENTED',
message => <<"API not implemented for persistent sessions">>
}};
{Msgs, Meta = #{}} when is_list(Msgs) -> {Msgs, Meta = #{}} when is_list(Msgs) ->
format_msgs_resp(MsgType, Msgs, Meta, QString) format_msgs_resp(MsgType, Msgs, Meta, QString)
end end
end. end.
%% integer packet id pos_decoder(mqueue_msgs) -> fun decode_mqueue_pos/1;
cont_encoding(inflight_msgs) -> ?URL_PARAM_INTEGER; pos_decoder(inflight_msgs) -> fun decode_msg_pos/1.
%% binary message id
cont_encoding(mqueue_msgs) -> ?URL_PARAM_BINARY. encode_msgs_meta(_MsgType, #{start := StartPos, position := Pos}) ->
#{start => encode_pos(StartPos), position => encode_pos(Pos)}.
encode_pos(none) ->
none;
encode_pos({MsgPos, PrioPos}) ->
MsgPosBin = integer_to_binary(MsgPos),
PrioPosBin =
case PrioPos of
infinity -> <<"infinity">>;
_ -> integer_to_binary(PrioPos)
end,
<<MsgPosBin/binary, "_", PrioPosBin/binary>>;
encode_pos(Pos) when is_integer(Pos) ->
integer_to_binary(Pos).
-spec decode_mqueue_pos(binary()) -> {integer(), infinity | integer()}.
decode_mqueue_pos(Pos) ->
[MsgPos, PrioPos] = binary:split(Pos, <<"_">>),
{decode_msg_pos(MsgPos), decode_priority_pos(PrioPos)}.
decode_msg_pos(Pos) -> binary_to_integer(Pos).
decode_priority_pos(<<"infinity">>) -> infinity;
decode_priority_pos(Pos) -> binary_to_integer(Pos).
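%% Wire-format round trip for the cursors above (hypothetical numbers):
%% encode_pos({1710785444656449826, infinity})
%%     =:= <<"1710785444656449826_infinity">>,
%% decode_mqueue_pos(<<"1710785444656449826_10">>)
%%     =:= {1710785444656449826, 10},
%% and inflight positions are bare integers:
%% encode_pos(1710785444656449826) =:= <<"1710785444656449826">>.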
max_bytes_validator(MaxBytes) when is_integer(MaxBytes), MaxBytes > 0 -> max_bytes_validator(MaxBytes) when is_integer(MaxBytes), MaxBytes > 0 ->
ok; ok;
@ -1482,8 +1531,8 @@ format_msgs_resp(MsgType, Msgs, Meta, QString) ->
<<"payload">> := PayloadFmt, <<"payload">> := PayloadFmt,
<<"max_payload_bytes">> := MaxBytes <<"max_payload_bytes">> := MaxBytes
} = QString, } = QString,
Meta1 = emqx_mgmt_api:encode_cont_pager_params(Meta, cont_encoding(MsgType)), Meta1 = encode_msgs_meta(MsgType, Meta),
Resp = #{meta => Meta1, data => format_msgs(Msgs, PayloadFmt, MaxBytes)}, Resp = #{meta => Meta1, data => format_msgs(MsgType, Msgs, PayloadFmt, MaxBytes)},
%% Make sure minirest won't set another content-type for self-encoded JSON response body %% Make sure minirest won't set another content-type for self-encoded JSON response body
Headers = #{<<"content-type">> => <<"application/json">>}, Headers = #{<<"content-type">> => <<"application/json">>},
case emqx_utils_json:safe_encode(Resp) of case emqx_utils_json:safe_encode(Resp) of
@ -1499,13 +1548,13 @@ format_msgs_resp(MsgType, Msgs, Meta, QString) ->
?INTERNAL_ERROR(Error) ?INTERNAL_ERROR(Error)
end. end.
format_msgs([FirstMsg | Msgs], PayloadFmt, MaxBytes) -> format_msgs(MsgType, [FirstMsg | Msgs], PayloadFmt, MaxBytes) ->
%% Always include at least one message payload, even if it exceeds the limit %% Always include at least one message payload, even if it exceeds the limit
{FirstMsg1, PayloadSize0} = format_msg(FirstMsg, PayloadFmt), {FirstMsg1, PayloadSize0} = format_msg(MsgType, FirstMsg, PayloadFmt),
{Msgs1, _} = {Msgs1, _} =
catch lists:foldl( catch lists:foldl(
fun(Msg, {MsgsAcc, SizeAcc} = Acc) -> fun(Msg, {MsgsAcc, SizeAcc} = Acc) ->
{Msg1, PayloadSize} = format_msg(Msg, PayloadFmt), {Msg1, PayloadSize} = format_msg(MsgType, Msg, PayloadFmt),
case SizeAcc + PayloadSize of case SizeAcc + PayloadSize of
SizeAcc1 when SizeAcc1 =< MaxBytes -> SizeAcc1 when SizeAcc1 =< MaxBytes ->
{[Msg1 | MsgsAcc], SizeAcc1}; {[Msg1 | MsgsAcc], SizeAcc1};
@ -1517,10 +1566,11 @@ format_msgs([FirstMsg | Msgs], PayloadFmt, MaxBytes) ->
Msgs Msgs
), ),
lists:reverse(Msgs1); lists:reverse(Msgs1);
format_msgs([], _PayloadFmt, _MaxBytes) -> format_msgs(_MsgType, [], _PayloadFmt, _MaxBytes) ->
[]. [].
format_msg( format_msg(
MsgType,
#message{ #message{
id = ID, id = ID,
qos = Qos, qos = Qos,
@ -1529,10 +1579,10 @@ format_msg(
timestamp = Timestamp, timestamp = Timestamp,
headers = Headers, headers = Headers,
payload = Payload payload = Payload
}, } = Msg,
PayloadFmt PayloadFmt
) -> ) ->
Msg = #{ MsgMap = #{
msgid => emqx_guid:to_hexstr(ID), msgid => emqx_guid:to_hexstr(ID),
qos => Qos, qos => Qos,
topic => Topic, topic => Topic,
@ -1540,15 +1590,23 @@ format_msg(
from_clientid => emqx_utils_conv:bin(From), from_clientid => emqx_utils_conv:bin(From),
from_username => maps:get(username, Headers, <<>>) from_username => maps:get(username, Headers, <<>>)
}, },
format_payload(PayloadFmt, Msg, Payload). MsgMap1 = format_by_msg_type(MsgType, Msg, MsgMap),
format_payload(PayloadFmt, MsgMap1, Payload).
format_payload(none, Msg, _Payload) -> format_by_msg_type(mqueue_msgs, Msg, MsgMap) ->
{Msg, 0}; #message{extra = #{mqueue_priority := Prio, mqueue_insert_ts := Ts}} = Msg,
format_payload(base64, Msg, Payload) -> MsgMap#{mqueue_priority => Prio, inserted_at => integer_to_binary(Ts)};
format_by_msg_type(inflight_msgs, Msg, MsgMap) ->
#message{extra = #{inflight_insert_ts := Ts}} = Msg,
MsgMap#{inserted_at => integer_to_binary(Ts)}.
format_payload(none, MsgMap, _Payload) ->
{MsgMap, 0};
format_payload(base64, MsgMap, Payload) ->
Payload1 = base64:encode(Payload), Payload1 = base64:encode(Payload),
{Msg#{payload => Payload1}, erlang:byte_size(Payload1)}; {MsgMap#{payload => Payload1}, erlang:byte_size(Payload1)};
format_payload(plain, Msg, Payload) -> format_payload(plain, MsgMap, Payload) ->
{Msg#{payload => Payload}, erlang:iolist_size(Payload)}. {MsgMap#{payload => Payload}, erlang:iolist_size(Payload)}.
%% format func helpers %% format func helpers
take_maps_from_inner(_Key, Value, Current) when is_map(Value) -> take_maps_from_inner(_Key, Value, Current) when is_map(Value) ->
@ -1652,6 +1710,11 @@ client_example() ->
<<"durable">> => false <<"durable">> => false
}. }.
message_example(inflight_msgs) ->
message_example();
message_example(mqueue_msgs) ->
(message_example())#{<<"mqueue_priority">> => 0}.
message_example() -> message_example() ->
#{ #{
<<"msgid">> => <<"000611F460D57FA9F44500000D360002">>, <<"msgid">> => <<"000611F460D57FA9F44500000D360002">>,

View File

@ -96,6 +96,11 @@ fields(topic) ->
hoconsc:mk(binary(), #{ hoconsc:mk(binary(), #{
desc => <<"Node">>, desc => <<"Node">>,
required => true required => true
})},
{session,
hoconsc:mk(binary(), #{
desc => <<"Session ID">>,
required => false
})} })}
]. ].
@ -113,8 +118,8 @@ do_list(Params) ->
try try
Pager = parse_pager_params(Params), Pager = parse_pager_params(Params),
{_, Query} = emqx_mgmt_api:parse_qstring(Params, ?TOPICS_QUERY_SCHEMA), {_, Query} = emqx_mgmt_api:parse_qstring(Params, ?TOPICS_QUERY_SCHEMA),
QState = Pager#{continuation => undefined}, Stream = mk_topic_stream(qs2ms(Query)),
QResult = eval_topic_query(qs2ms(Query), QState), QResult = eval_topic_query(Stream, Pager, emqx_mgmt_api:init_query_result()),
{200, format_list_response(Pager, Query, QResult)} {200, format_list_response(Pager, Query, QResult)}
catch catch
throw:{error, page_limit_invalid} -> throw:{error, page_limit_invalid} ->
@ -160,31 +165,48 @@ gen_match_spec({topic, '=:=', QTopic}, {_MTopic, MNode}) when is_atom(MNode) ->
gen_match_spec({node, '=:=', QNode}, {MTopic, _MDest}) -> gen_match_spec({node, '=:=', QNode}, {MTopic, _MDest}) ->
{MTopic, QNode}. {MTopic, QNode}.
eval_topic_query(MS, QState) -> mk_topic_stream(Spec = {MTopic, _MDest = '_'}) ->
finalize_query(eval_topic_query(MS, QState, emqx_mgmt_api:init_query_result())). emqx_utils_stream:chain(emqx_router:stream(Spec), mk_persistent_topic_stream(MTopic));
mk_topic_stream(Spec) ->
%% NOTE: Assuming that no persistent topic ever matches a query with `node` filter.
emqx_router:stream(Spec).
eval_topic_query(MS, QState, QResult) -> mk_persistent_topic_stream(Spec) ->
case eval_topic_query_page(MS, QState) of case emqx_persistent_message:is_persistence_enabled() of
{Rows, '$end_of_table'} -> true ->
{_, NQResult} = emqx_mgmt_api:accumulate_query_rows(node(), Rows, QState, QResult), emqx_persistent_session_ds_router:stream(Spec);
NQResult#{complete => true}; false ->
{Rows, NCont} -> emqx_utils_stream:empty()
case emqx_mgmt_api:accumulate_query_rows(node(), Rows, QState, QResult) of
{more, NQResult} ->
eval_topic_query(MS, QState#{continuation := NCont}, NQResult);
{enough, NQResult} ->
NQResult#{complete => false}
end;
'$end_of_table' ->
QResult#{complete => true}
end. end.
eval_topic_query_page(MS, #{limit := Limit, continuation := Cont}) -> eval_count() ->
emqx_router:select(MS, Limit, Cont). emqx_router:stats(n_routes) + eval_persistent_count().
finalize_query(QResult = #{overflow := Overflow, complete := Complete}) -> eval_persistent_count() ->
case emqx_persistent_message:is_persistence_enabled() of
true ->
emqx_persistent_session_ds_router:stats(n_routes);
false ->
0
end.
eval_topic_query(Stream, QState = #{limit := Limit}, QResult) ->
case emqx_utils_stream:consume(Limit, Stream) of
{Rows, NStream} ->
case emqx_mgmt_api:accumulate_query_rows(node(), Rows, QState, QResult) of
{more, NQResult} ->
eval_topic_query(NStream, QState, NQResult);
{enough, NQResult} ->
finalize_query(false, NQResult)
end;
Rows when is_list(Rows) ->
{_, NQResult} = emqx_mgmt_api:accumulate_query_rows(node(), Rows, QState, QResult),
finalize_query(true, NQResult)
end.
finalize_query(Complete, QResult = #{overflow := Overflow}) ->
HasNext = Overflow orelse not Complete, HasNext = Overflow orelse not Complete,
QResult#{hasnext => HasNext}. QResult#{complete => Complete, hasnext => HasNext}.
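%% `emqx_utils_stream:consume/2` contract relied on above (as the case
%% clauses show): it returns {Rows, RestStream} while more elements remain,
%% and a plain list once the stream is exhausted, which is what lets
%% eval_topic_query/3 mark the result as complete.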
format_list_response(Meta, Query, QResult = #{rows := RowsAcc}) -> format_list_response(Meta, Query, QResult = #{rows := RowsAcc}) ->
#{ #{
@ -198,14 +220,16 @@ format_list_response(Meta, Query, QResult = #{rows := RowsAcc}) ->
format_response_meta(Meta, _Query, #{hasnext := HasNext, complete := true, cursor := Cursor}) -> format_response_meta(Meta, _Query, #{hasnext := HasNext, complete := true, cursor := Cursor}) ->
Meta#{hasnext => HasNext, count => Cursor}; Meta#{hasnext => HasNext, count => Cursor};
format_response_meta(Meta, _Query = {[], []}, #{hasnext := HasNext}) -> format_response_meta(Meta, _Query = {[], []}, #{hasnext := HasNext}) ->
Meta#{hasnext => HasNext, count => emqx_router:stats(n_routes)}; Meta#{hasnext => HasNext, count => eval_count()};
format_response_meta(Meta, _Query, #{hasnext := HasNext}) -> format_response_meta(Meta, _Query, #{hasnext := HasNext}) ->
Meta#{hasnext => HasNext}. Meta#{hasnext => HasNext}.
format(#route{topic = Topic, dest = {Group, Node}}) -> format(#route{topic = Topic, dest = {Group, Node}}) ->
#{topic => ?SHARE(Group, Topic), node => Node}; #{topic => ?SHARE(Group, Topic), node => Node};
format(#route{topic = Topic, dest = Node}) when is_atom(Node) -> format(#route{topic = Topic, dest = Node}) when is_atom(Node) ->
#{topic => Topic, node => Node}. #{topic => Topic, node => Node};
format(#route{topic = Topic, dest = SessionId}) when is_binary(SessionId) ->
#{topic => Topic, session => SessionId}.
topic_param(In) -> topic_param(In) ->
{ {

View File

@ -56,12 +56,10 @@ client_msgs_testcases() ->
]. ].
init_per_suite(Config) -> init_per_suite(Config) ->
ok = snabbkaffe:start_trace(),
emqx_mgmt_api_test_util:init_suite(), emqx_mgmt_api_test_util:init_suite(),
Config. Config.
end_per_suite(_) -> end_per_suite(_) ->
ok = snabbkaffe:stop(),
emqx_mgmt_api_test_util:end_suite(). emqx_mgmt_api_test_util:end_suite().
init_per_group(persistent_sessions, Config) -> init_per_group(persistent_sessions, Config) ->
@ -95,10 +93,15 @@ end_per_group(persistent_sessions, Config) ->
end_per_group(_Group, _Config) -> end_per_group(_Group, _Config) ->
ok. ok.
init_per_testcase(_TC, Config) ->
ok = snabbkaffe:start_trace(),
Config.
end_per_testcase(TC, _Config) when end_per_testcase(TC, _Config) when
TC =:= t_inflight_messages; TC =:= t_inflight_messages;
TC =:= t_mqueue_messages TC =:= t_mqueue_messages
-> ->
ok = snabbkaffe:stop(),
ClientId = atom_to_binary(TC), ClientId = atom_to_binary(TC),
lists:foreach(fun(P) -> exit(P, kill) end, emqx_cm:lookup_channels(local, ClientId)), lists:foreach(fun(P) -> exit(P, kill) end, emqx_cm:lookup_channels(local, ClientId)),
ok = emqx_common_test_helpers:wait_for( ok = emqx_common_test_helpers:wait_for(
@ -108,7 +111,7 @@ end_per_testcase(TC, _Config) when
5000 5000
); );
end_per_testcase(_TC, _Config) -> end_per_testcase(_TC, _Config) ->
ok. ok = snabbkaffe:stop().
t_clients(_) -> t_clients(_) ->
process_flag(trap_exit, true), process_flag(trap_exit, true),
@ -313,8 +316,7 @@ t_persistent_sessions2(Config) ->
%% 2) Client connects to the same node and takes over, listed only once. %% 2) Client connects to the same node and takes over, listed only once.
C2 = connect_client(#{port => Port1, clientid => ClientId}), C2 = connect_client(#{port => Port1, clientid => ClientId}),
assert_single_client(O#{node => N1, clientid => ClientId, status => connected}), assert_single_client(O#{node => N1, clientid => ClientId, status => connected}),
ok = emqtt:stop(C2), ok = emqtt:disconnect(C2, ?RC_SUCCESS, #{'Session-Expiry-Interval' => 0}),
ok = erpc:call(N1, emqx_persistent_session_ds, destroy_session, [ClientId]),
?retry( ?retry(
100, 100,
20, 20,
@ -322,9 +324,7 @@ t_persistent_sessions2(Config) ->
{ok, {{_, 200, _}, _, #{<<"data">> := []}}}, {ok, {{_, 200, _}, _, #{<<"data">> := []}}},
list_request(APIPort) list_request(APIPort)
) )
), )
ok
end, end,
[] []
), ),
@ -360,10 +360,7 @@ t_persistent_sessions3(Config) ->
list_request(APIPort, "node=" ++ atom_to_list(N1)) list_request(APIPort, "node=" ++ atom_to_list(N1))
) )
), ),
ok = emqtt:stop(C2), ok = emqtt:disconnect(C2, ?RC_SUCCESS, #{'Session-Expiry-Interval' => 0})
ok = erpc:call(N1, emqx_persistent_session_ds, destroy_session, [ClientId]),
ok
end, end,
[] []
), ),
@ -403,10 +400,7 @@ t_persistent_sessions4(Config) ->
list_request(APIPort, "node=" ++ atom_to_list(N1)) list_request(APIPort, "node=" ++ atom_to_list(N1))
) )
), ),
ok = emqtt:stop(C2), ok = emqtt:disconnect(C2, ?RC_SUCCESS, #{'Session-Expiry-Interval' => 0})
ok = erpc:call(N1, emqx_persistent_session_ds, destroy_session, [ClientId]),
ok
end, end,
[] []
), ),
@ -1076,18 +1070,19 @@ t_mqueue_messages(Config) ->
Path = emqx_mgmt_api_test_util:api_path(["clients", ClientId, "mqueue_messages"]), Path = emqx_mgmt_api_test_util:api_path(["clients", ClientId, "mqueue_messages"]),
?assert(Count =< emqx:get_config([mqtt, max_mqueue_len])), ?assert(Count =< emqx:get_config([mqtt, max_mqueue_len])),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(), AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
test_messages(Path, Topic, Count, AuthHeader, ?config(payload_encoding, Config)), IsMqueue = true,
test_messages(Path, Topic, Count, AuthHeader, ?config(payload_encoding, Config), IsMqueue),
?assertMatch( ?assertMatch(
{error, {_, 400, _}}, {error, {_, 400, _}},
emqx_mgmt_api_test_util:request_api( emqx_mgmt_api_test_util:request_api(
get, Path, "limit=10&after=not-base64%23%21", AuthHeader get, Path, "limit=10&position=not-valid", AuthHeader
) )
), ),
?assertMatch( ?assertMatch(
{error, {_, 400, _}}, {error, {_, 400, _}},
emqx_mgmt_api_test_util:request_api( emqx_mgmt_api_test_util:request_api(
get, Path, "limit=-5&after=not-base64%23%21", AuthHeader get, Path, "limit=-5&position=not-valid", AuthHeader
) )
). ).
@ -1099,18 +1094,21 @@ t_inflight_messages(Config) ->
Path = emqx_mgmt_api_test_util:api_path(["clients", ClientId, "inflight_messages"]), Path = emqx_mgmt_api_test_util:api_path(["clients", ClientId, "inflight_messages"]),
InflightLimit = emqx:get_config([mqtt, max_inflight]), InflightLimit = emqx:get_config([mqtt, max_inflight]),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(), AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
test_messages(Path, Topic, InflightLimit, AuthHeader, ?config(payload_encoding, Config)), IsMqueue = false,
test_messages(
Path, Topic, InflightLimit, AuthHeader, ?config(payload_encoding, Config), IsMqueue
),
?assertMatch( ?assertMatch(
{error, {_, 400, _}}, {error, {_, 400, _}},
emqx_mgmt_api_test_util:request_api( emqx_mgmt_api_test_util:request_api(
get, Path, "limit=10&after=not-int", AuthHeader get, Path, "limit=10&position=not-int", AuthHeader
) )
), ),
?assertMatch( ?assertMatch(
{error, {_, 400, _}}, {error, {_, 400, _}},
emqx_mgmt_api_test_util:request_api( emqx_mgmt_api_test_util:request_api(
get, Path, "limit=-5&after=invalid-int", AuthHeader get, Path, "limit=-5&position=invalid-int", AuthHeader
) )
), ),
emqtt:stop(Client). emqtt:stop(Client).
@ -1148,19 +1146,16 @@ publish_msgs(Topic, Count) ->
lists:seq(1, Count) lists:seq(1, Count)
). ).
test_messages(Path, Topic, Count, AuthHeader, PayloadEncoding) -> test_messages(Path, Topic, Count, AuthHeader, PayloadEncoding, IsMqueue) ->
Qs0 = io_lib:format("payload=~s", [PayloadEncoding]), Qs0 = io_lib:format("payload=~s", [PayloadEncoding]),
{ok, MsgsResp} = emqx_mgmt_api_test_util:request_api(get, Path, Qs0, AuthHeader), {ok, MsgsResp} = emqx_mgmt_api_test_util:request_api(get, Path, Qs0, AuthHeader),
#{<<"meta">> := Meta, <<"data">> := Msgs} = emqx_utils_json:decode(MsgsResp), #{<<"meta">> := Meta, <<"data">> := Msgs} = emqx_utils_json:decode(MsgsResp),
#{<<"start">> := StartPos, <<"position">> := Pos} = Meta,
?assertMatch( ?assertEqual(StartPos, msg_pos(hd(Msgs), IsMqueue)),
#{ ?assertEqual(Pos, msg_pos(lists:last(Msgs), IsMqueue)),
<<"last">> := <<"end_of_data">>,
<<"count">> := Count
},
Meta
),
?assertEqual(length(Msgs), Count), ?assertEqual(length(Msgs), Count),
lists:foreach( lists:foreach(
fun({Seq, #{<<"payload">> := P} = M}) -> fun({Seq, #{<<"payload">> := P} = M}) ->
?assertEqual(Seq, binary_to_integer(decode_payload(P, PayloadEncoding))), ?assertEqual(Seq, binary_to_integer(decode_payload(P, PayloadEncoding))),
@ -1171,10 +1166,12 @@ test_messages(Path, Topic, Count, AuthHeader, PayloadEncoding) ->
<<"qos">> := _, <<"qos">> := _,
<<"publish_at">> := _, <<"publish_at">> := _,
<<"from_clientid">> := _, <<"from_clientid">> := _,
<<"from_username">> := _ <<"from_username">> := _,
<<"inserted_at">> := _
}, },
M M
) ),
IsMqueue andalso ?assertMatch(#{<<"mqueue_priority">> := _}, M)
end, end,
lists:zip(lists:seq(1, Count), Msgs) lists:zip(lists:seq(1, Count), Msgs)
), ),
@ -1189,62 +1186,69 @@ test_messages(Path, Topic, Count, AuthHeader, PayloadEncoding) ->
get, Path, QsPayloadLimit, AuthHeader get, Path, QsPayloadLimit, AuthHeader
), ),
#{<<"meta">> := _, <<"data">> := FirstMsgOnly} = emqx_utils_json:decode(LimitedMsgsResp), #{<<"meta">> := _, <<"data">> := FirstMsgOnly} = emqx_utils_json:decode(LimitedMsgsResp),
ct:pal("~p", [FirstMsgOnly]),
?assertEqual(1, length(FirstMsgOnly)), ?assertEqual(1, length(FirstMsgOnly)),
?assertEqual( ?assertEqual(
<<"1">>, decode_payload(maps:get(<<"payload">>, hd(FirstMsgOnly)), PayloadEncoding) <<"1">>, decode_payload(maps:get(<<"payload">>, hd(FirstMsgOnly)), PayloadEncoding)
), ),
Limit = 19, Limit = 19,
LastCont = lists:foldl( LastPos = lists:foldl(
fun(PageSeq, Cont) -> fun(PageSeq, ThisPos) ->
Qs = io_lib:format("payload=~s&after=~s&limit=~p", [PayloadEncoding, Cont, Limit]), Qs = io_lib:format("payload=~s&position=~s&limit=~p", [PayloadEncoding, ThisPos, Limit]),
{ok, MsgsRespP} = emqx_mgmt_api_test_util:request_api(get, Path, Qs, AuthHeader), {ok, MsgsRespPage} = emqx_mgmt_api_test_util:request_api(get, Path, Qs, AuthHeader),
#{ #{
<<"meta">> := #{<<"last">> := NextCont} = MetaP, <<"meta">> := #{<<"position">> := NextPos, <<"start">> := ThisStart},
<<"data">> := MsgsP <<"data">> := MsgsPage
} = emqx_utils_json:decode(MsgsRespP), } = emqx_utils_json:decode(MsgsRespPage),
?assertMatch(#{<<"count">> := Count}, MetaP),
?assertNotEqual(<<"end_of_data">>, NextCont), ?assertEqual(NextPos, msg_pos(lists:last(MsgsPage), IsMqueue)),
?assertEqual(length(MsgsP), Limit), %% Start position is the same in every response and points to the first msg
?assertEqual(StartPos, ThisStart),
?assertEqual(length(MsgsPage), Limit),
ExpFirstPayload = integer_to_binary(PageSeq * Limit - Limit + 1), ExpFirstPayload = integer_to_binary(PageSeq * Limit - Limit + 1),
ExpLastPayload = integer_to_binary(PageSeq * Limit), ExpLastPayload = integer_to_binary(PageSeq * Limit),
?assertEqual( ?assertEqual(
ExpFirstPayload, decode_payload(maps:get(<<"payload">>, hd(MsgsP)), PayloadEncoding) ExpFirstPayload,
decode_payload(maps:get(<<"payload">>, hd(MsgsPage)), PayloadEncoding)
), ),
?assertEqual( ?assertEqual(
ExpLastPayload, ExpLastPayload,
decode_payload(maps:get(<<"payload">>, lists:last(MsgsP)), PayloadEncoding) decode_payload(maps:get(<<"payload">>, lists:last(MsgsPage)), PayloadEncoding)
), ),
NextCont NextPos
end, end,
none, none,
lists:seq(1, Count div 19) lists:seq(1, Count div 19)
), ),
LastPartialPage = Count div 19 + 1, LastPartialPage = Count div 19 + 1,
LastQs = io_lib:format("payload=~s&after=~s&limit=~p", [PayloadEncoding, LastCont, Limit]), LastQs = io_lib:format("payload=~s&position=~s&limit=~p", [PayloadEncoding, LastPos, Limit]),
{ok, MsgsRespLastP} = emqx_mgmt_api_test_util:request_api(get, Path, LastQs, AuthHeader), {ok, MsgsRespLastP} = emqx_mgmt_api_test_util:request_api(get, Path, LastQs, AuthHeader),
#{<<"meta">> := #{<<"last">> := EmptyCont} = MetaLastP, <<"data">> := MsgsLastP} = emqx_utils_json:decode( #{<<"meta">> := #{<<"position">> := LastPartialPos}, <<"data">> := MsgsLastPage} = emqx_utils_json:decode(
MsgsRespLastP MsgsRespLastP
), ),
?assertEqual(<<"end_of_data">>, EmptyCont), %% The same as the position of all messages returned in one request
?assertMatch(#{<<"count">> := Count}, MetaLastP), ?assertEqual(Pos, LastPartialPos),
?assertEqual( ?assertEqual(
integer_to_binary(LastPartialPage * Limit - Limit + 1), integer_to_binary(LastPartialPage * Limit - Limit + 1),
decode_payload(maps:get(<<"payload">>, hd(MsgsLastP)), PayloadEncoding) decode_payload(maps:get(<<"payload">>, hd(MsgsLastPage)), PayloadEncoding)
), ),
?assertEqual( ?assertEqual(
integer_to_binary(Count), integer_to_binary(Count),
decode_payload(maps:get(<<"payload">>, lists:last(MsgsLastP)), PayloadEncoding) decode_payload(maps:get(<<"payload">>, lists:last(MsgsLastPage)), PayloadEncoding)
), ),
ExceedQs = io_lib:format("payload=~s&after=~s&limit=~p", [ ExceedQs = io_lib:format("payload=~s&position=~s&limit=~p", [
PayloadEncoding, EmptyCont, Limit PayloadEncoding, LastPartialPos, Limit
]), ]),
{ok, MsgsEmptyResp} = emqx_mgmt_api_test_util:request_api(get, Path, ExceedQs, AuthHeader),
?assertMatch( ?assertMatch(
{error, {_, 400, _}}, #{
emqx_mgmt_api_test_util:request_api(get, Path, ExceedQs, AuthHeader) <<"data">> := [],
<<"meta">> := #{<<"position">> := LastPartialPos, <<"start">> := StartPos}
},
emqx_utils_json:decode(MsgsEmptyResp)
), ),
%% Invalid common page params %% Invalid common page params
@ -1275,6 +1279,11 @@ test_messages(Path, Topic, Count, AuthHeader, PayloadEncoding) ->
emqx_mgmt_api_test_util:request_api(get, Path, "max_payload_bytes=0MB", AuthHeader) emqx_mgmt_api_test_util:request_api(get, Path, "max_payload_bytes=0MB", AuthHeader)
). ).
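%% Position tokens as returned by the API and reconstructed by these tests:
%% mqueue positions are "<inserted_at>_<priority>", while in-flight positions
%% are just "<inserted_at>".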
msg_pos(#{<<"inserted_at">> := TsBin, <<"mqueue_priority">> := Prio} = _Msg, true = _IsMqueue) ->
<<TsBin/binary, "_", (emqx_utils_conv:bin(Prio))/binary>>;
msg_pos(#{<<"inserted_at">> := TsBin} = _Msg, _IsMqueue) ->
TsBin.
decode_payload(Payload, base64) -> decode_payload(Payload, base64) ->
base64:decode(Payload); base64:decode(Payload);
decode_payload(Payload, _) -> decode_payload(Payload, _) ->

View File

@ -27,7 +27,7 @@ all() ->
init_per_suite(Config) -> init_per_suite(Config) ->
Apps = emqx_cth_suite:start( Apps = emqx_cth_suite:start(
[ [
emqx, {emqx, "session_persistence.enable = true"},
emqx_management, emqx_management,
emqx_mgmt_api_test_util:emqx_dashboard() emqx_mgmt_api_test_util:emqx_dashboard()
], ],
@ -204,13 +204,90 @@ t_shared_topics_invalid(_Config) ->
emqx_utils_json:decode(Body, [return_maps]) emqx_utils_json:decode(Body, [return_maps])
). ).
t_persistent_topics(_Config) ->
PersistentOpts = #{
proto_ver => v5,
properties => #{'Session-Expiry-Interval' => 300}
},
Client1 = client(t_persistent_topics_m1),
Client2 = client(t_persistent_topics_m2),
SessionId1 = <<"t_persistent_topics_p1">>,
SessionId2 = <<"t_persistent_topics_p2">>,
ClientPersistent1 = client(SessionId1, PersistentOpts),
ClientPersistent2 = client(SessionId2, PersistentOpts),
_ = [
?assertMatch({ok, _, _}, emqtt:subscribe(Client, Topic))
|| {Client, Topics} <- [
{Client1, [<<"t/client/mem">>, <<"t/+">>]},
{Client2, [<<"t/client/mem">>, <<"t/+">>]},
{ClientPersistent1, [<<"t/persistent/#">>, <<"t/client/ps">>, <<"t/+">>]},
{ClientPersistent2, [<<"t/persistent/#">>, <<"t/client/ps">>, <<"t/+">>]}
],
Topic <- Topics
],
Matched = request_json(get, ["topics"]),
?assertMatch(
#{<<"page">> := 1, <<"limit">> := 100, <<"count">> := 8},
maps:get(<<"meta">>, Matched)
),
%% Get back both topics for both persistent and in-memory subscriptions.
Expected = [
#{<<"topic">> => <<"t/+">>, <<"node">> => atom_to_binary(node())},
#{<<"topic">> => <<"t/+">>, <<"session">> => SessionId1},
#{<<"topic">> => <<"t/+">>, <<"session">> => SessionId2},
#{<<"topic">> => <<"t/client/mem">>, <<"node">> => atom_to_binary(node())},
#{<<"topic">> => <<"t/client/ps">>, <<"session">> => SessionId1},
#{<<"topic">> => <<"t/client/ps">>, <<"session">> => SessionId2},
#{<<"topic">> => <<"t/persistent/#">>, <<"session">> => SessionId1},
#{<<"topic">> => <<"t/persistent/#">>, <<"session">> => SessionId2}
],
?assertEqual(
lists:sort(Expected),
lists:sort(maps:get(<<"data">>, Matched))
),
%% Are results the same when paginating?
#{<<"data">> := Page1} = R1 = request_json(get, ["topics"], [{"page", "1"}, {"limit", "3"}]),
#{<<"data">> := Page2} = request_json(get, ["topics"], [{"page", "2"}, {"limit", "3"}]),
#{<<"data">> := Page3} = request_json(get, ["topics"], [{"page", "3"}, {"limit", "3"}]),
?assertEqual(
lists:sort(Expected),
lists:sort(Page1 ++ Page2 ++ Page3)
),
%% Count respects persistent sessions.
?assertMatch(
#{
<<"meta">> := #{<<"page">> := 1, <<"limit">> := 3, <<"count">> := 8},
<<"data">> := [_, _, _]
},
R1
),
%% Filtering by node makes no sense for persistent sessions.
?assertMatch(
#{
<<"data">> := [
#{<<"topic">> := <<"t/client/mem">>, <<"node">> := _},
#{<<"topic">> := <<"t/+">>, <<"node">> := _}
],
<<"meta">> := #{<<"page">> := 1, <<"limit">> := 100, <<"count">> := 2}
},
request_json(get, ["topics"], [{"node", atom_to_list(node())}])
).
%% Utilities %% Utilities
client(Name) -> client(Name) ->
{ok, Client} = emqtt:start_link(#{ client(Name, #{}).
username => emqx_utils_conv:bin(Name),
clientid => emqx_utils_conv:bin(Name) client(Name, Overrides) ->
}), {ok, Client} = emqtt:start_link(
maps:merge(
#{
username => emqx_utils_conv:bin(Name),
clientid => emqx_utils_conv:bin(Name)
},
Overrides
)
),
{ok, _} = emqtt:connect(Client), {ok, _} = emqtt:connect(Client),
Client. Client.

View File

@ -124,9 +124,12 @@ ensure_otel_metrics(
) -> ) ->
ok; ok;
ensure_otel_metrics(#{metrics := #{enable := true}} = Conf, _Old) -> ensure_otel_metrics(#{metrics := #{enable := true}} = Conf, _Old) ->
ok = emqx_otel_cpu_sup:stop_otel_cpu_sup(),
_ = emqx_otel_cpu_sup:start_otel_cpu_sup(Conf),
_ = emqx_otel_metrics:stop_otel(), _ = emqx_otel_metrics:stop_otel(),
emqx_otel_metrics:start_otel(Conf); emqx_otel_metrics:start_otel(Conf);
ensure_otel_metrics(#{metrics := #{enable := false}}, _Old) -> ensure_otel_metrics(#{metrics := #{enable := false}}, _Old) ->
ok = emqx_otel_cpu_sup:stop_otel_cpu_sup(),
emqx_otel_metrics:stop_otel(); emqx_otel_metrics:stop_otel();
ensure_otel_metrics(_, _) -> ensure_otel_metrics(_, _) ->
ok. ok.

View File

@ -0,0 +1,146 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_otel_cpu_sup).
-behaviour(gen_server).
-include_lib("emqx/include/logger.hrl").
%% gen_server APIs
-export([start_link/1]).
-export([
start_otel_cpu_sup/1,
stop_otel_cpu_sup/0,
stats/1
]).
%% gen_server callbacks
-export([
init/1,
handle_continue/2,
handle_call/3,
handle_cast/2,
handle_info/2,
terminate/2,
code_change/3
]).
-define(REFRESH, refresh).
-define(OTEL_CPU_USAGE_WORKER, ?MODULE).
-define(SUPERVISOR, emqx_otel_sup).
%%--------------------------------------------------------------------
%% API
%%--------------------------------------------------------------------
start_otel_cpu_sup(Conf) ->
Spec = emqx_otel_sup:worker_spec(?MODULE, Conf),
assert_started(supervisor:start_child(?SUPERVISOR, Spec)).
stop_otel_cpu_sup() ->
case erlang:whereis(?SUPERVISOR) of
undefined ->
ok;
Pid ->
case supervisor:terminate_child(Pid, ?MODULE) of
ok -> supervisor:delete_child(Pid, ?MODULE);
{error, not_found} -> ok;
Error -> Error
end
end.
stats(Name) ->
gen_server:call(?OTEL_CPU_USAGE_WORKER, {?FUNCTION_NAME, Name}, infinity).
%%--------------------------------------------------------------------
%% gen_server callbacks
%% centralize cpu_sup:util/0,1 calls in this single process
%%--------------------------------------------------------------------
start_link(Conf) ->
gen_server:start_link({local, ?OTEL_CPU_USAGE_WORKER}, ?MODULE, Conf, []).
init(Conf) ->
{ok, _InitState = #{}, {continue, {setup, Conf}}}.
%% Interval in milliseconds
handle_continue({setup, #{metrics := #{enable := true, interval := Interval}}}, State) ->
%% ensure os_mon and its dependencies are started
{ok, _} = application:ensure_all_started(os_mon),
%% The returned value of the first call to cpu_sup:util/0 or cpu_sup:util/1 by a
%% process will on most systems be the CPU utilization since system boot,
%% but this is not guaranteed and the value should therefore be regarded as garbage.
%% This also applies to the first call after a restart of cpu_sup.
_Val = cpu_sup:util(),
TRef = start_refresh_timer(Interval),
{noreply, State#{interval => Interval, refresh_time_ref => TRef}}.
handle_call({stats, Name}, _From, State) ->
{reply, get_stats(Name, State), State};
handle_call(stop, _From, State) ->
cancel_outdated_timer(State),
{stop, normal, State};
handle_call(Req, _From, State) ->
?SLOG(error, #{msg => "unexpected_call", call => Req}),
{reply, ignored, State}.
handle_cast(Msg, State) ->
?SLOG(error, #{msg => "unexpected_cast", cast => Msg}),
{noreply, State}.
handle_info({timeout, _Timer, ?REFRESH}, State) ->
{noreply, refresh(State)}.
terminate(_Reason, _State) ->
ok.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
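%% Sample CPU utilization via cpu_sup and cache the readings in the server
%% state, so stats/1 calls read the cached values instead of hitting os_mon.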
refresh(#{interval := Interval} = State) ->
NState =
case cpu_sup:util([]) of
{all, U, I, _} ->
State#{'cpu.use' => U, 'cpu.idle' => I};
_ ->
State#{'cpu.use' => 0, 'cpu.idle' => 0}
end,
TRef = start_refresh_timer(Interval),
NState#{refresh_time_ref => TRef}.
get_stats(Name, State) ->
maps:get(Name, State, 0).
cancel_outdated_timer(#{refresh_time_ref := TRef}) ->
emqx_utils:cancel_timer(TRef),
ok.
start_refresh_timer(Interval) ->
start_timer(Interval, ?REFRESH).
start_timer(Interval, Msg) ->
emqx_utils:start_timer(Interval, Msg).
assert_started({ok, _Pid}) -> ok;
assert_started({ok, _Pid, _Info}) -> ok;
assert_started({error, {already_started, _Pid}}) -> ok;
assert_started({error, Reason}) -> {error, Reason}.

View File

@ -197,6 +197,10 @@ bytes_metrics() ->
get_stats_gauge(Name) -> get_stats_gauge(Name) ->
[{emqx_stats:getstat(Name), #{}}]. [{emqx_stats:getstat(Name), #{}}].
get_vm_gauge('cpu.use') ->
[{emqx_otel_cpu_sup:stats('cpu.use'), #{}}];
get_vm_gauge('cpu.idle') ->
[{emqx_otel_cpu_sup:stats('cpu.idle'), #{}}];
get_vm_gauge(Name) -> get_vm_gauge(Name) ->
[{emqx_mgmt:vm_stats(Name), #{}}]. [{emqx_mgmt:vm_stats(Name), #{}}].
@ -254,8 +258,6 @@ create_counter(Meter, Counters, CallBack) ->
Counters Counters
). ).
%% Note: list_to_existing_atom("cpu.use") will crash
%% so we make sure the atom is already existing here
normalize_name(cpu_use) -> normalize_name(cpu_use) ->
'cpu.use'; 'cpu.use';
normalize_name(cpu_idle) -> normalize_name(cpu_idle) ->

View File

@ -42,7 +42,12 @@ init([]) ->
}, },
Children = Children =
case emqx_conf:get([opentelemetry]) of case emqx_conf:get([opentelemetry]) of
#{metrics := #{enable := false}} -> []; #{metrics := #{enable := false}} ->
#{metrics := #{enable := true}} = Conf -> [worker_spec(emqx_otel_metrics, Conf)] [];
#{metrics := #{enable := true}} = Conf ->
[
worker_spec(emqx_otel_metrics, Conf),
worker_spec(emqx_otel_cpu_sup, Conf)
]
end, end,
{ok, {SupFlags, Children}}. {ok, {SupFlags, Children}}.

View File

@ -6,7 +6,7 @@
{emqx_utils, {path, "../emqx_utils"}}, {emqx_utils, {path, "../emqx_utils"}},
{emqx_rule_engine, {path, "../emqx_rule_engine"}}, {emqx_rule_engine, {path, "../emqx_rule_engine"}},
{erlavro, {git, "https://github.com/emqx/erlavro.git", {tag, "2.10.0"}}}, {erlavro, {git, "https://github.com/emqx/erlavro.git", {tag, "2.10.0"}}},
{jesse, {git, "https://github.com/emqx/jesse.git", {tag, "1.7.12"}}}, {jesse, {git, "https://github.com/emqx/jesse.git", {tag, "1.8.0"}}},
{gpb, "4.19.9"} {gpb, "4.19.9"}
]}. ]}.

View File

@ -37,6 +37,9 @@
%% Timestamp (Unit: millisecond) %% Timestamp (Unit: millisecond)
timestamp :: integer(), timestamp :: integer(),
%% Miscellaneous extensions, currently used for OpenTelemetry context propagation %% Miscellaneous extensions, currently used for OpenTelemetry context propagation
%% and storing mqueue/inflight insertion timestamps.
%% It was not used prior to 5.4.0 and defaulted to an empty list.
%% Must be a map now.
extra = #{} :: term() extra = #{} :: term()
}). }).

View File

@ -1,6 +1,7 @@
Implement log throttling. The feature reduces the number of potentially flooding logged events by Implement log throttling. The feature reduces the number of potentially flooding logged events by
dropping all but the first event within a configured time window. dropping all but the first event within a configured time window.
Throttling is applied to the following log events: Throttling is applied to the following log events:
- authentication_failure,
- authorization_permission_denied, - authorization_permission_denied,
- cannot_publish_to_topic_due_to_not_authorized, - cannot_publish_to_topic_due_to_not_authorized,
- cannot_publish_to_topic_due_to_quota_exceeded, - cannot_publish_to_topic_due_to_quota_exceeded,
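The gist of the mechanism, as a minimal sketch in plain Erlang (illustrative only, not the actual implementation; module and function names are made up here):
```
%% Allow the first event per key within WindowMs; drop repeats.
-module(log_throttle_sketch).
-export([allow/2]).

allow(Key, WindowMs) ->
    Now = erlang:monotonic_time(millisecond),
    case get({throttled, Key}) of
        Last when is_integer(Last), Now - Last < WindowMs ->
            false;
        _ ->
            put({throttled, Key}, Now),
            true
    end.
```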

View File

@ -1,21 +1,20 @@
Implement HTTP APIs to get the list of client's inflight and mqueue messages. Implement HTTP APIs to get the list of client's in-flight and mqueue messages.
To get the first chunk of data: To get the first chunk of data:
- GET /clients/{clientid}/mqueue_messages?limit=100 - GET /clients/{clientid}/mqueue_messages?limit=100
- GET /clients/{clientid}/inflight_messages?limit=100 - GET /clients/{clientid}/inflight_messages?limit=100
Alternatively: Alternatively:
- GET /clients/{clientid}/mqueue_messages?limit=100&after=none - GET /clients/{clientid}/mqueue_messages?limit=100&position=none
- GET /clients/{clientid}/inflight_messages?limit=100&after=none - GET /clients/{clientid}/inflight_messages?limit=100&position=none
To get the next chunk of data: To get the next chunk of data:
- GET /clients/{clientid}/mqueue_messages?limit=100&after={last} - GET /clients/{clientid}/mqueue_messages?limit=100&position={position}
- GET /clients/{clientid}/inflight_messages?limit=100&after={last} - GET /clients/{clientid}/inflight_messages?limit=100&position={position}
Where {last} is a value (opaque string token) of "meta.last" field from the previous response. Where {position} is a value (opaque string token) of "meta.position" field from the previous response.
If there is no more data, "last" = "end_of_data" is returned. Mqueue messages are ordered according to their priority and queue (FIFO) order: from higher priority to lower priority.
If a subsequent request is attempted with "after=end_of_data", a "400 Bad Request" error response will be received. By default, all messages in Mqueue have the same priority of 0.
Mqueue messages are ordered according to the queue (FIFO) order. In-flight messages are ordered by time at which they were inserted to the in-flight storage (from older to newer messages).
Inflight messages are ordered by MQTT Packet Id, which may not represent the chronological messages order.
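A minimal sketch of walking the pages (illustrative only; it reuses the `emqx_mgmt_api_test_util:request_api/4` helper that appears in the test suites above, and the `fetch_all` name is made up here):
```
fetch_all(Path, AuthHeader) ->
    fetch_all(Path, AuthHeader, "none", []).

fetch_all(Path, AuthHeader, Pos, Acc) ->
    Qs = io_lib:format("limit=100&position=~s", [Pos]),
    {ok, Resp} = emqx_mgmt_api_test_util:request_api(get, Path, Qs, AuthHeader),
    #{<<"data">> := Msgs, <<"meta">> := #{<<"position">> := Next}} =
        emqx_utils_json:decode(Resp),
    case Msgs of
        %% An empty page means the position points past the last message.
        [] -> lists:append(lists:reverse(Acc));
        _ -> fetch_all(Path, AuthHeader, binary_to_list(Next), [Msgs | Acc])
    end.
```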

View File

@ -0,0 +1,3 @@
Add `username` log field.
If an MQTT client is connected with a non-empty username, the logs and traces will include the `username` field.

View File

@ -0,0 +1,10 @@
Add `timestamp_format` config to log handlers.
We've added a new configuration option `timestamp_format` to the log handlers.
This new config supports the following values:
- `auto`: Automatically determines the timestamp format based on the log formatter being used.
Utilizes `rfc3339` format for text formatters, and `epoch` format for JSON formatters.
- `epoch`: Represents timestamps in microsecond-precision Unix epoch format.
- `rfc3339`: Uses RFC3339 compliant format for date-time strings. For example: `2024-03-26T11:52:19.777087+00:00`.
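For example, a console handler emitting JSON with epoch timestamps might be configured as follows (a sketch; the `log.console` handler path is an assumption here):
```
log.console {
  formatter = json
  timestamp_format = epoch
}
```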

View File

@ -0,0 +1 @@
Add support for kickout of durable sessions.

View File

@ -0,0 +1,6 @@
Fixed an issue that could occur when performing a rolling upgrade, especially when upgrading from a version earlier than 5.4.0.
When the cluster is empty (more precisely, when the routing tables are empty), the node now additionally asks the other cluster nodes for the routing schema in use, to make a more informed decision about the routing storage schema upon startup. This makes the routing storage schema less likely to diverge across cluster nodes, especially when the cluster is composed of different versions of EMQX.
This version also logs instructions on how to manually resolve the conflict if one is detected in a running cluster.

View File

@ -0,0 +1,2 @@
Add a strict check that prevents replicant nodes from connecting to core nodes running a different version of the EMQX application.
Effectively, this means that during rolling upgrades the replicant nodes can only work if there is at least one core node running the matching EMQX release.

changes/e5.6.0.en.md Normal file
View File

@ -0,0 +1,269 @@
# e5.6.0
## Enhancements
- [#12326](https://github.com/emqx/emqx/pull/12326) Enhanced session tracking with registration history. EMQX now has the capability to monitor the history of session registrations, including those that have expired. By configuring `broker.session_history_retain`, EMQX retains records of expired sessions for a specified duration.
- **Session count API**: Use the API `GET /api/v5/sessions_count?since=1705682238` to obtain a count of sessions across the cluster that remained active since the given UNIX epoch timestamp (with seconds precision). This enhancement aids in analyzing session activity over time.
- **Metrics expansion with cluster sessions gauge**: A new gauge metric, `cluster_sessions`, is added to better track the number of sessions within the cluster. This metric is also integrated into Prometheus for easy monitoring:
```
# TYPE emqx_cluster_sessions_count gauge
emqx_cluster_sessions_count 1234
```
NOTE: Please consider this metric as an approximate estimation. Due to the asynchronous nature of data collection and calculation, exact precision may vary.
- [#12398](https://github.com/emqx/emqx/pull/12398) Exposed the `swagger_support` option in the Dashboard configuration, allowing for the enabling or disabling of the Swagger API documentation.
- [#12467](https://github.com/emqx/emqx/pull/12467) Started supporting cluster discovery using AAAA DNS record type.
- [#12483](https://github.com/emqx/emqx/pull/12483) Renamed `emqx ctl conf cluster_sync tnxid ID` to `emqx ctl conf cluster_sync inspect ID`.
For backward compatibility, `tnxid` is kept, but considered deprecated and will be removed in 5.7.
- [#12495](https://github.com/emqx/emqx/pull/12495) Introduced new AWS S3 connector and action.
- [#12499](https://github.com/emqx/emqx/pull/12499) Enhanced client banning capabilities with extended rules, including:
* Matching `clientid` against a specified regular expression.
* Matching client's `username` against a specified regular expression.
* Matching client's peer address against a CIDR range.
**Important Notice**: Implementing a large number of broad matching rules (not specific to an individual clientid, username, or host) may affect system performance. It's advised to use these extended ban rules judiciously to maintain optimal system efficiency.
- [#12509](https://github.com/emqx/emqx/pull/12509) Implemented API to re-order all authenticators / authorization sources.
- [#12517](https://github.com/emqx/emqx/pull/12517) Configuration files have been upgraded to accommodate multi-line string values, preserving indentation for enhanced readability and maintainability. This improvement utilizes `"""~` and `~"""` markers to quote indented lines, offering a structured and clear way to define complex configurations. For example:
```
rule_xlu4 {
sql = """~
SELECT
*
FROM
"t/#"
~"""
}
```
See [HOCON 0.42.0](https://github.com/emqx/hocon/releases/tag/0.42.0) release notes for details.
- [#12520](https://github.com/emqx/emqx/pull/12520) Implemented log throttling. The feature reduces the volume of logged events that could potentially flood the system by dropping all but the first occurrence of an event within a configured time window.
Log throttling is applied to the following log events that are critical yet prone to repetition:
- `authentication_failure`
- `authorization_permission_denied`
- `cannot_publish_to_topic_due_to_not_authorized`
- `cannot_publish_to_topic_due_to_quota_exceeded`
- `connection_rejected_due_to_license_limit_reached`
- `dropped_msg_due_to_mqueue_is_full`
- [#12561](https://github.com/emqx/emqx/pull/12561) Implemented HTTP APIs to get the list of client's in-flight and message queue (mqueue) messages. These APIs facilitate detailed insights and effective control over message queues and in-flight messaging, ensuring efficient message handling and monitoring.
To get the first chunk of data:
- `GET /clients/{clientid}/mqueue_messages?limit=100`
- `GET /clients/{clientid}/inflight_messages?limit=100`
Alternatively, for the first chunks without specifying a start position:
- `GET /clients/{clientid}/mqueue_messages?limit=100&position=none`
- `GET /clients/{clientid}/inflight_messages?limit=100&position=none`
To get the next chunk of data:
- `GET /clients/{clientid}/mqueue_messages?limit=100&position={position}`
- `GET /clients/{clientid}/inflight_messages?limit=100&position={position}`
Where `{position}` is a value (opaque string token) of the `meta.position` field from the previous response.
Ordering and Prioritization:
- **Mqueue Messages**: These are prioritized and sequenced based on their queue order (FIFO), from higher to lower priority. By default, mqueue messages carry a uniform priority level of 0.
- **In-Flight Messages**: Sequenced by the timestamp of their insertion into the in-flight storage, from oldest to newest.
- [#12590](https://github.com/emqx/emqx/pull/12590) Removed `mfa` meta data from log messages to improve clarity.
- [#12641](https://github.com/emqx/emqx/pull/12641) Improved text log formatter fields order. The new fields order is as follows:
`tag` > `clientid` > `msg` > `peername` > `username` > `topic` > [other fields]
- [#12670](https://github.com/emqx/emqx/pull/12670) Added field `shared_subscriptions` to endpoint `/monitor_current` and `/monitor_current/nodes/:node`.
- [#12679](https://github.com/emqx/emqx/pull/12679) Upgraded docker image base from Debian 11 to Debian 12.
- [#12700](https://github.com/emqx/emqx/pull/12700) Started supporting "b" and "B" unit in bytesize hocon fields.
For example, all three fields below will have the value of 1024 bytes:
```
bytesize_field = "1024b"
bytesize_field2 = "1024B"
bytesize_field3 = 1024
```
- [#12719](https://github.com/emqx/emqx/pull/12719) The `/clients` API has been upgraded to accommodate queries for multiple `clientid`s and `username`s simultaneously, offering a more flexible and powerful tool for monitoring client connections. Additionally, this update introduces the capability to customize which client information fields are included in the API response, optimizing for specific monitoring needs.
Examples of Multi-Client/Username Queries:
- To query multiple clients by ID: `/clients?clientid=client1&clientid=client2`
- To query multiple users: `/clients?username=user11&username=user2`
- To combine multiple client IDs and usernames in one query: `/clients?clientid=client1&clientid=client2&username=user1&username=user2`
Examples of Selecting Fields for the Response:
- To include all fields in the response: `/clients?fields=all` (Note: Omitting the `fields` parameter defaults to returning all fields.)
- To specify only certain fields: `/clients?fields=clientid,username`
- [#12330](https://github.com/emqx/emqx/pull/12330) The Cassandra bridge has been split into connector and action components. They are backwards compatible with the bridge HTTP API. Configuration will be upgraded automatically.
- [#12353](https://github.com/emqx/emqx/pull/12353) The OpenTSDB bridge has been split into connector and action components. They are backwards compatible with the bridge HTTP API. Configuration will be upgraded automatically.
- [#12376](https://github.com/emqx/emqx/pull/12376) The Kinesis bridge has been split into connector and action components. They are backwards compatible with the bridge HTTP API. Configuration will be upgraded automatically.
- [#12386](https://github.com/emqx/emqx/pull/12386) The GreptimeDB bridge has been split into connector and action components. They are backwards compatible with the bridge HTTP API. Configuration will be upgraded automatically.
- [#12423](https://github.com/emqx/emqx/pull/12423) The RabbitMQ bridge has been split into connector, action and source components. They are backwards compatible with the bridge HTTP API. Configuration will be upgraded automatically.
- [#12425](https://github.com/emqx/emqx/pull/12425) The ClickHouse bridge has been split into connector and action components. They are backwards compatible with the bridge HTTP API. Configuration will be upgraded automatically.
- [#12439](https://github.com/emqx/emqx/pull/12439) The Oracle bridge has been split into connector and action components. They are backwards compatible with the bridge HTTP API. Configuration will be upgraded automatically.
- [#12449](https://github.com/emqx/emqx/pull/12449) The TDEngine bridge has been split into connector and action components. They are backwards compatible with the bridge HTTP API. Configuration will be upgraded automatically.
- [#12488](https://github.com/emqx/emqx/pull/12488) The RocketMQ bridge has been split into connector and action components. They are backwards compatible with the bridge HTTP API. Configuration will be upgraded automatically.
- [#12512](https://github.com/emqx/emqx/pull/12512) The HStreamDB bridge has been split into connector and action components. They are backwards compatible with the bridge HTTP API. Configuration will be upgraded automatically, however, it is recommended to do the upgrade manually as new fields have been added to the configuration.
- [#12543](https://github.com/emqx/emqx/pull/12543) The DynamoDB bridge has been split into connector and action components. They are backwards compatible with the bridge HTTP API. Configuration will be upgraded automatically.
- [#12595](https://github.com/emqx/emqx/pull/12595) The Kafka Consumer bridge has been split into connector and source components. They are backwards compatible with the bridge HTTP API. Configuration will be upgraded automatically.
- [#12619](https://github.com/emqx/emqx/pull/12619) The Microsoft SQL Server bridge has been split into connector and action components. They are backwards compatible with the bridge HTTP API. Configuration will be upgraded automatically.
- [#12381](https://github.com/emqx/emqx/pull/12381) Added new SQL functions: `map_keys()`, `map_values()`, `map_to_entries()`, `join_to_string()`, `join_to_string()`, `join_to_sql_values_string()`, `is_null_var()`, `is_not_null_var()`.
For more information on the functions and their usage, refer to [Built-in SQL Functions](../data-integration/rule-sql-builtin-functions) in the documentation.
- [#12427](https://github.com/emqx/emqx/pull/12427) Introduced the capability to specify a limit on the number of Kafka partitions that can be used for Kafka data integration.
- [#12577](https://github.com/emqx/emqx/pull/12577) Updated the `service_account_json` field for both the GCP PubSub Producer and Consumer connectors to accept JSON-encoded strings. Now, it's possible to set this field to a JSON-encoded string. Using the previous format (a HOCON map) is still supported but not encouraged.
- [#12581](https://github.com/emqx/emqx/pull/12581) Added JSON schema to schema registry.
JSON Schema supports [Draft 03](http://tools.ietf.org/html/draft-zyp-json-schema-03), [Draft 04](http://tools.ietf.org/html/draft-zyp-json-schema-04) and [Draft 05](https://datatracker.ietf.org/doc/html/draft-wright-json-schema-00).
- [#12602](https://github.com/emqx/emqx/pull/12602) Enhanced health checking for IoTDB connector, using its `ping` API instead of just checking for an existing socket connection.
- [#12336](https://github.com/emqx/emqx/pull/12336) Refined the approach to managing asynchronous tasks by segregating the cleanup of channels into its own dedicated pool. This separation addresses performance issues encountered during channels cleanup under conditions of high network latency, ensuring that such tasks do not impede the efficiency of other asynchronous operations, such as route cleanup.
- [#12725](https://github.com/emqx/emqx/pull/12725) Implemented REST API to list the available source types.
- [#12746](https://github.com/emqx/emqx/pull/12746) Added `username` log field. If an MQTT client is connected with a non-empty username, the logs and traces will include the `username` field.
- [#12785](https://github.com/emqx/emqx/pull/12785) Added `timestamp_format` configuration option to log handlers. This new option allows for the following settings:
- `auto`: Automatically determines the timestamp format based on the log formatter being used.
Utilizes `rfc3339` format for text formatters, and `epoch` format for JSON formatters.
- `epoch`: Represents timestamps in microsecond-precision Unix epoch format.
- `rfc3339`: Uses RFC3339 compliant format for date-time strings. For example, `2024-03-26T11:52:19.777087+00:00`.
## Bug Fixes
- [#11868](https://github.com/emqx/emqx/pull/11868) Fixed a bug where will messages were not published after session takeover.
- [#12347](https://github.com/emqx/emqx/pull/12347) Implemented an update to ensure that messages processed by the Rule SQL for the MQTT egress data bridge are always rendered as valid, even in scenarios where the data is incomplete or lacks certain placeholders defined in the bridge configuration. This adjustment prevents messages from being incorrectly deemed invalid and subsequently discarded by the MQTT egress data bridge, as was the case previously.
When variables in `payload` and `topic` templates are undefined, they are now rendered as empty strings instead of the literal `undefined` string.
- [#12472](https://github.com/emqx/emqx/pull/12472) Fixed an issue where certain read operations on `/api/v5/actions/` and `/api/v5/sources/` endpoints might result in a `500` error code during the process of rolling upgrades.
- [#12492](https://github.com/emqx/emqx/pull/12492) EMQX now returns the `Receive-Maximum` property in the `CONNACK` message for MQTT v5 clients, aligning with protocol expectations. This implementation considers the minimum value of the client's `Receive-Maximum` setting and the server's `max_inflight` configuration as the limit for the number of inflight (unacknowledged) messages permitted. Previously, the determined value was not sent back to the client in the `CONNACK` message.
NOTE: A current known issue with these enhanced API responses is that the total client count provided may exceed the actual number of clients due to the inclusion of disconnected sessions.
- [#12505](https://github.com/emqx/emqx/pull/12505) Upgraded the Kafka producer client `wolff` from version 1.10.1 to 1.10.2. This latest version maintains a long-lived metadata connection for each connector, optimizing EMQX's performance by reducing the frequency of establishing new connections for action and connector health checks.
- [#12513](https://github.com/emqx/emqx/pull/12513) Changed the level of several flooding log events from `warning` to `info`.
- [#12530](https://github.com/emqx/emqx/pull/12530) Improved the error reporting for `frame_too_large` events and malformed `CONNECT` packet parsing failures. These updates now provide additional information, aiding in the troubleshooting process.
- [#12541](https://github.com/emqx/emqx/pull/12541) Introduced a new configuration validation step for autocluster by DNS records to ensure compatibility between `node.name` and `cluster.discover_strategy`. Specifically, when utilizing the `dns` strategy with either `a` or `aaaa` record types, it is mandatory for all nodes to use a (static) IP address as the host name.
- [#12566](https://github.com/emqx/emqx/pull/12566) Enhanced the bootstrap file for REST API keys:
- Empty lines within the file are now skipped, eliminating the previous behavior of generating an error.
- API keys specified in the bootstrap file are assigned the highest precedence. In cases where a new key from the bootstrap file conflicts with an existing key, the older key will be automatically removed to ensure that the bootstrap keys take effect without issue.
- [#12646](https://github.com/emqx/emqx/pull/12646) Fixed an issue with the rule engine's date-time string parser. Previously, time zone adjustments were only effective for date-time strings specified with second-level precision.
- [#12652](https://github.com/emqx/emqx/pull/12652) Fixed a discrepancy where the subbits functions with 4 and 5 parameters, despite being documented, were missing from the actual implementation. These functions have now been added.
- [#12663](https://github.com/emqx/emqx/pull/12663) Fixed an issue where the `emqx_vm_cpu_use` and `emqx_vm_cpu_idle` metrics, accessible via the Prometheus endpoint `/prometheus/stats`, were inaccurately reflecting the average CPU usage since the operating system boot. This fix ensures that these metrics now accurately represent the current CPU usage and idle, providing more relevant and timely data for monitoring purposes.
- [#12668](https://github.com/emqx/emqx/pull/12668) Refactored the SQL function `date_to_unix_ts()` by using `calendar:datetime_to_gregorian_seconds/1`.
This change also added validation for the input date format.
- [#12672](https://github.com/emqx/emqx/pull/12672) Changed the process for generating the node boot configuration by incorporating the loading of `{data_dir}/configs/cluster.hocon`. Previously, changes to logging configurations made via the Dashboard and saved in `{data_dir}/configs/cluster.hocon` were only applied after the initial boot configuration was generated using `etc/emqx.conf`, leading to potential loss of some log segment files due to late reconfiguration.
Now, both `{data_dir}/configs/cluster.hocon` and `etc/emqx.conf` are loaded concurrently, with settings from `emqx.conf` taking precedence, to create the boot configuration.
- [#12696](https://github.com/emqx/emqx/pull/12696) Fixed an issue where attempting to reconnect an action or source could lead to wrong error messages being returned in the HTTP API.
- [#12714](https://github.com/emqx/emqx/pull/12714) Fixed inaccuracies in several metrics reported by the `/prometheus/stats` endpoint of the Prometheus API. The correction applies to the following metrics:
- `emqx_cluster_sessions_count`
- `emqx_cluster_sessions_max`
- `emqx_cluster_nodes_running`
- `emqx_cluster_nodes_stopped`
- `emqx_subscriptions_shared_count`
- `emqx_subscriptions_shared_max`
Additionally, this fix rectified an issue within the `/stats` endpoint concerning the `subscriptions.shared.count` and `subscriptions.shared.max` fields. Previously, these values failed to update promptly following a client's disconnection or unsubscription from a Shared-Subscription.
- [#12390](https://github.com/emqx/emqx/pull/12390) Fixed an issue where the `/license` API request may crash during cluster joining processes.
- [#12411](https://github.com/emqx/emqx/pull/12411) Fixed a bug where `null` values would be inserted as `1853189228` in `int` columns in Cassandra data integration.
- [#12522](https://github.com/emqx/emqx/pull/12522) Refined the parsing process for Kafka bootstrap hosts to exclude spaces following commas, addressing connection timeouts and DNS resolution failures due to malformed host entries.
- [#12656](https://github.com/emqx/emqx/pull/12656) Implemented a topic verification step for creating GCP PubSub Producer actions, ensuring failure notifications when the topic doesn't exist or provided credentials lack sufficient permissions.
- [#12678](https://github.com/emqx/emqx/pull/12678) Enhanced the DynamoDB connector to clearly report the reason for connection failures, improving upon the previous lack of error insights.
- [#12681](https://github.com/emqx/emqx/pull/12681) Fixed a security issue where secrets could be logged at debug level when sending messages to a RocketMQ bridge/action.
- [#12715](https://github.com/emqx/emqx/pull/12715) Fixed a crash that could occur during configuration updates if the connector for the ingress data integration source had active channels.
- [#12767](https://github.com/emqx/emqx/pull/12767) Fixed issues encountered during upgrades from 5.0.1 to 5.5.1, specifically related to Kafka Producer configurations that led to upgrade failures. The correction ensures that Kafka Producer configurations are accurately transformed into the new action and connector configuration format required by EMQX version 5.5.1 and beyond.
- [#12768](https://github.com/emqx/emqx/pull/12768) Addressed a startup failure issue in EMQX version 5.4.0 and later, particularly noted during rolling upgrades from versions before 5.4.0. The issue was related to the initialization of the routing schema when both v1 and v2 routing tables were empty.
The node now attempts to retrieve the routing schema version in use across the cluster instead of using the v2 routing table by default when local routing tables are found empty at startup. This approach mitigates potential conflicts and reduces the chances of diverging routing storage schemas among cluster nodes, especially in a mixed-version cluster scenario.
If a conflict is detected in a running cluster, EMQX writes instructions on how to manually resolve it to the log as part of the error message with `critical` severity. The same error message and instructions will also be written to standard error to make sure this message will not get lost even if no log handler is configured.
- [#12786](https://github.com/emqx/emqx/pull/12786) Added a strict check that prevents replicant nodes from connecting to core nodes running a different version of the EMQX application.
This check ensures that during rolling upgrades, the replicant nodes can only work when at least one core node is running the same EMQX release version.
## Breaking Changes
- [#12576](https://github.com/emqx/emqx/pull/12576) Starting from 5.6, the "Configuration Manual" document will no longer include the `bridges` config root.
A `bridge` is now either `action` + `connector` for egress data integration, or `source` + `connector` for ingress data integration.
Please note that the `bridges` config (in `cluster.hocon`) and the REST API path `api/v5/bridges` still works, but considered deprecated.
- [#12634](https://github.com/emqx/emqx/pull/12634) Triple-quote string values in HOCON config files no longer support escape sequence.
The detailed information can be found in [this pull request](https://github.com/emqx/hocon/pull/290).
Here is a summary of the impact on EMQX users:
- EMQX 5.6 is the first version to generate triple-quote strings in `cluster.hocon`,
meaning for generated configs, there is no compatibility issue.
- For user hand-crafted configs (such as `emqx.conf`) a thorough review is needed
to inspect if escape sequences are used (such as `\n`, `\r`, `\t` and `\\`); if so,
such strings should be changed to regular quotes (one pair of `"`) instead of triple-quotes, as illustrated below.
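An assumed illustration of the difference:
```
a = """x\ny"""   # 5.6: kept as a literal backslash followed by "n"
b = "x\ny"       # escape processed: "x", newline, "y"
```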

View File

@ -0,0 +1 @@
Correctly migrate older Kafka Producer configurations (pre 5.0.2) to action and connector configurations.

changes/v5.6.0.en.md Normal file
View File

@ -0,0 +1,230 @@
# v5.6.0
## Enhancements
- [#12251](https://github.com/emqx/emqx/pull/12251) Optimized the performance of the RocksDB-based persistent sessions, achieving a reduction in RAM usage and database request frequency. Key improvements include:
- Introduced dirty session state to avoid frequent mria transactions.
- Introduced an intermediate buffer for the persistent messages.
- Used separate tracks of PacketIds for QoS1 and QoS2 messages.
- Limited the number of continuous ranges of inflight messages to 1 per stream.
- [#12326](https://github.com/emqx/emqx/pull/12326) Enhanced session tracking with registration history. EMQX now has the capability to monitor the history of session registrations, including those that have expired. By configuring `broker.session_history_retain`, EMQX retains records of expired sessions for a specified duration.
- **Session count API**: Use the API `GET /api/v5/sessions_count?since=1705682238` to obtain a count of sessions across the cluster that remained active since the given UNIX epoch timestamp (with seconds precision). This enhancement aids in analyzing session activity over time.
- **Metrics expansion with cluster sessions gauge**: A new gauge metric, `cluster_sessions`, is added to better track the number of sessions within the cluster. This metric is also integrated into Prometheus for easy monitoring:
```
# TYPE emqx_cluster_sessions_count gauge
emqx_cluster_sessions_count 1234
```
NOTE: Please consider this metric as an approximate estimation. Due to the asynchronous nature of data collection and calculation, exact precision may vary.
- [#12338](https://github.com/emqx/emqx/pull/12338) Introduced a time-based garbage collection mechanism to the RocksDB-based persistent session backend. This feature ensures more efficient management of stored messages, optimizing storage utilization and system performance by automatically purging outdated messages.
- [#12398](https://github.com/emqx/emqx/pull/12398) Exposed the `swagger_support` option in the Dashboard configuration, allowing for the enabling or disabling of the Swagger API documentation.
- [#12467](https://github.com/emqx/emqx/pull/12467) Started supporting cluster discovery using AAAA DNS record type.
- [#12483](https://github.com/emqx/emqx/pull/12483) Renamed `emqx ctl conf cluster_sync tnxid ID` to `emqx ctl conf cluster_sync inspect ID`.
For backward compatibility, `tnxid` is kept, but considered deprecated and will be removed in 5.7.
- [#12499](https://github.com/emqx/emqx/pull/12499) Enhanced client banning capabilities with extended rules, including:
* Matching `clientid` against a specified regular expression.
* Matching client's `username` against a specified regular expression.
* Matching client's peer address against a CIDR range.
**Important Notice**: Implementing a large number of broad matching rules (not specific to an individual clientid, username, or host) may affect system performance. It's advised to use these extended ban rules judiciously to maintain optimal system efficiency.
- [#12509](https://github.com/emqx/emqx/pull/12509) Implemented API to re-order all authenticators / authorization sources.
- [#12517](https://github.com/emqx/emqx/pull/12517) Configuration files have been upgraded to accommodate multi-line string values, preserving indentation for enhanced readability and maintainability. This improvement utilizes `"""~` and `~"""` markers to quote indented lines, offering a structured and clear way to define complex configurations. For example:
```
rule_xlu4 {
sql = """~
SELECT
*
FROM
"t/#"
~"""
}
```
See [HOCON 0.42.0](https://github.com/emqx/hocon/releases/tag/0.42.0) release notes for details.
- [#12520](https://github.com/emqx/emqx/pull/12520) Implemented log throttling. The feature reduces the volume of logged events that could potentially flood the system by dropping all but the first occurrence of an event within a configured time window.
Log throttling is applied to the following log events that are critical yet prone to repetition:
- `authentication_failure`
- `authorization_permission_denied`
- `cannot_publish_to_topic_due_to_not_authorized`
- `cannot_publish_to_topic_due_to_quota_exceeded`
- `connection_rejected_due_to_license_limit_reached`
- `dropped_msg_due_to_mqueue_is_full`
- [#12561](https://github.com/emqx/emqx/pull/12561) Implemented HTTP APIs to get the list of client's in-flight and message queue (mqueue) messages. These APIs facilitate detailed insights and effective control over message queues and in-flight messaging, ensuring efficient message handling and monitoring.
To get the first chunk of data:
- `GET /clients/{clientid}/mqueue_messages?limit=100`
- `GET /clients/{clientid}/inflight_messages?limit=100`
Alternatively, for the first chunks without specifying a start position:
- `GET /clients/{clientid}/mqueue_messages?limit=100&position=none`
- `GET /clients/{clientid}/inflight_messages?limit=100&position=none`
To get the next chunk of data:
- `GET /clients/{clientid}/mqueue_messages?limit=100&position={position}`
- `GET /clients/{clientid}/inflight_messages?limit=100&position={position}`
Where `{position}` is a value (opaque string token) of the `meta.position` field from the previous response.
Ordering and Prioritization:
- **Mqueue Messages**: These are prioritized and sequenced based on their queue order (FIFO), from higher to lower priority. By default, mqueue messages carry a uniform priority level of 0.
- **In-Flight Messages**: Sequenced by the timestamp of their insertion into the in-flight storage, from oldest to newest.
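A paging walk-through sketch; the API key, client ID, and opaque position token are illustrative:
```
## first chunk
curl -s -u '<api-key>:<secret>' \
  'http://localhost:18083/api/v5/clients/c1/inflight_messages?limit=100'
## next chunk: pass back the meta.position value from the previous response
curl -s -u '<api-key>:<secret>' \
  'http://localhost:18083/api/v5/clients/c1/inflight_messages?limit=100&position=<opaque-token>'
```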
- [#12590](https://github.com/emqx/emqx/pull/12590) Removed `mfa` metadata from log messages to improve clarity.
- [#12641](https://github.com/emqx/emqx/pull/12641) Improved the field order in the text log formatter. The new order is as follows:
`tag` > `clientid` > `msg` > `peername` > `username` > `topic` > [other fields]
- [#12670](https://github.com/emqx/emqx/pull/12670) Added the field `shared_subscriptions` to the endpoints `/monitor_current` and `/monitor_current/nodes/:node`.
- [#12679](https://github.com/emqx/emqx/pull/12679) Upgraded the Docker image base from Debian 11 to Debian 12.
- [#12700](https://github.com/emqx/emqx/pull/12700) Started supporting the "b" and "B" units in bytesize HOCON fields. For example, all three fields below have the value of 1024 bytes:
```
bytesize_field = "1024b"
bytesize_field2 = "1024B"
bytesize_field3 = 1024
```
- [#12719](https://github.com/emqx/emqx/pull/12719) The `/clients` API has been upgraded to accommodate queries for multiple `clientid`s and `username`s simultaneously, offering a more flexible and powerful tool for monitoring client connections. Additionally, this update introduces the capability to customize which client information fields are included in the API response, optimizing for specific monitoring needs.
Examples of Multi-Client/Username Queries:
- To query multiple clients by ID: `/clients?clientid=client1&clientid=client2`
- To query multiple users: `/clients?username=user11&username=user2`
- To combine multiple client IDs and usernames in one query: `/clients?clientid=client1&clientid=client2&username=user1&username=user2`
Examples of Selecting Fields for the Response (a combined request is sketched after this list):
- To include all fields in the response: `/clients?fields=all` (Note: Omitting the `fields` parameter defaults to returning all fields.)
- To specify only certain fields: `/clients?fields=clientid,username`
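Combining both features in one request (the API key and client IDs are illustrative):
```
curl -s -u '<api-key>:<secret>' \
  'http://localhost:18083/api/v5/clients?clientid=client1&clientid=client2&fields=clientid,username'
```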
- [#12381](https://github.com/emqx/emqx/pull/12381) Added new SQL functions: `map_keys()`, `map_values()`, `map_to_entries()`, `join_to_string()`, `join_to_sql_values_string()`, `is_null_var()`, `is_not_null_var()`.
For more information on the functions and their usage, refer to [Built-in SQL Functions](../data-integration/rule-sql-builtin-functions) in the documentation. A small rule example follows.
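A rule sketch using two of the new functions, assuming the message payload is a JSON object (the topic filter is illustrative):
```
SELECT
  map_keys(json_decode(payload)) as ks,
  map_values(json_decode(payload)) as vs
FROM
  "t/#"
```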
- [#12336](https://github.com/emqx/emqx/pull/12336) Refined the approach to managing asynchronous tasks by segregating the cleanup of channels into its own dedicated pool. This separation addresses performance issues encountered during channels cleanup under conditions of high network latency, ensuring that such tasks do not impede the efficiency of other asynchronous operations, such as route cleanup.
- [#12725](https://github.com/emqx/emqx/pull/12725) Implemented REST API to list the available source types.
- [#12746](https://github.com/emqx/emqx/pull/12746) Added a `username` log field. If an MQTT client connects with a non-empty username, logs and traces will include the `username` field.
- [#12785](https://github.com/emqx/emqx/pull/12785) Added a `timestamp_format` configuration option to log handlers. This new option allows the following settings (a configuration sketch follows the list):
- `auto`: Automatically determines the timestamp format based on the log formatter being used.
Uses the `rfc3339` format for text formatters and the `epoch` format for JSON formatters.
- `epoch`: Represents timestamps in microsecond-precision Unix epoch format.
- `rfc3339`: Uses an RFC3339-compliant format for date-time strings. For example, `2024-03-26T11:52:19.777087+00:00`.
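For example, to force RFC3339 timestamps on the console log handler (a sketch; adjust the handler to your setup):
```
log {
  console {
    formatter = text
    timestamp_format = rfc3339
  }
}
```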
## Bug Fixes
- [#11868](https://github.com/emqx/emqx/pull/11868) Fixed a bug where will messages were not published after session takeover.
- [#12347](https://github.com/emqx/emqx/pull/12347) Implemented an update to ensure that messages processed by the Rule SQL for the MQTT egress data bridge are always rendered as valid, even in scenarios where the data is incomplete or lacks certain placeholders defined in the bridge configuration. This adjustment prevents messages from being incorrectly deemed invalid and subsequently discarded by the MQTT egress data bridge, as was the case previously.
When variables in `payload` and `topic` templates are undefined, they are now rendered as empty strings instead of the literal `undefined` string.
- [#12472](https://github.com/emqx/emqx/pull/12472) Fixed an issue where certain read operations on `/api/v5/actions/` and `/api/v5/sources/` endpoints might result in a `500` error code during the process of rolling upgrades.
- [#12492](https://github.com/emqx/emqx/pull/12492) EMQX now returns the `Receive-Maximum` property in the `CONNACK` message for MQTT v5 clients, aligning with protocol expectations. This implementation considers the minimum value of the client's `Receive-Maximum` setting and the server's `max_inflight` configuration as the limit for the number of inflight (unacknowledged) messages permitted. Previously, the determined value was not sent back to the client in the `CONNACK` message.
- [#12500](https://github.com/emqx/emqx/pull/12500) The `GET /clients` and `GET /client/:clientid` HTTP APIs have been updated to include disconnected persistent sessions in their responses.
NOTE: A current known issue with these enhanced API responses is that the total client count provided may exceed the actual number of clients due to the inclusion of disconnected sessions.
- [#12513](https://github.com/emqx/emqx/pull/12513) Changed the level of several flooding log events from `warning` to `info`.
- [#12530](https://github.com/emqx/emqx/pull/12530) Improved the error reporting for `frame_too_large` events and malformed `CONNECT` packet parsing failures. These updates now provide additional information, aiding in the troubleshooting process.
- [#12541](https://github.com/emqx/emqx/pull/12541) Introduced a new configuration validation step for DNS-based cluster autodiscovery to ensure compatibility between `node.name` and `cluster.discovery_strategy`. Specifically, when using the `dns` strategy with either the `a` or `aaaa` record type, all nodes must use a (static) IP address as the hostname; see the sketch below.
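A sketch of a compatible setup (the IP address and DNS name are illustrative):
```
node.name = "emqx@192.168.0.10"
cluster {
  discovery_strategy = dns
  dns {
    name = "emqx.example.com"
    record_type = a
  }
}
```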
- [#12562](https://github.com/emqx/emqx/pull/12562) Added a new configuration root: `durable_storage`. This configuration tree contains the settings related to the new persistent session feature.
- [#12566](https://github.com/emqx/emqx/pull/12566) Enhanced the bootstrap file for REST API keys:
- Empty lines within the file are now skipped, eliminating the previous behavior of generating an error.
- API keys specified in the bootstrap file are assigned the highest precedence. In cases where a new key from the bootstrap file conflicts with an existing key, the older key will be automatically removed to ensure that the bootstrap keys take effect without issue.
- [#12646](https://github.com/emqx/emqx/pull/12646) Fixed an issue with the rule engine's date-time string parser. Previously, time zone adjustments were only effective for date-time strings specified with second-level precision.
- [#12652](https://github.com/emqx/emqx/pull/12652) Fixed a discrepancy where the `subbits` functions with 4 and 5 parameters, despite being documented, were missing from the actual implementation. These functions have now been added.
- [#12663](https://github.com/emqx/emqx/pull/12663) Fixed an issue where the `emqx_vm_cpu_use` and `emqx_vm_cpu_idle` metrics, accessible via the Prometheus endpoint `/prometheus/stats`, were inaccurately reflecting the average CPU usage since the operating system boot. This fix ensures that these metrics now accurately represent the current CPU usage and idle time, providing more relevant and timely data for monitoring purposes.
- [#12668](https://github.com/emqx/emqx/pull/12668) Refactored the SQL function `date_to_unix_ts()` by using `calendar:datetime_to_gregorian_seconds/1`.
This change also added validation for the input date format.
- [#12672](https://github.com/emqx/emqx/pull/12672) Changed the process for generating the node boot configuration by incorporating the loading of `{data_dir}/configs/cluster.hocon`. Previously, changes to logging configurations made via the Dashboard and saved in `{data_dir}/configs/cluster.hocon` were only applied after the initial boot configuration was generated using `etc/emqx.conf`, leading to potential loss of some log segment files due to late reconfiguration.
Now, both `{data_dir}/configs/cluster.hocon` and `etc/emqx.conf` are loaded concurrently, with settings from `emqx.conf` taking precedence, to create the boot configuration.
- [#12696](https://github.com/emqx/emqx/pull/12696) Fixed an issue where attempting to reconnect an action or source could lead to wrong error messages being returned in the HTTP API.
- [#12714](https://github.com/emqx/emqx/pull/12714) Fixed inaccuracies in several metrics reported by the `/prometheus/stats` endpoint of the Prometheus API. The correction applies to the following metrics:
- `emqx_cluster_sessions_count`
- `emqx_cluster_sessions_max`
- `emqx_cluster_nodes_running`
- `emqx_cluster_nodes_stopped`
- `emqx_subscriptions_shared_count`
- `emqx_subscriptions_shared_max`
Additionally, this fix rectified an issue within the `/stats` endpoint concerning the `subscriptions.shared.count` and `subscriptions.shared.max` fields. Previously, these values failed to update promptly after a client disconnected or unsubscribed from a shared subscription.
- [#12715](https://github.com/emqx/emqx/pull/12715) Fixed a crash that could occur during configuration updates if the connector for the ingress data integration source had active channels.
- [#12740](https://github.com/emqx/emqx/pull/12740) Fixed an issue where durable sessions could not be kicked out.
- [#12768](https://github.com/emqx/emqx/pull/12768) Addressed a startup failure issue in EMQX version 5.4.0 and later, particularly noted during rolling upgrades from versions before 5.4.0. The issue was related to the initialization of the routing schema when both v1 and v2 routing tables were empty.
The node now attempts to retrieve the routing schema version in use across the cluster instead of using the v2 routing table by default when local routing tables are found empty at startup. This approach mitigates potential conflicts and reduces the chances of diverging routing storage schemas among cluster nodes, especially in a mixed-version cluster scenario.
If a conflict is detected in a running cluster, EMQX writes instructions on how to resolve it manually to the log, as part of an error message with `critical` severity. The same error message and instructions are also written to standard error, to make sure the message is not lost even if no log handler is configured.
- [#12786](https://github.com/emqx/emqx/pull/12786) Added a strict check that prevents replicant nodes from connecting to core nodes running a different version of the EMQX application.
This check ensures that during rolling upgrades, replicant nodes can only work when at least one core node is running the same EMQX release version.
## Breaking Changes
- [#12576](https://github.com/emqx/emqx/pull/12576) Starting from 5.6, the "Configuration Manual" document no longer includes the `bridges` config root.
A `bridge` is now either an `action` + `connector` pair for egress data integration, or a `source` + `connector` pair for ingress data integration.
Please note that the `bridges` config (in `cluster.hocon`) and the REST API path `api/v5/bridges` still work, but they are considered deprecated. A sketch of the new layout follows.
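A minimal sketch of the new layout, using an HTTP egress as an example; the connector/action names and parameters are illustrative assumptions:
```
connectors.http.my_conn {
  url = "http://127.0.0.1:8080"
}
actions.http.my_action {
  connector = my_conn
  parameters {
    path = "/ingest"
    method = post
  }
}
```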
- [#12634](https://github.com/emqx/emqx/pull/12634) Triple-quote string values in HOCON config files no longer support escape sequences.
Detailed information can be found in [this pull request](https://github.com/emqx/hocon/pull/290).
Here is a summary of the impact on EMQX users:
- EMQX 5.6 is the first version to generate triple-quote strings in `cluster.hocon`,
meaning for generated configs, there is no compatibility issue.
- For user hand-crafted configs (such as `emqx.conf`), a thorough review is needed
to check whether escape sequences are used (such as `\n`, `\r`, `\t`, and `\\`); if so,
such strings should be changed to regular quotes (one pair of `"`) instead of triple quotes, as illustrated below.
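For example, a string that relies on `\n` must keep regular quotes, while literal multi-line content can use triple quotes:
```
## escape sequences are interpreted only inside regular quotes
motd = "line1\nline2"
## triple-quoted content is taken literally, with no escape processing
banner = """~
  line1
  line2
~"""
```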

View File

@ -14,8 +14,8 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes # This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version. # to the chart and its templates, including the app version.
version: 5.6.0-rc.1 version: 5.6.0
# This is the version number of the application being deployed. This version number should be # This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. # incremented each time you make changes to the application.
appVersion: 5.6.0-rc.1 appVersion: 5.6.0

View File

@ -14,8 +14,8 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes # This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version. # to the chart and its templates, including the app version.
version: 5.6.0-rc.1 version: 5.6.0
# This is the version number of the application being deployed. This version number should be # This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. # incremented each time you make changes to the application.
appVersion: 5.6.0-rc.1 appVersion: 5.6.0

View File

@ -55,7 +55,7 @@ defmodule EMQXUmbrella.MixProject do
{:cowboy, github: "emqx/cowboy", tag: "2.9.2", override: true}, {:cowboy, github: "emqx/cowboy", tag: "2.9.2", override: true},
{:esockd, github: "emqx/esockd", tag: "5.11.1", override: true}, {:esockd, github: "emqx/esockd", tag: "5.11.1", override: true},
{:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.8.0-emqx-2", override: true}, {:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.8.0-emqx-2", override: true},
{:ekka, github: "emqx/ekka", tag: "0.19.0", override: true}, {:ekka, github: "emqx/ekka", tag: "0.19.1", override: true},
{:gen_rpc, github: "emqx/gen_rpc", tag: "3.3.1", override: true}, {:gen_rpc, github: "emqx/gen_rpc", tag: "3.3.1", override: true},
{:grpc, github: "emqx/grpc-erl", tag: "0.6.12", override: true}, {:grpc, github: "emqx/grpc-erl", tag: "0.6.12", override: true},
{:minirest, github: "emqx/minirest", tag: "1.4.0", override: true}, {:minirest, github: "emqx/minirest", tag: "1.4.0", override: true},
@ -202,7 +202,8 @@ defmodule EMQXUmbrella.MixProject do
defp enterprise_deps(_profile_info = %{edition_type: :enterprise}) do defp enterprise_deps(_profile_info = %{edition_type: :enterprise}) do
[ [
{:hstreamdb_erl, github: "hstreamdb/hstreamdb_erl", tag: "0.5.18+v0.18.1"}, {:hstreamdb_erl,
github: "hstreamdb/hstreamdb_erl", tag: "0.5.18+v0.18.1+ezstd-v1.0.5-emqx1"},
{:influxdb, github: "emqx/influxdb-client-erl", tag: "1.1.13", override: true}, {:influxdb, github: "emqx/influxdb-client-erl", tag: "1.1.13", override: true},
{:wolff, github: "kafka4beam/wolff", tag: "1.10.2"}, {:wolff, github: "kafka4beam/wolff", tag: "1.10.2"},
{:kafka_protocol, github: "kafka4beam/kafka_protocol", tag: "4.1.5", override: true}, {:kafka_protocol, github: "kafka4beam/kafka_protocol", tag: "4.1.5", override: true},

View File

@ -83,7 +83,7 @@
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.11.1"}}}, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.11.1"}}},
{rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.8.0-emqx-2"}}}, {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.8.0-emqx-2"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.19.0"}}}, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.19.1"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.1"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.1"}}},
{grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.12"}}}, {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.12"}}},
{minirest, {git, "https://github.com/emqx/minirest", {tag, "1.4.0"}}}, {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.4.0"}}},

View File

@ -124,9 +124,9 @@ kafka_headers.desc:
"""Provide a placeholder for message headers<br/> """Provide a placeholder for message headers<br/>
e.g. <code>${pub_props}</code><br/> e.g. <code>${pub_props}</code><br/>
Note that the value of the placeholder must be either an object: Note that the value of the placeholder must be either an object:
<code>{\"foo\": \"bar\"}</code> <code>{"foo": "bar"}</code>
or an array of key-value pairs: or an array of key-value pairs:
<code>[{\"key\": \"foo\", \"value\": \"bar\"}]</code>""" <code>[{"key": "foo", "value": "bar"}]</code>"""
kafka_headers.label: kafka_headers.label:
"""Message Headers""" """Message Headers"""

View File

@ -124,9 +124,9 @@ kafka_headers.desc:
"""Provide a placeholder for message headers<br/> """Provide a placeholder for message headers<br/>
e.g. <code>${pub_props}</code><br/> e.g. <code>${pub_props}</code><br/>
Note that the value of the placeholder must be either an object: Note that the value of the placeholder must be either an object:
<code>{\"foo\": \"bar\"}</code> <code>{"foo": "bar"}</code>
or an array of key-value pairs: or an array of key-value pairs:
<code>[{\"key\": \"foo\", \"value\": \"bar\"}]</code>""" <code>[{"key": "foo", "value": "bar"}]</code>"""
kafka_headers.label: kafka_headers.label:
"""Message Headers""" """Message Headers"""

View File

@ -198,9 +198,9 @@ kafka_headers.desc:
"""Provide a placeholder for message headers<br/> """Provide a placeholder for message headers<br/>
e.g. <code>${pub_props}</code><br/> e.g. <code>${pub_props}</code><br/>
Note that the value of the placeholder must be either an object: Note that the value of the placeholder must be either an object:
<code>{\"foo\": \"bar\"}</code> <code>{"foo": "bar"}</code>
or an array of key-value pairs: or an array of key-value pairs:
<code>[{\"key\": \"foo\", \"value\": \"bar\"}]</code>""" <code>[{"key": "foo", "value": "bar"}]</code>"""
kafka_headers.label: kafka_headers.label:
"""Message Headers""" """Message Headers"""

View File

@ -761,6 +761,15 @@ common_handler_formatter.desc:
common_handler_formatter.label: common_handler_formatter.label:
"""Log Formatter""" """Log Formatter"""
common_handler_timestamp_format.label:
"""Timestamp Format"""
common_handler_timestamp_format.desc: """~
Pick a timestamp format:
- `auto`: automatically choose the best format based on log formatter. `epoch` for JSON and `rfc3339` for text.
- `epoch`: Unix epoch time in microseconds.
- `rfc3339`: RFC3339 format."""
rpc_async_batch_size.desc: rpc_async_batch_size.desc:
"""The maximum number of batch messages sent in asynchronous mode. """The maximum number of batch messages sent in asynchronous mode.
Note that this configuration does not work in synchronous mode.""" Note that this configuration does not work in synchronous mode."""

View File

@ -41,20 +41,22 @@ get_client_mqueue_msgs.label:
"""Get client mqueue messages""" """Get client mqueue messages"""
get_client_inflight_msgs.desc: get_client_inflight_msgs.desc:
"""Get client inflight messages""" """Get client in-flight messages"""
get_client_inflight_msgs.label: get_client_inflight_msgs.label:
"""Get client inflight messages""" """Get client in-flight messages"""
mqueue_msgs_list.desc: mqueue_msgs_list.desc:
"""Client's mqueue messages list. The queue (FIFO) ordering is preserved.""" """Client's mqueue messages list.
Messages are ordered according to their priority and queue (FIFO) order: from higher priority to lower priority.
By default, all messages in Mqueue have the same priority of 0."""
mqueue_msgs_list.label: mqueue_msgs_list.label:
"""Client's mqueue messages""" """Client's mqueue messages"""
inflight_msgs_list.desc: inflight_msgs_list.desc:
"""Client's inflight messages list. """Client's in-flight messages list.
Ordered by MQTT Packet Id, which may not represent the chronological messages order.""" Messages are sorted by time at which they were inserted to the In-flight storage (from older to newer messages)."""
inflight_msgs_list.label: inflight_msgs_list.label:
"""Client's inflight messages""" """Client's in-flight messages"""
msg_id.desc: msg_id.desc:
"""Message ID.""" """Message ID."""
@ -68,13 +70,13 @@ msg_topic.label:
msg_qos.desc: msg_qos.desc:
"""Message QoS.""" """Message QoS."""
msg_topic.label: msg_qos.label:
"""Message Qos""" """Message QoS"""
msg_publish_at.desc: msg_publish_at.desc:
"""Message publish time, a millisecond precision Unix epoch timestamp.""" """Message publish time, a millisecond precision Unix epoch timestamp."""
msg_publish_at.label: msg_publish_at.label:
"""Message Publish Time.""" """Message Publish Time"""
msg_from_clientid.desc: msg_from_clientid.desc:
"""Message publisher's client ID.""" """Message publisher's client ID."""
@ -84,7 +86,17 @@ msg_from_clientid.desc:
msg_from_username.desc: msg_from_username.desc:
"""Message publisher's username.""" """Message publisher's username."""
msg_from_username.label: msg_from_username.label:
"""Message Publisher's Username """ """Message Publisher's Username"""
msg_inserted_at.desc:
"""A nanosecond precision Unix epoch timestamp at which a message was inserted to In-flight / Mqueue."""
msg_inserted_at.label:
"""Message Insertion Time"""
msg_mqueue_priority.desc:
"""Message Mqueue Priority."""
msg_mqueue_priority.label:
"""Message Mqueue Priority"""
subscribe.desc: subscribe.desc:
"""Subscribe""" """Subscribe"""