Compare commits
No commits in common. "master" and "dependabot/github_actions/actions-ef71aea555" have entirely different histories.
master...dependabot/github_actions/actions-ef71aea555
@@ -10,7 +10,7 @@ services:
     nofile: 1024
   image: openldap
   #ports:
-  # - "389:389"
+  # - 389:389
   volumes:
   - ./certs/ca.crt:/etc/certs/ca.crt
   restart: always
@@ -51,7 +51,7 @@ runs:
       echo "SELF_HOSTED=false" >> $GITHUB_OUTPUT
       ;;
     esac
-  - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
+  - uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1
    id: cache
    if: steps.prepare.outputs.SELF_HOSTED != 'true'
    with:
@@ -122,10 +122,9 @@ jobs:
       run: |
         ls -lR _packages/$PROFILE
         mv _packages/$PROFILE/*.tar.gz ./
 
     - name: Enable containerd image store on Docker Engine
       run: |
-        echo "$(sudo cat /etc/docker/daemon.json | jq '. += {"features": {"containerd-snapshotter": true}}')" > daemon.json
+        echo "$(jq '. += {"features": {"containerd-snapshotter": true}}' /etc/docker/daemon.json)" > daemon.json
         sudo mv daemon.json /etc/docker/daemon.json
         sudo systemctl restart docker
-
@@ -23,7 +23,6 @@ jobs:
       profile:
         - ['emqx', 'master']
         - ['emqx', 'release-57']
-        - ['emqx', 'release-58']
       os:
         - ubuntu22.04
         - amzn2023
@@ -24,7 +24,6 @@ jobs:
       branch:
         - master
         - release-57
-        - release-58
       language:
         - cpp
         - python
@@ -24,7 +24,6 @@ jobs:
       ref:
         - master
         - release-57
-        - release-58
     steps:
       - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
         with:
Makefile (4 changed lines)
@@ -10,8 +10,8 @@ include env.sh
 
 # Dashboard version
 # from https://github.com/emqx/emqx-dashboard5
-export EMQX_DASHBOARD_VERSION ?= v1.10.0-beta.1
-export EMQX_EE_DASHBOARD_VERSION ?= e1.8.0-beta.1
+export EMQX_DASHBOARD_VERSION ?= v1.9.1
+export EMQX_EE_DASHBOARD_VERSION ?= e1.7.1
 
 export EMQX_RELUP ?= true
 export EMQX_REL_FORM ?= tgz
@@ -683,7 +683,6 @@ end).
 
 -define(FRAME_PARSE_ERROR, frame_parse_error).
 -define(FRAME_SERIALIZE_ERROR, frame_serialize_error).
 
 -define(THROW_FRAME_ERROR(Reason), erlang:throw({?FRAME_PARSE_ERROR, Reason})).
 -define(THROW_SERIALIZE_ERROR(Reason), erlang:throw({?FRAME_SERIALIZE_ERROR, Reason})).
-
@@ -32,7 +32,7 @@
 %% `apps/emqx/src/bpapi/README.md'
 
 %% Opensource edition
--define(EMQX_RELEASE_CE, "5.8.0-alpha.1").
+-define(EMQX_RELEASE_CE, "5.7.1").
 
 %% Enterprise edition
--define(EMQX_RELEASE_EE, "5.8.0-alpha.1").
+-define(EMQX_RELEASE_EE, "5.7.1").
@@ -91,7 +91,7 @@
         ?_DO_TRACE(Tag, Msg, Meta),
         ?SLOG(
             Level,
-            (Meta)#{msg => Msg, tag => Tag},
+            (emqx_trace_formatter:format_meta_map(Meta))#{msg => Msg, tag => Tag},
             #{is_trace => false}
         )
     end).
@@ -28,7 +28,7 @@
     {lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}},
     {gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}},
     {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
-    {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.12.0"}}},
+    {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.11.3"}}},
     {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.19.5"}}},
     {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.1"}}},
     {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.43.2"}}},
@@ -2,7 +2,7 @@
 {application, emqx, [
     {id, "emqx"},
     {description, "EMQX Core"},
-    {vsn, "5.3.4"},
+    {vsn, "5.3.3"},
     {modules, []},
     {registered, []},
     {applications, [
@@ -146,9 +146,7 @@
 -type replies() :: emqx_types:packet() | reply() | [reply()].
 
 -define(IS_MQTT_V5, #channel{conninfo = #{proto_ver := ?MQTT_PROTO_V5}}).
--define(IS_CONNECTED_OR_REAUTHENTICATING(ConnState),
-    ((ConnState == connected) orelse (ConnState == reauthenticating))
-).
 -define(IS_COMMON_SESSION_TIMER(N),
     ((N == retry_delivery) orelse (N == expire_awaiting_rel))
 ).
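On master, the macro removed above is what lets later clauses write `?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)` instead of repeating the spelled-out guard the dependabot side carries (see the following hunks). A minimal, self-contained sketch of the guard-macro pattern, using a hypothetical module and a stripped-down channel record rather than the real emqx_channel state:

%% Sketch only; not the real emqx_channel module.
-module(guard_macro_sketch).
-export([handle_connect/1]).

-record(channel, {conn_state :: atom()}).

%% A guard macro must expand to guard-legal expressions only (==, orelse, ...),
%% which is why it tests with == instead of pattern matching.
-define(IS_CONNECTED_OR_REAUTHENTICATING(ConnState),
    ((ConnState == connected) orelse (ConnState == reauthenticating))
).

%% A second CONNECT on a live connection is a protocol error, so reject it.
handle_connect(#channel{conn_state = ConnState}) when
    ?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
->
    {error, protocol_error};
handle_connect(#channel{}) ->
    ok.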
@@ -339,7 +337,7 @@ take_conn_info_fields(Fields, ClientInfo, ConnInfo) ->
     | {shutdown, Reason :: term(), channel()}
     | {shutdown, Reason :: term(), replies(), channel()}.
 handle_in(?CONNECT_PACKET(), Channel = #channel{conn_state = ConnState}) when
-    ?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
+    ConnState =:= connected orelse ConnState =:= reauthenticating
 ->
     handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel);
 handle_in(?CONNECT_PACKET(), Channel = #channel{conn_state = connecting}) ->
@@ -569,8 +567,29 @@ handle_in(
     process_disconnect(ReasonCode, Properties, NChannel);
 handle_in(?AUTH_PACKET(), Channel) ->
     handle_out(disconnect, ?RC_IMPLEMENTATION_SPECIFIC_ERROR, Channel);
-handle_in({frame_error, Reason}, Channel) ->
-    handle_frame_error(Reason, Channel);
+handle_in({frame_error, Reason}, Channel = #channel{conn_state = idle}) ->
+    shutdown(shutdown_count(frame_error, Reason), Channel);
+handle_in(
+    {frame_error, #{cause := frame_too_large} = R}, Channel = #channel{conn_state = connecting}
+) ->
+    shutdown(
+        shutdown_count(frame_error, R), ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE), Channel
+    );
+handle_in({frame_error, Reason}, Channel = #channel{conn_state = connecting}) ->
+    shutdown(shutdown_count(frame_error, Reason), ?CONNACK_PACKET(?RC_MALFORMED_PACKET), Channel);
+handle_in(
+    {frame_error, #{cause := frame_too_large}}, Channel = #channel{conn_state = ConnState}
+) when
+    ConnState =:= connected orelse ConnState =:= reauthenticating
+->
+    handle_out(disconnect, {?RC_PACKET_TOO_LARGE, frame_too_large}, Channel);
+handle_in({frame_error, Reason}, Channel = #channel{conn_state = ConnState}) when
+    ConnState =:= connected orelse ConnState =:= reauthenticating
+->
+    handle_out(disconnect, {?RC_MALFORMED_PACKET, Reason}, Channel);
+handle_in({frame_error, Reason}, Channel = #channel{conn_state = disconnected}) ->
+    ?SLOG(error, #{msg => "malformed_mqtt_message", reason => Reason}),
+    {ok, Channel};
 handle_in(Packet, Channel) ->
     ?SLOG(error, #{msg => "disconnecting_due_to_unexpected_message", packet => Packet}),
     handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel).
@@ -1002,68 +1021,6 @@ not_nacked({deliver, _Topic, Msg}) ->
             true
     end.
 
-%%--------------------------------------------------------------------
-%% Handle Frame Error
-%%--------------------------------------------------------------------
-
-handle_frame_error(
-    Reason = #{cause := frame_too_large},
-    Channel = #channel{conn_state = ConnState, conninfo = ConnInfo}
-) when
-    ?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
-->
-    ShutdownCount = shutdown_count(frame_error, Reason),
-    case proto_ver(Reason, ConnInfo) of
-        ?MQTT_PROTO_V5 ->
-            handle_out(disconnect, {?RC_PACKET_TOO_LARGE, frame_too_large}, Channel);
-        _ ->
-            shutdown(ShutdownCount, Channel)
-    end;
-%% Only send CONNACK with reason code `frame_too_large` for MQTT-v5.0 when connecting,
-%% otherwise DONOT send any CONNACK or DISCONNECT packet.
-handle_frame_error(
-    Reason,
-    Channel = #channel{conn_state = ConnState, conninfo = ConnInfo}
-) when
-    is_map(Reason) andalso
-        (ConnState == idle orelse ConnState == connecting)
-->
-    ShutdownCount = shutdown_count(frame_error, Reason),
-    ProtoVer = proto_ver(Reason, ConnInfo),
-    NChannel = Channel#channel{conninfo = ConnInfo#{proto_ver => ProtoVer}},
-    case ProtoVer of
-        ?MQTT_PROTO_V5 ->
-            shutdown(ShutdownCount, ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE), NChannel);
-        _ ->
-            shutdown(ShutdownCount, NChannel)
-    end;
-handle_frame_error(
-    Reason,
-    Channel = #channel{conn_state = connecting}
-) ->
-    shutdown(
-        shutdown_count(frame_error, Reason),
-        ?CONNACK_PACKET(?RC_MALFORMED_PACKET),
-        Channel
-    );
-handle_frame_error(
-    Reason,
-    Channel = #channel{conn_state = ConnState}
-) when
-    ?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
-->
-    handle_out(
-        disconnect,
-        {?RC_MALFORMED_PACKET, Reason},
-        Channel
-    );
-handle_frame_error(
-    Reason,
-    Channel = #channel{conn_state = disconnected}
-) ->
-    ?SLOG(error, #{msg => "malformed_mqtt_message", reason => Reason}),
-    {ok, Channel}.
-
 %%--------------------------------------------------------------------
 %% Handle outgoing packet
 %%--------------------------------------------------------------------
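Read together with the previous hunk: master funnels every `{frame_error, Reason}` through `handle_frame_error/2`, while the dependabot side keeps one `handle_in/2` clause per connection state. A condensed, illustrative sketch of master's decision table, with invented atoms standing in for the real shutdown/handle_out calls:

%% Illustrative only; mirrors the clause order of handle_frame_error/2 above.
-module(frame_error_sketch).
-export([dispatch/2]).

%% dispatch(Reason, {ConnState, ProtoVer}) -> the action taken before closing.
dispatch(#{cause := frame_too_large}, {ConnState, ProtoVer}) when
    ConnState =:= connected; ConnState =:= reauthenticating
->
    case ProtoVer of
        5 -> {send_disconnect, rc_packet_too_large};
        _ -> close_socket
    end;
%% Before the connection is accepted, only MQTT v5 clients get a CONNACK,
%% per the comment preserved in the hunk above.
dispatch(Reason, {ConnState, ProtoVer}) when
    is_map(Reason), (ConnState =:= idle orelse ConnState =:= connecting)
->
    case ProtoVer of
        5 -> {send_connack, rc_packet_too_large};
        _ -> close_socket
    end;
dispatch(_Reason, {connecting, _ProtoVer}) ->
    {send_connack, rc_malformed_packet};
dispatch(_Reason, {ConnState, _ProtoVer}) when
    ConnState =:= connected; ConnState =:= reauthenticating
->
    {send_disconnect, rc_malformed_packet};
dispatch(_Reason, {disconnected, _ProtoVer}) ->
    log_only.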
@@ -1332,7 +1289,7 @@ handle_info(
         session = Session
     }
 ) when
-    ?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
+    ConnState =:= connected orelse ConnState =:= reauthenticating
 ->
     {Intent, Session1} = session_disconnect(ClientInfo, ConnInfo, Session),
     Channel1 = ensure_disconnected(Reason, maybe_publish_will_msg(sock_closed, Channel)),
@@ -2679,7 +2636,8 @@ save_alias(outbound, AliasId, Topic, TopicAliases = #{outbound := Aliases}) ->
     NAliases = maps:put(Topic, AliasId, Aliases),
     TopicAliases#{outbound => NAliases}.
 
--compile({inline, [reply/2, shutdown/2, shutdown/3]}).
+-compile({inline, [reply/2, shutdown/2, shutdown/3, sp/1, flag/1]}).
 
 reply(Reply, Channel) ->
     {reply, Reply, Channel}.
@@ -2715,13 +2673,13 @@ disconnect_and_shutdown(
     ?IS_MQTT_V5 =
         #channel{conn_state = ConnState}
 ) when
-    ?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
+    ConnState =:= connected orelse ConnState =:= reauthenticating
 ->
     NChannel = ensure_disconnected(Reason, Channel),
     shutdown(Reason, Reply, ?DISCONNECT_PACKET(reason_code(Reason)), NChannel);
 %% mqtt v3/v4 connected sessions
 disconnect_and_shutdown(Reason, Reply, Channel = #channel{conn_state = ConnState}) when
-    ?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
+    ConnState =:= connected orelse ConnState =:= reauthenticating
 ->
     NChannel = ensure_disconnected(Reason, Channel),
     shutdown(Reason, Reply, NChannel);
@@ -2764,13 +2722,6 @@ is_durable_session(#channel{session = Session}) ->
             false
     end.
 
-proto_ver(#{proto_ver := ProtoVer}, _ConnInfo) ->
-    ProtoVer;
-proto_ver(_Reason, #{proto_ver := ProtoVer}) ->
-    ProtoVer;
-proto_ver(_, _) ->
-    ?MQTT_PROTO_V4.
-
 %%--------------------------------------------------------------------
 %% For CT tests
 %%--------------------------------------------------------------------
@@ -783,8 +783,7 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) ->
                 input_bytes => Data,
                 parsed_packets => Packets
             }),
-            NState = enrich_state(Reason, State),
-            {[{frame_error, Reason} | Packets], NState};
+            {[{frame_error, Reason} | Packets], State};
         error:Reason:Stacktrace ->
             ?LOG(error, #{
                 at_state => emqx_frame:describe_state(ParseState),
@@ -1228,12 +1227,6 @@ inc_counter(Key, Inc) ->
     _ = emqx_pd:inc_counter(Key, Inc),
     ok.
 
-enrich_state(#{parse_state := NParseState}, State) ->
-    Serialize = emqx_frame:serialize_opts(NParseState),
-    State#state{parse_state = NParseState, serialize = Serialize};
-enrich_state(_, State) ->
-    State.
-
 set_tcp_keepalive({quic, _Listener}) ->
     ok;
 set_tcp_keepalive({Type, Id}) ->
@@ -267,50 +267,28 @@ packet(Header, Variable) ->
 packet(Header, Variable, Payload) ->
     #mqtt_packet{header = Header, variable = Variable, payload = Payload}.
 
-parse_connect(FrameBin, Options = #{strict_mode := StrictMode}) ->
-    {ProtoName, Rest0} = parse_utf8_string_with_cause(FrameBin, StrictMode, invalid_proto_name),
-    %% No need to parse and check proto_ver if proto_name is invalid, check it first
-    %% And the matching check of `proto_name` and `proto_ver` fields will be done in `emqx_packet:check_proto_ver/2`
-    _ = validate_proto_name(ProtoName),
-    {IsBridge, ProtoVer, Rest2} = parse_connect_proto_ver(Rest0),
-    NOptions = Options#{version => ProtoVer},
-    try
-        do_parse_connect(ProtoName, IsBridge, ProtoVer, Rest2, StrictMode)
-    catch
-        throw:{?FRAME_PARSE_ERROR, ReasonM} when is_map(ReasonM) ->
-            ?PARSE_ERR(
-                ReasonM#{
-                    proto_ver => ProtoVer,
-                    proto_name => ProtoName,
-                    parse_state => ?NONE(NOptions)
-                }
-            );
-        throw:{?FRAME_PARSE_ERROR, Reason} ->
-            ?PARSE_ERR(
-                #{
-                    cause => Reason,
-                    proto_ver => ProtoVer,
-                    proto_name => ProtoName,
-                    parse_state => ?NONE(NOptions)
-                }
-            )
-    end.
+parse_connect(FrameBin, StrictMode) ->
+    {ProtoName, Rest} = parse_utf8_string_with_cause(FrameBin, StrictMode, invalid_proto_name),
+    case ProtoName of
+        <<"MQTT">> ->
+            ok;
+        <<"MQIsdp">> ->
+            ok;
+        _ ->
+            %% from spec: the server MAY send disconnect with reason code 0x84
+            %% we chose to close socket because the client is likely not talking MQTT anyway
+            ?PARSE_ERR(#{
+                cause => invalid_proto_name,
+                expected => <<"'MQTT' or 'MQIsdp'">>,
+                received => ProtoName
+            })
+    end,
+    parse_connect2(ProtoName, Rest, StrictMode).
 
-do_parse_connect(
+parse_connect2(
     ProtoName,
-    IsBridge,
-    ProtoVer,
-    <<
-        UsernameFlagB:1,
-        PasswordFlagB:1,
-        WillRetainB:1,
-        WillQoS:2,
-        WillFlagB:1,
-        CleanStart:1,
-        Reserved:1,
-        KeepAlive:16/big,
-        Rest/binary
-    >>,
+    <<BridgeTag:4, ProtoVer:4, UsernameFlagB:1, PasswordFlagB:1, WillRetainB:1, WillQoS:2,
+        WillFlagB:1, CleanStart:1, Reserved:1, KeepAlive:16/big, Rest2/binary>>,
     StrictMode
 ) ->
     _ = validate_connect_reserved(Reserved),
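The master side above validates the protocol name first and then wraps the rest of CONNECT parsing in try/catch, so a `?FRAME_PARSE_ERROR` thrown anywhere below is re-thrown enriched with `proto_ver`, `proto_name`, and a fresh `parse_state`. A minimal sketch of that enrich-and-rethrow idiom, with hypothetical names in place of the real emqx_frame internals:

%% Sketch of the enrich-and-rethrow pattern; names are illustrative.
-module(parse_error_enrich_sketch).
-export([parse_connect/2]).

parse_connect(ProtoName, ProtoVer) ->
    try
        do_parse(ProtoName, ProtoVer)
    catch
        %% A map reason keeps its fields and gains parser context.
        throw:{frame_parse_error, Reason} when is_map(Reason) ->
            throw({frame_parse_error, Reason#{
                proto_ver => ProtoVer,
                proto_name => ProtoName
            }});
        %% A bare reason becomes the `cause` of a new map reason.
        throw:{frame_parse_error, Reason} ->
            throw({frame_parse_error, #{
                cause => Reason,
                proto_ver => ProtoVer,
                proto_name => ProtoName
            }})
    end.

%% Stand-in for the real parser body; always fails, for demonstration.
do_parse(_ProtoName, _ProtoVer) ->
    throw({frame_parse_error, malformed_connect}).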
@@ -325,14 +303,14 @@ do_parse_connect(
         UsernameFlag = bool(UsernameFlagB),
         PasswordFlag = bool(PasswordFlagB)
     ),
-    {Properties, Rest3} = parse_properties(Rest, ProtoVer, StrictMode),
+    {Properties, Rest3} = parse_properties(Rest2, ProtoVer, StrictMode),
     {ClientId, Rest4} = parse_utf8_string_with_cause(Rest3, StrictMode, invalid_clientid),
     ConnPacket = #mqtt_packet_connect{
         proto_name = ProtoName,
         proto_ver = ProtoVer,
         %% For bridge mode, non-standard implementation
         %% Invented by mosquitto, named 'try_private': https://mosquitto.org/man/mosquitto-conf-5.html
-        is_bridge = IsBridge,
+        is_bridge = (BridgeTag =:= 8),
         clean_start = bool(CleanStart),
         will_flag = WillFlag,
         will_qos = WillQoS,
@@ -365,16 +343,16 @@ do_parse_connect(
                 unexpected_trailing_bytes => size(Rest7)
             })
     end;
-do_parse_connect(_ProtoName, _IsBridge, _ProtoVer, Bin, _StrictMode) ->
-    %% sent less than 24 bytes
+parse_connect2(_ProtoName, Bin, _StrictMode) ->
+    %% sent less than 32 bytes
     ?PARSE_ERR(#{cause => malformed_connect, header_bytes => Bin}).
 
 parse_packet(
     #mqtt_packet_header{type = ?CONNECT},
     FrameBin,
-    Options
+    #{strict_mode := StrictMode}
 ) ->
-    parse_connect(FrameBin, Options);
+    parse_connect(FrameBin, StrictMode);
 parse_packet(
     #mqtt_packet_header{type = ?CONNACK},
     <<AckFlags:8, ReasonCode:8, Rest/binary>>,
@@ -538,12 +516,6 @@ parse_packet_id(<<PacketId:16/big, Rest/binary>>) ->
 parse_packet_id(_) ->
     ?PARSE_ERR(invalid_packet_id).
 
-parse_connect_proto_ver(<<BridgeTag:4, ProtoVer:4, Rest/binary>>) ->
-    {_IsBridge = (BridgeTag =:= 8), ProtoVer, Rest};
-parse_connect_proto_ver(Bin) ->
-    %% sent less than 1 bytes or empty
-    ?PARSE_ERR(#{cause => malformed_connect, header_bytes => Bin}).
-
 parse_properties(Bin, Ver, _StrictMode) when Ver =/= ?MQTT_PROTO_V5 ->
     {#{}, Bin};
 %% TODO: version mess?
@@ -767,8 +739,6 @@ serialize_fun(#{version := Ver, max_size := MaxSize, strict_mode := StrictMode})
 initial_serialize_opts(Opts) ->
     maps:merge(?DEFAULT_OPTIONS, Opts).
 
-serialize_opts(?NONE(Options)) ->
-    maps:merge(?DEFAULT_OPTIONS, Options);
 serialize_opts(#mqtt_packet_connect{proto_ver = ProtoVer, properties = ConnProps}) ->
     MaxSize = get_property('Maximum-Packet-Size', ConnProps, ?MAX_PACKET_SIZE),
     #{version => ProtoVer, max_size => MaxSize, strict_mode => false}.
@@ -1187,34 +1157,18 @@ validate_subqos([3 | _]) -> ?PARSE_ERR(bad_subqos);
 validate_subqos([_ | T]) -> validate_subqos(T);
 validate_subqos([]) -> ok.
 
-%% from spec: the server MAY send disconnect with reason code 0x84
-%% we chose to close socket because the client is likely not talking MQTT anyway
-validate_proto_name(<<"MQTT">>) ->
-    ok;
-validate_proto_name(<<"MQIsdp">>) ->
-    ok;
-validate_proto_name(ProtoName) ->
-    ?PARSE_ERR(#{
-        cause => invalid_proto_name,
-        expected => <<"'MQTT' or 'MQIsdp'">>,
-        received => ProtoName
-    }).
-
 %% MQTT-v3.1.1-[MQTT-3.1.2-3], MQTT-v5.0-[MQTT-3.1.2-3]
--compile({inline, [validate_connect_reserved/1]}).
 validate_connect_reserved(0) -> ok;
 validate_connect_reserved(1) -> ?PARSE_ERR(reserved_connect_flag).
 
--compile({inline, [validate_connect_will/3]}).
 %% MQTT-v3.1.1-[MQTT-3.1.2-13], MQTT-v5.0-[MQTT-3.1.2-11]
-validate_connect_will(false, _, WillQoS) when WillQoS > 0 -> ?PARSE_ERR(invalid_will_qos);
+validate_connect_will(false, _, WillQos) when WillQos > 0 -> ?PARSE_ERR(invalid_will_qos);
 %% MQTT-v3.1.1-[MQTT-3.1.2-14], MQTT-v5.0-[MQTT-3.1.2-12]
 validate_connect_will(true, _, WillQoS) when WillQoS > 2 -> ?PARSE_ERR(invalid_will_qos);
 %% MQTT-v3.1.1-[MQTT-3.1.2-15], MQTT-v5.0-[MQTT-3.1.2-13]
 validate_connect_will(false, WillRetain, _) when WillRetain -> ?PARSE_ERR(invalid_will_retain);
 validate_connect_will(_, _, _) -> ok.
 
--compile({inline, [validate_connect_password_flag/4]}).
 %% MQTT-v3.1
 %% Username flag and password flag are not strongly related
 %% https://public.dhe.ibm.com/software/dw/webservices/ws-mqtt/mqtt-v3r1.html#connect
@@ -1229,7 +1183,6 @@ validate_connect_password_flag(true, ?MQTT_PROTO_V5, _, _) ->
 validate_connect_password_flag(_, _, _, _) ->
     ok.
 
--compile({inline, [bool/1]}).
 bool(0) -> false;
 bool(1) -> true.
@@ -432,7 +432,7 @@ do_start_listener(Type, Name, Id, #{bind := ListenOn} = Opts) when ?ESOCKD_LISTE
     esockd:open(
         Id,
         ListenOn,
-        merge_default(esockd_opts(Id, Type, Name, Opts, _OldOpts = undefined))
+        merge_default(esockd_opts(Id, Type, Name, Opts))
     );
 %% Start MQTT/WS listener
 do_start_listener(Type, Name, Id, Opts) when ?COWBOY_LISTENER(Type) ->
|
@ -476,7 +476,7 @@ do_update_listener(Type, Name, OldConf, NewConf = #{bind := ListenOn}) when
|
||||||
Id = listener_id(Type, Name),
|
Id = listener_id(Type, Name),
|
||||||
case maps:get(bind, OldConf) of
|
case maps:get(bind, OldConf) of
|
||||||
ListenOn ->
|
ListenOn ->
|
||||||
esockd:set_options({Id, ListenOn}, esockd_opts(Id, Type, Name, NewConf, OldConf));
|
esockd:set_options({Id, ListenOn}, esockd_opts(Id, Type, Name, NewConf));
|
||||||
_Different ->
|
_Different ->
|
||||||
%% TODO
|
%% TODO
|
||||||
%% Again, we're not strictly required to drop live connections in this case.
|
%% Again, we're not strictly required to drop live connections in this case.
|
||||||
|
@@ -588,7 +588,7 @@ perform_listener_change(update, {{Type, Name, ConfOld}, {_, _, ConfNew}}) ->
 perform_listener_change(stop, {Type, Name, Conf}) ->
     stop_listener(Type, Name, Conf).
 
-esockd_opts(ListenerId, Type, Name, Opts0, OldOpts) ->
+esockd_opts(ListenerId, Type, Name, Opts0) ->
     Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0),
     Limiter = limiter(Opts0),
     Opts2 =
@@ -620,7 +620,7 @@ esockd_opts(ListenerId, Type, Name, Opts0, OldOpts) ->
         tcp ->
             Opts3#{tcp_options => tcp_opts(Opts0)};
         ssl ->
-            OptsWithCRL = inject_crl_config(Opts0, OldOpts),
+            OptsWithCRL = inject_crl_config(Opts0),
             OptsWithSNI = inject_sni_fun(ListenerId, OptsWithCRL),
             OptsWithRootFun = inject_root_fun(OptsWithSNI),
             OptsWithVerifyFun = inject_verify_fun(OptsWithRootFun),
@@ -996,7 +996,7 @@ inject_sni_fun(_ListenerId, Conf) ->
     Conf.
 
 inject_crl_config(
-    Conf = #{ssl_options := #{enable_crl_check := true} = SSLOpts}, _OldOpts
+    Conf = #{ssl_options := #{enable_crl_check := true} = SSLOpts}
 ) ->
     HTTPTimeout = emqx_config:get([crl_cache, http_timeout], timer:seconds(15)),
     Conf#{
@@ -1006,16 +1006,7 @@ inject_crl_config(
             crl_cache => {emqx_ssl_crl_cache, {internal, [{http, HTTPTimeout}]}}
         }
     };
-inject_crl_config(#{ssl_options := SSLOpts0} = Conf0, #{} = OldOpts) ->
-    %% Note: we must set crl options to `undefined' to unset them. Otherwise,
-    %% `esockd' will retain such options when `esockd:merge_opts/2' is called and the SSL
-    %% options were previously enabled.
-    WasEnabled = emqx_utils_maps:deep_get([ssl_options, enable_crl_check], OldOpts, false),
-    Undefine = fun(Acc, K) -> emqx_utils_maps:put_if(Acc, K, undefined, WasEnabled) end,
-    SSLOpts1 = Undefine(SSLOpts0, crl_check),
-    SSLOpts = Undefine(SSLOpts1, crl_cache),
-    Conf0#{ssl_options := SSLOpts};
-inject_crl_config(Conf, undefined = _OldOpts) ->
+inject_crl_config(Conf) ->
     Conf.
 
 maybe_unregister_ocsp_stapling_refresh(
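The `OldOpts` argument master threads from `esockd_opts/5` down to `inject_crl_config/2` exists for the clause removed here: as its comment says, CRL options must be overwritten with `undefined`, or `esockd:merge_opts/2` keeps previously enabled values alive. A small self-contained sketch of that conditional-unset idiom, with `put_if/4` re-implemented locally in place of `emqx_utils_maps:put_if/4`:

%% Sketch of the conditional-unset idiom from inject_crl_config/2 above.
-module(crl_unset_sketch).
-export([disable_crl/2]).

%% put_if(Map, Key, Value, true) stores Key => Value; otherwise Map is unchanged.
put_if(Map, Key, Value, true) -> Map#{Key => Value};
put_if(Map, _Key, _Value, false) -> Map.

%% When CRL checking was previously enabled, overwrite the options with
%% `undefined' so a later merge cannot silently keep them alive.
disable_crl(SSLOpts0, WasEnabled) ->
    SSLOpts1 = put_if(SSLOpts0, crl_check, undefined, WasEnabled),
    put_if(SSLOpts1, crl_cache, undefined, WasEnabled).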
@@ -105,7 +105,7 @@ format(Msg, Meta, Config) ->
 maybe_format_msg(undefined, _Meta, _Config) ->
     #{};
 maybe_format_msg({report, Report0} = Msg, #{report_cb := Cb} = Meta, Config) ->
-    Report = emqx_logger_textfmt:try_encode_meta(Report0, Config),
+    Report = emqx_logger_textfmt:try_encode_payload(Report0, Config),
     case is_map(Report) andalso Cb =:= ?DEFAULT_FORMATTER of
         true ->
             %% reporting a map without a customised format function
@@ -20,7 +20,7 @@
 
 -export([format/2]).
 -export([check_config/1]).
--export([try_format_unicode/1, try_encode_meta/2]).
+-export([try_format_unicode/1, try_encode_payload/2]).
 %% Used in the other log formatters
 -export([evaluate_lazy_values_if_dbg_level/1, evaluate_lazy_values/1]).
@@ -111,7 +111,7 @@ is_list_report_acceptable(_) ->
 enrich_report(ReportRaw0, Meta, Config) ->
     %% clientid and peername always in emqx_conn's process metadata.
     %% topic and username can be put in meta using ?SLOG/3, or put in msg's report by ?SLOG/2
-    ReportRaw = try_encode_meta(ReportRaw0, Config),
+    ReportRaw = try_encode_payload(ReportRaw0, Config),
     Topic =
         case maps:get(topic, Meta, undefined) of
             undefined -> maps:get(topic, ReportRaw, undefined);
@@ -180,22 +180,9 @@ enrich_topic({Fmt, Args}, #{topic := Topic}) when is_list(Fmt) ->
 enrich_topic(Msg, _) ->
     Msg.
 
-try_encode_meta(Report, Config) ->
-    lists:foldl(
-        fun(Meta, Acc) ->
-            try_encode_meta(Meta, Acc, Config)
-        end,
-        Report,
-        [payload, packet]
-    ).
-
-try_encode_meta(payload, #{payload := Payload} = Report, #{payload_encode := Encode}) ->
+try_encode_payload(#{payload := Payload} = Report, #{payload_encode := Encode}) ->
     Report#{payload := encode_payload(Payload, Encode)};
-try_encode_meta(packet, #{packet := Packet} = Report, #{payload_encode := Encode}) when
-    is_tuple(Packet)
-->
-    Report#{packet := emqx_packet:format(Packet, Encode)};
-try_encode_meta(_, Report, _Config) ->
+try_encode_payload(Report, _Config) ->
     Report.
 
 encode_payload(Payload, text) ->
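Master generalizes the old `try_encode_payload/2` into `try_encode_meta/2` by folding over the `payload` and `packet` keys, so each key that is present gets redacted or encoded the same way. A self-contained sketch of that fold, with a local stand-in for `emqx_utils_conv:bin/1` and a tagging tuple in place of `emqx_packet:format/2`:

%% Sketch only; mirrors the fold shape of try_encode_meta/2 above.
-module(encode_meta_sketch).
-export([demo/0]).

try_encode_meta(Report, Config) ->
    lists:foldl(
        fun(Key, Acc) -> encode_one(Key, Acc, Config) end,
        Report,
        [payload, packet]
    ).

%% Each clause fires only when its key is present in the report map.
encode_one(payload, #{payload := Payload} = Report, #{payload_encode := Encode}) ->
    Report#{payload := encode_payload(Payload, Encode)};
encode_one(packet, #{packet := Packet} = Report, _Config) when is_tuple(Packet) ->
    %% The real code calls emqx_packet:format(Packet, Encode); tag it here.
    Report#{packet := {formatted, Packet}};
encode_one(_Key, Report, _Config) ->
    Report.

encode_payload(Payload, text) -> Payload;
encode_payload(_Payload, hidden) -> "******";
encode_payload(Payload, hex) -> binary:encode_hex(to_bin(Payload)).

%% Local stand-in for emqx_utils_conv:bin/1 (binaries and strings only).
to_bin(B) when is_binary(B) -> B;
to_bin(L) when is_list(L) -> list_to_binary(L).

demo() ->
    %% The payload is hidden; the absent packet key is left alone.
    try_encode_meta(#{payload => <<"secret">>}, #{payload_encode => hidden}).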
@@ -203,5 +190,4 @@ encode_payload(Payload, text) ->
 encode_payload(_Payload, hidden) ->
     "******";
 encode_payload(Payload, hex) ->
-    Bin = emqx_utils_conv:bin(Payload),
-    binary:encode_hex(Bin).
+    binary:encode_hex(Payload).
@@ -51,6 +51,7 @@
 ]).
 
 -export([
+    format/1,
     format/2
 ]).
@@ -480,6 +481,10 @@ will_msg(#mqtt_packet_connect{
         headers = #{username => Username, properties => Props}
     }.
 
+%% @doc Format packet
+-spec format(emqx_types:packet()) -> iolist().
+format(Packet) -> format(Packet, emqx_trace_handler:payload_encode()).
+
 %% @doc Format packet
 -spec format(emqx_types:packet(), hex | text | hidden) -> iolist().
 format(#mqtt_packet{header = Header, variable = Variable, payload = Payload}, PayloadEncode) ->
@@ -56,11 +56,6 @@
     cold_get_subscription/2
 ]).
 
--export([
-    format_lease_events/1,
-    format_stream_progresses/1
-]).
-
 -define(schedule_subscribe, schedule_subscribe).
 -define(schedule_unsubscribe, schedule_unsubscribe).
@@ -241,14 +236,14 @@ schedule_subscribe(
             ScheduledActions1 = ScheduledActions0#{
                 ShareTopicFilter => ScheduledAction#{type => {?schedule_subscribe, SubOpts}}
             },
-            ?tp(debug, shared_subs_schedule_subscribe_override, #{
+            ?tp(warning, shared_subs_schedule_subscribe_override, #{
                 share_topic_filter => ShareTopicFilter,
                 new_type => {?schedule_subscribe, SubOpts},
                 old_action => format_schedule_action(ScheduledAction)
             }),
             SharedSubS0#{scheduled_actions := ScheduledActions1};
         _ ->
-            ?tp(debug, shared_subs_schedule_subscribe_new, #{
+            ?tp(warning, shared_subs_schedule_subscribe_new, #{
                 share_topic_filter => ShareTopicFilter, subopts => SubOpts
             }),
             Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_subscribe(
@@ -299,7 +294,7 @@ schedule_unsubscribe(
             ScheduledActions1 = ScheduledActions0#{
                 ShareTopicFilter => ScheduledAction1
             },
-            ?tp(debug, shared_subs_schedule_unsubscribe_override, #{
+            ?tp(warning, shared_subs_schedule_unsubscribe_override, #{
                 share_topic_filter => ShareTopicFilter,
                 new_type => ?schedule_unsubscribe,
                 old_action => format_schedule_action(ScheduledAction0)
@@ -314,7 +309,7 @@ schedule_unsubscribe(
                     progresses => []
                 }
             },
-            ?tp(debug, shared_subs_schedule_unsubscribe_new, #{
+            ?tp(warning, shared_subs_schedule_unsubscribe_new, #{
                 share_topic_filter => ShareTopicFilter,
                 stream_keys => format_stream_keys(StreamKeys)
             }),
@@ -339,7 +334,7 @@ renew_streams(S0, #{agent := Agent0, scheduled_actions := ScheduledActions} = Sh
         Agent0
     ),
     StreamLeaseEvents =/= [] andalso
-        ?tp(debug, shared_subs_new_stream_lease_events, #{
+        ?tp(warning, shared_subs_new_stream_lease_events, #{
             stream_lease_events => format_lease_events(StreamLeaseEvents)
         }),
     S1 = lists:foldl(
@@ -506,7 +501,7 @@ run_scheduled_action(
     Progresses1 = stream_progresses(S, StreamKeysToWait0 -- StreamKeysToWait1) ++ Progresses0,
     case StreamKeysToWait1 of
         [] ->
-            ?tp(debug, shared_subs_schedule_action_complete, #{
+            ?tp(warning, shared_subs_schedule_action_complete, #{
                 share_topic_filter => ShareTopicFilter,
                 progresses => format_stream_progresses(Progresses1),
                 type => Type
@@ -530,7 +525,7 @@ run_scheduled_action(
             end;
         _ ->
             Action1 = Action#{stream_keys_to_wait => StreamKeysToWait1, progresses => Progresses1},
-            ?tp(debug, shared_subs_schedule_action_continue, #{
+            ?tp(warning, shared_subs_schedule_action_continue, #{
                 share_topic_filter => ShareTopicFilter,
                 new_action => format_schedule_action(Action1)
            }),
@@ -62,7 +62,7 @@
     streams := [{pid(), quicer:stream_handle()}],
     %% New stream opts
     stream_opts := map(),
-    %% If connection is resumed from session ticket
+    %% If conneciton is resumed from session ticket
     is_resumed => boolean(),
     %% mqtt message serializer config
     serialize => undefined,
@@ -70,8 +70,8 @@
 }.
 -type cb_ret() :: quicer_lib:cb_ret().
 
-%% @doc Data streams initializations are started in parallel with control streams, data streams are blocked
-%% for the activation from control stream after it is accepted as a legit connection.
+%% @doc Data streams initializions are started in parallel with control streams, data streams are blocked
+%% for the activation from control stream after it is accepted as a legit conneciton.
 %% For security, the initial number of allowed data streams from client should be limited by
 %% 'peer_bidi_stream_count` & 'peer_unidi_stream_count`
 -spec activate_data_streams(pid(), {
@@ -80,7 +80,7 @@
 activate_data_streams(ConnOwner, {PS, Serialize, Channel}) ->
     gen_server:call(ConnOwner, {activate_data_streams, {PS, Serialize, Channel}}, infinity).
 
-%% @doc connection owner init callback
+%% @doc conneciton owner init callback
 -spec init(map()) -> {ok, cb_state()}.
 init(#{stream_opts := SOpts} = S) when is_list(SOpts) ->
     init(S#{stream_opts := maps:from_list(SOpts)});
@@ -589,14 +589,6 @@ ensure_valid_options(Options, Versions) ->
 
 ensure_valid_options([], _, Acc) ->
     lists:reverse(Acc);
-ensure_valid_options([{K, undefined} | T], Versions, Acc) when
-    K =:= crl_check;
-    K =:= crl_cache
-->
-    %% Note: we must set crl options to `undefined' to unset them. Otherwise,
-    %% `esockd' will retain such options when `esockd:merge_opts/2' is called and the SSL
-    %% options were previously enabled.
-    ensure_valid_options(T, Versions, [{K, undefined} | Acc]);
 ensure_valid_options([{_, undefined} | T], Versions, Acc) ->
     ensure_valid_options(T, Versions, Acc);
 ensure_valid_options([{_, ""} | T], Versions, Acc) ->
@@ -17,6 +17,7 @@
 -include("emqx_mqtt.hrl").
 
 -export([format/2]).
+-export([format_meta_map/1]).
 
 %% logger_formatter:config/0 is not exported.
 -type config() :: map().
@@ -42,6 +43,10 @@ format(
 format(Event, Config) ->
     emqx_logger_textfmt:format(Event, Config).
 
+format_meta_map(Meta) ->
+    Encode = emqx_trace_handler:payload_encode(),
+    format_meta_map(Meta, Encode).
+
 format_meta_map(Meta, Encode) ->
     format_meta_map(Meta, Encode, [
         {packet, fun format_packet/2},
@@ -436,7 +436,6 @@ websocket_handle({Frame, _}, State) ->
     %% TODO: should not close the ws connection
     ?LOG(error, #{msg => "unexpected_frame", frame => Frame}),
     shutdown(unexpected_ws_frame, State).
-
 websocket_info({call, From, Req}, State) ->
     handle_call(From, Req, State);
 websocket_info({cast, rate_limit}, State) ->
@@ -738,8 +737,7 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) ->
                 input_bytes => Data
             }),
             FrameError = {frame_error, Reason},
-            NState = enrich_state(Reason, State),
-            {[{incoming, FrameError} | Packets], NState};
+            {[{incoming, FrameError} | Packets], State};
         error:Reason:Stacktrace ->
             ?LOG(error, #{
                 at_state => emqx_frame:describe_state(ParseState),
@@ -832,7 +830,7 @@ serialize_and_inc_stats_fun(#state{serialize = Serialize}) ->
             ?LOG(warning, #{
                 msg => "packet_discarded",
                 reason => "frame_too_large",
-                packet => Packet
+                packet => emqx_packet:format(Packet)
             }),
             ok = emqx_metrics:inc('delivery.dropped.too_large'),
             ok = emqx_metrics:inc('delivery.dropped'),
@@ -1071,13 +1069,6 @@ check_max_connection(Type, Listener) ->
                 {denny, Reason}
             end
     end.
-
-enrich_state(#{parse_state := NParseState}, State) ->
-    Serialize = emqx_frame:serialize_opts(NParseState),
-    State#state{parse_state = NParseState, serialize = Serialize};
-enrich_state(_, State) ->
-    State.
-
 %%--------------------------------------------------------------------
 %% For CT tests
 %%--------------------------------------------------------------------
@@ -414,32 +414,24 @@ t_handle_in_auth(_) ->
     emqx_channel:handle_in(?AUTH_PACKET(), Channel).
 
 t_handle_in_frame_error(_) ->
-    IdleChannelV5 = channel(#{conn_state => idle}),
-    %% no CONNACK packet for v4
-    ?assertMatch(
-        {shutdown, #{shutdown_count := frame_too_large, cause := frame_too_large}, _Chan},
-        emqx_channel:handle_in(
-            {frame_error, #{cause => frame_too_large}}, v4(IdleChannelV5)
-        )
-    ),
+    IdleChannel = channel(#{conn_state => idle}),
+    {shutdown, #{shutdown_count := frame_too_large, cause := frame_too_large}, _Chan} =
+        emqx_channel:handle_in({frame_error, #{cause => frame_too_large}}, IdleChannel),
 
     ConnectingChan = channel(#{conn_state => connecting}),
     ConnackPacket = ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE),
-    ?assertMatch(
-        {shutdown,
-            #{
-                shutdown_count := frame_too_large,
-                cause := frame_too_large,
-                limit := 100,
-                received := 101
-            },
-            ConnackPacket, _},
+    {shutdown,
+        #{
+            shutdown_count := frame_too_large,
+            cause := frame_too_large,
+            limit := 100,
+            received := 101
+        },
+        ConnackPacket,
+        _} =
         emqx_channel:handle_in(
             {frame_error, #{cause => frame_too_large, received => 101, limit => 100}},
             ConnectingChan
-        )
-    ),
+        ),
 
     DisconnectPacket = ?DISCONNECT_PACKET(?RC_PACKET_TOO_LARGE),
     ConnectedChan = channel(#{conn_state => connected}),
     ?assertMatch(
@@ -138,14 +138,13 @@ init_per_testcase(t_refresh_config = TestCase, Config) ->
     ];
 init_per_testcase(TestCase, Config) when
     TestCase =:= t_update_listener;
-    TestCase =:= t_update_listener_enable_disable;
     TestCase =:= t_validations
 ->
     ct:timetrap({seconds, 30}),
     ok = snabbkaffe:start_trace(),
     %% when running emqx standalone tests, we can't use those
     %% features.
-    case does_module_exist(emqx_mgmt) of
+    case does_module_exist(emqx_management) of
         true ->
             DataDir = ?config(data_dir, Config),
             CRLFile = filename:join([DataDir, "intermediate-revoked.crl.pem"]),
|
@ -166,7 +165,7 @@ init_per_testcase(TestCase, Config) when
|
||||||
{emqx_conf, #{config => #{listeners => #{ssl => #{default => ListenerConf}}}}},
|
{emqx_conf, #{config => #{listeners => #{ssl => #{default => ListenerConf}}}}},
|
||||||
emqx,
|
emqx,
|
||||||
emqx_management,
|
emqx_management,
|
||||||
emqx_mgmt_api_test_util:emqx_dashboard()
|
{emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
|
||||||
],
|
],
|
||||||
#{work_dir => emqx_cth_suite:work_dir(TestCase, Config)}
|
#{work_dir => emqx_cth_suite:work_dir(TestCase, Config)}
|
||||||
),
|
),
|
||||||
|
@@ -207,7 +206,6 @@ read_crl(Filename) ->
 
 end_per_testcase(TestCase, Config) when
     TestCase =:= t_update_listener;
-    TestCase =:= t_update_listener_enable_disable;
     TestCase =:= t_validations
 ->
     Skip = proplists:get_bool(skip_does_not_apply, Config),
@@ -1059,104 +1057,3 @@ do_t_validations(_Config) ->
     ),
 
     ok.
-
-%% Checks that if CRL is ever enabled and then disabled, clients can connect, even if they
-%% would otherwise not have their corresponding CRLs cached and fail with `{bad_crls,
-%% no_relevant_crls}`.
-t_update_listener_enable_disable(Config) ->
-    case proplists:get_bool(skip_does_not_apply, Config) of
-        true ->
-            ct:pal("skipping as this test does not apply in this profile"),
-            ok;
-        false ->
-            do_t_update_listener_enable_disable(Config)
-    end.
-
-do_t_update_listener_enable_disable(Config) ->
-    DataDir = ?config(data_dir, Config),
-    Keyfile = filename:join([DataDir, "server.key.pem"]),
-    Certfile = filename:join([DataDir, "server.cert.pem"]),
-    Cacertfile = filename:join([DataDir, "ca-chain.cert.pem"]),
-    ClientCert = filename:join(DataDir, "client.cert.pem"),
-    ClientKey = filename:join(DataDir, "client.key.pem"),
-
-    ListenerId = "ssl:default",
-    %% Enable CRL
-    {ok, {{_, 200, _}, _, ListenerData0}} = get_listener_via_api(ListenerId),
-    CRLConfig0 =
-        #{
-            <<"ssl_options">> =>
-                #{
-                    <<"keyfile">> => Keyfile,
-                    <<"certfile">> => Certfile,
-                    <<"cacertfile">> => Cacertfile,
-                    <<"enable_crl_check">> => true,
-                    <<"fail_if_no_peer_cert">> => true
-                }
-        },
-    ListenerData1 = emqx_utils_maps:deep_merge(ListenerData0, CRLConfig0),
-    {ok, {_, _, ListenerData2}} = update_listener_via_api(ListenerId, ListenerData1),
-    ?assertMatch(
-        #{
-            <<"ssl_options">> :=
-                #{
-                    <<"enable_crl_check">> := true,
-                    <<"verify">> := <<"verify_peer">>,
-                    <<"fail_if_no_peer_cert">> := true
-                }
-        },
-        ListenerData2
-    ),
-
-    %% Disable CRL
-    CRLConfig1 =
-        #{
-            <<"ssl_options">> =>
-                #{
-                    <<"keyfile">> => Keyfile,
-                    <<"certfile">> => Certfile,
-                    <<"cacertfile">> => Cacertfile,
-                    <<"enable_crl_check">> => false,
-                    <<"fail_if_no_peer_cert">> => true
-                }
-        },
-    ListenerData3 = emqx_utils_maps:deep_merge(ListenerData2, CRLConfig1),
-    redbug:start(
-        [
-            "esockd_server:get_listener_prop -> return",
-            "esockd_server:set_listener_prop -> return",
-            "esockd:merge_opts -> return",
-            "esockd_listener_sup:set_options -> return",
-            "emqx_listeners:inject_crl_config -> return"
-        ],
-        [{msgs, 100}]
-    ),
-    {ok, {_, _, ListenerData4}} = update_listener_via_api(ListenerId, ListenerData3),
-    ?assertMatch(
-        #{
-            <<"ssl_options">> :=
-                #{
-                    <<"enable_crl_check">> := false,
-                    <<"verify">> := <<"verify_peer">>,
-                    <<"fail_if_no_peer_cert">> := true
-                }
-        },
-        ListenerData4
-    ),
-
-    %% Now the client that would be blocked tries to connect and should now be allowed.
-    {ok, C} = emqtt:start_link([
-        {ssl, true},
-        {ssl_opts, [
-            {certfile, ClientCert},
-            {keyfile, ClientKey},
-            {verify, verify_none}
-        ]},
-        {port, 8883}
-    ]),
-    ?assertMatch({ok, _}, emqtt:connect(C)),
-    emqtt:stop(C),
-
-    ?assertNotReceive({http_get, _}),
-
-    ok.
@@ -63,7 +63,6 @@ groups() ->
             t_parse_malformed_properties,
             t_malformed_connect_header,
             t_malformed_connect_data,
-            t_malformed_connect_data_proto_ver,
             t_reserved_connect_flag,
             t_invalid_clientid,
             t_undefined_password,
@@ -168,8 +167,6 @@ t_parse_malformed_utf8_string(_) ->
     ParseState = emqx_frame:initial_parse_state(#{strict_mode => true}),
     ?ASSERT_FRAME_THROW(utf8_string_invalid, emqx_frame:parse(MalformedPacket, ParseState)).
 
-%% TODO: parse v3 with 0 length clientid
-
 t_serialize_parse_v3_connect(_) ->
     Bin =
         <<16, 37, 0, 6, 77, 81, 73, 115, 100, 112, 3, 2, 0, 60, 0, 23, 109, 111, 115, 113, 112, 117,
@@ -327,7 +324,7 @@ t_serialize_parse_bridge_connect(_) ->
         header = #mqtt_packet_header{type = ?CONNECT},
         variable = #mqtt_packet_connect{
             clientid = <<"C_00:0C:29:2B:77:52">>,
-            proto_ver = ?MQTT_PROTO_V3,
+            proto_ver = 16#03,
             proto_name = <<"MQIsdp">>,
             is_bridge = true,
             will_retain = true,
@ -689,36 +686,15 @@ t_malformed_connect_header(_) ->
|
||||||
).
|
).
|
||||||
|
|
||||||
t_malformed_connect_data(_) ->
|
t_malformed_connect_data(_) ->
|
||||||
ProtoNameWithLen = <<0, 6, "MQIsdp">>,
|
|
||||||
ConnectFlags = <<2#00000000>>,
|
|
||||||
ClientIdwithLen = <<0, 1, "a">>,
|
|
||||||
UnexpectedRestBin = <<0, 1, 2>>,
|
|
||||||
?ASSERT_FRAME_THROW(
|
?ASSERT_FRAME_THROW(
|
||||||
#{cause := malformed_connect, unexpected_trailing_bytes := 3},
|
#{cause := malformed_connect, unexpected_trailing_bytes := _},
|
||||||
emqx_frame:parse(
|
emqx_frame:parse(<<16, 15, 0, 6, 77, 81, 73, 115, 100, 112, 3, 0, 0, 0, 0, 0, 0>>)
|
||||||
<<16, 18, ProtoNameWithLen/binary, ?MQTT_PROTO_V3, ConnectFlags/binary, 0, 0,
|
|
||||||
ClientIdwithLen/binary, UnexpectedRestBin/binary>>
|
|
||||||
)
|
|
||||||
).
|
|
||||||
|
|
||||||
t_malformed_connect_data_proto_ver(_) ->
|
|
||||||
Proto3NameWithLen = <<0, 6, "MQIsdp">>,
|
|
||||||
?ASSERT_FRAME_THROW(
|
|
||||||
#{cause := malformed_connect, header_bytes := <<>>},
|
|
||||||
emqx_frame:parse(<<16, 8, Proto3NameWithLen/binary>>)
|
|
||||||
),
|
|
||||||
ProtoNameWithLen = <<0, 4, "MQTT">>,
|
|
||||||
?ASSERT_FRAME_THROW(
|
|
||||||
#{cause := malformed_connect, header_bytes := <<>>},
|
|
||||||
emqx_frame:parse(<<16, 6, ProtoNameWithLen/binary>>)
|
|
||||||
).
|
).
|
||||||
|
|
||||||
t_reserved_connect_flag(_) ->
|
t_reserved_connect_flag(_) ->
|
||||||
?assertException(
|
?assertException(
|
||||||
throw,
|
throw,
|
||||||
{frame_parse_error, #{
|
{frame_parse_error, reserved_connect_flag},
|
||||||
cause := reserved_connect_flag, proto_ver := ?MQTT_PROTO_V3, proto_name := <<"MQIsdp">>
|
|
||||||
}},
|
|
||||||
emqx_frame:parse(<<16, 15, 0, 6, 77, 81, 73, 115, 100, 112, 3, 1, 0, 0, 1, 0, 0>>)
|
emqx_frame:parse(<<16, 15, 0, 6, 77, 81, 73, 115, 100, 112, 3, 1, 0, 0, 1, 0, 0>>)
|
||||||
).
|
).
|
||||||
|
|
||||||
|
@ -750,7 +726,7 @@ t_undefined_password(_) ->
|
||||||
},
|
},
|
||||||
variable = #mqtt_packet_connect{
|
variable = #mqtt_packet_connect{
|
||||||
proto_name = <<"MQTT">>,
|
proto_name = <<"MQTT">>,
|
||||||
proto_ver = ?MQTT_PROTO_V4,
|
proto_ver = 4,
|
||||||
is_bridge = false,
|
is_bridge = false,
|
||||||
clean_start = true,
|
clean_start = true,
|
||||||
will_flag = false,
|
will_flag = false,
|
||||||
|
@ -798,9 +774,7 @@ t_invalid_will_retain(_) ->
|
||||||
54, 75, 78, 112, 57, 0, 6, 68, 103, 55, 87, 87, 87>>,
|
54, 75, 78, 112, 57, 0, 6, 68, 103, 55, 87, 87, 87>>,
|
||||||
?assertException(
|
?assertException(
|
||||||
throw,
|
throw,
|
||||||
{frame_parse_error, #{
|
{frame_parse_error, invalid_will_retain},
|
||||||
cause := invalid_will_retain, proto_ver := ?MQTT_PROTO_V5, proto_name := <<"MQTT">>
|
|
||||||
}},
|
|
||||||
emqx_frame:parse(ConnectBin)
|
emqx_frame:parse(ConnectBin)
|
||||||
),
|
),
|
||||||
ok.
|
ok.
|
||||||
|
@ -822,30 +796,22 @@ t_invalid_will_qos(_) ->
|
||||||
),
|
),
|
||||||
?assertException(
|
?assertException(
|
||||||
throw,
|
throw,
|
||||||
{frame_parse_error, #{
|
{frame_parse_error, invalid_will_qos},
|
||||||
cause := invalid_will_qos, proto_ver := ?MQTT_PROTO_V5, proto_name := <<"MQTT">>
|
|
||||||
}},
|
|
||||||
emqx_frame:parse(ConnectBinFun(Will_F_WillQoS1))
|
emqx_frame:parse(ConnectBinFun(Will_F_WillQoS1))
|
||||||
),
|
),
|
||||||
?assertException(
|
?assertException(
|
||||||
throw,
|
throw,
|
||||||
{frame_parse_error, #{
|
{frame_parse_error, invalid_will_qos},
|
||||||
cause := invalid_will_qos, proto_ver := ?MQTT_PROTO_V5, proto_name := <<"MQTT">>
|
|
||||||
}},
|
|
||||||
emqx_frame:parse(ConnectBinFun(Will_F_WillQoS2))
|
emqx_frame:parse(ConnectBinFun(Will_F_WillQoS2))
|
||||||
),
|
),
|
||||||
?assertException(
|
?assertException(
|
||||||
throw,
|
throw,
|
||||||
{frame_parse_error, #{
|
{frame_parse_error, invalid_will_qos},
|
||||||
cause := invalid_will_qos, proto_ver := ?MQTT_PROTO_V5, proto_name := <<"MQTT">>
|
|
||||||
}},
|
|
||||||
emqx_frame:parse(ConnectBinFun(Will_F_WillQoS3))
|
emqx_frame:parse(ConnectBinFun(Will_F_WillQoS3))
|
||||||
),
|
),
|
||||||
?assertException(
|
?assertException(
|
||||||
throw,
|
throw,
|
||||||
{frame_parse_error, #{
|
{frame_parse_error, invalid_will_qos},
|
||||||
cause := invalid_will_qos, proto_ver := ?MQTT_PROTO_V5, proto_name := <<"MQTT">>
|
|
||||||
}},
|
|
||||||
emqx_frame:parse(ConnectBinFun(Will_T_WillQoS3))
|
emqx_frame:parse(ConnectBinFun(Will_T_WillQoS3))
|
||||||
),
|
),
|
||||||
ok.
|
ok.
|
||||||
|
|
|
@ -377,60 +377,42 @@ t_will_msg(_) ->
|
||||||
|
|
||||||
t_format(_) ->
|
t_format(_) ->
|
||||||
io:format("~ts", [
|
io:format("~ts", [
|
||||||
emqx_packet:format(
|
emqx_packet:format(#mqtt_packet{
|
||||||
#mqtt_packet{
|
header = #mqtt_packet_header{type = ?CONNACK, retain = true, dup = 0},
|
||||||
header = #mqtt_packet_header{type = ?CONNACK, retain = true, dup = 0},
|
variable = undefined
|
||||||
variable = undefined
|
})
|
||||||
},
|
]),
|
||||||
text
|
io:format("~ts", [
|
||||||
)
|
emqx_packet:format(#mqtt_packet{
|
||||||
|
header = #mqtt_packet_header{type = ?CONNACK}, variable = 1, payload = <<"payload">>
|
||||||
|
})
|
||||||
]),
|
]),
|
||||||
io:format(
|
|
||||||
"~ts",
|
|
||||||
[
|
|
||||||
emqx_packet:format(
|
|
||||||
#mqtt_packet{
|
|
||||||
header = #mqtt_packet_header{type = ?CONNACK},
|
|
||||||
variable = 1,
|
|
||||||
payload = <<"payload">>
|
|
||||||
},
|
|
||||||
text
|
|
||||||
)
|
|
||||||
]
|
|
||||||
),
|
|
||||||
io:format("~ts", [
|
io:format("~ts", [
|
||||||
emqx_packet:format(
|
emqx_packet:format(
|
||||||
?CONNECT_PACKET(
|
?CONNECT_PACKET(#mqtt_packet_connect{
|
||||||
#mqtt_packet_connect{
|
will_flag = true,
|
||||||
will_flag = true,
|
will_retain = true,
|
||||||
will_retain = true,
|
will_qos = ?QOS_2,
|
||||||
will_qos = ?QOS_2,
|
will_topic = <<"topic">>,
|
||||||
will_topic = <<"topic">>,
|
will_payload = <<"payload">>
|
||||||
will_payload = <<"payload">>
|
})
|
||||||
}
|
|
||||||
),
|
|
||||||
text
|
|
||||||
)
|
)
|
||||||
]),
|
]),
|
||||||
io:format("~ts", [
|
io:format("~ts", [
|
||||||
emqx_packet:format(?CONNECT_PACKET(#mqtt_packet_connect{password = password}), text)
|
emqx_packet:format(?CONNECT_PACKET(#mqtt_packet_connect{password = password}))
|
||||||
]),
|
]),
|
||||||
io:format("~ts", [emqx_packet:format(?CONNACK_PACKET(?CONNACK_SERVER), text)]),
|
io:format("~ts", [emqx_packet:format(?CONNACK_PACKET(?CONNACK_SERVER))]),
|
||||||
io:format("~ts", [emqx_packet:format(?PUBLISH_PACKET(?QOS_1, 1), text)]),
|
io:format("~ts", [emqx_packet:format(?PUBLISH_PACKET(?QOS_1, 1))]),
|
||||||
|
io:format("~ts", [emqx_packet:format(?PUBLISH_PACKET(?QOS_2, <<"topic">>, 10, <<"payload">>))]),
|
||||||
|
io:format("~ts", [emqx_packet:format(?PUBACK_PACKET(?PUBACK, 98))]),
|
||||||
|
io:format("~ts", [emqx_packet:format(?PUBREL_PACKET(99))]),
|
||||||
io:format("~ts", [
|
io:format("~ts", [
|
||||||
emqx_packet:format(?PUBLISH_PACKET(?QOS_2, <<"topic">>, 10, <<"payload">>), text)
|
emqx_packet:format(?SUBSCRIBE_PACKET(15, [{<<"topic">>, ?QOS_0}, {<<"topic1">>, ?QOS_1}]))
|
||||||
]),
|
]),
|
||||||
io:format("~ts", [emqx_packet:format(?PUBACK_PACKET(?PUBACK, 98), text)]),
|
io:format("~ts", [emqx_packet:format(?SUBACK_PACKET(40, [?QOS_0, ?QOS_1]))]),
|
||||||
io:format("~ts", [emqx_packet:format(?PUBREL_PACKET(99), text)]),
|
io:format("~ts", [emqx_packet:format(?UNSUBSCRIBE_PACKET(89, [<<"t">>, <<"t2">>]))]),
|
||||||
io:format("~ts", [
|
io:format("~ts", [emqx_packet:format(?UNSUBACK_PACKET(90))]),
|
||||||
emqx_packet:format(
|
io:format("~ts", [emqx_packet:format(?DISCONNECT_PACKET(128))]).
|
||||||
?SUBSCRIBE_PACKET(15, [{<<"topic">>, ?QOS_0}, {<<"topic1">>, ?QOS_1}]), text
|
|
||||||
)
|
|
||||||
]),
|
|
||||||
io:format("~ts", [emqx_packet:format(?SUBACK_PACKET(40, [?QOS_0, ?QOS_1]), text)]),
|
|
||||||
io:format("~ts", [emqx_packet:format(?UNSUBSCRIBE_PACKET(89, [<<"t">>, <<"t2">>]), text)]),
|
|
||||||
io:format("~ts", [emqx_packet:format(?UNSUBACK_PACKET(90), text)]),
|
|
||||||
io:format("~ts", [emqx_packet:format(?DISCONNECT_PACKET(128), text)]).
|
|
||||||
|
|
||||||
t_parse_empty_publish(_) ->
|
t_parse_empty_publish(_) ->
|
||||||
%% 52: 0011(type=PUBLISH) 0100 (QoS=2)
|
%% 52: 0011(type=PUBLISH) 0100 (QoS=2)
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_auth, [
|
{application, emqx_auth, [
|
||||||
{description, "EMQX Authentication and authorization"},
|
{description, "EMQX Authentication and authorization"},
|
||||||
{vsn, "0.3.4"},
|
{vsn, "0.3.3"},
|
||||||
{modules, []},
|
{modules, []},
|
||||||
{registered, [emqx_auth_sup]},
|
{registered, [emqx_auth_sup]},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -477,15 +477,9 @@ authorize_deny(
|
||||||
sources()
|
sources()
|
||||||
) ->
|
) ->
|
||||||
authz_result().
|
authz_result().
|
||||||
authorize(#{username := Username} = Client, PubSub, Topic, _DefaultResult, Sources) ->
|
authorize(Client, PubSub, Topic, _DefaultResult, Sources) ->
|
||||||
case maps:get(is_superuser, Client, false) of
|
case maps:get(is_superuser, Client, false) of
|
||||||
true ->
|
true ->
|
||||||
?tp(authz_skipped, #{reason => client_is_superuser, action => PubSub}),
|
|
||||||
?TRACE("AUTHZ", "authorization_skipped_as_superuser", #{
|
|
||||||
username => Username,
|
|
||||||
topic => Topic,
|
|
||||||
action => emqx_access_control:format_action(PubSub)
|
|
||||||
}),
|
|
||||||
emqx_metrics:inc(?METRIC_SUPERUSER),
|
emqx_metrics:inc(?METRIC_SUPERUSER),
|
||||||
{stop, #{result => allow, from => superuser}};
|
{stop, #{result => allow, from => superuser}};
|
||||||
false ->
|
false ->
|
||||||
|
|
|
@ -674,77 +674,5 @@ t_publish_last_will_testament_banned_client_connecting(_Config) ->
|
||||||
|
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
t_sikpped_as_superuser(_Config) ->
|
|
||||||
ClientInfo = #{
|
|
||||||
clientid => <<"clientid">>,
|
|
||||||
username => <<"username">>,
|
|
||||||
peerhost => {127, 0, 0, 1},
|
|
||||||
zone => default,
|
|
||||||
listener => {tcp, default},
|
|
||||||
is_superuser => true
|
|
||||||
},
|
|
||||||
?check_trace(
|
|
||||||
begin
|
|
||||||
?assertEqual(
|
|
||||||
allow,
|
|
||||||
emqx_access_control:authorize(ClientInfo, ?AUTHZ_PUBLISH(?QOS_0), <<"p/t/0">>)
|
|
||||||
),
|
|
||||||
?assertEqual(
|
|
||||||
allow,
|
|
||||||
emqx_access_control:authorize(ClientInfo, ?AUTHZ_PUBLISH(?QOS_1), <<"p/t/1">>)
|
|
||||||
),
|
|
||||||
?assertEqual(
|
|
||||||
allow,
|
|
||||||
emqx_access_control:authorize(ClientInfo, ?AUTHZ_PUBLISH(?QOS_2), <<"p/t/2">>)
|
|
||||||
),
|
|
||||||
?assertEqual(
|
|
||||||
allow,
|
|
||||||
emqx_access_control:authorize(ClientInfo, ?AUTHZ_SUBSCRIBE(?QOS_0), <<"s/t/0">>)
|
|
||||||
),
|
|
||||||
?assertEqual(
|
|
||||||
allow,
|
|
||||||
emqx_access_control:authorize(ClientInfo, ?AUTHZ_SUBSCRIBE(?QOS_1), <<"s/t/1">>)
|
|
||||||
),
|
|
||||||
?assertEqual(
|
|
||||||
allow,
|
|
||||||
emqx_access_control:authorize(ClientInfo, ?AUTHZ_SUBSCRIBE(?QOS_2), <<"s/t/2">>)
|
|
||||||
)
|
|
||||||
end,
|
|
||||||
fun(Trace) ->
|
|
||||||
?assertMatch(
|
|
||||||
[
|
|
||||||
#{
|
|
||||||
reason := client_is_superuser,
|
|
||||||
action := #{qos := ?QOS_0, action_type := publish}
|
|
||||||
},
|
|
||||||
#{
|
|
||||||
reason := client_is_superuser,
|
|
||||||
action := #{qos := ?QOS_1, action_type := publish}
|
|
||||||
},
|
|
||||||
#{
|
|
||||||
reason := client_is_superuser,
|
|
||||||
action := #{qos := ?QOS_2, action_type := publish}
|
|
||||||
},
|
|
||||||
#{
|
|
||||||
reason := client_is_superuser,
|
|
||||||
action := #{qos := ?QOS_0, action_type := subscribe}
|
|
||||||
},
|
|
||||||
#{
|
|
||||||
reason := client_is_superuser,
|
|
||||||
action := #{qos := ?QOS_1, action_type := subscribe}
|
|
||||||
},
|
|
||||||
#{
|
|
||||||
reason := client_is_superuser,
|
|
||||||
action := #{qos := ?QOS_2, action_type := subscribe}
|
|
||||||
}
|
|
||||||
],
|
|
||||||
?of_kind(authz_skipped, Trace)
|
|
||||||
),
|
|
||||||
ok
|
|
||||||
end
|
|
||||||
),
|
|
||||||
|
|
||||||
ok = snabbkaffe:stop().
|
|
||||||
|
|
||||||
stop_apps(Apps) ->
|
stop_apps(Apps) ->
|
||||||
lists:foreach(fun application:stop/1, Apps).
|
lists:foreach(fun application:stop/1, Apps).
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_auth_http, [
|
{application, emqx_auth_http, [
|
||||||
{description, "EMQX External HTTP API Authentication and Authorization"},
|
{description, "EMQX External HTTP API Authentication and Authorization"},
|
||||||
{vsn, "0.3.1"},
|
{vsn, "0.3.0"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{mod, {emqx_auth_http_app, []}},
|
{mod, {emqx_auth_http_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_auth_jwt, [
|
{application, emqx_auth_jwt, [
|
||||||
{description, "EMQX JWT Authentication and Authorization"},
|
{description, "EMQX JWT Authentication and Authorization"},
|
||||||
{vsn, "0.3.3"},
|
{vsn, "0.3.2"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{mod, {emqx_auth_jwt_app, []}},
|
{mod, {emqx_auth_jwt_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_auth_mnesia, [
|
{application, emqx_auth_mnesia, [
|
||||||
{description, "EMQX Buitl-in Database Authentication and Authorization"},
|
{description, "EMQX Buitl-in Database Authentication and Authorization"},
|
||||||
{vsn, "0.1.7"},
|
{vsn, "0.1.6"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{mod, {emqx_auth_mnesia_app, []}},
|
{mod, {emqx_auth_mnesia_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_auth_mongodb, [
|
{application, emqx_auth_mongodb, [
|
||||||
{description, "EMQX MongoDB Authentication and Authorization"},
|
{description, "EMQX MongoDB Authentication and Authorization"},
|
||||||
{vsn, "0.2.2"},
|
{vsn, "0.2.1"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{mod, {emqx_auth_mongodb_app, []}},
|
{mod, {emqx_auth_mongodb_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_auth_mysql, [
|
{application, emqx_auth_mysql, [
|
||||||
{description, "EMQX MySQL Authentication and Authorization"},
|
{description, "EMQX MySQL Authentication and Authorization"},
|
||||||
{vsn, "0.2.2"},
|
{vsn, "0.2.1"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{mod, {emqx_auth_mysql_app, []}},
|
{mod, {emqx_auth_mysql_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_auth_postgresql, [
|
{application, emqx_auth_postgresql, [
|
||||||
{description, "EMQX PostgreSQL Authentication and Authorization"},
|
{description, "EMQX PostgreSQL Authentication and Authorization"},
|
||||||
{vsn, "0.2.2"},
|
{vsn, "0.2.1"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{mod, {emqx_auth_postgresql_app, []}},
|
{mod, {emqx_auth_postgresql_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_auth_redis, [
|
{application, emqx_auth_redis, [
|
||||||
{description, "EMQX Redis Authentication and Authorization"},
|
{description, "EMQX Redis Authentication and Authorization"},
|
||||||
{vsn, "0.2.2"},
|
{vsn, "0.2.1"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{mod, {emqx_auth_redis_app, []}},
|
{mod, {emqx_auth_redis_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_bridge, [
|
{application, emqx_bridge, [
|
||||||
{description, "EMQX bridges"},
|
{description, "EMQX bridges"},
|
||||||
{vsn, "0.2.4"},
|
{vsn, "0.2.3"},
|
||||||
{registered, [emqx_bridge_sup]},
|
{registered, [emqx_bridge_sup]},
|
||||||
{mod, {emqx_bridge_app, []}},
|
{mod, {emqx_bridge_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -1154,7 +1154,7 @@ t_bridges_probe(Config) ->
|
||||||
?assertMatch(
|
?assertMatch(
|
||||||
{ok, 400, #{
|
{ok, 400, #{
|
||||||
<<"code">> := <<"TEST_FAILED">>,
|
<<"code">> := <<"TEST_FAILED">>,
|
||||||
<<"message">> := <<"Connection refused", _/binary>>
|
<<"message">> := <<"Connection refused">>
|
||||||
}},
|
}},
|
||||||
request_json(
|
request_json(
|
||||||
post,
|
post,
|
||||||
|
|
|
@ -889,8 +889,7 @@ t_sync_query_down(Config, Opts) ->
|
||||||
),
|
),
|
||||||
|
|
||||||
?force_ordering(
|
?force_ordering(
|
||||||
#{?snk_kind := SNKKind} when
|
#{?snk_kind := call_query},
|
||||||
SNKKind =:= call_query orelse SNKKind =:= simple_query_enter,
|
|
||||||
#{?snk_kind := cut_connection, ?snk_span := start}
|
#{?snk_kind := cut_connection, ?snk_span := start}
|
||||||
),
|
),
|
||||||
%% Note: order of arguments here is reversed compared to `?force_ordering'.
|
%% Note: order of arguments here is reversed compared to `?force_ordering'.
|
||||||
|
@ -914,7 +913,6 @@ t_sync_query_down(Config, Opts) ->
|
||||||
emqx_common_test_helpers:enable_failure(down, ProxyName, ProxyHost, ProxyPort)
|
emqx_common_test_helpers:enable_failure(down, ProxyName, ProxyHost, ProxyPort)
|
||||||
)
|
)
|
||||||
end),
|
end),
|
||||||
?tp("publishing_message", #{}),
|
|
||||||
try
|
try
|
||||||
{_, {ok, _}} =
|
{_, {ok, _}} =
|
||||||
snabbkaffe:wait_async_action(
|
snabbkaffe:wait_async_action(
|
||||||
|
@ -923,7 +921,6 @@ t_sync_query_down(Config, Opts) ->
|
||||||
infinity
|
infinity
|
||||||
)
|
)
|
||||||
after
|
after
|
||||||
?tp("healing_failure", #{}),
|
|
||||||
emqx_common_test_helpers:heal_failure(down, ProxyName, ProxyHost, ProxyPort)
|
emqx_common_test_helpers:heal_failure(down, ProxyName, ProxyHost, ProxyPort)
|
||||||
end,
|
end,
|
||||||
{ok, _} = snabbkaffe:block_until(SuccessTPFilter, infinity),
|
{ok, _} = snabbkaffe:block_until(SuccessTPFilter, infinity),
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_bridge_gcp_pubsub, [
|
{application, emqx_bridge_gcp_pubsub, [
|
||||||
{description, "EMQX Enterprise GCP Pub/Sub Bridge"},
|
{description, "EMQX Enterprise GCP Pub/Sub Bridge"},
|
||||||
{vsn, "0.3.3"},
|
{vsn, "0.3.2"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{applications, [
|
{applications, [
|
||||||
kernel,
|
kernel,
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_bridge_http, [
|
{application, emqx_bridge_http, [
|
||||||
{description, "EMQX HTTP Bridge and Connector Application"},
|
{description, "EMQX HTTP Bridge and Connector Application"},
|
||||||
{vsn, "0.3.4"},
|
{vsn, "0.3.3"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{applications, [kernel, stdlib, emqx_resource, ehttpc]},
|
{applications, [kernel, stdlib, emqx_resource, ehttpc]},
|
||||||
{env, [
|
{env, [
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_bridge_kafka, [
|
{application, emqx_bridge_kafka, [
|
||||||
{description, "EMQX Enterprise Kafka Bridge"},
|
{description, "EMQX Enterprise Kafka Bridge"},
|
||||||
{vsn, "0.3.4"},
|
{vsn, "0.3.3"},
|
||||||
{registered, [emqx_bridge_kafka_consumer_sup]},
|
{registered, [emqx_bridge_kafka_consumer_sup]},
|
||||||
{applications, [
|
{applications, [
|
||||||
kernel,
|
kernel,
|
||||||
|
|
|
@ -1918,14 +1918,13 @@ t_node_joins_existing_cluster(Config) ->
|
||||||
_Attempts2 = 50,
|
_Attempts2 = 50,
|
||||||
[] =/= erpc:call(N2, emqx_router, lookup_routes, [MQTTTopic])
|
[] =/= erpc:call(N2, emqx_router, lookup_routes, [MQTTTopic])
|
||||||
),
|
),
|
||||||
NumMsgs = 50 * NPartitions,
|
|
||||||
{ok, SRef1} =
|
{ok, SRef1} =
|
||||||
snabbkaffe:subscribe(
|
snabbkaffe:subscribe(
|
||||||
?match_event(#{
|
?match_event(#{
|
||||||
?snk_kind := kafka_consumer_handle_message,
|
?snk_kind := kafka_consumer_handle_message,
|
||||||
?snk_span := {complete, _}
|
?snk_span := {complete, _}
|
||||||
}),
|
}),
|
||||||
NumMsgs,
|
NPartitions,
|
||||||
20_000
|
20_000
|
||||||
),
|
),
|
||||||
lists:foreach(
|
lists:foreach(
|
||||||
|
@ -1934,7 +1933,7 @@ t_node_joins_existing_cluster(Config) ->
|
||||||
Val = <<"v", (integer_to_binary(N))/binary>>,
|
Val = <<"v", (integer_to_binary(N))/binary>>,
|
||||||
publish(Config, KafkaTopic, [#{key => Key, value => Val}])
|
publish(Config, KafkaTopic, [#{key => Key, value => Val}])
|
||||||
end,
|
end,
|
||||||
lists:seq(1, NumMsgs)
|
lists:seq(1, 10 * NPartitions)
|
||||||
),
|
),
|
||||||
{ok, _} = snabbkaffe:receive_events(SRef1),
|
{ok, _} = snabbkaffe:receive_events(SRef1),
|
||||||
|
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_bridge_mqtt, [
|
{application, emqx_bridge_mqtt, [
|
||||||
{description, "EMQX MQTT Broker Bridge"},
|
{description, "EMQX MQTT Broker Bridge"},
|
||||||
{vsn, "0.2.4"},
|
{vsn, "0.2.3"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{applications, [
|
{applications, [
|
||||||
kernel,
|
kernel,
|
||||||
|
|
|
@ -98,7 +98,7 @@ on_start(ResourceId, #{server := Server} = Conf) ->
|
||||||
server => Server
|
server => Server
|
||||||
}};
|
}};
|
||||||
{error, Reason} ->
|
{error, Reason} ->
|
||||||
{error, emqx_maybe:define(explain_error(Reason), Reason)}
|
{error, Reason}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
on_add_channel(
|
on_add_channel(
|
||||||
|
@ -200,7 +200,7 @@ on_get_channel_status(
|
||||||
} = _State
|
} = _State
|
||||||
) when is_map_key(ChannelId, Channels) ->
|
) when is_map_key(ChannelId, Channels) ->
|
||||||
%% The channel should be ok as long as the MQTT client is ok
|
%% The channel should be ok as long as the MQTT client is ok
|
||||||
?status_connected.
|
connected.
|
||||||
|
|
||||||
on_get_channels(ResId) ->
|
on_get_channels(ResId) ->
|
||||||
emqx_bridge_v2:get_channels_for_connector(ResId).
|
emqx_bridge_v2:get_channels_for_connector(ResId).
|
||||||
|
@ -356,15 +356,10 @@ on_get_status(_ResourceId, State) ->
|
||||||
Workers = [{Pool, Worker} || {Pool, PN} <- Pools, {_Name, Worker} <- ecpool:workers(PN)],
|
Workers = [{Pool, Worker} || {Pool, PN} <- Pools, {_Name, Worker} <- ecpool:workers(PN)],
|
||||||
try emqx_utils:pmap(fun get_status/1, Workers, ?HEALTH_CHECK_TIMEOUT) of
|
try emqx_utils:pmap(fun get_status/1, Workers, ?HEALTH_CHECK_TIMEOUT) of
|
||||||
Statuses ->
|
Statuses ->
|
||||||
case combine_status(Statuses) of
|
combine_status(Statuses)
|
||||||
{Status, Msg} ->
|
|
||||||
{Status, State, Msg};
|
|
||||||
Status ->
|
|
||||||
Status
|
|
||||||
end
|
|
||||||
catch
|
catch
|
||||||
exit:timeout ->
|
exit:timeout ->
|
||||||
?status_connecting
|
connecting
|
||||||
end.
|
end.
|
||||||
|
|
||||||
get_status({_Pool, Worker}) ->
|
get_status({_Pool, Worker}) ->
|
||||||
|
@ -372,7 +367,7 @@ get_status({_Pool, Worker}) ->
|
||||||
{ok, Client} ->
|
{ok, Client} ->
|
||||||
emqx_bridge_mqtt_ingress:status(Client);
|
emqx_bridge_mqtt_ingress:status(Client);
|
||||||
{error, _} ->
|
{error, _} ->
|
||||||
?status_disconnected
|
disconnected
|
||||||
end.
|
end.
|
||||||
|
|
||||||
combine_status(Statuses) ->
|
combine_status(Statuses) ->
|
||||||
|
@ -380,25 +375,11 @@ combine_status(Statuses) ->
|
||||||
%% Natural order of statuses: [connected, connecting, disconnected]
|
%% Natural order of statuses: [connected, connecting, disconnected]
|
||||||
%% * `disconnected` wins over any other status
|
%% * `disconnected` wins over any other status
|
||||||
%% * `connecting` wins over `connected`
|
%% * `connecting` wins over `connected`
|
||||||
ToStatus = fun
|
case lists:reverse(lists:usort(Statuses)) of
|
||||||
({S, _Reason}) -> S;
|
|
||||||
(S) when is_atom(S) -> S
|
|
||||||
end,
|
|
||||||
CompareFn = fun(S1A, S2A) ->
|
|
||||||
S1 = ToStatus(S1A),
|
|
||||||
S2 = ToStatus(S2A),
|
|
||||||
S1 > S2
|
|
||||||
end,
|
|
||||||
case lists:usort(CompareFn, Statuses) of
|
|
||||||
[{Status, Reason} | _] ->
|
|
||||||
case explain_error(Reason) of
|
|
||||||
undefined -> Status;
|
|
||||||
Msg -> {Status, Msg}
|
|
||||||
end;
|
|
||||||
[Status | _] ->
|
[Status | _] ->
|
||||||
Status;
|
Status;
|
||||||
[] ->
|
[] ->
|
||||||
?status_disconnected
|
disconnected
|
||||||
end.
|
end.
|
||||||
|
|
||||||
mk_ingress_config(
|
mk_ingress_config(
|
||||||
|
@ -533,54 +514,15 @@ connect(Pid, Name) ->
|
||||||
{ok, Pid};
|
{ok, Pid};
|
||||||
{error, Reason} = Error ->
|
{error, Reason} = Error ->
|
||||||
IsDryRun = emqx_resource:is_dry_run(Name),
|
IsDryRun = emqx_resource:is_dry_run(Name),
|
||||||
log_connect_error_reason(?LOG_LEVEL(IsDryRun), Reason, Name),
|
?SLOG(?LOG_LEVEL(IsDryRun), #{
|
||||||
|
msg => "ingress_client_connect_failed",
|
||||||
|
reason => Reason,
|
||||||
|
resource_id => Name
|
||||||
|
}),
|
||||||
_ = catch emqtt:stop(Pid),
|
_ = catch emqtt:stop(Pid),
|
||||||
Error
|
Error
|
||||||
end.
|
end.
|
||||||
|
|
||||||
log_connect_error_reason(Level, {tcp_closed, _} = Reason, Name) ->
|
|
||||||
?tp(emqx_bridge_mqtt_connector_tcp_closed, #{}),
|
|
||||||
?SLOG(Level, #{
|
|
||||||
msg => "ingress_client_connect_failed",
|
|
||||||
reason => Reason,
|
|
||||||
name => Name,
|
|
||||||
explain => explain_error(Reason)
|
|
||||||
});
|
|
||||||
log_connect_error_reason(Level, econnrefused = Reason, Name) ->
|
|
||||||
?tp(emqx_bridge_mqtt_connector_econnrefused_error, #{}),
|
|
||||||
?SLOG(Level, #{
|
|
||||||
msg => "ingress_client_connect_failed",
|
|
||||||
reason => Reason,
|
|
||||||
name => Name,
|
|
||||||
explain => explain_error(Reason)
|
|
||||||
});
|
|
||||||
log_connect_error_reason(Level, Reason, Name) ->
|
|
||||||
?SLOG(Level, #{
|
|
||||||
msg => "ingress_client_connect_failed",
|
|
||||||
reason => Reason,
|
|
||||||
name => Name
|
|
||||||
}).
|
|
||||||
|
|
||||||
explain_error(econnrefused) ->
|
|
||||||
<<
|
|
||||||
"Connection refused. "
|
|
||||||
"This error indicates that your connection attempt to the MQTT server was rejected. "
|
|
||||||
"In simpler terms, the server you tried to connect to refused your request. "
|
|
||||||
"There can be multiple reasons for this. "
|
|
||||||
"For example, the MQTT server you're trying to connect to might be down or not "
|
|
||||||
"running at all or you might have provided the wrong address "
|
|
||||||
"or port number for the server."
|
|
||||||
>>;
|
|
||||||
explain_error({tcp_closed, _}) ->
|
|
||||||
<<
|
|
||||||
"Your MQTT connection attempt was unsuccessful. "
|
|
||||||
"It might be at its maximum capacity for handling new connections. "
|
|
||||||
"To diagnose the issue further, you can check the server logs for "
|
|
||||||
"any specific messages related to the unavailability or connection limits."
|
|
||||||
>>;
|
|
||||||
explain_error(_Reason) ->
|
|
||||||
undefined.
|
|
||||||
|
|
||||||
handle_disconnect(_Reason) ->
|
handle_disconnect(_Reason) ->
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
|
|
@ -19,7 +19,6 @@
|
||||||
-include_lib("emqx/include/logger.hrl").
|
-include_lib("emqx/include/logger.hrl").
|
||||||
-include_lib("emqx/include/emqx_mqtt.hrl").
|
-include_lib("emqx/include/emqx_mqtt.hrl").
|
||||||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||||
-include_lib("emqx_resource/include/emqx_resource.hrl").
|
|
||||||
|
|
||||||
%% management APIs
|
%% management APIs
|
||||||
-export([
|
-export([
|
||||||
|
@ -235,13 +234,13 @@ status(Pid) ->
|
||||||
try
|
try
|
||||||
case proplists:get_value(socket, info(Pid)) of
|
case proplists:get_value(socket, info(Pid)) of
|
||||||
Socket when Socket /= undefined ->
|
Socket when Socket /= undefined ->
|
||||||
?status_connected;
|
connected;
|
||||||
undefined ->
|
undefined ->
|
||||||
?status_connecting
|
connecting
|
||||||
end
|
end
|
||||||
catch
|
catch
|
||||||
exit:{noproc, _} ->
|
exit:{noproc, _} ->
|
||||||
?status_disconnected
|
disconnected
|
||||||
end.
|
end.
|
||||||
|
|
||||||
%%
|
%%
|
||||||
|
|
|
@ -1025,39 +1025,31 @@ t_mqtt_conn_bridge_egress_async_reconnect(_) ->
|
||||||
ct:sleep(1000),
|
ct:sleep(1000),
|
||||||
|
|
||||||
%% stop the listener 1883 to make the bridge disconnected
|
%% stop the listener 1883 to make the bridge disconnected
|
||||||
?check_trace(
|
ok = emqx_listeners:stop_listener('tcp:default'),
|
||||||
begin
|
ct:sleep(1500),
|
||||||
ok = emqx_listeners:stop_listener('tcp:default'),
|
?assertMatch(
|
||||||
ct:sleep(1500),
|
#{<<"status">> := Status} when
|
||||||
?assertMatch(
|
Status == <<"connecting">> orelse Status == <<"disconnected">>,
|
||||||
#{<<"status">> := Status} when
|
request_bridge(BridgeIDEgress)
|
||||||
Status == <<"connecting">> orelse Status == <<"disconnected">>,
|
|
||||||
request_bridge(BridgeIDEgress)
|
|
||||||
),
|
|
||||||
|
|
||||||
%% start the listener 1883 to make the bridge reconnected
|
|
||||||
ok = emqx_listeners:start_listener('tcp:default'),
|
|
||||||
timer:sleep(1500),
|
|
||||||
?assertMatch(
|
|
||||||
#{<<"status">> := <<"connected">>},
|
|
||||||
request_bridge(BridgeIDEgress)
|
|
||||||
),
|
|
||||||
|
|
||||||
N = stop_publisher(Publisher),
|
|
||||||
|
|
||||||
%% all those messages should eventually be delivered
|
|
||||||
[
|
|
||||||
assert_mqtt_msg_received(RemoteTopic, Payload)
|
|
||||||
|| I <- lists:seq(1, N),
|
|
||||||
Payload <- [integer_to_binary(I)]
|
|
||||||
],
|
|
||||||
ok
|
|
||||||
end,
|
|
||||||
fun(Trace) ->
|
|
||||||
?assertMatch([_ | _], ?of_kind(emqx_bridge_mqtt_connector_econnrefused_error, Trace)),
|
|
||||||
ok
|
|
||||||
end
|
|
||||||
),
|
),
|
||||||
|
|
||||||
|
%% start the listener 1883 to make the bridge reconnected
|
||||||
|
ok = emqx_listeners:start_listener('tcp:default'),
|
||||||
|
timer:sleep(1500),
|
||||||
|
?assertMatch(
|
||||||
|
#{<<"status">> := <<"connected">>},
|
||||||
|
request_bridge(BridgeIDEgress)
|
||||||
|
),
|
||||||
|
|
||||||
|
N = stop_publisher(Publisher),
|
||||||
|
|
||||||
|
%% all those messages should eventually be delivered
|
||||||
|
[
|
||||||
|
assert_mqtt_msg_received(RemoteTopic, Payload)
|
||||||
|
|| I <- lists:seq(1, N),
|
||||||
|
Payload <- [integer_to_binary(I)]
|
||||||
|
],
|
||||||
|
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
start_publisher(Topic, Interval, CtrlPid) ->
|
start_publisher(Topic, Interval, CtrlPid) ->
|
||||||
|
|
|
@ -131,9 +131,6 @@ hookpoint(Config) ->
|
||||||
BridgeId = bridge_id(Config),
|
BridgeId = bridge_id(Config),
|
||||||
emqx_bridge_resource:bridge_hookpoint(BridgeId).
|
emqx_bridge_resource:bridge_hookpoint(BridgeId).
|
||||||
|
|
||||||
simplify_result(Res) ->
|
|
||||||
emqx_bridge_v2_testlib:simplify_result(Res).
|
|
||||||
|
|
||||||
%%------------------------------------------------------------------------------
|
%%------------------------------------------------------------------------------
|
||||||
%% Testcases
|
%% Testcases
|
||||||
%%------------------------------------------------------------------------------
|
%%------------------------------------------------------------------------------
|
||||||
|
@ -249,46 +246,3 @@ t_receive_via_rule(Config) ->
|
||||||
end
|
end
|
||||||
),
|
),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
t_connect_with_more_clients_than_the_broker_accepts(Config) ->
|
|
||||||
Name = ?config(connector_name, Config),
|
|
||||||
OrgConf = emqx_mgmt_listeners_conf:get_raw(tcp, default),
|
|
||||||
on_exit(fun() ->
|
|
||||||
emqx_mgmt_listeners_conf:update(tcp, default, OrgConf)
|
|
||||||
end),
|
|
||||||
NewConf = OrgConf#{<<"max_connections">> => 3},
|
|
||||||
{ok, _} = emqx_mgmt_listeners_conf:update(tcp, default, NewConf),
|
|
||||||
?check_trace(
|
|
||||||
#{timetrap => 10_000},
|
|
||||||
begin
|
|
||||||
?assertMatch(
|
|
||||||
{201, #{
|
|
||||||
<<"status">> := <<"disconnected">>,
|
|
||||||
<<"status_reason">> :=
|
|
||||||
<<"Your MQTT connection attempt was unsuccessful", _/binary>>
|
|
||||||
}},
|
|
||||||
simplify_result(
|
|
||||||
emqx_bridge_v2_testlib:create_connector_api(
|
|
||||||
Config,
|
|
||||||
#{<<"pool_size">> => 100}
|
|
||||||
)
|
|
||||||
)
|
|
||||||
),
|
|
||||||
?block_until(#{?snk_kind := emqx_bridge_mqtt_connector_tcp_closed}),
|
|
||||||
?assertMatch(
|
|
||||||
{200, #{
|
|
||||||
<<"status">> := <<"disconnected">>,
|
|
||||||
<<"status_reason">> :=
|
|
||||||
<<"Your MQTT connection attempt was unsuccessful", _/binary>>
|
|
||||||
}},
|
|
||||||
simplify_result(emqx_bridge_v2_testlib:get_connector_api(mqtt, Name))
|
|
||||||
),
|
|
||||||
ok
|
|
||||||
end,
|
|
||||||
fun(Trace) ->
|
|
||||||
?assertMatch([_ | _], ?of_kind(emqx_bridge_mqtt_connector_tcp_closed, Trace)),
|
|
||||||
ok
|
|
||||||
end
|
|
||||||
),
|
|
||||||
|
|
||||||
ok.
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_bridge_pulsar, [
|
{application, emqx_bridge_pulsar, [
|
||||||
{description, "EMQX Pulsar Bridge"},
|
{description, "EMQX Pulsar Bridge"},
|
||||||
{vsn, "0.2.4"},
|
{vsn, "0.2.3"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{applications, [
|
{applications, [
|
||||||
kernel,
|
kernel,
|
||||||
|
|
|
@ -11,8 +11,7 @@
|
||||||
action_type_name/0,
|
action_type_name/0,
|
||||||
connector_type_name/0,
|
connector_type_name/0,
|
||||||
schema_module/0,
|
schema_module/0,
|
||||||
is_action/1,
|
is_action/1
|
||||||
connector_action_config_to_bridge_v1_config/2
|
|
||||||
]).
|
]).
|
||||||
|
|
||||||
is_action(_) -> true.
|
is_action(_) -> true.
|
||||||
|
@ -24,28 +23,3 @@ action_type_name() -> pulsar.
|
||||||
connector_type_name() -> pulsar.
|
connector_type_name() -> pulsar.
|
||||||
|
|
||||||
schema_module() -> emqx_bridge_pulsar_pubsub_schema.
|
schema_module() -> emqx_bridge_pulsar_pubsub_schema.
|
||||||
|
|
||||||
connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) ->
|
|
||||||
BridgeV1Config1 = emqx_action_info:connector_action_config_to_bridge_v1_config(
|
|
||||||
ConnectorConfig, ActionConfig
|
|
||||||
),
|
|
||||||
BridgeV1Config = maps:with(v1_fields(pulsar_producer), BridgeV1Config1),
|
|
||||||
emqx_utils_maps:update_if_present(
|
|
||||||
<<"resource_opts">>,
|
|
||||||
fun(RO) -> maps:with(v1_fields(producer_resource_opts), RO) end,
|
|
||||||
BridgeV1Config
|
|
||||||
).
|
|
||||||
|
|
||||||
%%------------------------------------------------------------------------------------------
|
|
||||||
%% Internal helper functions
|
|
||||||
%%------------------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
v1_fields(Struct) ->
|
|
||||||
[
|
|
||||||
to_bin(K)
|
|
||||||
|| {K, _} <- emqx_bridge_pulsar:fields(Struct)
|
|
||||||
].
|
|
||||||
|
|
||||||
to_bin(B) when is_binary(B) -> B;
|
|
||||||
to_bin(L) when is_list(L) -> list_to_binary(L);
|
|
||||||
to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8).
|
|
||||||
|
|
|
@ -60,8 +60,6 @@ resource_type() -> pulsar.
|
||||||
|
|
||||||
callback_mode() -> async_if_possible.
|
callback_mode() -> async_if_possible.
|
||||||
|
|
||||||
query_mode(#{resource_opts := #{query_mode := sync}}) ->
|
|
||||||
simple_sync_internal_buffer;
|
|
||||||
query_mode(_Config) ->
|
query_mode(_Config) ->
|
||||||
simple_async_internal_buffer.
|
simple_async_internal_buffer.
|
||||||
|
|
||||||
|
@ -206,17 +204,12 @@ on_query(_InstanceId, {ChannelId, Message}, State) ->
|
||||||
sync_timeout => SyncTimeout,
|
sync_timeout => SyncTimeout,
|
||||||
is_async => false
|
is_async => false
|
||||||
}),
|
}),
|
||||||
?tp_span(
|
try
|
||||||
"pulsar_producer_query_enter",
|
pulsar:send_sync(Producers, [PulsarMessage], SyncTimeout)
|
||||||
#{instance_id => _InstanceId, message => Message, mode => sync},
|
catch
|
||||||
try
|
error:timeout ->
|
||||||
?tp("pulsar_producer_send", #{msg => PulsarMessage, mode => sync}),
|
{error, timeout}
|
||||||
pulsar:send_sync(Producers, [PulsarMessage], SyncTimeout)
|
end
|
||||||
catch
|
|
||||||
error:timeout ->
|
|
||||||
{error, timeout}
|
|
||||||
end
|
|
||||||
)
|
|
||||||
end.
|
end.
|
||||||
|
|
||||||
-spec on_query_async(
|
-spec on_query_async(
|
||||||
|
@ -227,11 +220,11 @@ on_query_async(_InstanceId, {ChannelId, Message}, AsyncReplyFn, State) ->
|
||||||
#{channels := Channels} = State,
|
#{channels := Channels} = State,
|
||||||
case maps:find(ChannelId, Channels) of
|
case maps:find(ChannelId, Channels) of
|
||||||
error ->
|
error ->
|
||||||
{error, {unrecoverable_error, channel_not_found}};
|
{error, channel_not_found};
|
||||||
{ok, #{message := MessageTmpl, producers := Producers}} ->
|
{ok, #{message := MessageTmpl, producers := Producers}} ->
|
||||||
?tp_span(
|
?tp_span(
|
||||||
"pulsar_producer_query_enter",
|
pulsar_producer_on_query_async,
|
||||||
#{instance_id => _InstanceId, message => Message, mode => async},
|
#{instance_id => _InstanceId, message => Message},
|
||||||
on_query_async2(ChannelId, Producers, Message, MessageTmpl, AsyncReplyFn)
|
on_query_async2(ChannelId, Producers, Message, MessageTmpl, AsyncReplyFn)
|
||||||
)
|
)
|
||||||
end.
|
end.
|
||||||
|
@ -242,7 +235,6 @@ on_query_async2(ChannelId, Producers, Message, MessageTmpl, AsyncReplyFn) ->
|
||||||
message => PulsarMessage,
|
message => PulsarMessage,
|
||||||
is_async => true
|
is_async => true
|
||||||
}),
|
}),
|
||||||
?tp("pulsar_producer_send", #{msg => PulsarMessage, mode => async}),
|
|
||||||
pulsar:send(Producers, [PulsarMessage], #{callback_fn => AsyncReplyFn}).
|
pulsar:send(Producers, [PulsarMessage], #{callback_fn => AsyncReplyFn}).
|
||||||
|
|
||||||
on_format_query_result({ok, Info}) ->
|
on_format_query_result({ok, Info}) ->
|
||||||
|
|
|
@ -66,8 +66,10 @@ fields(action_resource_opts) ->
|
||||||
batch_size,
|
batch_size,
|
||||||
batch_time,
|
batch_time,
|
||||||
worker_pool_size,
|
worker_pool_size,
|
||||||
|
request_ttl,
|
||||||
inflight_window,
|
inflight_window,
|
||||||
max_buffer_bytes
|
max_buffer_bytes,
|
||||||
|
query_mode
|
||||||
],
|
],
|
||||||
lists:filter(
|
lists:filter(
|
||||||
fun({K, _V}) -> not lists:member(K, UnsupportedOpts) end,
|
fun({K, _V}) -> not lists:member(K, UnsupportedOpts) end,
|
||||||
|
|
|
@ -843,8 +843,7 @@ do_t_send_with_failure(Config, FailureType) ->
|
||||||
?wait_async_action(
|
?wait_async_action(
|
||||||
emqx:publish(Message0),
|
emqx:publish(Message0),
|
||||||
#{
|
#{
|
||||||
?snk_kind := "pulsar_producer_query_enter",
|
?snk_kind := pulsar_producer_on_query_async,
|
||||||
mode := async,
|
|
||||||
?snk_span := {complete, _}
|
?snk_span := {complete, _}
|
||||||
},
|
},
|
||||||
5_000
|
5_000
|
||||||
|
@ -971,11 +970,7 @@ t_producer_process_crash(Config) ->
|
||||||
{_, {ok, _}} =
|
{_, {ok, _}} =
|
||||||
?wait_async_action(
|
?wait_async_action(
|
||||||
emqx:publish(Message0),
|
emqx:publish(Message0),
|
||||||
#{
|
#{?snk_kind := pulsar_producer_on_query_async, ?snk_span := {complete, _}},
|
||||||
?snk_kind := "pulsar_producer_query_enter",
|
|
||||||
mode := async,
|
|
||||||
?snk_span := {complete, _}
|
|
||||||
},
|
|
||||||
5_000
|
5_000
|
||||||
),
|
),
|
||||||
Data0 = receive_consumed(20_000),
|
Data0 = receive_consumed(20_000),
|
||||||
|
|
|
@ -23,25 +23,31 @@
|
||||||
%%------------------------------------------------------------------------------
|
%%------------------------------------------------------------------------------
|
||||||
|
|
||||||
all() ->
|
all() ->
|
||||||
All0 = emqx_common_test_helpers:all(?MODULE),
|
[
|
||||||
All = All0 -- matrix_cases(),
|
{group, plain},
|
||||||
Groups = lists:map(fun({G, _, _}) -> {group, G} end, groups()),
|
{group, tls}
|
||||||
Groups ++ All.
|
].
|
||||||
|
|
||||||
groups() ->
|
groups() ->
|
||||||
emqx_common_test_helpers:matrix_to_groups(?MODULE, matrix_cases()).
|
AllTCs = emqx_common_test_helpers:all(?MODULE),
|
||||||
|
[
|
||||||
matrix_cases() ->
|
{plain, AllTCs},
|
||||||
emqx_common_test_helpers:all(?MODULE).
|
{tls, AllTCs}
|
||||||
|
].
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
|
%% Ensure enterprise bridge module is loaded
|
||||||
|
_ = emqx_bridge_enterprise:module_info(),
|
||||||
|
{ok, Cwd} = file:get_cwd(),
|
||||||
|
PrivDir = ?config(priv_dir, Config),
|
||||||
|
WorkDir = emqx_utils_fs:find_relpath(filename:join(PrivDir, "ebp"), Cwd),
|
||||||
Apps = emqx_cth_suite:start(
|
Apps = emqx_cth_suite:start(
|
||||||
lists:flatten([
|
lists:flatten([
|
||||||
?APPS,
|
?APPS,
|
||||||
emqx_management,
|
emqx_management,
|
||||||
emqx_mgmt_api_test_util:emqx_dashboard()
|
emqx_mgmt_api_test_util:emqx_dashboard()
|
||||||
]),
|
]),
|
||||||
#{work_dir => emqx_cth_suite:work_dir(Config)}
|
#{work_dir => WorkDir}
|
||||||
),
|
),
|
||||||
[{suite_apps, Apps} | Config].
|
[{suite_apps, Apps} | Config].
|
||||||
|
|
||||||
|
@ -55,7 +61,6 @@ init_per_group(plain = Type, Config) ->
|
||||||
case emqx_common_test_helpers:is_tcp_server_available(PulsarHost, PulsarPort) of
|
case emqx_common_test_helpers:is_tcp_server_available(PulsarHost, PulsarPort) of
|
||||||
true ->
|
true ->
|
||||||
Config1 = common_init_per_group(),
|
Config1 = common_init_per_group(),
|
||||||
ConnectorName = ?MODULE,
|
|
||||||
NewConfig =
|
NewConfig =
|
||||||
[
|
[
|
||||||
{proxy_name, ProxyName},
|
{proxy_name, ProxyName},
|
||||||
|
@ -65,7 +70,7 @@ init_per_group(plain = Type, Config) ->
|
||||||
{use_tls, false}
|
{use_tls, false}
|
||||||
| Config1 ++ Config
|
| Config1 ++ Config
|
||||||
],
|
],
|
||||||
create_connector(ConnectorName, NewConfig),
|
create_connector(?MODULE, NewConfig),
|
||||||
NewConfig;
|
NewConfig;
|
||||||
false ->
|
false ->
|
||||||
maybe_skip_without_ci()
|
maybe_skip_without_ci()
|
||||||
|
@ -77,7 +82,6 @@ init_per_group(tls = Type, Config) ->
|
||||||
case emqx_common_test_helpers:is_tcp_server_available(PulsarHost, PulsarPort) of
|
case emqx_common_test_helpers:is_tcp_server_available(PulsarHost, PulsarPort) of
|
||||||
true ->
|
true ->
|
||||||
Config1 = common_init_per_group(),
|
Config1 = common_init_per_group(),
|
||||||
ConnectorName = ?MODULE,
|
|
||||||
NewConfig =
|
NewConfig =
|
||||||
[
|
[
|
||||||
{proxy_name, ProxyName},
|
{proxy_name, ProxyName},
|
||||||
|
@ -87,21 +91,17 @@ init_per_group(tls = Type, Config) ->
|
||||||
{use_tls, true}
|
{use_tls, true}
|
||||||
| Config1 ++ Config
|
| Config1 ++ Config
|
||||||
],
|
],
|
||||||
create_connector(ConnectorName, NewConfig),
|
create_connector(?MODULE, NewConfig),
|
||||||
NewConfig;
|
NewConfig;
|
||||||
false ->
|
false ->
|
||||||
maybe_skip_without_ci()
|
maybe_skip_without_ci()
|
||||||
end;
|
end.
|
||||||
init_per_group(_Group, Config) ->
|
|
||||||
Config.
|
|
||||||
|
|
||||||
end_per_group(Group, Config) when
|
end_per_group(Group, Config) when
|
||||||
Group =:= plain;
|
Group =:= plain;
|
||||||
Group =:= tls
|
Group =:= tls
|
||||||
->
|
->
|
||||||
common_end_per_group(Config),
|
common_end_per_group(Config),
|
||||||
ok;
|
|
||||||
end_per_group(_Group, _Config) ->
|
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
common_init_per_group() ->
|
common_init_per_group() ->
|
||||||
|
@ -189,49 +189,66 @@ pulsar_connector(Config) ->
|
||||||
":",
|
":",
|
||||||
integer_to_binary(PulsarPort)
|
integer_to_binary(PulsarPort)
|
||||||
]),
|
]),
|
||||||
InnerConfigMap = #{
|
Connector = #{
|
||||||
<<"enable">> => true,
|
<<"connectors">> => #{
|
||||||
<<"ssl">> => #{
|
<<"pulsar">> => #{
|
||||||
<<"enable">> => UseTLS,
|
Name => #{
|
||||||
<<"verify">> => <<"verify_none">>,
|
<<"enable">> => true,
|
||||||
<<"server_name_indication">> => <<"auto">>
|
<<"ssl">> => #{
|
||||||
},
|
<<"enable">> => UseTLS,
|
||||||
<<"authentication">> => <<"none">>,
|
<<"verify">> => <<"verify_none">>,
|
||||||
<<"servers">> => ServerURL
|
<<"server_name_indication">> => <<"auto">>
|
||||||
},
|
},
|
||||||
emqx_bridge_v2_testlib:parse_and_check_connector(?TYPE, Name, InnerConfigMap).
|
<<"authentication">> => <<"none">>,
|
||||||
|
<<"servers">> => ServerURL
|
||||||
pulsar_action(Config) ->
|
}
|
||||||
QueryMode = proplists:get_value(query_mode, Config, <<"sync">>),
|
}
|
||||||
Name = atom_to_binary(?MODULE),
|
|
||||||
InnerConfigMap = #{
|
|
||||||
<<"connector">> => Name,
|
|
||||||
<<"enable">> => true,
|
|
||||||
<<"parameters">> => #{
|
|
||||||
<<"retention_period">> => <<"infinity">>,
|
|
||||||
<<"max_batch_bytes">> => <<"1MB">>,
|
|
||||||
<<"batch_size">> => 100,
|
|
||||||
<<"strategy">> => <<"random">>,
|
|
||||||
<<"buffer">> => #{
|
|
||||||
<<"mode">> => <<"memory">>,
|
|
||||||
<<"per_partition_limit">> => <<"10MB">>,
|
|
||||||
<<"segment_bytes">> => <<"5MB">>,
|
|
||||||
<<"memory_overload_protection">> => true
|
|
||||||
},
|
|
||||||
<<"message">> => #{
|
|
||||||
<<"key">> => <<"${.clientid}">>,
|
|
||||||
<<"value">> => <<"${.}">>
|
|
||||||
},
|
|
||||||
<<"pulsar_topic">> => ?config(pulsar_topic, Config)
|
|
||||||
},
|
|
||||||
<<"resource_opts">> => #{
|
|
||||||
<<"query_mode">> => QueryMode,
|
|
||||||
<<"request_ttl">> => <<"1s">>,
|
|
||||||
<<"health_check_interval">> => <<"1s">>,
|
|
||||||
<<"metrics_flush_interval">> => <<"300ms">>
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
emqx_bridge_v2_testlib:parse_and_check(action, ?TYPE, Name, InnerConfigMap).
|
parse_and_check(<<"connectors">>, emqx_connector_schema, Connector, Name).
|
||||||
|
|
||||||
|
pulsar_action(Config) ->
|
||||||
|
Name = atom_to_binary(?MODULE),
|
||||||
|
Action = #{
|
||||||
|
<<"actions">> => #{
|
||||||
|
<<"pulsar">> => #{
|
||||||
|
Name => #{
|
||||||
|
<<"connector">> => Name,
|
||||||
|
<<"enable">> => true,
|
||||||
|
<<"parameters">> => #{
|
||||||
|
<<"retention_period">> => <<"infinity">>,
|
||||||
|
<<"max_batch_bytes">> => <<"1MB">>,
|
||||||
|
<<"batch_size">> => 100,
|
||||||
|
<<"strategy">> => <<"random">>,
|
||||||
|
<<"buffer">> => #{
|
||||||
|
<<"mode">> => <<"memory">>,
|
||||||
|
<<"per_partition_limit">> => <<"10MB">>,
|
||||||
|
<<"segment_bytes">> => <<"5MB">>,
|
||||||
|
<<"memory_overload_protection">> => true
|
||||||
|
},
|
||||||
|
<<"message">> => #{
|
||||||
|
<<"key">> => <<"${.clientid}">>,
|
||||||
|
<<"value">> => <<"${.}">>
|
||||||
|
},
|
||||||
|
<<"pulsar_topic">> => ?config(pulsar_topic, Config)
|
||||||
|
},
|
||||||
|
<<"resource_opts">> => #{
|
||||||
|
<<"health_check_interval">> => <<"1s">>,
|
||||||
|
<<"metrics_flush_interval">> => <<"300ms">>
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
parse_and_check(<<"actions">>, emqx_bridge_v2_schema, Action, Name).
|
||||||
|
|
||||||
|
parse_and_check(Key, Mod, Conf, Name) ->
|
||||||
|
ConfStr = hocon_pp:do(Conf, #{}),
|
||||||
|
ct:pal(ConfStr),
|
||||||
|
{ok, RawConf} = hocon:binary(ConfStr, #{format => map}),
|
||||||
|
hocon_tconf:check_plain(Mod, RawConf, #{required => false, atom_key => false}),
|
||||||
|
#{Key := #{<<"pulsar">> := #{Name := RetConf}}} = RawConf,
|
||||||
|
RetConf.
|
||||||
|
|
||||||
instance_id(Type, Name) ->
|
instance_id(Type, Name) ->
|
||||||
ConnectorId = emqx_bridge_resource:resource_id(Type, ?TYPE, Name),
|
ConnectorId = emqx_bridge_resource:resource_id(Type, ?TYPE, Name),
|
||||||
|
@ -387,44 +404,20 @@ assert_status_api(Line, Type, Name, Status) ->
|
||||||
).
|
).
|
||||||
-define(assertStatusAPI(TYPE, NAME, STATUS), assert_status_api(?LINE, TYPE, NAME, STATUS)).
|
-define(assertStatusAPI(TYPE, NAME, STATUS), assert_status_api(?LINE, TYPE, NAME, STATUS)).
|
||||||
|
|
||||||
proplists_with(Keys, PList) ->
|
|
||||||
lists:filter(fun({K, _}) -> lists:member(K, Keys) end, PList).
|
|
||||||
|
|
||||||
group_path(Config) ->
|
|
||||||
case emqx_common_test_helpers:group_path(Config) of
|
|
||||||
[] ->
|
|
||||||
undefined;
|
|
||||||
Path ->
|
|
||||||
Path
|
|
||||||
end.
|
|
||||||
|
|
||||||
%%------------------------------------------------------------------------------
|
%%------------------------------------------------------------------------------
|
||||||
%% Testcases
|
%% Testcases
|
||||||
%%------------------------------------------------------------------------------
|
%%------------------------------------------------------------------------------
|
||||||
|
|
||||||
t_action_probe(matrix) ->
|
t_action_probe(Config) ->
|
||||||
[[plain], [tls]];
|
|
||||||
t_action_probe(Config) when is_list(Config) ->
|
|
||||||
Name = atom_to_binary(?FUNCTION_NAME),
|
Name = atom_to_binary(?FUNCTION_NAME),
|
||||||
Action = pulsar_action(Config),
|
Action = pulsar_action(Config),
|
||||||
{ok, Res0} = emqx_bridge_v2_testlib:probe_bridge_api(action, ?TYPE, Name, Action),
|
{ok, Res0} = emqx_bridge_v2_testlib:probe_bridge_api(action, ?TYPE, Name, Action),
|
||||||
?assertMatch({{_, 204, _}, _, _}, Res0),
|
?assertMatch({{_, 204, _}, _, _}, Res0),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
t_action(matrix) ->
|
t_action(Config) ->
|
||||||
[
|
|
||||||
[plain, async],
|
|
||||||
[plain, sync],
|
|
||||||
[tls, async]
|
|
||||||
];
|
|
||||||
t_action(Config) when is_list(Config) ->
|
|
||||||
QueryMode =
|
|
||||||
case group_path(Config) of
|
|
||||||
[_, QM | _] -> atom_to_binary(QM);
|
|
||||||
_ -> <<"async">>
|
|
||||||
end,
|
|
||||||
Name = atom_to_binary(?FUNCTION_NAME),
|
Name = atom_to_binary(?FUNCTION_NAME),
|
||||||
create_action(Name, [{query_mode, QueryMode} | Config]),
|
create_action(Name, Config),
|
||||||
Actions = emqx_bridge_v2:list(actions),
|
Actions = emqx_bridge_v2:list(actions),
|
||||||
Any = fun(#{name := BName}) -> BName =:= Name end,
|
Any = fun(#{name := BName}) -> BName =:= Name end,
|
||||||
?assert(lists:any(Any, Actions), Actions),
|
?assert(lists:any(Any, Actions), Actions),
|
||||||
|
@ -472,9 +465,7 @@ t_action(Config) when is_list(Config) ->
|
||||||
|
|
||||||
%% Tests that deleting/disabling an action that share the same Pulsar topic with other
|
%% Tests that deleting/disabling an action that share the same Pulsar topic with other
|
||||||
%% actions do not disturb the latter.
|
%% actions do not disturb the latter.
|
||||||
t_multiple_actions_sharing_topic(matrix) ->
|
t_multiple_actions_sharing_topic(Config) ->
|
||||||
[[plain], [tls]];
|
|
||||||
t_multiple_actions_sharing_topic(Config) when is_list(Config) ->
|
|
||||||
Type = ?TYPE,
|
Type = ?TYPE,
|
||||||
ConnectorName = <<"c">>,
|
ConnectorName = <<"c">>,
|
||||||
ConnectorConfig = pulsar_connector(Config),
|
ConnectorConfig = pulsar_connector(Config),
|
||||||
|
@ -555,31 +546,3 @@ t_multiple_actions_sharing_topic(Config) when is_list(Config) ->
|
||||||
[]
|
[]
|
||||||
),
|
),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
t_sync_query_down(matrix) ->
|
|
||||||
[[plain]];
|
|
||||||
t_sync_query_down(Config0) when is_list(Config0) ->
|
|
||||||
ct:timetrap({seconds, 15}),
|
|
||||||
Payload = #{<<"x">> => <<"some data">>},
|
|
||||||
PayloadBin = emqx_utils_json:encode(Payload),
|
|
||||||
ClientId = <<"some_client">>,
|
|
||||||
Opts = #{
|
|
||||||
make_message_fn => fun(Topic) -> emqx_message:make(ClientId, Topic, PayloadBin) end,
|
|
||||||
enter_tp_filter =>
|
|
||||||
?match_event(#{?snk_kind := "pulsar_producer_send"}),
|
|
||||||
error_tp_filter =>
|
|
||||||
?match_event(#{?snk_kind := "resource_simple_sync_internal_buffer_query_timeout"}),
|
|
||||||
success_tp_filter =>
|
|
||||||
?match_event(#{?snk_kind := pulsar_echo_consumer_message})
|
|
||||||
},
|
|
||||||
Config = [
|
|
||||||
{connector_type, ?TYPE},
|
|
||||||
{connector_name, ?FUNCTION_NAME},
|
|
||||||
{connector_config, pulsar_connector(Config0)},
|
|
||||||
{action_type, ?TYPE},
|
|
||||||
{action_name, ?FUNCTION_NAME},
|
|
||||||
{action_config, pulsar_action(Config0)}
|
|
||||||
| proplists_with([proxy_name, proxy_host, proxy_port], Config0)
|
|
||||||
],
|
|
||||||
emqx_bridge_v2_testlib:t_sync_query_down(Config, Opts),
|
|
||||||
ok.
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_bridge_rabbitmq, [
|
{application, emqx_bridge_rabbitmq, [
|
||||||
{description, "EMQX Enterprise RabbitMQ Bridge"},
|
{description, "EMQX Enterprise RabbitMQ Bridge"},
|
||||||
{vsn, "0.2.3"},
|
{vsn, "0.2.2"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{mod, {emqx_bridge_rabbitmq_app, []}},
|
{mod, {emqx_bridge_rabbitmq_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_bridge_s3, [
|
{application, emqx_bridge_s3, [
|
||||||
{description, "EMQX Enterprise S3 Bridge"},
|
{description, "EMQX Enterprise S3 Bridge"},
|
||||||
{vsn, "0.1.6"},
|
{vsn, "0.1.5"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{applications, [
|
{applications, [
|
||||||
kernel,
|
kernel,
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_bridge_sqlserver, [
|
{application, emqx_bridge_sqlserver, [
|
||||||
{description, "EMQX Enterprise SQL Server Bridge"},
|
{description, "EMQX Enterprise SQL Server Bridge"},
|
||||||
{vsn, "0.2.4"},
|
{vsn, "0.2.3"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{applications, [kernel, stdlib, emqx_resource, odbc]},
|
{applications, [kernel, stdlib, emqx_resource, odbc]},
|
||||||
{env, [
|
{env, [
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_bridge_syskeeper, [
|
{application, emqx_bridge_syskeeper, [
|
||||||
{description, "EMQX Enterprise Data bridge for Syskeeper"},
|
{description, "EMQX Enterprise Data bridge for Syskeeper"},
|
||||||
{vsn, "0.1.5"},
|
{vsn, "0.1.4"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{applications, [
|
{applications, [
|
||||||
kernel,
|
kernel,
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_conf, [
|
{application, emqx_conf, [
|
||||||
{description, "EMQX configuration management"},
|
{description, "EMQX configuration management"},
|
||||||
{vsn, "0.2.4"},
|
{vsn, "0.2.3"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{mod, {emqx_conf_app, []}},
|
{mod, {emqx_conf_app, []}},
|
||||||
{applications, [kernel, stdlib]},
|
{applications, [kernel, stdlib]},
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_connector, [
|
{application, emqx_connector, [
|
||||||
{description, "EMQX Data Integration Connectors"},
|
{description, "EMQX Data Integration Connectors"},
|
||||||
{vsn, "0.3.4"},
|
{vsn, "0.3.3"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{mod, {emqx_connector_app, []}},
|
{mod, {emqx_connector_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -125,7 +125,6 @@ create(Type, Name, Conf0, Opts) ->
|
||||||
TypeBin = bin(Type),
|
TypeBin = bin(Type),
|
||||||
ResourceId = resource_id(Type, Name),
|
ResourceId = resource_id(Type, Name),
|
||||||
Conf = Conf0#{connector_type => TypeBin, connector_name => Name},
|
Conf = Conf0#{connector_type => TypeBin, connector_name => Name},
|
||||||
_ = emqx_alarm:ensure_deactivated(ResourceId),
|
|
||||||
{ok, _Data} = emqx_resource:create_local(
|
{ok, _Data} = emqx_resource:create_local(
|
||||||
ResourceId,
|
ResourceId,
|
||||||
?CONNECTOR_RESOURCE_GROUP,
|
?CONNECTOR_RESOURCE_GROUP,
|
||||||
|
@ -133,6 +132,7 @@ create(Type, Name, Conf0, Opts) ->
|
||||||
parse_confs(TypeBin, Name, Conf),
|
parse_confs(TypeBin, Name, Conf),
|
||||||
parse_opts(Conf, Opts)
|
parse_opts(Conf, Opts)
|
||||||
),
|
),
|
||||||
|
_ = emqx_alarm:ensure_deactivated(ResourceId),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
update(ConnectorId, {OldConf, Conf}) ->
|
update(ConnectorId, {OldConf, Conf}) ->
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
{application, emqx_dashboard, [
|
{application, emqx_dashboard, [
|
||||||
{description, "EMQX Web Dashboard"},
|
{description, "EMQX Web Dashboard"},
|
||||||
% strict semver, bump manually!
|
% strict semver, bump manually!
|
||||||
{vsn, "5.1.4"},
|
{vsn, "5.1.3"},
|
||||||
{modules, []},
|
{modules, []},
|
||||||
{registered, [emqx_dashboard_sup]},
|
{registered, [emqx_dashboard_sup]},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_dashboard_sso, [
|
{application, emqx_dashboard_sso, [
|
||||||
{description, "EMQX Dashboard Single Sign-On"},
|
{description, "EMQX Dashboard Single Sign-On"},
|
||||||
{vsn, "0.1.6"},
|
{vsn, "0.1.5"},
|
||||||
{registered, [emqx_dashboard_sso_sup]},
|
{registered, [emqx_dashboard_sso_sup]},
|
||||||
{applications, [
|
{applications, [
|
||||||
kernel,
|
kernel,
|
||||||
|
|
|
@@ -19,7 +19,6 @@
 -compile(nowarn_export_all).

 -include("../../emqx/include/emqx.hrl").
--include("../../emqx_durable_storage/include/emqx_ds.hrl").
 -include_lib("common_test/include/ct.hrl").
 -include_lib("stdlib/include/assert.hrl").
 -include("../../emqx/include/asserts.hrl").
@@ -146,7 +145,7 @@ t_06_smoke_add_generation(Config) ->
     ?assertMatch(ok, emqx_ds:add_generation(DB)),
     [
         {Gen1, #{created_at := Created1, since := Since1, until := Until1}},
-        {_Gen2, #{created_at := Created2, since := Since2, until := undefined}}
+        {Gen2, #{created_at := Created2, since := Since2, until := undefined}}
     ] = maps:to_list(emqx_ds:list_generations_with_lifetimes(DB)),
     %% Check units of the return values (+/- 10s from test begin time):
     ?give_or_take(BeginTime, 10_000, Created1),
@@ -235,8 +234,8 @@ t_09_atomic_store_batch(Config) ->
     DB = ?FUNCTION_NAME,
     ?check_trace(
         begin
-            DBOpts = (opts(Config))#{atomic_batches => true},
-            ?assertMatch(ok, emqx_ds:open_db(DB, DBOpts)),
+            application:set_env(emqx_durable_storage, egress_batch_size, 1),
+            ?assertMatch(ok, emqx_ds:open_db(DB, opts(Config))),
             Msgs = [
                 message(<<"1">>, <<"1">>, 0),
                 message(<<"2">>, <<"2">>, 1),
@@ -244,8 +243,13 @@ t_09_atomic_store_batch(Config) ->
             ],
             ?assertEqual(
                 ok,
-                emqx_ds:store_batch(DB, Msgs, #{sync => true})
-            )
+                emqx_ds:store_batch(DB, Msgs, #{
+                    atomic => true,
+                    sync => true
+                })
+            ),
+            {ok, Flush} = ?block_until(#{?snk_kind := emqx_ds_buffer_flush}),
+            ?assertMatch(#{batch := [_, _, _]}, Flush)
         end,
         []
     ),
@@ -285,124 +289,6 @@ t_10_non_atomic_store_batch(Config) ->
     ),
     ok.

-t_11_batch_preconditions(Config) ->
-    DB = ?FUNCTION_NAME,
-    ?check_trace(
-        begin
-            DBOpts = (opts(Config))#{
-                atomic_batches => true,
-                force_monotonic_timestamps => false
-            },
-            ?assertMatch(ok, emqx_ds:open_db(DB, DBOpts)),
-
-            %% Conditional delete
-            TS = 42,
-            Batch1 = #dsbatch{
-                preconditions = [{if_exists, matcher(<<"c1">>, <<"t/a">>, '_', TS)}],
-                operations = [{delete, matcher(<<"c1">>, <<"t/a">>, '_', TS)}]
-            },
-            %% Conditional insert
-            M1 = message(<<"c1">>, <<"t/a">>, <<"M1">>, TS),
-            Batch2 = #dsbatch{
-                preconditions = [{unless_exists, matcher(<<"c1">>, <<"t/a">>, '_', TS)}],
-                operations = [M1]
-            },
-
-            %% No such message yet, precondition fails:
-            ?assertEqual(
-                {error, unrecoverable, {precondition_failed, not_found}},
-                emqx_ds:store_batch(DB, Batch1)
-            ),
-            %% No such message yet, `unless` precondition holds:
-            ?assertEqual(
-                ok,
-                emqx_ds:store_batch(DB, Batch2)
-            ),
-            %% Now there's such message, `unless` precondition now fails:
-            ?assertEqual(
-                {error, unrecoverable, {precondition_failed, M1}},
-                emqx_ds:store_batch(DB, Batch2)
-            ),
-            %% On the other hand, `if` precondition now holds:
-            ?assertEqual(
-                ok,
-                emqx_ds:store_batch(DB, Batch1)
-            ),
-
-            %% Wait at least until current epoch ends.
-            ct:sleep(1000),
-            %% There's no messages in the DB.
-            ?assertEqual(
-                [],
-                emqx_ds_test_helpers:consume(DB, emqx_topic:words(<<"t/#">>))
-            )
-        end,
-        []
-    ).
-
-t_12_batch_precondition_conflicts(Config) ->
-    DB = ?FUNCTION_NAME,
-    NBatches = 50,
-    NMessages = 10,
-    ?check_trace(
-        begin
-            DBOpts = (opts(Config))#{
-                atomic_batches => true,
-                force_monotonic_timestamps => false
-            },
-            ?assertMatch(ok, emqx_ds:open_db(DB, DBOpts)),
-
-            ConflictBatches = [
-                #dsbatch{
-                    %% If the slot is free...
-                    preconditions = [{if_exists, matcher(<<"c1">>, <<"t/slot">>, _Free = <<>>, 0)}],
-                    %% Take it and write NMessages extra messages, so that batches take longer to
-                    %% process and have higher chances to conflict with each other.
-                    operations =
-                        [
-                            message(<<"c1">>, <<"t/slot">>, integer_to_binary(I), _TS = 0)
-                            | [
-                                message(<<"c1">>, {"t/owner/~p/~p", [I, J]}, <<>>, I * 100 + J)
-                             || J <- lists:seq(1, NMessages)
-                            ]
-                        ]
-                }
-             || I <- lists:seq(1, NBatches)
-            ],
-
-            %% Run those batches concurrently.
-            ok = emqx_ds:store_batch(DB, [message(<<"c1">>, <<"t/slot">>, <<>>, 0)]),
-            Results = emqx_utils:pmap(
-                fun(B) -> emqx_ds:store_batch(DB, B) end,
-                ConflictBatches,
-                infinity
-            ),
-
-            %% Only one should have succeeded.
-            ?assertEqual([ok], [Ok || Ok = ok <- Results]),
-
-            %% While other failed with an identical `precondition_failed`.
-            Failures = lists:usort([PreconditionFailed || {error, _, PreconditionFailed} <- Results]),
-            ?assertMatch(
-                [{precondition_failed, #message{topic = <<"t/slot">>, payload = <<_/bytes>>}}],
-                Failures
-            ),
-
-            %% Wait at least until current epoch ends.
-            ct:sleep(1000),
-            %% Storage should contain single batch's messages.
-            [{precondition_failed, #message{payload = IOwner}}] = Failures,
-            WinnerBatch = lists:nth(binary_to_integer(IOwner), ConflictBatches),
-            BatchMessages = lists:sort(WinnerBatch#dsbatch.operations),
-            DBMessages = emqx_ds_test_helpers:consume(DB, emqx_topic:words(<<"t/#">>)),
-            ?assertEqual(
-                BatchMessages,
-                DBMessages
-            )
-        end,
-        []
-    ).
-
 t_smoke_delete_next(Config) ->
     DB = ?FUNCTION_NAME,
     ?check_trace(
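Reviewer note on the hunk above: the two removed test cases are the only coverage of the batch-precondition API, which exists on master but not on the dependabot branch. A minimal sketch of the conditional-insert pattern they exercise, using only the record fields, return shapes, and the suite's own matcher/4 and message/4 helpers as they appear in the removed code:

    %% Insert M1 at topic t/a only if nothing is stored there yet (sketch):
    Batch = #dsbatch{
        preconditions = [{unless_exists, matcher(<<"c1">>, <<"t/a">>, '_', TS)}],
        operations = [message(<<"c1">>, <<"t/a">>, <<"M1">>, TS)]
    },
    case emqx_ds:store_batch(DB, Batch) of
        ok -> inserted;
        {error, unrecoverable, {precondition_failed, _Existing}} -> already_taken
    end

As t_12 above demonstrates, concurrent batches guarded this way resolve to exactly one winner.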
@@ -648,25 +534,12 @@ message(ClientId, Topic, Payload, PublishedAt) ->

 message(Topic, Payload, PublishedAt) ->
     #message{
-        topic = try_format(Topic),
-        payload = try_format(Payload),
+        topic = Topic,
+        payload = Payload,
         timestamp = PublishedAt,
         id = emqx_guid:gen()
     }.

-matcher(ClientID, Topic, Payload, Timestamp) ->
-    #message_matcher{
-        from = ClientID,
-        topic = try_format(Topic),
-        timestamp = Timestamp,
-        payload = Payload
-    }.
-
-try_format({Fmt, Args}) ->
-    emqx_utils:format(Fmt, Args);
-try_format(String) ->
-    String.
-
 delete(DB, It, Selector, BatchSize) ->
     delete(DB, It, Selector, BatchSize, 0).
@@ -689,18 +562,9 @@ all() ->

 groups() ->
     TCs = emqx_common_test_helpers:all(?MODULE),
-    %% TODO: Remove once builtin-local supports preconditions + atomic batches.
-    BuiltinLocalTCs =
-        TCs --
-            [
-                t_09_atomic_store_batch,
-                t_11_batch_preconditions,
-                t_12_batch_precondition_conflicts
-            ],
-    BuiltinRaftTCs = TCs,
     [
-        {builtin_local, BuiltinLocalTCs},
-        {builtin_raft, BuiltinRaftTCs}
+        {builtin_local, TCs},
+        {builtin_raft, TCs}
     ].

 init_per_group(builtin_local, Config) ->

@@ -43,7 +43,7 @@
     %% `emqx_ds_buffer':
     init_buffer/3,
     flush_buffer/4,
-    shard_of_operation/4
+    shard_of_message/4
 ]).

 %% Internal exports:
@@ -55,7 +55,6 @@
 -export_type([db_opts/0, shard/0, iterator/0, delete_iterator/0]).

 -include_lib("emqx_utils/include/emqx_message.hrl").
--include_lib("emqx_durable_storage/include/emqx_ds.hrl").

 %%================================================================================
 %% Type declarations
@@ -231,9 +230,9 @@ flush_buffer(DB, Shard, Messages, S0 = #bs{options = Options}) ->
 make_batch(_ForceMonotonic = true, Latest, Messages) ->
     assign_monotonic_timestamps(Latest, Messages, []);
 make_batch(false, Latest, Messages) ->
-    assign_operation_timestamps(Latest, Messages, []).
+    assign_message_timestamps(Latest, Messages, []).

-assign_monotonic_timestamps(Latest0, [Message = #message{} | Rest], Acc0) ->
+assign_monotonic_timestamps(Latest0, [Message | Rest], Acc0) ->
     case emqx_message:timestamp(Message, microsecond) of
         TimestampUs when TimestampUs > Latest0 ->
             Latest = TimestampUs;
@@ -242,43 +241,28 @@ assign_monotonic_timestamps(Latest0, [Message = #message{} | Rest], Acc0) ->
     end,
     Acc = [assign_timestamp(Latest, Message) | Acc0],
     assign_monotonic_timestamps(Latest, Rest, Acc);
-assign_monotonic_timestamps(Latest, [Operation | Rest], Acc0) ->
-    Acc = [Operation | Acc0],
-    assign_monotonic_timestamps(Latest, Rest, Acc);
 assign_monotonic_timestamps(Latest, [], Acc) ->
     {Latest, lists:reverse(Acc)}.

-assign_operation_timestamps(Latest0, [Message = #message{} | Rest], Acc0) ->
-    TimestampUs = emqx_message:timestamp(Message),
+assign_message_timestamps(Latest0, [Message | Rest], Acc0) ->
+    TimestampUs = emqx_message:timestamp(Message, microsecond),
     Latest = max(TimestampUs, Latest0),
     Acc = [assign_timestamp(TimestampUs, Message) | Acc0],
-    assign_operation_timestamps(Latest, Rest, Acc);
-assign_operation_timestamps(Latest, [Operation | Rest], Acc0) ->
-    Acc = [Operation | Acc0],
-    assign_operation_timestamps(Latest, Rest, Acc);
-assign_operation_timestamps(Latest, [], Acc) ->
+    assign_message_timestamps(Latest, Rest, Acc);
+assign_message_timestamps(Latest, [], Acc) ->
     {Latest, lists:reverse(Acc)}.

 assign_timestamp(TimestampUs, Message) ->
     {TimestampUs, Message}.

--spec shard_of_operation(emqx_ds:db(), emqx_ds:operation(), clientid | topic, _Options) -> shard().
-shard_of_operation(DB, #message{from = From, topic = Topic}, SerializeBy, _Options) ->
-    case SerializeBy of
-        clientid -> Key = From;
-        topic -> Key = Topic
-    end,
-    shard_of_key(DB, Key);
-shard_of_operation(DB, {_, #message_matcher{from = From, topic = Topic}}, SerializeBy, _Options) ->
-    case SerializeBy of
-        clientid -> Key = From;
-        topic -> Key = Topic
-    end,
-    shard_of_key(DB, Key).
-
-shard_of_key(DB, Key) ->
+-spec shard_of_message(emqx_ds:db(), emqx_types:message(), clientid | topic, _Options) -> shard().
+shard_of_message(DB, #message{from = From, topic = Topic}, SerializeBy, _Options) ->
     N = emqx_ds_builtin_local_meta:n_shards(DB),
-    Hash = erlang:phash2(Key, N),
+    Hash =
+        case SerializeBy of
+            clientid -> erlang:phash2(From, N);
+            topic -> erlang:phash2(Topic, N)
+        end,
     integer_to_binary(Hash).

 -spec get_streams(emqx_ds:db(), emqx_ds:topic_filter(), emqx_ds:time()) ->
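Reviewer note: master's shard_of_operation/4 above generalizes routing from plain messages to operations. The point of the extra {_, #message_matcher{}} clause is invariant preservation, sketched below with a hypothetical key_of/2 helper (not in the tree): a message and a matcher aimed at it must reduce to the same key, so a conditional delete hashes to the shard that holds the original write.

    %% Sketch: both operation shapes collapse to one routing key.
    key_of(#message{from = From, topic = Topic}, SerializeBy) ->
        case SerializeBy of clientid -> From; topic -> Topic end;
    key_of({_Op, #message_matcher{from = From, topic = Topic}}, SerializeBy) ->
        case SerializeBy of clientid -> From; topic -> Topic end.
    %% Shard = integer_to_binary(erlang:phash2(Key, NShards)).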
@@ -304,7 +288,7 @@ get_streams(DB, TopicFilter, StartTime) ->
 -spec make_iterator(
     emqx_ds:db(), emqx_ds:ds_specific_stream(), emqx_ds:topic_filter(), emqx_ds:time()
 ) ->
-    emqx_ds:make_iterator_result(iterator()).
+    emqx_ds:make_iterator_result(emqx_ds:ds_specific_iterator()).
 make_iterator(DB, ?stream(Shard, InnerStream), TopicFilter, StartTime) ->
     ShardId = {DB, Shard},
     case
@@ -318,7 +302,7 @@ make_iterator(DB, ?stream(Shard, InnerStream), TopicFilter, StartTime) ->
             Error
     end.

--spec update_iterator(emqx_ds:db(), iterator(), emqx_ds:message_key()) ->
+-spec update_iterator(emqx_ds:db(), emqx_ds:ds_specific_iterator(), emqx_ds:message_key()) ->
     emqx_ds:make_iterator_result(iterator()).
 update_iterator(DB, Iter0 = #{?tag := ?IT, ?shard := Shard, ?enc := StorageIter0}, Key) ->
     case emqx_ds_storage_layer:update_iterator({DB, Shard}, StorageIter0, Key) of
@@ -396,7 +380,7 @@ do_next(DB, Iter0 = #{?tag := ?IT, ?shard := Shard, ?enc := StorageIter0}, N) ->
     end.

 -spec do_delete_next(emqx_ds:db(), delete_iterator(), emqx_ds:delete_selector(), pos_integer()) ->
-    emqx_ds:delete_next_result(delete_iterator()).
+    emqx_ds:delete_next_result(emqx_ds:delete_iterator()).
 do_delete_next(
     DB, Iter = #{?tag := ?DELETE_IT, ?shard := Shard, ?enc := StorageIter0}, Selector, N
 ) ->

@@ -29,7 +29,7 @@

     current_timestamp/2,

-    shard_of_operation/4,
+    shard_of_message/4,
     flush_buffer/4,
     init_buffer/3
 ]).
@@ -83,7 +83,6 @@
     ra_state/0
 ]).

--include_lib("emqx_durable_storage/include/emqx_ds.hrl").
 -include_lib("emqx_utils/include/emqx_message.hrl").
 -include_lib("snabbkaffe/include/trace.hrl").
 -include("emqx_ds_replication_layer.hrl").
@@ -101,10 +100,7 @@
     n_shards => pos_integer(),
     n_sites => pos_integer(),
     replication_factor => pos_integer(),
-    replication_options => _TODO :: #{},
-    %% Inherited from `emqx_ds:generic_db_opts()`.
-    force_monotonic_timestamps => boolean(),
-    atomic_batches => boolean()
+    replication_options => _TODO :: #{}
 }.

 %% This enapsulates the stream entity from the replication level.
@@ -139,12 +135,11 @@
     ?enc := emqx_ds_storage_layer:delete_iterator()
 }.

-%% Write batch.
-%% Instances of this type currently form the majority of the Raft log.
+%% TODO: this type is obsolete and is kept only for compatibility with
+%% BPAPIs. Remove it when emqx_ds_proto_v4 is gone (EMQX 5.6)
 -type batch() :: #{
     ?tag := ?BATCH,
-    ?batch_operations := [emqx_ds:operation()],
-    ?batch_preconditions => [emqx_ds:precondition()]
+    ?batch_messages := [emqx_types:message()]
 }.

 -type generation_rank() :: {shard_id(), term()}.
@@ -245,45 +240,16 @@ drop_db(DB) ->
     _ = emqx_ds_proto_v4:drop_db(list_nodes(), DB),
     emqx_ds_replication_layer_meta:drop_db(DB).

--spec store_batch(emqx_ds:db(), emqx_ds:batch(), emqx_ds:message_store_opts()) ->
+-spec store_batch(emqx_ds:db(), [emqx_types:message(), ...], emqx_ds:message_store_opts()) ->
     emqx_ds:store_batch_result().
-store_batch(DB, Batch = #dsbatch{preconditions = [_ | _]}, Opts) ->
-    %% NOTE: Atomic batch is implied, will not check with DB config.
-    store_batch_atomic(DB, Batch, Opts);
-store_batch(DB, Batch, Opts) ->
-    case emqx_ds_replication_layer_meta:db_config(DB) of
-        #{atomic_batches := true} ->
-            store_batch_atomic(DB, Batch, Opts);
-        #{} ->
-            store_batch_buffered(DB, Batch, Opts)
-    end.
-
-store_batch_buffered(DB, #dsbatch{operations = Operations}, Opts) ->
-    store_batch_buffered(DB, Operations, Opts);
-store_batch_buffered(DB, Batch, Opts) ->
+store_batch(DB, Messages, Opts) ->
     try
-        emqx_ds_buffer:store_batch(DB, Batch, Opts)
+        emqx_ds_buffer:store_batch(DB, Messages, Opts)
     catch
         error:{Reason, _Call} when Reason == timeout; Reason == noproc ->
             {error, recoverable, Reason}
     end.

-store_batch_atomic(DB, Batch, _Opts) ->
-    Shards = shards_of_batch(DB, Batch),
-    case Shards of
-        [Shard] ->
-            case ra_store_batch(DB, Shard, Batch) of
-                {timeout, ServerId} ->
-                    {error, recoverable, {timeout, ServerId}};
-                Result ->
-                    Result
-            end;
-        [] ->
-            ok;
-        [_ | _] ->
-            {error, unrecoverable, atomic_batch_spans_multiple_shards}
-    end.
-
 -spec get_streams(emqx_ds:db(), emqx_ds:topic_filter(), emqx_ds:time()) ->
     [{emqx_ds:stream_rank(), stream()}].
 get_streams(DB, TopicFilter, StartTime) ->
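Reviewer note: on the removed (master) side above, store_batch/3 picks the write path per batch rather than per database. A hypothetical path_of/2 helper summarizing the removed clauses (sketch only; the name is illustrative, the dispatch logic is from the hunk above):

    path_of(#dsbatch{preconditions = [_ | _]}, _DBConfig) ->
        %% Preconditions imply atomicity regardless of DB configuration.
        atomic;
    path_of(_Batch, #{atomic_batches := true}) ->
        atomic;
    path_of(_Batch, _DBConfig) ->
        buffered.

The atomic path goes through ra_store_batch/3 and requires every operation in the batch to hash to a single shard; otherwise master returns {error, unrecoverable, atomic_batch_spans_multiple_shards}.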
@@ -426,49 +392,17 @@ flush_buffer(DB, Shard, Messages, State) ->
     end,
     {State, Result}.

--spec shard_of_operation(
-    emqx_ds:db(),
-    emqx_ds:operation() | emqx_ds:precondition(),
-    clientid | topic,
-    _Options
-) ->
+-spec shard_of_message(emqx_ds:db(), emqx_types:message(), clientid | topic, _Options) ->
     emqx_ds_replication_layer:shard_id().
-shard_of_operation(DB, #message{from = From, topic = Topic}, SerializeBy, _Options) ->
-    case SerializeBy of
-        clientid -> Key = From;
-        topic -> Key = Topic
-    end,
-    shard_of_key(DB, Key);
-shard_of_operation(DB, {_OpName, Matcher}, SerializeBy, _Options) ->
-    #message_matcher{from = From, topic = Topic} = Matcher,
-    case SerializeBy of
-        clientid -> Key = From;
-        topic -> Key = Topic
-    end,
-    shard_of_key(DB, Key).
-
-shard_of_key(DB, Key) ->
+shard_of_message(DB, #message{from = From, topic = Topic}, SerializeBy, _Options) ->
     N = emqx_ds_replication_shard_allocator:n_shards(DB),
-    Hash = erlang:phash2(Key, N),
+    Hash =
+        case SerializeBy of
+            clientid -> erlang:phash2(From, N);
+            topic -> erlang:phash2(Topic, N)
+        end,
     integer_to_binary(Hash).

-shards_of_batch(DB, #dsbatch{operations = Operations, preconditions = Preconditions}) ->
-    shards_of_batch(DB, Preconditions, shards_of_batch(DB, Operations, []));
-shards_of_batch(DB, Operations) ->
-    shards_of_batch(DB, Operations, []).
-
-shards_of_batch(DB, [Operation | Rest], Acc) ->
-    case shard_of_operation(DB, Operation, clientid, #{}) of
-        Shard when Shard =:= hd(Acc) ->
-            shards_of_batch(DB, Rest, Acc);
-        Shard when Acc =:= [] ->
-            shards_of_batch(DB, Rest, [Shard]);
-        ShardAnother ->
-            [ShardAnother | Acc]
-    end;
-shards_of_batch(_DB, [], Acc) ->
-    Acc.
-
 %%================================================================================
 %% Internal exports (RPC targets)
 %%================================================================================
@@ -678,7 +612,7 @@ list_nodes() ->
 -define(SHARD_RPC(DB, SHARD, NODE, BODY),
     case
         emqx_ds_replication_layer_shard:servers(
-            DB, SHARD, application:get_env(emqx_ds_builtin_raft, reads, leader_preferred)
+            DB, SHARD, application:get_env(emqx_durable_storage, reads, leader_preferred)
         )
     of
         [{_, NODE} | _] ->
@@ -690,22 +624,13 @@ list_nodes() ->
     end
 ).

--spec ra_store_batch(emqx_ds:db(), emqx_ds_replication_layer:shard_id(), emqx_ds:batch()) ->
-    ok | {timeout, _} | emqx_ds:error(_).
-ra_store_batch(DB, Shard, Batch) ->
-    case Batch of
-        #dsbatch{operations = Operations, preconditions = Preconditions} ->
-            Command = #{
-                ?tag => ?BATCH,
-                ?batch_operations => Operations,
-                ?batch_preconditions => Preconditions
-            };
-        Operations ->
-            Command = #{
-                ?tag => ?BATCH,
-                ?batch_operations => Operations
-            }
-    end,
+-spec ra_store_batch(emqx_ds:db(), emqx_ds_replication_layer:shard_id(), [emqx_types:message()]) ->
+    ok | {timeout, _} | {error, recoverable | unrecoverable, _Err}.
+ra_store_batch(DB, Shard, Messages) ->
+    Command = #{
+        ?tag => ?BATCH,
+        ?batch_messages => Messages
+    },
     Servers = emqx_ds_replication_layer_shard:servers(DB, Shard, leader_preferred),
     case emqx_ds_replication_layer_shard:process_command(Servers, Command, ?RA_TIMEOUT) of
         {ok, Result, _Leader} ->
@@ -842,7 +767,6 @@ ra_drop_shard(DB, Shard) ->

 -define(pd_ra_idx_need_release, '$emqx_ds_raft_idx_need_release').
 -define(pd_ra_bytes_need_release, '$emqx_ds_raft_bytes_need_release').
--define(pd_ra_force_monotonic, '$emqx_ds_raft_force_monotonic').

 -spec init(_Args :: map()) -> ra_state().
 init(#{db := DB, shard := Shard}) ->
@@ -852,30 +776,18 @@ init(#{db := DB, shard := Shard}) ->
     {ra_state(), _Reply, _Effects}.
 apply(
     RaftMeta,
-    Command = #{
+    #{
         ?tag := ?BATCH,
-        ?batch_operations := OperationsIn
+        ?batch_messages := MessagesIn
     },
     #{db_shard := DBShard = {DB, Shard}, latest := Latest0} = State0
 ) ->
-    ?tp(ds_ra_apply_batch, #{db => DB, shard => Shard, batch => OperationsIn, latest => Latest0}),
-    Preconditions = maps:get(?batch_preconditions, Command, []),
-    {Stats, Latest, Operations} = assign_timestamps(DB, Latest0, OperationsIn),
-    %% FIXME
-    case emqx_ds_precondition:verify(emqx_ds_storage_layer, DBShard, Preconditions) of
-        ok ->
-            Result = emqx_ds_storage_layer:store_batch(DBShard, Operations, #{durable => false}),
-            State = State0#{latest := Latest},
-            set_ts(DBShard, Latest),
-            Effects = try_release_log(Stats, RaftMeta, State);
-        PreconditionFailed = {precondition_failed, _} ->
-            Result = {error, unrecoverable, PreconditionFailed},
-            State = State0,
-            Effects = [];
-        Result ->
-            State = State0,
-            Effects = []
-    end,
+    ?tp(ds_ra_apply_batch, #{db => DB, shard => Shard, batch => MessagesIn, latest => Latest0}),
+    {Stats, Latest, Messages} = assign_timestamps(Latest0, MessagesIn),
+    Result = emqx_ds_storage_layer:store_batch(DBShard, Messages, #{durable => false}),
+    State = State0#{latest := Latest},
+    set_ts(DBShard, Latest),
+    Effects = try_release_log(Stats, RaftMeta, State),
     Effects =/= [] andalso ?tp(ds_ra_effects, #{effects => Effects, meta => RaftMeta}),
     {State, Result, Effects};
 apply(
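Reviewer note: the hunk above shows where master enforces preconditions: inside the Ra state machine's apply/3, so verification and the write are serialized by the Raft log itself and cannot race. Reduced to its control flow (a sketch of the removed clause, not the literal code):

    case emqx_ds_precondition:verify(emqx_ds_storage_layer, DBShard, Preconditions) of
        ok ->
            %% All preconditions hold: apply the operations, advance `latest'.
            emqx_ds_storage_layer:store_batch(DBShard, Operations, #{durable => false});
        {precondition_failed, _} = Failed ->
            %% Reject the whole batch without touching shard state.
            {error, unrecoverable, Failed}
    end

This matches the {error, unrecoverable, {precondition_failed, _}} shape asserted by t_11 and t_12 in the test suite diff earlier.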
@@ -950,21 +862,6 @@ apply(
     Effects = handle_custom_event(DBShard, Latest, CustomEvent),
     {State#{latest => Latest}, ok, Effects}.

-assign_timestamps(DB, Latest, Messages) ->
-    ForceMonotonic = force_monotonic_timestamps(DB),
-    assign_timestamps(ForceMonotonic, Latest, Messages, [], 0, 0).
-
-force_monotonic_timestamps(DB) ->
-    case erlang:get(?pd_ra_force_monotonic) of
-        undefined ->
-            DBConfig = emqx_ds_replication_layer_meta:db_config(DB),
-            Flag = maps:get(force_monotonic_timestamps, DBConfig),
-            erlang:put(?pd_ra_force_monotonic, Flag);
-        Flag ->
-            ok
-    end,
-    Flag.
-
 try_release_log({_N, BatchSize}, RaftMeta = #{index := CurrentIdx}, State) ->
     %% NOTE
     %% Because cursor release means storage flush (see
@@ -1027,7 +924,10 @@ tick(TimeMs, #{db_shard := DBShard = {DB, Shard}, latest := Latest}) ->
     ?tp(emqx_ds_replication_layer_tick, #{db => DB, shard => Shard, timestamp => Timestamp}),
     handle_custom_event(DBShard, Timestamp, tick).

-assign_timestamps(true, Latest0, [Message0 = #message{} | Rest], Acc, N, Sz) ->
+assign_timestamps(Latest, Messages) ->
+    assign_timestamps(Latest, Messages, [], 0, 0).
+
+assign_timestamps(Latest0, [Message0 | Rest], Acc, N, Sz) ->
     case emqx_message:timestamp(Message0, microsecond) of
         TimestampUs when TimestampUs > Latest0 ->
             Latest = TimestampUs,
@@ -1036,17 +936,8 @@ assign_timestamps(true, Latest0, [Message0 = #message{} | Rest], Acc, N, Sz) ->
             Latest = Latest0 + 1,
             Message = assign_timestamp(Latest, Message0)
     end,
-    MSize = approx_message_size(Message0),
-    assign_timestamps(true, Latest, Rest, [Message | Acc], N + 1, Sz + MSize);
-assign_timestamps(false, Latest0, [Message0 = #message{} | Rest], Acc, N, Sz) ->
-    TimestampUs = emqx_message:timestamp(Message0),
-    Latest = max(Latest0, TimestampUs),
-    Message = assign_timestamp(TimestampUs, Message0),
-    MSize = approx_message_size(Message0),
-    assign_timestamps(false, Latest, Rest, [Message | Acc], N + 1, Sz + MSize);
-assign_timestamps(ForceMonotonic, Latest, [Operation | Rest], Acc, N, Sz) ->
-    assign_timestamps(ForceMonotonic, Latest, Rest, [Operation | Acc], N + 1, Sz);
-assign_timestamps(_ForceMonotonic, Latest, [], Acc, N, Size) ->
+    assign_timestamps(Latest, Rest, [Message | Acc], N + 1, Sz + approx_message_size(Message0));
+assign_timestamps(Latest, [], Acc, N, Size) ->
     {{N, Size}, Latest, lists:reverse(Acc)}.

 assign_timestamp(TimestampUs, Message) ->

@@ -19,8 +19,7 @@
 -define(enc, 3).

 %% ?BATCH
--define(batch_operations, 2).
--define(batch_preconditions, 4).
+-define(batch_messages, 2).
 -define(timestamp, 3).

 %% add_generation / update_config

@@ -100,7 +100,7 @@ open(TopicSubscriptions, Opts) ->
     State0 = init_state(Opts),
     State1 = lists:foldl(
         fun({ShareTopicFilter, #{}}, State) ->
-            ?tp(debug, ds_agent_open_subscription, #{
+            ?tp(warning, ds_agent_open_subscription, #{
                 topic_filter => ShareTopicFilter
             }),
             add_shared_subscription(State, ShareTopicFilter)
@@ -120,7 +120,7 @@ can_subscribe(_State, _ShareTopicFilter, _SubOpts) ->

 -spec on_subscribe(t(), share_topic_filter(), emqx_types:subopts()) -> t().
 on_subscribe(State0, ShareTopicFilter, _SubOpts) ->
-    ?tp(debug, ds_agent_on_subscribe, #{
+    ?tp(warning, ds_agent_on_subscribe, #{
         share_topic_filter => ShareTopicFilter
     }),
     add_shared_subscription(State0, ShareTopicFilter).
@@ -163,7 +163,7 @@ on_disconnect(#{groups := Groups0} = State, StreamProgresses) ->

 -spec on_info(t(), term()) -> t().
 on_info(State, ?leader_lease_streams_match(GroupId, Leader, StreamProgresses, Version)) ->
-    ?SLOG(debug, #{
+    ?SLOG(info, #{
         msg => leader_lease_streams,
         group_id => GroupId,
         streams => StreamProgresses,
@@ -176,7 +176,7 @@ on_info(State, ?leader_lease_streams_match(GroupId, Leader, StreamProgresses, Ve
         )
     end);
 on_info(State, ?leader_renew_stream_lease_match(GroupId, Version)) ->
-    ?SLOG(debug, #{
+    ?SLOG(info, #{
         msg => leader_renew_stream_lease,
         group_id => GroupId,
         version => Version
@@ -185,7 +185,7 @@ on_info(State, ?leader_renew_stream_lease_match(GroupId, Version)) ->
         emqx_ds_shared_sub_group_sm:handle_leader_renew_stream_lease(GSM, Version)
     end);
 on_info(State, ?leader_renew_stream_lease_match(GroupId, VersionOld, VersionNew)) ->
-    ?SLOG(debug, #{
+    ?SLOG(info, #{
         msg => leader_renew_stream_lease,
         group_id => GroupId,
         version_old => VersionOld,
@@ -195,7 +195,7 @@ on_info(State, ?leader_renew_stream_lease_match(GroupId, VersionOld, VersionNew)
         emqx_ds_shared_sub_group_sm:handle_leader_renew_stream_lease(GSM, VersionOld, VersionNew)
     end);
 on_info(State, ?leader_update_streams_match(GroupId, VersionOld, VersionNew, StreamsNew)) ->
-    ?SLOG(debug, #{
+    ?SLOG(info, #{
         msg => leader_update_streams,
         group_id => GroupId,
         version_old => VersionOld,
@@ -208,7 +208,7 @@ on_info(State, ?leader_update_streams_match(GroupId, VersionOld, VersionNew, Str
         )
     end);
 on_info(State, ?leader_invalidate_match(GroupId)) ->
-    ?SLOG(debug, #{
+    ?SLOG(info, #{
         msg => leader_invalidate,
         group_id => GroupId
     }),
@@ -245,7 +245,7 @@ delete_shared_subscription(State, ShareTopicFilter, GroupProgress) ->
 add_shared_subscription(
     #{session_id := SessionId, groups := Groups0} = State0, ShareTopicFilter
 ) ->
-    ?SLOG(debug, #{
+    ?SLOG(info, #{
         msg => agent_add_shared_subscription,
         share_topic_filter => ShareTopicFilter
     }),

@@ -120,7 +120,7 @@ new(#{
     send_after := SendAfter
 }) ->
     ?SLOG(
-        debug,
+        info,
         #{
             msg => group_sm_new,
             agent => Agent,
@@ -133,7 +133,7 @@ new(#{
         agent => Agent,
         send_after => SendAfter
     },
-    ?tp(debug, group_sm_new, #{
+    ?tp(warning, group_sm_new, #{
         agent => Agent,
         share_topic_filter => ShareTopicFilter
     }),
@@ -176,7 +176,7 @@ handle_disconnect(
 %% Connecting state

 handle_connecting(#{agent := Agent, share_topic_filter := ShareTopicFilter} = GSM) ->
-    ?tp(debug, group_sm_enter_connecting, #{
+    ?tp(warning, group_sm_enter_connecting, #{
         agent => Agent,
         share_topic_filter => ShareTopicFilter
     }),
@@ -264,13 +264,11 @@ handle_leader_update_streams(
     VersionNew,
     StreamProgresses
 ) ->
-    ?tp(debug, shared_sub_group_sm_leader_update_streams, #{
+    ?tp(warning, shared_sub_group_sm_leader_update_streams, #{
         id => Id,
         version_old => VersionOld,
         version_new => VersionNew,
-        stream_progresses => emqx_persistent_session_ds_shared_subs:format_stream_progresses(
-            StreamProgresses
-        )
+        stream_progresses => emqx_ds_shared_sub_proto:format_stream_progresses(StreamProgresses)
     }),
     {AddEvents, Streams1} = lists:foldl(
         fun(#{stream := Stream, progress := Progress}, {AddEventAcc, StreamsAcc}) ->
@@ -305,11 +303,9 @@ handle_leader_update_streams(
         maps:keys(Streams1)
     ),
     StreamLeaseEvents = AddEvents ++ RevokeEvents,
-    ?tp(debug, shared_sub_group_sm_leader_update_streams, #{
+    ?tp(warning, shared_sub_group_sm_leader_update_streams, #{
         id => Id,
-        stream_lease_events => emqx_persistent_session_ds_shared_subs:format_lease_events(
-            StreamLeaseEvents
-        )
+        stream_lease_events => emqx_ds_shared_sub_proto:format_lease_events(StreamLeaseEvents)
     }),
     transition(
         GSM,
@@ -435,11 +431,24 @@ handle_leader_invalidate(#{agent := Agent, share_topic_filter := ShareTopicFilte
 %% Internal API
 %%-----------------------------------------------------------------------

-handle_state_timeout(#{state := ?connecting} = GSM, find_leader_timeout, _Message) ->
+handle_state_timeout(
+    #{state := ?connecting, share_topic_filter := ShareTopicFilter} = GSM,
+    find_leader_timeout,
+    _Message
+) ->
+    ?tp(debug, find_leader_timeout, #{share_topic_filter => ShareTopicFilter}),
     handle_find_leader_timeout(GSM);
-handle_state_timeout(#{state := ?replaying} = GSM, renew_lease_timeout, _Message) ->
+handle_state_timeout(
+    #{state := ?replaying} = GSM,
+    renew_lease_timeout,
+    _Message
+) ->
     handle_renew_lease_timeout(GSM);
-handle_state_timeout(GSM, update_stream_state_timeout, _Message) ->
+handle_state_timeout(
+    GSM,
+    update_stream_state_timeout,
+    _Message
+) ->
     ?tp(debug, update_stream_state_timeout, #{}),
     handle_stream_progress(GSM, []).

@@ -164,7 +164,7 @@ handle_event({call, From}, #register{register_fun = Fun}, ?leader_waiting_regist
 %%--------------------------------------------------------------------
 %% repalying state
 handle_event(enter, _OldState, ?leader_active, #{topic := Topic} = _Data) ->
-    ?tp(debug, shared_sub_leader_enter_actve, #{topic => Topic}),
+    ?tp(warning, shared_sub_leader_enter_actve, #{topic => Topic}),
     {keep_state_and_data, [
         {{timeout, #renew_streams{}}, 0, #renew_streams{}},
         {{timeout, #renew_leases{}}, ?dq_config(leader_renew_lease_interval_ms), #renew_leases{}},
@@ -174,7 +174,7 @@ handle_event(enter, _OldState, ?leader_active, #{topic := Topic} = _Data) ->
 %% timers
 %% renew_streams timer
 handle_event({timeout, #renew_streams{}}, #renew_streams{}, ?leader_active, Data0) ->
-    ?tp(debug, shared_sub_leader_timeout, #{timeout => renew_streams}),
+    % ?tp(warning, shared_sub_leader_timeout, #{timeout => renew_streams}),
     Data1 = renew_streams(Data0),
     {keep_state, Data1,
         {
@@ -184,7 +184,7 @@ handle_event({timeout, #renew_streams{}}, #renew_streams{}, ?leader_active, Data
         }};
 %% renew_leases timer
 handle_event({timeout, #renew_leases{}}, #renew_leases{}, ?leader_active, Data0) ->
-    ?tp(debug, shared_sub_leader_timeout, #{timeout => renew_leases}),
+    % ?tp(warning, shared_sub_leader_timeout, #{timeout => renew_leases}),
     Data1 = renew_leases(Data0),
     {keep_state, Data1,
         {{timeout, #renew_leases{}}, ?dq_config(leader_renew_lease_interval_ms), #renew_leases{}}};
@@ -279,7 +279,7 @@ renew_streams(
     Data2 = Data1#{stream_states => NewStreamStates, rank_progress => RankProgress1},
     Data3 = revoke_streams(Data2),
     Data4 = assign_streams(Data3),
-    ?SLOG(debug, #{
+    ?SLOG(info, #{
         msg => leader_renew_streams,
         topic_filter => TopicFilter,
         new_streams => length(NewStreamsWRanks)
@@ -368,7 +368,7 @@ revoke_excess_streams_from_agent(Data0, Agent, DesiredCount) ->
         false ->
             AgentState0;
         true ->
-            ?tp(debug, shared_sub_leader_revoke_streams, #{
+            ?tp(warning, shared_sub_leader_revoke_streams, #{
                 agent => Agent,
                 agent_stream_count => length(Streams0),
                 revoke_count => RevokeCount,
@@ -421,7 +421,7 @@ assign_lacking_streams(Data0, Agent, DesiredCount) ->
         false ->
             Data0;
         true ->
-            ?tp(debug, shared_sub_leader_assign_streams, #{
+            ?tp(warning, shared_sub_leader_assign_streams, #{
                 agent => Agent,
                 agent_stream_count => length(Streams0),
                 assign_count => AssignCount,
@@ -449,7 +449,7 @@ select_streams_for_assign(Data0, _Agent, AssignCount) ->
 %% renew_leases - send lease confirmations to agents

 renew_leases(#{agents := AgentStates} = Data) ->
-    ?tp(debug, shared_sub_leader_renew_leases, #{agents => maps:keys(AgentStates)}),
+    ?tp(warning, shared_sub_leader_renew_leases, #{agents => maps:keys(AgentStates)}),
     ok = lists:foreach(
         fun({Agent, AgentState}) ->
             renew_lease(Data, Agent, AgentState)
@@ -492,7 +492,7 @@ drop_timeout_agents(#{agents := Agents} = Data) ->
                 (is_integer(NoReplayingDeadline) andalso NoReplayingDeadline < Now)
             of
                 true ->
-                    ?SLOG(debug, #{
+                    ?SLOG(info, #{
                         msg => leader_agent_timeout,
                         now => Now,
                         update_deadline => UpdateDeadline,
@@ -516,14 +516,14 @@ connect_agent(
     Agent,
     AgentMetadata
 ) ->
-    ?SLOG(debug, #{
+    ?SLOG(info, #{
         msg => leader_agent_connected,
         agent => Agent,
         group_id => GroupId
     }),
     case Agents of
         #{Agent := AgentState} ->
-            ?tp(debug, shared_sub_leader_agent_already_connected, #{
+            ?tp(warning, shared_sub_leader_agent_already_connected, #{
                 agent => Agent
             }),
             reconnect_agent(Data, Agent, AgentMetadata, AgentState);
@@ -546,7 +546,7 @@ reconnect_agent(
     AgentMetadata,
     #{streams := OldStreams, revoked_streams := OldRevokedStreams} = _OldAgentState
 ) ->
-    ?tp(debug, shared_sub_leader_agent_reconnect, #{
+    ?tp(warning, shared_sub_leader_agent_reconnect, #{
         agent => Agent,
         agent_metadata => AgentMetadata,
         inherited_streams => OldStreams
@@ -767,7 +767,7 @@ update_agent_stream_states(Data0, Agent, AgentStreamProgresses, VersionOld, Vers
 disconnect_agent(Data0, Agent, AgentStreamProgresses, Version) ->
     case get_agent_state(Data0, Agent) of
         #{version := Version} ->
-            ?tp(debug, shared_sub_leader_disconnect_agent, #{
+            ?tp(warning, shared_sub_leader_disconnect_agent, #{
                 agent => Agent,
                 version => Version
             }),
@@ -794,7 +794,7 @@ agent_transition_to_waiting_updating(
     Streams,
     RevokedStreams
 ) ->
-    ?tp(debug, shared_sub_leader_agent_state_transition, #{
+    ?tp(warning, shared_sub_leader_agent_state_transition, #{
         agent => Agent,
         old_state => OldState,
         new_state => ?waiting_updating
@@ -818,7 +818,7 @@ agent_transition_to_waiting_updating(
 agent_transition_to_waiting_replaying(
     #{group_id := GroupId} = _Data, Agent, #{state := OldState, version := Version} = AgentState0
 ) ->
-    ?tp(debug, shared_sub_leader_agent_state_transition, #{
+    ?tp(warning, shared_sub_leader_agent_state_transition, #{
         agent => Agent,
         old_state => OldState,
         new_state => ?waiting_replaying
@@ -833,7 +833,7 @@ agent_transition_to_waiting_replaying(
 agent_transition_to_initial_waiting_replaying(
     #{group_id := GroupId} = Data, Agent, AgentMetadata, InitialStreams
 ) ->
-    ?tp(debug, shared_sub_leader_agent_state_transition, #{
+    ?tp(warning, shared_sub_leader_agent_state_transition, #{
         agent => Agent,
         old_state => none,
         new_state => ?waiting_replaying
@@ -856,7 +856,7 @@ agent_transition_to_initial_waiting_replaying(
     renew_no_replaying_deadline(AgentState).

 agent_transition_to_replaying(Agent, #{state := ?waiting_replaying} = AgentState) ->
-    ?tp(debug, shared_sub_leader_agent_state_transition, #{
+    ?tp(warning, shared_sub_leader_agent_state_transition, #{
         agent => Agent,
         old_state => ?waiting_replaying,
         new_state => ?replaying
@@ -868,7 +868,7 @@ agent_transition_to_replaying(Agent, #{state := ?waiting_replaying} = AgentState
     }.

 agent_transition_to_updating(Agent, #{state := ?waiting_updating} = AgentState0) ->
-    ?tp(debug, shared_sub_leader_agent_state_transition, #{
+    ?tp(warning, shared_sub_leader_agent_state_transition, #{
         agent => Agent,
         old_state => ?waiting_updating,
         new_state => ?updating
@@ -995,7 +995,7 @@ drop_agent(#{agents := Agents} = Data0, Agent) ->
     #{streams := Streams, revoked_streams := RevokedStreams} = AgentState,
     AllStreams = Streams ++ RevokedStreams,
     Data1 = unassign_streams(Data0, AllStreams),
-    ?tp(debug, shared_sub_leader_drop_agent, #{agent => Agent}),
+    ?tp(warning, shared_sub_leader_drop_agent, #{agent => Agent}),
     Data1#{agents => maps:remove(Agent, Agents)}.

 invalidate_agent(#{group_id := GroupId}, Agent) ->

|
||||||
State#{RankX => #{min_y => MinY, ys => Ys2}};
|
State#{RankX => #{min_y => MinY, ys => Ys2}};
|
||||||
_ ->
|
_ ->
|
||||||
?SLOG(
|
?SLOG(
|
||||||
debug,
|
warning,
|
||||||
#{
|
#{
|
||||||
msg => leader_rank_progress_double_or_invalid_update,
|
msg => leader_rank_progress_double_or_invalid_update,
|
||||||
rank_x => RankX,
|
rank_x => RankX,
|
||||||
|
|
|
@@ -22,6 +22,12 @@
 ]).

 -export([
+    format_stream_progresses/1,
+    format_stream_progress/1,
+    format_stream_key/1,
+    format_stream_keys/1,
+    format_lease_event/1,
+    format_lease_events/1,
     agent/2
 ]).

@@ -51,20 +57,6 @@
     agent_metadata/0
 ]).

--define(log_agent_msg(ToLeader, Msg),
-    ?tp(debug, shared_sub_proto_msg, #{
-        to_leader => ToLeader,
-        msg => emqx_ds_shared_sub_proto_format:format_agent_msg(Msg)
-    })
-).
-
--define(log_leader_msg(ToAgent, Msg),
-    ?tp(debug, shared_sub_proto_msg, #{
-        to_agent => ToAgent,
-        msg => emqx_ds_shared_sub_proto_format:format_leader_msg(Msg)
-    })
-).
-
 %%--------------------------------------------------------------------
 %% API
 %%--------------------------------------------------------------------
@@ -75,7 +67,15 @@
 agent_connect_leader(ToLeader, FromAgent, AgentMetadata, ShareTopicFilter) when
     ?is_local_leader(ToLeader)
 ->
-    send_agent_msg(ToLeader, ?agent_connect_leader(FromAgent, AgentMetadata, ShareTopicFilter));
+    ?tp(warning, shared_sub_proto_msg, #{
+        type => agent_connect_leader,
+        to_leader => ToLeader,
+        from_agent => FromAgent,
+        agent_metadata => AgentMetadata,
+        share_topic_filter => ShareTopicFilter
+    }),
+    _ = erlang:send(ToLeader, ?agent_connect_leader(FromAgent, AgentMetadata, ShareTopicFilter)),
+    ok;
 agent_connect_leader(ToLeader, FromAgent, AgentMetadata, ShareTopicFilter) ->
     emqx_ds_shared_sub_proto_v1:agent_connect_leader(
         ?leader_node(ToLeader), ToLeader, FromAgent, AgentMetadata, ShareTopicFilter
@@ -85,7 +85,15 @@ agent_connect_leader(ToLeader, FromAgent, AgentMetadata, ShareTopicFilter) ->
 agent_update_stream_states(ToLeader, FromAgent, StreamProgresses, Version) when
     ?is_local_leader(ToLeader)
 ->
-    send_agent_msg(ToLeader, ?agent_update_stream_states(FromAgent, StreamProgresses, Version));
+    ?tp(warning, shared_sub_proto_msg, #{
+        type => agent_update_stream_states,
+        to_leader => ToLeader,
+        from_agent => FromAgent,
+        stream_progresses => format_stream_progresses(StreamProgresses),
+        version => Version
+    }),
+    _ = erlang:send(ToLeader, ?agent_update_stream_states(FromAgent, StreamProgresses, Version)),
+    ok;
 agent_update_stream_states(ToLeader, FromAgent, StreamProgresses, Version) ->
     emqx_ds_shared_sub_proto_v1:agent_update_stream_states(
         ?leader_node(ToLeader), ToLeader, FromAgent, StreamProgresses, Version
@@ -97,9 +105,18 @@ agent_update_stream_states(ToLeader, FromAgent, StreamProgresses, Version) ->
 agent_update_stream_states(ToLeader, FromAgent, StreamProgresses, VersionOld, VersionNew) when
     ?is_local_leader(ToLeader)
 ->
-    send_agent_msg(
+    ?tp(warning, shared_sub_proto_msg, #{
+        type => agent_update_stream_states,
+        to_leader => ToLeader,
+        from_agent => FromAgent,
+        stream_progresses => format_stream_progresses(StreamProgresses),
+        version_old => VersionOld,
+        version_new => VersionNew
+    }),
+    _ = erlang:send(
         ToLeader, ?agent_update_stream_states(FromAgent, StreamProgresses, VersionOld, VersionNew)
-    );
+    ),
+    ok;
 agent_update_stream_states(ToLeader, FromAgent, StreamProgresses, VersionOld, VersionNew) ->
     emqx_ds_shared_sub_proto_v1:agent_update_stream_states(
         ?leader_node(ToLeader), ToLeader, FromAgent, StreamProgresses, VersionOld, VersionNew
@@ -108,7 +125,15 @@ agent_update_stream_states(ToLeader, FromAgent, StreamProgresses, VersionOld, Ve
 agent_disconnect(ToLeader, FromAgent, StreamProgresses, Version) when
     ?is_local_leader(ToLeader)
 ->
-    send_agent_msg(ToLeader, ?agent_disconnect(FromAgent, StreamProgresses, Version));
+    ?tp(warning, shared_sub_proto_msg, #{
+        type => agent_disconnect,
+        to_leader => ToLeader,
+        from_agent => FromAgent,
+        stream_progresses => format_stream_progresses(StreamProgresses),
+        version => Version
+    }),
+    _ = erlang:send(ToLeader, ?agent_disconnect(FromAgent, StreamProgresses, Version)),
+    ok;
 agent_disconnect(ToLeader, FromAgent, StreamProgresses, Version) ->
     emqx_ds_shared_sub_proto_v1:agent_disconnect(
         ?leader_node(ToLeader), ToLeader, FromAgent, StreamProgresses, Version
@@ -119,7 +144,19 @@ agent_disconnect(ToLeader, FromAgent, StreamProgresses, Version) ->
 -spec leader_lease_streams(agent(), group(), leader(), list(leader_stream_progress()), version()) ->
     ok.
 leader_lease_streams(ToAgent, OfGroup, Leader, Streams, Version) when ?is_local_agent(ToAgent) ->
-    send_leader_msg(ToAgent, ?leader_lease_streams(OfGroup, Leader, Streams, Version));
+    ?tp(warning, shared_sub_proto_msg, #{
+        type => leader_lease_streams,
+        to_agent => ToAgent,
+        of_group => OfGroup,
+        leader => Leader,
+        streams => format_stream_progresses(Streams),
+        version => Version
+    }),
+    _ = emqx_persistent_session_ds_shared_subs_agent:send(
+        ?agent_pid(ToAgent),
+        ?leader_lease_streams(OfGroup, Leader, Streams, Version)
+    ),
+    ok;
 leader_lease_streams(ToAgent, OfGroup, Leader, Streams, Version) ->
     emqx_ds_shared_sub_proto_v1:leader_lease_streams(
         ?agent_node(ToAgent), ToAgent, OfGroup, Leader, Streams, Version
|
?agent_node(ToAgent), ToAgent, OfGroup, Leader, Streams, Version
|
||||||
|
@ -127,7 +164,17 @@ leader_lease_streams(ToAgent, OfGroup, Leader, Streams, Version) ->
|
||||||
|
|
||||||
-spec leader_renew_stream_lease(agent(), group(), version()) -> ok.
|
-spec leader_renew_stream_lease(agent(), group(), version()) -> ok.
|
||||||
leader_renew_stream_lease(ToAgent, OfGroup, Version) when ?is_local_agent(ToAgent) ->
|
leader_renew_stream_lease(ToAgent, OfGroup, Version) when ?is_local_agent(ToAgent) ->
|
||||||
send_leader_msg(ToAgent, ?leader_renew_stream_lease(OfGroup, Version));
|
?tp(warning, shared_sub_proto_msg, #{
|
||||||
|
type => leader_renew_stream_lease,
|
||||||
|
to_agent => ToAgent,
|
||||||
|
of_group => OfGroup,
|
||||||
|
version => Version
|
||||||
|
}),
|
||||||
|
_ = emqx_persistent_session_ds_shared_subs_agent:send(
|
||||||
|
?agent_pid(ToAgent),
|
||||||
|
?leader_renew_stream_lease(OfGroup, Version)
|
||||||
|
),
|
||||||
|
ok;
|
||||||
leader_renew_stream_lease(ToAgent, OfGroup, Version) ->
|
leader_renew_stream_lease(ToAgent, OfGroup, Version) ->
|
||||||
emqx_ds_shared_sub_proto_v1:leader_renew_stream_lease(
|
emqx_ds_shared_sub_proto_v1:leader_renew_stream_lease(
|
||||||
?agent_node(ToAgent), ToAgent, OfGroup, Version
|
?agent_node(ToAgent), ToAgent, OfGroup, Version
|
||||||
|
@ -135,7 +182,18 @@ leader_renew_stream_lease(ToAgent, OfGroup, Version) ->
|
||||||
|
|
||||||
-spec leader_renew_stream_lease(agent(), group(), version(), version()) -> ok.
|
-spec leader_renew_stream_lease(agent(), group(), version(), version()) -> ok.
|
||||||
leader_renew_stream_lease(ToAgent, OfGroup, VersionOld, VersionNew) when ?is_local_agent(ToAgent) ->
|
leader_renew_stream_lease(ToAgent, OfGroup, VersionOld, VersionNew) when ?is_local_agent(ToAgent) ->
|
||||||
send_leader_msg(ToAgent, ?leader_renew_stream_lease(OfGroup, VersionOld, VersionNew));
|
?tp(warning, shared_sub_proto_msg, #{
|
||||||
|
type => leader_renew_stream_lease,
|
||||||
|
to_agent => ToAgent,
|
||||||
|
of_group => OfGroup,
|
||||||
|
version_old => VersionOld,
|
||||||
|
version_new => VersionNew
|
||||||
|
}),
|
||||||
|
_ = emqx_persistent_session_ds_shared_subs_agent:send(
|
||||||
|
?agent_pid(ToAgent),
|
||||||
|
?leader_renew_stream_lease(OfGroup, VersionOld, VersionNew)
|
||||||
|
),
|
||||||
|
ok;
|
||||||
leader_renew_stream_lease(ToAgent, OfGroup, VersionOld, VersionNew) ->
|
leader_renew_stream_lease(ToAgent, OfGroup, VersionOld, VersionNew) ->
|
||||||
emqx_ds_shared_sub_proto_v1:leader_renew_stream_lease(
|
emqx_ds_shared_sub_proto_v1:leader_renew_stream_lease(
|
||||||
?agent_node(ToAgent), ToAgent, OfGroup, VersionOld, VersionNew
|
?agent_node(ToAgent), ToAgent, OfGroup, VersionOld, VersionNew
|
||||||
|
@ -146,7 +204,19 @@ leader_renew_stream_lease(ToAgent, OfGroup, VersionOld, VersionNew) ->
|
||||||
leader_update_streams(ToAgent, OfGroup, VersionOld, VersionNew, StreamsNew) when
|
leader_update_streams(ToAgent, OfGroup, VersionOld, VersionNew, StreamsNew) when
|
||||||
?is_local_agent(ToAgent)
|
?is_local_agent(ToAgent)
|
||||||
->
|
->
|
||||||
send_leader_msg(ToAgent, ?leader_update_streams(OfGroup, VersionOld, VersionNew, StreamsNew));
|
?tp(warning, shared_sub_proto_msg, #{
|
||||||
|
type => leader_update_streams,
|
||||||
|
to_agent => ToAgent,
|
||||||
|
of_group => OfGroup,
|
||||||
|
version_old => VersionOld,
|
||||||
|
version_new => VersionNew,
|
||||||
|
streams_new => format_stream_progresses(StreamsNew)
|
||||||
|
}),
|
||||||
|
_ = emqx_persistent_session_ds_shared_subs_agent:send(
|
||||||
|
?agent_pid(ToAgent),
|
||||||
|
?leader_update_streams(OfGroup, VersionOld, VersionNew, StreamsNew)
|
||||||
|
),
|
||||||
|
ok;
|
||||||
leader_update_streams(ToAgent, OfGroup, VersionOld, VersionNew, StreamsNew) ->
|
leader_update_streams(ToAgent, OfGroup, VersionOld, VersionNew, StreamsNew) ->
|
||||||
emqx_ds_shared_sub_proto_v1:leader_update_streams(
|
emqx_ds_shared_sub_proto_v1:leader_update_streams(
|
||||||
?agent_node(ToAgent), ToAgent, OfGroup, VersionOld, VersionNew, StreamsNew
|
?agent_node(ToAgent), ToAgent, OfGroup, VersionOld, VersionNew, StreamsNew
|
||||||
|
@ -154,7 +224,16 @@ leader_update_streams(ToAgent, OfGroup, VersionOld, VersionNew, StreamsNew) ->
|
||||||
|
|
||||||
-spec leader_invalidate(agent(), group()) -> ok.
|
-spec leader_invalidate(agent(), group()) -> ok.
|
||||||
leader_invalidate(ToAgent, OfGroup) when ?is_local_agent(ToAgent) ->
|
leader_invalidate(ToAgent, OfGroup) when ?is_local_agent(ToAgent) ->
|
||||||
send_leader_msg(ToAgent, ?leader_invalidate(OfGroup));
|
?tp(warning, shared_sub_proto_msg, #{
|
||||||
|
type => leader_invalidate,
|
||||||
|
to_agent => ToAgent,
|
||||||
|
of_group => OfGroup
|
||||||
|
}),
|
||||||
|
_ = emqx_persistent_session_ds_shared_subs_agent:send(
|
||||||
|
?agent_pid(ToAgent),
|
||||||
|
?leader_invalidate(OfGroup)
|
||||||
|
),
|
||||||
|
ok;
|
||||||
leader_invalidate(ToAgent, OfGroup) ->
|
leader_invalidate(ToAgent, OfGroup) ->
|
||||||
emqx_ds_shared_sub_proto_v1:leader_invalidate(
|
emqx_ds_shared_sub_proto_v1:leader_invalidate(
|
||||||
?agent_node(ToAgent), ToAgent, OfGroup
|
?agent_node(ToAgent), ToAgent, OfGroup
|
||||||
|
@ -168,12 +247,41 @@ agent(Id, Pid) ->
|
||||||
_ = Id,
|
_ = Id,
|
||||||
?agent(Id, Pid).
|
?agent(Id, Pid).
|
||||||
|
|
||||||
send_agent_msg(ToLeader, Msg) ->
|
format_stream_progresses(Streams) ->
|
||||||
?log_agent_msg(ToLeader, Msg),
|
lists:map(
|
||||||
_ = erlang:send(ToLeader, Msg),
|
fun format_stream_progress/1,
|
||||||
ok.
|
Streams
|
||||||
|
).
|
||||||
|
|
||||||
send_leader_msg(ToAgent, Msg) ->
|
format_stream_progress(#{stream := Stream, progress := Progress} = Value) ->
|
||||||
?log_leader_msg(ToAgent, Msg),
|
Value#{stream => format_opaque(Stream), progress => format_progress(Progress)}.
|
||||||
_ = emqx_persistent_session_ds_shared_subs_agent:send(?agent_pid(ToAgent), Msg),
|
|
||||||
ok.
|
format_progress(#{iterator := Iterator} = Progress) ->
|
||||||
|
Progress#{iterator => format_opaque(Iterator)}.
|
||||||
|
|
||||||
|
format_stream_key({SubId, Stream}) ->
|
||||||
|
{SubId, format_opaque(Stream)}.
|
||||||
|
|
||||||
|
format_stream_keys(StreamKeys) ->
|
||||||
|
lists:map(
|
||||||
|
fun format_stream_key/1,
|
||||||
|
StreamKeys
|
||||||
|
).
|
||||||
|
|
||||||
|
format_lease_events(Events) ->
|
||||||
|
lists:map(
|
||||||
|
fun format_lease_event/1,
|
||||||
|
Events
|
||||||
|
).
|
||||||
|
|
||||||
|
format_lease_event(#{stream := Stream, progress := Progress} = Event) ->
|
||||||
|
Event#{stream => format_opaque(Stream), progress => format_progress(Progress)};
|
||||||
|
format_lease_event(#{stream := Stream} = Event) ->
|
||||||
|
Event#{stream => format_opaque(Stream)}.
|
||||||
|
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% Helpers
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
|
format_opaque(Opaque) ->
|
||||||
|
erlang:phash2(Opaque).
|
||||||
|
|
|
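The hunks above all follow one dispatch shape: when the destination pid is local (?is_local_leader / ?is_local_agent), the message is handed over directly, otherwise it goes through the versioned RPC module emqx_ds_shared_sub_proto_v1. A minimal standalone sketch of that shape, assuming a hypothetical send_to/2 helper and using erpc where the real code uses the versioned proto module:

-module(local_dispatch_sketch).
-export([send_to/2]).

%% Sketch only: direct send when the pid lives on this node, one network
%% hop otherwise. Not the module's actual API.
send_to(Dest, Msg) when is_pid(Dest), node(Dest) =:= node() ->
    _ = erlang:send(Dest, Msg),
    ok;
send_to(Dest, Msg) when is_pid(Dest) ->
    _ = erpc:call(node(Dest), erlang, send, [Dest, Msg]),
    ok.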
@@ -12,167 +12,146 @@

 %% agent messages, sent from agent side to the leader

--define(agent_connect_leader_msg, 1).
--define(agent_update_stream_states_msg, 2).
--define(agent_connect_leader_timeout_msg, 3).
--define(agent_renew_stream_lease_timeout_msg, 4).
--define(agent_disconnect_msg, 5).
+-define(agent_connect_leader_msg, agent_connect_leader).
+-define(agent_update_stream_states_msg, agent_update_stream_states).
+-define(agent_connect_leader_timeout_msg, agent_connect_leader_timeout).
+-define(agent_renew_stream_lease_timeout_msg, agent_renew_stream_lease_timeout).
+-define(agent_disconnect_msg, agent_disconnect).

-%% message keys (used not to send atoms over the network)
--define(agent_msg_type, 1).
--define(agent_msg_agent, 2).
--define(agent_msg_share_topic_filter, 3).
--define(agent_msg_agent_metadata, 4).
--define(agent_msg_stream_states, 5).
--define(agent_msg_version, 6).
--define(agent_msg_version_old, 7).
--define(agent_msg_version_new, 8).
-
 %% Agent messages sent to the leader.
 %% Leader talks to many agents, `agent` field is used to identify the sender.

 -define(agent_connect_leader(Agent, AgentMetadata, ShareTopicFilter), #{
-    ?agent_msg_type => ?agent_connect_leader_msg,
-    ?agent_msg_share_topic_filter => ShareTopicFilter,
-    ?agent_msg_agent_metadata => AgentMetadata,
-    ?agent_msg_agent => Agent
+    type => ?agent_connect_leader_msg,
+    share_topic_filter => ShareTopicFilter,
+    agent_metadata => AgentMetadata,
+    agent => Agent
 }).

 -define(agent_connect_leader_match(Agent, AgentMetadata, ShareTopicFilter), #{
-    ?agent_msg_type := ?agent_connect_leader_msg,
-    ?agent_msg_share_topic_filter := ShareTopicFilter,
-    ?agent_msg_agent_metadata := AgentMetadata,
-    ?agent_msg_agent := Agent
+    type := ?agent_connect_leader_msg,
+    share_topic_filter := ShareTopicFilter,
+    agent_metadata := AgentMetadata,
+    agent := Agent
 }).

 -define(agent_update_stream_states(Agent, StreamStates, Version), #{
-    ?agent_msg_type => ?agent_update_stream_states_msg,
-    ?agent_msg_stream_states => StreamStates,
-    ?agent_msg_version => Version,
-    ?agent_msg_agent => Agent
+    type => ?agent_update_stream_states_msg,
+    stream_states => StreamStates,
+    version => Version,
+    agent => Agent
 }).

 -define(agent_update_stream_states_match(Agent, StreamStates, Version), #{
-    ?agent_msg_type := ?agent_update_stream_states_msg,
-    ?agent_msg_stream_states := StreamStates,
-    ?agent_msg_version := Version,
-    ?agent_msg_agent := Agent
+    type := ?agent_update_stream_states_msg,
+    stream_states := StreamStates,
+    version := Version,
+    agent := Agent
 }).

 -define(agent_update_stream_states(Agent, StreamStates, VersionOld, VersionNew), #{
-    ?agent_msg_type => ?agent_update_stream_states_msg,
-    ?agent_msg_stream_states => StreamStates,
-    ?agent_msg_version_old => VersionOld,
-    ?agent_msg_version_new => VersionNew,
-    ?agent_msg_agent => Agent
+    type => ?agent_update_stream_states_msg,
+    stream_states => StreamStates,
+    version_old => VersionOld,
+    version_new => VersionNew,
+    agent => Agent
 }).

 -define(agent_update_stream_states_match(Agent, StreamStates, VersionOld, VersionNew), #{
-    ?agent_msg_type := ?agent_update_stream_states_msg,
-    ?agent_msg_stream_states := StreamStates,
-    ?agent_msg_version_old := VersionOld,
-    ?agent_msg_version_new := VersionNew,
-    ?agent_msg_agent := Agent
+    type := ?agent_update_stream_states_msg,
+    stream_states := StreamStates,
+    version_old := VersionOld,
+    version_new := VersionNew,
+    agent := Agent
 }).

 -define(agent_disconnect(Agent, StreamStates, Version), #{
-    ?agent_msg_type => ?agent_disconnect_msg,
-    ?agent_msg_stream_states => StreamStates,
-    ?agent_msg_version => Version,
-    ?agent_msg_agent => Agent
+    type => ?agent_disconnect_msg,
+    stream_states => StreamStates,
+    version => Version,
+    agent => Agent
 }).

 -define(agent_disconnect_match(Agent, StreamStates, Version), #{
-    ?agent_msg_type := ?agent_disconnect_msg,
-    ?agent_msg_stream_states := StreamStates,
-    ?agent_msg_version := Version,
-    ?agent_msg_agent := Agent
+    type := ?agent_disconnect_msg,
+    stream_states := StreamStates,
+    version := Version,
+    agent := Agent
 }).

 %% leader messages, sent from the leader to the agent
 %% Agent may have several shared subscriptions, so may talk to several leaders
 %% `group_id` field is used to identify the leader.

--define(leader_lease_streams_msg, 101).
--define(leader_renew_stream_lease_msg, 102).
--define(leader_update_streams, 103).
--define(leader_invalidate, 104).
+-define(leader_lease_streams_msg, leader_lease_streams).
+-define(leader_renew_stream_lease_msg, leader_renew_stream_lease).

--define(leader_msg_type, 101).
--define(leader_msg_streams, 102).
--define(leader_msg_version, 103).
--define(leader_msg_version_old, 104).
--define(leader_msg_version_new, 105).
--define(leader_msg_streams_new, 106).
--define(leader_msg_leader, 107).
--define(leader_msg_group_id, 108).
-
 -define(leader_lease_streams(GrouId, Leader, Streams, Version), #{
-    ?leader_msg_type => ?leader_lease_streams_msg,
-    ?leader_msg_streams => Streams,
-    ?leader_msg_version => Version,
-    ?leader_msg_leader => Leader,
-    ?leader_msg_group_id => GrouId
+    type => ?leader_lease_streams_msg,
+    streams => Streams,
+    version => Version,
+    leader => Leader,
+    group_id => GrouId
 }).

 -define(leader_lease_streams_match(GroupId, Leader, Streams, Version), #{
-    ?leader_msg_type := ?leader_lease_streams_msg,
-    ?leader_msg_streams := Streams,
-    ?leader_msg_version := Version,
-    ?leader_msg_leader := Leader,
-    ?leader_msg_group_id := GroupId
+    type := ?leader_lease_streams_msg,
+    streams := Streams,
+    version := Version,
+    leader := Leader,
+    group_id := GroupId
 }).

 -define(leader_renew_stream_lease(GroupId, Version), #{
-    ?leader_msg_type => ?leader_renew_stream_lease_msg,
-    ?leader_msg_version => Version,
-    ?leader_msg_group_id => GroupId
+    type => ?leader_renew_stream_lease_msg,
+    version => Version,
+    group_id => GroupId
 }).

 -define(leader_renew_stream_lease_match(GroupId, Version), #{
-    ?leader_msg_type := ?leader_renew_stream_lease_msg,
-    ?leader_msg_version := Version,
-    ?leader_msg_group_id := GroupId
+    type := ?leader_renew_stream_lease_msg,
+    version := Version,
+    group_id := GroupId
 }).

 -define(leader_renew_stream_lease(GroupId, VersionOld, VersionNew), #{
-    ?leader_msg_type => ?leader_renew_stream_lease_msg,
-    ?leader_msg_version_old => VersionOld,
-    ?leader_msg_version_new => VersionNew,
-    ?leader_msg_group_id => GroupId
+    type => ?leader_renew_stream_lease_msg,
+    version_old => VersionOld,
+    version_new => VersionNew,
+    group_id => GroupId
 }).

 -define(leader_renew_stream_lease_match(GroupId, VersionOld, VersionNew), #{
-    ?leader_msg_type := ?leader_renew_stream_lease_msg,
-    ?leader_msg_version_old := VersionOld,
-    ?leader_msg_version_new := VersionNew,
-    ?leader_msg_group_id := GroupId
+    type := ?leader_renew_stream_lease_msg,
+    version_old := VersionOld,
+    version_new := VersionNew,
+    group_id := GroupId
 }).

 -define(leader_update_streams(GroupId, VersionOld, VersionNew, StreamsNew), #{
-    ?leader_msg_type => ?leader_update_streams,
-    ?leader_msg_version_old => VersionOld,
-    ?leader_msg_version_new => VersionNew,
-    ?leader_msg_streams_new => StreamsNew,
-    ?leader_msg_group_id => GroupId
+    type => leader_update_streams,
+    version_old => VersionOld,
+    version_new => VersionNew,
+    streams_new => StreamsNew,
+    group_id => GroupId
 }).

 -define(leader_update_streams_match(GroupId, VersionOld, VersionNew, StreamsNew), #{
-    ?leader_msg_type := ?leader_update_streams,
-    ?leader_msg_version_old := VersionOld,
-    ?leader_msg_version_new := VersionNew,
-    ?leader_msg_streams_new := StreamsNew,
-    ?leader_msg_group_id := GroupId
+    type := leader_update_streams,
+    version_old := VersionOld,
+    version_new := VersionNew,
+    streams_new := StreamsNew,
+    group_id := GroupId
 }).

 -define(leader_invalidate(GroupId), #{
-    ?leader_msg_type => ?leader_invalidate,
-    ?leader_msg_group_id => GroupId
+    type => leader_invalidate,
+    group_id => GroupId
 }).

 -define(leader_invalidate_match(GroupId), #{
-    ?leader_msg_type := ?leader_invalidate,
-    ?leader_msg_group_id := GroupId
+    type := leader_invalidate,
+    group_id := GroupId
 }).

 %% Helpers
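The master side of this header swaps every atom map key and atom message type for a small integer, so that nothing atom-valued crosses the wire between nodes; receivers pattern-match on the integers via the *_match macros. A sketch of the two encodings of the same logical message, with illustrative values:

-module(wire_key_sketch).
-export([atom_keyed/1, int_keyed/1]).

%% Older encoding: readable, but keys and the type tag travel as atoms.
atom_keyed(Agent) ->
    #{type => agent_connect_leader, agent => Agent}.

%% Master encoding: ?agent_msg_type (1) maps to ?agent_connect_leader_msg (1),
%% ?agent_msg_agent (2) carries the sender; only integers name the fields.
int_keyed(Agent) ->
    #{1 => 1, 2 => Agent}.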
@@ -1,82 +0,0 @@
-%%--------------------------------------------------------------------
-%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
-%%--------------------------------------------------------------------
-
--module(emqx_ds_shared_sub_proto_format).
-
--include("emqx_ds_shared_sub_proto.hrl").
-
--export([format_agent_msg/1, format_leader_msg/1]).
-
-%%--------------------------------------------------------------------
-%% API
-%%--------------------------------------------------------------------
-
-format_agent_msg(Msg) ->
-    maps:from_list(
-        lists:map(
-            fun({K, V}) ->
-                FormattedKey = agent_msg_key(K),
-                {FormattedKey, format_agent_msg_value(FormattedKey, V)}
-            end,
-            maps:to_list(Msg)
-        )
-    ).
-
-format_leader_msg(Msg) ->
-    maps:from_list(
-        lists:map(
-            fun({K, V}) ->
-                FormattedKey = leader_msg_key(K),
-                {FormattedKey, format_leader_msg_value(FormattedKey, V)}
-            end,
-            maps:to_list(Msg)
-        )
-    ).
-
-%%--------------------------------------------------------------------
-%% Internal functions
-%%--------------------------------------------------------------------
-
-format_agent_msg_value(agent_msg_type, Type) ->
-    agent_msg_type(Type);
-format_agent_msg_value(agent_msg_stream_states, StreamStates) ->
-    emqx_persistent_session_ds_shared_subs:format_stream_progresses(StreamStates);
-format_agent_msg_value(_, Value) ->
-    Value.
-
-format_leader_msg_value(leader_msg_type, Type) ->
-    leader_msg_type(Type);
-format_leader_msg_value(leader_msg_streams, Streams) ->
-    emqx_persistent_session_ds_shared_subs:format_lease_events(Streams);
-format_leader_msg_value(_, Value) ->
-    Value.
-
-agent_msg_type(?agent_connect_leader_msg) -> agent_connect_leader_msg;
-agent_msg_type(?agent_update_stream_states_msg) -> agent_update_stream_states_msg;
-agent_msg_type(?agent_connect_leader_timeout_msg) -> agent_connect_leader_timeout_msg;
-agent_msg_type(?agent_renew_stream_lease_timeout_msg) -> agent_renew_stream_lease_timeout_msg;
-agent_msg_type(?agent_disconnect_msg) -> agent_disconnect_msg.
-
-agent_msg_key(?agent_msg_type) -> agent_msg_type;
-agent_msg_key(?agent_msg_agent) -> agent_msg_agent;
-agent_msg_key(?agent_msg_share_topic_filter) -> agent_msg_share_topic_filter;
-agent_msg_key(?agent_msg_agent_metadata) -> agent_msg_agent_metadata;
-agent_msg_key(?agent_msg_stream_states) -> agent_msg_stream_states;
-agent_msg_key(?agent_msg_version) -> agent_msg_version;
-agent_msg_key(?agent_msg_version_old) -> agent_msg_version_old;
-agent_msg_key(?agent_msg_version_new) -> agent_msg_version_new.
-
-leader_msg_type(?leader_lease_streams_msg) -> leader_lease_streams_msg;
-leader_msg_type(?leader_renew_stream_lease_msg) -> leader_renew_stream_lease_msg;
-leader_msg_type(?leader_update_streams) -> leader_update_streams;
-leader_msg_type(?leader_invalidate) -> leader_invalidate.
-
-leader_msg_key(?leader_msg_type) -> leader_msg_type;
-leader_msg_key(?leader_msg_streams) -> leader_msg_streams;
-leader_msg_key(?leader_msg_version) -> leader_msg_version;
-leader_msg_key(?leader_msg_version_old) -> leader_msg_version_old;
-leader_msg_key(?leader_msg_version_new) -> leader_msg_version_new;
-leader_msg_key(?leader_msg_streams_new) -> leader_msg_streams_new;
-leader_msg_key(?leader_msg_leader) -> leader_msg_leader;
-leader_msg_key(?leader_msg_group_id) -> leader_msg_group_id.
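This removed module exists only on the master side, as the readable counterpart of the integer-keyed encoding above: it translates integer keys back to descriptive atoms for traces, and opaque values (streams, iterators) are collapsed to a phash2 fingerprint elsewhere. A shell sketch of the intended use, assuming the key values from the header above (the session below is hypothetical):

%% Keys 1 and 2 follow the .hrl definitions above.
1> emqx_ds_shared_sub_proto_format:format_agent_msg(#{1 => 1, 2 => self()}).
#{agent_msg_type => agent_connect_leader_msg, agent_msg_agent => <0.84.0>}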
@@ -113,7 +113,7 @@ do_lookup_leader(Agent, AgentMetadata, ShareTopicFilter, State) ->
             Pid ->
                 Pid
         end,
-    ?SLOG(debug, #{
+    ?SLOG(info, #{
         msg => lookup_leader,
         agent => Agent,
         share_topic_filter => ShareTopicFilter,
@@ -417,7 +417,7 @@ t_lease_reconnect(_Config) ->

     ?assertWaitEvent(
         {ok, _, _} = emqtt:subscribe(ConnShared, <<"$share/gr2/topic2/#">>, 1),
-        #{?snk_kind := group_sm_find_leader_timeout},
+        #{?snk_kind := find_leader_timeout},
         5_000
     ),

@@ -56,7 +56,6 @@
     topic/0,
     batch/0,
     operation/0,
-    deletion/0,
     precondition/0,
     stream/0,
     delete_stream/0,

@@ -111,9 +110,7 @@
     message()
     %% Delete a message.
     %% Does nothing if the message does not exist.
-    | deletion().
+    | {delete, message_matcher('_')}.

--type deletion() :: {delete, message_matcher('_')}.
-
 %% Precondition.
 %% Fails whole batch if the storage already has the matching message (`if_exists'),
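Master's side of these hunks gives tombstones their own deletion() type inside operation(), so a single batch may mix writes and deletes. A hedged sketch of building such a delete operation (the #message_matcher{} record comes from emqx_message.hrl; the module and function names here are illustrative, not part of the diff):

-module(delete_batch_sketch).
-include_lib("emqx_utils/include/emqx_message.hrl").
-export([delete_one/4]).

%% Enqueue one tombstone; payload = '_' matches any payload, mirroring
%% the matcher semantics used by the precondition code further below.
delete_one(DB, ClientId, Topic, Ts) ->
    Matcher = #message_matcher{
        from = ClientId,
        topic = Topic,
        timestamp = Ts,
        payload = '_'
    },
    emqx_ds:store_batch(DB, [{delete, Matcher}], #{sync => true}).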
@@ -21,7 +21,7 @@
 -behaviour(gen_server).

 %% API:
--export([start_link/4, store_batch/3, shard_of_operation/3]).
+-export([start_link/4, store_batch/3, shard_of_message/3]).
 -export([ls/0]).

 %% behavior callbacks:

@@ -46,18 +46,19 @@
 -define(cbm(DB), {?MODULE, DB}).

 -record(enqueue_req, {
-    operations :: [emqx_ds:operation()],
+    messages :: [emqx_types:message()],
     sync :: boolean(),
-    n_operations :: non_neg_integer(),
+    atomic :: boolean(),
+    n_messages :: non_neg_integer(),
     payload_bytes :: non_neg_integer()
 }).

 -callback init_buffer(emqx_ds:db(), _Shard, _Options) -> {ok, _State}.

--callback flush_buffer(emqx_ds:db(), _Shard, [emqx_ds:operation()], State) ->
+-callback flush_buffer(emqx_ds:db(), _Shard, [emqx_types:message()], State) ->
     {State, ok | {error, recoverable | unrecoverable, _}}.

--callback shard_of_operation(emqx_ds:db(), emqx_ds:operation(), topic | clientid, _Options) ->
+-callback shard_of_message(emqx_ds:db(), emqx_types:message(), topic | clientid, _Options) ->
     _Shard.

 %%================================================================================

@@ -76,33 +77,39 @@ start_link(CallbackModule, CallbackOptions, DB, Shard) ->
         ?via(DB, Shard), ?MODULE, [CallbackModule, CallbackOptions, DB, Shard], []
     ).

--spec store_batch(emqx_ds:db(), [emqx_ds:operation()], emqx_ds:message_store_opts()) ->
+-spec store_batch(emqx_ds:db(), [emqx_types:message()], emqx_ds:message_store_opts()) ->
     emqx_ds:store_batch_result().
-store_batch(DB, Operations, Opts) ->
+store_batch(DB, Messages, Opts) ->
     Sync = maps:get(sync, Opts, true),
+    Atomic = maps:get(atomic, Opts, false),
     %% Usually we expect all messages in the batch to go into the
     %% single shard, so this function is optimized for the happy case.
-    case shards_of_batch(DB, Operations) of
-        [{Shard, {NOps, NBytes}}] ->
+    case shards_of_batch(DB, Messages) of
+        [{Shard, {NMsgs, NBytes}}] ->
             %% Happy case:
             enqueue_call_or_cast(
                 ?via(DB, Shard),
                 #enqueue_req{
-                    operations = Operations,
+                    messages = Messages,
                     sync = Sync,
-                    n_operations = NOps,
+                    atomic = Atomic,
+                    n_messages = NMsgs,
                     payload_bytes = NBytes
                 }
             );
+        [_, _ | _] when Atomic ->
+            %% It's impossible to commit a batch to multiple shards
+            %% atomically
+            {error, unrecoverable, atomic_commit_to_multiple_shards};
         _Shards ->
             %% Use a slower implementation for the unlikely case:
-            repackage_messages(DB, Operations, Sync)
+            repackage_messages(DB, Messages, Sync)
     end.

--spec shard_of_operation(emqx_ds:db(), emqx_ds:operation(), clientid | topic) -> _Shard.
-shard_of_operation(DB, Operation, ShardBy) ->
+-spec shard_of_message(emqx_ds:db(), emqx_types:message(), clientid | topic) -> _Shard.
+shard_of_message(DB, Message, ShardBy) ->
     {CBM, Options} = persistent_term:get(?cbm(DB)),
-    CBM:shard_of_operation(DB, Operation, ShardBy, Options).
+    CBM:shard_of_message(DB, Message, ShardBy, Options).

 %%================================================================================
 %% behavior callbacks

@@ -122,7 +129,7 @@ shard_of_operation(DB, Operation, ShardBy) ->
     n = 0 :: non_neg_integer(),
     n_bytes = 0 :: non_neg_integer(),
     tref :: undefined | reference(),
-    queue :: queue:queue(emqx_ds:operation()),
+    queue :: queue:queue(emqx_types:message()),
     pending_replies = [] :: [gen_server:from()]
 }).

@@ -161,29 +168,31 @@ format_status(Status) ->

 handle_call(
     #enqueue_req{
-        operations = Operations,
+        messages = Msgs,
         sync = Sync,
-        n_operations = NOps,
+        atomic = Atomic,
+        n_messages = NMsgs,
         payload_bytes = NBytes
     },
     From,
     S0 = #s{pending_replies = Replies0}
 ) ->
     S = S0#s{pending_replies = [From | Replies0]},
-    {noreply, enqueue(Sync, Operations, NOps, NBytes, S)};
+    {noreply, enqueue(Sync, Atomic, Msgs, NMsgs, NBytes, S)};
 handle_call(_Call, _From, S) ->
     {reply, {error, unknown_call}, S}.

 handle_cast(
     #enqueue_req{
-        operations = Operations,
+        messages = Msgs,
         sync = Sync,
-        n_operations = NOps,
+        atomic = Atomic,
+        n_messages = NMsgs,
         payload_bytes = NBytes
     },
     S
 ) ->
-    {noreply, enqueue(Sync, Operations, NOps, NBytes, S)};
+    {noreply, enqueue(Sync, Atomic, Msgs, NMsgs, NBytes, S)};
 handle_cast(_Cast, S) ->
     {noreply, S}.

@@ -206,10 +215,11 @@ terminate(_Reason, #s{db = DB}) ->

 enqueue(
     Sync,
-    Ops,
+    Atomic,
+    Msgs,
     BatchSize,
     BatchBytes,
-    S0 = #s{n = NOps0, n_bytes = NBytes0, queue = Q0}
+    S0 = #s{n = NMsgs0, n_bytes = NBytes0, queue = Q0}
 ) ->
     %% At this point we don't split the batches, even when they aren't
     %% atomic. It wouldn't win us anything in terms of memory, and

@@ -217,18 +227,18 @@ enqueue(
     %% granularity should be fine enough.
     NMax = application:get_env(emqx_durable_storage, egress_batch_size, 1000),
     NBytesMax = application:get_env(emqx_durable_storage, egress_batch_bytes, infinity),
-    NMsgs = NOps0 + BatchSize,
+    NMsgs = NMsgs0 + BatchSize,
     NBytes = NBytes0 + BatchBytes,
-    case (NMsgs >= NMax orelse NBytes >= NBytesMax) andalso (NOps0 > 0) of
+    case (NMsgs >= NMax orelse NBytes >= NBytesMax) andalso (NMsgs0 > 0) of
         true ->
             %% Adding this batch would cause buffer to overflow. Flush
             %% it now, and retry:
             S1 = flush(S0),
-            enqueue(Sync, Ops, BatchSize, BatchBytes, S1);
+            enqueue(Sync, Atomic, Msgs, BatchSize, BatchBytes, S1);
         false ->
             %% The buffer is empty, we enqueue the atomic batch in its
             %% entirety:
-            Q1 = lists:foldl(fun queue:in/2, Q0, Ops),
+            Q1 = lists:foldl(fun queue:in/2, Q0, Msgs),
             S1 = S0#s{n = NMsgs, n_bytes = NBytes, queue = Q1},
             case NMsgs >= NMax orelse NBytes >= NBytesMax of
                 true ->

@@ -326,18 +336,18 @@ do_flush(
             }
     end.

--spec shards_of_batch(emqx_ds:db(), [emqx_ds:operation()]) ->
+-spec shards_of_batch(emqx_ds:db(), [emqx_types:message()]) ->
     [{_ShardId, {NMessages, NBytes}}]
 when
     NMessages :: non_neg_integer(),
     NBytes :: non_neg_integer().
-shards_of_batch(DB, Batch) ->
+shards_of_batch(DB, Messages) ->
     maps:to_list(
         lists:foldl(
-            fun(Operation, Acc) ->
+            fun(Message, Acc) ->
                 %% TODO: sharding strategy must be part of the DS DB schema:
-                Shard = shard_of_operation(DB, Operation, clientid),
-                Size = payload_size(Operation),
+                Shard = shard_of_message(DB, Message, clientid),
+                Size = payload_size(Message),
                 maps:update_with(
                     Shard,
                     fun({N, S}) ->

@@ -348,35 +358,36 @@ shards_of_batch(DB, Batch) ->
                 )
             end,
             #{},
-            Batch
+            Messages
         )
     ).

-repackage_messages(DB, Batch, Sync) ->
+repackage_messages(DB, Messages, Sync) ->
     Batches = lists:foldl(
-        fun(Operation, Acc) ->
-            Shard = shard_of_operation(DB, Operation, clientid),
-            Size = payload_size(Operation),
+        fun(Message, Acc) ->
+            Shard = shard_of_message(DB, Message, clientid),
+            Size = payload_size(Message),
             maps:update_with(
                 Shard,
                 fun({N, S, Msgs}) ->
-                    {N + 1, S + Size, [Operation | Msgs]}
+                    {N + 1, S + Size, [Message | Msgs]}
                 end,
-                {1, Size, [Operation]},
+                {1, Size, [Message]},
                 Acc
             )
         end,
        #{},
-        Batch
+        Messages
    ),
    maps:fold(
-        fun(Shard, {NOps, ByteSize, RevOperations}, ErrAcc) ->
+        fun(Shard, {NMsgs, ByteSize, RevMessages}, ErrAcc) ->
            Err = enqueue_call_or_cast(
                ?via(DB, Shard),
                #enqueue_req{
-                    operations = lists:reverse(RevOperations),
+                    messages = lists:reverse(RevMessages),
                    sync = Sync,
-                    n_operations = NOps,
+                    atomic = false,
+                    n_messages = NMsgs,
                    payload_bytes = ByteSize
                }
            ),

@@ -416,6 +427,4 @@ cancel_timer(S = #s{tref = TRef}) ->
 %% @doc Return approximate size of the MQTT message (it doesn't take
 %% all things into account, for example headers and extras)
 payload_size(#message{payload = P, topic = T}) ->
-    size(P) + size(T);
-payload_size({_OpName, _}) ->
-    0.
+    size(P) + size(T).
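Both shards_of_batch/2 and repackage_messages/3 above rely on the same fold: bucket items per shard while accumulating a count and byte size, so the caller can tell the single-shard happy case apart from the multi-shard one. A condensed, self-contained sketch of that accumulation (ShardOf stands in for shard_of_message/3; the byte-size estimate here is illustrative):

-module(shard_bucket_sketch).
-export([buckets/2]).

%% Returns [{Shard, {Count, Bytes}}] for a list of items.
buckets(Items, ShardOf) ->
    maps:to_list(
        lists:foldl(
            fun(Item, Acc) ->
                Shard = ShardOf(Item),
                Size = byte_size(term_to_binary(Item)),
                maps:update_with(
                    Shard,
                    fun({N, S}) -> {N + 1, S + Size} end,
                    {1, Size},
                    Acc
                )
            end,
            #{},
            Items
        )
    ).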
@@ -1,184 +0,0 @@
-%%--------------------------------------------------------------------
-%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%%     http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%--------------------------------------------------------------------
-
--module(emqx_ds_precondition).
--include_lib("emqx_utils/include/emqx_message.hrl").
--include_lib("emqx_durable_storage/include/emqx_ds.hrl").
-
--export([verify/3]).
--export([matches/2]).
-
--export_type([matcher/0, mismatch/0]).
-
--type matcher() :: #message_matcher{}.
--type mismatch() :: emqx_types:message() | not_found.
-
--callback lookup_message(_Ctx, matcher()) ->
-    emqx_types:message() | not_found | emqx_ds:error(_).
-
-%%
-
--spec verify(module(), _Ctx, [emqx_ds:precondition()]) ->
-    ok | {precondition_failed, mismatch()} | emqx_ds:error(_).
-verify(Mod, Ctx, [_Precondition = {Cond, Msg} | Rest]) ->
-    case verify_precondition(Mod, Ctx, Cond, Msg) of
-        ok ->
-            verify(Mod, Ctx, Rest);
-        Failed ->
-            Failed
-    end;
-verify(_Mod, _Ctx, []) ->
-    ok.
-
-verify_precondition(Mod, Ctx, if_exists, Matcher) ->
-    case Mod:lookup_message(Ctx, Matcher) of
-        Msg = #message{} ->
-            verify_match(Msg, Matcher);
-        not_found ->
-            {precondition_failed, not_found};
-        Error = {error, _, _} ->
-            Error
-    end;
-verify_precondition(Mod, Ctx, unless_exists, Matcher) ->
-    case Mod:lookup_message(Ctx, Matcher) of
-        Msg = #message{} ->
-            verify_nomatch(Msg, Matcher);
-        not_found ->
-            ok;
-        Error = {error, _, _} ->
-            Error
-    end.
-
-verify_match(Msg, Matcher) ->
-    case matches(Msg, Matcher) of
-        true -> ok;
-        false -> {precondition_failed, Msg}
-    end.
-
-verify_nomatch(Msg, Matcher) ->
-    case matches(Msg, Matcher) of
-        false -> ok;
-        true -> {precondition_failed, Msg}
-    end.
-
--spec matches(emqx_types:message(), matcher()) -> boolean().
-matches(
-    Message,
-    #message_matcher{from = From, topic = Topic, payload = Pat, headers = Headers}
-) ->
-    case Message of
-        #message{from = From, topic = Topic} when Pat =:= '_' ->
-            matches_headers(Message, Headers);
-        #message{from = From, topic = Topic, payload = Pat} ->
-            matches_headers(Message, Headers);
-        _ ->
-            false
-    end.
-
-matches_headers(_Message, MatchHeaders) when map_size(MatchHeaders) =:= 0 ->
-    true;
-matches_headers(#message{headers = Headers}, MatchHeaders) ->
-    maps:intersect(MatchHeaders, Headers) =:= MatchHeaders.
-
-%% Basic tests
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
--compile(export_all).
-
-conjunction_test() ->
-    %% Contradictory preconditions, always false.
-    Preconditions = [
-        {if_exists, matcher(<<"c1">>, <<"t/1">>, 0, '_')},
-        {unless_exists, matcher(<<"c1">>, <<"t/1">>, 0, '_')}
-    ],
-    ?assertEqual(
-        {precondition_failed, not_found},
-        verify(?MODULE, [], Preconditions)
-    ),
-    %% Check that the order does not matter.
-    ?assertEqual(
-        {precondition_failed, not_found},
-        verify(?MODULE, [], lists:reverse(Preconditions))
-    ),
-    ?assertEqual(
-        {precondition_failed, message(<<"c1">>, <<"t/1">>, 0, <<>>)},
-        verify(
-            ?MODULE,
-            [message(<<"c1">>, <<"t/1">>, 0, <<>>)],
-            Preconditions
-        )
-    ).
-
-matches_test() ->
-    ?assert(
-        matches(
-            message(<<"mtest1">>, <<"t/same">>, 12345, <<?MODULE_STRING>>),
-            matcher(<<"mtest1">>, <<"t/same">>, 12345, '_')
-        )
-    ).
-
-matches_headers_test() ->
-    ?assert(
-        matches(
-            message(<<"mtest2">>, <<"t/same">>, 23456, <<?MODULE_STRING>>, #{h1 => 42, h2 => <<>>}),
-            matcher(<<"mtest2">>, <<"t/same">>, 23456, '_', #{h2 => <<>>})
-        )
-    ).
-
-mismatches_headers_test() ->
-    ?assertNot(
-        matches(
-            message(<<"mtest3">>, <<"t/same">>, 23456, <<?MODULE_STRING>>, #{h1 => 42, h2 => <<>>}),
-            matcher(<<"mtest3">>, <<"t/same">>, 23456, '_', #{h2 => <<>>, h3 => <<"required">>})
-        )
-    ).
-
-matcher(ClientID, Topic, TS, Payload) ->
-    matcher(ClientID, Topic, TS, Payload, #{}).
-
-matcher(ClientID, Topic, TS, Payload, Headers) ->
-    #message_matcher{
-        from = ClientID,
-        topic = Topic,
-        timestamp = TS,
-        payload = Payload,
-        headers = Headers
-    }.
-
-message(ClientID, Topic, TS, Payload) ->
-    message(ClientID, Topic, TS, Payload, #{}).
-
-message(ClientID, Topic, TS, Payload, Headers) ->
-    #message{
-        id = <<>>,
-        qos = 0,
-        from = ClientID,
-        topic = Topic,
-        timestamp = TS,
-        payload = Payload,
-        headers = Headers
-    }.
-
-lookup_message(Messages, Matcher) ->
-    case lists:search(fun(M) -> matches(M, Matcher) end, Messages) of
-        {value, Message} ->
-            Message;
-        false ->
-            not_found
-    end.
-
--endif.
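The removed module keeps precondition checking storage-agnostic: verify/3 folds over {if_exists | unless_exists, Matcher} pairs, asking the callback module to look up one candidate message per matcher, and matches/2 treats payload '_' as a wildcard while requiring matcher headers to be a subset of the message headers. Since the module carries its own eunit suite above, one shell sketch is enough to show the calling convention (valid only in a TEST build, where the module serves as its own lookup callback; Msg and Matcher are assumed bound):

%% Hypothetical session: Msg matches Matcher, so unless_exists fails.
1> emqx_ds_precondition:verify(emqx_ds_precondition, [Msg], [{unless_exists, Matcher}]).
{precondition_failed, Msg}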
@ -37,7 +37,6 @@
|
||||||
update_iterator/4,
|
update_iterator/4,
|
||||||
next/6,
|
next/6,
|
||||||
delete_next/7,
|
delete_next/7,
|
||||||
lookup_message/3,
|
|
||||||
|
|
||||||
handle_event/4
|
handle_event/4
|
||||||
]).
|
]).
|
||||||
|
@ -47,7 +46,6 @@
|
||||||
|
|
||||||
-export_type([options/0]).
|
-export_type([options/0]).
|
||||||
|
|
||||||
-include("emqx_ds.hrl").
|
|
||||||
-include("emqx_ds_metrics.hrl").
|
-include("emqx_ds_metrics.hrl").
|
||||||
-include_lib("emqx_utils/include/emqx_message.hrl").
|
-include_lib("emqx_utils/include/emqx_message.hrl").
|
||||||
-include_lib("snabbkaffe/include/trace.hrl").
|
-include_lib("snabbkaffe/include/trace.hrl").
|
||||||
|
@ -70,13 +68,10 @@
|
||||||
-define(start_time, 3).
|
-define(start_time, 3).
|
||||||
-define(storage_key, 4).
|
-define(storage_key, 4).
|
||||||
-define(last_seen_key, 5).
|
-define(last_seen_key, 5).
|
||||||
-define(cooked_msg_ops, 6).
|
-define(cooked_payloads, 6).
|
||||||
-define(cooked_lts_ops, 7).
|
-define(cooked_lts_ops, 7).
|
||||||
-define(cooked_ts, 8).
|
-define(cooked_ts, 8).
|
||||||
|
|
||||||
%% atoms:
|
|
||||||
-define(delete, 100).
|
|
||||||
|
|
||||||
-type options() ::
|
-type options() ::
|
||||||
#{
|
#{
|
||||||
bits_per_wildcard_level => pos_integer(),
|
bits_per_wildcard_level => pos_integer(),
|
||||||
|
@ -115,7 +110,7 @@
|
||||||
|
|
||||||
-type cooked_batch() ::
|
-type cooked_batch() ::
|
||||||
#{
|
#{
|
||||||
?cooked_msg_ops := [{binary(), binary() | ?delete}],
|
?cooked_payloads := [{binary(), binary()}],
|
||||||
?cooked_lts_ops := [{binary(), binary()}],
|
?cooked_lts_ops := [{binary(), binary()}],
|
||||||
?cooked_ts := integer()
|
?cooked_ts := integer()
|
||||||
}.
|
}.
|
||||||
|
@ -276,28 +271,24 @@ drop(_Shard, DBHandle, GenId, CFRefs, #s{trie = Trie, gvars = GVars}) ->
|
||||||
-spec prepare_batch(
|
-spec prepare_batch(
|
||||||
emqx_ds_storage_layer:shard_id(),
|
emqx_ds_storage_layer:shard_id(),
|
||||||
s(),
|
s(),
|
||||||
emqx_ds_storage_layer:batch(),
|
[{emqx_ds:time(), emqx_types:message()}, ...],
|
||||||
emqx_ds_storage_layer:batch_store_opts()
|
emqx_ds_storage_layer:batch_store_opts()
|
||||||
) ->
|
) ->
|
||||||
{ok, cooked_batch()}.
|
{ok, cooked_batch()}.
|
||||||
prepare_batch(_ShardId, S, Batch, _Options) ->
|
prepare_batch(_ShardId, S, Messages, _Options) ->
|
||||||
_ = erase(?lts_persist_ops),
|
_ = erase(?lts_persist_ops),
|
||||||
{Operations, MaxTs} =
|
{Payloads, MaxTs} =
|
||||||
lists:mapfoldl(
|
lists:mapfoldl(
|
||||||
fun
|
fun({Timestamp, Msg}, Acc) ->
|
||||||
({Timestamp, Msg = #message{topic = Topic}}, Acc) ->
|
{Key, _} = make_key(S, Timestamp, Msg),
|
||||||
{Key, _} = make_key(S, Timestamp, Topic),
|
Payload = {Key, message_to_value_v1(Msg)},
|
||||||
Op = {Key, message_to_value_v1(Msg)},
|
{Payload, max(Acc, Timestamp)}
|
||||||
{Op, max(Acc, Timestamp)};
|
|
||||||
({delete, #message_matcher{topic = Topic, timestamp = Timestamp}}, Acc) ->
|
|
||||||
{Key, _} = make_key(S, Timestamp, Topic),
|
|
||||||
{_Op = {Key, ?delete}, Acc}
|
|
||||||
end,
|
end,
|
||||||
0,
|
0,
|
||||||
Batch
|
Messages
|
||||||
),
|
),
|
||||||
{ok, #{
|
{ok, #{
|
||||||
?cooked_msg_ops => Operations,
|
?cooked_payloads => Payloads,
|
||||||
?cooked_lts_ops => pop_lts_persist_ops(),
|
?cooked_lts_ops => pop_lts_persist_ops(),
|
||||||
?cooked_ts => MaxTs
|
?cooked_ts => MaxTs
|
||||||
}}.
|
}}.
|
||||||
|
@ -311,7 +302,7 @@ prepare_batch(_ShardId, S, Batch, _Options) ->
|
||||||
commit_batch(
|
commit_batch(
|
||||||
_ShardId,
|
_ShardId,
|
||||||
_Data,
|
_Data,
|
||||||
#{?cooked_msg_ops := [], ?cooked_lts_ops := LTS},
|
#{?cooked_payloads := [], ?cooked_lts_ops := LTS},
|
||||||
_Options
|
_Options
|
||||||
) ->
|
) ->
|
||||||
%% Assert:
|
%% Assert:
|
||||||
|
@ -320,7 +311,7 @@ commit_batch(
|
||||||
commit_batch(
|
commit_batch(
|
||||||
_ShardId,
|
_ShardId,
|
||||||
#s{db = DB, data = DataCF, trie = Trie, trie_cf = TrieCF, gvars = Gvars},
|
#s{db = DB, data = DataCF, trie = Trie, trie_cf = TrieCF, gvars = Gvars},
|
||||||
#{?cooked_lts_ops := LtsOps, ?cooked_msg_ops := Operations, ?cooked_ts := MaxTs},
|
#{?cooked_lts_ops := LtsOps, ?cooked_payloads := Payloads, ?cooked_ts := MaxTs},
|
||||||
Options
|
Options
|
||||||
) ->
|
) ->
|
||||||
{ok, Batch} = rocksdb:batch(),
|
{ok, Batch} = rocksdb:batch(),
|
||||||
|
@ -335,13 +326,10 @@ commit_batch(
|
||||||
_ = emqx_ds_lts:trie_update(Trie, LtsOps),
|
_ = emqx_ds_lts:trie_update(Trie, LtsOps),
|
||||||
%% Commit payloads:
|
%% Commit payloads:
|
||||||
lists:foreach(
|
lists:foreach(
|
||||||
fun
|
fun({Key, Val}) ->
|
||||||
({Key, Val}) when is_tuple(Val) ->
|
ok = rocksdb:batch_put(Batch, DataCF, Key, term_to_binary(Val))
|
||||||
ok = rocksdb:batch_put(Batch, DataCF, Key, term_to_binary(Val));
|
|
||||||
({Key, ?delete}) ->
|
|
||||||
ok = rocksdb:batch_delete(Batch, DataCF, Key)
|
|
||||||
end,
|
end,
|
||||||
Operations
|
Payloads
|
||||||
),
|
),
|
||||||
Result = rocksdb:write_batch(DB, Batch, write_batch_opts(Options)),
|
Result = rocksdb:write_batch(DB, Batch, write_batch_opts(Options)),
|
||||||
rocksdb:release_batch(Batch),
|
rocksdb:release_batch(Batch),
|
||||||
|
@ -568,23 +556,6 @@ delete_next_until(
|
||||||
rocksdb:iterator_close(ITHandle)
|
rocksdb:iterator_close(ITHandle)
|
||||||
end.
|
end.
|
||||||
|
|
||||||
-spec lookup_message(emqx_ds_storage_layer:shard_id(), s(), emqx_ds_precondition:matcher()) ->
|
|
||||||
emqx_types:message() | not_found | emqx_ds:error(_).
|
|
||||||
lookup_message(
|
|
||||||
_ShardId,
|
|
||||||
S = #s{db = DB, data = CF},
|
|
||||||
#message_matcher{topic = Topic, timestamp = Timestamp}
|
|
||||||
) ->
|
|
||||||
{Key, _} = make_key(S, Timestamp, Topic),
|
|
||||||
case rocksdb:get(DB, CF, Key, _ReadOpts = []) of
|
|
||||||
{ok, Blob} ->
|
|
||||||
deserialize(Blob);
|
|
||||||
not_found ->
|
|
||||||
not_found;
|
|
||||||
Error ->
|
|
||||||
{error, unrecoverable, {rocksdb, Error}}
|
|
||||||
end.
|
|
||||||
|
|
||||||
handle_event(_ShardId, State = #s{gvars = Gvars}, Time, tick) ->
|
handle_event(_ShardId, State = #s{gvars = Gvars}, Time, tick) ->
|
||||||
%% If the last message was published more than one epoch ago, and
|
%% If the last message was published more than one epoch ago, and
|
||||||
%% the shard remains idle, we need to advance safety cutoff
|
%% the shard remains idle, we need to advance safety cutoff
|
||||||
|
@ -840,9 +811,9 @@ format_key(KeyMapper, Key) ->
|
||||||
Vec = [integer_to_list(I, 16) || I <- emqx_ds_bitmask_keymapper:key_to_vector(KeyMapper, Key)],
|
Vec = [integer_to_list(I, 16) || I <- emqx_ds_bitmask_keymapper:key_to_vector(KeyMapper, Key)],
|
||||||
lists:flatten(io_lib:format("~.16B (~s)", [Key, string:join(Vec, ",")])).
|
lists:flatten(io_lib:format("~.16B (~s)", [Key, string:join(Vec, ",")])).
|
||||||
|
|
||||||
-spec make_key(s(), emqx_ds:time(), emqx_types:topic()) -> {binary(), [binary()]}.
|
-spec make_key(s(), emqx_ds:time(), emqx_types:message()) -> {binary(), [binary()]}.
|
||||||
make_key(#s{keymappers = KeyMappers, trie = Trie}, Timestamp, Topic) ->
|
make_key(#s{keymappers = KeyMappers, trie = Trie}, Timestamp, #message{topic = TopicBin}) ->
|
||||||
Tokens = emqx_topic:words(Topic),
|
Tokens = emqx_topic:words(TopicBin),
|
||||||
{TopicIndex, Varying} = emqx_ds_lts:topic_key(Trie, fun threshold_fun/1, Tokens),
|
{TopicIndex, Varying} = emqx_ds_lts:topic_key(Trie, fun threshold_fun/1, Tokens),
|
||||||
VaryingHashes = [hash_topic_level(I) || I <- Varying],
|
VaryingHashes = [hash_topic_level(I) || I <- Varying],
|
||||||
KeyMapper = array:get(length(Varying), KeyMappers),
|
KeyMapper = array:get(length(Varying), KeyMappers),
|
||||||
|
|
|
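On the master side, deletes travel through the same cooked-batch pipeline as writes: prepare_batch keys both by make_key/3 and marks deletes with the ?delete integer (100), and commit_batch turns that marker into rocksdb:batch_delete instead of batch_put. A reduced sketch of that split, assuming an open write batch and column family handle (the module and function below are illustrative):

-module(tombstone_sketch).
-export([apply_op/3]).

%% Val is either a serialized message tuple or the ?delete marker (100).
apply_op(Batch, DataCF, {Key, Val}) when is_tuple(Val) ->
    ok = rocksdb:batch_put(Batch, DataCF, Key, term_to_binary(Val));
apply_op(Batch, DataCF, {Key, 100}) ->
    ok = rocksdb:batch_delete(Batch, DataCF, Key).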
@ -37,9 +37,6 @@
|
||||||
next/4,
|
next/4,
|
||||||
delete_next/5,
|
delete_next/5,
|
||||||
|
|
||||||
%% Preconditions
|
|
||||||
lookup_message/2,
|
|
||||||
|
|
||||||
%% Generations
|
%% Generations
|
||||||
update_config/3,
|
update_config/3,
|
||||||
add_generation/2,
|
add_generation/2,
|
||||||
|
@ -64,7 +61,6 @@
|
||||||
-export_type([
|
-export_type([
|
||||||
gen_id/0,
|
gen_id/0,
|
||||||
generation/0,
|
generation/0,
|
||||||
batch/0,
|
|
||||||
cf_refs/0,
|
cf_refs/0,
|
||||||
stream/0,
|
stream/0,
|
||||||
delete_stream/0,
|
delete_stream/0,
|
||||||
|
@ -78,7 +74,6 @@
|
||||||
batch_store_opts/0
|
batch_store_opts/0
|
||||||
]).
|
]).
|
||||||
|
|
||||||
-include("emqx_ds.hrl").
|
|
||||||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||||
|
|
||||||
-define(REF(ShardId), {via, gproc, {n, l, {?MODULE, ShardId}}}).
|
-define(REF(ShardId), {via, gproc, {n, l, {?MODULE, ShardId}}}).
|
||||||
|
@ -120,11 +115,6 @@
|
||||||
|
|
||||||
-type gen_id() :: 0..16#ffff.
|
-type gen_id() :: 0..16#ffff.
|
||||||
|
|
||||||
-type batch() :: [
|
|
||||||
{emqx_ds:time(), emqx_types:message()}
|
|
||||||
| emqx_ds:deletion()
|
|
||||||
].
|
|
||||||
|
|
||||||
%% Options affecting how batches should be stored.
|
%% Options affecting how batches should be stored.
|
||||||
%% See also: `emqx_ds:message_store_opts()'.
|
%% See also: `emqx_ds:message_store_opts()'.
|
||||||
-type batch_store_opts() ::
|
-type batch_store_opts() ::
|
||||||
|
@ -304,10 +294,6 @@
|
||||||
| {ok, end_of_stream}
|
| {ok, end_of_stream}
|
||||||
| emqx_ds:error(_).
|
| emqx_ds:error(_).
|
||||||
|
|
||||||
%% Lookup a single message, for preconditions to work.
|
|
||||||
-callback lookup_message(shard_id(), generation_data(), emqx_ds_precondition:matcher()) ->
|
|
||||||
emqx_types:message() | not_found | emqx_ds:error(_).
|
|
||||||
|
|
||||||
-callback handle_event(shard_id(), generation_data(), emqx_ds:time(), CustomEvent | tick) ->
|
-callback handle_event(shard_id(), generation_data(), emqx_ds:time(), CustomEvent | tick) ->
|
||||||
[CustomEvent].
|
[CustomEvent].
|
||||||
|
|
||||||
|
@@ -331,10 +317,14 @@ drop_shard(Shard) ->
 
 %% @doc This is a convenience wrapper that combines `prepare' and
 %% `commit' operations.
--spec store_batch(shard_id(), batch(), batch_store_opts()) ->
+-spec store_batch(
+    shard_id(),
+    [{emqx_ds:time(), emqx_types:message()}],
+    batch_store_opts()
+) ->
     emqx_ds:store_batch_result().
-store_batch(Shard, Batch, Options) ->
-    case prepare_batch(Shard, Batch, #{}) of
+store_batch(Shard, Messages, Options) ->
+    case prepare_batch(Shard, Messages, #{}) of
         {ok, CookedBatch} ->
             commit_batch(Shard, CookedBatch, Options);
         ignore ->
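On the master side, `batch()` admits deletions as well as writes; a minimal caller sketch (assuming `Shard` is a bound `shard_id()` and using the records from `emqx_utils/include/emqx_message.hrl`):

    %% Sketch only: one write plus one deletion in a single batch, as the
    %% master-side batch() type allows. Shard is assumed to be bound.
    Batch = [
        {100, #message{id = <<>>, topic = <<"t/1">>, timestamp = 100, payload = <<"M1">>}},
        {delete, #message_matcher{topic = <<"t/1">>, timestamp = 100, payload = '_'}}
    ],
    ok = emqx_ds_storage_layer:store_batch(Shard, Batch, #{}).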
@@ -352,21 +342,23 @@ store_batch(Shard, Batch, Options) ->
 %%
 %% The underlying storage layout MAY use timestamp as a unique message
 %% ID.
--spec prepare_batch(shard_id(), batch(), batch_prepare_opts()) ->
-    {ok, cooked_batch()} | ignore | emqx_ds:error(_).
-prepare_batch(Shard, Batch, Options) ->
+-spec prepare_batch(
+    shard_id(),
+    [{emqx_ds:time(), emqx_types:message()}],
+    batch_prepare_opts()
+) -> {ok, cooked_batch()} | ignore | emqx_ds:error(_).
+prepare_batch(Shard, Messages = [{Time, _} | _], Options) ->
     %% NOTE
     %% We assume that batches do not span generations. Callers should enforce this.
     ?tp(emqx_ds_storage_layer_prepare_batch, #{
-        shard => Shard, batch => Batch, options => Options
+        shard => Shard, messages => Messages, options => Options
     }),
     %% FIXME: always store messages in the current generation
-    Time = batch_starts_at(Batch),
-    case is_integer(Time) andalso generation_at(Shard, Time) of
+    case generation_at(Shard, Time) of
         {GenId, #{module := Mod, data := GenData}} ->
             T0 = erlang:monotonic_time(microsecond),
             Result =
-                case Mod:prepare_batch(Shard, GenData, Batch, Options) of
+                case Mod:prepare_batch(Shard, GenData, Messages, Options) of
                     {ok, CookedBatch} ->
                         {ok, #{?tag => ?COOKED_BATCH, ?generation => GenId, ?enc => CookedBatch}};
                     Error = {error, _, _} ->
@@ -376,21 +368,11 @@ prepare_batch(Shard, Batch, Options) ->
             %% TODO store->prepare
             emqx_ds_builtin_metrics:observe_store_batch_time(Shard, T1 - T0),
             Result;
-        false ->
-            %% No write operations in this batch.
-            ignore;
         not_found ->
-            %% Generation is likely already GCed.
             ignore
-    end.
-
--spec batch_starts_at(batch()) -> emqx_ds:time() | undefined.
-batch_starts_at([{Time, _Message} | _]) when is_integer(Time) ->
-    Time;
-batch_starts_at([{delete, #message_matcher{timestamp = Time}} | _]) ->
-    Time;
-batch_starts_at([]) ->
-    undefined.
+    end;
+prepare_batch(_Shard, [], _Options) ->
+    ignore.
 
 %% @doc Commit cooked batch to the storage.
 %%
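The removed `batch_starts_at/1` helper peeks at the first operation to pick the target generation, for writes and deletions alike; an illustrative sketch of its three clauses (values are placeholders):

    %% Illustrative only; mirrors the three master-side clauses removed above.
    undefined = batch_starts_at([]),
    100 = batch_starts_at([{100, #message{topic = <<"t">>, timestamp = 100}}]),
    200 = batch_starts_at([{delete, #message_matcher{topic = <<"t">>, timestamp = 200}}]).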
@@ -577,16 +559,6 @@ update_config(ShardId, Since, Options) ->
 add_generation(ShardId, Since) ->
     gen_server:call(?REF(ShardId), #call_add_generation{since = Since}, infinity).
 
--spec lookup_message(shard_id(), emqx_ds_precondition:matcher()) ->
-    emqx_types:message() | not_found | emqx_ds:error(_).
-lookup_message(ShardId, Matcher = #message_matcher{timestamp = Time}) ->
-    case generation_at(ShardId, Time) of
-        {_GenId, #{module := Mod, data := GenData}} ->
-            Mod:lookup_message(ShardId, GenData, Matcher);
-        not_found ->
-            not_found
-    end.
-
 -spec list_generations_with_lifetimes(shard_id()) ->
     #{
         gen_id() => #{
@@ -21,8 +21,6 @@
 %% used for testing.
 -module(emqx_ds_storage_reference).
 
--include("emqx_ds.hrl").
-
 -behaviour(emqx_ds_storage_layer).
 
 %% API:

@@ -41,8 +39,7 @@
     make_delete_iterator/5,
     update_iterator/4,
     next/6,
-    delete_next/7,
-    lookup_message/3
+    delete_next/7
 ]).
 
 %% internal exports:

@@ -52,8 +49,6 @@
 
 -include_lib("emqx_utils/include/emqx_message.hrl").
 
--define(DB_KEY(TIMESTAMP), <<TIMESTAMP:64>>).
-
 %%================================================================================
 %% Type declarations
 %%================================================================================

@@ -107,22 +102,23 @@ drop(_ShardId, DBHandle, _GenId, _CFRefs, #s{cf = CFHandle}) ->
     ok = rocksdb:drop_column_family(DBHandle, CFHandle),
     ok.
 
-prepare_batch(_ShardId, _Data, Batch, _Options) ->
-    {ok, Batch}.
+prepare_batch(_ShardId, _Data, Messages, _Options) ->
+    {ok, Messages}.
 
-commit_batch(_ShardId, S = #s{db = DB}, Batch, Options) ->
-    {ok, BatchHandle} = rocksdb:batch(),
-    lists:foreach(fun(Op) -> process_batch_operation(S, Op, BatchHandle) end, Batch),
-    Res = rocksdb:write_batch(DB, BatchHandle, write_batch_opts(Options)),
-    rocksdb:release_batch(BatchHandle),
+commit_batch(_ShardId, #s{db = DB, cf = CF}, Messages, Options) ->
+    {ok, Batch} = rocksdb:batch(),
+    lists:foreach(
+        fun({TS, Msg}) ->
+            Key = <<TS:64>>,
+            Val = term_to_binary(Msg),
+            rocksdb:batch_put(Batch, CF, Key, Val)
+        end,
+        Messages
+    ),
+    Res = rocksdb:write_batch(DB, Batch, write_batch_opts(Options)),
+    rocksdb:release_batch(Batch),
     Res.
 
-process_batch_operation(S, {TS, Msg = #message{}}, BatchHandle) ->
-    Val = encode_message(Msg),
-    rocksdb:batch_put(BatchHandle, S#s.cf, ?DB_KEY(TS), Val);
-process_batch_operation(S, {delete, #message_matcher{timestamp = TS}}, BatchHandle) ->
-    rocksdb:batch_delete(BatchHandle, S#s.cf, ?DB_KEY(TS)).
-
 get_streams(_Shard, _Data, _TopicFilter, _StartTime) ->
     [#stream{}].
 

@@ -209,16 +205,6 @@ delete_next(_Shard, #s{db = DB, cf = CF}, It0, Selector, BatchSize, _Now, IsCurr
         {ok, It, NumDeleted, NumIterated}
     end.
 
-lookup_message(_ShardId, #s{db = DB, cf = CF}, #message_matcher{timestamp = TS}) ->
-    case rocksdb:get(DB, CF, ?DB_KEY(TS), _ReadOpts = []) of
-        {ok, Val} ->
-            decode_message(Val);
-        not_found ->
-            not_found;
-        {error, Reason} ->
-            {error, unrecoverable, Reason}
-    end.
-
 %%================================================================================
 %% Internal functions
 %%================================================================================

@@ -228,7 +214,7 @@ do_next(_, _, _, _, 0, Key, Acc) ->
 do_next(TopicFilter, StartTime, IT, Action, NLeft, Key0, Acc) ->
     case rocksdb:iterator_move(IT, Action) of
         {ok, Key = <<TS:64>>, Blob} ->
-            Msg = #message{topic = Topic} = decode_message(Blob),
+            Msg = #message{topic = Topic} = binary_to_term(Blob),
             TopicWords = emqx_topic:words(Topic),
             case emqx_topic:match(TopicWords, TopicFilter) andalso TS >= StartTime of
                 true ->

@@ -248,7 +234,7 @@ do_delete_next(
 ) ->
     case rocksdb:iterator_move(IT, Action) of
         {ok, Key, Blob} ->
-            Msg = #message{topic = Topic, timestamp = TS} = decode_message(Blob),
+            Msg = #message{topic = Topic, timestamp = TS} = binary_to_term(Blob),
             TopicWords = emqx_topic:words(Topic),
             case emqx_topic:match(TopicWords, TopicFilter) andalso TS >= StartTime of
                 true ->

@@ -299,12 +285,6 @@ do_delete_next(
             {Key0, {AccDel, AccIter}}
     end.
 
-encode_message(Msg) ->
-    term_to_binary(Msg).
-
-decode_message(Val) ->
-    binary_to_term(Val).
-
 %% @doc Generate a column family ID for the MQTT messages
 -spec data_cf(emqx_ds_storage_layer:gen_id()) -> [char()].
 data_cf(GenId) ->
@@ -33,8 +33,7 @@
     make_delete_iterator/5,
     update_iterator/4,
     next/6,
-    delete_next/7,
-    lookup_message/3
+    delete_next/7
 ]).
 
 %% internal exports:

@@ -44,7 +43,6 @@
 
 -include_lib("emqx_utils/include/emqx_message.hrl").
 -include_lib("snabbkaffe/include/trace.hrl").
--include("emqx_ds.hrl").
 -include("emqx_ds_metrics.hrl").
 
 -ifdef(TEST).

@@ -58,12 +56,11 @@
 %%================================================================================
 
 %% TLOG entry
-%% Keys:
--define(cooked_msg_ops, 6).
+%% keys:
+-define(cooked_payloads, 6).
 -define(cooked_lts_ops, 7).
 %% Payload:
--define(cooked_delete, 100).
--define(cooked_msg_op(TIMESTAMP, STATIC, VARYING, VALUE),
+-define(cooked_payload(TIMESTAMP, STATIC, VARYING, VALUE),
     {TIMESTAMP, STATIC, VARYING, VALUE}
 ).
 
@@ -179,39 +176,25 @@ drop(_ShardId, DBHandle, _GenId, _CFRefs, #s{data_cf = DataCF, trie_cf = TrieCF,
     ok = rocksdb:drop_column_family(DBHandle, TrieCF),
     ok.
 
-prepare_batch(
-    _ShardId,
-    S = #s{trie = Trie},
-    Operations,
-    _Options
-) ->
+prepare_batch(_ShardId, S = #s{trie = Trie}, Messages, _Options) ->
     _ = erase(?lts_persist_ops),
-    OperationsCooked = emqx_utils:flattermap(
-        fun
-            ({Timestamp, Msg = #message{topic = Topic}}) ->
-                Tokens = words(Topic),
-                {Static, Varying} = emqx_ds_lts:topic_key(Trie, fun threshold_fun/1, Tokens),
-                ?cooked_msg_op(Timestamp, Static, Varying, serialize(S, Varying, Msg));
-            ({delete, #message_matcher{topic = Topic, timestamp = Timestamp}}) ->
-                case emqx_ds_lts:lookup_topic_key(Trie, words(Topic)) of
-                    {ok, {Static, Varying}} ->
-                        ?cooked_msg_op(Timestamp, Static, Varying, ?cooked_delete);
-                    undefined ->
-                        %% Topic is unknown, nothing to delete.
-                        []
-                end
-        end,
-        Operations
-    ),
+    Payloads = [
+        begin
+            Tokens = words(Topic),
+            {Static, Varying} = emqx_ds_lts:topic_key(Trie, fun threshold_fun/1, Tokens),
+            ?cooked_payload(Timestamp, Static, Varying, serialize(S, Varying, Msg))
+        end
+     || {Timestamp, Msg = #message{topic = Topic}} <- Messages
+    ],
     {ok, #{
-        ?cooked_msg_ops => OperationsCooked,
+        ?cooked_payloads => Payloads,
         ?cooked_lts_ops => pop_lts_persist_ops()
     }}.
 
 commit_batch(
     _ShardId,
     #s{db = DB, trie_cf = TrieCF, data_cf = DataCF, trie = Trie, hash_bytes = HashBytes},
-    #{?cooked_lts_ops := LtsOps, ?cooked_msg_ops := Operations},
+    #{?cooked_lts_ops := LtsOps, ?cooked_payloads := Payloads},
     Options
 ) ->
     {ok, Batch} = rocksdb:batch(),
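For clarity on the cooked-op encoding the master side uses: a write is cooked with its serialized binary payload, while a deletion is cooked with the `?cooked_delete` integer marker, which is how `commit_batch` below tells the two apart. A sketch with placeholder variables:

    %% Sketch of the two master-side cooked-op shapes; TS, Static, Varying
    %% and Blob are placeholders. A write carries a binary, a deletion the
    %% ?cooked_delete marker (100), so commit_batch matches <<_/bytes>> vs 100.
    WriteOp = ?cooked_msg_op(TS, Static, Varying, Blob),
    DeleteOp = ?cooked_msg_op(TS, Static, Varying, ?cooked_delete).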
@@ -227,17 +210,12 @@ commit_batch(
     _ = emqx_ds_lts:trie_update(Trie, LtsOps),
     %% Commit payloads:
     lists:foreach(
-        fun
-            (?cooked_msg_op(Timestamp, Static, Varying, ValBlob = <<_/bytes>>)) ->
-                MasterKey = mk_key(Static, 0, <<>>, Timestamp),
-                ok = rocksdb:batch_put(Batch, DataCF, MasterKey, ValBlob),
-                mk_index(Batch, DataCF, HashBytes, Static, Varying, Timestamp);
-            (?cooked_msg_op(Timestamp, Static, Varying, ?cooked_delete)) ->
-                MasterKey = mk_key(Static, 0, <<>>, Timestamp),
-                ok = rocksdb:batch_delete(Batch, DataCF, MasterKey),
-                delete_index(Batch, DataCF, HashBytes, Static, Varying, Timestamp)
+        fun(?cooked_payload(Timestamp, Static, Varying, ValBlob)) ->
+            MasterKey = mk_key(Static, 0, <<>>, Timestamp),
+            ok = rocksdb:batch_put(Batch, DataCF, MasterKey, ValBlob),
+            mk_index(Batch, DataCF, HashBytes, Static, Varying, Timestamp)
         end,
-        Operations
+        Payloads
     ),
     Result = rocksdb:write_batch(DB, Batch, [
         {disable_wal, not maps:get(durable, Options, true)}

@@ -307,28 +285,6 @@ delete_next(Shard, S, It0, Selector, BatchSize, Now, IsCurrent) ->
             Ret
     end.
 
-lookup_message(
-    Shard,
-    S = #s{db = DB, data_cf = CF, trie = Trie},
-    #message_matcher{topic = Topic, timestamp = Timestamp}
-) ->
-    case emqx_ds_lts:lookup_topic_key(Trie, words(Topic)) of
-        {ok, {StaticIdx, _Varying}} ->
-            DSKey = mk_key(StaticIdx, 0, <<>>, Timestamp),
-            case rocksdb:get(DB, CF, DSKey, _ReadOpts = []) of
-                {ok, Val} ->
-                    {ok, TopicStructure} = emqx_ds_lts:reverse_lookup(Trie, StaticIdx),
-                    Msg = deserialize(S, Val),
-                    enrich(Shard, S, TopicStructure, DSKey, Msg);
-                not_found ->
-                    not_found;
-                {error, Reason} ->
-                    {error, unrecoverable, Reason}
-            end;
-        undefined ->
-            not_found
-    end.
-
 %%================================================================================
 %% Internal exports
 %%================================================================================

@@ -374,18 +330,12 @@ serialize(#s{serialization_schema = SSchema, with_guid = WithGuid}, Varying, Msg
     },
     emqx_ds_msg_serializer:serialize(SSchema, Msg).
 
-enrich(#ctx{shard = Shard, s = S, topic_structure = TopicStructure}, DSKey, Msg0) ->
-    enrich(Shard, S, TopicStructure, DSKey, Msg0).
-
 enrich(
-    Shard,
-    #s{with_guid = WithGuid},
-    TopicStructure,
+    #ctx{shard = Shard, topic_structure = Structure, s = #s{with_guid = WithGuid}},
     DSKey,
     Msg0
 ) ->
-    Tokens = words(Msg0#message.topic),
-    Topic = emqx_topic:join(emqx_ds_lts:decompress_topic(TopicStructure, Tokens)),
+    Topic = emqx_topic:join(emqx_ds_lts:decompress_topic(Structure, words(Msg0#message.topic))),
     Msg0#message{
         topic = Topic,
         id =

@@ -634,16 +584,6 @@ mk_index(Batch, CF, HashBytes, Static, Timestamp, N, [TopicLevel | Varying]) ->
 mk_index(_Batch, _CF, _HashBytes, _Static, _Timestamp, _N, []) ->
     ok.
 
-delete_index(Batch, CF, HashBytes, Static, Varying, Timestamp) ->
-    delete_index(Batch, CF, HashBytes, Static, Timestamp, 1, Varying).
-
-delete_index(Batch, CF, HashBytes, Static, Timestamp, N, [TopicLevel | Varying]) ->
-    Key = mk_key(Static, N, hash(HashBytes, TopicLevel), Timestamp),
-    ok = rocksdb:batch_delete(Batch, CF, Key),
-    delete_index(Batch, CF, HashBytes, Static, Timestamp, N + 1, Varying);
-delete_index(_Batch, _CF, _HashBytes, _Static, _Timestamp, _N, []) ->
-    ok.
-
 %%%%%%%% Keys %%%%%%%%%%
 
 get_key_range(StaticIdx, WildcardIdx, Hash) ->
@@ -18,14 +18,11 @@
 -compile(export_all).
 -compile(nowarn_export_all).
 
--include("emqx_ds.hrl").
 -include("../../emqx/include/emqx.hrl").
 -include_lib("common_test/include/ct.hrl").
 -include_lib("snabbkaffe/include/snabbkaffe.hrl").
 -include_lib("stdlib/include/assert.hrl").
 
--define(assertSameSet(A, B), ?assertEqual(lists:sort(A), lists:sort(B))).
-
 -define(FUTURE, (1 bsl 64 - 1)).
 
 -define(SHARD, shard(?FUNCTION_NAME)).

@@ -69,30 +66,6 @@ t_store(_Config) ->
     },
     ?assertMatch(ok, emqx_ds:store_batch(?FUNCTION_NAME, [Msg])).
 
-%% Smoke test of applying batch operations
-t_operations(db_config, _Config) ->
-    #{force_monotonic_timestamps => false}.
-
-t_operations(_Config) ->
-    Batch1 = [
-        make_message(100, <<"t/1">>, <<"M1">>),
-        make_message(200, <<"t/2">>, <<"M2">>),
-        make_message(300, <<"t/3">>, <<"M3">>)
-    ],
-    Batch2 = [
-        make_deletion(200, <<"t/2">>, <<"M2">>),
-        make_deletion(300, <<"t/3">>, '_'),
-        make_deletion(400, <<"t/4">>, '_')
-    ],
-    ?assertEqual(ok, emqx_ds:store_batch(?FUNCTION_NAME, Batch1)),
-    ?assertEqual(ok, emqx_ds:store_batch(?FUNCTION_NAME, Batch2)),
-    ?assertMatch(
-        [
-            #message{timestamp = 100, topic = <<"t/1">>, payload = <<"M1">>}
-        ],
-        dump_messages(?SHARD, <<"t/#">>, 0)
-    ).
-
 %% Smoke test for iteration through a concrete topic
 t_iterate(_Config) ->
     %% Prepare data:

@@ -151,6 +124,8 @@ t_delete(_Config) ->
     ?assertNot(is_map_key(TopicToDelete, MessagesByTopic), #{msgs => MessagesByTopic}),
     ?assertEqual(20, length(Messages)).
 
+-define(assertSameSet(A, B), ?assertEqual(lists:sort(A), lists:sort(B))).
+
 %% Smoke test that verifies that concrete topics are mapped to
 %% individual streams, unless there's too many of them.
 t_get_streams(Config) ->

@@ -442,26 +417,79 @@ dump_stream(Shard, Stream, TopicFilter, StartTime) ->
 %%     || Topic <- Topics, PublishedAt <- Timestamps
 %% ].
 
+%% t_iterate_multigen(_Config) ->
+%%     {ok, 1} = emqx_ds_storage_layer:create_generation(?SHARD, 10, ?COMPACT_CONFIG),
+%%     {ok, 2} = emqx_ds_storage_layer:create_generation(?SHARD, 50, ?DEFAULT_CONFIG),
+%%     {ok, 3} = emqx_ds_storage_layer:create_generation(?SHARD, 1000, ?DEFAULT_CONFIG),
+%%     Topics = ["foo/bar", "foo/bar/baz", "a", "a/bar"],
+%%     Timestamps = lists:seq(1, 100),
+%%     _ = [
+%%         store(?SHARD, PublishedAt, Topic, term_to_binary({Topic, PublishedAt}))
+%%      || Topic <- Topics, PublishedAt <- Timestamps
+%%     ],
+%%     ?assertEqual(
+%%         lists:sort([
+%%             {Topic, PublishedAt}
+%%          || Topic <- ["foo/bar", "foo/bar/baz"], PublishedAt <- Timestamps
+%%         ]),
+%%         lists:sort([binary_to_term(Payload) || Payload <- iterate(?SHARD, "foo/#", 0)])
+%%     ),
+%%     ?assertEqual(
+%%         lists:sort([
+%%             {Topic, PublishedAt}
+%%          || Topic <- ["a", "a/bar"], PublishedAt <- lists:seq(60, 100)
+%%         ]),
+%%         lists:sort([binary_to_term(Payload) || Payload <- iterate(?SHARD, "a/#", 60)])
+%%     ).
+
+%% t_iterate_multigen_preserve_restore(_Config) ->
+%%     ReplayID = atom_to_binary(?FUNCTION_NAME),
+%%     {ok, 1} = emqx_ds_storage_layer:create_generation(?SHARD, 10, ?COMPACT_CONFIG),
+%%     {ok, 2} = emqx_ds_storage_layer:create_generation(?SHARD, 50, ?DEFAULT_CONFIG),
+%%     {ok, 3} = emqx_ds_storage_layer:create_generation(?SHARD, 100, ?DEFAULT_CONFIG),
+%%     Topics = ["foo/bar", "foo/bar/baz", "a/bar"],
+%%     Timestamps = lists:seq(1, 100),
+%%     TopicFilter = "foo/#",
+%%     TopicsMatching = ["foo/bar", "foo/bar/baz"],
+%%     _ = [
+%%         store(?SHARD, TS, Topic, term_to_binary({Topic, TS}))
+%%      || Topic <- Topics, TS <- Timestamps
+%%     ],
+%%     It0 = iterator(?SHARD, TopicFilter, 0),
+%%     {It1, Res10} = iterate(It0, 10),
+%%     % preserve mid-generation
+%%     ok = emqx_ds_storage_layer:preserve_iterator(It1, ReplayID),
+%%     {ok, It2} = emqx_ds_storage_layer:restore_iterator(?SHARD, ReplayID),
+%%     {It3, Res100} = iterate(It2, 88),
+%%     % preserve on the generation boundary
+%%     ok = emqx_ds_storage_layer:preserve_iterator(It3, ReplayID),
+%%     {ok, It4} = emqx_ds_storage_layer:restore_iterator(?SHARD, ReplayID),
+%%     {It5, Res200} = iterate(It4, 1000),
+%%     ?assertEqual({end_of_stream, []}, iterate(It5, 1)),
+%%     ?assertEqual(
+%%         lists:sort([{Topic, TS} || Topic <- TopicsMatching, TS <- Timestamps]),
+%%         lists:sort([binary_to_term(Payload) || Payload <- Res10 ++ Res100 ++ Res200])
+%%     ),
+%%     ?assertEqual(
+%%         ok,
+%%         emqx_ds_storage_layer:discard_iterator(?SHARD, ReplayID)
+%%     ),
+%%     ?assertEqual(
+%%         {error, not_found},
+%%         emqx_ds_storage_layer:restore_iterator(?SHARD, ReplayID)
+%%     ).
+
 make_message(PublishedAt, Topic, Payload) when is_list(Topic) ->
     make_message(PublishedAt, list_to_binary(Topic), Payload);
 make_message(PublishedAt, Topic, Payload) when is_binary(Topic) ->
     ID = emqx_guid:gen(),
     #message{
         id = ID,
-        from = <<?MODULE_STRING>>,
         topic = Topic,
         timestamp = PublishedAt,
         payload = Payload
     }.
 
-make_deletion(Timestamp, Topic, Payload) ->
-    {delete, #message_matcher{
-        from = <<?MODULE_STRING>>,
-        topic = Topic,
-        timestamp = Timestamp,
-        payload = Payload
-    }}.
-
 make_topic(Tokens = [_ | _]) ->
     emqx_topic:join([bin(T) || T <- Tokens]).
 

@@ -507,23 +535,13 @@ end_per_suite(Config) ->
     ok.
 
 init_per_testcase(TC, Config) ->
-    ok = emqx_ds:open_db(TC, db_config(TC, Config)),
+    ok = emqx_ds:open_db(TC, ?DB_CONFIG(Config)),
     Config.
 
 end_per_testcase(TC, _Config) ->
     emqx_ds:drop_db(TC),
     ok.
 
-db_config(TC, Config) ->
-    ConfigBase = ?DB_CONFIG(Config),
-    SpecificConfig =
-        try
-            ?MODULE:TC(?FUNCTION_NAME, Config)
-        catch
-            error:undef -> #{}
-        end,
-    maps:merge(ConfigBase, SpecificConfig).
-
 shard(TC) ->
     {TC, <<"0">>}.
 
@@ -377,7 +377,7 @@ nodes_of_clientid(DB, ClientId, Nodes = [N0 | _]) ->
 shard_of_clientid(DB, Node, ClientId) ->
     ?ON(
         Node,
-        emqx_ds_buffer:shard_of_operation(DB, #message{from = ClientId}, clientid)
+        emqx_ds_buffer:shard_of_message(DB, #message{from = ClientId}, clientid)
     ).
 
 %% Consume eagerly:
@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, emqx_gateway_coap, [
     {description, "CoAP Gateway"},
-    {vsn, "0.1.10"},
+    {vsn, "0.1.9"},
     {registered, []},
     {applications, [kernel, stdlib, emqx, emqx_gateway]},
     {env, []},
@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, emqx_gateway_exproto, [
     {description, "ExProto Gateway"},
-    {vsn, "0.1.13"},
+    {vsn, "0.1.12"},
     {registered, []},
     {applications, [kernel, stdlib, grpc, emqx, emqx_gateway]},
     {env, []},
@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, emqx_gateway_gbt32960, [
     {description, "GBT32960 Gateway"},
-    {vsn, "0.1.5"},
+    {vsn, "0.1.4"},
     {registered, []},
     {applications, [kernel, stdlib, emqx, emqx_gateway]},
     {env, []},
@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, emqx_gateway_jt808, [
     {description, "JT/T 808 Gateway"},
-    {vsn, "0.1.1"},
+    {vsn, "0.1.0"},
     {registered, []},
     {applications, [kernel, stdlib, emqx, emqx_gateway]},
     {env, []},
@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, emqx_gateway_mqttsn, [
     {description, "MQTT-SN Gateway"},
-    {vsn, "0.2.3"},
+    {vsn, "0.2.2"},
     {registered, []},
     {applications, [kernel, stdlib, emqx, emqx_gateway]},
     {env, []},
@@ -3,7 +3,7 @@
     {id, "emqx_machine"},
     {description, "The EMQX Machine"},
     % strict semver, bump manually!
-    {vsn, "0.3.4"},
+    {vsn, "0.3.3"},
     {modules, []},
     {registered, []},
     {applications, [kernel, stdlib, emqx_ctl, redbug]},
@@ -2,7 +2,7 @@
 {application, emqx_management, [
     {description, "EMQX Management API and CLI"},
     % strict semver, bump manually!
-    {vsn, "5.2.4"},
+    {vsn, "5.2.3"},
     {modules, []},
     {registered, [emqx_management_sup]},
     {applications, [
@@ -29,9 +29,13 @@
 
 start(_Type, _Args) ->
     ok = mria:wait_for_tables(emqx_mgmt_auth:create_tables()),
-    emqx_mgmt_auth:try_init_bootstrap_file(),
-    emqx_conf:add_handler([api_key], emqx_mgmt_auth),
-    emqx_mgmt_sup:start_link().
+    case emqx_mgmt_auth:init_bootstrap_file() of
+        ok ->
+            emqx_conf:add_handler([api_key], emqx_mgmt_auth),
+            emqx_mgmt_sup:start_link();
+        {error, Reason} ->
+            {error, Reason}
+    end.
 
 stop(_State) ->
     emqx_conf:remove_handler([api_key]),
@@ -32,7 +32,7 @@
     update/5,
     delete/1,
     list/0,
-    try_init_bootstrap_file/0,
+    init_bootstrap_file/0,
     format/1
 ]).
 

@@ -52,7 +52,6 @@
 -ifdef(TEST).
 -export([create/7]).
 -export([trans/2, force_create_app/1]).
--export([init_bootstrap_file/1]).
 -endif.
 
 -define(APP, emqx_app).

@@ -115,12 +114,11 @@ post_config_update([api_key], _Req, NewConf, _OldConf, _AppEnvs) ->
     end,
     ok.
 
--spec try_init_bootstrap_file() -> ok | {error, _}.
-try_init_bootstrap_file() ->
+-spec init_bootstrap_file() -> ok | {error, _}.
+init_bootstrap_file() ->
     File = bootstrap_file(),
     ?SLOG(debug, #{msg => "init_bootstrap_api_keys_from_file", file => File}),
-    _ = init_bootstrap_file(File),
-    ok.
+    init_bootstrap_file(File).
 
 create(Name, Enable, ExpiredAt, Desc, Role) ->
     ApiKey = generate_unique_api_key(Name),

@@ -359,6 +357,10 @@ init_bootstrap_file(File) ->
             init_bootstrap_file(File, Dev, MP);
         {error, Reason0} ->
             Reason = emqx_utils:explain_posix(Reason0),
+            FmtReason = emqx_utils:format(
+                "load API bootstrap file failed, file:~ts, reason:~ts",
+                [File, Reason]
+            ),
 
             ?SLOG(
                 error,

@@ -369,7 +371,7 @@ init_bootstrap_file(File) ->
                 }
             ),
 
-            {error, Reason}
+            {error, FmtReason}
     end.
 
 init_bootstrap_file(File, Dev, MP) ->
@@ -23,7 +23,6 @@
 -include_lib("emqx/include/logger.hrl").
 
 -define(DATA_BACKUP_OPTS, #{print_fun => fun emqx_ctl:print/2}).
--define(EXCLUSIVE_TAB, emqx_exclusive_subscription).
 
 -export([load/0]).
 

@@ -46,8 +45,7 @@
     olp/1,
     data/1,
     ds/1,
-    cluster_info/0,
-    exclusive/1
+    cluster_info/0
 ]).
 
 -spec load() -> ok.

@@ -1026,9 +1024,7 @@ print({?SUBOPTION, {{Topic, Pid}, Options}}) when is_pid(Pid) ->
     NL = maps:get(nl, Options, 0),
     RH = maps:get(rh, Options, 0),
     RAP = maps:get(rap, Options, 0),
-    emqx_ctl:print("~ts -> topic:~ts qos:~p nl:~p rh:~p rap:~p~n", [SubId, Topic, QoS, NL, RH, RAP]);
-print({exclusive, {exclusive_subscription, Topic, ClientId}}) ->
-    emqx_ctl:print("topic:~ts -> ClientId:~ts~n", [Topic, ClientId]).
+    emqx_ctl:print("~ts -> topic:~ts qos:~p nl:~p rh:~p rap:~p~n", [SubId, Topic, QoS, NL, RH, RAP]).
 
 format(_, undefined) ->
     undefined;

@@ -1089,19 +1085,3 @@ safe_call_mria(Fun, Args, OnFail) ->
             }),
             OnFail
     end.
-%%--------------------------------------------------------------------
-%% @doc Exclusive topics
-exclusive(["list"]) ->
-    case ets:info(?EXCLUSIVE_TAB, size) of
-        0 -> emqx_ctl:print("No topics.~n");
-        _ -> dump(?EXCLUSIVE_TAB, exclusive)
-    end;
-exclusive(["delete", Topic0]) ->
-    Topic = erlang:iolist_to_binary(Topic0),
-    emqx_exclusive_subscription:unsubscribe(Topic, #{is_exclusive => true}),
-    emqx_ctl:print("ok~n");
-exclusive(_) ->
-    emqx_ctl:usage([
-        {"exclusive list", "List all exclusive topics"},
-        {"exclusive delete <Topic>", "Delete an exclusive topic"}
-    ]).
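The removed `exclusive` command is dispatched through `emqx_ctl`; the test suite further down drives it exactly this way, so a minimal usage sketch is:

    %% Exercising the master-side `exclusive' CLI, as t_exclusive does:
    emqx_ctl:run_command(["exclusive", "list"]),
    emqx_ctl:run_command(["exclusive", "delete", "t/1"]).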
@@ -100,7 +100,7 @@ t_bootstrap_file(_) ->
     BadBin = <<"test-1:secret-11\ntest-2 secret-12">>,
     ok = file:write_file(File, BadBin),
     update_file(File),
-    ?assertMatch({error, #{reason := "invalid_format"}}, emqx_mgmt_auth:init_bootstrap_file(File)),
+    ?assertMatch({error, #{reason := "invalid_format"}}, emqx_mgmt_auth:init_bootstrap_file()),
     ?assertEqual(ok, auth_authorize(TestPath, <<"test-1">>, <<"secret-11">>)),
     ?assertMatch({error, _}, auth_authorize(TestPath, <<"test-2">>, <<"secret-12">>)),
     update_file(<<>>),
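As the fixture implies, the bootstrap file holds one `<api_key>:<secret>` pair per line; the second line of `BadBin` uses a space instead of a colon, which is what triggers the `"invalid_format"` error. A well-formed counterpart, with hypothetical values in the same style:

    %% Both lines use the colon-separated form the parser accepts.
    GoodBin = <<"test-1:secret-11\ntest-2:secret-12">>.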
@@ -123,7 +123,7 @@ t_bootstrap_file_override(_) ->
     ok = file:write_file(File, Bin),
     update_file(File),
 
-    ?assertEqual(ok, emqx_mgmt_auth:init_bootstrap_file(File)),
+    ?assertEqual(ok, emqx_mgmt_auth:init_bootstrap_file()),
 
     MatchFun = fun(ApiKey) -> mnesia:match_object(#?APP{api_key = ApiKey, _ = '_'}) end,
     ?assertMatch(

@@ -156,7 +156,7 @@ t_bootstrap_file_dup_override(_) ->
     File = "./bootstrap_api_keys.txt",
     ok = file:write_file(File, Bin),
     update_file(File),
-    ?assertEqual(ok, emqx_mgmt_auth:init_bootstrap_file(File)),
+    ?assertEqual(ok, emqx_mgmt_auth:init_bootstrap_file()),
 
     SameAppWithDiffName = #?APP{
         name = <<"name-1">>,

@@ -190,7 +190,7 @@ t_bootstrap_file_dup_override(_) ->
 
     %% Similar to loading bootstrap file at node startup
     %% the duplicated apikey in mnesia will be cleaned up
-    ?assertEqual(ok, emqx_mgmt_auth:init_bootstrap_file(File)),
+    ?assertEqual(ok, emqx_mgmt_auth:init_bootstrap_file()),
     ?assertMatch(
         {ok, [
             #?APP{
@@ -360,9 +360,4 @@ t_autocluster_leave(Config) ->
         )
     ).
 
-t_exclusive(_Config) ->
-    emqx_ctl:run_command(["exclusive", "list"]),
-    emqx_ctl:run_command(["exclusive", "delete", "t/1"]),
-    ok.
-
 format(Str, Opts) -> io:format("str:~s: Opts:~p", [Str, Opts]).
@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, emqx_modules, [
     {description, "EMQX Modules"},
-    {vsn, "5.0.28"},
+    {vsn, "5.0.27"},
     {modules, []},
     {applications, [kernel, stdlib, emqx, emqx_ctl, observer_cli]},
     {mod, {emqx_modules_app, []}},
Some files were not shown because too many files have changed in this diff.