feat(sessds): preserve acks / ranges in mnesia for replays
commit 1246d714c5
parent 5b40304d1f
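
For orientation, a self-contained sketch (not part of the diff below) of what the persisted publish ranges buy us: a session that reconnects can rebuild its inflight window from the ranges stored in `emqx_ds_pubrange_tab`. Names in the sketch are local stand-ins; the real logic lives in `emqx_persistent_message_ds_replayer:open/1` and `compute_inflight_range/1` further down in the diff.

-module(pubrange_sketch).
-export([demo/0]).

-record(range, {first, until, type}).

%% Same shape as compute_inflight_range/1 in the diff: the first unacked
%% (inflight) range gives acked_until, the last stored range gives next_seqno.
window([]) ->
    {1, 1};
window(Ranges) ->
    #range{until = Last} = lists:last(Ranges),
    case lists:dropwhile(fun(#range{type = T}) -> T == checkpoint end, Ranges) of
        [#range{first = AckedUntil} | _] -> {AckedUntil, Last};
        [] -> {Last, Last}
    end.

demo() ->
    %% Hypothetical ranges as they would be read back on session open,
    %% oldest first: one fully acked (checkpoint) range, two inflight ones.
    Stored = [
        #range{first = 1, until = 4, type = checkpoint},
        #range{first = 4, until = 9, type = inflight},
        #range{first = 9, until = 12, type = inflight}
    ],
    {4, 12} = window(Stored),
    ok.
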
@@ -357,18 +357,12 @@ do_t_session_discard(Params) ->
             _Attempts0 = 50,
             true = map_size(emqx_persistent_session_ds:list_all_streams()) > 0
         ),
-        ?retry(
-            _Sleep0 = 100,
-            _Attempts0 = 50,
-            true = map_size(emqx_persistent_session_ds:list_all_iterators()) > 0
-        ),
         ok = emqtt:stop(Client0),
         ?tp(notice, "disconnected", #{}),

         ?tp(notice, "reconnecting", #{}),
-        %% we still have iterators and streams
+        %% we still have streams
         ?assert(map_size(emqx_persistent_session_ds:list_all_streams()) > 0),
-        ?assert(map_size(emqx_persistent_session_ds:list_all_iterators()) > 0),
         Client1 = start_client(ReconnectOpts),
         {ok, _} = emqtt:connect(Client1),
         ?assertEqual([], emqtt:subscriptions(Client1)),
@@ -381,7 +375,7 @@ do_t_session_discard(Params) ->
             ?assertEqual(#{}, emqx_persistent_session_ds:list_all_subscriptions()),
             ?assertEqual([], emqx_persistent_session_ds_router:topics()),
             ?assertEqual(#{}, emqx_persistent_session_ds:list_all_streams()),
-            ?assertEqual(#{}, emqx_persistent_session_ds:list_all_iterators()),
+            ?assertEqual(#{}, emqx_persistent_session_ds:list_all_pubranges()),
             ok = emqtt:stop(Client1),
             ?tp(notice, "disconnected", #{}),

@@ -19,12 +19,12 @@
 -module(emqx_persistent_message_ds_replayer).

 %% API:
--export([new/0, next_packet_id/1, replay/2, commit_offset/3, poll/3, n_inflight/1]).
+-export([new/0, open/1, next_packet_id/1, replay/1, commit_offset/3, poll/3, n_inflight/1]).

 %% internal exports:
 -export([]).

--export_type([inflight/0]).
+-export_type([inflight/0, seqno/0]).

 -include_lib("emqx/include/logger.hrl").
 -include("emqx_persistent_session_ds.hrl").
@@ -42,17 +42,28 @@
 -type seqno() :: non_neg_integer().

 -record(range, {
-    stream :: emqx_ds:stream(),
+    stream :: _StreamRef,
     first :: seqno(),
-    last :: seqno(),
-    iterator_next :: emqx_ds:iterator() | undefined
+    until :: seqno(),
+    %% Type of a range:
+    %% * Inflight range is a range of yet unacked messages from this stream.
+    %% * Checkpoint range was already acked, its purpose is to keep track of the
+    %% very last iterator for this stream.
+    type :: inflight | checkpoint,
+    %% Meaning of this depends on the type of the range:
+    %% * For inflight range, this is the iterator pointing to the first message in
+    %% the range.
+    %% * For checkpoint range, this is the iterator pointing right past the last
+    %% message in the range.
+    iterator :: emqx_ds:iterator()
 }).

 -type range() :: #range{}.

 -record(inflight, {
-    next_seqno = 0 :: seqno(),
-    acked_seqno = 0 :: seqno(),
+    next_seqno = 1 :: seqno(),
+    acked_until = 1 :: seqno(),
+    %% Ranges are sorted in ascending order of their sequence numbers.
     offset_ranges = [] :: [range()]
 }).

@@ -66,34 +77,37 @@
 new() ->
     #inflight{}.

+-spec open(emqx_persistent_session_ds:id()) -> inflight().
+open(SessionId) ->
+    Ranges = ro_transaction(fun() -> get_ranges(SessionId) end),
+    {AckedUntil, NextSeqno} = compute_inflight_range(Ranges),
+    #inflight{
+        acked_until = AckedUntil,
+        next_seqno = NextSeqno,
+        offset_ranges = Ranges
+    }.
+
 -spec next_packet_id(inflight()) -> {emqx_types:packet_id(), inflight()}.
-next_packet_id(Inflight0 = #inflight{next_seqno = LastSeqNo}) ->
-    Inflight = Inflight0#inflight{next_seqno = LastSeqNo + 1},
-    case LastSeqNo rem 16#10000 of
-        0 ->
-            %% We skip sequence numbers that lead to PacketId = 0 to
-            %% simplify math. Note: it leads to occasional gaps in the
-            %% sequence numbers.
-            next_packet_id(Inflight);
-        PacketId ->
-            {PacketId, Inflight}
-    end.
+next_packet_id(Inflight0 = #inflight{next_seqno = LastSeqno}) ->
+    Inflight = Inflight0#inflight{next_seqno = next_seqno(LastSeqno)},
+    {seqno_to_packet_id(LastSeqno), Inflight}.

 -spec n_inflight(inflight()) -> non_neg_integer().
-n_inflight(#inflight{next_seqno = NextSeqNo, acked_seqno = AckedSeqno}) ->
-    %% NOTE: this function assumes that gaps in the sequence ID occur
-    %% _only_ when the packet ID wraps:
-    case AckedSeqno >= ((NextSeqNo bsr 16) bsl 16) of
-        true ->
-            NextSeqNo - AckedSeqno;
-        false ->
-            NextSeqNo - AckedSeqno - 1
-    end.
+n_inflight(#inflight{next_seqno = NextSeqno, acked_until = AckedUntil}) ->
+    range_size(AckedUntil, NextSeqno).

--spec replay(emqx_persistent_session_ds:id(), inflight()) ->
-    emqx_session:replies().
-replay(_SessionId, _Inflight = #inflight{offset_ranges = _Ranges}) ->
-    [].
+-spec replay(inflight()) ->
+    {emqx_session:replies(), inflight()}.
+replay(Inflight0 = #inflight{acked_until = AckedUntil, offset_ranges = Ranges0}) ->
+    {Ranges, Replies} = lists:mapfoldr(
+        fun(Range, Acc) ->
+            replay_range(Range, AckedUntil, Acc)
+        end,
+        [],
+        Ranges0
+    ),
+    Inflight = Inflight0#inflight{offset_ranges = Ranges},
+    {Replies, Inflight}.

 -spec commit_offset(emqx_persistent_session_ds:id(), emqx_types:packet_id(), inflight()) ->
     {_IsValidOffset :: boolean(), inflight()}.
@@ -101,47 +115,34 @@ commit_offset(
     SessionId,
     PacketId,
     Inflight0 = #inflight{
-        acked_seqno = AckedSeqno0, next_seqno = NextSeqNo, offset_ranges = Ranges0
+        acked_until = AckedUntil, next_seqno = NextSeqno
     }
 ) ->
-    AckedSeqno =
-        case packet_id_to_seqno(NextSeqNo, PacketId) of
-            N when N > AckedSeqno0; AckedSeqno0 =:= 0 ->
-                N;
+    case packet_id_to_seqno(NextSeqno, PacketId) of
+        Seqno when Seqno >= AckedUntil andalso Seqno < NextSeqno ->
+            %% TODO
+            %% We do not preserve `acked_until` in the database. Instead, we discard
+            %% fully acked ranges from the database. In effect, this means that the
+            %% most recent `acked_until` the client has sent may be lost in case of a
+            %% crash or client loss.
+            Inflight1 = Inflight0#inflight{acked_until = next_seqno(Seqno)},
+            Inflight = discard_acked(SessionId, Inflight1),
+            {true, Inflight};
         OutOfRange ->
             ?SLOG(warning, #{
                 msg => "out-of-order_ack",
-                prev_seqno => AckedSeqno0,
+                acked_until => AckedUntil,
                 acked_seqno => OutOfRange,
-                next_seqno => NextSeqNo,
+                next_seqno => NextSeqno,
                 packet_id => PacketId
             }),
-            AckedSeqno0
-        end,
-    Ranges = lists:filter(
-        fun(#range{stream = Stream, last = LastSeqno, iterator_next = ItNext}) ->
-            case LastSeqno =< AckedSeqno of
-                true ->
-                    %% This range has been fully
-                    %% acked. Remove it and replace saved
-                    %% iterator with the trailing iterator.
-                    update_iterator(SessionId, Stream, ItNext),
-                    false;
-                false ->
-                    %% This range still has unacked
-                    %% messages:
-                    true
-            end
-        end,
-        Ranges0
-    ),
-    Inflight = Inflight0#inflight{acked_seqno = AckedSeqno, offset_ranges = Ranges},
-    {true, Inflight}.
+            {false, Inflight0}
+    end.

 -spec poll(emqx_persistent_session_ds:id(), inflight(), pos_integer()) ->
     {emqx_session:replies(), inflight()}.
 poll(SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < 16#7fff ->
-    #inflight{next_seqno = NextSeqNo0, acked_seqno = AckedSeqno} =
+    #inflight{next_seqno = NextSeqNo0, acked_until = AckedSeqno} =
         Inflight0,
     FetchThreshold = max(1, WindowSize div 2),
     FreeSpace = AckedSeqno + WindowSize - NextSeqNo0,
@@ -153,6 +154,7 @@ poll(SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < 16#7fff ->
             %% client get stuck even?
             {[], Inflight0};
         true ->
+            %% TODO: Wrap this in `mria:async_dirty/2`?
             Streams = shuffle(get_streams(SessionId)),
             fetch(SessionId, Inflight0, Streams, FreeSpace, [])
     end.
@@ -165,75 +167,206 @@ poll(SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < 16#7fff ->
 %% Internal functions
 %%================================================================================

-fetch(_SessionId, Inflight, _Streams = [], _N, Acc) ->
-    {lists:reverse(Acc), Inflight};
-fetch(_SessionId, Inflight, _Streams, 0, Acc) ->
-    {lists:reverse(Acc), Inflight};
-fetch(SessionId, Inflight0, [Stream | Streams], N, Publishes0) ->
-    #inflight{next_seqno = FirstSeqNo, offset_ranges = Ranges0} = Inflight0,
-    ItBegin = get_last_iterator(SessionId, Stream, Ranges0),
-    {ok, ItEnd, Messages} = emqx_ds:next(?PERSISTENT_MESSAGE_DB, ItBegin, N),
-    {NMessages, Publishes, Inflight1} =
-        lists:foldl(
-            fun(Msg, {N0, PubAcc0, InflightAcc0}) ->
-                {PacketId, InflightAcc} = next_packet_id(InflightAcc0),
-                PubAcc = [{PacketId, Msg} | PubAcc0],
-                {N0 + 1, PubAcc, InflightAcc}
-            end,
-            {0, Publishes0, Inflight0},
-            Messages
-        ),
-    #inflight{next_seqno = LastSeqNo} = Inflight1,
-    case NMessages > 0 of
-        true ->
-            Range = #range{
-                first = FirstSeqNo,
-                last = LastSeqNo - 1,
-                stream = Stream,
-                iterator_next = ItEnd
-            },
-            Inflight = Inflight1#inflight{offset_ranges = Ranges0 ++ [Range]},
-            fetch(SessionId, Inflight, Streams, N - NMessages, Publishes);
-        false ->
-            fetch(SessionId, Inflight1, Streams, N, Publishes)
-    end.
-
--spec update_iterator(emqx_persistent_session_ds:id(), emqx_ds:stream(), emqx_ds:iterator()) -> ok.
-update_iterator(DSSessionId, Stream, Iterator) ->
-    %% Workaround: we convert `Stream' to a binary before attempting to store it in
-    %% mnesia(rocksdb) because of a bug in `mnesia_rocksdb' when trying to do
-    %% `mnesia:dirty_all_keys' later.
-    StreamBin = term_to_binary(Stream),
-    mria:dirty_write(?SESSION_ITER_TAB, #ds_iter{id = {DSSessionId, StreamBin}, iter = Iterator}).
-
-get_last_iterator(SessionId, Stream, Ranges) ->
-    case lists:keyfind(Stream, #range.stream, lists:reverse(Ranges)) of
-        false ->
-            get_iterator(SessionId, Stream);
-        #range{iterator_next = Next} ->
-            Next
-    end.
-
--spec get_iterator(emqx_persistent_session_ds:id(), emqx_ds:stream()) -> emqx_ds:iterator().
-get_iterator(DSSessionId, Stream) ->
-    %% See comment in `update_iterator'.
-    StreamBin = term_to_binary(Stream),
-    Id = {DSSessionId, StreamBin},
-    [#ds_iter{iter = It}] = mnesia:dirty_read(?SESSION_ITER_TAB, Id),
-    It.
-
--spec get_streams(emqx_persistent_session_ds:id()) -> [emqx_ds:stream()].
-get_streams(SessionId) ->
-    lists:map(
-        fun(#ds_stream{stream = Stream}) ->
-            Stream
-        end,
-        mnesia:dirty_read(?SESSION_STREAM_TAB, SessionId)
-    ).
+compute_inflight_range([]) ->
+    {1, 1};
+compute_inflight_range(Ranges) ->
+    _RangeLast = #range{until = LastSeqno} = lists:last(Ranges),
+    RangesUnacked = lists:dropwhile(fun(#range{type = T}) -> T == checkpoint end, Ranges),
+    case RangesUnacked of
+        [#range{first = AckedUntil} | _] ->
+            {AckedUntil, LastSeqno};
+        [] ->
+            {LastSeqno, LastSeqno}
+    end.
+
+get_ranges(SessionId) ->
+    DSRanges = mnesia:match_object(
+        ?SESSION_PUBRANGE_TAB,
+        #ds_pubrange{id = {SessionId, '_'}, _ = '_'},
+        read
+    ),
+    lists:map(fun export_range/1, DSRanges).
+
+export_range(#ds_pubrange{
+    type = Type, id = {_, First}, until = Until, stream = StreamRef, iterator = It
+}) ->
+    #range{type = Type, stream = StreamRef, first = First, until = Until, iterator = It}.
+
+fetch(SessionId, Inflight0, [DSStream | Streams], N, Acc) when N > 0 ->
+    #inflight{next_seqno = FirstSeqno, offset_ranges = Ranges0} = Inflight0,
+    ItBegin = get_last_iterator(DSStream, Ranges0),
+    {ok, ItEnd, Messages} = emqx_ds:next(?PERSISTENT_MESSAGE_DB, ItBegin, N),
+    {Publishes, UntilSeqno} = publish(FirstSeqno, Messages),
+    case range_size(FirstSeqno, UntilSeqno) of
+        Size when Size > 0 ->
+            Range0 = #range{
+                type = inflight,
+                first = FirstSeqno,
+                until = UntilSeqno,
+                stream = DSStream#ds_stream.ref,
+                iterator = ItBegin
+            },
+            %% We need to preserve the iterator pointing to the beginning of the
+            %% range, so that we can replay it if needed.
+            ok = preserve_range(SessionId, Range0),
+            %% ...Yet we need to keep the iterator pointing past the end of the
+            %% range, so that we can pick up where we left off: it will become
+            %% `ItBegin` of the next range for this stream.
+            Range = Range0#range{iterator = ItEnd},
+            Ranges = Ranges0 ++ [Range#range{iterator = ItEnd}],
+            Inflight = Inflight0#inflight{
+                next_seqno = UntilSeqno,
+                offset_ranges = Ranges
+            },
+            fetch(SessionId, Inflight, Streams, N - Size, [Publishes | Acc]);
+        0 ->
+            fetch(SessionId, Inflight0, Streams, N, Acc)
+    end;
+fetch(_SessionId, Inflight, _Streams, _N, Acc) ->
+    Publishes = lists:append(lists:reverse(Acc)),
+    {Publishes, Inflight}.
+
+discard_acked(
+    SessionId,
+    Inflight0 = #inflight{acked_until = AckedUntil, offset_ranges = Ranges0}
+) ->
+    %% TODO: This could be kept and incrementally updated in the inflight state.
+    Checkpoints = find_checkpoints(Ranges0),
+    %% TODO: Wrap this in `mria:async_dirty/2`?
+    Ranges = discard_acked_ranges(SessionId, AckedUntil, Checkpoints, Ranges0),
+    Inflight0#inflight{offset_ranges = Ranges}.
+
+find_checkpoints(Ranges) ->
+    lists:foldl(
+        fun(#range{stream = StreamRef, until = Until}, Acc) ->
+            %% For each stream, remember the last range over this stream.
+            Acc#{StreamRef => Until}
+        end,
+        #{},
+        Ranges
+    ).
+
+discard_acked_ranges(
+    SessionId,
+    AckedUntil,
+    Checkpoints,
+    [Range = #range{until = Until, stream = StreamRef} | Rest]
+) when Until =< AckedUntil ->
+    %% This range has been fully acked.
+    %% Either discard it completely, or preserve the iterator for the next range
+    %% over this stream (i.e. a checkpoint).
+    RangeKept =
+        case maps:get(StreamRef, Checkpoints) of
+            CP when CP > Until ->
+                discard_range(SessionId, Range),
+                [];
+            Until ->
+                checkpoint_range(SessionId, Range),
+                [Range#range{type = checkpoint}]
+        end,
+    %% Since we're (intentionally) not using transactions here, it's important to
+    %% issue database writes in the same order in which ranges are stored: from
+    %% the oldest to the newest. This is also why we need to compute which ranges
+    %% should become checkpoints before we start writing anything.
+    RangeKept ++ discard_acked_ranges(SessionId, AckedUntil, Checkpoints, Rest);
+discard_acked_ranges(_SessionId, _AckedUntil, _Checkpoints, Ranges) ->
+    %% The rest of ranges (if any) still have unacked messages.
+    Ranges.
+
+replay_range(
+    Range0 = #range{type = inflight, first = First, until = Until, iterator = It},
+    AckedUntil,
+    Acc
+) ->
+    Size = range_size(First, Until),
+    FirstUnacked = max(First, AckedUntil),
+    {ok, ItNext, Messages} = emqx_ds:next(?PERSISTENT_MESSAGE_DB, It, Size),
+    MessagesUnacked =
+        case FirstUnacked of
+            First ->
+                Messages;
+            _ ->
+                lists:nthtail(range_size(First, FirstUnacked), Messages)
+        end,
+    %% Asserting that range is consistent with the message storage state.
+    {Replies, Until} = publish(FirstUnacked, MessagesUnacked),
+    Range = Range0#range{iterator = ItNext},
+    {Range, Replies ++ Acc};
+replay_range(Range0 = #range{type = checkpoint}, _AckedUntil, Acc) ->
+    {Range0, Acc}.
+
+publish(FirstSeqno, Messages) ->
+    lists:mapfoldl(
+        fun(Message, Seqno) ->
+            PacketId = seqno_to_packet_id(Seqno),
+            {{PacketId, Message}, next_seqno(Seqno)}
+        end,
+        FirstSeqno,
+        Messages
+    ).
+
+-spec preserve_range(emqx_persistent_session_ds:id(), range()) -> ok.
+preserve_range(
+    SessionId,
+    #range{first = First, until = Until, stream = StreamRef, iterator = It}
+) ->
+    DSRange = #ds_pubrange{
+        id = {SessionId, First},
+        until = Until,
+        stream = StreamRef,
+        type = inflight,
+        iterator = It
+    },
+    mria:dirty_write(?SESSION_PUBRANGE_TAB, DSRange).
+
+-spec discard_range(emqx_persistent_session_ds:id(), range()) -> ok.
+discard_range(SessionId, #range{first = First}) ->
+    mria:dirty_delete(?SESSION_PUBRANGE_TAB, {SessionId, First}).
+
+-spec checkpoint_range(emqx_persistent_session_ds:id(), range()) -> ok.
+checkpoint_range(
+    SessionId,
+    #range{type = inflight, first = First, until = Until, stream = StreamRef, iterator = ItNext}
+) ->
+    DSRange = #ds_pubrange{
+        id = {SessionId, First},
+        until = Until,
+        stream = StreamRef,
+        type = checkpoint,
+        iterator = ItNext
+    },
+    mria:dirty_write(?SESSION_PUBRANGE_TAB, DSRange);
+checkpoint_range(_SessionId, #range{type = checkpoint}) ->
+    %% This range should have been checkpointed already.
+    ok.
+
+get_last_iterator(DSStream = #ds_stream{ref = StreamRef}, Ranges) ->
+    case lists:keyfind(StreamRef, #range.stream, lists:reverse(Ranges)) of
+        false ->
+            DSStream#ds_stream.beginning;
+        #range{iterator = ItNext} ->
+            ItNext
+    end.
+
+-spec get_streams(emqx_persistent_session_ds:id()) -> [ds_stream()].
+get_streams(SessionId) ->
+    mnesia:dirty_read(?SESSION_STREAM_TAB, SessionId).
+
+next_seqno(Seqno) ->
+    NextSeqno = Seqno + 1,
+    case seqno_to_packet_id(NextSeqno) of
+        0 ->
+            %% We skip sequence numbers that lead to PacketId = 0 to
+            %% simplify math. Note: it leads to occasional gaps in the
+            %% sequence numbers.
+            NextSeqno + 1;
+        _ ->
+            NextSeqno
+    end.

 %% Reconstruct session counter by adding most significant bits from
 %% the current counter to the packet id.
--spec packet_id_to_seqno(non_neg_integer(), emqx_types:packet_id()) -> non_neg_integer().
+-spec packet_id_to_seqno(_Next :: seqno(), emqx_types:packet_id()) -> seqno().
 packet_id_to_seqno(NextSeqNo, PacketId) ->
     Epoch = NextSeqNo bsr 16,
     case packet_id_to_seqno_(Epoch, PacketId) of
@@ -243,10 +376,20 @@ packet_id_to_seqno(NextSeqNo, PacketId) ->
             packet_id_to_seqno_(Epoch - 1, PacketId)
     end.

--spec packet_id_to_seqno_(non_neg_integer(), emqx_types:packet_id()) -> non_neg_integer().
+-spec packet_id_to_seqno_(non_neg_integer(), emqx_types:packet_id()) -> seqno().
 packet_id_to_seqno_(Epoch, PacketId) ->
     (Epoch bsl 16) + PacketId.

+-spec seqno_to_packet_id(seqno()) -> emqx_types:packet_id().
+seqno_to_packet_id(Seqno) ->
+    Seqno rem 16#10000.
+
+range_size(FirstSeqno, UntilSeqno) ->
+    %% This function assumes that gaps in the sequence ID occur _only_ when the
+    %% packet ID wraps.
+    Size = UntilSeqno - FirstSeqno,
+    Size + (FirstSeqno bsr 16) - (UntilSeqno bsr 16).
+
 -spec shuffle([A]) -> [A].
 shuffle(L0) ->
     L1 = lists:map(
@@ -259,6 +402,10 @@ shuffle(L0) ->
     {_, L} = lists:unzip(L2),
     L.

+ro_transaction(Fun) ->
+    {atomic, Res} = mria:ro_transaction(?DS_MRIA_SHARD, Fun),
+    Res.
+
 -ifdef(TEST).

 %% This test only tests boundary conditions (to make sure property-based test didn't skip them):
@@ -311,4 +458,40 @@ seqno_gen(NextSeqNo) ->
     Max = max(0, NextSeqNo - 1),
     range(Min, Max).

+range_size_test_() ->
+    [
+        ?_assertEqual(0, range_size(42, 42)),
+        ?_assertEqual(1, range_size(42, 43)),
+        ?_assertEqual(1, range_size(16#ffff, 16#10001)),
+        ?_assertEqual(16#ffff - 456 + 123, range_size(16#1f0000 + 456, 16#200000 + 123))
+    ].
+
+compute_inflight_range_test_() ->
+    [
+        ?_assertEqual(
+            {1, 1},
+            compute_inflight_range([])
+        ),
+        ?_assertEqual(
+            {12, 42},
+            compute_inflight_range([
+                #range{first = 1, until = 2, type = checkpoint},
+                #range{first = 4, until = 8, type = checkpoint},
+                #range{first = 11, until = 12, type = checkpoint},
+                #range{first = 12, until = 13, type = inflight},
+                #range{first = 13, until = 20, type = inflight},
+                #range{first = 20, until = 42, type = inflight}
+            ])
+        ),
+        ?_assertEqual(
+            {13, 13},
+            compute_inflight_range([
+                #range{first = 1, until = 2, type = checkpoint},
+                #range{first = 4, until = 8, type = checkpoint},
+                #range{first = 11, until = 12, type = checkpoint},
+                #range{first = 12, until = 13, type = checkpoint}
+            ])
+        )
+    ].
+
 -endif.

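As a side note on the sequence-number arithmetic the replayer relies on: the helpers below mirror next_seqno/1, seqno_to_packet_id/1 and range_size/2 from the diff above, copied into a throwaway module (an illustration, not part of the commit) so the wrap-around behaviour can be tried in isolation.

-module(seqno_sketch).
-export([demo/0]).

seqno_to_packet_id(Seqno) ->
    Seqno rem 16#10000.

next_seqno(Seqno) ->
    Next = Seqno + 1,
    case seqno_to_packet_id(Next) of
        %% Packet id 0 is not a valid MQTT packet id, so the seqno that would
        %% map to it is skipped, leaving a gap once per 16#10000 messages.
        0 -> Next + 1;
        _ -> Next
    end.

range_size(First, Until) ->
    %% Accounts for the seqnos skipped at each packet-id wrap.
    (Until - First) + (First bsr 16) - (Until bsr 16).

demo() ->
    16#10001 = next_seqno(16#ffff),     %% wrap: seqno 16#10000 is skipped
    1 = seqno_to_packet_id(16#10001),   %% ...and maps back to packet id 1
    1 = range_size(16#ffff, 16#10001),  %% the skipped seqno is not counted
    ok.
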
@@ -76,7 +76,7 @@
     list_all_sessions/0,
     list_all_subscriptions/0,
     list_all_streams/0,
-    list_all_iterators/0
+    list_all_pubranges/0
 ]).
 -endif.

@@ -359,15 +359,16 @@ handle_timeout(
         end,
     ensure_timer(pull, Timeout),
     {ok, Publishes, Session#{inflight => Inflight}};
-handle_timeout(_ClientInfo, get_streams, Session = #{id := Id}) ->
-    renew_streams(Id),
+handle_timeout(_ClientInfo, get_streams, Session) ->
+    renew_streams(Session),
     ensure_timer(get_streams),
     {ok, [], Session}.

 -spec replay(clientinfo(), [], session()) ->
     {ok, replies(), session()}.
-replay(_ClientInfo, [], Session = #{}) ->
-    {ok, [], Session}.
+replay(_ClientInfo, [], Session = #{inflight := Inflight0}) ->
+    {Replies, Inflight} = emqx_persistent_message_ds_replayer:replay(Inflight0),
+    {ok, Replies, Session#{inflight := Inflight}}.

 %%--------------------------------------------------------------------

@@ -474,17 +475,20 @@ create_tables() ->
         ]
     ),
     ok = mria:create_table(
-        ?SESSION_ITER_TAB,
+        ?SESSION_PUBRANGE_TAB,
         [
            {rlog_shard, ?DS_MRIA_SHARD},
-           {type, set},
+           {type, ordered_set},
            {storage, storage()},
-           {record_name, ds_iter},
-           {attributes, record_info(fields, ds_iter)}
+           {record_name, ds_pubrange},
+           {attributes, record_info(fields, ds_pubrange)}
         ]
     ),
     ok = mria:wait_for_tables([
-        ?SESSION_TAB, ?SESSION_SUBSCRIPTIONS_TAB, ?SESSION_STREAM_TAB, ?SESSION_ITER_TAB
+        ?SESSION_TAB,
+        ?SESSION_SUBSCRIPTIONS_TAB,
+        ?SESSION_STREAM_TAB,
+        ?SESSION_PUBRANGE_TAB
     ]),
     ok.

@@ -512,9 +516,10 @@ session_open(SessionId) ->
             Session = export_session(Record),
             DSSubs = session_read_subscriptions(SessionId),
             Subscriptions = export_subscriptions(DSSubs),
+            Inflight = emqx_persistent_message_ds_replayer:open(SessionId),
             Session#{
                 subscriptions => Subscriptions,
-                inflight => emqx_persistent_message_ds_replayer:new()
+                inflight => Inflight
             };
         [] ->
             false
@@ -549,7 +554,7 @@ session_create(SessionId, Props) ->
 session_drop(DSSessionId) ->
     transaction(fun() ->
         ok = session_drop_subscriptions(DSSessionId),
-        ok = session_drop_iterators(DSSessionId),
+        ok = session_drop_pubranges(DSSessionId),
         ok = session_drop_streams(DSSessionId),
         ok = mnesia:delete(?SESSION_TAB, DSSessionId, write)
     end).
@@ -663,77 +668,82 @@ do_ensure_all_iterators_closed(_DSSessionID) ->
 %% Reading batches
 %%--------------------------------------------------------------------

--spec renew_streams(id()) -> ok.
-renew_streams(DSSessionId) ->
-    Subscriptions = ro_transaction(fun() -> session_read_subscriptions(DSSessionId) end),
-    ExistingStreams = ro_transaction(fun() -> mnesia:read(?SESSION_STREAM_TAB, DSSessionId) end),
-    lists:foreach(
-        fun(#ds_sub{id = {_, TopicFilter}, start_time = StartTime}) ->
-            renew_streams(DSSessionId, ExistingStreams, TopicFilter, StartTime)
-        end,
-        Subscriptions
-    ).
-
--spec renew_streams(id(), [ds_stream()], topic_filter_words(), emqx_ds:time()) -> ok.
-renew_streams(DSSessionId, ExistingStreams, TopicFilter, StartTime) ->
-    AllStreams = emqx_ds:get_streams(?PERSISTENT_MESSAGE_DB, TopicFilter, StartTime),
-    transaction(
-        fun() ->
-            lists:foreach(
-                fun({Rank, Stream}) ->
-                    Rec = #ds_stream{
-                        session = DSSessionId,
-                        topic_filter = TopicFilter,
-                        stream = Stream,
-                        rank = Rank
-                    },
-                    case lists:member(Rec, ExistingStreams) of
-                        true ->
-                            ok;
-                        false ->
-                            mnesia:write(?SESSION_STREAM_TAB, Rec, write),
-                            {ok, Iterator} = emqx_ds:make_iterator(
-                                ?PERSISTENT_MESSAGE_DB, Stream, TopicFilter, StartTime
-                            ),
-                            %% Workaround: we convert `Stream' to a binary before
-                            %% attempting to store it in mnesia(rocksdb) because of a bug
-                            %% in `mnesia_rocksdb' when trying to do
-                            %% `mnesia:dirty_all_keys' later.
-                            StreamBin = term_to_binary(Stream),
-                            IterRec = #ds_iter{id = {DSSessionId, StreamBin}, iter = Iterator},
-                            mnesia:write(?SESSION_ITER_TAB, IterRec, write)
-                    end
-                end,
-                AllStreams
-            )
-        end
-    ).
+-spec renew_streams(session()) -> ok.
+renew_streams(#{id := SessionId, subscriptions := Subscriptions}) ->
+    transaction(fun() ->
+        ExistingStreams = mnesia:read(?SESSION_STREAM_TAB, SessionId, write),
+        maps:fold(
+            fun(TopicFilter, #{start_time := StartTime}, Streams) ->
+                TopicFilterWords = emqx_topic:words(TopicFilter),
+                renew_topic_streams(SessionId, TopicFilterWords, StartTime, Streams)
+            end,
+            ExistingStreams,
+            Subscriptions
+        )
+    end),
+    ok.
+
+-spec renew_topic_streams(id(), topic_filter_words(), emqx_ds:time(), _Acc :: [ds_stream()]) -> ok.
+renew_topic_streams(DSSessionId, TopicFilter, StartTime, ExistingStreams) ->
+    TopicStreams = emqx_ds:get_streams(?PERSISTENT_MESSAGE_DB, TopicFilter, StartTime),
+    lists:foldl(
+        fun({Rank, Stream}, Streams) ->
+            case lists:keymember(Stream, #ds_stream.stream, Streams) of
+                true ->
+                    Streams;
+                false ->
+                    StreamRef = length(Streams) + 1,
+                    DSStream = session_store_stream(
+                        DSSessionId,
+                        StreamRef,
+                        Stream,
+                        Rank,
+                        TopicFilter,
+                        StartTime
+                    ),
+                    [DSStream | Streams]
+            end
+        end,
+        ExistingStreams,
+        TopicStreams
+    ).
+
+session_store_stream(DSSessionId, StreamRef, Stream, Rank, TopicFilter, StartTime) ->
+    {ok, ItBegin} = emqx_ds:make_iterator(
+        ?PERSISTENT_MESSAGE_DB,
+        Stream,
+        TopicFilter,
+        StartTime
+    ),
+    DSStream = #ds_stream{
+        session = DSSessionId,
+        ref = StreamRef,
+        stream = Stream,
+        rank = Rank,
+        beginning = ItBegin
+    },
+    mnesia:write(?SESSION_STREAM_TAB, DSStream, write),
+    DSStream.

 %% must be called inside a transaction
 -spec session_drop_streams(id()) -> ok.
 session_drop_streams(DSSessionId) ->
-    MS = ets:fun2ms(
-        fun(#ds_stream{session = DSSessionId0}) when DSSessionId0 =:= DSSessionId ->
-            DSSessionId0
-        end
-    ),
-    StreamIDs = mnesia:select(?SESSION_STREAM_TAB, MS, write),
-    lists:foreach(fun(Key) -> mnesia:delete(?SESSION_STREAM_TAB, Key, write) end, StreamIDs).
+    mnesia:delete(?SESSION_STREAM_TAB, DSSessionId, write).

 %% must be called inside a transaction
--spec session_drop_iterators(id()) -> ok.
-session_drop_iterators(DSSessionId) ->
+-spec session_drop_pubranges(id()) -> ok.
+session_drop_pubranges(DSSessionId) ->
     MS = ets:fun2ms(
-        fun(#ds_iter{id = {DSSessionId0, StreamBin}}) when DSSessionId0 =:= DSSessionId ->
-            StreamBin
+        fun(#ds_pubrange{id = {DSSessionId0, First}}) when DSSessionId0 =:= DSSessionId ->
+            {DSSessionId, First}
         end
     ),
-    StreamBins = mnesia:select(?SESSION_ITER_TAB, MS, write),
+    RangeIds = mnesia:select(?SESSION_PUBRANGE_TAB, MS, write),
     lists:foreach(
-        fun(StreamBin) ->
-            mnesia:delete(?SESSION_ITER_TAB, {DSSessionId, StreamBin}, write)
+        fun(RangeId) ->
+            mnesia:delete(?SESSION_PUBRANGE_TAB, RangeId, write)
         end,
-        StreamBins
+        RangeIds
     ).

 %%--------------------------------------------------------------------------------

@@ -758,7 +768,7 @@ export_subscriptions(DSSubs) ->
     ).

 export_session(#session{} = Record) ->
-    export_record(Record, #session.id, [id, created_at, expires_at, inflight, props], #{}).
+    export_record(Record, #session.id, [id, created_at, expires_at, props], #{}).

 export_subscription(#ds_sub{} = Record) ->
     export_record(Record, #ds_sub.start_time, [start_time, props, extra], #{}).
@@ -833,16 +843,18 @@ list_all_streams() ->
     ),
     maps:from_list(DSStreams).

-list_all_iterators() ->
-    DSIterIds = mnesia:dirty_all_keys(?SESSION_ITER_TAB),
-    DSIters = lists:map(
-        fun(DSIterId) ->
-            [Record] = mnesia:dirty_read(?SESSION_ITER_TAB, DSIterId),
-            {DSIterId, export_record(Record, #ds_iter.id, [id, iter], #{})}
-        end,
-        DSIterIds
-    ),
-    maps:from_list(DSIters).
+list_all_pubranges() ->
+    DSPubranges = mnesia:dirty_match_object(?SESSION_PUBRANGE_TAB, #ds_pubrange{_ = '_'}),
+    lists:foldl(
+        fun(Record = #ds_pubrange{id = {SessionId, First}}, Acc) ->
+            Range = export_record(
+                Record, #ds_pubrange.until, [until, stream, type, iterator], #{first => First}
+            ),
+            maps:put(SessionId, maps:get(SessionId, Acc, []) ++ [Range], Acc)
+        end,
+        #{},
+        DSPubranges
+    ).

 %% ifdef(TEST)
 -endif.

@@ -21,7 +21,7 @@
 -define(SESSION_TAB, emqx_ds_session).
 -define(SESSION_SUBSCRIPTIONS_TAB, emqx_ds_session_subscriptions).
 -define(SESSION_STREAM_TAB, emqx_ds_stream_tab).
--define(SESSION_ITER_TAB, emqx_ds_iter_tab).
+-define(SESSION_PUBRANGE_TAB, emqx_ds_pubrange_tab).
 -define(DS_MRIA_SHARD, emqx_ds_session_shard).

 -record(ds_sub, {
@@ -34,17 +34,24 @@

 -record(ds_stream, {
     session :: emqx_persistent_session_ds:id(),
-    topic_filter :: emqx_ds:topic_filter(),
+    ref :: _StreamRef,
     stream :: emqx_ds:stream(),
-    rank :: emqx_ds:stream_rank()
+    rank :: emqx_ds:stream_rank(),
+    beginning :: emqx_ds:iterator()
 }).
 -type ds_stream() :: #ds_stream{}.
--type ds_stream_bin() :: binary().

--record(ds_iter, {
-    id :: {emqx_persistent_session_ds:id(), ds_stream_bin()},
-    iter :: emqx_ds:iterator()
+-record(ds_pubrange, {
+    id :: {
+        _Session :: emqx_persistent_session_ds:id(),
+        _First :: emqx_persistent_message_ds_replayer:seqno()
+    },
+    until :: emqx_persistent_message_ds_replayer:seqno(),
+    stream :: _StreamRef,
+    type :: inflight | checkpoint,
+    iterator :: emqx_ds:iterator()
 }).
+-type ds_pubrange() :: #ds_pubrange{}.

 -record(session, {
     %% same as clientid
@@ -52,7 +59,7 @@
     %% creation time
     created_at :: _Millisecond :: non_neg_integer(),
     expires_at = never :: _Millisecond :: non_neg_integer() | never,
-    inflight :: emqx_persistent_message_ds_replayer:inflight(),
+    % last_ack = 0 :: emqx_persistent_message_ds_replayer:seqno(),
     %% for future usage
     props = #{} :: map()
 }).