commit 97d574f62c
@@ -44,6 +44,11 @@
     code_change/3
 ]).
 
+%% Internal exports (RPC)
+-export([
+    do_cleanup_channels/1
+]).
+
 -define(REGISTRY, ?MODULE).
 -define(TAB, emqx_channel_registry).
 -define(LOCK, {?MODULE, cleanup_down}).
@@ -155,7 +160,7 @@ cleanup_channels(Node) ->
     global:trans(
         {?LOCK, self()},
         fun() ->
-            mria:transaction(?CM_SHARD, fun do_cleanup_channels/1, [Node])
+            mria:transaction(?CM_SHARD, fun ?MODULE:do_cleanup_channels/1, [Node])
         end
     ).
 
@@ -52,6 +52,14 @@
     group_match_spec/1
 ]).
 
+%% Internal exports (RPC)
+-export([
+    do_destroy/1,
+    do_add_user/2,
+    do_delete_user/2,
+    do_update_user/3
+]).
+
 -define(TAB, ?MODULE).
 -define(AUTHN_QSCHEMA, [
     {<<"like_user_id">>, binary},
@@ -170,83 +178,79 @@ authenticate(_Credential, _State) ->
     ignore.
 
 destroy(#{user_group := UserGroup}) ->
+    trans(fun ?MODULE:do_destroy/1, [UserGroup]).
+
+do_destroy(UserGroup) ->
     MatchSpec = group_match_spec(UserGroup),
-    trans(
-        fun() ->
-            ok = lists:foreach(
-                fun(UserInfo) ->
-                    mnesia:delete_object(?TAB, UserInfo, write)
-                end,
-                mnesia:select(?TAB, MatchSpec, write)
-            )
-        end
+    ok = lists:foreach(
+        fun(UserInfo) ->
+            mnesia:delete_object(?TAB, UserInfo, write)
+        end,
+        mnesia:select(?TAB, MatchSpec, write)
     ).
 
-add_user(
+add_user(UserInfo, State) ->
+    trans(fun ?MODULE:do_add_user/2, [UserInfo, State]).
+
+do_add_user(
     #{
         user_id := UserID,
         password := Password
     } = UserInfo,
     #{user_group := UserGroup} = State
 ) ->
-    trans(
-        fun() ->
-            case mnesia:read(?TAB, {UserGroup, UserID}, write) of
-                [] ->
-                    IsSuperuser = maps:get(is_superuser, UserInfo, false),
-                    add_user(UserGroup, UserID, Password, IsSuperuser, State),
-                    {ok, #{user_id => UserID, is_superuser => IsSuperuser}};
-                [_] ->
-                    {error, already_exist}
-            end
-        end
-    ).
+    case mnesia:read(?TAB, {UserGroup, UserID}, write) of
+        [] ->
+            IsSuperuser = maps:get(is_superuser, UserInfo, false),
+            add_user(UserGroup, UserID, Password, IsSuperuser, State),
+            {ok, #{user_id => UserID, is_superuser => IsSuperuser}};
+        [_] ->
+            {error, already_exist}
+    end.
 
-delete_user(UserID, #{user_group := UserGroup}) ->
-    trans(
-        fun() ->
-            case mnesia:read(?TAB, {UserGroup, UserID}, write) of
-                [] ->
-                    {error, not_found};
-                [_] ->
-                    mnesia:delete(?TAB, {UserGroup, UserID}, write)
-            end
-        end
-    ).
+delete_user(UserID, State) ->
+    trans(fun ?MODULE:do_delete_user/2, [UserID, State]).
 
-update_user(
+do_delete_user(UserID, #{user_group := UserGroup}) ->
+    case mnesia:read(?TAB, {UserGroup, UserID}, write) of
+        [] ->
+            {error, not_found};
+        [_] ->
+            mnesia:delete(?TAB, {UserGroup, UserID}, write)
+    end.
+
+update_user(UserID, User, State) ->
+    trans(fun ?MODULE:do_update_user/3, [UserID, User, State]).
+
+do_update_user(
     UserID,
     User,
     #{user_group := UserGroup} = State
 ) ->
-    trans(
-        fun() ->
-            case mnesia:read(?TAB, {UserGroup, UserID}, write) of
-                [] ->
-                    {error, not_found};
-                [#user_info{is_superuser = IsSuperuser} = UserInfo] ->
-                    UserInfo1 = UserInfo#user_info{
-                        is_superuser = maps:get(is_superuser, User, IsSuperuser)
-                    },
-                    UserInfo2 =
-                        case maps:get(password, User, undefined) of
-                            undefined ->
-                                UserInfo1;
-                            Password ->
-                                {StoredKey, ServerKey, Salt} = esasl_scram:generate_authentication_info(
-                                    Password, State
-                                ),
-                                UserInfo1#user_info{
-                                    stored_key = StoredKey,
-                                    server_key = ServerKey,
-                                    salt = Salt
-                                }
-                        end,
-                    mnesia:write(?TAB, UserInfo2, write),
-                    {ok, format_user_info(UserInfo2)}
-            end
-        end
-    ).
+    case mnesia:read(?TAB, {UserGroup, UserID}, write) of
+        [] ->
+            {error, not_found};
+        [#user_info{is_superuser = IsSuperuser} = UserInfo] ->
+            UserInfo1 = UserInfo#user_info{
+                is_superuser = maps:get(is_superuser, User, IsSuperuser)
+            },
+            UserInfo2 =
+                case maps:get(password, User, undefined) of
+                    undefined ->
+                        UserInfo1;
+                    Password ->
+                        {StoredKey, ServerKey, Salt} = esasl_scram:generate_authentication_info(
+                            Password, State
+                        ),
+                        UserInfo1#user_info{
+                            stored_key = StoredKey,
+                            server_key = ServerKey,
+                            salt = Salt
+                        }
+                end,
+            mnesia:write(?TAB, UserInfo2, write),
+            {ok, format_user_info(UserInfo2)}
+    end.
 
 lookup_user(UserID, #{user_group := UserGroup}) ->
     case mnesia:dirty_read(?TAB, {UserGroup, UserID}) of
@@ -386,12 +390,10 @@ retrieve(UserID, #{user_group := UserGroup}) ->
     end.
 
 %% TODO: Move to emqx_authn_utils.erl
-trans(Fun) ->
-    trans(Fun, []).
-
 trans(Fun, Args) ->
     case mria:transaction(?AUTH_SHARD, Fun, Args) of
         {atomic, Res} -> Res;
+        {aborted, {function_clause, Stack}} -> erlang:raise(error, function_clause, Stack);
         {aborted, Reason} -> {error, Reason}
     end.
 
@@ -54,6 +54,16 @@
     group_match_spec/1
 ]).
 
+%% Internal exports (RPC)
+-export([
+    do_destroy/1,
+    do_add_user/2,
+    do_delete_user/2,
+    do_update_user/3,
+    import/2,
+    import_csv/3
+]).
+
 -type user_group() :: binary().
 -type user_id() :: binary().
 
@@ -175,15 +185,14 @@ authenticate(
     end.
 
 destroy(#{user_group := UserGroup}) ->
-    trans(
-        fun() ->
-            ok = lists:foreach(
-                fun(User) ->
-                    mnesia:delete_object(?TAB, User, write)
-                end,
-                mnesia:select(?TAB, group_match_spec(UserGroup), write)
-            )
-        end
+    trans(fun ?MODULE:do_destroy/1, [UserGroup]).
+
+do_destroy(UserGroup) ->
+    ok = lists:foreach(
+        fun(User) ->
+            mnesia:delete_object(?TAB, User, write)
+        end,
+        mnesia:select(?TAB, group_match_spec(UserGroup), write)
     ).
 
 import_users({Filename0, FileData}, State) ->
@@ -200,7 +209,10 @@ import_users({Filename0, FileData}, State) ->
             {error, {unsupported_file_format, Extension}}
     end.
 
-add_user(
+add_user(UserInfo, State) ->
+    trans(fun ?MODULE:do_add_user/2, [UserInfo, State]).
+
+do_add_user(
     #{
         user_id := UserID,
         password := Password
@@ -210,33 +222,31 @@ add_user(
         password_hash_algorithm := Algorithm
     }
 ) ->
-    trans(
-        fun() ->
-            case mnesia:read(?TAB, {UserGroup, UserID}, write) of
-                [] ->
-                    {PasswordHash, Salt} = emqx_authn_password_hashing:hash(Algorithm, Password),
-                    IsSuperuser = maps:get(is_superuser, UserInfo, false),
-                    insert_user(UserGroup, UserID, PasswordHash, Salt, IsSuperuser),
-                    {ok, #{user_id => UserID, is_superuser => IsSuperuser}};
-                [_] ->
-                    {error, already_exist}
-            end
-        end
-    ).
+    case mnesia:read(?TAB, {UserGroup, UserID}, write) of
+        [] ->
+            {PasswordHash, Salt} = emqx_authn_password_hashing:hash(Algorithm, Password),
+            IsSuperuser = maps:get(is_superuser, UserInfo, false),
+            insert_user(UserGroup, UserID, PasswordHash, Salt, IsSuperuser),
+            {ok, #{user_id => UserID, is_superuser => IsSuperuser}};
+        [_] ->
+            {error, already_exist}
+    end.
 
-delete_user(UserID, #{user_group := UserGroup}) ->
-    trans(
-        fun() ->
-            case mnesia:read(?TAB, {UserGroup, UserID}, write) of
-                [] ->
-                    {error, not_found};
-                [_] ->
-                    mnesia:delete(?TAB, {UserGroup, UserID}, write)
-            end
-        end
-    ).
+delete_user(UserID, State) ->
+    trans(fun ?MODULE:do_delete_user/2, [UserID, State]).
 
-update_user(
+do_delete_user(UserID, #{user_group := UserGroup}) ->
+    case mnesia:read(?TAB, {UserGroup, UserID}, write) of
+        [] ->
+            {error, not_found};
+        [_] ->
+            mnesia:delete(?TAB, {UserGroup, UserID}, write)
+    end.
+
+update_user(UserID, UserInfo, State) ->
+    trans(fun ?MODULE:do_update_user/3, [UserID, UserInfo, State]).
+
+do_update_user(
     UserID,
     UserInfo,
     #{
@@ -244,33 +254,29 @@ update_user(
         password_hash_algorithm := Algorithm
     }
 ) ->
-    trans(
-        fun() ->
-            case mnesia:read(?TAB, {UserGroup, UserID}, write) of
-                [] ->
-                    {error, not_found};
-                [
-                    #user_info{
-                        password_hash = PasswordHash,
-                        salt = Salt,
-                        is_superuser = IsSuperuser
-                    }
-                ] ->
-                    NSuperuser = maps:get(is_superuser, UserInfo, IsSuperuser),
-                    {NPasswordHash, NSalt} =
-                        case UserInfo of
-                            #{password := Password} ->
-                                emqx_authn_password_hashing:hash(
-                                    Algorithm, Password
-                                );
-                            #{} ->
-                                {PasswordHash, Salt}
-                        end,
-                    insert_user(UserGroup, UserID, NPasswordHash, NSalt, NSuperuser),
-                    {ok, #{user_id => UserID, is_superuser => NSuperuser}}
-            end
-        end
-    ).
+    case mnesia:read(?TAB, {UserGroup, UserID}, write) of
+        [] ->
+            {error, not_found};
+        [
+            #user_info{
+                password_hash = PasswordHash,
+                salt = Salt,
+                is_superuser = IsSuperuser
+            }
+        ] ->
+            NSuperuser = maps:get(is_superuser, UserInfo, IsSuperuser),
+            {NPasswordHash, NSalt} =
+                case UserInfo of
+                    #{password := Password} ->
+                        emqx_authn_password_hashing:hash(
+                            Algorithm, Password
+                        );
+                    #{} ->
+                        {PasswordHash, Salt}
+                end,
+            insert_user(UserGroup, UserID, NPasswordHash, NSalt, NSuperuser),
+            {ok, #{user_id => UserID, is_superuser => NSuperuser}}
+    end.
 
 lookup_user(UserID, #{user_group := UserGroup}) ->
     case mnesia:dirty_read(?TAB, {UserGroup, UserID}) of
@@ -335,7 +341,7 @@ run_fuzzy_filter(
 import_users_from_json(Bin, #{user_group := UserGroup}) ->
     case emqx_json:safe_decode(Bin, [return_maps]) of
         {ok, List} ->
-            trans(fun import/2, [UserGroup, List]);
+            trans(fun ?MODULE:import/2, [UserGroup, List]);
         {error, Reason} ->
             {error, Reason}
     end.
@@ -344,7 +350,7 @@ import_users_from_json(Bin, #{user_group := UserGroup}) ->
 import_users_from_csv(CSV, #{user_group := UserGroup}) ->
     case get_csv_header(CSV) of
         {ok, Seq, NewCSV} ->
-            trans(fun import_csv/3, [UserGroup, NewCSV, Seq]);
+            trans(fun ?MODULE:import_csv/3, [UserGroup, NewCSV, Seq]);
         {error, Reason} ->
             {error, Reason}
     end.
@@ -435,9 +441,6 @@ get_user_identity(#{clientid := ClientID}, clientid) ->
 get_user_identity(_, Type) ->
     {error, {bad_user_identity_type, Type}}.
 
-trans(Fun) ->
-    trans(Fun, []).
-
 trans(Fun, Args) ->
     case mria:transaction(?AUTH_SHARD, Fun, Args) of
         {atomic, Res} -> Res;
@@ -31,10 +31,16 @@
     fast_forward_to_commit/2
 ]).
 -export([
-    get_node_tnx_id/1,
+    commit/2,
+    commit_status_trans/2,
     get_cluster_tnx_id/0,
+    get_node_tnx_id/1,
+    init_mfa/2,
     latest_tnx_id/0,
-    make_initiate_call_req/3
+    make_initiate_call_req/3,
+    read_next_mfa/1,
+    trans_query/1,
+    trans_status/0
 ]).
 
 -export([
@@ -194,18 +200,18 @@ do_multicall(M, F, A, RequiredSyncs, Timeout) ->
 
 -spec query(pos_integer()) -> {'atomic', map()} | {'aborted', Reason :: term()}.
 query(TnxId) ->
-    transaction(fun trans_query/1, [TnxId]).
+    transaction(fun ?MODULE:trans_query/1, [TnxId]).
 
 -spec reset() -> reset.
 reset() -> gen_server:call(?MODULE, reset).
 
 -spec status() -> {'atomic', [map()]} | {'aborted', Reason :: term()}.
 status() ->
-    transaction(fun trans_status/0, []).
+    transaction(fun ?MODULE:trans_status/0, []).
 
 -spec latest_tnx_id() -> pos_integer().
 latest_tnx_id() ->
-    {atomic, TnxId} = transaction(fun get_cluster_tnx_id/0, []),
+    {atomic, TnxId} = transaction(fun ?MODULE:get_cluster_tnx_id/0, []),
     TnxId.
 
 -spec make_initiate_call_req(module(), atom(), list()) -> init_call_req().
@@ -280,7 +286,7 @@ handle_call(reset, _From, State) ->
     _ = mria:clear_table(?CLUSTER_MFA),
     {reply, ok, State, {continue, ?CATCH_UP}};
 handle_call(?INITIATE(MFA), _From, State = #{node := Node}) ->
-    case transaction(fun init_mfa/2, [Node, MFA]) of
+    case transaction(fun ?MODULE:init_mfa/2, [Node, MFA]) of
         {atomic, {ok, TnxId, Result}} ->
             {reply, {ok, TnxId, Result}, State, {continue, ?CATCH_UP}};
         {aborted, Error} ->
@@ -288,7 +294,7 @@ handle_call(?INITIATE(MFA), _From, State = #{node := Node}) ->
     end;
 handle_call(skip_failed_commit, _From, State = #{node := Node}) ->
     Timeout = catch_up(State, true),
-    {atomic, LatestId} = transaction(fun get_node_tnx_id/1, [Node]),
+    {atomic, LatestId} = transaction(fun ?MODULE:get_node_tnx_id/1, [Node]),
     {reply, LatestId, State, Timeout};
 handle_call({fast_forward_to_commit, ToTnxId}, _From, State) ->
     NodeId = do_fast_forward_to_commit(ToTnxId, State),
@@ -316,14 +322,14 @@ code_change(_OldVsn, State, _Extra) ->
 catch_up(State) -> catch_up(State, false).
 
 catch_up(#{node := Node, retry_interval := RetryMs} = State, SkipResult) ->
-    case transaction(fun read_next_mfa/1, [Node]) of
+    case transaction(fun ?MODULE:read_next_mfa/1, [Node]) of
         {atomic, caught_up} ->
             ?TIMEOUT;
         {atomic, {still_lagging, NextId, MFA}} ->
             {Succeed, _} = apply_mfa(NextId, MFA, ?APPLY_KIND_REPLICATE),
             case Succeed orelse SkipResult of
                 true ->
-                    case transaction(fun commit/2, [Node, NextId]) of
+                    case transaction(fun ?MODULE:commit/2, [Node, NextId]) of
                         {atomic, ok} ->
                             catch_up(State, false);
                         Error ->
@@ -367,12 +373,12 @@ commit(Node, TnxId) ->
     ok = mnesia:write(?CLUSTER_COMMIT, #cluster_rpc_commit{node = Node, tnx_id = TnxId}, write).
 
 do_fast_forward_to_commit(ToTnxId, State = #{node := Node}) ->
-    {atomic, NodeId} = transaction(fun get_node_tnx_id/1, [Node]),
+    {atomic, NodeId} = transaction(fun ?MODULE:get_node_tnx_id/1, [Node]),
     case NodeId >= ToTnxId of
         true ->
             NodeId;
         false ->
-            {atomic, LatestId} = transaction(fun get_cluster_tnx_id/0, []),
+            {atomic, LatestId} = transaction(fun ?MODULE:get_cluster_tnx_id/0, []),
             case LatestId =< NodeId of
                 true ->
                     NodeId;
@@ -529,11 +535,11 @@ wait_for_nodes_commit(RequiredSyncs, TnxId, Delay, Remain) ->
     end.
 
 lagging_node(TnxId) ->
-    {atomic, Nodes} = transaction(fun commit_status_trans/2, ['<', TnxId]),
+    {atomic, Nodes} = transaction(fun ?MODULE:commit_status_trans/2, ['<', TnxId]),
     Nodes.
 
 synced_nodes(TnxId) ->
-    {atomic, Nodes} = transaction(fun commit_status_trans/2, ['>=', TnxId]),
+    {atomic, Nodes} = transaction(fun ?MODULE:commit_status_trans/2, ['>=', TnxId]),
     Nodes.
 
 commit_status_trans(Operator, TnxId) ->
@@ -547,5 +553,5 @@ get_retry_ms() ->
 
 maybe_init_tnx_id(_Node, TnxId) when TnxId < 0 -> ok;
 maybe_init_tnx_id(Node, TnxId) ->
-    {atomic, _} = transaction(fun commit/2, [Node, TnxId]),
+    {atomic, _} = transaction(fun ?MODULE:commit/2, [Node, TnxId]),
     ok.
@@ -30,6 +30,11 @@
     code_change/3
 ]).
 
+%% Internal exports (RPC)
+-export([
+    del_stale_mfa/1
+]).
+
 start_link() ->
     MaxHistory = emqx_conf:get(["node", "cluster_call", "max_history"], 100),
     CleanupMs = emqx_conf:get(["node", "cluster_call", "cleanup_interval"], 5 * 60 * 1000),
@@ -56,7 +61,7 @@ handle_cast(Msg, State) ->
     {noreply, State}.
 
 handle_info({timeout, TRef, del_stale_mfa}, State = #{timer := TRef, max_history := MaxHistory}) ->
-    case mria:transaction(?CLUSTER_RPC_SHARD, fun del_stale_mfa/1, [MaxHistory]) of
+    case mria:transaction(?CLUSTER_RPC_SHARD, fun ?MODULE:del_stale_mfa/1, [MaxHistory]) of
         {atomic, ok} -> ok;
         Error -> ?SLOG(error, #{msg => "del_stale_cluster_rpc_mfa_error", error => Error})
     end,
@@ -1,6 +1,6 @@
 {application, emqx_conf, [
     {description, "EMQX configuration management"},
-    {vsn, "0.1.2"},
+    {vsn, "0.1.3"},
     {registered, []},
     {mod, {emqx_conf_app, []}},
     {applications, [kernel, stdlib]},
@@ -42,6 +42,11 @@
     code_change/3
 ]).
 
+%% Internal exports (RPC)
+-export([
+    do_cleanup_channels/2
+]).
+
 -define(CM_SHARD, emqx_gateway_cm_shard).
 -define(LOCK, {?MODULE, cleanup_down}).
 
@@ -148,7 +153,7 @@ cleanup_channels(Node, Name) ->
     global:trans(
         {?LOCK, self()},
         fun() ->
-            mria:transaction(?CM_SHARD, fun do_cleanup_channels/2, [Node, Tab])
+            mria:transaction(?CM_SHARD, fun ?MODULE:do_cleanup_channels/2, [Node, Tab])
         end
     ).
 
@@ -46,6 +46,11 @@
     code_change/3
 ]).
 
+%% Internal exports (RPC)
+-export([
+    do_register/4
+]).
+
 -export([lookup_name/1]).
 
 -define(SN_SHARD, emqx_sn_shard).
@@ -173,33 +178,11 @@ handle_call(
         TopicId when TopicId >= 16#FFFF ->
             {reply, {error, too_large}, State};
         TopicId ->
-            Fun = fun() ->
-                mnesia:write(
-                    Tab,
-                    #emqx_sn_registry{
-                        key = {ClientId, next_topic_id},
-                        value = TopicId + 1
-                    },
-                    write
-                ),
-                mnesia:write(
-                    Tab,
-                    #emqx_sn_registry{
-                        key = {ClientId, TopicName},
-                        value = TopicId
-                    },
-                    write
-                ),
-                mnesia:write(
-                    Tab,
-                    #emqx_sn_registry{
-                        key = {ClientId, TopicId},
-                        value = TopicName
-                    },
-                    write
-                )
-            end,
-            case mria:transaction(?SN_SHARD, Fun) of
+            case
+                mria:transaction(?SN_SHARD, fun ?MODULE:do_register/4, [
+                    Tab, ClientId, TopicId, TopicName
+                ])
+            of
                 {atomic, ok} ->
                     {reply, TopicId, State};
                 {aborted, Error} ->
@@ -248,6 +231,32 @@ terminate(_Reason, _State) ->
 code_change(_OldVsn, State, _Extra) ->
     {ok, State}.
 
+do_register(Tab, ClientId, TopicId, TopicName) ->
+    mnesia:write(
+        Tab,
+        #emqx_sn_registry{
+            key = {ClientId, next_topic_id},
+            value = TopicId + 1
+        },
+        write
+    ),
+    mnesia:write(
+        Tab,
+        #emqx_sn_registry{
+            key = {ClientId, TopicName},
+            value = TopicId
+        },
+        write
+    ),
+    mnesia:write(
+        Tab,
+        #emqx_sn_registry{
+            key = {ClientId, TopicId},
+            value = TopicName
+        },
+        write
+    ).
+
 %%-----------------------------------------------------------------------------
 
 next_topic_id(Tab, PredefId, ClientId) ->
@@ -54,6 +54,11 @@
 
 -export([official_version/1]).
 
+%% Internal exports (RPC)
+-export([
+    do_ensure_uuids/0
+]).
+
 %% internal export
 -export([read_raw_build_info/0]).
 
@@ -530,54 +535,56 @@ bin(B) when is_binary(B) ->
     B.
 
 ensure_uuids() ->
-    Txn = fun() ->
-        NodeUUID =
-            case mnesia:wread({?TELEMETRY, node()}) of
-                [] ->
-                    NodeUUID0 =
-                        case get_uuid_from_file(node) of
-                            {ok, NUUID} -> NUUID;
-                            undefined -> generate_uuid()
-                        end,
-                    mnesia:write(
-                        ?TELEMETRY,
-                        #telemetry{
-                            id = node(),
-                            uuid = NodeUUID0
-                        },
-                        write
-                    ),
-                    NodeUUID0;
-                [#telemetry{uuid = NodeUUID0}] ->
-                    NodeUUID0
-            end,
-        ClusterUUID =
-            case mnesia:wread({?TELEMETRY, ?CLUSTER_UUID_KEY}) of
-                [] ->
-                    ClusterUUID0 =
-                        case get_uuid_from_file(cluster) of
-                            {ok, CUUID} -> CUUID;
-                            undefined -> generate_uuid()
-                        end,
-                    mnesia:write(
-                        ?TELEMETRY,
-                        #telemetry{
-                            id = ?CLUSTER_UUID_KEY,
-                            uuid = ClusterUUID0
-                        },
-                        write
-                    ),
-                    ClusterUUID0;
-                [#telemetry{uuid = ClusterUUID0}] ->
-                    ClusterUUID0
-            end,
-        {NodeUUID, ClusterUUID}
-    end,
-    {atomic, {NodeUUID, ClusterUUID}} = mria:transaction(?TELEMETRY_SHARD, Txn),
+    {atomic, {NodeUUID, ClusterUUID}} = mria:transaction(
+        ?TELEMETRY_SHARD, fun ?MODULE:do_ensure_uuids/0
+    ),
     save_uuid_to_file(NodeUUID, node),
     save_uuid_to_file(ClusterUUID, cluster),
     {NodeUUID, ClusterUUID}.
 
+do_ensure_uuids() ->
+    NodeUUID =
+        case mnesia:wread({?TELEMETRY, node()}) of
+            [] ->
+                NodeUUID0 =
+                    case get_uuid_from_file(node) of
+                        {ok, NUUID} -> NUUID;
+                        undefined -> generate_uuid()
+                    end,
+                mnesia:write(
+                    ?TELEMETRY,
+                    #telemetry{
+                        id = node(),
+                        uuid = NodeUUID0
+                    },
+                    write
+                ),
+                NodeUUID0;
+            [#telemetry{uuid = NodeUUID0}] ->
+                NodeUUID0
+        end,
+    ClusterUUID =
+        case mnesia:wread({?TELEMETRY, ?CLUSTER_UUID_KEY}) of
+            [] ->
+                ClusterUUID0 =
+                    case get_uuid_from_file(cluster) of
+                        {ok, CUUID} -> CUUID;
+                        undefined -> generate_uuid()
+                    end,
+                mnesia:write(
+                    ?TELEMETRY,
+                    #telemetry{
+                        id = ?CLUSTER_UUID_KEY,
+                        uuid = ClusterUUID0
+                    },
+                    write
+                ),
+                ClusterUUID0;
+            [#telemetry{uuid = ClusterUUID0}] ->
+                ClusterUUID0
+        end,
+    {NodeUUID, ClusterUUID}.
+
 get_uuid_from_file(Type) ->
     Path = uuid_file_path(Type),
     case file:read_file(Path) of
@@ -2,7 +2,7 @@
 {application, emqx_psk, [
     {description, "EMQX PSK"},
     % strict semver, bump manually!
-    {vsn, "5.0.0"},
+    {vsn, "5.0.1"},
     {modules, []},
     {registered, [emqx_psk_sup]},
     {applications, [kernel, stdlib]},
@@ -43,6 +43,11 @@
     code_change/3
 ]).
 
+%% Internal exports (RPC)
+-export([
+    insert_psks/1
+]).
+
 -record(psk_entry, {
     psk_id :: binary(),
     shared_secret :: binary(),
@@ -199,10 +204,10 @@ import_psks(SrcFile) ->
 import_psks(Io, Delimiter, ChunkSize, NChunk) ->
     case get_psks(Io, Delimiter, ChunkSize) of
         {ok, Entries} ->
-            _ = trans(fun insert_psks/1, [Entries]),
+            _ = trans(fun ?MODULE:insert_psks/1, [Entries]),
             import_psks(Io, Delimiter, ChunkSize, NChunk + 1);
         {eof, Entries} ->
-            _ = trans(fun insert_psks/1, [Entries]),
+            _ = trans(fun ?MODULE:insert_psks/1, [Entries]),
             ok;
         {error, {bad_format, {line, N}}} ->
             {error, {bad_format, {line, NChunk * ChunkSize + N}}};
@@ -36,6 +36,15 @@
     size/1
 ]).
 
+%% Internal exports (RPC)
+-export([
+    do_store_retained/1,
+    do_clear_expired/0,
+    do_delete_message/1,
+    do_populate_index_meta/1,
+    do_reindex_batch/2
+]).
+
 %% Management API:
 -export([topics/0]).
 
@@ -126,26 +135,8 @@ create_table(Table, RecordName, Attributes, Type, StorageType) ->
             ok
     end.
 
-store_retained(_, #message{topic = Topic} = Msg) ->
-    ExpiryTime = emqx_retainer:get_expiry_time(Msg),
-    Tokens = topic_to_tokens(Topic),
-    Fun =
-        case is_table_full() of
-            false ->
-                fun() ->
-                    store_retained(db_indices(write), Msg, Tokens, ExpiryTime)
-                end;
-            _ ->
-                fun() ->
-                    case mnesia:read(?TAB_MESSAGE, Tokens, write) of
-                        [_] ->
-                            store_retained(db_indices(write), Msg, Tokens, ExpiryTime);
-                        [] ->
-                            mnesia:abort(table_is_full)
-                    end
-                end
-        end,
-    case mria:transaction(?RETAINER_SHARD, Fun) of
+store_retained(_, Msg = #message{topic = Topic}) ->
+    case mria:transaction(?RETAINER_SHARD, fun ?MODULE:do_store_retained/1, [Msg]) of
         {atomic, ok} ->
             ?tp(debug, message_retained, #{topic => Topic}),
             ok;
@@ -157,7 +148,26 @@ store_retained(_, #message{topic = Topic} = Msg) ->
             })
     end.
 
+do_store_retained(#message{topic = Topic} = Msg) ->
+    ExpiryTime = emqx_retainer:get_expiry_time(Msg),
+    Tokens = topic_to_tokens(Topic),
+    case is_table_full() of
+        false ->
+            store_retained(db_indices(write), Msg, Tokens, ExpiryTime);
+        _ ->
+            case mnesia:read(?TAB_MESSAGE, Tokens, write) of
+                [_] ->
+                    store_retained(db_indices(write), Msg, Tokens, ExpiryTime);
+                [] ->
+                    mnesia:abort(table_is_full)
+            end
+    end.
+
 clear_expired(_) ->
+    {atomic, _} = mria:transaction(?RETAINER_SHARD, fun ?MODULE:do_clear_expired/0),
+    ok.
+
+do_clear_expired() ->
     NowMs = erlang:system_time(millisecond),
     QH = qlc:q([
         TopicTokens
@@ -167,36 +177,29 @@ clear_expired(_) ->
         } <- mnesia:table(?TAB_MESSAGE, [{lock, write}]),
         (ExpiryTime =/= 0) and (ExpiryTime < NowMs)
     ]),
-    Fun = fun() ->
-        QC = qlc:cursor(QH),
-        clear_batch(db_indices(write), QC)
-    end,
-    {atomic, _} = mria:transaction(?RETAINER_SHARD, Fun),
-    ok.
+    QC = qlc:cursor(QH),
+    clear_batch(db_indices(write), QC).
 
 delete_message(_, Topic) ->
-    Tokens = topic_to_tokens(Topic),
-    DeleteFun =
-        case emqx_topic:wildcard(Topic) of
-            false ->
-                fun() ->
-                    ok = delete_message_by_topic(Tokens, db_indices(write))
-                end;
-            true ->
-                fun() ->
-                    QH = topic_search_table(Tokens),
-                    qlc:fold(
-                        fun(TopicTokens, _) ->
-                            ok = delete_message_by_topic(TopicTokens, db_indices(write))
-                        end,
-                        undefined,
-                        QH
-                    )
-                end
-        end,
-    {atomic, _} = mria:transaction(?RETAINER_SHARD, DeleteFun),
+    {atomic, _} = mria:transaction(?RETAINER_SHARD, fun ?MODULE:do_delete_message/1, [Topic]),
     ok.
 
+do_delete_message(Topic) ->
+    Tokens = topic_to_tokens(Topic),
+    case emqx_topic:wildcard(Topic) of
+        false ->
+            ok = delete_message_by_topic(Tokens, db_indices(write));
+        true ->
+            QH = topic_search_table(Tokens),
+            qlc:fold(
+                fun(TopicTokens, _) ->
+                    ok = delete_message_by_topic(TopicTokens, db_indices(write))
+                end,
+                undefined,
+                QH
+            )
+    end.
+
 read_message(_, Topic) ->
     {ok, read_messages(Topic)}.
 
@@ -267,16 +270,11 @@ reindex(Force, StatusFun) ->
     reindex(config_indices(), Force, StatusFun).
 
 reindex_status() ->
-    Fun = fun() ->
-        mnesia:read(?TAB_INDEX_META, ?META_KEY)
-    end,
-    case mria:transaction(?RETAINER_SHARD, Fun) of
-        {atomic, [#retained_index_meta{reindexing = true}]} ->
+    case mnesia:dirty_read(?TAB_INDEX_META, ?META_KEY) of
+        [#retained_index_meta{reindexing = true}] ->
             true;
-        {atomic, _} ->
-            false;
-        {aborted, Reason} ->
-            {error, Reason}
+        _ ->
+            false
     end.
 
 %%--------------------------------------------------------------------
@@ -439,37 +437,7 @@ config_indices() ->
 
 populate_index_meta() ->
     ConfigIndices = config_indices(),
-    Fun = fun() ->
-        case mnesia:read(?TAB_INDEX_META, ?META_KEY, write) of
-            [
-                #retained_index_meta{
-                    read_indices = ReadIndices,
-                    write_indices = WriteIndices,
-                    reindexing = Reindexing
-                }
-            ] ->
-                case {ReadIndices, WriteIndices, Reindexing} of
-                    {_, _, true} ->
-                        ok;
-                    {ConfigIndices, ConfigIndices, false} ->
-                        ok;
-                    {DBWriteIndices, DBReadIndices, false} ->
-                        {error, DBWriteIndices, DBReadIndices}
-                end;
-            [] ->
-                mnesia:write(
-                    ?TAB_INDEX_META,
-                    #retained_index_meta{
-                        key = ?META_KEY,
-                        read_indices = ConfigIndices,
-                        write_indices = ConfigIndices,
-                        reindexing = false
-                    },
-                    write
-                )
-        end
-    end,
-    case mria:transaction(?RETAINER_SHARD, Fun) of
+    case mria:transaction(?RETAINER_SHARD, fun ?MODULE:do_populate_index_meta/1, [ConfigIndices]) of
         {atomic, ok} ->
             ok;
         {atomic, {error, DBWriteIndices, DBReadIndices}} ->
@@ -488,6 +456,36 @@ populate_index_meta() ->
             {error, Reason}
     end.
 
+do_populate_index_meta(ConfigIndices) ->
+    case mnesia:read(?TAB_INDEX_META, ?META_KEY, write) of
+        [
+            #retained_index_meta{
+                read_indices = ReadIndices,
+                write_indices = WriteIndices,
+                reindexing = Reindexing
+            }
+        ] ->
+            case {ReadIndices, WriteIndices, Reindexing} of
+                {_, _, true} ->
+                    ok;
+                {ConfigIndices, ConfigIndices, false} ->
+                    ok;
+                {DBWriteIndices, DBReadIndices, false} ->
+                    {error, DBWriteIndices, DBReadIndices}
+            end;
+        [] ->
+            mnesia:write(
+                ?TAB_INDEX_META,
+                #retained_index_meta{
+                    key = ?META_KEY,
+                    read_indices = ConfigIndices,
+                    write_indices = ConfigIndices,
+                    reindexing = false
+                },
+                write
+            )
+    end.
+
 db_indices(Type) ->
     case mnesia:read(?TAB_INDEX_META, ?META_KEY) of
         [#retained_index_meta{read_indices = ReadIndices, write_indices = WriteIndices}] ->
@@ -533,6 +531,7 @@ reindex(NewIndices, Force, StatusFun) when
     end.
 
 try_start_reindex(NewIndices, true) ->
+    %% Note: we don't expect reindexing during upgrade, so this function is internal
     mria:transaction(
         ?RETAINER_SHARD,
         fun() -> start_reindex(NewIndices) end
@@ -566,6 +565,7 @@ start_reindex(NewIndices) ->
     ).
 
 finalize_reindex() ->
+    %% Note: we don't expect reindexing during upgrade, so this function is internal
     {atomic, ok} = mria:transaction(
         ?RETAINER_SHARD,
         fun() ->
@@ -601,16 +601,7 @@ reindex_topic(Indices, Topic) ->
     end.
 
 reindex_batch(QC, Done, StatusFun) ->
-    Fun = fun() ->
-        Indices = db_indices(write),
-        {Status, Topics} = qlc_next_answers(QC, ?REINDEX_BATCH_SIZE),
-        ok = lists:foreach(
-            fun(Topic) -> reindex_topic(Indices, Topic) end,
-            Topics
-        ),
-        {Status, Done + length(Topics)}
-    end,
-    case mria:transaction(?RETAINER_SHARD, Fun) of
+    case mria:transaction(?RETAINER_SHARD, fun ?MODULE:do_reindex_batch/2, [QC, Done]) of
         {atomic, {more, NewDone}} ->
             _ = StatusFun(NewDone),
             reindex_batch(QC, NewDone, StatusFun);
@@ -625,6 +616,15 @@ reindex_batch(QC, Done, StatusFun) ->
             {error, Reason}
     end.
 
+do_reindex_batch(QC, Done) ->
+    Indices = db_indices(write),
+    {Status, Topics} = qlc_next_answers(QC, ?REINDEX_BATCH_SIZE),
+    ok = lists:foreach(
+        fun(Topic) -> reindex_topic(Indices, Topic) end,
+        Topics
+    ),
+    {Status, Done + length(Topics)}.
+
 wait_dispatch_complete(Timeout) ->
     Nodes = mria_mnesia:running_nodes(),
     {Results, []} = emqx_retainer_proto_v1:wait_dispatch_complete(Nodes, Timeout),
@@ -45,9 +45,7 @@ retainer(["reindex", "status"]) ->
         true ->
            ?PRINT_MSG("Reindexing is in progress~n");
         false ->
-            ?PRINT_MSG("Reindexing is not running~n");
-        {error, Reason} ->
-            ?PRINT("Can't get reindex status: ~p~n", [Reason])
+            ?PRINT_MSG("Reindexing is not running~n")
     end;
 retainer(["reindex", "start"]) ->
     retainer(["reindex", "start", "false"]);