Merge branch 'master' into api-key-secret

commit f95ef04cf7
@@ -13,7 +13,7 @@
 , {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}}
 , {typerefl, {git, "https://github.com/k32/typerefl", {tag, "0.8.5"}}}
 , {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}}
-, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.8.3"}}}
+, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}
 , {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.0"}}}
 , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.11.1"}}}
 , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.5.1"}}}
@@ -82,7 +82,7 @@
 %% Authentication Data Cache
 auth_cache :: maybe(map()),
 %% Quota checkers
-quota :: maybe(emqx_limiter:limiter()),
+quota :: maybe(emqx_limiter_container:limiter()),
 %% Timers
 timers :: #{atom() => disabled | maybe(reference())},
 %% Conn State
@@ -120,6 +120,7 @@
 }).

 -define(INFO_KEYS, [conninfo, conn_state, clientinfo, session, will_msg]).
+-define(LIMITER_ROUTING, message_routing).

 -dialyzer({no_match, [shutdown/4, ensure_timer/2, interval/2]}).
@@ -200,14 +201,13 @@ caps(#channel{clientinfo = #{zone := Zone}}) ->
 -spec(init(emqx_types:conninfo(), opts()) -> channel()).
 init(ConnInfo = #{peername := {PeerHost, _Port},
 sockname := {_Host, SockPort}},
-#{zone := Zone, listener := {Type, Listener}}) ->
+#{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener}}) ->
 Peercert = maps:get(peercert, ConnInfo, undefined),
 Protocol = maps:get(protocol, ConnInfo, mqtt),
 MountPoint = case emqx_config:get_listener_conf(Type, Listener, [mountpoint]) of
 <<>> -> undefined;
 MP -> MP
 end,
-QuotaPolicy = emqx_config:get_zone_conf(Zone, [quota], #{}),
 ClientInfo = set_peercert_infos(
 Peercert,
 #{zone => Zone,
@@ -228,7 +228,7 @@ init(ConnInfo = #{peername := {PeerHost, _Port},
 outbound => #{}
 },
 auth_cache = #{},
-quota = emqx_limiter:init(Zone, quota_policy(QuotaPolicy)),
+quota = emqx_limiter_container:get_limiter_by_names([?LIMITER_ROUTING], LimiterCfg),
 timers = #{},
 conn_state = idle,
 takeover = false,
@@ -236,11 +236,6 @@ init(ConnInfo = #{peername := {PeerHost, _Port},
 pendings = []
 }.

-quota_policy(RawPolicy) ->
-[{Name, {list_to_integer(StrCount),
-erlang:trunc(hocon_postprocess:duration(StrWind) / 1000)}}
-|| {Name, [StrCount, StrWind]} <- maps:to_list(RawPolicy)].
-
 set_peercert_infos(NoSSL, ClientInfo, _)
 when NoSSL =:= nossl;
 NoSSL =:= undefined ->
@@ -255,7 +250,7 @@ set_peercert_infos(Peercert, ClientInfo, Zone) ->
 dn -> DN;
 crt -> Peercert;
 pem when is_binary(Peercert) -> base64:encode(Peercert);
-md5 when is_binary(Peercert) -> emqx_passwd:hash(md5, Peercert);
+md5 when is_binary(Peercert) -> emqx_passwd:hash_data(md5, Peercert);
 _ -> undefined
 end
 end,
@@ -653,10 +648,10 @@ ensure_quota(PubRes, Channel = #channel{quota = Limiter}) ->
 ({_, _, {ok, I}}, N) -> N + I;
 (_, N) -> N
 end, 1, PubRes),
-case emqx_limiter:check(#{cnt => Cnt, oct => 0}, Limiter) of
+case emqx_limiter_container:check(Cnt, ?LIMITER_ROUTING, Limiter) of
 {ok, NLimiter} ->
 Channel#channel{quota = NLimiter};
-{pause, Intv, NLimiter} ->
+{_, Intv, NLimiter} ->
 ensure_timer(quota_timer, Intv, Channel#channel{quota = NLimiter})
 end.
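Illustrative sketch (editor's addition, not part of this changeset): how the channel-side quota now flows through the container API used above. The module and function names below are made up for the example; only the emqx_limiter_container calls come from this diff.

-module(quota_flow_example).
-export([init_quota/1, on_route/2]).

%% Build a container holding only the message_routing limiter,
%% mirroring init/2 in emqx_channel.
init_quota(LimiterCfg) ->
    emqx_limiter_container:get_limiter_by_names([message_routing], LimiterCfg).

%% Charge N routed messages against the quota, mirroring ensure_quota/2.
on_route(N, Quota) ->
    case emqx_limiter_container:check(N, message_routing, Quota) of
        {ok, Quota2}             -> {allow, Quota2};
        {pause, PauseMs, Quota2} -> {pause, PauseMs, Quota2};
        {drop, Quota2}           -> {drop, Quota2}
    end.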
@@ -1005,10 +1000,9 @@ handle_call({takeover, 'end'}, Channel = #channel{session = Session,
 handle_call(list_authz_cache, Channel) ->
 {reply, emqx_authz_cache:list_authz_cache(), Channel};

-handle_call({quota, Policy}, Channel) ->
-Zone = info(zone, Channel),
-Quota = emqx_limiter:init(Zone, Policy),
-reply(ok, Channel#channel{quota = Quota});
+handle_call({quota, Bucket}, #channel{quota = Quota} = Channel) ->
+Quota2 = emqx_limiter_container:update_by_name(message_routing, Bucket, Quota),
+reply(ok, Channel#channel{quota = Quota2});

 handle_call({keepalive, Interval}, Channel = #channel{keepalive = KeepAlive,
 conninfo = ConnInfo}) ->
@@ -1147,8 +1141,15 @@ handle_timeout(_TRef, will_message, Channel = #channel{will_msg = WillMsg}) ->
 (WillMsg =/= undefined) andalso publish_will_msg(WillMsg),
 {ok, clean_timer(will_timer, Channel#channel{will_msg = undefined})};

-handle_timeout(_TRef, expire_quota_limit, Channel) ->
-{ok, clean_timer(quota_timer, Channel)};
+handle_timeout(_TRef, expire_quota_limit,
+#channel{quota = Quota} = Channel) ->
+case emqx_limiter_container:retry(?LIMITER_ROUTING, Quota) of
+{_, Intv, Quota2} ->
+Channel2 = ensure_timer(quota_timer, Intv, Channel#channel{quota = Quota2}),
+{ok, Channel2};
+{_, Quota2} ->
+{ok, clean_timer(quota_timer, Channel#channel{quota = Quota2})}
+end;

 handle_timeout(_TRef, Msg, Channel) ->
 ?SLOG(error, #{msg => "unexpected_timeout", timeout_msg => Msg}),
@@ -291,8 +291,9 @@ create_session(ClientInfo, ConnInfo) ->
 ok = emqx_hooks:run('session.created', [ClientInfo, emqx_session:info(Session)]),
 Session.

-get_session_confs(#{zone := Zone}, #{receive_maximum := MaxInflight, expiry_interval := EI}) ->
-#{max_subscriptions => get_mqtt_conf(Zone, max_subscriptions),
+get_session_confs(#{zone := Zone, clientid := ClientId}, #{receive_maximum := MaxInflight, expiry_interval := EI}) ->
+#{clientid => ClientId,
+max_subscriptions => get_mqtt_conf(Zone, max_subscriptions),
 upgrade_qos => get_mqtt_conf(Zone, upgrade_qos),
 max_inflight => MaxInflight,
 retry_interval => get_mqtt_conf(Zone, retry_interval),
@@ -301,7 +302,8 @@ get_session_confs(#{zone := Zone}, #{receive_maximum := MaxInflight, expiry_inte
 %% TODO: Add conf for allowing/disallowing persistent sessions.
 %% Note that the connection info is already enriched to have
 %% default config values for session expiry.
-is_persistent => EI > 0
+is_persistent => EI > 0,
+latency_stats => emqx_config:get_zone_conf(Zone, [latency_stats])
 }.

 mqueue_confs(Zone) ->
@@ -248,7 +248,7 @@ init_load(SchemaMod) ->
 init_load(SchemaMod, ConfFiles).

 %% @doc Initial load of the given config files.
-%% NOTE: The order of the files is significant, configs from files orderd
+%% NOTE: The order of the files is significant, configs from files ordered
 %% in the rear of the list overrides prior values.
 -spec init_load(module(), [string()] | binary() | hocon:config()) -> ok.
 init_load(SchemaMod, Conf) when is_list(Conf) orelse is_binary(Conf) ->
@@ -67,8 +67,7 @@
 -export([set_field/3]).

 -import(emqx_misc,
-[ maybe_apply/2
-, start_timer/2
+[ start_timer/2
 ]).

 -record(state, {
@@ -82,11 +81,6 @@
 sockname :: emqx_types:peername(),
 %% Sock State
 sockstate :: emqx_types:sockstate(),
-%% Limiter
-limiter :: maybe(emqx_limiter:limiter()),
-%% Limit Timer
-limit_timer :: maybe(reference()),
 %% Parse State
 parse_state :: emqx_frame:parse_state(),
 %% Serialize options
 serialize :: emqx_frame:serialize_opts(),
@@ -103,10 +97,30 @@
 %% Zone name
 zone :: atom(),
 %% Listener Type and Name
-listener :: {Type::atom(), Name::atom()}
-}).
+listener :: {Type::atom(), Name::atom()},
+
+%% Limiter
+limiter :: maybe(limiter()),
+
+%% cache operation when overload
+limiter_cache :: queue:queue(cache()),
+
+%% limiter timers
+limiter_timer :: undefined | reference()
+}).
+
+-record(retry, { types :: list(limiter_type())
+, data :: any()
+, next :: check_succ_handler()
+}).
+
+-record(cache, { need :: list({pos_integer(), limiter_type()})
+, data :: any()
+, next :: check_succ_handler()
+}).
+
 -type(state() :: #state{}).
+-type cache() :: #cache{}.

 -define(ACTIVE_N, 100).
 -define(INFO_KEYS, [socktype, peername, sockname, sockstate]).
@@ -127,6 +141,11 @@
 -define(ALARM_SOCK_STATS_KEYS, [send_pend, recv_cnt, recv_oct, send_cnt, send_oct]).
 -define(ALARM_SOCK_OPTS_KEYS, [high_watermark, high_msgq_watermark, sndbuf, recbuf, buffer]).

+%% use macro to do compile time limiter's type check
+-define(LIMITER_BYTES_IN, bytes_in).
+-define(LIMITER_MESSAGE_IN, message_in).
+-define(EMPTY_QUEUE, {[], []}).
+
 -dialyzer({no_match, [info/2]}).
 -dialyzer({nowarn_function, [ init/4
 , init_state/3
@@ -170,10 +189,10 @@ info(sockstate, #state{sockstate = SockSt}) ->
 SockSt;
 info(stats_timer, #state{stats_timer = StatsTimer}) ->
 StatsTimer;
-info(limit_timer, #state{limit_timer = LimitTimer}) ->
-LimitTimer;
 info(limiter, #state{limiter = Limiter}) ->
-maybe_apply(fun emqx_limiter:info/1, Limiter).
+Limiter;
+info(limiter_timer, #state{limiter_timer = Timer}) ->
+Timer.

 %% @doc Get stats of the connection/channel.
 -spec(stats(pid() | state()) -> emqx_types:stats()).
@@ -244,7 +263,8 @@ init(Parent, Transport, RawSocket, Options) ->
 exit_on_sock_error(Reason)
 end.

-init_state(Transport, Socket, #{zone := Zone, listener := Listener} = Opts) ->
+init_state(Transport, Socket,
+#{zone := Zone, limiter := LimiterCfg, listener := Listener} = Opts) ->
 {ok, Peername} = Transport:ensure_ok_or_exit(peername, [Socket]),
 {ok, Sockname} = Transport:ensure_ok_or_exit(sockname, [Socket]),
 Peercert = Transport:ensure_ok_or_exit(peercert, [Socket]),
@@ -254,7 +274,10 @@ init_state(Transport, Socket, #{zone := Zone, listener := Listener} = Opts) ->
 peercert => Peercert,
 conn_mod => ?MODULE
 },
-Limiter = emqx_limiter:init(Zone, undefined, undefined, []),
+
+LimiterTypes = [?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN],
+Limiter = emqx_limiter_container:get_limiter_by_names(LimiterTypes, LimiterCfg),
+
 FrameOpts = #{
 strict_mode => emqx_config:get_zone_conf(Zone, [mqtt, strict_mode]),
 max_size => emqx_config:get_zone_conf(Zone, [mqtt, max_packet_size])
@@ -286,7 +309,9 @@ init_state(Transport, Socket, #{zone := Zone, listener := Listener} = Opts) ->
 idle_timeout = IdleTimeout,
 idle_timer = IdleTimer,
 zone = Zone,
-listener = Listener
+listener = Listener,
+limiter_cache = queue:new(),
+limiter_timer = undefined
 }.

 run_loop(Parent, State = #state{transport = Transport,
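Illustrative sketch (editor's addition, not part of this changeset): the connection-side counterpart, charging both byte and message budgets in one call. Only the emqx_limiter_container functions are from this diff; the module and return tags are hypothetical.

-module(conn_limiter_example).
-export([new/1, charge/3]).

%% Mirror init_state/3: one container carrying bytes_in and message_in.
new(LimiterCfg) ->
    emqx_limiter_container:get_limiter_by_names([bytes_in, message_in], LimiterCfg).

%% Charge a batch of incoming data (Bytes octets, MsgCount packets).
charge(Bytes, MsgCount, Limiter) ->
    Needs = [{Bytes, bytes_in}, {MsgCount, message_in}],
    case emqx_limiter_container:check_list(Needs, Limiter) of
        {ok, Limiter2}             -> {run, Limiter2};
        {pause, PauseMs, Limiter2} -> {pause, PauseMs, Limiter2};
        {drop, Limiter2}           -> {drop, Limiter2}
    end.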
@ -428,14 +453,23 @@ handle_msg({Inet, _Sock, Data}, State) when Inet == tcp; Inet == ssl ->
|
|||
Oct = iolist_size(Data),
|
||||
inc_counter(incoming_bytes, Oct),
|
||||
ok = emqx_metrics:inc('bytes.received', Oct),
|
||||
parse_incoming(Data, State);
|
||||
when_bytes_in(Oct, Data, State);
|
||||
|
||||
handle_msg({quic, Data, _Sock, _, _, _}, State) ->
|
||||
?SLOG(debug, #{msg => "RECV_data", data => Data, transport => quic}),
|
||||
Oct = iolist_size(Data),
|
||||
inc_counter(incoming_bytes, Oct),
|
||||
ok = emqx_metrics:inc('bytes.received', Oct),
|
||||
parse_incoming(Data, State);
|
||||
when_bytes_in(Oct, Data, State);
|
||||
|
||||
handle_msg(check_cache, #state{limiter_cache = Cache} = State) ->
|
||||
case queue:peek(Cache) of
|
||||
empty ->
|
||||
activate_socket(State);
|
||||
{value, #cache{need = Needs, data = Data, next = Next}} ->
|
||||
State2 = State#state{limiter_cache = queue:drop(Cache)},
|
||||
check_limiter(Needs, Data, Next, [check_cache], State2)
|
||||
end;
|
||||
|
||||
handle_msg({incoming, Packet = ?CONNECT_PACKET(ConnPkt)},
|
||||
State = #state{idle_timer = IdleTimer}) ->
|
||||
|
@@ -466,14 +500,12 @@ handle_msg({Passive, _Sock}, State)
 Pubs = emqx_pd:reset_counter(incoming_pubs),
 Bytes = emqx_pd:reset_counter(incoming_bytes),
 InStats = #{cnt => Pubs, oct => Bytes},
-%% Ensure Rate Limit
-NState = ensure_rate_limit(InStats, State),
 %% Run GC and Check OOM
-NState1 = check_oom(run_gc(InStats, NState)),
+NState1 = check_oom(run_gc(InStats, State)),
 handle_info(activate_socket, NState1);

-handle_msg(Deliver = {deliver, _Topic, _Msg}, #state{
-listener = {Type, Listener}} = State) ->
+handle_msg(Deliver = {deliver, _Topic, _Msg},
+#state{listener = {Type, Listener}} = State) ->
 ActiveN = get_active_n(Type, Listener),
 Delivers = [Deliver | emqx_misc:drain_deliver(ActiveN)],
 with_channel(handle_deliver, [Delivers], State);
@@ -579,10 +611,12 @@ handle_call(_From, info, State) ->
 handle_call(_From, stats, State) ->
 {reply, stats(State), State};

-handle_call(_From, {ratelimit, Policy}, State = #state{channel = Channel}) ->
-Zone = emqx_channel:info(zone, Channel),
-Limiter = emqx_limiter:init(Zone, Policy),
-{reply, ok, State#state{limiter = Limiter}};
+handle_call(_From, {ratelimit, Changes}, State = #state{limiter = Limiter}) ->
+Fun = fun({Type, Bucket}, Acc) ->
+emqx_limiter_container:update_by_name(Type, Bucket, Acc)
+end,
+Limiter2 = lists:foldl(Fun, Limiter, Changes),
+{reply, ok, State#state{limiter = Limiter2}};

 handle_call(_From, Req, State = #state{channel = Channel}) ->
 case emqx_channel:handle_call(Req, Channel) of
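Editor's note: after this change the {ratelimit, _} request carries a list of {limiter_type, bucket_name} pairs instead of a single policy term. A minimal illustrative fragment, assuming the existing emqx_connection:call/2 entry point and hypothetical 'default' bucket names matching the new etc/limiter.conf further down this diff:

update_conn_ratelimit(ConnPid) ->
    %% switch both ingress limiters of this connection to the 'default' bucket
    Changes = [{bytes_in, default}, {message_in, default}],
    ok = emqx_connection:call(ConnPid, {ratelimit, Changes}).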
@@ -603,10 +637,7 @@ handle_timeout(_TRef, idle_timeout, State) ->
 shutdown(idle_timeout, State);

 handle_timeout(_TRef, limit_timeout, State) ->
-NState = State#state{sockstate = idle,
-limit_timer = undefined
-},
-handle_info(activate_socket, NState);
+retry_limiter(State);

 handle_timeout(_TRef, emit_stats, State = #state{channel = Channel, transport = Transport,
 socket = Socket}) ->
@ -634,11 +665,23 @@ handle_timeout(TRef, Msg, State) ->
|
|||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Parse incoming data
|
||||
|
||||
-compile({inline, [parse_incoming/2]}).
|
||||
parse_incoming(Data, State) ->
|
||||
-compile({inline, [when_bytes_in/3]}).
|
||||
when_bytes_in(Oct, Data, State) ->
|
||||
{Packets, NState} = parse_incoming(Data, [], State),
|
||||
{ok, next_incoming_msgs(Packets), NState}.
|
||||
Len = erlang:length(Packets),
|
||||
check_limiter([{Oct, ?LIMITER_BYTES_IN}, {Len, ?LIMITER_MESSAGE_IN}],
|
||||
Packets,
|
||||
fun next_incoming_msgs/3,
|
||||
[],
|
||||
NState).
|
||||
|
||||
-compile({inline, [next_incoming_msgs/3]}).
|
||||
next_incoming_msgs([Packet], Msgs, State) ->
|
||||
{ok, [{incoming, Packet} | Msgs], State};
|
||||
next_incoming_msgs(Packets, Msgs, State) ->
|
||||
Fun = fun(Packet, Acc) -> [{incoming, Packet} | Acc] end,
|
||||
Msgs2 = lists:foldl(Fun, Msgs, Packets),
|
||||
{ok, Msgs2, State}.
|
||||
|
||||
parse_incoming(<<>>, Packets, State) ->
|
||||
{Packets, State};
|
||||
|
@@ -668,12 +711,6 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) ->
 {[{frame_error, Reason} | Packets], State}
 end.

--compile({inline, [next_incoming_msgs/1]}).
-next_incoming_msgs([Packet]) ->
-{incoming, Packet};
-next_incoming_msgs(Packets) ->
-[{incoming, Packet} || Packet <- lists:reverse(Packets)].
-
 %%--------------------------------------------------------------------
 %% Handle incoming packet

@ -810,20 +847,82 @@ handle_cast(Req, State) ->
|
|||
State.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Ensure rate limit
|
||||
%% rate limit
|
||||
|
||||
ensure_rate_limit(Stats, State = #state{limiter = Limiter}) ->
|
||||
case ?ENABLED(Limiter) andalso emqx_limiter:check(Stats, Limiter) of
|
||||
false -> State;
|
||||
{ok, Limiter1} ->
|
||||
State#state{limiter = Limiter1};
|
||||
{pause, Time, Limiter1} ->
|
||||
?SLOG(warning, #{msg => "pause_time_due_to_rate_limit", time_in_ms => Time}),
|
||||
TRef = start_timer(Time, limit_timeout),
|
||||
State#state{sockstate = blocked,
|
||||
limiter = Limiter1,
|
||||
limit_timer = TRef
|
||||
}
|
||||
-type limiter_type() :: emqx_limiter_container:limiter_type().
|
||||
-type limiter() :: emqx_limiter_container:limiter().
|
||||
-type check_succ_handler() ::
|
||||
fun((any(), list(any()), state()) -> _).
|
||||
|
||||
%% check limiters; if the check succeeds, call WhenOk with Data and Msgs
|
||||
%% Data is the data to be processed
|
||||
%% Msgs includes the next messages to handle after Data is processed
|
||||
-spec check_limiter(list({pos_integer(), limiter_type()}),
|
||||
any(),
|
||||
check_succ_handler(),
|
||||
list(any()),
|
||||
state()) -> _.
|
||||
check_limiter(Needs,
|
||||
Data,
|
||||
WhenOk,
|
||||
Msgs,
|
||||
#state{limiter = Limiter,
|
||||
limiter_timer = LimiterTimer,
|
||||
limiter_cache = Cache} = State) when Limiter =/= undefined ->
|
||||
case LimiterTimer of
|
||||
undefined ->
|
||||
case emqx_limiter_container:check_list(Needs, Limiter) of
|
||||
{ok, Limiter2} ->
|
||||
WhenOk(Data, Msgs, State#state{limiter = Limiter2});
|
||||
{pause, Time, Limiter2} ->
|
||||
?SLOG(warning, #{msg => "pause_time_due_to_rate_limit",
|
||||
needs => Needs,
|
||||
time_in_ms => Time}),
|
||||
|
||||
Retry = #retry{types = [Type || {_, Type} <- Needs],
|
||||
data = Data,
|
||||
next = WhenOk},
|
||||
|
||||
Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2),
|
||||
|
||||
TRef = start_timer(Time, limit_timeout),
|
||||
|
||||
{ok, State#state{limiter = Limiter3,
|
||||
limiter_timer = TRef}};
|
||||
{drop, Limiter2} ->
|
||||
{ok, State#state{limiter = Limiter2}}
|
||||
end;
|
||||
_ ->
|
||||
%% if there is a retry timer, cache the operation and execute it after the retry is over
|
||||
%% TODO: maybe we need to set socket to passive if size of queue is very large
|
||||
%% because we queue up lots of ops that check with the limiters.
|
||||
New = #cache{need = Needs, data = Data, next = WhenOk},
|
||||
{ok, State#state{limiter_cache = queue:in(New, Cache)}}
|
||||
end;
|
||||
|
||||
check_limiter(_, Data, WhenOk, Msgs, State) ->
|
||||
WhenOk(Data, Msgs, State).
|
||||
|
||||
%% try to perform a retry
|
||||
-spec retry_limiter(state()) -> _.
|
||||
retry_limiter(#state{limiter = Limiter} = State) ->
|
||||
#retry{types = Types, data = Data, next = Next} = emqx_limiter_container:get_retry_context(Limiter),
|
||||
case emqx_limiter_container:retry_list(Types, Limiter) of
|
||||
{ok, Limiter2} ->
|
||||
Next(Data,
|
||||
[check_cache],
|
||||
State#state{ limiter = Limiter2
|
||||
, limiter_timer = undefined
|
||||
});
|
||||
{pause, Time, Limiter2} ->
|
||||
?SLOG(warning, #{msg => "pause_time_due_to_rate_limit",
|
||||
types => Types,
|
||||
time_in_ms => Time}),
|
||||
|
||||
TRef = start_timer(Time, limit_timeout),
|
||||
|
||||
{ok, State#state{limiter = Limiter2,
|
||||
limiter_timer = TRef}}
|
||||
end.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
|
@ -852,19 +951,25 @@ check_oom(State = #state{channel = Channel}) ->
|
|||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Activate Socket
|
||||
|
||||
%% TODO: maybe we could keep socket passive for receiving socket closed event.
|
||||
-compile({inline, [activate_socket/1]}).
|
||||
activate_socket(State = #state{sockstate = closed}) ->
|
||||
{ok, State};
|
||||
activate_socket(State = #state{sockstate = blocked}) ->
|
||||
{ok, State};
|
||||
activate_socket(State = #state{transport = Transport, socket = Socket,
|
||||
listener = {Type, Listener}}) ->
|
||||
activate_socket(#state{limiter_timer = Timer} = State)
|
||||
when Timer =/= undefined ->
|
||||
{ok, State#state{sockstate = blocked}};
|
||||
|
||||
activate_socket(#state{transport = Transport,
|
||||
sockstate = SockState,
|
||||
socket = Socket,
|
||||
listener = {Type, Listener}} = State)
|
||||
when SockState =/= closed ->
|
||||
ActiveN = get_active_n(Type, Listener),
|
||||
case Transport:setopts(Socket, [{active, ActiveN}]) of
|
||||
ok -> {ok, State#state{sockstate = running}};
|
||||
Error -> Error
|
||||
end.
|
||||
end;
|
||||
|
||||
activate_socket(State) ->
|
||||
{ok, State}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Close Socket
|
||||
|
@@ -943,6 +1048,6 @@ get_state(Pid) ->
 maps:from_list(lists:zip(record_info(fields, state),
 tl(tuple_to_list(State)))).

-get_active_n(quic, _Listener) -> 100;
+get_active_n(quic, _Listener) -> ?ACTIVE_N;
 get_active_n(Type, Listener) ->
 emqx_config:get_listener_conf(Type, Listener, [tcp, active_n]).
@@ -0,0 +1,52 @@
##--------------------------------------------------------------------
## EMQ X Rate Limiter
##--------------------------------------------------------------------
emqx_limiter {
  bytes_in {
    global.rate = infinity # token generation rate
    zone.default.rate = infinity
    bucket.default {
      zone = default
      aggregated.rate = infinity
      aggregated.capacity = infinity
      per_client.rate = infinity
      per_client.capacity = infinity
    }
  }

  message_in {
    global.rate = infinity
    zone.default.rate = infinity
    bucket.default {
      zone = default
      aggregated.rate = infinity
      aggregated.capacity = infinity
      per_client.rate = infinity
      per_client.capacity = infinity
    }
  }

  connection {
    global.rate = infinity
    zone.default.rate = infinity
    bucket.default {
      zone = default
      aggregated.rate = infinity
      aggregated.capacity = infinity
      per_client.rate = infinity
      per_client.capacity = infinity
    }
  }

  message_routing {
    global.rate = infinity
    zone.default.rate = infinity
    bucket.default {
      zone = default
      aggregated.rate = infinity
      aggregated.capacity = infinity
      per_client.rate = infinity
      per_client.capacity = infinity
    }
  }
}
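Illustrative example (editor's addition, not part of this changeset): a concrete, non-infinity bytes_in bucket. The value syntax follows the converters added in emqx_limiter_schema further down this diff: rates are "<quota>/<duration>" strings parsed by to_rate/1, capacities accept KB/MB/GB units via to_capacity/1. The specific numbers are hypothetical.

# bytes_in capped at roughly 10 MB/s per zone, 100 KB/s per client
emqx_limiter {
  bytes_in {
    global.rate = infinity
    zone.default.rate = "10MB/1s"
    bucket.default {
      zone = default
      aggregated.rate = "10MB/1s"
      aggregated.capacity = "100MB"
      per_client.rate = "100KB/1s"
      per_client.capacity = "10MB"
    }
  }
}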
@ -0,0 +1,358 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_htb_limiter).
|
||||
|
||||
%% @doc the limiter of the hierarchical token bucket system
|
||||
%% this module provides APIs for creating limiters, consuming tokens, checking tokens and retrying
|
||||
%% @end
|
||||
|
||||
%% API
|
||||
-export([ make_token_bucket_limiter/2, make_ref_limiter/2, check/2
|
||||
, consume/2, set_retry/2, retry/1, make_infinity_limiter/1
|
||||
, make_future/1, available/1
|
||||
]).
|
||||
-export_type([token_bucket_limiter/0]).
|
||||
|
||||
%% a token bucket limiter with a limiter server's bucket reference
|
||||
-type token_bucket_limiter() :: #{ tokens := non_neg_integer() %% the number of tokens currently available
|
||||
, rate := decimal()
|
||||
, capacity := decimal()
|
||||
, lasttime := millisecond()
|
||||
, max_retry_time := non_neg_integer() %% @see emqx_limiter_schema
|
||||
, failure_strategy := failure_strategy() %% @see emqx_limiter_schema
|
||||
, divisible := boolean() %% @see emqx_limiter_schema
|
||||
, low_water_mark := non_neg_integer() %% @see emqx_limiter_schema
|
||||
, bucket := bucket() %% the limiter server's bucket
|
||||
|
||||
%% retry context
|
||||
, retry_ctx => undefined %% undefined means there is no retry context or no need to retry
|
||||
| retry_context(token_bucket_limiter()) %% the retry context
|
||||
, atom => any() %% allow to add other keys
|
||||
}.
|
||||
|
||||
%% a limiter server's bucket reference
|
||||
-type ref_limiter() :: #{ max_retry_time := non_neg_integer()
|
||||
, failure_strategy := failure_strategy()
|
||||
, divisible := boolean()
|
||||
, low_water_mark := non_neg_integer()
|
||||
, bucket := bucket()
|
||||
|
||||
, retry_ctx => undefined | retry_context(ref_limiter())
|
||||
, atom => any() %% allow to add other keys
|
||||
}.
|
||||
|
||||
-type retry_fun(Limiter) :: fun((pos_integer(), Limiter) -> inner_check_result(Limiter)).
|
||||
-type acquire_type(Limiter) :: integer() | retry_context(Limiter).
|
||||
-type retry_context(Limiter) :: #{ continuation := undefined | retry_fun(Limiter)
|
||||
, diff := non_neg_integer() %% how many tokens are left to obtain
|
||||
|
||||
, need => pos_integer()
|
||||
, start => millisecond()
|
||||
}.
|
||||
|
||||
-type bucket() :: emqx_limiter_bucket_ref:bucket_ref().
|
||||
-type limiter() :: token_bucket_limiter() | ref_limiter() | infinity.
|
||||
-type millisecond() :: non_neg_integer().
|
||||
|
||||
-type pause_type() :: pause | partial.
|
||||
-type check_result_ok(Limiter) :: {ok, Limiter}.
|
||||
-type check_result_pause(Limiter) :: {pause_type(), millisecond(), retry_context(Limiter), Limiter}.
|
||||
-type result_drop(Limiter) :: {drop, Limiter}.
|
||||
|
||||
-type check_result(Limiter) :: check_result_ok(Limiter)
|
||||
| check_result_pause(Limiter)
|
||||
| result_drop(Limiter).
|
||||
|
||||
-type inner_check_result(Limiter) :: check_result_ok(Limiter)
|
||||
| check_result_pause(Limiter).
|
||||
|
||||
-type consume_result(Limiter) :: check_result_ok(Limiter)
|
||||
| result_drop(Limiter).
|
||||
|
||||
-type decimal() :: emqx_limiter_decimal:decimal().
|
||||
-type failure_strategy() :: emqx_limiter_schema:failure_strategy().
|
||||
|
||||
-type limiter_bucket_cfg() :: #{ rate := decimal()
|
||||
, initial := non_neg_integer()
|
||||
, low_water_mark := non_neg_integer()
|
||||
, capacity := decimal()
|
||||
, divisible := boolean()
|
||||
, max_retry_time := non_neg_integer()
|
||||
, failure_strategy := failure_strategy()
|
||||
}.
|
||||
|
||||
-type future() :: pos_integer().
|
||||
|
||||
-define(NOW, erlang:monotonic_time(millisecond)).
|
||||
-define(MINIMUM_PAUSE, 50).
|
||||
-define(MAXIMUM_PAUSE, 5000).
|
||||
|
||||
-import(emqx_limiter_decimal, [sub/2, mul/2, floor_div/2, add/2]).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% API
|
||||
%%--------------------------------------------------------------------
|
||||
%%@doc create a limiter
|
||||
-spec make_token_bucket_limiter(limiter_bucket_cfg(), bucket()) -> _.
|
||||
make_token_bucket_limiter(Cfg, Bucket) ->
|
||||
Cfg#{ tokens => emqx_limiter_server:get_initial_val(Cfg)
|
||||
, lasttime => ?NOW
|
||||
, bucket => Bucket
|
||||
}.
|
||||
|
||||
%%@doc create a limiter server's reference
|
||||
-spec make_ref_limiter(limiter_bucket_cfg(), bucket()) -> ref_limiter().
|
||||
make_ref_limiter(Cfg, Bucket) when Bucket =/= infinity ->
|
||||
Cfg#{bucket => Bucket}.
|
||||
|
||||
-spec make_infinity_limiter(limiter_bucket_cfg()) -> infinity.
|
||||
make_infinity_limiter(_) ->
|
||||
infinity.
|
||||
|
||||
%% @doc request some tokens
|
||||
%% it will automatically retry on failure until the maximum retry time is reached
|
||||
%% @end
|
||||
-spec consume(integer(), Limiter) -> consume_result(Limiter)
|
||||
when Limiter :: limiter().
|
||||
consume(Need, #{max_retry_time := RetryTime} = Limiter) when Need > 0 ->
|
||||
try_consume(RetryTime, Need, Limiter);
|
||||
|
||||
consume(_, Limiter) ->
|
||||
{ok, Limiter}.
|
||||
|
||||
%% @doc try to request the token and return the result without automatically retrying
|
||||
-spec check(acquire_type(Limiter), Limiter) -> check_result(Limiter)
|
||||
when Limiter :: limiter().
|
||||
check(_, infinity) ->
|
||||
{ok, infinity};
|
||||
|
||||
check(Need, Limiter) when is_integer(Need), Need > 0 ->
|
||||
case do_check(Need, Limiter) of
|
||||
{ok, _} = Done ->
|
||||
Done;
|
||||
{PauseType, Pause, Ctx, Limiter2} ->
|
||||
{PauseType,
|
||||
Pause,
|
||||
Ctx#{start => ?NOW, need => Need}, Limiter2}
|
||||
end;
|
||||
|
||||
%% check with retry context.
|
||||
%% when continuation = undefined, the diff will be 0
|
||||
%% so there is no need to check continuation here
|
||||
check(#{continuation := Cont,
|
||||
diff := Diff,
|
||||
start := Start} = Retry,
|
||||
#{failure_strategy := Failure,
|
||||
max_retry_time := RetryTime} = Limiter) when Diff > 0 ->
|
||||
case Cont(Diff, Limiter) of
|
||||
{ok, _} = Done ->
|
||||
Done;
|
||||
{PauseType, Pause, Ctx, Limiter2} ->
|
||||
IsFailed = ?NOW - Start >= RetryTime,
|
||||
Retry2 = maps:merge(Retry, Ctx),
|
||||
case IsFailed of
|
||||
false ->
|
||||
{PauseType, Pause, Retry2, Limiter2};
|
||||
_ ->
|
||||
on_failure(Failure, try_restore(Retry2, Limiter2))
|
||||
end
|
||||
end;
|
||||
|
||||
check(_, Limiter) ->
|
||||
{ok, Limiter}.
|
||||
|
||||
%% @doc pack the retry context into the limiter data
|
||||
-spec set_retry(retry_context(Limiter), Limiter) -> Limiter
|
||||
when Limiter :: limiter().
|
||||
set_retry(Retry, Limiter) ->
|
||||
Limiter#{retry_ctx => Retry}.
|
||||
|
||||
%% @doc check if there is a retry context, and try again if there is
|
||||
-spec retry(Limiter) -> check_result(Limiter) when Limiter :: limiter().
|
||||
retry(#{retry_ctx := Retry} = Limiter) when is_map(Retry) ->
|
||||
check(Retry, Limiter#{retry_ctx := undefined});
|
||||
|
||||
retry(Limiter) ->
|
||||
{ok, Limiter}.
|
||||
|
||||
%% @doc make a future value
|
||||
%% this is similar to a retry context, but represents a value that will be checked in the future
|
||||
%% @end
|
||||
-spec make_future(pos_integer()) -> future().
|
||||
make_future(Need) ->
|
||||
Need.
|
||||
|
||||
%% @doc get the number of tokens currently available
|
||||
-spec available(limiter()) -> decimal().
|
||||
available(#{tokens := Tokens,
|
||||
rate := Rate,
|
||||
lasttime := LastTime,
|
||||
capacity := Capacity,
|
||||
bucket := Bucket}) ->
|
||||
Tokens2 = apply_elapsed_time(Rate, ?NOW - LastTime, Tokens, Capacity),
|
||||
erlang:min(Tokens2, emqx_limiter_bucket_ref:available(Bucket));
|
||||
|
||||
available(#{bucket := Bucket}) ->
|
||||
emqx_limiter_bucket_ref:available(Bucket);
|
||||
|
||||
available(infinity) ->
|
||||
infinity.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
||||
-spec try_consume(millisecond(),
|
||||
acquire_type(Limiter),
|
||||
Limiter) -> consume_result(Limiter) when Limiter :: limiter().
|
||||
try_consume(LeftTime, Retry, #{failure_strategy := Failure} = Limiter)
|
||||
when LeftTime =< 0, is_map(Retry) ->
|
||||
on_failure(Failure, try_restore(Retry, Limiter));
|
||||
|
||||
try_consume(LeftTime, Need, Limiter) when is_integer(Need) ->
|
||||
case do_check(Need, Limiter) of
|
||||
{ok, _} = Done ->
|
||||
Done;
|
||||
{_, Pause, Ctx, Limiter2} ->
|
||||
timer:sleep(erlang:min(LeftTime, Pause)),
|
||||
try_consume(LeftTime - Pause, Ctx#{need => Need}, Limiter2)
|
||||
end;
|
||||
|
||||
try_consume(LeftTime,
|
||||
#{continuation := Cont,
|
||||
diff := Diff} = Retry, Limiter) when Diff > 0 ->
|
||||
case Cont(Diff, Limiter) of
|
||||
{ok, _} = Done ->
|
||||
Done;
|
||||
{_, Pause, Ctx, Limiter2} ->
|
||||
timer:sleep(erlang:min(LeftTime, Pause)),
|
||||
try_consume(LeftTime - Pause, maps:merge(Retry, Ctx), Limiter2)
|
||||
end;
|
||||
|
||||
try_consume(_, _, Limiter) ->
|
||||
{ok, Limiter}.
|
||||
|
||||
-spec do_check(acquire_type(Limiter), Limiter) -> inner_check_result(Limiter)
|
||||
when Limiter :: limiter().
|
||||
do_check(Need, #{tokens := Tokens} = Limiter) ->
|
||||
if Need =< Tokens ->
|
||||
do_check_with_parent_limiter(Need, Limiter);
|
||||
true ->
|
||||
do_reset(Need, Limiter)
|
||||
end;
|
||||
|
||||
do_check(Need, #{divisible := Divisible,
|
||||
bucket := Bucket} = Ref) ->
|
||||
case emqx_limiter_bucket_ref:check(Need, Bucket, Divisible) of
|
||||
{ok, Tokens} ->
|
||||
may_return_or_pause(Tokens, Ref);
|
||||
{PauseType, Rate, Obtained} ->
|
||||
return_pause(Rate,
|
||||
PauseType,
|
||||
fun ?FUNCTION_NAME/2, Need - Obtained, Ref)
|
||||
end.
|
||||
|
||||
on_failure(force, Limiter) ->
|
||||
{ok, Limiter};
|
||||
|
||||
on_failure(drop, Limiter) ->
|
||||
{drop, Limiter};
|
||||
|
||||
on_failure(throw, Limiter) ->
|
||||
Message = io_lib:format("limiter consume failed, limiter:~p~n", [Limiter]),
|
||||
erlang:throw({rate_check_fail, Message}).
|
||||
|
||||
-spec do_check_with_parent_limiter(pos_integer(), token_bucket_limiter()) -> inner_check_result(token_bucket_limiter()).
|
||||
do_check_with_parent_limiter(Need,
|
||||
#{tokens := Tokens,
|
||||
divisible := Divisible,
|
||||
bucket := Bucket} = Limiter) ->
|
||||
case emqx_limiter_bucket_ref:check(Need, Bucket, Divisible) of
|
||||
{ok, RefLeft} ->
|
||||
Left = sub(Tokens, Need),
|
||||
may_return_or_pause(erlang:min(RefLeft, Left), Limiter#{tokens := Left});
|
||||
{PauseType, Rate, Obtained} ->
|
||||
return_pause(Rate,
|
||||
PauseType,
|
||||
fun ?FUNCTION_NAME/2,
|
||||
Need - Obtained,
|
||||
Limiter#{tokens := sub(Tokens, Obtained)})
|
||||
end.
|
||||
|
||||
-spec do_reset(pos_integer(), token_bucket_limiter()) -> inner_check_result(token_bucket_limiter()).
|
||||
do_reset(Need,
|
||||
#{tokens := Tokens,
|
||||
rate := Rate,
|
||||
lasttime := LastTime,
|
||||
divisible := Divisible,
|
||||
capacity := Capacity} = Limiter) ->
|
||||
Now = ?NOW,
|
||||
Tokens2 = apply_elapsed_time(Rate, Now - LastTime, Tokens, Capacity),
|
||||
if Tokens2 >= Need ->
|
||||
Limiter2 = Limiter#{tokens := Tokens2, lasttime := Now},
|
||||
do_check_with_parent_limiter(Need, Limiter2);
|
||||
Divisible andalso Tokens2 > 0 ->
|
||||
%% must be allocated here, because Need may be > Capacity
|
||||
return_pause(Rate,
|
||||
partial,
|
||||
fun do_reset/2,
|
||||
Need - Tokens2,
|
||||
Limiter#{tokens := 0, lasttime := Now});
|
||||
true ->
|
||||
return_pause(Rate, pause, fun do_reset/2, Need, Limiter)
|
||||
end.
|
||||
|
||||
-spec return_pause(decimal(), pause_type(), retry_fun(Limiter), pos_integer(), Limiter)
|
||||
-> check_result_pause(Limiter) when Limiter :: limiter().
|
||||
return_pause(infinity, PauseType, Fun, Diff, Limiter) ->
|
||||
%% workaround when emqx_limiter_server's rate is infinity
|
||||
{PauseType, ?MINIMUM_PAUSE, make_retry_context(Fun, Diff), Limiter};
|
||||
|
||||
return_pause(Rate, PauseType, Fun, Diff, Limiter) ->
|
||||
Val = erlang:round(Diff * emqx_limiter_schema:minimum_period() / Rate),
|
||||
Pause = emqx_misc:clamp(Val, ?MINIMUM_PAUSE, ?MAXIMUM_PAUSE),
|
||||
{PauseType, Pause, make_retry_context(Fun, Diff), Limiter}.
|
||||
|
||||
-spec make_retry_context(undefined | retry_fun(Limiter), non_neg_integer()) -> retry_context(Limiter)
|
||||
when Limiter :: limiter().
|
||||
make_retry_context(Fun, Diff) ->
|
||||
#{continuation => Fun, diff => Diff}.
|
||||
|
||||
-spec try_restore(retry_context(Limiter), Limiter) -> Limiter
|
||||
when Limiter :: limiter().
|
||||
try_restore(#{need := Need, diff := Diff},
|
||||
#{tokens := Tokens, capacity := Capacity, bucket := Bucket} = Limiter) ->
|
||||
Back = Need - Diff,
|
||||
Tokens2 = erlang:min(Capacity, Back + Tokens),
|
||||
emqx_limiter_bucket_ref:try_restore(Back, Bucket),
|
||||
Limiter#{tokens := Tokens2};
|
||||
|
||||
try_restore(#{need := Need, diff := Diff}, #{bucket := Bucket} = Limiter) ->
|
||||
emqx_limiter_bucket_ref:try_restore(Need - Diff, Bucket),
|
||||
Limiter.
|
||||
|
||||
-spec may_return_or_pause(non_neg_integer(), Limiter) -> check_result(Limiter)
|
||||
when Limiter :: limiter().
|
||||
may_return_or_pause(Left, #{low_water_mark := Mark} = Limiter) when Left >= Mark ->
|
||||
{ok, Limiter};
|
||||
|
||||
may_return_or_pause(_, Limiter) ->
|
||||
{pause, ?MINIMUM_PAUSE, make_retry_context(undefined, 0), Limiter}.
|
||||
|
||||
%% @doc apply the elapsed time to the limiter
|
||||
apply_elapsed_time(Rate, Elapsed, Tokens, Capacity) ->
|
||||
Inc = floor_div(mul(Elapsed, Rate), emqx_limiter_schema:minimum_period()),
|
||||
erlang:min(add(Tokens, Inc), Capacity).
|
|
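Illustrative sketch (editor's addition, not part of this changeset): exercising an htb limiter that was obtained elsewhere (e.g. through the container or emqx_limiter_server:connect/2). Only the emqx_htb_limiter:consume/2 call and its documented return shapes come from this diff; the wrapper function is hypothetical.

-module(htb_limiter_example).
-export([send_payload/2]).

%% Charge the payload size against the limiter; consume/2 sleeps and retries
%% internally up to max_retry_time, then applies the failure_strategy.
send_payload(Payload, Limiter) ->
    Need = erlang:byte_size(Payload),
    case emqx_htb_limiter:consume(Need, Limiter) of
        {ok, Limiter2}   -> {send, Limiter2};
        {drop, Limiter2} -> {dropped, Limiter2}
    end.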
@@ -9,7 +9,5 @@
 {env, []},
 {licenses, ["Apache-2.0"]},
 {maintainers, ["EMQ X Team <contact@emqx.io>"]},
-{links, [{"Homepage", "https://emqx.io/"},
-{"Github", "https://github.com/emqx/emqx-retainer"}
-]}
+{links, []}
 ]}.
@@ -1,5 +1,5 @@
 %%--------------------------------------------------------------------
-%% Copyright (c) 2020-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
+%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
 %%
 %% Licensed under the Apache License, Version 2.0 (the "License");
 %% you may not use this file except in compliance with the License.
@ -0,0 +1,102 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_limiter_bucket_ref).
|
||||
|
||||
%% @doc limiter bucket reference
|
||||
%% this module is used to manage the bucket reference of the limiter server
|
||||
%% @end
|
||||
|
||||
%% API
|
||||
-export([ new/3, check/3, try_restore/2
|
||||
, available/1]).
|
||||
|
||||
-export_type([bucket_ref/0]).
|
||||
|
||||
-type infinity_bucket_ref() :: infinity.
|
||||
-type finite_bucket_ref() :: #{ counter := counters:counters_ref()
|
||||
, index := index()
|
||||
, rate := rate()}.
|
||||
|
||||
-type bucket_ref() :: infinity_bucket_ref()
|
||||
| finite_bucket_ref().
|
||||
|
||||
-type index() :: emqx_limiter_server:index().
|
||||
-type rate() :: emqx_limiter_decimal:decimal().
|
||||
-type check_failure_type() :: partial | pause.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% API
|
||||
%%--------------------------------------------------------------------
|
||||
-spec new(undefined | counters:counters_ref(),
|
||||
undefined | index(),
|
||||
rate()) -> bucket_ref().
|
||||
new(undefined, _, _) ->
|
||||
infinity;
|
||||
|
||||
new(Counter, Index, Rate) ->
|
||||
#{counter => Counter,
|
||||
index => Index,
|
||||
rate => Rate}.
|
||||
|
||||
%% @doc check tokens
|
||||
-spec check(pos_integer(), bucket_ref(), Divisible :: boolean()) ->
|
||||
HasToken :: {ok, emqx_limiter_decimal:decimal()}
|
||||
| {check_failure_type(), rate(), pos_integer()}.
|
||||
check(_, infinity, _) ->
|
||||
{ok, infinity};
|
||||
|
||||
check(Need,
|
||||
#{counter := Counter,
|
||||
index := Index,
|
||||
rate := Rate},
|
||||
Divisible)->
|
||||
RefToken = counters:get(Counter, Index),
|
||||
if RefToken >= Need ->
|
||||
counters:sub(Counter, Index, Need),
|
||||
{ok, RefToken - Need};
|
||||
Divisible andalso RefToken > 0 ->
|
||||
counters:sub(Counter, Index, RefToken),
|
||||
{partial, Rate, RefToken};
|
||||
true ->
|
||||
{pause, Rate, 0}
|
||||
end.
|
||||
|
||||
%% @doc try to restore token when consume failed
|
||||
-spec try_restore(non_neg_integer(), bucket_ref()) -> ok.
|
||||
try_restore(0, _) ->
|
||||
ok;
|
||||
try_restore(_, infinity) ->
|
||||
ok;
|
||||
try_restore(Inc, #{counter := Counter, index := Index}) ->
|
||||
case counters:get(Counter, Index) of
|
||||
Tokens when Tokens < 0 ->
|
||||
counters:add(Counter, Index, Inc);
|
||||
_ ->
|
||||
ok
|
||||
end.
|
||||
|
||||
%% @doc get the number of tokens currently available
|
||||
-spec available(bucket_ref()) -> emqx_limiter_decimal:decimal().
|
||||
available(#{counter := Counter, index := Index}) ->
|
||||
counters:get(Counter, Index);
|
||||
|
||||
available(infinity) ->
|
||||
infinity.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
|
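Illustrative sketch (editor's addition, not part of this changeset): a bucket_ref is just a counters array slot plus a rate. In the real code the limiter server owns the counter; here we fabricate one to show the check/try_restore semantics. All values are made up.

-module(bucket_ref_example).
-export([demo/0]).

demo() ->
    Counter = counters:new(1, []),     %% one bucket slot
    counters:put(Counter, 1, 100),     %% pretend 100 tokens are available
    Bucket = emqx_limiter_bucket_ref:new(Counter, 1, 10.0),
    %% 30 tokens fit: {ok, Remaining}
    {ok, 70} = emqx_limiter_bucket_ref:check(30, Bucket, false),
    %% 200 do not, and the request is not divisible: {pause, Rate, Obtained}
    {pause, _Rate, 0} = emqx_limiter_bucket_ref:check(200, Bucket, false),
    emqx_limiter_bucket_ref:available(Bucket).   %% -> 70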
@ -0,0 +1,157 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_limiter_container).
|
||||
|
||||
%% @doc the container of emqx_htb_limiter
|
||||
%% used to merge limiters of different types to simplify operations
|
||||
%% @end
|
||||
|
||||
%% API
|
||||
-export([ new/0, new/1, get_limiter_by_names/2
|
||||
, add_new/3, update_by_name/3, set_retry_context/2
|
||||
, check/3, retry/2, get_retry_context/1
|
||||
, check_list/2, retry_list/2
|
||||
]).
|
||||
|
||||
-export_type([container/0, check_result/0]).
|
||||
|
||||
-type container() :: #{ limiter_type() => undefined | limiter()
|
||||
, retry_key() => undefined | retry_context() | future() %% the retry context of the limiter
|
||||
, retry_ctx := undefined | any() %% the retry context of the container
|
||||
}.
|
||||
|
||||
-type future() :: pos_integer().
|
||||
-type limiter_type() :: emqx_limiter_schema:limiter_type().
|
||||
-type limiter() :: emqx_htb_limiter:limiter().
|
||||
-type retry_context() :: emqx_htb_limiter:retry_context().
|
||||
-type bucket_name() :: emqx_limiter_schema:bucket_name().
|
||||
-type millisecond() :: non_neg_integer().
|
||||
-type check_result() :: {ok, container()}
|
||||
| {drop, container()}
|
||||
| {pause, millisecond(), container()}.
|
||||
|
||||
-define(RETRY_KEY(Type), {retry, Type}).
|
||||
-type retry_key() :: ?RETRY_KEY(limiter_type()).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% API
|
||||
%%--------------------------------------------------------------------
|
||||
-spec new() -> container().
|
||||
new() ->
|
||||
new([]).
|
||||
|
||||
%% @doc generate default data according to the type of limiter
|
||||
-spec new(list(limiter_type())) -> container().
|
||||
new(Types) ->
|
||||
get_limiter_by_names(Types, #{}).
|
||||
|
||||
%% @doc generate a container
|
||||
%% according to the type of limiter and the bucket name configuration of the limiter
|
||||
%% @end
|
||||
-spec get_limiter_by_names(list(limiter_type()), #{limiter_type() => emqx_limiter_schema:bucket_name()}) -> container().
|
||||
get_limiter_by_names(Types, BucketNames) ->
|
||||
Init = fun(Type, Acc) ->
|
||||
Limiter = emqx_limiter_server:connect(Type, BucketNames),
|
||||
add_new(Type, Limiter, Acc)
|
||||
end,
|
||||
lists:foldl(Init, #{retry_ctx => undefined}, Types).
|
||||
|
||||
%% @doc add the specified type of limiter to the container
|
||||
-spec update_by_name(limiter_type(),
|
||||
bucket_name() | #{limiter_type() => bucket_name()},
|
||||
container()) -> container().
|
||||
update_by_name(Type, Buckets, Container) ->
|
||||
Limiter = emqx_limiter_server:connect(Type, Buckets),
|
||||
add_new(Type, Limiter, Container).
|
||||
|
||||
-spec add_new(limiter_type(), limiter(), container()) -> container().
|
||||
add_new(Type, Limiter, Container) ->
|
||||
Container#{ Type => Limiter
|
||||
, ?RETRY_KEY(Type) => undefined
|
||||
}.
|
||||
|
||||
%% @doc check the specified limiter
|
||||
-spec check(pos_integer(), limiter_type(), container()) -> check_result().
|
||||
check(Need, Type, Container) ->
|
||||
check_list([{Need, Type}], Container).
|
||||
|
||||
%% @doc check multiple limiters
|
||||
-spec check_list(list({pos_integer(), limiter_type()}), container()) -> check_result().
|
||||
check_list([{Need, Type} | T], Container) ->
|
||||
Limiter = maps:get(Type, Container),
|
||||
case emqx_htb_limiter:check(Need, Limiter) of
|
||||
{ok, Limiter2} ->
|
||||
check_list(T, Container#{Type := Limiter2});
|
||||
{_, PauseMs, Ctx, Limiter2} ->
|
||||
Fun = fun({FN, FT}, Acc) ->
|
||||
Future = emqx_htb_limiter:make_future(FN),
|
||||
Acc#{?RETRY_KEY(FT) := Future}
|
||||
end,
|
||||
C2 = lists:foldl(Fun,
|
||||
Container#{Type := Limiter2,
|
||||
?RETRY_KEY(Type) := Ctx},
|
||||
T),
|
||||
{pause, PauseMs, C2};
|
||||
{drop, Limiter2} ->
|
||||
{drop, Container#{Type := Limiter2}}
|
||||
end;
|
||||
|
||||
check_list([], Container) ->
|
||||
{ok, Container}.
|
||||
|
||||
%% @doc retry the specified limiter
|
||||
-spec retry(limiter_type(), container()) -> check_result().
|
||||
retry(Type, Container) ->
|
||||
retry_list([Type], Container).
|
||||
|
||||
%% @doc retry multiple limiters
|
||||
-spec retry_list(list(limiter_type()), container()) -> check_result().
|
||||
retry_list([Type | T], Container) ->
|
||||
Key = ?RETRY_KEY(Type),
|
||||
case Container of
|
||||
#{Type := Limiter,
|
||||
Key := Retry} when Retry =/= undefined ->
|
||||
case emqx_htb_limiter:check(Retry, Limiter) of
|
||||
{ok, Limiter2} ->
|
||||
%% undefined means there is no retry context or there is no need to retry
|
||||
%% when a limiter has an undefined retry context, the check will always succeed
|
||||
retry_list(T, Container#{Type := Limiter2, Key := undefined});
|
||||
{_, PauseMs, Ctx, Limiter2} ->
|
||||
{pause,
|
||||
PauseMs,
|
||||
Container#{Type := Limiter2, Key := Ctx}};
|
||||
{drop, Limiter2} ->
|
||||
{drop, Container#{Type := Limiter2}}
|
||||
end;
|
||||
_ ->
|
||||
retry_list(T, Container)
|
||||
end;
|
||||
|
||||
retry_list([], Container) ->
|
||||
{ok, Container}.
|
||||
|
||||
-spec set_retry_context(any(), container()) -> container().
|
||||
set_retry_context(Data, Container) ->
|
||||
Container#{retry_ctx := Data}.
|
||||
|
||||
-spec get_retry_context(container()) -> any().
|
||||
get_retry_context(#{retry_ctx := Data}) ->
|
||||
Data.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
|
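Illustrative sketch (editor's addition, not part of this changeset): resuming after a pause. Once check_list/2 has returned {pause, Ms, Container}, the failed types keep their retry context inside the container, and retry_list/2 resumes from it. The wrapper below sleeps instead of arming a limit_timeout timer as the connection does; its name and return tags are hypothetical.

-module(container_retry_example).
-export([drain/2]).

drain(Types, Container0) ->
    case emqx_limiter_container:retry_list(Types, Container0) of
        {ok, Container1} ->
            {ready, Container1};
        {drop, Container1} ->
            {dropped, Container1};
        {pause, PauseMs, Container1} ->
            timer:sleep(PauseMs),
            drain(Types, Container1)
    end.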
@@ -20,7 +20,7 @@

 %% API
 -export([ add/2, sub/2, mul/2
-, add_to_counter/3, put_to_counter/3]).
+, add_to_counter/3, put_to_counter/3, floor_div/2]).
 -export_type([decimal/0, zero_or_float/0]).

 -type decimal() :: infinity | number().
@@ -53,6 +53,13 @@ mul(A, B) when A =:= infinity
 mul(A, B) ->
 A * B.

+-spec floor_div(decimal(), number()) -> decimal().
+floor_div(infinity, _) ->
+infinity;
+
+floor_div(A, B) ->
+erlang:floor(A / B).
+
 -spec add_to_counter(counters:counters_ref(), pos_integer(), decimal()) ->
 {zero_or_float(), zero_or_float()}.
 add_to_counter(_, _, infinity) ->
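Quick illustration (editor's addition): floor_div/2 keeps the infinity-aware arithmetic consistent with add/sub/mul in this module.

1> emqx_limiter_decimal:floor_div(infinity, 1000).
infinity
2> emqx_limiter_decimal:floor_div(2500, 1000).
2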
@ -22,29 +22,27 @@
|
|||
-include_lib("stdlib/include/ms_transform.hrl").
|
||||
|
||||
%% API
|
||||
-export([ start_link/0, start_server/1, find_counter/1
|
||||
, find_counter/3, insert_counter/4, insert_counter/6
|
||||
-export([ start_link/0, start_server/1, find_bucket/1
|
||||
, find_bucket/3, insert_bucket/2, insert_bucket/4
|
||||
, make_path/3, restart_server/1]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
|
||||
terminate/2, code_change/3, format_status/2]).
|
||||
|
||||
-export_type([path/0]).
|
||||
|
||||
-type path() :: list(atom()).
|
||||
-type limiter_type() :: emqx_limiter_schema:limiter_type().
|
||||
-type zone_name() :: emqx_limiter_schema:zone_name().
|
||||
-type bucket_name() :: emqx_limiter_schema:bucket_name().
|
||||
|
||||
%% counter record in ets table
|
||||
-record(element, {path :: path(),
|
||||
counter :: counters:counters_ref(),
|
||||
index :: index(),
|
||||
rate :: rate()
|
||||
}).
|
||||
-record(bucket, { path :: path()
|
||||
, bucket :: bucket_ref()
|
||||
}).
|
||||
|
||||
|
||||
-type index() :: emqx_limiter_server:index().
|
||||
-type rate() :: emqx_limiter_decimal:decimal().
|
||||
-type bucket_ref() :: emqx_limiter_bucket_ref:bucket_ref().
|
||||
|
||||
-define(TAB, emqx_limiter_counters).
|
||||
|
||||
|
@ -59,43 +57,32 @@ start_server(Type) ->
|
|||
restart_server(Type) ->
|
||||
emqx_limiter_server_sup:restart(Type).
|
||||
|
||||
-spec find_counter(limiter_type(), zone_name(), bucket_name()) ->
|
||||
{ok, counters:counters_ref(), index(), rate()} | undefined.
|
||||
find_counter(Type, Zone, BucketId) ->
|
||||
find_counter(make_path(Type, Zone, BucketId)).
|
||||
-spec find_bucket(limiter_type(), zone_name(), bucket_name()) ->
|
||||
{ok, bucket_ref()} | undefined.
|
||||
find_bucket(Type, Zone, BucketId) ->
|
||||
find_bucket(make_path(Type, Zone, BucketId)).
|
||||
|
||||
-spec find_counter(path()) ->
|
||||
{ok, counters:counters_ref(), index(), rate()} | undefined.
|
||||
find_counter(Path) ->
|
||||
-spec find_bucket(path()) -> {ok, bucket_ref()} | undefined.
|
||||
find_bucket(Path) ->
|
||||
case ets:lookup(?TAB, Path) of
|
||||
[#element{counter = Counter, index = Index, rate = Rate}] ->
|
||||
{ok, Counter, Index, Rate};
|
||||
[#bucket{bucket = Bucket}] ->
|
||||
{ok, Bucket};
|
||||
_ ->
|
||||
undefined
|
||||
end.
|
||||
|
||||
-spec insert_counter(limiter_type(),
|
||||
zone_name(),
|
||||
bucket_name(),
|
||||
counters:counters_ref(),
|
||||
index(),
|
||||
rate()) -> boolean().
|
||||
insert_counter(Type, Zone, BucketId, Counter, Index, Rate) ->
|
||||
insert_counter(make_path(Type, Zone, BucketId),
|
||||
Counter,
|
||||
Index,
|
||||
Rate).
|
||||
-spec insert_bucket(limiter_type(),
|
||||
zone_name(),
|
||||
bucket_name(),
|
||||
bucket_ref()) -> boolean().
|
||||
insert_bucket(Type, Zone, BucketId, Bucket) ->
|
||||
inner_insert_bucket(make_path(Type, Zone, BucketId),
|
||||
Bucket).
|
||||
|
||||
-spec insert_counter(path(),
|
||||
counters:counters_ref(),
|
||||
index(),
|
||||
rate()) -> boolean().
|
||||
insert_counter(Path, Counter, Index, Rate) ->
|
||||
ets:insert(?TAB,
|
||||
#element{path = Path,
|
||||
counter = Counter,
|
||||
index = Index,
|
||||
rate = Rate}).
|
||||
|
||||
-spec insert_bucket(path(), bucket_ref()) -> true.
|
||||
insert_bucket(Path, Bucket) ->
|
||||
inner_insert_bucket(Path, Bucket).
|
||||
|
||||
-spec make_path(limiter_type(), zone_name(), bucket_name()) -> path().
|
||||
make_path(Type, Name, BucketId) ->
|
||||
|
@@ -129,7 +116,7 @@ start_link() ->
 {stop, Reason :: term()} |
 ignore.
 init([]) ->
-_ = ets:new(?TAB, [ set, public, named_table, {keypos, #element.path}
+_ = ets:new(?TAB, [ set, public, named_table, {keypos, #bucket.path}
 , {write_concurrency, true}, {read_concurrency, true}
 , {heir, erlang:whereis(emqx_limiter_sup), none}
 ]),
@@ -227,3 +214,7 @@ format_status(_Opt, Status) ->
 %%--------------------------------------------------------------------
 %% Internal functions
 %%--------------------------------------------------------------------
+-spec inner_insert_bucket(path(), bucket_ref()) -> true.
+inner_insert_bucket(Path, Bucket) ->
+ets:insert(?TAB,
+#bucket{path = Path, bucket = Bucket}).
@ -0,0 +1,176 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_limiter_schema).
|
||||
|
||||
-include_lib("typerefl/include/types.hrl").
|
||||
|
||||
-export([ roots/0, fields/1, to_rate/1, to_capacity/1
|
||||
, minimum_period/0, to_burst_rate/1, to_initial/1]).
|
||||
|
||||
-define(KILOBYTE, 1024).
|
||||
|
||||
-type limiter_type() :: bytes_in
|
||||
| message_in
|
||||
| connection
|
||||
| message_routing.
|
||||
|
||||
-type bucket_name() :: atom().
|
||||
-type zone_name() :: atom().
|
||||
-type rate() :: infinity | float().
|
||||
-type burst_rate() :: 0 | float().
|
||||
-type capacity() :: infinity | number(). %% the capacity of the token bucket
|
||||
-type initial() :: non_neg_integer(). %% initial capacity of the token bucket
|
||||
|
||||
%% the processing strategy after the failure of the token request
|
||||
-type failure_strategy() :: force %% Forced to pass
|
||||
| drop %% discard the current request
|
||||
| throw. %% throw an exception
|
||||
|
||||
-typerefl_from_string({rate/0, ?MODULE, to_rate}).
|
||||
-typerefl_from_string({burst_rate/0, ?MODULE, to_burst_rate}).
|
||||
-typerefl_from_string({capacity/0, ?MODULE, to_capacity}).
|
||||
-typerefl_from_string({initial/0, ?MODULE, to_initial}).
|
||||
|
||||
-reflect_type([ rate/0
|
||||
, burst_rate/0
|
||||
, capacity/0
|
||||
, initial/0
|
||||
, failure_strategy/0
|
||||
]).
|
||||
|
||||
-export_type([limiter_type/0, bucket_name/0, zone_name/0]).
|
||||
|
||||
-import(emqx_schema, [sc/2, map/2]).
|
||||
|
||||
roots() -> [emqx_limiter].
|
||||
|
||||
fields(emqx_limiter) ->
|
||||
[ {bytes_in, sc(ref(limiter), #{})}
|
||||
, {message_in, sc(ref(limiter), #{})}
|
||||
, {connection, sc(ref(limiter), #{})}
|
||||
, {message_routing, sc(ref(limiter), #{})}
|
||||
];
|
||||
|
||||
fields(limiter) ->
|
||||
[ {global, sc(ref(rate_burst), #{})}
|
||||
, {zone, sc(map("zone name", ref(rate_burst)), #{})}
|
||||
, {bucket, sc(map("bucket id", ref(bucket)),
|
||||
#{desc => "token bucket"})}
|
||||
];
|
||||
|
||||
fields(rate_burst) ->
|
||||
[ {rate, sc(rate(), #{})}
|
||||
, {burst, sc(burst_rate(), #{default => "0/0s"})}
|
||||
];
|
||||
|
||||
fields(bucket) ->
|
||||
[ {zone, sc(atom(), #{desc => "the zone which the bucket in"})}
|
||||
, {aggregated, sc(ref(bucket_aggregated), #{})}
|
||||
, {per_client, sc(ref(client_bucket), #{})}
|
||||
];
|
||||
|
||||
fields(bucket_aggregated) ->
|
||||
[ {rate, sc(rate(), #{})}
|
||||
, {initial, sc(initial(), #{default => "0"})}
|
||||
, {capacity, sc(capacity(), #{})}
|
||||
];
|
||||
|
||||
fields(client_bucket) ->
|
||||
[ {rate, sc(rate(), #{})}
|
||||
, {initial, sc(initial(), #{default => "0"})}
|
||||
%% low_water_mark add for emqx_channel and emqx_session
|
||||
%% both modules consume first and then check
|
||||
%% so we need to use this value to prevent excessive consumption (e.g, consumption from an empty bucket)
|
||||
, {low_water_mark, sc(initial(),
|
||||
#{desc => "if the remaining tokens are lower than this value,
|
||||
the check/consume will succeed, but it will be forced to hang for a short period of time",
|
||||
default => "0"})}
|
||||
, {capacity, sc(capacity(), #{desc => "the capacity of the token bucket"})}
|
||||
, {divisible, sc(boolean(),
|
||||
#{desc => "is it possible to split the number of tokens requested",
|
||||
default => false})}
|
||||
, {max_retry_time, sc(emqx_schema:duration(),
|
||||
#{ desc => "the maximum retry time when acquire failed"
|
||||
, default => "5s"})}
|
||||
, {failure_strategy, sc(failure_strategy(),
|
||||
#{ desc => "the strategy when all retry failed"
|
||||
, default => force})}
|
||||
].
|
||||
|
||||
%% minimum period is 100ms
|
||||
minimum_period() ->
|
||||
100.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
||||
ref(Field) -> hoconsc:ref(?MODULE, Field).
|
||||
|
||||
to_rate(Str) ->
|
||||
to_rate(Str, true, false).
|
||||
|
||||
to_burst_rate(Str) ->
|
||||
to_rate(Str, false, true).
|
||||
|
||||
to_rate(Str, CanInfinity, CanZero) ->
|
||||
Tokens = [string:trim(T) || T <- string:tokens(Str, "/")],
|
||||
case Tokens of
|
||||
["infinity"] when CanInfinity ->
|
||||
{ok, infinity};
|
||||
["0", _] when CanZero ->
|
||||
{ok, 0}; %% for burst
|
||||
[Quota, Interval] ->
|
||||
{ok, Val} = to_capacity(Quota),
|
||||
case emqx_schema:to_duration_ms(Interval) of
|
||||
{ok, Ms} when Ms > 0 ->
|
||||
{ok, Val * minimum_period() / Ms};
|
||||
_ ->
|
||||
{error, Str}
|
||||
end;
|
||||
_ ->
|
||||
{error, Str}
|
||||
end.
|
||||
|
||||
to_capacity(Str) ->
|
||||
Regex = "^\s*(?:(?:([1-9][0-9]*)([a-zA-z]*))|infinity)\s*$",
|
||||
to_quota(Str, Regex).
|
||||
|
||||
to_initial(Str) ->
|
||||
Regex = "^\s*([0-9]+)([a-zA-z]*)\s*$",
|
||||
to_quota(Str, Regex).
|
||||
|
||||
to_quota(Str, Regex) ->
|
||||
{ok, MP} = re:compile(Regex),
|
||||
Result = re:run(Str, MP, [{capture, all_but_first, list}]),
|
||||
case Result of
|
||||
{match, [Quota, Unit]} ->
|
||||
Val = erlang:list_to_integer(Quota),
|
||||
Unit2 = string:to_lower(Unit),
|
||||
{ok, apply_unit(Unit2, Val)};
|
||||
{match, [Quota]} ->
|
||||
{ok, erlang:list_to_integer(Quota)};
|
||||
{match, []} ->
|
||||
{ok, infinity};
|
||||
_ ->
|
||||
{error, Str}
|
||||
end.
|
||||
|
||||
apply_unit("", Val) -> Val;
|
||||
apply_unit("kb", Val) -> Val * ?KILOBYTE;
|
||||
apply_unit("mb", Val) -> Val * ?KILOBYTE * ?KILOBYTE;
|
||||
apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE;
|
||||
apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit).
|
|
@ -0,0 +1,582 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
%% A hierarchical token bucket algorithm
|
||||
%% Note: this is not the linux HTB algorithm(http://luxik.cdi.cz/~devik/qos/htb/manual/theory.htm)
|
||||
%% Algorithm:
|
||||
%% 1. the root node periodically generates tokens and then distributes them
|
||||
%% just like the oscillation of water waves
|
||||
%% 2. the leaf node has a counter, which is the place where the token is actually held.
|
||||
%% 3. other nodes only play the role of transmission, and the rate of the node is like a valve,
|
||||
%% limiting the oscillation transmitted from the parent node
|
||||
|
||||
-module(emqx_limiter_server).
|
||||
|
||||
-behaviour(gen_server).
|
||||
|
||||
-include_lib("emqx/include/logger.hrl").
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
|
||||
terminate/2, code_change/3, format_status/2]).
|
||||
|
||||
-export([ start_link/1, connect/2, info/2
|
||||
, name/1, get_initial_val/1]).
|
||||
|
||||
-type root() :: #{ rate := rate() %% number of tokens generated per period
|
||||
, burst := rate()
|
||||
, period := pos_integer() %% token generation interval(second)
|
||||
, childs := list(node_id()) %% node children
|
||||
, consumed := non_neg_integer()
|
||||
}.
|
||||
|
||||
-type zone() :: #{ id := node_id()
|
||||
, name := zone_name()
|
||||
, rate := rate()
|
||||
, burst := rate()
|
||||
, obtained := non_neg_integer() %% number of tokens obtained
|
||||
, childs := list(node_id())
|
||||
}.
|
||||
|
||||
-type bucket() :: #{ id := node_id()
|
||||
, name := bucket_name()
|
||||
, zone := zone_name() %% pointer to zone node, use for burst
|
||||
, rate := rate()
|
||||
, obtained := non_neg_integer()
|
||||
, correction := emqx_limiter_decimal:zero_or_float() %% token correction value
|
||||
, capacity := capacity()
|
||||
, counter := undefined | counters:counters_ref()
|
||||
, index := undefined | index()
|
||||
}.
|
||||
|
||||
-type state() :: #{ root := undefined | root()
|
||||
, counter := undefined | counters:counters_ref() %% current counter to alloc
|
||||
, index := index()
|
||||
, zones := #{zone_name() => node_id()}
|
||||
, buckets := list(node_id())
|
||||
, nodes := nodes()
|
||||
, type := limiter_type()
|
||||
}.
|
||||
|
||||
-type node_id() :: pos_integer().
|
||||
-type node_data() :: zone() | bucket().
|
||||
-type nodes() :: #{node_id() => node_data()}.
|
||||
-type zone_name() :: emqx_limiter_schema:zone_name().
|
||||
-type limiter_type() :: emqx_limiter_schema:limiter_type().
|
||||
-type bucket_name() :: emqx_limiter_schema:bucket_name().
|
||||
-type rate() :: decimal().
|
||||
-type flow() :: decimal().
|
||||
-type capacity() :: decimal().
|
||||
-type decimal() :: emqx_limiter_decimal:decimal().
|
||||
-type index() :: pos_integer().
|
||||
|
||||
-define(CALL(Type, Msg), gen_server:call(name(Type), {?FUNCTION_NAME, Msg})).
|
||||
-define(OVERLOAD_MIN_ALLOC, 0.3). %% minimum coefficient for overloaded limiter
|
||||
|
||||
-export_type([index/0]).
|
||||
-import(emqx_limiter_decimal, [add/2, sub/2, mul/2, add_to_counter/3, put_to_counter/3]).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% API
|
||||
%%--------------------------------------------------------------------
|
||||
-spec connect(limiter_type(),
|
||||
bucket_name() | #{limiter_type() => bucket_name()}) -> emqx_htb_limiter:limiter().
|
||||
connect(Type, BucketName) when is_atom(BucketName) ->
|
||||
Path = [emqx_limiter, Type, bucket, BucketName],
|
||||
case emqx:get_config(Path, undefined) of
|
||||
undefined ->
|
||||
?LOG(error, "can't find the config of this bucket: ~p~n", [Path]),
|
||||
throw("bucket's config not found");
|
||||
#{zone := Zone,
|
||||
aggregated := #{rate := AggrRate, capacity := AggrSize},
|
||||
per_client := #{rate := CliRate, capacity := CliSize} = Cfg} ->
|
||||
case emqx_limiter_manager:find_bucket(Type, Zone, BucketName) of
|
||||
{ok, Bucket} ->
|
||||
if CliRate < AggrRate orelse CliSize < AggrSize ->
|
||||
emqx_htb_limiter:make_token_bucket_limiter(Cfg, Bucket);
|
||||
Bucket =:= infinity ->
|
||||
emqx_htb_limiter:make_infinity_limiter(Cfg);
|
||||
true ->
|
||||
emqx_htb_limiter:make_ref_limiter(Cfg, Bucket)
|
||||
end;
|
||||
undefined ->
|
||||
?LOG(error, "can't find the bucket:~p~n", [Path]),
|
||||
throw("invalid bucket")
|
||||
end
|
||||
end;
|
||||
|
||||
connect(Type, Names) ->
|
||||
connect(Type, maps:get(Type, Names, default)).
|
||||
|
||||
-spec info(limiter_type(), atom()) -> term().
|
||||
info(Type, Info) ->
|
||||
?CALL(Type, Info).
|
||||
|
||||
-spec name(limiter_type()) -> atom().
|
||||
name(Type) ->
|
||||
erlang:list_to_atom(io_lib:format("~s_~s", [?MODULE, Type])).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @doc
|
||||
%% Starts the server
|
||||
%% @end
|
||||
%%--------------------------------------------------------------------
|
||||
-spec start_link(limiter_type()) -> _.
|
||||
start_link(Type) ->
|
||||
gen_server:start_link({local, name(Type)}, ?MODULE, [Type], []).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%%% gen_server callbacks
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @private
|
||||
%% @doc
|
||||
%% Initializes the server
|
||||
%% @end
|
||||
%%--------------------------------------------------------------------
|
||||
-spec init(Args :: term()) -> {ok, State :: term()} |
|
||||
{ok, State :: term(), Timeout :: timeout()} |
|
||||
{ok, State :: term(), hibernate} |
|
||||
{stop, Reason :: term()} |
|
||||
ignore.
|
||||
init([Type]) ->
|
||||
State = #{root => undefined,
|
||||
counter => undefined,
|
||||
index => 1,
|
||||
zones => #{},
|
||||
nodes => #{},
|
||||
buckets => [],
|
||||
type => Type},
|
||||
State2 = init_tree(Type, State),
|
||||
#{root := #{period := Perido}} = State2,
|
||||
oscillate(Perido),
|
||||
{ok, State2}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @private
|
||||
%% @doc
|
||||
%% Handling call messages
|
||||
%% @end
|
||||
%%--------------------------------------------------------------------
|
||||
-spec handle_call(Request :: term(), From :: {pid(), term()}, State :: term()) ->
|
||||
{reply, Reply :: term(), NewState :: term()} |
|
||||
{reply, Reply :: term(), NewState :: term(), Timeout :: timeout()} |
|
||||
{reply, Reply :: term(), NewState :: term(), hibernate} |
|
||||
{noreply, NewState :: term()} |
|
||||
{noreply, NewState :: term(), Timeout :: timeout()} |
|
||||
{noreply, NewState :: term(), hibernate} |
|
||||
{stop, Reason :: term(), Reply :: term(), NewState :: term()} |
|
||||
{stop, Reason :: term(), NewState :: term()}.
|
||||
handle_call(Req, _From, State) ->
|
||||
?LOG(error, "Unexpected call: ~p", [Req]),
|
||||
{reply, ignored, State}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @private
|
||||
%% @doc
|
||||
%% Handling cast messages
|
||||
%% @end
|
||||
%%--------------------------------------------------------------------
|
||||
-spec handle_cast(Request :: term(), State :: term()) ->
|
||||
{noreply, NewState :: term()} |
|
||||
{noreply, NewState :: term(), Timeout :: timeout()} |
|
||||
{noreply, NewState :: term(), hibernate} |
|
||||
{stop, Reason :: term(), NewState :: term()}.
|
||||
handle_cast(Req, State) ->
|
||||
?LOG(error, "Unexpected cast: ~p", [Req]),
|
||||
{noreply, State}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @private
|
||||
%% @doc
|
||||
%% Handling all non call/cast messages
|
||||
%% @end
|
||||
%%--------------------------------------------------------------------
|
||||
-spec handle_info(Info :: timeout() | term(), State :: term()) ->
|
||||
{noreply, NewState :: term()} |
|
||||
{noreply, NewState :: term(), Timeout :: timeout()} |
|
||||
{noreply, NewState :: term(), hibernate} |
|
||||
{stop, Reason :: normal | term(), NewState :: term()}.
|
||||
handle_info(oscillate, State) ->
|
||||
{noreply, oscillation(State)};
|
||||
|
||||
handle_info(Info, State) ->
|
||||
?LOG(error, "Unexpected info: ~p", [Info]),
|
||||
{noreply, State}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @private
|
||||
%% @doc
|
||||
%% This function is called by a gen_server when it is about to
|
||||
%% terminate. It should be the opposite of Module:init/1 and do any
|
||||
%% necessary cleaning up. When it returns, the gen_server terminates
|
||||
%% with Reason. The return value is ignored.
|
||||
%% @end
|
||||
%%--------------------------------------------------------------------
|
||||
-spec terminate(Reason :: normal | shutdown | {shutdown, term()} | term(),
|
||||
State :: term()) -> any().
|
||||
terminate(_Reason, _State) ->
|
||||
ok.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @private
|
||||
%% @doc
|
||||
%% Convert process state when code is changed
|
||||
%% @end
|
||||
%%--------------------------------------------------------------------
|
||||
-spec code_change(OldVsn :: term() | {down, term()},
|
||||
State :: term(),
|
||||
Extra :: term()) -> {ok, NewState :: term()} |
|
||||
{error, Reason :: term()}.
|
||||
code_change(_OldVsn, State, _Extra) ->
|
||||
{ok, State}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @private
|
||||
%% @doc
|
||||
%% This function is called for changing the form and appearance
|
||||
%% of gen_server status when it is returned from sys:get_status/1,2
|
||||
%% or when it appears in termination error logs.
|
||||
%% @end
|
||||
%%--------------------------------------------------------------------
|
||||
-spec format_status(Opt :: normal | terminate,
|
||||
Status :: list()) -> Status :: term().
|
||||
format_status(_Opt, Status) ->
|
||||
Status.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
||||
oscillate(Interval) ->
|
||||
erlang:send_after(Interval, self(), ?FUNCTION_NAME).
|
||||
|
||||
%% @doc generate tokens, and then spread to leaf nodes
|
||||
-spec oscillation(state()) -> state().
|
||||
oscillation(#{root := #{rate := Flow,
|
||||
period := Interval,
|
||||
childs := ChildIds,
|
||||
consumed := Consumed} = Root,
|
||||
nodes := Nodes} = State) ->
|
||||
oscillate(Interval),
|
||||
Childs = get_ordered_childs(ChildIds, Nodes),
|
||||
{Alloced, Nodes2} = transverse(Childs, Flow, 0, Nodes),
|
||||
maybe_burst(State#{nodes := Nodes2,
|
||||
root := Root#{consumed := Consumed + Alloced}}).
|
||||
|
||||
%% @doc horizontal spread
|
||||
-spec transverse(list(node_data()),
|
||||
flow(),
|
||||
non_neg_integer(),
|
||||
nodes()) -> {non_neg_integer(), nodes()}.
|
||||
transverse([H | T], InFlow, Alloced, Nodes) when InFlow > 0 ->
|
||||
{NodeAlloced, Nodes2} = longitudinal(H, InFlow, Nodes),
|
||||
InFlow2 = sub(InFlow, NodeAlloced),
|
||||
Alloced2 = Alloced + NodeAlloced,
|
||||
transverse(T, InFlow2, Alloced2, Nodes2);
|
||||
|
||||
transverse(_, _, Alloced, Nodes) ->
|
||||
{Alloced, Nodes}.
|
||||
|
||||
%% @doc vertical spread
|
||||
-spec longitudinal(node_data(), flow(), nodes()) ->
|
||||
{non_neg_integer(), nodes()}.
|
||||
longitudinal(#{id := Id,
|
||||
rate := Rate,
|
||||
obtained := Obtained,
|
||||
childs := ChildIds} = Node, InFlow, Nodes) ->
|
||||
Flow = erlang:min(InFlow, Rate),
|
||||
|
||||
if Flow > 0 ->
|
||||
Childs = get_ordered_childs(ChildIds, Nodes),
|
||||
{Alloced, Nodes2} = transverse(Childs, Flow, 0, Nodes),
|
||||
if Alloced > 0 ->
|
||||
{Alloced,
|
||||
Nodes2#{Id => Node#{obtained := Obtained + Alloced}}};
|
||||
true ->
|
||||
%% childs are empty or all counter childs are full
|
||||
{0, Nodes2}
|
||||
end;
|
||||
true ->
|
||||
{0, Nodes}
|
||||
end;
|
||||
|
||||
longitudinal(#{id := Id,
|
||||
rate := Rate,
|
||||
capacity := Capacity,
|
||||
correction := Correction,
|
||||
counter := Counter,
|
||||
index := Index,
|
||||
obtained := Obtained} = Node,
|
||||
InFlow, Nodes) when Counter =/= undefined ->
|
||||
Flow = add(erlang:min(InFlow, Rate), Correction),
|
||||
|
||||
ShouldAlloc =
|
||||
case counters:get(Counter, Index) of
|
||||
Tokens when Tokens < 0 ->
|
||||
%% toknes's value mayb be a negative value(stolen from the future)
|
||||
%% because ∃ x. add(Capacity, x) < 0, so here we must compare with minimum value
|
||||
erlang:max(add(Capacity, Tokens),
|
||||
mul(Capacity, ?OVERLOAD_MIN_ALLOC));
|
||||
Tokens ->
|
||||
%% is it possible that Tokens > Capacity ???
|
||||
erlang:max(sub(Capacity, Tokens), 0)
|
||||
end,
|
||||
|
||||
case lists:min([ShouldAlloc, Flow, Capacity]) of
|
||||
Avaiable when Avaiable > 0 ->
|
||||
%% XXX if capacity is infinity, and flow always > 0, the value in counter
|
||||
%% will be overflow at some point in the future, do we need to deal with this situation???
|
||||
{Alloced, Decimal} = add_to_counter(Counter, Index, Avaiable),
|
||||
|
||||
{Alloced,
|
||||
Nodes#{Id := Node#{obtained := Obtained + Alloced,
|
||||
correction := Decimal}}};
|
||||
_ ->
|
||||
{0, Nodes}
|
||||
end;
|
||||
|
||||
longitudinal(_, _, Nodes) ->
|
||||
{0, Nodes}.
|
||||
|
||||
-spec get_ordered_childs(list(node_id()), nodes()) -> list(node_data()).
|
||||
get_ordered_childs(Ids, Nodes) ->
|
||||
Childs = [maps:get(Id, Nodes) || Id <- Ids],
|
||||
|
||||
%% sort by obtained, avoid node goes hungry
|
||||
lists:sort(fun(#{obtained := A}, #{obtained := B}) ->
|
||||
A < B
|
||||
end,
|
||||
Childs).
|
||||
|
||||
-spec maybe_burst(state()) -> state().
|
||||
maybe_burst(#{buckets := Buckets,
|
||||
zones := Zones,
|
||||
root := #{burst := Burst},
|
||||
nodes := Nodes} = State) when Burst > 0 ->
|
||||
%% find empty buckets and group by zone name
|
||||
GroupFun = fun(Id, Groups) ->
|
||||
#{counter := Counter,
|
||||
index := Index,
|
||||
zone := Zone} = maps:get(Id, Nodes),
|
||||
case counters:get(Counter, Index) of
|
||||
Any when Any =< 0 ->
|
||||
Group = maps:get(Zone, Groups, []),
|
||||
maps:put(Zone, [Id | Group], Groups);
|
||||
_ ->
|
||||
Groups
|
||||
end
|
||||
end,
|
||||
|
||||
case lists:foldl(GroupFun, #{}, Buckets) of
|
||||
Groups when map_size(Groups) > 0 ->
|
||||
%% remove the zone which don't support burst
|
||||
Filter = fun({Name, Childs}, Acc) ->
|
||||
ZoneId = maps:get(Name, Zones),
|
||||
#{burst := ZoneBurst} = Zone = maps:get(ZoneId, Nodes),
|
||||
case ZoneBurst > 0 of
|
||||
true ->
|
||||
[{Zone, Childs} | Acc];
|
||||
_ ->
|
||||
Acc
|
||||
end
|
||||
end,
|
||||
|
||||
FilterL = lists:foldl(Filter, [], maps:to_list(Groups)),
|
||||
dispatch_burst(FilterL, State);
|
||||
_ ->
|
||||
State
|
||||
end;
|
||||
|
||||
maybe_burst(State) ->
|
||||
State.
|
||||
|
||||
-spec dispatch_burst(list({zone(), list(node_id())}), state()) -> state().
|
||||
dispatch_burst([], State) ->
|
||||
State;
|
||||
|
||||
dispatch_burst(GroupL,
|
||||
#{root := #{burst := Burst},
|
||||
nodes := Nodes} = State) ->
|
||||
InFlow = erlang:floor(Burst / erlang:length(GroupL)),
|
||||
Dispatch = fun({Zone, Childs}, NodeAcc) ->
|
||||
#{id := ZoneId,
|
||||
burst := ZoneBurst,
|
||||
obtained := Obtained} = Zone,
|
||||
|
||||
ZoneFlow = erlang:min(InFlow, ZoneBurst),
|
||||
EachFlow = ZoneFlow div erlang:length(Childs),
|
||||
Zone2 = Zone#{obtained := Obtained + ZoneFlow},
|
||||
NodeAcc2 = NodeAcc#{ZoneId := Zone2},
|
||||
dispatch_burst_to_buckets(Childs, EachFlow, NodeAcc2)
|
||||
end,
|
||||
State#{nodes := lists:foldl(Dispatch, Nodes, GroupL)}.
|
||||
|
||||
-spec dispatch_burst_to_buckets(list(node_id()),
|
||||
non_neg_integer(), nodes()) -> nodes().
|
||||
dispatch_burst_to_buckets(Childs, InFlow, Nodes) ->
|
||||
Each = fun(ChildId, NodeAcc) ->
|
||||
#{counter := Counter,
|
||||
index := Index,
|
||||
obtained := Obtained} = Bucket = maps:get(ChildId, NodeAcc),
|
||||
counters:add(Counter, Index, InFlow),
|
||||
NodeAcc#{ChildId := Bucket#{obtained := Obtained + InFlow}}
|
||||
end,
|
||||
lists:foldl(Each, Nodes, Childs).
|
||||
|
||||
-spec init_tree(emqx_limiter_schema:limiter_type(), state()) -> state().
|
||||
init_tree(Type, State) ->
|
||||
#{global := Global,
|
||||
zone := Zone,
|
||||
bucket := Bucket} = emqx:get_config([emqx_limiter, Type]),
|
||||
{Factor, Root} = make_root(Global, Zone),
|
||||
State2 = State#{root := Root},
|
||||
{NodeId, State3} = make_zone(maps:to_list(Zone), Factor, 1, State2),
|
||||
State4 = State3#{counter := counters:new(maps:size(Bucket),
|
||||
[write_concurrency])},
|
||||
make_bucket(maps:to_list(Bucket), Global, Zone, Factor, NodeId, [], State4).
|
||||
|
||||
-spec make_root(hocons:confg(), hocon:config()) -> {number(), root()}.
|
||||
make_root(#{rate := Rate, burst := Burst}, Zone) ->
|
||||
ZoneNum = maps:size(Zone),
|
||||
Childs = lists:seq(1, ZoneNum),
|
||||
MiniPeriod = emqx_limiter_schema:minimum_period(),
|
||||
if Rate >= 1 ->
|
||||
{1, #{rate => Rate,
|
||||
burst => Burst,
|
||||
period => MiniPeriod,
|
||||
childs => Childs,
|
||||
consumed => 0}};
|
||||
true ->
|
||||
Factor = 1 / Rate,
|
||||
{Factor, #{rate => 1,
|
||||
burst => Burst * Factor,
|
||||
period => erlang:floor(Factor * MiniPeriod),
|
||||
childs => Childs,
|
||||
consumed => 0}}
|
||||
end.
|
||||
|
||||
make_zone([{Name, ZoneCfg} | T], Factor, NodeId, State) ->
|
||||
#{rate := Rate, burst := Burst} = ZoneCfg,
|
||||
#{zones := Zones, nodes := Nodes} = State,
|
||||
Zone = #{id => NodeId,
|
||||
name => Name,
|
||||
rate => mul(Rate, Factor),
|
||||
burst => Burst,
|
||||
obtained => 0,
|
||||
childs => []},
|
||||
State2 = State#{zones := Zones#{Name => NodeId},
|
||||
nodes := Nodes#{NodeId => Zone}},
|
||||
make_zone(T, Factor, NodeId + 1, State2);
|
||||
|
||||
make_zone([], _, NodeId, State2) ->
|
||||
{NodeId, State2}.
|
||||
|
||||
make_bucket([{Name, Conf} | T], Global, Zone, Factor, Id, Buckets, #{type := Type} = State) ->
|
||||
#{zone := ZoneName,
|
||||
aggregated := Aggregated} = Conf,
|
||||
Path = emqx_limiter_manager:make_path(Type, ZoneName, Name),
|
||||
case get_counter_rate(Conf, Zone, Global) of
|
||||
infinity ->
|
||||
State2 = State,
|
||||
Rate = infinity,
|
||||
Capacity = infinity,
|
||||
Counter = undefined,
|
||||
Index = undefined,
|
||||
Ref = emqx_limiter_bucket_ref:new(Counter, Index, Rate),
|
||||
emqx_limiter_manager:insert_bucket(Path, Ref);
|
||||
RawRate ->
|
||||
#{capacity := Capacity} = Aggregated,
|
||||
Initial = get_initial_val(Aggregated),
|
||||
{Counter, Index, State2} = alloc_counter(Path, RawRate, Initial, State),
|
||||
Rate = mul(RawRate, Factor)
|
||||
end,
|
||||
|
||||
Node = #{ id => Id
|
||||
, name => Name
|
||||
, zone => ZoneName
|
||||
, rate => Rate
|
||||
, obtained => 0
|
||||
, correction => 0
|
||||
, capacity => Capacity
|
||||
, counter => Counter
|
||||
, index => Index},
|
||||
|
||||
State3 = add_zone_child(Id, Node, ZoneName, State2),
|
||||
make_bucket(T, Global, Zone, Factor, Id + 1, [Id | Buckets], State3);
|
||||
|
||||
make_bucket([], _, _, _, _, Buckets, State) ->
|
||||
State#{buckets := Buckets}.
|
||||
|
||||
-spec alloc_counter(emqx_limiter_manager:path(), rate(), capacity(), state()) ->
|
||||
{counters:counters_ref(), pos_integer(), state()}.
|
||||
alloc_counter(Path, Rate, Initial,
|
||||
#{counter := Counter, index := Index} = State) ->
|
||||
case emqx_limiter_manager:find_bucket(Path) of
|
||||
{ok, #{counter := ECounter,
|
||||
index := EIndex}} when ECounter =/= undefined ->
|
||||
init_counter(Path, ECounter, EIndex, Rate, Initial, State);
|
||||
_ ->
|
||||
init_counter(Path, Counter, Index,
|
||||
Rate, Initial, State#{index := Index + 1})
|
||||
end.
|
||||
|
||||
init_counter(Path, Counter, Index, Rate, Initial, State) ->
|
||||
_ = put_to_counter(Counter, Index, Initial),
|
||||
Ref = emqx_limiter_bucket_ref:new(Counter, Index, Rate),
|
||||
emqx_limiter_manager:insert_bucket(Path, Ref),
|
||||
{Counter, Index, State}.
|
||||
|
||||
-spec add_zone_child(node_id(), bucket(), zone_name(), state()) -> state().
|
||||
add_zone_child(NodeId, Bucket, Name, #{zones := Zones, nodes := Nodes} = State) ->
|
||||
ZoneId = maps:get(Name, Zones),
|
||||
#{childs := Childs} = Zone = maps:get(ZoneId, Nodes),
|
||||
Nodes2 = Nodes#{ZoneId => Zone#{childs := [NodeId | Childs]},
|
||||
NodeId => Bucket},
|
||||
State#{nodes := Nodes2}.
|
||||
|
||||
%% @doc find first limited node
|
||||
get_counter_rate(#{zone := ZoneName,
|
||||
aggregated := Cfg}, ZoneCfg, Global) ->
|
||||
Zone = maps:get(ZoneName, ZoneCfg),
|
||||
Search = lists:search(fun(E) -> is_limited(E) end,
|
||||
[Cfg, Zone, Global]),
|
||||
case Search of
|
||||
{value, #{rate := Rate}} ->
|
||||
Rate;
|
||||
false ->
|
||||
infinity
|
||||
end.
|
||||
|
||||
is_limited(#{rate := Rate, capacity := Capacity}) ->
|
||||
Rate =/= infinity orelse Capacity =/= infinity;
|
||||
|
||||
is_limited(#{rate := Rate}) ->
|
||||
Rate =/= infinity.
|
||||
|
||||
get_initial_val(#{initial := Initial,
|
||||
rate := Rate,
|
||||
capacity := Capacity}) ->
|
||||
%% initial will nevner be infinity(see the emqx_limiter_schema)
|
||||
if Initial > 0 ->
|
||||
Initial;
|
||||
Rate =/= infinity ->
|
||||
erlang:min(Rate, Capacity);
|
||||
Capacity =/= infinity ->
|
||||
Capacity;
|
||||
true ->
|
||||
0
|
||||
end.
|
|
@ -1,5 +1,5 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2020-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
|
@ -24,9 +24,9 @@
|
|||
%% Supervisor callbacks
|
||||
-export([init/1]).
|
||||
|
||||
%%--==================================================================
|
||||
%%--------------------------------------------------------------------
|
||||
%% API functions
|
||||
%%--==================================================================
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @doc
|
||||
|
@ -52,9 +52,9 @@ restart(Type) ->
|
|||
_ = supervisor:terminate_child(?MODULE, Id),
|
||||
supervisor:restart_child(?MODULE, Id).
|
||||
|
||||
%%--==================================================================
|
||||
%%--------------------------------------------------------------------
|
||||
%% Supervisor callbacks
|
||||
%%--==================================================================
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @private
|
|
@ -1,5 +1,5 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2020-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
|
@ -175,9 +175,10 @@ restart_listener(Type, ListenerName, Conf) ->
|
|||
restart_listener(Type, ListenerName, Conf, Conf).
|
||||
|
||||
restart_listener(Type, ListenerName, OldConf, NewConf) ->
|
||||
case stop_listener(Type, ListenerName, OldConf) of
|
||||
case do_stop_listener(Type, ListenerName, OldConf) of
|
||||
ok -> start_listener(Type, ListenerName, NewConf);
|
||||
Error -> Error
|
||||
{error, not_found} -> start_listener(Type, ListenerName, NewConf);
|
||||
{error, Reason} -> {error, Reason}
|
||||
end.
|
||||
|
||||
%% @doc Stop all listeners.
|
||||
|
@ -228,7 +229,8 @@ do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts)
|
|||
esockd:open(listener_id(Type, ListenerName), ListenOn, merge_default(esockd_opts(Type, Opts)),
|
||||
{emqx_connection, start_link,
|
||||
[#{listener => {Type, ListenerName},
|
||||
zone => zone(Opts)}]});
|
||||
zone => zone(Opts),
|
||||
limiter => limiter(Opts)}]});
|
||||
|
||||
%% Start MQTT/WS listener
|
||||
do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts)
|
||||
|
@ -260,6 +262,7 @@ do_start_listener(quic, ListenerName, #{bind := ListenOn} = Opts) ->
|
|||
, peer_bidi_stream_count => 10
|
||||
, zone => zone(Opts)
|
||||
, listener => {quic, ListenerName}
|
||||
, limiter => limiter(Opts)
|
||||
},
|
||||
StreamOpts = [{stream_callback, emqx_quic_stream}],
|
||||
quicer:start_listener(listener_id(quic, ListenerName),
|
||||
|
@ -315,7 +318,9 @@ esockd_opts(Type, Opts0) ->
|
|||
|
||||
ws_opts(Type, ListenerName, Opts) ->
|
||||
WsPaths = [{maps:get(mqtt_path, Opts, "/mqtt"), emqx_ws_connection,
|
||||
#{zone => zone(Opts), listener => {Type, ListenerName}}}],
|
||||
#{zone => zone(Opts),
|
||||
listener => {Type, ListenerName},
|
||||
limiter => limiter(Opts)}}],
|
||||
Dispatch = cowboy_router:compile([{'_', WsPaths}]),
|
||||
ProxyProto = maps:get(proxy_protocol, Opts, false),
|
||||
#{env => #{dispatch => Dispatch}, proxy_header => ProxyProto}.
|
||||
|
@ -380,6 +385,9 @@ parse_listener_id(Id) ->
|
|||
zone(Opts) ->
|
||||
maps:get(zone, Opts, undefined).
|
||||
|
||||
limiter(Opts) ->
|
||||
maps:get(limiter, Opts).
|
||||
|
||||
ssl_opts(Opts) ->
|
||||
maps:to_list(
|
||||
emqx_tls_lib:drop_tls13_for_old_otp(
|
||||
|
|
|
@ -55,6 +55,8 @@
|
|||
, hexstr2bin/1
|
||||
]).
|
||||
|
||||
-export([clamp/3]).
|
||||
|
||||
-define(SHORT, 8).
|
||||
|
||||
%% @doc Parse v4 or v6 string format address to tuple.
|
||||
|
@ -305,6 +307,13 @@ gen_id(Len) ->
|
|||
<<R:BitLen>> = crypto:strong_rand_bytes(Len div 2),
|
||||
int_to_hex(R, Len).
|
||||
|
||||
-spec clamp(number(), number(), number()) -> number().
|
||||
clamp(Val, Min, Max) ->
|
||||
if Val < Min -> Min;
|
||||
Val > Max -> Max;
|
||||
true -> Val
|
||||
end.
|
||||
|
||||
%%------------------------------------------------------------------------------
|
||||
%% Internal Functions
|
||||
%%------------------------------------------------------------------------------
|
||||
|
|
|
@ -17,81 +17,120 @@
|
|||
-module(emqx_passwd).
|
||||
|
||||
-export([ hash/2
|
||||
, check_pass/2
|
||||
, hash_data/2
|
||||
, check_pass/3
|
||||
]).
|
||||
|
||||
-export_type([ password/0
|
||||
, password_hash/0
|
||||
, hash_type_simple/0
|
||||
, hash_type/0
|
||||
, salt_position/0
|
||||
, salt/0]).
|
||||
|
||||
-include("logger.hrl").
|
||||
|
||||
-type(hash_type() :: plain | md5 | sha | sha256 | sha512 | pbkdf2 | bcrypt).
|
||||
-type(password() :: binary()).
|
||||
-type(password_hash() :: binary()).
|
||||
|
||||
-export_type([hash_type/0]).
|
||||
-type(hash_type_simple() :: plain | md5 | sha | sha256 | sha512).
|
||||
-type(hash_type() :: hash_type_simple() | bcrypt | pbkdf2).
|
||||
|
||||
-type(salt_position() :: prefix | suffix).
|
||||
-type(salt() :: binary()).
|
||||
|
||||
-type(pbkdf2_mac_fun() :: md4 | md5 | ripemd160 | sha | sha224 | sha256 | sha384 | sha512).
|
||||
-type(pbkdf2_iterations() :: pos_integer()).
|
||||
-type(pbkdf2_dk_length() :: pos_integer() | undefined).
|
||||
|
||||
-type(hash_params() ::
|
||||
{bcrypt, salt()} |
|
||||
{pbkdf2, pbkdf2_mac_fun(), salt(), pbkdf2_iterations(), pbkdf2_dk_length()} |
|
||||
{hash_type_simple(), salt(), salt_position()}).
|
||||
|
||||
-export_type([pbkdf2_mac_fun/0]).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% APIs
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(check_pass(binary() | tuple(), binary() | tuple())
|
||||
-> ok | {error, term()}).
|
||||
check_pass({PassHash, Password}, bcrypt) ->
|
||||
try
|
||||
Salt = binary:part(PassHash, {0, 29}),
|
||||
check_pass(PassHash, emqx_passwd:hash(bcrypt, {Salt, Password}))
|
||||
catch
|
||||
error:badarg -> {error, incorrect_hash}
|
||||
-spec(check_pass(hash_params(), password_hash(), password()) -> boolean()).
|
||||
check_pass({pbkdf2, MacFun, Salt, Iterations, DKLength}, PasswordHash, Password) ->
|
||||
case pbkdf2(MacFun, Password, Salt, Iterations, DKLength) of
|
||||
{ok, HashPasswd} ->
|
||||
compare_secure(hex(HashPasswd), PasswordHash);
|
||||
{error, _Reason}->
|
||||
false
|
||||
end;
|
||||
check_pass({PassHash, Password}, HashType) ->
|
||||
check_pass(PassHash, emqx_passwd:hash(HashType, Password));
|
||||
check_pass({PassHash, Salt, Password}, {pbkdf2, Macfun, Iterations, Dklen}) ->
|
||||
check_pass(PassHash, emqx_passwd:hash(pbkdf2, {Salt, Password, Macfun, Iterations, Dklen}));
|
||||
check_pass({PassHash, Salt, Password}, {salt, bcrypt}) ->
|
||||
check_pass(PassHash, emqx_passwd:hash(bcrypt, {Salt, Password}));
|
||||
check_pass({PassHash, Salt, Password}, {bcrypt, salt}) ->
|
||||
check_pass(PassHash, emqx_passwd:hash(bcrypt, {Salt, Password}));
|
||||
check_pass({PassHash, Salt, Password}, {salt, HashType}) ->
|
||||
check_pass(PassHash, emqx_passwd:hash(HashType, <<Salt/binary, Password/binary>>));
|
||||
check_pass({PassHash, Salt, Password}, {HashType, salt}) ->
|
||||
check_pass(PassHash, emqx_passwd:hash(HashType, <<Password/binary, Salt/binary>>));
|
||||
check_pass(PassHash, PassHash) -> ok;
|
||||
check_pass(_Hash1, _Hash2) -> {error, password_error}.
|
||||
check_pass({bcrypt, Salt}, PasswordHash, Password) ->
|
||||
case bcrypt:hashpw(Password, Salt) of
|
||||
{ok, HashPasswd} ->
|
||||
compare_secure(list_to_binary(HashPasswd), PasswordHash);
|
||||
{error, _Reason}->
|
||||
false
|
||||
end;
|
||||
check_pass({_SimpleHash, _Salt, _SaltPosition} = HashParams, PasswordHash, Password) ->
|
||||
Hash = hash(HashParams, Password),
|
||||
compare_secure(Hash, PasswordHash).
|
||||
|
||||
-spec(hash(hash_type(), binary() | tuple()) -> binary()).
|
||||
hash(plain, Password) ->
|
||||
Password;
|
||||
hash(md5, Password) ->
|
||||
hexstring(crypto:hash(md5, Password));
|
||||
hash(sha, Password) ->
|
||||
hexstring(crypto:hash(sha, Password));
|
||||
hash(sha256, Password) ->
|
||||
hexstring(crypto:hash(sha256, Password));
|
||||
hash(sha512, Password) ->
|
||||
hexstring(crypto:hash(sha512, Password));
|
||||
hash(pbkdf2, {Salt, Password, Macfun, Iterations, Dklen}) ->
|
||||
case pbkdf2:pbkdf2(Macfun, Password, Salt, Iterations, Dklen) of
|
||||
{ok, Hexstring} ->
|
||||
pbkdf2:to_hex(Hexstring);
|
||||
{error, Reason} ->
|
||||
?SLOG(error, #{msg => "pbkdf2_hash_error", reason => Reason}),
|
||||
<<>>
|
||||
-spec(hash(hash_params(), password()) -> password_hash()).
|
||||
hash({pbkdf2, MacFun, Salt, Iterations, DKLength}, Password) ->
|
||||
case pbkdf2(MacFun, Password, Salt, Iterations, DKLength) of
|
||||
{ok, HashPasswd} ->
|
||||
hex(HashPasswd);
|
||||
{error, Reason}->
|
||||
error(Reason)
|
||||
end;
|
||||
hash(bcrypt, {Salt, Password}) ->
|
||||
{ok, _} = application:ensure_all_started(bcrypt),
|
||||
hash({bcrypt, Salt}, Password) ->
|
||||
case bcrypt:hashpw(Password, Salt) of
|
||||
{ok, HashPasswd} ->
|
||||
list_to_binary(HashPasswd);
|
||||
{error, Reason}->
|
||||
?SLOG(error, #{msg => "bcrypt_hash_error", reason => Reason}),
|
||||
<<>>
|
||||
error(Reason)
|
||||
end;
|
||||
hash({SimpleHash, Salt, prefix}, Password) when is_binary(Password), is_binary(Salt) ->
|
||||
hash_data(SimpleHash, <<Salt/binary, Password/binary>>);
|
||||
hash({SimpleHash, Salt, suffix}, Password) when is_binary(Password), is_binary(Salt) ->
|
||||
hash_data(SimpleHash, <<Password/binary, Salt/binary>>).
|
||||
|
||||
|
||||
-spec(hash_data(hash_type(), binary()) -> binary()).
|
||||
hash_data(plain, Data) when is_binary(Data) ->
|
||||
Data;
|
||||
hash_data(md5, Data) when is_binary(Data) ->
|
||||
hex(crypto:hash(md5, Data));
|
||||
hash_data(sha, Data) when is_binary(Data) ->
|
||||
hex(crypto:hash(sha, Data));
|
||||
hash_data(sha256, Data) when is_binary(Data) ->
|
||||
hex(crypto:hash(sha256, Data));
|
||||
hash_data(sha512, Data) when is_binary(Data) ->
|
||||
hex(crypto:hash(sha512, Data)).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
compare_secure(X, Y) when is_binary(X), is_binary(Y) ->
|
||||
compare_secure(binary_to_list(X), binary_to_list(Y));
|
||||
compare_secure(X, Y) when is_list(X), is_list(Y) ->
|
||||
case length(X) == length(Y) of
|
||||
true ->
|
||||
compare_secure(X, Y, 0);
|
||||
false ->
|
||||
false
|
||||
end.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Internal funcs
|
||||
%%--------------------------------------------------------------------
|
||||
compare_secure([X | RestX], [Y | RestY], Result) ->
|
||||
compare_secure(RestX, RestY, (X bxor Y) bor Result);
|
||||
compare_secure([], [], Result) ->
|
||||
Result == 0.
|
||||
|
||||
hexstring(<<X:128/big-unsigned-integer>>) ->
|
||||
iolist_to_binary(io_lib:format("~32.16.0b", [X]));
|
||||
hexstring(<<X:160/big-unsigned-integer>>) ->
|
||||
iolist_to_binary(io_lib:format("~40.16.0b", [X]));
|
||||
hexstring(<<X:256/big-unsigned-integer>>) ->
|
||||
iolist_to_binary(io_lib:format("~64.16.0b", [X]));
|
||||
hexstring(<<X:512/big-unsigned-integer>>) ->
|
||||
iolist_to_binary(io_lib:format("~128.16.0b", [X])).
|
||||
|
||||
pbkdf2(MacFun, Password, Salt, Iterations, undefined) ->
|
||||
pbkdf2:pbkdf2(MacFun, Password, Salt, Iterations);
|
||||
pbkdf2(MacFun, Password, Salt, Iterations, DKLength) ->
|
||||
pbkdf2:pbkdf2(MacFun, Password, Salt, Iterations, DKLength).
|
||||
|
||||
|
||||
hex(X) when is_binary(X) ->
|
||||
pbkdf2:to_hex(X).
|
||||
|
|
|
@ -166,9 +166,6 @@ roots(low) ->
|
|||
, {"quota",
|
||||
sc(ref("quota"),
|
||||
#{})}
|
||||
, {"plugins", %% TODO: move to emqx_conf_schema
|
||||
sc(ref("plugins"),
|
||||
#{})}
|
||||
, {"stats",
|
||||
sc(ref("stats"),
|
||||
#{})}
|
||||
|
@ -184,6 +181,9 @@ roots(low) ->
|
|||
, {"persistent_session_store",
|
||||
sc(ref("persistent_session_store"),
|
||||
#{})}
|
||||
, {"latency_stats",
|
||||
sc(ref("latency_stats"),
|
||||
#{})}
|
||||
].
|
||||
|
||||
fields("persistent_session_store") ->
|
||||
|
@ -806,13 +806,6 @@ fields("deflate_opts") ->
|
|||
}
|
||||
];
|
||||
|
||||
fields("plugins") ->
|
||||
[ {"expand_plugins_dir",
|
||||
sc(string(),
|
||||
#{})
|
||||
}
|
||||
];
|
||||
|
||||
fields("broker") ->
|
||||
[ {"sys_msg_interval",
|
||||
sc(hoconsc:union([disabled, duration()]),
|
||||
|
@ -983,6 +976,11 @@ when deactivated, but after the retention time.
|
|||
"""
|
||||
})
|
||||
}
|
||||
];
|
||||
|
||||
fields("latency_stats") ->
|
||||
[ {"samples", sc(integer(), #{default => 10,
|
||||
desc => "the number of smaples for calculate the average latency of delivery"})}
|
||||
].
|
||||
|
||||
mqtt_listener() ->
|
||||
|
@ -1026,6 +1024,8 @@ base_listener() ->
|
|||
sc(atom(),
|
||||
#{ default => 'default'
|
||||
})}
|
||||
, {"limiter",
|
||||
sc(map("ratelimit bucket's name", atom()), #{default => #{}})}
|
||||
].
|
||||
|
||||
%% utils
|
||||
|
|
|
@ -98,7 +98,8 @@
|
|||
]).
|
||||
|
||||
-record(session, {
|
||||
%% sessionID, fresh for all new sessions unless it is a resumed persistent session
|
||||
%% Client's id
|
||||
clientid :: emqx_types:clientid(),
|
||||
id :: sessionID(),
|
||||
%% Is this session a persistent session i.e. was it started with Session-Expiry > 0
|
||||
is_persistent :: boolean(),
|
||||
|
@ -128,9 +129,16 @@
|
|||
%% Awaiting PUBREL Timeout (Unit: millsecond)
|
||||
await_rel_timeout :: timeout(),
|
||||
%% Created at
|
||||
created_at :: pos_integer()
|
||||
created_at :: pos_integer(),
|
||||
%% Message deliver latency stats
|
||||
latency_stats :: emqx_message_latency_stats:stats()
|
||||
}).
|
||||
|
||||
%% in the previous code, we will replace the message record with the pubrel atom
|
||||
%% in the pubrec function, this will lose the creation time of the message,
|
||||
%% but now we need this time to calculate latency, so now pubrel atom is changed to this record
|
||||
-record(pubrel_await, {timestamp :: non_neg_integer()}).
|
||||
|
||||
-type(session() :: #session{}).
|
||||
|
||||
-type(publish() :: {maybe(emqx_types:packet_id()), emqx_types:message()}).
|
||||
|
@ -157,7 +165,8 @@
|
|||
mqueue_dropped,
|
||||
next_pkt_id,
|
||||
awaiting_rel_cnt,
|
||||
awaiting_rel_max
|
||||
awaiting_rel_max,
|
||||
latency_stats
|
||||
]).
|
||||
|
||||
-define(DEFAULT_BATCH_N, 1000).
|
||||
|
@ -170,6 +179,8 @@
|
|||
, max_inflight => integer()
|
||||
, mqueue => emqx_mqueue:options()
|
||||
, is_persistent => boolean()
|
||||
, clientid => emqx_types:clientid()
|
||||
, latency_stats => emqx_message_latency_stats:create_options()
|
||||
}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
|
@ -185,6 +196,7 @@ init(Opts) ->
|
|||
}, maps:get(mqueue, Opts, #{})),
|
||||
#session{
|
||||
id = emqx_guid:gen(),
|
||||
clientid = maps:get(clientid, Opts, <<>>),
|
||||
is_persistent = maps:get(is_persistent, Opts, false),
|
||||
max_subscriptions = maps:get(max_subscriptions, Opts, infinity),
|
||||
subscriptions = #{},
|
||||
|
@ -196,7 +208,8 @@ init(Opts) ->
|
|||
awaiting_rel = #{},
|
||||
max_awaiting_rel = maps:get(max_awaiting_rel, Opts, 100),
|
||||
await_rel_timeout = maps:get(await_rel_timeout, Opts, 300000),
|
||||
created_at = erlang:system_time(millisecond)
|
||||
created_at = erlang:system_time(millisecond),
|
||||
latency_stats = emqx_message_latency_stats:new(maps:get(latency_stats, Opts, #{}))
|
||||
}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
|
@ -252,7 +265,9 @@ info(awaiting_rel_max, #session{max_awaiting_rel = Max}) ->
|
|||
info(await_rel_timeout, #session{await_rel_timeout = Timeout}) ->
|
||||
Timeout;
|
||||
info(created_at, #session{created_at = CreatedAt}) ->
|
||||
CreatedAt.
|
||||
CreatedAt;
|
||||
info(latency_stats, #session{latency_stats = Stats}) ->
|
||||
emqx_message_latency_stats:latency(Stats).
|
||||
|
||||
%% @doc Get stats of the session.
|
||||
-spec(stats(session()) -> emqx_types:stats()).
|
||||
|
@ -365,7 +380,8 @@ puback(PacketId, Session = #session{inflight = Inflight}) ->
|
|||
case emqx_inflight:lookup(PacketId, Inflight) of
|
||||
{value, {Msg, _Ts}} when is_record(Msg, message) ->
|
||||
Inflight1 = emqx_inflight:delete(PacketId, Inflight),
|
||||
return_with(Msg, dequeue(Session#session{inflight = Inflight1}));
|
||||
Session2 = update_latency(Msg, Session),
|
||||
return_with(Msg, dequeue(Session2#session{inflight = Inflight1}));
|
||||
{value, {_Pubrel, _Ts}} ->
|
||||
{error, ?RC_PACKET_IDENTIFIER_IN_USE};
|
||||
none ->
|
||||
|
@ -388,9 +404,10 @@ return_with(Msg, {ok, Publishes, Session}) ->
|
|||
pubrec(PacketId, Session = #session{inflight = Inflight}) ->
|
||||
case emqx_inflight:lookup(PacketId, Inflight) of
|
||||
{value, {Msg, _Ts}} when is_record(Msg, message) ->
|
||||
Inflight1 = emqx_inflight:update(PacketId, with_ts(pubrel), Inflight),
|
||||
Update = with_ts(#pubrel_await{timestamp = Msg#message.timestamp}),
|
||||
Inflight1 = emqx_inflight:update(PacketId, Update, Inflight),
|
||||
{ok, Msg, Session#session{inflight = Inflight1}};
|
||||
{value, {pubrel, _Ts}} ->
|
||||
{value, {_Pubrel, _Ts}} ->
|
||||
{error, ?RC_PACKET_IDENTIFIER_IN_USE};
|
||||
none ->
|
||||
{error, ?RC_PACKET_IDENTIFIER_NOT_FOUND}
|
||||
|
@ -419,9 +436,10 @@ pubrel(PacketId, Session = #session{awaiting_rel = AwaitingRel}) ->
|
|||
| {error, emqx_types:reason_code()}).
|
||||
pubcomp(PacketId, Session = #session{inflight = Inflight}) ->
|
||||
case emqx_inflight:lookup(PacketId, Inflight) of
|
||||
{value, {pubrel, _Ts}} ->
|
||||
{value, {Pubrel, _Ts}} when is_record(Pubrel, pubrel_await) ->
|
||||
Session2 = update_latency(Pubrel, Session),
|
||||
Inflight1 = emqx_inflight:delete(PacketId, Inflight),
|
||||
dequeue(Session#session{inflight = Inflight1});
|
||||
dequeue(Session2#session{inflight = Inflight1});
|
||||
{value, _Other} ->
|
||||
{error, ?RC_PACKET_IDENTIFIER_IN_USE};
|
||||
none ->
|
||||
|
@ -588,11 +606,16 @@ await(PacketId, Msg, Session = #session{inflight = Inflight}) ->
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
-spec(retry(session()) -> {ok, session()} | {ok, replies(), timeout(), session()}).
|
||||
retry(Session = #session{inflight = Inflight}) ->
|
||||
retry(Session = #session{inflight = Inflight, retry_interval = RetryInterval}) ->
|
||||
case emqx_inflight:is_empty(Inflight) of
|
||||
true -> {ok, Session};
|
||||
false -> retry_delivery(emqx_inflight:to_list(sort_fun(), Inflight),
|
||||
[], erlang:system_time(millisecond), Session)
|
||||
false ->
|
||||
Now = erlang:system_time(millisecond),
|
||||
Session2 = check_expire_latency(Now, RetryInterval, Session),
|
||||
retry_delivery(emqx_inflight:to_list(sort_fun(), Inflight),
|
||||
[],
|
||||
Now,
|
||||
Session2)
|
||||
end.
|
||||
|
||||
retry_delivery([], Acc, _Now, Session = #session{retry_interval = Interval}) ->
|
||||
|
@ -619,8 +642,8 @@ retry_delivery(PacketId, Msg, Now, Acc, Inflight) when is_record(Msg, message) -
|
|||
{[{PacketId, Msg1}|Acc], Inflight1}
|
||||
end;
|
||||
|
||||
retry_delivery(PacketId, pubrel, Now, Acc, Inflight) ->
|
||||
Inflight1 = emqx_inflight:update(PacketId, {pubrel, Now}, Inflight),
|
||||
retry_delivery(PacketId, Pubrel, Now, Acc, Inflight) ->
|
||||
Inflight1 = emqx_inflight:update(PacketId, {Pubrel, Now}, Inflight),
|
||||
{[{pubrel, PacketId}|Acc], Inflight1}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
|
@ -664,7 +687,7 @@ resume(ClientInfo = #{clientid := ClientId}, Session = #session{subscriptions =
|
|||
|
||||
-spec(replay(session()) -> {ok, replies(), session()}).
|
||||
replay(Session = #session{inflight = Inflight}) ->
|
||||
Pubs = lists:map(fun({PacketId, {pubrel, _Ts}}) ->
|
||||
Pubs = lists:map(fun({PacketId, {Pubrel, _Ts}}) when is_record(Pubrel, pubrel_await) ->
|
||||
{pubrel, PacketId};
|
||||
({PacketId, {Msg, _Ts}}) ->
|
||||
{PacketId, emqx_message:set_flag(dup, true, Msg)}
|
||||
|
@ -715,6 +738,35 @@ next_pkt_id(Session = #session{next_pkt_id = ?MAX_PACKET_ID}) ->
|
|||
next_pkt_id(Session = #session{next_pkt_id = Id}) ->
|
||||
Session#session{next_pkt_id = Id + 1}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Message Latency Stats
|
||||
%%--------------------------------------------------------------------
|
||||
update_latency(Msg,
|
||||
#session{clientid = ClientId,
|
||||
latency_stats = Stats,
|
||||
created_at = CreateAt} = S) ->
|
||||
case get_birth_timestamp(Msg, CreateAt) of
|
||||
0 -> S;
|
||||
Ts ->
|
||||
Latency = erlang:system_time(millisecond) - Ts,
|
||||
Stats2 = emqx_message_latency_stats:update(ClientId, Latency, Stats),
|
||||
S#session{latency_stats = Stats2}
|
||||
end.
|
||||
|
||||
check_expire_latency(Now, Interval,
|
||||
#session{clientid = ClientId, latency_stats = Stats} = S) ->
|
||||
Stats2 = emqx_message_latency_stats:check_expire(ClientId, Now, Interval, Stats),
|
||||
S#session{latency_stats = Stats2}.
|
||||
|
||||
get_birth_timestamp(#message{timestamp = Ts}, CreateAt) when CreateAt =< Ts ->
|
||||
Ts;
|
||||
|
||||
get_birth_timestamp(#pubrel_await{timestamp = Ts}, CreateAt) when CreateAt =< Ts ->
|
||||
Ts;
|
||||
|
||||
get_birth_timestamp(_, _) ->
|
||||
0.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Helper functions
|
||||
%%--------------------------------------------------------------------
|
||||
|
|
|
@ -0,0 +1,120 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_message_latency_stats).
|
||||
|
||||
%% API
|
||||
-export([new/1, update/3, check_expire/4, latency/1]).
|
||||
|
||||
-export([get_threshold/0, update_threshold/1]).
|
||||
|
||||
-define(NOW, erlang:system_time(millisecond)).
|
||||
-define(MINIMUM_INSERT_INTERVAL, 1000).
|
||||
-define(MINIMUM_THRESHOLD, 100).
|
||||
-define(DEFAULT_THRESHOLD, 500).
|
||||
-define(DEFAULT_SAMPLES, 10).
|
||||
-define(THRESHOLD_KEY, {?MODULE, threshold}).
|
||||
|
||||
-opaque stats() :: #{ ema := emqx_moving_average:ema()
|
||||
, last_update_time := timestamp()
|
||||
, last_access_time := timestamp() %% timestamp of last access top-k
|
||||
, last_insert_value := non_neg_integer()
|
||||
}.
|
||||
|
||||
-type timestamp() :: non_neg_integer().
|
||||
-type timespan() :: number().
|
||||
|
||||
-type latency_type() :: average
|
||||
| expire.
|
||||
|
||||
-type create_options() :: #{samples => pos_integer()}.
|
||||
|
||||
-export_type([stats/0, latency_type/0, create_options/0]).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% API
|
||||
%%--------------------------------------------------------------------
|
||||
-spec new(non_neg_integer() | create_options()) -> stats().
|
||||
new(SamplesT) when is_integer(SamplesT) ->
|
||||
Samples = erlang:max(1, SamplesT),
|
||||
#{ ema => emqx_moving_average:new(exponential, #{period => Samples})
|
||||
, last_update_time => 0
|
||||
, last_access_time => 0
|
||||
, last_insert_value => 0
|
||||
};
|
||||
|
||||
new(OptsT) ->
|
||||
Opts = maps:merge(#{samples => ?DEFAULT_SAMPLES}, OptsT),
|
||||
#{samples := Samples} = Opts,
|
||||
new(Samples).
|
||||
|
||||
-spec update(emqx_types:clientid(), number(), stats()) -> stats().
|
||||
update(ClientId, Val, #{ema := EMA} = Stats) ->
|
||||
Now = ?NOW,
|
||||
#{average := Latency} = EMA2 = emqx_moving_average:update(Val, EMA),
|
||||
Stats2 = call_hook(ClientId, Now, average, Latency, Stats),
|
||||
Stats2#{ ema := EMA2
|
||||
, last_update_time := ?NOW}.
|
||||
|
||||
-spec check_expire(emqx_types:clientid(), timestamp(), timespan(), stats()) -> stats().
|
||||
check_expire(_, Now, Interval, #{last_update_time := LUT} = S)
|
||||
when LUT >= Now - Interval ->
|
||||
S;
|
||||
|
||||
check_expire(ClientId, Now, _Interval, #{last_update_time := LUT} = S) ->
|
||||
Latency = Now - LUT,
|
||||
call_hook(ClientId, Now, expire, Latency, S).
|
||||
|
||||
-spec latency(stats()) -> number().
|
||||
latency(#{ema := #{average := Average}}) ->
|
||||
Average.
|
||||
|
||||
-spec update_threshold(pos_integer()) -> pos_integer().
|
||||
update_threshold(Threshold) ->
|
||||
Val = erlang:max(Threshold, ?MINIMUM_THRESHOLD),
|
||||
persistent_term:put(?THRESHOLD_KEY, Val),
|
||||
Val.
|
||||
|
||||
get_threshold() ->
|
||||
persistent_term:get(?THRESHOLD_KEY, ?DEFAULT_THRESHOLD).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
||||
-spec call_hook(emqx_types:clientid(), timestamp(), latency_type(), timespan(), stats()) -> stats().
|
||||
call_hook(_, _, _, Latency, S)
|
||||
when Latency =< ?MINIMUM_THRESHOLD ->
|
||||
S;
|
||||
|
||||
call_hook(_, Now, _, _, #{last_access_time := LIT} = S)
|
||||
when Now =< LIT + ?MINIMUM_INSERT_INTERVAL ->
|
||||
S;
|
||||
|
||||
call_hook(ClientId, Now, Type, Latency, #{last_insert_value := LIV} = Stats) ->
|
||||
case Latency =< get_threshold() of
|
||||
true ->
|
||||
Stats#{last_access_time := Now};
|
||||
_ ->
|
||||
ToInsert = erlang:floor(Latency),
|
||||
Arg = #{clientid => ClientId,
|
||||
latency => ToInsert,
|
||||
type => Type,
|
||||
last_insert_value => LIV,
|
||||
update_time => Now},
|
||||
emqx:run_hook('message.slow_subs_stats', [Arg]),
|
||||
Stats#{last_insert_value := ToInsert,
|
||||
last_access_time := Now}
|
||||
end.
|
|
@ -0,0 +1,90 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
%% @see https://en.wikipedia.org/wiki/Moving_average
|
||||
|
||||
-module(emqx_moving_average).
|
||||
|
||||
%% API
|
||||
-export([new/0, new/1, new/2, update/2]).
|
||||
|
||||
-type type() :: cumulative
|
||||
| exponential.
|
||||
|
||||
-type ema() :: #{ type := exponential
|
||||
, average := 0 | float()
|
||||
, coefficient := float()
|
||||
}.
|
||||
|
||||
-type cma() :: #{ type := cumulative
|
||||
, average := 0 | float()
|
||||
, count := non_neg_integer()
|
||||
}.
|
||||
|
||||
-type moving_average() :: ema()
|
||||
| cma().
|
||||
|
||||
-define(DEF_EMA_ARG, #{period => 10}).
|
||||
-define(DEF_AVG_TYPE, exponential).
|
||||
|
||||
-export_type([type/0, moving_average/0, ema/0, cma/0]).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% API
|
||||
%%--------------------------------------------------------------------
|
||||
-spec new() -> moving_average().
|
||||
new() ->
|
||||
new(?DEF_AVG_TYPE, #{}).
|
||||
|
||||
-spec new(type()) -> moving_average().
|
||||
new(Type) ->
|
||||
new(Type, #{}).
|
||||
|
||||
-spec new(type(), Args :: map()) -> moving_average().
|
||||
new(cumulative, _) ->
|
||||
#{ type => cumulative
|
||||
, average => 0
|
||||
, count => 0
|
||||
};
|
||||
|
||||
new(exponential, Arg) ->
|
||||
#{period := Period} = maps:merge(?DEF_EMA_ARG, Arg),
|
||||
#{ type => exponential
|
||||
, average => 0
|
||||
%% coefficient = 2/(N+1) is a common convention, see the wiki link for details
|
||||
, coefficient => 2 / (Period + 1)
|
||||
}.
|
||||
|
||||
-spec update(number(), moving_average()) -> moving_average().
|
||||
|
||||
update(Val, #{average := 0} = Avg) ->
|
||||
Avg#{average := Val};
|
||||
|
||||
update(Val, #{ type := cumulative
|
||||
, average := Average
|
||||
, count := Count} = CMA) ->
|
||||
NewCount = Count + 1,
|
||||
CMA#{average := (Count * Average + Val) / NewCount,
|
||||
count := NewCount};
|
||||
|
||||
update(Val, #{ type := exponential
|
||||
, average := Average
|
||||
, coefficient := Coefficient} = EMA) ->
|
||||
EMA#{average := Coefficient * Val + (1 - Coefficient) * Average}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
|
@ -68,12 +68,13 @@ init([]) ->
|
|||
SessionSup = child_spec(emqx_persistent_session_sup, supervisor),
|
||||
CMSup = child_spec(emqx_cm_sup, supervisor),
|
||||
SysSup = child_spec(emqx_sys_sup, supervisor),
|
||||
Limiter = child_spec(emqx_limiter_sup, supervisor),
|
||||
Children = [KernelSup] ++
|
||||
[SessionSup || emqx_persistent_session:is_store_enabled()] ++
|
||||
[RouterSup || emqx_boot:is_enabled(router)] ++
|
||||
[BrokerSup || emqx_boot:is_enabled(broker)] ++
|
||||
[CMSup || emqx_boot:is_enabled(broker)] ++
|
||||
[SysSup],
|
||||
[SysSup, Limiter],
|
||||
SupFlags = #{strategy => one_for_all,
|
||||
intensity => 0,
|
||||
period => 1
|
||||
|
|
|
@ -353,11 +353,12 @@ is_valid_pem_file(Path) ->
|
|||
%% @doc This is to return SSL file content in management APIs.
|
||||
file_content_as_options(undefined) -> undefined;
|
||||
file_content_as_options(#{<<"enable">> := false} = SSL) ->
|
||||
maps:without(?SSL_FILE_OPT_NAMES, SSL);
|
||||
{ok, maps:without(?SSL_FILE_OPT_NAMES, SSL)};
|
||||
file_content_as_options(#{<<"enable">> := true} = SSL) ->
|
||||
file_content_as_options(?SSL_FILE_OPT_NAMES, SSL).
|
||||
|
||||
file_content_as_options([], SSL) -> {ok, SSL};
|
||||
file_content_as_options([], SSL) ->
|
||||
{ok, SSL};
|
||||
file_content_as_options([Key | Keys], SSL) ->
|
||||
case maps:get(Key, SSL, undefined) of
|
||||
undefined -> file_content_as_options(Keys, SSL);
|
||||
|
|
|
@ -63,10 +63,6 @@
|
|||
sockstate :: emqx_types:sockstate(),
|
||||
%% MQTT Piggyback
|
||||
mqtt_piggyback :: single | multiple,
|
||||
%% Limiter
|
||||
limiter :: maybe(emqx_limiter:limiter()),
|
||||
%% Limit Timer
|
||||
limit_timer :: maybe(reference()),
|
||||
%% Parse State
|
||||
parse_state :: emqx_frame:parse_state(),
|
||||
%% Serialize options
|
||||
|
@ -86,10 +82,30 @@
|
|||
%% Zone name
|
||||
zone :: atom(),
|
||||
%% Listener Type and Name
|
||||
listener :: {Type::atom(), Name::atom()}
|
||||
}).
|
||||
listener :: {Type::atom(), Name::atom()},
|
||||
|
||||
%% Limiter
|
||||
limiter :: maybe(container()),
|
||||
|
||||
%% cache operations when overloaded
|
||||
limiter_cache :: queue:queue(cache()),
|
||||
|
||||
%% limiter timers
|
||||
limiter_timer :: undefined | reference()
|
||||
}).
|
||||
|
||||
-record(retry, { types :: list(limiter_type())
|
||||
, data :: any()
|
||||
, next :: check_succ_handler()
|
||||
}).
|
||||
|
||||
-record(cache, { need :: list({pos_integer(), limiter_type()})
|
||||
, data :: any()
|
||||
, next :: check_succ_handler()
|
||||
}).
|
||||
|
||||
-type(state() :: #state{}).
|
||||
-type cache() :: #cache{}.
|
||||
|
||||
-type(ws_cmd() :: {active, boolean()}|close).
|
||||
|
||||
|
@ -99,6 +115,8 @@
|
|||
-define(CONN_STATS, [recv_pkt, recv_msg, send_pkt, send_msg]).
|
||||
|
||||
-define(ENABLED(X), (X =/= undefined)).
|
||||
-define(LIMITER_BYTES_IN, bytes_in).
|
||||
-define(LIMITER_MESSAGE_IN, message_in).
|
||||
|
||||
-dialyzer({no_match, [info/2]}).
|
||||
-dialyzer({nowarn_function, [websocket_init/1]}).
|
||||
|
@ -126,7 +144,7 @@ info(sockname, #state{sockname = Sockname}) ->
|
|||
info(sockstate, #state{sockstate = SockSt}) ->
|
||||
SockSt;
|
||||
info(limiter, #state{limiter = Limiter}) ->
|
||||
maybe_apply(fun emqx_limiter:info/1, Limiter);
|
||||
Limiter;
|
||||
info(channel, #state{channel = Channel}) ->
|
||||
emqx_channel:info(Channel);
|
||||
info(gc_state, #state{gc_state = GcSt}) ->
|
||||
|
@ -242,7 +260,8 @@ check_origin_header(Req, #{listener := {Type, Listener}} = Opts) ->
|
|||
false -> ok
|
||||
end.
|
||||
|
||||
websocket_init([Req, #{zone := Zone, listener := {Type, Listener}} = Opts]) ->
|
||||
websocket_init([Req,
|
||||
#{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener}} = Opts]) ->
|
||||
{Peername, Peercert} =
|
||||
case emqx_config:get_listener_conf(Type, Listener, [proxy_protocol]) andalso
|
||||
maps:get(proxy_header, Req) of
|
||||
|
@ -279,7 +298,7 @@ websocket_init([Req, #{zone := Zone, listener := {Type, Listener}} = Opts]) ->
|
|||
ws_cookie => WsCookie,
|
||||
conn_mod => ?MODULE
|
||||
},
|
||||
Limiter = emqx_limiter:init(Zone, undefined, undefined, []),
|
||||
Limiter = emqx_limiter_container:get_limiter_by_names([?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN], LimiterCfg),
|
||||
MQTTPiggyback = get_ws_opts(Type, Listener, mqtt_piggyback),
|
||||
FrameOpts = #{
|
||||
strict_mode => emqx_config:get_zone_conf(Zone, [mqtt, strict_mode]),
|
||||
|
@ -319,7 +338,9 @@ websocket_init([Req, #{zone := Zone, listener := {Type, Listener}} = Opts]) ->
|
|||
idle_timeout = IdleTimeout,
|
||||
idle_timer = IdleTimer,
|
||||
zone = Zone,
|
||||
listener = {Type, Listener}
|
||||
listener = {Type, Listener},
|
||||
limiter_timer = undefined,
|
||||
limiter_cache = queue:new()
|
||||
}, hibernate}.
|
||||
|
||||
websocket_handle({binary, Data}, State) when is_list(Data) ->
|
||||
|
@ -327,9 +348,17 @@ websocket_handle({binary, Data}, State) when is_list(Data) ->
|
|||
|
||||
websocket_handle({binary, Data}, State) ->
|
||||
?SLOG(debug, #{msg => "RECV_data", data => Data, transport => websocket}),
|
||||
ok = inc_recv_stats(1, iolist_size(Data)),
|
||||
NState = ensure_stats_timer(State),
|
||||
return(parse_incoming(Data, NState));
|
||||
State2 = ensure_stats_timer(State),
|
||||
{Packets, State3} = parse_incoming(Data, [], State2),
|
||||
LenMsg = erlang:length(Packets),
|
||||
ByteSize = erlang:iolist_size(Data),
|
||||
inc_recv_stats(LenMsg, ByteSize),
|
||||
State4 = check_limiter([{ByteSize, ?LIMITER_BYTES_IN}, {LenMsg, ?LIMITER_MESSAGE_IN}],
|
||||
Packets,
|
||||
fun when_msg_in/3,
|
||||
[],
|
||||
State3),
|
||||
return(State4);
|
||||
|
||||
%% Pings should be replied with pongs, cowboy does it automatically
|
||||
%% Pongs can be safely ignored. Clause here simply prevents crash.
|
||||
|
@ -343,7 +372,6 @@ websocket_handle({Frame, _}, State) ->
|
|||
%% TODO: should not close the ws connection
|
||||
?SLOG(error, #{msg => "unexpected_frame", frame => Frame}),
|
||||
shutdown(unexpected_ws_frame, State).
|
||||
|
||||
websocket_info({call, From, Req}, State) ->
|
||||
handle_call(From, Req, State);
|
||||
|
||||
|
@ -351,8 +379,7 @@ websocket_info({cast, rate_limit}, State) ->
|
|||
Stats = #{cnt => emqx_pd:reset_counter(incoming_pubs),
|
||||
oct => emqx_pd:reset_counter(incoming_bytes)
|
||||
},
|
||||
NState = postpone({check_gc, Stats}, State),
|
||||
return(ensure_rate_limit(Stats, NState));
|
||||
return(postpone({check_gc, Stats}, State));
|
||||
|
||||
websocket_info({cast, Msg}, State) ->
|
||||
handle_info(Msg, State);
|
||||
|
@ -377,12 +404,18 @@ websocket_info(Deliver = {deliver, _Topic, _Msg},
|
|||
Delivers = [Deliver|emqx_misc:drain_deliver(ActiveN)],
|
||||
with_channel(handle_deliver, [Delivers], State);
|
||||
|
||||
websocket_info({timeout, TRef, limit_timeout},
|
||||
State = #state{limit_timer = TRef}) ->
|
||||
NState = State#state{sockstate = running,
|
||||
limit_timer = undefined
|
||||
},
|
||||
return(enqueue({active, true}, NState));
|
||||
websocket_info({timeout, _, limit_timeout},
|
||||
State) ->
|
||||
return(retry_limiter(State));
|
||||
|
||||
websocket_info(check_cache, #state{limiter_cache = Cache} = State) ->
|
||||
case queue:peek(Cache) of
|
||||
empty ->
|
||||
return(enqueue({active, true}, State#state{sockstate = running}));
|
||||
{value, #cache{need = Needs, data = Data, next = Next}} ->
|
||||
State2 = State#state{limiter_cache = queue:drop(Cache)},
|
||||
return(check_limiter(Needs, Data, Next, [check_cache], State2))
|
||||
end;
|
||||
|
||||
websocket_info({timeout, TRef, Msg}, State) when is_reference(TRef) ->
|
||||
handle_timeout(TRef, Msg, State);
|
||||
|
@ -421,10 +454,9 @@ handle_call(From, stats, State) ->
|
|||
gen_server:reply(From, stats(State)),
|
||||
return(State);
|
||||
|
||||
handle_call(_From, {ratelimit, Policy}, State = #state{channel = Channel}) ->
|
||||
Zone = emqx_channel:info(zone, Channel),
|
||||
Limiter = emqx_limiter:init(Zone, Policy),
|
||||
{reply, ok, State#state{limiter = Limiter}};
|
||||
handle_call(_From, {ratelimit, Type, Bucket}, State = #state{limiter = Limiter}) ->
|
||||
Limiter2 = emqx_limiter_container:update_by_name(Type, Bucket, Limiter),
|
||||
{reply, ok, State#state{limiter = Limiter2}};
|
||||
|
||||
handle_call(From, Req, State = #state{channel = Channel}) ->
|
||||
case emqx_channel:handle_call(Req, Channel) of
|
||||
|
@ -495,21 +527,80 @@ handle_timeout(TRef, TMsg, State) ->
|
|||
%% Ensure rate limit
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
ensure_rate_limit(Stats, State = #state{limiter = Limiter}) ->
|
||||
case ?ENABLED(Limiter) andalso emqx_limiter:check(Stats, Limiter) of
|
||||
false -> State;
|
||||
{ok, Limiter1} ->
|
||||
State#state{limiter = Limiter1};
|
||||
{pause, Time, Limiter1} ->
|
||||
?SLOG(warning, #{msg => "pause_due_to_rate_limit", time => Time}),
|
||||
TRef = start_timer(Time, limit_timeout),
|
||||
NState = State#state{sockstate = blocked,
|
||||
limiter = Limiter1,
|
||||
limit_timer = TRef
|
||||
},
|
||||
enqueue({active, false}, NState)
|
||||
-type limiter_type() :: emqx_limiter_container:limiter_type().
|
||||
-type container() :: emqx_limiter_container:container().
|
||||
-type check_succ_handler() ::
|
||||
fun((any(), list(any()), state()) -> state()).
|
||||
|
||||
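%% Note added for clarity: check_limiter/5 applies the pending Needs to the
%% limiter container. With no limiter timer active it checks immediately:
%% {ok, _} invokes the success handler WhenOk, {pause, Time, _} stores a
%% #retry{} context, blocks the socket and starts a limit_timeout timer,
%% and {drop, _} discards the pending data. While a limiter timer is already
%% running, requests are queued as #cache{} records in limiter_cache and
%% replayed later by the check_cache message.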
-spec check_limiter(list({pos_integer(), limiter_type()}),
|
||||
any(),
|
||||
check_succ_handler(),
|
||||
list(any()),
|
||||
state()) -> state().
|
||||
check_limiter(Needs,
|
||||
Data,
|
||||
WhenOk,
|
||||
Msgs,
|
||||
#state{limiter = Limiter,
|
||||
limiter_timer = LimiterTimer,
|
||||
limiter_cache = Cache} = State) ->
|
||||
case LimiterTimer of
|
||||
undefined ->
|
||||
case emqx_limiter_container:check_list(Needs, Limiter) of
|
||||
{ok, Limiter2} ->
|
||||
WhenOk(Data, Msgs, State#state{limiter = Limiter2});
|
||||
{pause, Time, Limiter2} ->
|
||||
?SLOG(warning, #{msg => "pause time dueto rate limit",
|
||||
needs => Needs,
|
||||
time_in_ms => Time}),
|
||||
|
||||
Retry = #retry{types = [Type || {_, Type} <- Needs],
|
||||
data = Data,
|
||||
next = WhenOk},
|
||||
|
||||
Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2),
|
||||
|
||||
TRef = start_timer(Time, limit_timeout),
|
||||
|
||||
enqueue({active, false},
|
||||
State#state{sockstate = blocked,
|
||||
limiter = Limiter3,
|
||||
limiter_timer = TRef});
|
||||
{drop, Limiter2} ->
|
||||
{ok, State#state{limiter = Limiter2}}
|
||||
end;
|
||||
_ ->
|
||||
New = #cache{need = Needs, data = Data, next = WhenOk},
|
||||
State#state{limiter_cache = queue:in(New, Cache)}
|
||||
end.
|
||||
|
||||
|
||||
-spec retry_limiter(state()) -> state().
|
||||
retry_limiter(#state{limiter = Limiter} = State) ->
|
||||
#retry{types = Types, data = Data, next = Next} = emqx_limiter_container:get_retry_context(Limiter),
|
||||
case emqx_limiter_container:retry_list(Types, Limiter) of
|
||||
{ok, Limiter2} ->
|
||||
Next(Data,
|
||||
[check_cache],
|
||||
State#state{ limiter = Limiter2
|
||||
, limiter_timer = undefined
|
||||
});
|
||||
{pause, Time, Limiter2} ->
|
||||
?SLOG(warning, #{msg => "pause time dueto rate limit",
|
||||
types => Types,
|
||||
time_in_ms => Time}),
|
||||
|
||||
TRef = start_timer(Time, limit_timeout),
|
||||
|
||||
State#state{limiter = Limiter2, limiter_timer = TRef}
|
||||
end.
|
||||
|
||||
when_msg_in(Packets, [], State) ->
|
||||
postpone(Packets, State);
|
||||
|
||||
when_msg_in(Packets, Msgs, State) ->
|
||||
postpone(Packets, enqueue(Msgs, State)).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Run GC, Check OOM
|
||||
%%--------------------------------------------------------------------
|
||||
|
@ -538,16 +629,16 @@ check_oom(State = #state{channel = Channel}) ->
|
|||
%% Parse incoming data
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
parse_incoming(<<>>, State) ->
|
||||
State;
|
||||
parse_incoming(<<>>, Packets, State) ->
|
||||
{Packets, State};
|
||||
|
||||
parse_incoming(Data, State = #state{parse_state = ParseState}) ->
|
||||
parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) ->
|
||||
try emqx_frame:parse(Data, ParseState) of
|
||||
{more, NParseState} ->
|
||||
State#state{parse_state = NParseState};
|
||||
{Packets, State#state{parse_state = NParseState}};
|
||||
{ok, Packet, Rest, NParseState} ->
|
||||
NState = State#state{parse_state = NParseState},
|
||||
parse_incoming(Rest, postpone({incoming, Packet}, NState))
|
||||
parse_incoming(Rest, [{incoming, Packet} | Packets], NState)
|
||||
catch
|
||||
throw : ?FRAME_PARSE_ERROR(Reason) ->
|
||||
?SLOG(info, #{ reason => Reason
|
||||
|
@ -555,7 +646,7 @@ parse_incoming(Data, State = #state{parse_state = ParseState}) ->
|
|||
, input_bytes => Data
|
||||
}),
|
||||
FrameError = {frame_error, Reason},
|
||||
postpone({incoming, FrameError}, State);
|
||||
{[{incoming, FrameError} | Packets], State};
|
||||
error : Reason : Stacktrace ->
|
||||
?SLOG(error, #{ at_state => emqx_frame:describe_state(ParseState)
|
||||
, input_bytes => Data
|
||||
|
@ -563,7 +654,7 @@ parse_incoming(Data, State = #state{parse_state = ParseState}) ->
|
|||
, stacktrace => Stacktrace
|
||||
}),
|
||||
FrameError = {frame_error, Reason},
|
||||
postpone({incoming, FrameError}, State)
|
||||
{[{incoming, FrameError} | Packets], State}
|
||||
end.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
|
|
|
@ -24,7 +24,7 @@ namespace() -> zone.
|
|||
%% roots are added only for document generation.
|
||||
roots() -> ["mqtt", "stats", "flapping_detect", "force_shutdown",
|
||||
"conn_congestion", "rate_limit", "quota", "force_gc",
|
||||
"overload_protection"
|
||||
"overload_protection", "latency_stats"
|
||||
].
|
||||
|
||||
%% zone schemas are clones from the same name from root level
|
||||
|
|
|
@ -129,7 +129,8 @@ basic_conf() ->
|
|||
rpc => rpc_conf(),
|
||||
stats => stats_conf(),
|
||||
listeners => listeners_conf(),
|
||||
zones => zone_conf()
|
||||
zones => zone_conf(),
|
||||
emqx_limiter => emqx:get_config([emqx_limiter])
|
||||
}.
|
||||
|
||||
set_test_listener_confs() ->
|
||||
|
@ -178,14 +179,48 @@ end_per_suite(_Config) ->
|
|||
emqx_banned
|
||||
]).
|
||||
|
||||
init_per_testcase(_TestCase, Config) ->
|
||||
init_per_testcase(TestCase, Config) ->
|
||||
NewConf = set_test_listener_confs(),
|
||||
emqx_common_test_helpers:start_apps([]),
|
||||
modify_limiter(TestCase, NewConf),
|
||||
[{config, NewConf}|Config].
|
||||
|
||||
end_per_testcase(_TestCase, Config) ->
|
||||
emqx_config:put(?config(config, Config)),
|
||||
emqx_common_test_helpers:stop_apps([]),
|
||||
Config.
|
||||
|
||||
modify_limiter(TestCase, NewConf) ->
|
||||
Checks = [t_quota_qos0, t_quota_qos1, t_quota_qos2],
|
||||
case lists:member(TestCase, Checks) of
|
||||
true ->
|
||||
modify_limiter(NewConf);
|
||||
_ ->
|
||||
ok
|
||||
end.
|
||||
|
||||
%% per_client 5/1s,5
|
||||
%% aggregated 10/1s,10
|
||||
modify_limiter(#{emqx_limiter := Limiter} = NewConf) ->
|
||||
#{message_routing := #{bucket := Bucket} = Routing} = Limiter,
|
||||
#{default := #{per_client := Client} = Default} = Bucket,
|
||||
Client2 = Client#{rate := 5,
|
||||
initial := 0,
|
||||
capacity := 5,
|
||||
low_water_mark := 1},
|
||||
Default2 = Default#{per_client := Client2,
|
||||
aggregated := #{rate => 10,
|
||||
initial => 0,
|
||||
capacity => 10
|
||||
}},
|
||||
Bucket2 = Bucket#{default := Default2},
|
||||
Routing2 = Routing#{bucket := Bucket2},
|
||||
|
||||
NewConf2 = NewConf#{emqx_limiter := Limiter#{message_routing := Routing2}},
|
||||
emqx_config:put(NewConf2),
|
||||
emqx_limiter_manager:restart_server(message_routing),
|
||||
ok.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Test cases for channel info/stats/caps
|
||||
%%--------------------------------------------------------------------
|
||||
|
@ -547,6 +582,7 @@ t_quota_qos0(_) ->
|
|||
{ok, Chann1} = emqx_channel:handle_in(Pub, Chann),
|
||||
{ok, Chann2} = emqx_channel:handle_in(Pub, Chann1),
|
||||
M1 = emqx_metrics:val('packets.publish.dropped') - 1,
|
||||
timer:sleep(1000),
|
||||
{ok, Chann3} = emqx_channel:handle_timeout(ref, expire_quota_limit, Chann2),
|
||||
{ok, _} = emqx_channel:handle_in(Pub, Chann3),
|
||||
M1 = emqx_metrics:val('packets.publish.dropped') - 1,
|
||||
|
@ -718,7 +754,7 @@ t_handle_call_takeover_end(_) ->
|
|||
|
||||
t_handle_call_quota(_) ->
|
||||
{reply, ok, _Chan} = emqx_channel:handle_call(
|
||||
{quota, [{conn_messages_routing, {100,1}}]},
|
||||
{quota, default},
|
||||
channel()
|
||||
).
|
||||
|
||||
|
@ -886,7 +922,7 @@ t_ws_cookie_init(_) ->
|
|||
conn_mod => emqx_ws_connection,
|
||||
ws_cookie => WsCookie
|
||||
},
|
||||
Channel = emqx_channel:init(ConnInfo, #{zone => default, listener => {tcp, default}}),
|
||||
Channel = emqx_channel:init(ConnInfo, #{zone => default, limiter => limiter_cfg(), listener => {tcp, default}}),
|
||||
?assertMatch(#{ws_cookie := WsCookie}, emqx_channel:info(clientinfo, Channel)).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
|
@ -911,7 +947,7 @@ channel(InitFields) ->
|
|||
maps:fold(fun(Field, Value, Channel) ->
|
||||
emqx_channel:set_field(Field, Value, Channel)
|
||||
end,
|
||||
emqx_channel:init(ConnInfo, #{zone => default, listener => {tcp, default}}),
|
||||
emqx_channel:init(ConnInfo, #{zone => default, limiter => limiter_cfg(), listener => {tcp, default}}),
|
||||
maps:merge(#{clientinfo => clientinfo(),
|
||||
session => session(),
|
||||
conn_state => connected
|
||||
|
@ -957,5 +993,6 @@ session(InitFields) when is_map(InitFields) ->
|
|||
|
||||
%% conn: 5/s; overall: 10/s
|
||||
quota() ->
|
||||
emqx_limiter:init(zone, [{conn_messages_routing, {5, 1}},
|
||||
{overall_messages_routing, {10, 1}}]).
|
||||
emqx_limiter_container:get_limiter_by_names([message_routing], limiter_cfg()).
|
||||
|
||||
limiter_cfg() -> #{}.
|
||||
|
|
|
@ -120,6 +120,7 @@ all(Suite) ->
|
|||
string:substr(atom_to_list(F), 1, 2) == "t_"
|
||||
]).
|
||||
|
||||
%% set emqx app boot modules
|
||||
-spec(boot_modules(all|list(atom())) -> ok).
|
||||
boot_modules(Mods) ->
|
||||
application:set_env(emqx, boot_modules, Mods).
|
||||
|
@ -134,6 +135,7 @@ start_apps(Apps, Handler) when is_function(Handler) ->
|
|||
%% Because, minirest, ekka etc.. application will scan these modules
|
||||
lists:foreach(fun load/1, [emqx | Apps]),
|
||||
ekka:start(),
|
||||
ok = emqx_ratelimiter_SUITE:base_conf(),
|
||||
lists:foreach(fun(App) -> start_app(App, Handler) end, [emqx | Apps]).
|
||||
|
||||
load(App) ->
|
||||
|
@ -161,8 +163,7 @@ app_schema(App) ->
|
|||
mustache_vars(App) ->
|
||||
[{platform_data_dir, app_path(App, "data")},
|
||||
{platform_etc_dir, app_path(App, "etc")},
|
||||
{platform_log_dir, app_path(App, "log")},
|
||||
{platform_plugins_dir, app_path(App, "plugins")}
|
||||
{platform_log_dir, app_path(App, "log")}
|
||||
].
|
||||
|
||||
start_app(App, Schema, ConfigFile, SpecAppConfig) ->
|
||||
|
|
|
@ -39,7 +39,7 @@ init_per_suite(Config) ->
|
|||
ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end),
|
||||
ok = meck:expect(emqx_cm, mark_channel_disconnected, fun(_) -> ok end),
|
||||
%% Meck Limiter
|
||||
ok = meck:new(emqx_limiter, [passthrough, no_history, no_link]),
|
||||
ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
|
||||
%% Meck Pd
|
||||
ok = meck:new(emqx_pd, [passthrough, no_history, no_link]),
|
||||
%% Meck Metrics
|
||||
|
@ -60,17 +60,19 @@ init_per_suite(Config) ->
|
|||
ok = meck:expect(emqx_alarm, deactivate, fun(_, _) -> ok end),
|
||||
|
||||
emqx_channel_SUITE:set_test_listener_confs(),
|
||||
emqx_common_test_helpers:start_apps([]),
|
||||
Config.
|
||||
|
||||
end_per_suite(_Config) ->
|
||||
ok = meck:unload(emqx_transport),
|
||||
catch meck:unload(emqx_channel),
|
||||
ok = meck:unload(emqx_cm),
|
||||
ok = meck:unload(emqx_limiter),
|
||||
ok = meck:unload(emqx_htb_limiter),
|
||||
ok = meck:unload(emqx_pd),
|
||||
ok = meck:unload(emqx_metrics),
|
||||
ok = meck:unload(emqx_hooks),
|
||||
ok = meck:unload(emqx_alarm),
|
||||
emqx_common_test_helpers:stop_apps([]),
|
||||
ok.
|
||||
|
||||
init_per_testcase(TestCase, Config) when
|
||||
|
@ -129,8 +131,9 @@ t_info(_) ->
|
|||
socktype := tcp}, SockInfo).
|
||||
|
||||
t_info_limiter(_) ->
|
||||
St = st(#{limiter => emqx_limiter:init(default, [])}),
|
||||
?assertEqual(undefined, emqx_connection:info(limiter, St)).
|
||||
Limiter = init_limiter(),
|
||||
St = st(#{limiter => Limiter}),
|
||||
?assertEqual(Limiter, emqx_connection:info(limiter, St)).
|
||||
|
||||
t_stats(_) ->
|
||||
CPid = spawn(fun() ->
|
||||
|
@ -250,24 +253,22 @@ t_handle_msg_shutdown(_) ->
|
|||
?assertMatch({stop, {shutdown, for_testing}, _St}, handle_msg({shutdown, for_testing}, st())).
|
||||
|
||||
t_handle_call(_) ->
|
||||
St = st(),
|
||||
St = st(#{limiter => init_limiter()}),
|
||||
?assertMatch({ok, _St}, handle_msg({event, undefined}, St)),
|
||||
?assertMatch({reply, _Info, _NSt}, handle_call(self(), info, St)),
|
||||
?assertMatch({reply, _Stats, _NSt}, handle_call(self(), stats, St)),
|
||||
?assertMatch({reply, ok, _NSt}, handle_call(self(), {ratelimit, []}, St)),
|
||||
?assertMatch({reply, ok, _NSt},
|
||||
handle_call(self(), {ratelimit, [{conn_messages_in, {100, 1}}]}, St)),
|
||||
handle_call(self(), {ratelimit, [{bytes_in, default}]}, St)),
|
||||
?assertEqual({reply, ignored, St}, handle_call(self(), for_testing, St)),
|
||||
?assertMatch({stop, {shutdown,kicked}, ok, _NSt},
|
||||
handle_call(self(), kick, St)).
|
||||
|
||||
t_handle_timeout(_) ->
|
||||
TRef = make_ref(),
|
||||
State = st(#{idle_timer => TRef, limit_timer => TRef, stats_timer => TRef}),
|
||||
State = st(#{idle_timer => TRef, stats_timer => TRef, limiter => init_limiter()}),
|
||||
?assertMatch({stop, {shutdown,idle_timeout}, _NState},
|
||||
emqx_connection:handle_timeout(TRef, idle_timeout, State)),
|
||||
?assertMatch({ok, {event,running}, _NState},
|
||||
emqx_connection:handle_timeout(TRef, limit_timeout, State)),
|
||||
?assertMatch({ok, _NState},
|
||||
emqx_connection:handle_timeout(TRef, emit_stats, State)),
|
||||
?assertMatch({ok, _NState},
|
||||
|
@ -279,13 +280,15 @@ t_handle_timeout(_) ->
|
|||
?assertMatch({ok, _NState}, emqx_connection:handle_timeout(TRef, undefined, State)).
|
||||
|
||||
t_parse_incoming(_) ->
|
||||
?assertMatch({ok, [], _NState}, emqx_connection:parse_incoming(<<>>, st())),
|
||||
?assertMatch({[], _NState}, emqx_connection:parse_incoming(<<>>, [], st())),
|
||||
?assertMatch({[], _NState}, emqx_connection:parse_incoming(<<"for_testing">>, [], st())).
|
||||
|
||||
t_next_incoming_msgs(_) ->
|
||||
?assertEqual({incoming, packet}, emqx_connection:next_incoming_msgs([packet])),
|
||||
?assertEqual([{incoming, packet2}, {incoming, packet1}],
|
||||
emqx_connection:next_incoming_msgs([packet1, packet2])).
|
||||
State = st(#{}),
|
||||
?assertEqual({ok, [{incoming, packet}], State},
|
||||
emqx_connection:next_incoming_msgs([packet], [], State)),
|
||||
?assertEqual({ok, [{incoming, packet2}, {incoming, packet1}], State},
|
||||
emqx_connection:next_incoming_msgs([packet1, packet2], [], State)).
|
||||
|
||||
t_handle_incoming(_) ->
|
||||
?assertMatch({ok, _Out, _NState},
|
||||
|
@ -331,26 +334,28 @@ t_handle_info(_) ->
|
|||
?assertMatch({ok, _NState}, emqx_connection:handle_info(for_testing, st())).
|
||||
|
||||
t_ensure_rate_limit(_) ->
|
||||
State = emqx_connection:ensure_rate_limit(#{}, st(#{limiter => undefined})),
|
||||
WhenOk = fun emqx_connection:next_incoming_msgs/3,
|
||||
{ok, [], State} = emqx_connection:check_limiter([], [], WhenOk, [], st(#{limiter => undefined})),
|
||||
?assertEqual(undefined, emqx_connection:info(limiter, State)),
|
||||
|
||||
ok = meck:expect(emqx_limiter, check,
|
||||
fun(_, _) -> {ok, emqx_limiter:init(default, [])} end),
|
||||
State1 = emqx_connection:ensure_rate_limit(#{}, st(#{limiter => #{}})),
|
||||
?assertEqual(undefined, emqx_connection:info(limiter, State1)),
|
||||
Limiter = init_limiter(),
|
||||
{ok, [], State1} = emqx_connection:check_limiter([], [], WhenOk, [], st(#{limiter => Limiter})),
|
||||
?assertEqual(Limiter, emqx_connection:info(limiter, State1)),
|
||||
|
||||
ok = meck:expect(emqx_limiter, check,
|
||||
fun(_, _) -> {pause, 3000, emqx_limiter:init(default, [])} end),
|
||||
State2 = emqx_connection:ensure_rate_limit(#{}, st(#{limiter => #{}})),
|
||||
?assertEqual(undefined, emqx_connection:info(limiter, State2)),
|
||||
?assertEqual(blocked, emqx_connection:info(sockstate, State2)).
|
||||
ok = meck:expect(emqx_htb_limiter, check,
|
||||
fun(_, Client) -> {pause, 3000, undefined, Client} end),
|
||||
{ok, State2} = emqx_connection:check_limiter([{1000, bytes_in}], [], WhenOk, [], st(#{limiter => Limiter})),
|
||||
meck:unload(emqx_htb_limiter),
|
||||
ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]),
|
||||
?assertNotEqual(undefined, emqx_connection:info(limiter_timer, State2)).
|
||||
|
||||
t_activate_socket(_) ->
|
||||
State = st(),
|
||||
Limiter = init_limiter(),
|
||||
State = st(#{limiter => Limiter}),
|
||||
{ok, NStats} = emqx_connection:activate_socket(State),
|
||||
?assertEqual(running, emqx_connection:info(sockstate, NStats)),
|
||||
|
||||
State1 = st(#{sockstate => blocked}),
|
||||
State1 = st(#{sockstate => blocked, limiter_timer => any_timer}),
|
||||
?assertEqual({ok, State1}, emqx_connection:activate_socket(State1)),
|
||||
|
||||
State2 = st(#{sockstate => closed}),
|
||||
|
@ -458,7 +463,10 @@ with_conn(TestFun, Opts) when is_map(Opts) ->
|
|||
TrapExit = maps:get(trap_exit, Opts, false),
|
||||
process_flag(trap_exit, TrapExit),
|
||||
{ok, CPid} = emqx_connection:start_link(emqx_transport, sock,
|
||||
maps:merge(Opts, #{zone => default, listener => {tcp, default}})),
|
||||
maps:merge(Opts,
|
||||
#{zone => default,
|
||||
limiter => limiter_cfg(),
|
||||
listener => {tcp, default}})),
|
||||
TestFun(CPid),
|
||||
TrapExit orelse emqx_connection:stop(CPid),
|
||||
ok.
|
||||
|
@ -481,7 +489,8 @@ st(InitFields) when is_map(InitFields) ->
|
|||
st(InitFields, #{}).
|
||||
st(InitFields, ChannelFields) when is_map(InitFields) ->
|
||||
St = emqx_connection:init_state(emqx_transport, sock, #{zone => default,
|
||||
listener => {tcp, default}}),
|
||||
limiter => limiter_cfg(),
|
||||
listener => {tcp, default}}),
|
||||
maps:fold(fun(N, V, S) -> emqx_connection:set_field(N, V, S) end,
|
||||
emqx_connection:set_field(channel, channel(ChannelFields), St),
|
||||
InitFields
|
||||
|
@ -515,7 +524,7 @@ channel(InitFields) ->
|
|||
maps:fold(fun(Field, Value, Channel) ->
|
||||
emqx_channel:set_field(Field, Value, Channel)
|
||||
end,
|
||||
emqx_channel:init(ConnInfo, #{zone => default, listener => {tcp, default}}),
|
||||
emqx_channel:init(ConnInfo, #{zone => default, limiter => limiter_cfg(), listener => {tcp, default}}),
|
||||
maps:merge(#{clientinfo => ClientInfo,
|
||||
session => Session,
|
||||
conn_state => connected
|
||||
|
@ -524,3 +533,8 @@ channel(InitFields) ->
|
|||
handle_msg(Msg, St) -> emqx_connection:handle_msg(Msg, St).
|
||||
|
||||
handle_call(Pid, Call, St) -> emqx_connection:handle_call(Pid, Call, St).
|
||||
|
||||
limiter_cfg() -> #{}.
|
||||
|
||||
init_limiter() ->
|
||||
emqx_limiter_container:get_limiter_by_names([bytes_in, message_in], limiter_cfg()).
|
||||
|
|
|
@ -24,6 +24,8 @@
|
|||
-include_lib("eunit/include/eunit.hrl").
|
||||
-include_lib("common_test/include/ct.hrl").
|
||||
|
||||
-define(CERTS_PATH(CertName), filename:join(["../../lib/emqx/etc/certs/", CertName])).
|
||||
|
||||
all() -> emqx_common_test_helpers:all(?MODULE).
|
||||
|
||||
init_per_suite(Config) ->
|
||||
|
@ -43,19 +45,34 @@ init_per_testcase(Case, Config)
|
|||
{ok, _} = emqx_config_handler:start_link(),
|
||||
PrevListeners = emqx_config:get([listeners, tcp], #{}),
|
||||
PrevRateLimit = emqx_config:get([rate_limit], #{}),
|
||||
emqx_config:put([listeners, tcp], #{ listener_test =>
|
||||
#{ bind => {"127.0.0.1", 9999}
|
||||
, max_connections => 4321
|
||||
}
|
||||
}),
|
||||
emqx_config:put(
|
||||
[listeners, tcp],
|
||||
#{listener_test => #{bind => {"127.0.0.1", 9999},
|
||||
max_connections => 4321,
|
||||
limiter => #{}
|
||||
}
|
||||
}),
|
||||
emqx_config:put([rate_limit], #{max_conn_rate => 1000}),
|
||||
ListenerConf = #{ bind => {"127.0.0.1", 9999}
|
||||
},
|
||||
ok = emqx_listeners:start(),
|
||||
[ {listener_conf, ListenerConf}
|
||||
, {prev_listener_conf, PrevListeners}
|
||||
[ {prev_listener_conf, PrevListeners}
|
||||
, {prev_rate_limit_conf, PrevRateLimit}
|
||||
| Config];
|
||||
init_per_testcase(t_wss_conn, Config) ->
|
||||
{ok, _} = emqx_config_handler:start_link(),
|
||||
PrevListeners = emqx_config:get([listeners, wss], #{}),
|
||||
emqx_config:put(
|
||||
[listeners, wss],
|
||||
#{listener_test => #{bind => {{127,0,0,1}, 9998},
|
||||
limiter => #{},
|
||||
ssl => #{cacertfile => ?CERTS_PATH("cacert.pem"),
|
||||
certfile => ?CERTS_PATH("cert.pem"),
|
||||
keyfile => ?CERTS_PATH("key.pem")
|
||||
}
|
||||
}
|
||||
}),
|
||||
ok = emqx_listeners:start(),
|
||||
[ {prev_listener_conf, PrevListeners}
|
||||
| Config];
|
||||
init_per_testcase(_, Config) ->
|
||||
{ok, _} = emqx_config_handler:start_link(),
|
||||
Config.
|
||||
|
@ -69,6 +86,12 @@ end_per_testcase(Case, Config)
|
|||
emqx_listeners:stop(),
|
||||
_ = emqx_config_handler:stop(),
|
||||
ok;
|
||||
end_per_testcase(t_wss_conn, Config) ->
|
||||
PrevListener = ?config(prev_listener_conf, Config),
|
||||
emqx_config:put([listeners, wss], PrevListener),
|
||||
emqx_listeners:stop(),
|
||||
_ = emqx_config_handler:stop(),
|
||||
ok;
|
||||
end_per_testcase(_, _Config) ->
|
||||
_ = emqx_config_handler:stop(),
|
||||
ok.
|
||||
|
@ -92,6 +115,10 @@ t_max_conns_tcp(_) ->
|
|||
t_current_conns_tcp(_) ->
|
||||
?assertEqual(0, emqx_listeners:current_conns('tcp:listener_test', {{127,0,0,1}, 9999})).
|
||||
|
||||
t_wss_conn(_) ->
|
||||
{ok, Socket} = ssl:connect({127, 0, 0, 1}, 9998, [{verify, verify_none}], 1000),
|
||||
ok = ssl:close(Socket).
|
||||
|
||||
render_config_file() ->
|
||||
Path = local_path(["etc", "emqx.conf"]),
|
||||
{ok, Temp} = file:read_file(Path),
|
||||
|
@ -105,8 +132,7 @@ render_config_file() ->
|
|||
mustache_vars() ->
|
||||
[{platform_data_dir, local_path(["data"])},
|
||||
{platform_etc_dir, local_path(["etc"])},
|
||||
{platform_log_dir, local_path(["log"])},
|
||||
{platform_plugins_dir, local_path(["plugins"])}
|
||||
{platform_log_dir, local_path(["log"])}
|
||||
].
|
||||
|
||||
generate_config() ->
|
||||
|
@ -117,10 +143,6 @@ generate_config() ->
|
|||
set_app_env({App, Lists}) ->
|
||||
lists:foreach(fun({authz_file, _Var}) ->
|
||||
application:set_env(App, authz_file, local_path(["etc", "authz.conf"]));
|
||||
({plugins_loaded_file, _Var}) ->
|
||||
application:set_env(App,
|
||||
plugins_loaded_file,
|
||||
local_path(["test", "emqx_SUITE_data","loaded_plugins"]));
|
||||
({Par, Var}) ->
|
||||
application:set_env(App, Par, Var)
|
||||
end, Lists).
|
||||
|
|
|
@ -19,13 +19,85 @@
|
|||
-compile(nowarn_export_all).
|
||||
-compile(export_all).
|
||||
|
||||
all() -> [t_hash].
|
||||
-include_lib("eunit/include/eunit.hrl").
|
||||
|
||||
all() ->
|
||||
emqx_common_test_helpers:all(?MODULE).
|
||||
|
||||
groups() ->
|
||||
[].
|
||||
|
||||
init_per_suite(Config) ->
|
||||
{ok, _} = application:ensure_all_started(bcrypt),
|
||||
Config.
|
||||
|
||||
end_per_suite(_Config) ->
|
||||
ok.
|
||||
|
||||
t_hash_data(_) ->
|
||||
Password = <<"password">>,
|
||||
Password = emqx_passwd:hash_data(plain, Password),
|
||||
|
||||
<<"5f4dcc3b5aa765d61d8327deb882cf99">>
|
||||
= emqx_passwd:hash_data(md5, Password),
|
||||
|
||||
<<"5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8">>
|
||||
= emqx_passwd:hash_data(sha, Password),
|
||||
|
||||
<<"5e884898da28047151d0e56f8dc6292773603d0d6aabbdd62a11ef721d1542d8">>
|
||||
= emqx_passwd:hash_data(sha256, Password),
|
||||
|
||||
Sha512 = iolist_to_binary(
|
||||
[<<"b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb9">>,
|
||||
<<"80b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86">>]),
|
||||
|
||||
Sha512 = emqx_passwd:hash_data(sha512, Password).
|
||||
|
||||
t_hash(_) ->
|
||||
Password = <<"password">>, Salt = <<"salt">>,
|
||||
_ = emqx_passwd:hash(plain, Password),
|
||||
_ = emqx_passwd:hash(md5, Password),
|
||||
_ = emqx_passwd:hash(sha, Password),
|
||||
_ = emqx_passwd:hash(sha256, Password),
|
||||
_ = emqx_passwd:hash(bcrypt, {Salt, Password}),
|
||||
_ = emqx_passwd:hash(pbkdf2, {Salt, Password, sha256, 1000, 20}).
|
||||
Password = <<"password">>,
|
||||
Salt = <<"salt">>,
|
||||
WrongPassword = <<"wrongpass">>,
|
||||
|
||||
Md5 = <<"67a1e09bb1f83f5007dc119c14d663aa">>,
|
||||
Md5 = emqx_passwd:hash({md5, Salt, prefix}, Password),
|
||||
true = emqx_passwd:check_pass({md5, Salt, prefix}, Md5, Password),
|
||||
false = emqx_passwd:check_pass({md5, Salt, prefix}, Md5, WrongPassword),
|
||||
|
||||
Sha = <<"59b3e8d637cf97edbe2384cf59cb7453dfe30789">>,
|
||||
Sha = emqx_passwd:hash({sha, Salt, prefix}, Password),
|
||||
true = emqx_passwd:check_pass({sha, Salt, prefix}, Sha, Password),
|
||||
false = emqx_passwd:check_pass({sha, Salt, prefix}, Sha, WrongPassword),
|
||||
|
||||
Sha256 = <<"7a37b85c8918eac19a9089c0fa5a2ab4dce3f90528dcdeec108b23ddf3607b99">>,
|
||||
Sha256 = emqx_passwd:hash({sha256, Salt, suffix}, Password),
|
||||
true = emqx_passwd:check_pass({sha256, Salt, suffix}, Sha256, Password),
|
||||
false = emqx_passwd:check_pass({sha256, Salt, suffix}, Sha256, WrongPassword),
|
||||
|
||||
Sha512 = iolist_to_binary(
|
||||
[<<"fa6a2185b3e0a9a85ef41ffb67ef3c1fb6f74980f8ebf970e4e72e353ed9537d">>,
|
||||
<<"593083c201dfd6e43e1c8a7aac2bc8dbb119c7dfb7d4b8f131111395bd70e97f">>]),
|
||||
Sha512 = emqx_passwd:hash({sha512, Salt, suffix}, Password),
|
||||
true = emqx_passwd:check_pass({sha512, Salt, suffix}, Sha512, Password),
|
||||
false = emqx_passwd:check_pass({sha512, Salt, suffix}, Sha512, WrongPassword),
|
||||
|
||||
BcryptSalt = <<"$2b$12$wtY3h20mUjjmeaClpqZVve">>,
|
||||
Bcrypt = <<"$2b$12$wtY3h20mUjjmeaClpqZVvehyw7F.V78F3rbK2xDkCzRTMi6pmfUB6">>,
|
||||
Bcrypt = emqx_passwd:hash({bcrypt, BcryptSalt}, Password),
|
||||
true = emqx_passwd:check_pass({bcrypt, Bcrypt}, Bcrypt, Password),
|
||||
false = emqx_passwd:check_pass({bcrypt, Bcrypt}, Bcrypt, WrongPassword),
|
||||
false = emqx_passwd:check_pass({bcrypt, <<>>}, <<>>, WrongPassword),
|
||||
|
||||
%% Invalid salt, bcrypt fails
|
||||
?assertException(error, _, emqx_passwd:hash({bcrypt, Salt}, Password)),
|
||||
|
||||
BadDKlen = 1 bsl 32,
|
||||
Pbkdf2Salt = <<"ATHENA.MIT.EDUraeburn">>,
|
||||
Pbkdf2 = <<"01dbee7f4a9e243e988b62c73cda935d"
|
||||
"a05378b93244ec8f48a99e61ad799d86">>,
|
||||
Pbkdf2 = emqx_passwd:hash({pbkdf2, sha, Pbkdf2Salt, 2, 32}, Password),
|
||||
true = emqx_passwd:check_pass({pbkdf2, sha, Pbkdf2Salt, 2, 32}, Pbkdf2, Password),
|
||||
false = emqx_passwd:check_pass({pbkdf2, sha, Pbkdf2Salt, 2, 32}, Pbkdf2, WrongPassword),
|
||||
false = emqx_passwd:check_pass({pbkdf2, sha, Pbkdf2Salt, 2, BadDKlen}, Pbkdf2, Password),
|
||||
|
||||
%% Invalid derived_length, pbkdf2 fails
|
||||
?assertException(error, _, emqx_passwd:hash({pbkdf2, sha, Pbkdf2Salt, 2, BadDKlen}, Password)).
|
||||
|
|
|
@ -160,9 +160,6 @@ init_per_group(gc_tests, Config) ->
|
|||
init_per_suite(Config) ->
|
||||
Config.
|
||||
|
||||
set_special_confs(emqx) ->
|
||||
Path = emqx_common_test_helpers:deps_path(emqx, "test/emqx_SUITE_data/loaded_plugins"),
|
||||
application:set_env(emqx, plugins_loaded_file, Path);
|
||||
set_special_confs(_) ->
|
||||
ok.
|
||||
|
||||
|
|
|
@ -100,6 +100,7 @@ clientinfo() ->
|
|||
%% See emqx_session:session() type define
|
||||
sessioninfo() ->
|
||||
?LET(Session, {session,
|
||||
clientid(),
|
||||
sessionid(), % id
|
||||
boolean(), % is_persistent
|
||||
subscriptions(), % subscriptions
|
||||
|
@ -112,7 +113,8 @@ sessioninfo() ->
|
|||
awaiting_rel(), % awaiting_rel
|
||||
non_neg_integer(), % max_awaiting_rel
|
||||
safty_timeout(), % await_rel_timeout
|
||||
timestamp() % created_at
|
||||
timestamp(), % created_at
|
||||
latency_stats()
|
||||
},
|
||||
emqx_session:info(Session)).
|
||||
|
||||
|
@ -336,6 +338,30 @@ normal_topic_filter() ->
|
|||
end
|
||||
end).
|
||||
|
||||
%% Type defined in emqx_message_latency_stats.erl - stats()
|
||||
latency_stats() ->
|
||||
Keys = [{threshold, number()},
|
||||
{ema, exp_moving_average()},
|
||||
{last_update_time, non_neg_integer()},
|
||||
{last_access_time, non_neg_integer()},
|
||||
{last_insert_value, non_neg_integer()}
|
||||
],
|
||||
?LET({Ks, M}, {Keys, map(limited_atom(), limited_any_term())},
|
||||
begin
|
||||
maps:merge(maps:from_list(Ks), M)
|
||||
end).
|
||||
|
||||
%% Type defined in emqx_moving_average.erl - ema()
|
||||
exp_moving_average() ->
|
||||
Keys = [{type, exponential},
|
||||
{average, number()},
|
||||
{coefficient, float()}
|
||||
],
|
||||
?LET({Ks, M}, {Keys, map(limited_atom(), limited_any_term())},
|
||||
begin
|
||||
maps:merge(maps:from_list(Ks), M)
|
||||
end).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Basic Types
|
||||
%%--------------------------------------------------------------------
|
||||
|
|
|
@ -0,0 +1,659 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_ratelimiter_SUITE).
|
||||
|
||||
-compile(export_all).
|
||||
-compile(nowarn_export_all).
|
||||
|
||||
-define(APP, emqx).
|
||||
|
||||
-include_lib("eunit/include/eunit.hrl").
|
||||
-include_lib("common_test/include/ct.hrl").
|
||||
|
||||
-define(BASE_CONF, <<"""
|
||||
emqx_limiter {
|
||||
bytes_in {
|
||||
global.rate = infinity
|
||||
zone.default.rate = infinity
|
||||
bucket.default {
|
||||
zone = default
|
||||
aggregated.rate = infinity
|
||||
aggregated.capacity = infinity
|
||||
per_client.rate = \"100MB/1s\"
|
||||
per_client.capacity = infinity
|
||||
}
|
||||
}
|
||||
|
||||
message_in {
|
||||
global.rate = infinity
|
||||
zone.default.rate = infinity
|
||||
bucket.default {
|
||||
zone = default
|
||||
aggregated.rate = infinity
|
||||
aggregated.capacity = infinity
|
||||
per_client.rate = infinity
|
||||
per_client.capacity = infinity
|
||||
}
|
||||
}
|
||||
|
||||
connection {
|
||||
global.rate = infinity
|
||||
zone.default.rate = infinity
|
||||
bucket.default {
|
||||
zone = default
|
||||
aggregated.rate = infinity
|
||||
aggregated.capacity = infinity
|
||||
per_client.rate = infinity
|
||||
per_client.capacity = infinity
|
||||
}
|
||||
}
|
||||
|
||||
message_routing {
|
||||
global.rate = infinity
|
||||
zone.default.rate = infinity
|
||||
bucket.default {
|
||||
zone = default
|
||||
aggregated.rate = infinity
|
||||
aggregated.capacity = infinity
|
||||
per_client.rate = infinity
|
||||
per_client.capacity = infinity
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
""">>).
|
||||
|
||||
-record(client, { counter :: counters:counter_ref()
|
||||
, start :: pos_integer()
|
||||
, endtime :: pos_integer()
|
||||
, obtained :: pos_integer()
|
||||
, rate :: float()
|
||||
, client :: emqx_htb_limiter:client()
|
||||
}).
|
||||
|
||||
-define(LOGT(Format, Args), ct:pal("TEST_SUITE: " ++ Format, Args)).
|
||||
-define(RATE(Rate), to_rate(Rate)).
|
||||
-define(NOW, erlang:system_time(millisecond)).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Setups
|
||||
%%--------------------------------------------------------------------
|
||||
all() ->
|
||||
emqx_common_test_helpers:all(?MODULE).
|
||||
|
||||
init_per_suite(Config) ->
|
||||
ok = emqx_config:init_load(emqx_limiter_schema, ?BASE_CONF),
|
||||
emqx_common_test_helpers:start_apps([?APP]),
|
||||
Config.
|
||||
|
||||
end_per_suite(_Config) ->
|
||||
emqx_common_test_helpers:stop_apps([?APP]).
|
||||
|
||||
init_per_testcase(_TestCase, Config) ->
|
||||
Config.
|
||||
|
||||
base_conf() ->
|
||||
emqx_config:init_load(emqx_limiter_schema, ?BASE_CONF).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Test Cases Bucket Level
|
||||
%%--------------------------------------------------------------------
|
||||
t_max_retry_time(_) ->
|
||||
Cfg = fun(Cfg) ->
|
||||
Cfg#{rate := 1,
|
||||
capacity := 1,
|
||||
max_retry_time := 500,
|
||||
failure_strategy := drop}
|
||||
end,
|
||||
Case = fun() ->
|
||||
Client = connect(default),
|
||||
Begin = ?NOW,
|
||||
Result = emqx_htb_limiter:consume(101, Client),
|
||||
?assertMatch({drop, _}, Result),
|
||||
Time = ?NOW - Begin,
|
||||
?assert(Time >= 500 andalso Time < 550)
|
||||
end,
|
||||
with_per_client(default, Cfg, Case).
|
||||
|
||||
t_divisible(_) ->
|
||||
Cfg = fun(Cfg) ->
|
||||
Cfg#{divisible := true,
|
||||
rate := ?RATE("1000/1s"),
|
||||
initial := 600,
|
||||
capacity := 600}
|
||||
end,
|
||||
Case = fun() ->
|
||||
Client = connect(default),
|
||||
Result = emqx_htb_limiter:check(1000, Client),
|
||||
?assertMatch({partial,
|
||||
400,
|
||||
#{continuation := _,
|
||||
diff := 400,
|
||||
start := _,
|
||||
need := 1000},
|
||||
_}, Result)
|
||||
end,
|
||||
with_per_client(default, Cfg, Case).
|
||||
|
||||
t_low_water_mark(_) ->
|
||||
Cfg = fun(Cfg) ->
|
||||
Cfg#{low_water_mark := 400,
|
||||
rate := ?RATE("1000/1s"),
|
||||
initial := 1000,
|
||||
capacity := 1000}
|
||||
end,
|
||||
Case = fun() ->
|
||||
Client = connect(default),
|
||||
Result = emqx_htb_limiter:check(500, Client),
|
||||
?assertMatch({ok, _}, Result),
|
||||
{_, Client2} = Result,
|
||||
Result2 = emqx_htb_limiter:check(101, Client2),
|
||||
?assertMatch({pause,
|
||||
_,
|
||||
#{continuation := undefined,
|
||||
diff := 0},
|
||||
_}, Result2)
|
||||
end,
|
||||
with_per_client(default, Cfg, Case).
|
||||
|
||||
t_infinity_client(_) ->
|
||||
Fun = fun(#{aggregated := Aggr, per_client := Cli} = Bucket) ->
|
||||
Aggr2 = Aggr#{rate := infinity,
|
||||
capacity := infinity},
|
||||
Cli2 = Cli#{rate := infinity, capacity := infinity},
|
||||
Bucket#{aggregated := Aggr2,
|
||||
per_client := Cli2}
|
||||
end,
|
||||
Case = fun() ->
|
||||
Client = connect(default),
|
||||
?assertEqual(infinity, Client),
|
||||
Result = emqx_htb_limiter:check(100000, Client),
|
||||
?assertEqual({ok, Client}, Result)
|
||||
end,
|
||||
with_bucket(default, Fun, Case).
|
||||
|
||||
t_short_board(_) ->
|
||||
Fun = fun(#{aggregated := Aggr, per_client := Cli} = Bucket) ->
|
||||
Aggr2 = Aggr#{rate := ?RATE("100/1s"),
|
||||
initial := 0,
|
||||
capacity := 100},
|
||||
Cli2 = Cli#{rate := ?RATE("600/1s"),
|
||||
capacity := 600,
|
||||
initial := 600},
|
||||
Bucket#{aggregated := Aggr2,
|
||||
per_client := Cli2}
|
||||
end,
|
||||
Case = fun() ->
|
||||
Counter = counters:new(1, [write_concurrency]),
|
||||
start_client(default, ?NOW + 2000, Counter, 20),
|
||||
timer:sleep(2100),
|
||||
check_average_rate(Counter, 2, 100, 20)
|
||||
end,
|
||||
with_bucket(default, Fun, Case).
|
||||
|
||||
t_rate(_) ->
|
||||
Fun = fun(#{aggregated := Aggr, per_client := Cli} = Bucket) ->
|
||||
Aggr2 = Aggr#{rate := ?RATE("100/100ms"),
|
||||
initial := 0,
|
||||
capacity := infinity},
|
||||
Cli2 = Cli#{rate := infinity,
|
||||
capacity := infinity,
|
||||
initial := 0},
|
||||
Bucket#{aggregated := Aggr2,
|
||||
per_client := Cli2}
|
||||
end,
|
||||
Case = fun() ->
|
||||
Client = connect(default),
|
||||
Ts1 = erlang:system_time(millisecond),
|
||||
C1 = emqx_htb_limiter:available(Client),
|
||||
timer:sleep(1000),
|
||||
Ts2 = erlang:system_time(millisecond),
|
||||
C2 = emqx_htb_limiter:available(Client),
|
||||
ShouldInc = floor((Ts2 - Ts1) / 100) * 100,
|
||||
Inc = C2 - C1,
|
||||
?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate")
|
||||
end,
|
||||
with_bucket(default, Fun, Case).
|
||||
|
||||
t_capacity(_) ->
|
||||
Capacity = 600,
|
||||
Fun = fun(#{aggregated := Aggr, per_client := Cli} = Bucket) ->
|
||||
Aggr2 = Aggr#{rate := ?RATE("100/100ms"),
|
||||
initial := 0,
|
||||
capacity := 600},
|
||||
Cli2 = Cli#{rate := infinity,
|
||||
capacity := infinity,
|
||||
initial := 0},
|
||||
Bucket#{aggregated := Aggr2,
|
||||
per_client := Cli2}
|
||||
end,
|
||||
Case = fun() ->
|
||||
Client = connect(default),
|
||||
timer:sleep(1000),
|
||||
C1 = emqx_htb_limiter:available(Client),
|
||||
?assertEqual(Capacity, C1, "test bucket capacity")
|
||||
end,
|
||||
with_bucket(default, Fun, Case).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Test Cases Zone Level
|
||||
%%--------------------------------------------------------------------
|
||||
t_limit_zone_with_unlimit_bucket(_) ->
|
||||
ZoneMod = fun(Cfg) ->
|
||||
Cfg#{rate := ?RATE("600/1s"),
|
||||
burst := ?RATE("60/1s")}
|
||||
end,
|
||||
|
||||
Bucket = fun(#{aggregated := Aggr, per_client := Cli} = Bucket) ->
|
||||
Aggr2 = Aggr#{rate := infinity,
|
||||
initial := 0,
|
||||
capacity := infinity},
|
||||
Cli2 = Cli#{rate := infinity,
|
||||
initial := 0,
|
||||
capacity := infinity,
|
||||
divisible := true},
|
||||
Bucket#{aggregated := Aggr2, per_client := Cli2}
|
||||
end,
|
||||
|
||||
Case = fun() ->
|
||||
C1 = counters:new(1, [write_concurrency]),
|
||||
start_client(b1, ?NOW + 2000, C1, 20),
|
||||
timer:sleep(2100),
|
||||
check_average_rate(C1, 2, 600, 1000)
|
||||
end,
|
||||
|
||||
with_zone(default, ZoneMod, [{b1, Bucket}], Case).
|
||||
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Test Cases Global Level
|
||||
%%--------------------------------------------------------------------
|
||||
t_burst_and_fairness(_) ->
|
||||
GlobalMod = fun(Cfg) ->
|
||||
Cfg#{burst := ?RATE("60/1s")}
|
||||
end,
|
||||
|
||||
ZoneMod = fun(Cfg) ->
|
||||
Cfg#{rate := ?RATE("600/1s"),
|
||||
burst := ?RATE("60/1s")}
|
||||
end,
|
||||
|
||||
Bucket = fun(#{aggregated := Aggr, per_client := Cli} = Bucket) ->
|
||||
Aggr2 = Aggr#{rate := ?RATE("500/1s"),
|
||||
initial := 0,
|
||||
capacity := 500},
|
||||
Cli2 = Cli#{rate := ?RATE("600/1s"),
|
||||
capacity := 600,
|
||||
initial := 600},
|
||||
Bucket#{aggregated := Aggr2,
|
||||
per_client := Cli2}
|
||||
end,
|
||||
|
||||
Case = fun() ->
|
||||
C1 = counters:new(1, [write_concurrency]),
|
||||
C2 = counters:new(1, [write_concurrency]),
|
||||
start_client(b1, ?NOW + 2000, C1, 20),
|
||||
start_client(b2, ?NOW + 2000, C2, 30),
|
||||
timer:sleep(2100),
|
||||
check_average_rate(C1, 2, 330, 25),
|
||||
check_average_rate(C2, 2, 330, 25)
|
||||
end,
|
||||
|
||||
with_global(GlobalMod,
|
||||
default,
|
||||
ZoneMod,
|
||||
[{b1, Bucket}, {b2, Bucket}],
|
||||
Case).
|
||||
|
||||
t_limit_global_with_unlimit_other(_) ->
|
||||
GlobalMod = fun(Cfg) ->
|
||||
Cfg#{rate := ?RATE("600/1s")}
|
||||
end,
|
||||
|
||||
ZoneMod = fun(Cfg) -> Cfg#{rate := infinity} end,
|
||||
|
||||
Bucket = fun(#{aggregated := Aggr, per_client := Cli} = Bucket) ->
|
||||
Aggr2 = Aggr#{rate := infinity,
|
||||
initial := 0,
|
||||
capacity := infinity},
|
||||
Cli2 = Cli#{rate := infinity,
|
||||
capacity := infinity,
|
||||
initial := 0},
|
||||
Bucket#{aggregated := Aggr2,
|
||||
per_client := Cli2}
|
||||
end,
|
||||
|
||||
Case = fun() ->
|
||||
C1 = counters:new(1, [write_concurrency]),
|
||||
start_client(b1, ?NOW + 2000, C1, 20),
|
||||
timer:sleep(2100),
|
||||
check_average_rate(C1, 2, 600, 100)
|
||||
end,
|
||||
|
||||
with_global(GlobalMod,
|
||||
default,
|
||||
ZoneMod,
|
||||
[{b1, Bucket}],
|
||||
Case).
|
||||
|
||||
t_multi_zones(_) ->
|
||||
GlobalMod = fun(Cfg) ->
|
||||
Cfg#{rate := ?RATE("600/1s")}
|
||||
end,
|
||||
|
||||
Zone1 = fun(Cfg) ->
|
||||
Cfg#{rate := ?RATE("400/1s")}
|
||||
end,
|
||||
|
||||
Zone2 = fun(Cfg) ->
|
||||
Cfg#{rate := ?RATE("500/1s")}
|
||||
end,
|
||||
|
||||
Bucket = fun(Zone, Rate) ->
|
||||
fun(#{aggregated := Aggr, per_client := Cli} = Bucket) ->
|
||||
Aggr2 = Aggr#{rate := infinity,
|
||||
initial := 0,
|
||||
capacity := infinity},
|
||||
Cli2 = Cli#{rate := Rate,
|
||||
capacity := infinity,
|
||||
initial := 0},
|
||||
Bucket#{aggregated := Aggr2,
|
||||
per_client := Cli2,
|
||||
zone := Zone}
|
||||
end
|
||||
end,
|
||||
|
||||
Case = fun() ->
|
||||
C1 = counters:new(1, [write_concurrency]),
|
||||
C2 = counters:new(1, [write_concurrency]),
|
||||
start_client(b1, ?NOW + 2000, C1, 25),
|
||||
start_client(b2, ?NOW + 2000, C2, 20),
|
||||
timer:sleep(2100),
|
||||
check_average_rate(C1, 2, 300, 25),
|
||||
check_average_rate(C2, 2, 300, 25)
|
||||
end,
|
||||
|
||||
with_global(GlobalMod,
|
||||
[z1, z2],
|
||||
[Zone1, Zone2],
|
||||
[{b1, Bucket(z1, ?RATE("400/1s"))}, {b2, Bucket(z2, ?RATE("500/1s"))}],
|
||||
Case).
|
||||
|
||||
%% because the simulated client will try to reach the maximum rate
|
||||
%% when divisible = true, a large number of divided tokens will be generated
|
||||
%% so this is not an accurate test
|
||||
t_multi_zones_with_divisible(_) ->
|
||||
GlobalMod = fun(Cfg) ->
|
||||
Cfg#{rate := ?RATE("600/1s")}
|
||||
end,
|
||||
|
||||
Zone1 = fun(Cfg) ->
|
||||
Cfg#{rate := ?RATE("400/1s")}
|
||||
end,
|
||||
|
||||
Zone2 = fun(Cfg) ->
|
||||
Cfg#{rate := ?RATE("500/1s")}
|
||||
end,
|
||||
|
||||
Bucket = fun(Zone, Rate) ->
|
||||
fun(#{aggregated := Aggr, per_client := Cli} = Bucket) ->
|
||||
Aggr2 = Aggr#{rate := Rate,
|
||||
initial := 0,
|
||||
capacity := infinity},
|
||||
Cli2 = Cli#{rate := Rate,
|
||||
divisible := true,
|
||||
capacity := infinity,
|
||||
initial := 0},
|
||||
Bucket#{aggregated := Aggr2,
|
||||
per_client := Cli2,
|
||||
zone := Zone}
|
||||
end
|
||||
end,
|
||||
|
||||
Case = fun() ->
|
||||
C1 = counters:new(1, [write_concurrency]),
|
||||
C2 = counters:new(1, [write_concurrency]),
|
||||
start_client(b1, ?NOW + 2000, C1, 25),
|
||||
start_client(b2, ?NOW + 2000, C2, 20),
|
||||
timer:sleep(2100),
|
||||
check_average_rate(C1, 2, 300, 120),
|
||||
check_average_rate(C2, 2, 300, 120)
|
||||
end,
|
||||
|
||||
with_global(GlobalMod,
|
||||
[z1, z2],
|
||||
[Zone1, Zone2],
|
||||
[{b1, Bucket(z1, ?RATE("400/1s"))}, {b2, Bucket(z2, ?RATE("500/1s"))}],
|
||||
Case).
|
||||
|
||||
t_zone_hunger_and_fair(_) ->
|
||||
GlobalMod = fun(Cfg) ->
|
||||
Cfg#{rate := ?RATE("600/1s")}
|
||||
end,
|
||||
|
||||
Zone1 = fun(Cfg) ->
|
||||
Cfg#{rate := ?RATE("600/1s")}
|
||||
end,
|
||||
|
||||
Zone2 = fun(Cfg) ->
|
||||
Cfg#{rate := ?RATE("50/1s")}
|
||||
end,
|
||||
|
||||
Bucket = fun(Zone, Rate) ->
|
||||
fun(#{aggregated := Aggr, per_client := Cli} = Bucket) ->
|
||||
Aggr2 = Aggr#{rate := infinity,
|
||||
initial := 0,
|
||||
capacity := infinity},
|
||||
Cli2 = Cli#{rate := Rate,
|
||||
capacity := infinity,
|
||||
initial := 0},
|
||||
Bucket#{aggregated := Aggr2,
|
||||
per_client := Cli2,
|
||||
zone := Zone}
|
||||
end
|
||||
end,
|
||||
|
||||
Case = fun() ->
|
||||
C1 = counters:new(1, [write_concurrency]),
|
||||
C2 = counters:new(1, [write_concurrency]),
|
||||
start_client(b1, ?NOW + 2000, C1, 20),
|
||||
start_client(b2, ?NOW + 2000, C2, 20),
|
||||
timer:sleep(2100),
|
||||
check_average_rate(C1, 2, 550, 25),
|
||||
check_average_rate(C2, 2, 50, 25)
|
||||
end,
|
||||
|
||||
with_global(GlobalMod,
|
||||
[z1, z2],
|
||||
[Zone1, Zone2],
|
||||
[{b1, Bucket(z1, ?RATE("600/1s"))}, {b2, Bucket(z2, ?RATE("50/1s"))}],
|
||||
Case).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
||||
start_client(Name, EndTime, Counter, Number) ->
|
||||
lists:foreach(fun(_) ->
|
||||
spawn(fun() ->
|
||||
start_client(Name, EndTime, Counter)
|
||||
end)
|
||||
end,
|
||||
lists:seq(1, Number)).
|
||||
|
||||
start_client(Name, EndTime, Counter) ->
|
||||
#{per_client := PerClient} =
|
||||
emqx_config:get([emqx_limiter, message_routing, bucket, Name]),
|
||||
#{rate := Rate} = PerClient,
|
||||
Client = #client{start = ?NOW,
|
||||
endtime = EndTime,
|
||||
counter = Counter,
|
||||
obtained = 0,
|
||||
rate = Rate,
|
||||
client = connect(Name)
|
||||
},
|
||||
client_loop(Client).
|
||||
|
||||
%% the simulated client will try to reach the configured rate as much as possible
|
||||
%% note that this client does not consider the capacity, so make sure rate < capacity
|
||||
client_loop(#client{start = Start,
|
||||
endtime = EndTime,
|
||||
obtained = Obtained,
|
||||
rate = Rate} = State) ->
|
||||
Now = ?NOW,
|
||||
Period = emqx_limiter_schema:minimum_period(),
|
||||
MinPeriod = erlang:ceil(0.25 * Period),
|
||||
if Now >= EndTime ->
|
||||
stop;
|
||||
Now - Start < MinPeriod ->
|
||||
timer:sleep(client_random_val(MinPeriod)),
|
||||
client_loop(State);
|
||||
Obtained =< 0 ->
|
||||
Rand = client_random_val(Rate),
|
||||
client_try_check(Rand, State);
|
||||
true ->
|
||||
Span = Now - Start,
|
||||
CurrRate = Obtained * Period / Span,
|
||||
if CurrRate < Rate ->
|
||||
Rand = client_random_val(Rate),
|
||||
client_try_check(Rand, State);
|
||||
true ->
|
||||
LeftTime = EndTime - Now,
|
||||
CanSleep = erlang:min(LeftTime, client_random_val(MinPeriod div 2)),
|
||||
timer:sleep(CanSleep),
|
||||
client_loop(State)
|
||||
end
|
||||
end.
|
||||
|
||||
client_try_check(Need, #client{counter = Counter,
|
||||
endtime = EndTime,
|
||||
obtained = Obtained,
|
||||
client = Client} = State) ->
|
||||
case emqx_htb_limiter:check(Need, Client) of
|
||||
{ok, Client2} ->
|
||||
case Need of
|
||||
#{need := Val} -> ok;
|
||||
Val -> ok
|
||||
end,
|
||||
counters:add(Counter, 1, Val),
|
||||
client_loop(State#client{obtained = Obtained + Val, client = Client2});
|
||||
{_, Pause, Retry, Client2} ->
|
||||
LeftTime = EndTime - ?NOW,
|
||||
if LeftTime =< 0 ->
|
||||
stop;
|
||||
true ->
|
||||
timer:sleep(erlang:min(Pause, LeftTime)),
|
||||
client_try_check(Retry, State#client{client = Client2})
|
||||
end
|
||||
end.
|
||||
|
||||
|
||||
%% XXX not a good test, because the client's rate may be bigger than the global rate
|
||||
%% so if the client's rate = infinity
|
||||
%% the client's divisible should be true, or the capacity must be bigger than the size of each consume
|
||||
client_random_val(infinity) ->
|
||||
1000;
|
||||
|
||||
%% random in 0.5Range ~ 1Range
|
||||
client_random_val(Range) ->
|
||||
Half = erlang:floor(Range) div 2,
|
||||
Rand = rand:uniform(Half + 1) + Half,
|
||||
erlang:max(1, Rand).
|
||||
|
||||
to_rate(Str) ->
|
||||
{ok, Rate} = emqx_limiter_schema:to_rate(Str),
|
||||
Rate.
|
||||
|
||||
with_global(Modifier, ZoneName, ZoneModifier, Buckets, Case) ->
|
||||
Path = [emqx_limiter, message_routing],
|
||||
#{global := Global} = Cfg = emqx_config:get(Path),
|
||||
Cfg2 = Cfg#{global := Modifier(Global)},
|
||||
with_zone(Cfg2, ZoneName, ZoneModifier, Buckets, Case).
|
||||
|
||||
with_zone(Name, Modifier, Buckets, Case) ->
|
||||
Path = [emqx_limiter, message_routing],
|
||||
Cfg = emqx_config:get(Path),
|
||||
with_zone(Cfg, Name, Modifier, Buckets, Case).
|
||||
|
||||
with_zone(Cfg, Name, Modifier, Buckets, Case) ->
|
||||
Path = [emqx_limiter, message_routing],
|
||||
#{zone := ZoneCfgs,
|
||||
bucket := BucketCfgs} = Cfg,
|
||||
ZoneCfgs2 = apply_modifier(Name, Modifier, ZoneCfgs),
|
||||
BucketCfgs2 = apply_modifier(Buckets, BucketCfgs),
|
||||
Cfg2 = Cfg#{zone := ZoneCfgs2, bucket := BucketCfgs2},
|
||||
with_config(Path, fun(_) -> Cfg2 end, Case).
|
||||
|
||||
with_bucket(Bucket, Modifier, Case) ->
|
||||
Path = [emqx_limiter, message_routing, bucket, Bucket],
|
||||
with_config(Path, Modifier, Case).
|
||||
|
||||
with_per_client(Bucket, Modifier, Case) ->
|
||||
Path = [emqx_limiter, message_routing, bucket, Bucket, per_client],
|
||||
with_config(Path, Modifier, Case).
|
||||
|
||||
with_config(Path, Modifier, Case) ->
|
||||
Cfg = emqx_config:get(Path),
|
||||
NewCfg = Modifier(Cfg),
|
||||
ct:pal("test with config:~p~n", [NewCfg]),
|
||||
emqx_config:put(Path, NewCfg),
|
||||
emqx_limiter_manager:restart_server(message_routing),
|
||||
timer:sleep(100),
|
||||
DelayReturn
|
||||
= try
|
||||
Return = Case(),
|
||||
fun() -> Return end
|
||||
catch Type:Reason:Trace ->
|
||||
fun() -> erlang:raise(Type, Reason, Trace) end
|
||||
end,
|
||||
emqx_config:put(Path, Cfg),
|
||||
DelayReturn().
|
||||
|
||||
connect(Name) ->
|
||||
emqx_limiter_server:connect(message_routing, Name).
|
||||
|
||||
check_average_rate(Counter, Second, Rate, Margin) ->
|
||||
Cost = counters:get(Counter, 1),
|
||||
PerSec = Cost / Second,
|
||||
?LOGT(">>>> Cost:~p PerSec:~p Rate:~p ~n", [Cost, PerSec, Rate]),
|
||||
?assert(in_range(PerSec, Rate - Margin, Rate + Margin)).
|
||||
|
||||
print_average_rate(Counter, Second) ->
|
||||
Cost = counters:get(Counter, 1),
|
||||
PerSec = Cost / Second,
|
||||
ct:pal(">>>> Cost:~p PerSec:~p ~n", [Cost, PerSec]).
|
||||
|
||||
in_range(Val, Min, _Max) when Val < Min ->
|
||||
ct:pal("Val:~p smaller than min bound:~p~n", [Val, Min]),
|
||||
false;
|
||||
in_range(Val, _Min, Max) when Val > Max->
|
||||
ct:pal("Val:~p bigger than max bound:~p~n", [Val, Max]),
|
||||
false;
|
||||
in_range(_, _, _) ->
|
||||
true.
|
||||
|
||||
apply_modifier(Name, Modifier, Cfg) when is_list(Name) ->
|
||||
Pairs = lists:zip(Name, Modifier),
|
||||
apply_modifier(Pairs, Cfg);
|
||||
|
||||
apply_modifier(Name, Modifier, #{default := Template} = Cfg) ->
|
||||
Cfg#{Name => Modifier(Template)}.
|
||||
|
||||
apply_modifier(Pairs, #{default := Template}) ->
|
||||
Fun = fun({N, M}, Acc) ->
|
||||
Acc#{N => M(Template)}
|
||||
end,
|
||||
lists:foldl(Fun, #{}, Pairs).
|
|
@ -24,6 +24,9 @@
|
|||
|
||||
all() -> emqx_common_test_helpers:all(?MODULE).
|
||||
|
||||
-define(NOW, erlang:system_time(millisecond)).
|
||||
-record(pubrel_await, {timestamp :: non_neg_integer()}).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% CT callbacks
|
||||
%%--------------------------------------------------------------------
|
||||
|
@ -181,7 +184,7 @@ t_puback_with_dequeue(_) ->
|
|||
?assertEqual(<<"t2">>, emqx_message:topic(Msg3)).
|
||||
|
||||
t_puback_error_packet_id_in_use(_) ->
|
||||
Inflight = emqx_inflight:insert(1, {pubrel, ts(millisecond)}, emqx_inflight:new()),
|
||||
Inflight = emqx_inflight:insert(1, {#pubrel_await{timestamp = ?NOW}, ts(millisecond)}, emqx_inflight:new()),
|
||||
{error, ?RC_PACKET_IDENTIFIER_IN_USE} =
|
||||
emqx_session:puback(1, session(#{inflight => Inflight})).
|
||||
|
||||
|
@ -193,10 +196,10 @@ t_pubrec(_) ->
|
|||
Inflight = emqx_inflight:insert(2, {Msg, ts(millisecond)}, emqx_inflight:new()),
|
||||
Session = session(#{inflight => Inflight}),
|
||||
{ok, Msg, Session1} = emqx_session:pubrec(2, Session),
|
||||
?assertMatch([{pubrel, _}], emqx_inflight:values(emqx_session:info(inflight, Session1))).
|
||||
?assertMatch([{{pubrel_await, _}, _}], emqx_inflight:values(emqx_session:info(inflight, Session1))).
|
||||
|
||||
t_pubrec_packet_id_in_use_error(_) ->
|
||||
Inflight = emqx_inflight:insert(1, {pubrel, ts(millisecond)}, emqx_inflight:new()),
|
||||
Inflight = emqx_inflight:insert(1, {#pubrel_await{timestamp = ?NOW}, ts(millisecond)}, emqx_inflight:new()),
|
||||
{error, ?RC_PACKET_IDENTIFIER_IN_USE} =
|
||||
emqx_session:pubrec(1, session(#{inflight => Inflight})).
|
||||
|
||||
|
@ -212,7 +215,7 @@ t_pubrel_error_packetid_not_found(_) ->
|
|||
{error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} = emqx_session:pubrel(1, session()).
|
||||
|
||||
t_pubcomp(_) ->
|
||||
Inflight = emqx_inflight:insert(1, {pubrel, ts(millisecond)}, emqx_inflight:new()),
|
||||
Inflight = emqx_inflight:insert(1, {#pubrel_await{timestamp = ?NOW}, ts(millisecond)}, emqx_inflight:new()),
|
||||
Session = session(#{inflight => Inflight}),
|
||||
{ok, Session1} = emqx_session:pubcomp(1, Session),
|
||||
?assertEqual(0, emqx_session:info(inflight_cnt, Session1)).
|
||||
|
@ -261,7 +264,7 @@ t_deliver_qos0(_) ->
|
|||
t_deliver_qos1(_) ->
|
||||
ok = meck:expect(emqx_broker, subscribe, fun(_, _, _) -> ok end),
|
||||
{ok, Session} = emqx_session:subscribe(
|
||||
clientinfo(), <<"t1">>, subopts(#{qos => ?QOS_1}), session()),
|
||||
clientinfo(), <<"t1">>, subopts(#{qos => ?QOS_1}), session()),
|
||||
Delivers = [delivery(?QOS_1, T) || T <- [<<"t1">>, <<"t2">>]],
|
||||
{ok, [{1, Msg1}, {2, Msg2}], Session1} = emqx_session:deliver(Delivers, Session),
|
||||
?assertEqual(2, emqx_session:info(inflight_cnt, Session1)),
|
||||
|
@ -399,4 +402,3 @@ ts(second) ->
|
|||
erlang:system_time(second);
|
||||
ts(millisecond) ->
|
||||
erlang:system_time(millisecond).
|
||||
|
||||
|
|
|
@ -105,6 +105,15 @@ end_per_testcase(_, Config) ->
|
|||
emqx_common_test_helpers:stop_apps([]),
|
||||
Config.
|
||||
|
||||
init_per_suite(Config) ->
|
||||
emqx_channel_SUITE:set_test_listener_confs(),
|
||||
emqx_common_test_helpers:start_apps([]),
|
||||
Config.
|
||||
|
||||
end_per_suite(_) ->
|
||||
emqx_common_test_helpers:stop_apps([]),
|
||||
ok.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Test Cases
|
||||
%%--------------------------------------------------------------------
|
||||
|
@ -131,7 +140,9 @@ t_header(_) ->
|
|||
(<<"x-forwarded-port">>, _, _) -> <<"1000">> end),
|
||||
set_ws_opts(proxy_address_header, <<"x-forwarded-for">>),
|
||||
set_ws_opts(proxy_port_header, <<"x-forwarded-port">>),
|
||||
{ok, St, _} = ?ws_conn:websocket_init([req, #{zone => default, listener => {ws, default}}]),
|
||||
{ok, St, _} = ?ws_conn:websocket_init([req, #{zone => default,
|
||||
limiter => limiter_cfg(),
|
||||
listener => {ws, default}}]),
|
||||
WsPid = spawn(fun() ->
|
||||
receive {call, From, info} ->
|
||||
gen_server:reply(From, ?ws_conn:info(St))
|
||||
|
@ -143,8 +154,9 @@ t_header(_) ->
|
|||
} = SockInfo.
|
||||
|
||||
t_info_limiter(_) ->
|
||||
St = st(#{limiter => emqx_limiter:init(external, [])}),
|
||||
?assertEqual(undefined, ?ws_conn:info(limiter, St)).
|
||||
Limiter = init_limiter(),
|
||||
St = st(#{limiter => Limiter}),
|
||||
?assertEqual(Limiter, ?ws_conn:info(limiter, St)).
|
||||
|
||||
t_info_channel(_) ->
|
||||
#{conn_state := connected} = ?ws_conn:info(channel, st()).
|
||||
|
@ -249,7 +261,7 @@ t_ws_non_check_origin(_) ->
|
|||
headers => [{<<"origin">>, <<"http://localhost:18080">>}]})).
|
||||
|
||||
t_init(_) ->
|
||||
Opts = #{listener => {ws, default}, zone => default},
|
||||
Opts = #{listener => {ws, default}, zone => default, limiter => limiter_cfg()},
|
||||
ok = meck:expect(cowboy_req, parse_header, fun(_, req) -> undefined end),
|
||||
ok = meck:expect(cowboy_req, reply, fun(_, Req) -> Req end),
|
||||
{ok, req, _} = ?ws_conn:init(req, Opts),
|
||||
|
@ -329,8 +341,11 @@ t_websocket_info_deliver(_) ->
|
|||
|
||||
t_websocket_info_timeout_limiter(_) ->
|
||||
Ref = make_ref(),
|
||||
LimiterT = init_limiter(),
|
||||
Next = fun emqx_ws_connection:when_msg_in/3,
|
||||
Limiter = emqx_limiter_container:set_retry_context({retry, [], [], Next}, LimiterT),
|
||||
Event = {timeout, Ref, limit_timeout},
|
||||
{[{active, true}], St} = websocket_info(Event, st(#{limit_timer => Ref})),
|
||||
{ok, St} = websocket_info(Event, st(#{limiter => Limiter})),
|
||||
?assertEqual([], ?ws_conn:info(postponed, St)).
|
||||
|
||||
t_websocket_info_timeout_keepalive(_) ->
|
||||
|
@ -389,23 +404,27 @@ t_handle_timeout_emit_stats(_) ->
|
|||
?assertEqual(undefined, ?ws_conn:info(stats_timer, St)).
|
||||
|
||||
t_ensure_rate_limit(_) ->
|
||||
Limiter = emqx_limiter:init(external, {1, 10}, {100, 1000}, []),
|
||||
Limiter = init_limiter(),
|
||||
St = st(#{limiter => Limiter}),
|
||||
St1 = ?ws_conn:ensure_rate_limit(#{cnt => 0, oct => 0}, St),
|
||||
St2 = ?ws_conn:ensure_rate_limit(#{cnt => 11, oct => 1200}, St1),
|
||||
?assertEqual(blocked, ?ws_conn:info(sockstate, St2)),
|
||||
?assertEqual([{active, false}], ?ws_conn:info(postponed, St2)).
|
||||
{ok, Need} = emqx_limiter_schema:to_capacity("1GB"), %% must be bigger than the value in emqx_ratelimit_SUITE
|
||||
St1 = ?ws_conn:check_limiter([{Need, bytes_in}],
|
||||
[],
|
||||
fun(_, _, S) -> S end,
|
||||
[],
|
||||
St),
|
||||
?assertEqual(blocked, ?ws_conn:info(sockstate, St1)),
|
||||
?assertEqual([{active, false}], ?ws_conn:info(postponed, St1)).
|
||||
|
||||
t_parse_incoming(_) ->
|
||||
St = ?ws_conn:parse_incoming(<<48,3>>, st()),
|
||||
St1 = ?ws_conn:parse_incoming(<<0,1,116>>, St),
|
||||
{Packets, St} = ?ws_conn:parse_incoming(<<48,3>>, [], st()),
|
||||
{Packets1, _} = ?ws_conn:parse_incoming(<<0,1,116>>, Packets, St),
|
||||
Packet = ?PUBLISH_PACKET(?QOS_0, <<"t">>, undefined, <<>>),
|
||||
?assertMatch([{incoming, Packet}], ?ws_conn:info(postponed, St1)).
|
||||
?assertMatch([{incoming, Packet}], Packets1).
|
||||
|
||||
t_parse_incoming_frame_error(_) ->
|
||||
St = ?ws_conn:parse_incoming(<<3,2,1,0>>, st()),
|
||||
{Packets, _St} = ?ws_conn:parse_incoming(<<3,2,1,0>>, [], st()),
|
||||
FrameError = {frame_error, function_clause},
|
||||
[{incoming, FrameError}] = ?ws_conn:info(postponed, St).
|
||||
[{incoming, FrameError}] = Packets.
|
||||
|
||||
t_handle_incomming_frame_error(_) ->
|
||||
FrameError = {frame_error, bad_qos},
|
||||
|
@ -440,7 +459,9 @@ t_shutdown(_) ->
|
|||
|
||||
st() -> st(#{}).
|
||||
st(InitFields) when is_map(InitFields) ->
|
||||
{ok, St, _} = ?ws_conn:websocket_init([req, #{zone => default, listener => {ws, default}}]),
|
||||
{ok, St, _} = ?ws_conn:websocket_init([req, #{zone => default,
|
||||
listener => {ws, default},
|
||||
limiter => limiter_cfg()}]),
|
||||
maps:fold(fun(N, V, S) -> ?ws_conn:set_field(N, V, S) end,
|
||||
?ws_conn:set_field(channel, channel(), St),
|
||||
InitFields
|
||||
|
@ -474,7 +495,9 @@ channel(InitFields) ->
|
|||
maps:fold(fun(Field, Value, Channel) ->
|
||||
emqx_channel:set_field(Field, Value, Channel)
|
||||
end,
|
||||
emqx_channel:init(ConnInfo, #{zone => default, listener => {ws, default}}),
|
||||
emqx_channel:init(ConnInfo, #{zone => default,
|
||||
listener => {ws, default},
|
||||
limiter => limiter_cfg()}),
|
||||
maps:merge(#{clientinfo => ClientInfo,
|
||||
session => Session,
|
||||
conn_state => connected
|
||||
|
@ -533,3 +556,8 @@ ws_client(State) ->
|
|||
after 1000 ->
|
||||
ct:fail(ws_timeout)
|
||||
end.
|
||||
|
||||
limiter_cfg() -> #{}.
|
||||
|
||||
init_limiter() ->
|
||||
emqx_limiter_container:get_limiter_by_names([bytes_in, message_in], limiter_cfg()).
|
||||
|
|
|
@ -0,0 +1,167 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_authn_password_hashing).
|
||||
|
||||
-include_lib("typerefl/include/types.hrl").
|
||||
|
||||
-type(simple_algorithm_name() :: plain | md5 | sha | sha256 | sha512).
|
||||
-type(salt_position() :: prefix | suffix).
|
||||
|
||||
-type(simple_algorithm() :: #{name := simple_algorithm_name(),
|
||||
salt_position := salt_position()}).
|
||||
|
||||
-type(bcrypt_algorithm() :: #{name := bcrypt}).
|
||||
-type(bcrypt_algorithm_rw() :: #{name := bcrypt, salt_rounds := integer()}).
|
||||
|
||||
-type(pbkdf2_algorithm() :: #{name := pbkdf2,
|
||||
mac_fun := emqx_passwd:pbkdf2_mac_fun(),
|
||||
iterations := pos_integer()}).
|
||||
|
||||
-type(algorithm() :: simple_algorithm() | pbkdf2_algorithm() | bcrypt_algorithm()).
|
||||
-type(algorithm_rw() :: simple_algorithm() | pbkdf2_algorithm() | bcrypt_algorithm_rw()).
|
||||
|
||||
%%------------------------------------------------------------------------------
|
||||
%% Hocon Schema
|
||||
%%------------------------------------------------------------------------------
|
||||
|
||||
-behaviour(hocon_schema).
|
||||
|
||||
-export([roots/0,
|
||||
fields/1]).
|
||||
|
||||
-export([type_ro/1,
|
||||
type_rw/1]).
|
||||
|
||||
-export([init/1,
|
||||
gen_salt/1,
|
||||
hash/2,
|
||||
check_password/4]).
|
||||
|
||||
roots() -> [pbkdf2, bcrypt, bcrypt_rw, other_algorithms].
|
||||
|
||||
fields(bcrypt_rw) ->
|
||||
fields(bcrypt) ++
|
||||
[{salt_rounds, fun salt_rounds/1}];
|
||||
|
||||
fields(bcrypt) ->
|
||||
[{name, {enum, [bcrypt]}}];
|
||||
|
||||
fields(pbkdf2) ->
|
||||
[{name, {enum, [pbkdf2]}},
|
||||
{mac_fun, {enum, [md4, md5, ripemd160, sha, sha224, sha256, sha384, sha512]}},
|
||||
{iterations, integer()},
|
||||
{dk_length, fun dk_length/1}];
|
||||
|
||||
fields(other_algorithms) ->
|
||||
[{name, {enum, [plain, md5, sha, sha256, sha512]}},
|
||||
{salt_position, fun salt_position/1}].
|
||||
|
||||
salt_position(type) -> {enum, [prefix, suffix]};
|
||||
salt_position(default) -> prefix;
|
||||
salt_position(_) -> undefined.
|
||||
|
||||
salt_rounds(type) -> integer();
|
||||
salt_rounds(default) -> 10;
|
||||
salt_rounds(_) -> undefined.
|
||||
|
||||
dk_length(type) -> integer();
|
||||
dk_length(nullable) -> true;
|
||||
dk_length(default) -> undefined;
|
||||
dk_length(_) -> undefined.
|
||||
|
||||
type_rw(type) ->
|
||||
hoconsc:union(rw_refs());
|
||||
type_rw(default) -> #{<<"name">> => sha256, <<"salt_position">> => prefix};
|
||||
type_rw(_) -> undefined.
|
||||
|
||||
type_ro(type) ->
|
||||
hoconsc:union(ro_refs());
|
||||
type_ro(default) -> #{<<"name">> => sha256, <<"salt_position">> => prefix};
|
||||
type_ro(_) -> undefined.
|
||||
|
||||
%%------------------------------------------------------------------------------
|
||||
%% APIs
|
||||
%%------------------------------------------------------------------------------
|
||||
|
||||
-spec(init(algorithm()) -> ok).
|
||||
init(#{name := bcrypt}) ->
|
||||
{ok, _} = application:ensure_all_started(bcrypt),
|
||||
ok;
|
||||
init(#{name := _Other}) ->
|
||||
ok.
|
||||
|
||||
|
||||
-spec(gen_salt(algorithm_rw()) -> emqx_passwd:salt()).
|
||||
gen_salt(#{name := plain}) ->
|
||||
<<>>;
|
||||
gen_salt(#{name := bcrypt,
|
||||
salt_rounds := Rounds}) ->
|
||||
{ok, Salt} = bcrypt:gen_salt(Rounds),
|
||||
list_to_binary(Salt);
|
||||
gen_salt(#{name := Other}) when Other =/= plain, Other =/= bcrypt ->
|
||||
<<X:128/big-unsigned-integer>> = crypto:strong_rand_bytes(16),
|
||||
iolist_to_binary(io_lib:format("~32.16.0b", [X])).
|
||||
|
||||
|
||||
-spec(hash(algorithm_rw(), emqx_passwd:password()) -> {emqx_passwd:hash(), emqx_passwd:salt()}).
|
||||
hash(#{name := bcrypt, salt_rounds := _} = Algorithm, Password) ->
|
||||
Salt0 = gen_salt(Algorithm),
|
||||
Hash = emqx_passwd:hash({bcrypt, Salt0}, Password),
|
||||
Salt = Hash,
|
||||
{Hash, Salt};
|
||||
hash(#{name := pbkdf2,
|
||||
mac_fun := MacFun,
|
||||
iterations := Iterations} = Algorithm, Password) ->
|
||||
Salt = gen_salt(Algorithm),
|
||||
DKLength = maps:get(dk_length, Algorithm, undefined),
|
||||
Hash = emqx_passwd:hash({pbkdf2, MacFun, Salt, Iterations, DKLength}, Password),
|
||||
{Hash, Salt};
|
||||
hash(#{name := Other, salt_position := SaltPosition} = Algorithm, Password) ->
|
||||
Salt = gen_salt(Algorithm),
|
||||
Hash = emqx_passwd:hash({Other, Salt, SaltPosition}, Password),
|
||||
{Hash, Salt}.
|
||||
|
||||
|
||||
-spec(check_password(
|
||||
algorithm(),
|
||||
emqx_passwd:salt(),
|
||||
emqx_passwd:hash(),
|
||||
emqx_passwd:password()) -> boolean()).
|
||||
check_password(#{name := bcrypt}, _Salt, PasswordHash, Password) ->
|
||||
emqx_passwd:check_pass({bcrypt, PasswordHash}, PasswordHash, Password);
|
||||
check_password(#{name := pbkdf2,
|
||||
mac_fun := MacFun,
|
||||
iterations := Iterations} = Algorithm,
|
||||
Salt, PasswordHash, Password) ->
|
||||
DKLength = maps:get(dk_length, Algorithm, undefined),
|
||||
emqx_passwd:check_pass({pbkdf2, MacFun, Salt, Iterations, DKLength}, PasswordHash, Password);
|
||||
check_password(#{name := Other, salt_position := SaltPosition}, Salt, PasswordHash, Password) ->
|
||||
emqx_passwd:check_pass({Other, Salt, SaltPosition}, PasswordHash, Password).
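%% Illustrative round trip (not part of the module's exported API): hash a
%% password with an algorithm description on write, then verify it on read
%% with the same description. The concrete algorithm map is just an example
%% value, mirroring the samples in emqx_authn_password_hashing_SUITE.
example_roundtrip() ->
    Algorithm = #{name => sha256, salt_position => prefix},
    ok = init(Algorithm),
    {Hash, Salt} = hash(Algorithm, <<"secret">>),
    true = check_password(Algorithm, Salt, Hash, <<"secret">>).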
|
||||
|
||||
%%------------------------------------------------------------------------------
|
||||
%% Internal functions
|
||||
%%------------------------------------------------------------------------------
|
||||
|
||||
rw_refs() ->
|
||||
[hoconsc:ref(?MODULE, bcrypt_rw),
|
||||
hoconsc:ref(?MODULE, pbkdf2),
|
||||
hoconsc:ref(?MODULE, other_algorithms)].
|
||||
|
||||
ro_refs() ->
|
||||
[hoconsc:ref(?MODULE, bcrypt),
|
||||
hoconsc:ref(?MODULE, pbkdf2),
|
||||
hoconsc:ref(?MODULE, other_algorithms)].
|
|
@ -50,8 +50,7 @@ config_refs(Modules) ->
|
|||
%% in emqx_schema, 'authentication' is a map() type, which makes
%% EMQ X more pluggable.
|
||||
root_type() ->
|
||||
T = authenticator_type(),
|
||||
hoconsc:union([T, hoconsc:array(T)]).
|
||||
hoconsc:array(authenticator_type()).
|
||||
|
||||
mechanism(Name) ->
|
||||
hoconsc:mk(hoconsc:enum([Name]),
|
||||
|
|
|
@ -18,12 +18,10 @@
|
|||
|
||||
-include_lib("emqx/include/emqx_placeholder.hrl").
|
||||
|
||||
-export([ replace_placeholders/2
|
||||
-export([ check_password_from_selected_map/3
|
||||
, replace_placeholders/2
|
||||
, replace_placeholder/2
|
||||
, check_password/3
|
||||
, is_superuser/1
|
||||
, hash/4
|
||||
, gen_salt/0
|
||||
, bin/1
|
||||
, ensure_apps_started/1
|
||||
, cleanup_resources/0
|
||||
|
@ -36,6 +34,17 @@
|
|||
%% APIs
|
||||
%%------------------------------------------------------------------------------
|
||||
|
||||
check_password_from_selected_map(_Algorithm, _Selected, undefined) ->
|
||||
{error, bad_username_or_password};
|
||||
check_password_from_selected_map(
|
||||
Algorithm, #{<<"password_hash">> := Hash} = Selected, Password) ->
|
||||
Salt = maps:get(<<"salt">>, Selected, <<>>),
|
||||
case emqx_authn_password_hashing:check_password(Algorithm, Salt, Hash, Password) of
|
||||
true -> ok;
|
||||
false ->
|
||||
{error, bad_username_or_password}
|
||||
end.
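%% Usage sketch: Selected is the row map returned by a backend query, keyed
%% by binary column names; the salt defaults to <<>> when the query did not
%% select one. The algorithm map below is only an example value.
example_check_selected() ->
    Algorithm = #{name => sha256, salt_position => prefix},
    {Hash, Salt} = emqx_authn_password_hashing:hash(Algorithm, <<"secret">>),
    Selected = #{<<"password_hash">> => Hash, <<"salt">> => Salt},
    ok = check_password_from_selected_map(Algorithm, Selected, <<"secret">>).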
|
||||
|
||||
replace_placeholders(PlaceHolders, Data) ->
|
||||
replace_placeholders(PlaceHolders, Data, []).
|
||||
|
||||
|
@ -64,27 +73,6 @@ replace_placeholder(?PH_CERT_CN_NAME, Credential) ->
|
|||
replace_placeholder(Constant, _) ->
|
||||
Constant.
|
||||
|
||||
check_password(undefined, _Selected, _State) ->
|
||||
{error, bad_username_or_password};
|
||||
check_password(Password,
|
||||
#{<<"password_hash">> := Hash},
|
||||
#{password_hash_algorithm := bcrypt}) ->
|
||||
case emqx_passwd:hash(bcrypt, {Hash, Password}) of
|
||||
Hash -> ok;
|
||||
_ ->
|
||||
{error, bad_username_or_password}
|
||||
end;
|
||||
check_password(Password,
|
||||
#{<<"password_hash">> := Hash} = Selected,
|
||||
#{password_hash_algorithm := Algorithm,
|
||||
salt_position := SaltPosition}) ->
|
||||
Salt = maps:get(<<"salt">>, Selected, <<>>),
|
||||
case hash(Algorithm, Password, Salt, SaltPosition) of
|
||||
Hash -> ok;
|
||||
_ ->
|
||||
{error, bad_username_or_password}
|
||||
end.
|
||||
|
||||
is_superuser(#{<<"is_superuser">> := <<"">>}) ->
|
||||
#{is_superuser => false};
|
||||
is_superuser(#{<<"is_superuser">> := <<"0">>}) ->
|
||||
|
@ -108,15 +96,6 @@ ensure_apps_started(bcrypt) ->
|
|||
ensure_apps_started(_) ->
|
||||
ok.
|
||||
|
||||
hash(Algorithm, Password, Salt, prefix) ->
|
||||
emqx_passwd:hash(Algorithm, <<Salt/binary, Password/binary>>);
|
||||
hash(Algorithm, Password, Salt, suffix) ->
|
||||
emqx_passwd:hash(Algorithm, <<Password/binary, Salt/binary>>).
|
||||
|
||||
gen_salt() ->
|
||||
<<X:128/big-unsigned-integer>> = crypto:strong_rand_bytes(16),
|
||||
iolist_to_binary(io_lib:format("~32.16.0b", [X])).
|
||||
|
||||
bin(A) when is_atom(A) -> atom_to_binary(A, utf8);
|
||||
bin(L) when is_list(L) -> list_to_binary(L);
|
||||
bin(X) -> X.
|
||||
|
|
|
@ -91,31 +91,13 @@ fields(?CONF_NS) ->
|
|||
[ {mechanism, emqx_authn_schema:mechanism('password-based')}
|
||||
, {backend, emqx_authn_schema:backend('built-in-database')}
|
||||
, {user_id_type, fun user_id_type/1}
|
||||
, {password_hash_algorithm, fun password_hash_algorithm/1}
|
||||
] ++ emqx_authn_schema:common_fields();
|
||||
|
||||
fields(bcrypt) ->
|
||||
[ {name, {enum, [bcrypt]}}
|
||||
, {salt_rounds, fun salt_rounds/1}
|
||||
];
|
||||
|
||||
fields(other_algorithms) ->
|
||||
[ {name, {enum, [plain, md5, sha, sha256, sha512]}}
|
||||
].
|
||||
, {password_hash_algorithm, fun emqx_authn_password_hashing:type_rw/1}
|
||||
] ++ emqx_authn_schema:common_fields().
|
||||
|
||||
user_id_type(type) -> user_id_type();
|
||||
user_id_type(default) -> <<"username">>;
|
||||
user_id_type(_) -> undefined.
|
||||
|
||||
password_hash_algorithm(type) -> hoconsc:union([hoconsc:ref(?MODULE, bcrypt),
|
||||
hoconsc:ref(?MODULE, other_algorithms)]);
|
||||
password_hash_algorithm(default) -> #{<<"name">> => sha256};
|
||||
password_hash_algorithm(_) -> undefined.
|
||||
|
||||
salt_rounds(type) -> integer();
|
||||
salt_rounds(default) -> 10;
|
||||
salt_rounds(_) -> undefined.
|
||||
|
||||
%%------------------------------------------------------------------------------
|
||||
%% APIs
|
||||
%%------------------------------------------------------------------------------
|
||||
|
@ -125,22 +107,11 @@ refs() ->
|
|||
|
||||
create(AuthenticatorID,
|
||||
#{user_id_type := Type,
|
||||
password_hash_algorithm := #{name := bcrypt,
|
||||
salt_rounds := SaltRounds}}) ->
|
||||
ok = emqx_authn_utils:ensure_apps_started(bcrypt),
|
||||
password_hash_algorithm := Algorithm}) ->
|
||||
ok = emqx_authn_password_hashing:init(Algorithm),
|
||||
State = #{user_group => AuthenticatorID,
|
||||
user_id_type => Type,
|
||||
password_hash_algorithm => bcrypt,
|
||||
salt_rounds => SaltRounds},
|
||||
{ok, State};
|
||||
|
||||
create(AuthenticatorID,
|
||||
#{user_id_type := Type,
|
||||
password_hash_algorithm := #{name := Name}}) ->
|
||||
ok = emqx_authn_utils:ensure_apps_started(Name),
|
||||
State = #{user_group => AuthenticatorID,
|
||||
user_id_type => Type,
|
||||
password_hash_algorithm => Name},
|
||||
password_hash_algorithm => Algorithm},
|
||||
{ok, State}.
|
||||
|
||||
update(Config, #{user_group := ID}) ->
|
||||
|
@ -156,12 +127,9 @@ authenticate(#{password := Password} = Credential,
|
|||
case mnesia:dirty_read(?TAB, {UserGroup, UserID}) of
|
||||
[] ->
|
||||
ignore;
|
||||
[#user_info{password_hash = PasswordHash, salt = Salt0, is_superuser = IsSuperuser}] ->
|
||||
Salt = case Algorithm of
|
||||
bcrypt -> PasswordHash;
|
||||
_ -> Salt0
|
||||
end,
|
||||
case PasswordHash =:= hash(Algorithm, Password, Salt) of
|
||||
[#user_info{password_hash = PasswordHash, salt = Salt, is_superuser = IsSuperuser}] ->
|
||||
case emqx_authn_password_hashing:check_password(
|
||||
Algorithm, Salt, PasswordHash, Password) of
|
||||
true -> {ok, #{is_superuser => IsSuperuser}};
|
||||
false -> {error, bad_username_or_password}
|
||||
end
|
||||
|
@ -193,12 +161,13 @@ import_users(Filename0, State) ->
|
|||
|
||||
add_user(#{user_id := UserID,
|
||||
password := Password} = UserInfo,
|
||||
#{user_group := UserGroup} = State) ->
|
||||
#{user_group := UserGroup,
|
||||
password_hash_algorithm := Algorithm}) ->
|
||||
trans(
|
||||
fun() ->
|
||||
case mnesia:read(?TAB, {UserGroup, UserID}, write) of
|
||||
[] ->
|
||||
{PasswordHash, Salt} = hash(Password, State),
|
||||
{PasswordHash, Salt} = emqx_authn_password_hashing:hash(Algorithm, Password),
|
||||
IsSuperuser = maps:get(is_superuser, UserInfo, false),
|
||||
insert_user(UserGroup, UserID, PasswordHash, Salt, IsSuperuser),
|
||||
{ok, #{user_id => UserID, is_superuser => IsSuperuser}};
|
||||
|
@ -219,7 +188,8 @@ delete_user(UserID, #{user_group := UserGroup}) ->
|
|||
end).
|
||||
|
||||
update_user(UserID, UserInfo,
|
||||
#{user_group := UserGroup} = State) ->
|
||||
#{user_group := UserGroup,
|
||||
password_hash_algorithm := Algorithm}) ->
|
||||
trans(
|
||||
fun() ->
|
||||
case mnesia:read(?TAB, {UserGroup, UserID}, write) of
|
||||
|
@ -229,11 +199,12 @@ update_user(UserID, UserInfo,
|
|||
, salt = Salt
|
||||
, is_superuser = IsSuperuser}] ->
|
||||
NSuperuser = maps:get(is_superuser, UserInfo, IsSuperuser),
|
||||
{NPasswordHash, NSalt} = case maps:get(password, UserInfo, undefined) of
|
||||
undefined ->
|
||||
{PasswordHash, Salt};
|
||||
Password ->
|
||||
hash(Password, State)
|
||||
{NPasswordHash, NSalt} = case UserInfo of
|
||||
#{password := Password} ->
|
||||
emqx_authn_password_hashing:hash(
|
||||
Algorithm, Password);
|
||||
#{} ->
|
||||
{PasswordHash, Salt}
|
||||
end,
|
||||
insert_user(UserGroup, UserID, NPasswordHash, NSalt, NSuperuser),
|
||||
{ok, #{user_id => UserID, is_superuser => NSuperuser}}
|
||||
|
@ -349,26 +320,6 @@ get_user_info_by_seq([<<"false">> | More1], [<<"is_superuser">> | More2], Acc) -
|
|||
get_user_info_by_seq(_, _, _) ->
|
||||
{error, bad_format}.
|
||||
|
||||
gen_salt(#{password_hash_algorithm := plain}) ->
|
||||
<<>>;
|
||||
gen_salt(#{password_hash_algorithm := bcrypt,
|
||||
salt_rounds := Rounds}) ->
|
||||
{ok, Salt} = bcrypt:gen_salt(Rounds),
|
||||
Salt;
|
||||
gen_salt(_) ->
|
||||
emqx_authn_utils:gen_salt().
|
||||
|
||||
hash(bcrypt, Password, Salt) ->
|
||||
{ok, Hash} = bcrypt:hashpw(Password, Salt),
|
||||
list_to_binary(Hash);
|
||||
hash(Algorithm, Password, Salt) ->
|
||||
emqx_passwd:hash(Algorithm, <<Salt/binary, Password/binary>>).
|
||||
|
||||
hash(Password, #{password_hash_algorithm := Algorithm} = State) ->
|
||||
Salt = gen_salt(State),
|
||||
PasswordHash = hash(Algorithm, Password, Salt),
|
||||
{PasswordHash, Salt}.
|
||||
|
||||
insert_user(UserGroup, UserID, PasswordHash, Salt, IsSuperuser) ->
|
||||
UserInfo = #user_info{user_id = {UserGroup, UserID},
|
||||
password_hash = PasswordHash,
|
||||
|
|
|
@ -63,8 +63,7 @@ common_fields() ->
|
|||
, {password_hash_field, fun password_hash_field/1}
|
||||
, {salt_field, fun salt_field/1}
|
||||
, {is_superuser_field, fun is_superuser_field/1}
|
||||
, {password_hash_algorithm, fun password_hash_algorithm/1}
|
||||
, {salt_position, fun salt_position/1}
|
||||
, {password_hash_algorithm, fun emqx_authn_password_hashing:type_ro/1}
|
||||
] ++ emqx_authn_schema:common_fields().
|
||||
|
||||
collection(type) -> binary();
|
||||
|
@ -84,14 +83,6 @@ is_superuser_field(type) -> binary();
|
|||
is_superuser_field(nullable) -> true;
|
||||
is_superuser_field(_) -> undefined.
|
||||
|
||||
password_hash_algorithm(type) -> {enum, [plain, md5, sha, sha256, sha512, bcrypt]};
|
||||
password_hash_algorithm(default) -> sha256;
|
||||
password_hash_algorithm(_) -> undefined.
|
||||
|
||||
salt_position(type) -> {enum, [prefix, suffix]};
|
||||
salt_position(default) -> prefix;
|
||||
salt_position(_) -> undefined.
|
||||
|
||||
%%------------------------------------------------------------------------------
|
||||
%% APIs
|
||||
%%------------------------------------------------------------------------------
|
||||
|
@ -116,7 +107,7 @@ create(#{selector := Selector} = Config) ->
|
|||
salt_position],
|
||||
Config),
|
||||
#{password_hash_algorithm := Algorithm} = State,
|
||||
ok = emqx_authn_utils:ensure_apps_started(Algorithm),
|
||||
ok = emqx_authn_password_hashing:init(Algorithm),
|
||||
ResourceId = emqx_authn_utils:make_resource_id(?MODULE),
|
||||
NState = State#{
|
||||
selector => NSelector,
|
||||
|
@ -203,24 +194,10 @@ normalize_selector(Selector) ->
|
|||
|
||||
check_password(undefined, _Selected, _State) ->
|
||||
{error, bad_username_or_password};
|
||||
check_password(Password,
|
||||
Doc,
|
||||
#{password_hash_algorithm := bcrypt,
|
||||
password_hash_field := PasswordHashField}) ->
|
||||
case maps:get(PasswordHashField, Doc, undefined) of
|
||||
undefined ->
|
||||
{error, {cannot_find_password_hash_field, PasswordHashField}};
|
||||
Hash ->
|
||||
case {ok, to_list(Hash)} =:= bcrypt:hashpw(Password, Hash) of
|
||||
true -> ok;
|
||||
false -> {error, bad_username_or_password}
|
||||
end
|
||||
end;
|
||||
check_password(Password,
|
||||
Doc,
|
||||
#{password_hash_algorithm := Algorithm,
|
||||
password_hash_field := PasswordHashField,
|
||||
salt_position := SaltPosition} = State) ->
|
||||
password_hash_field := PasswordHashField} = State) ->
|
||||
case maps:get(PasswordHashField, Doc, undefined) of
|
||||
undefined ->
|
||||
{error, {cannot_find_password_hash_field, PasswordHashField}};
|
||||
|
@ -229,7 +206,7 @@ check_password(Password,
|
|||
undefined -> <<>>;
|
||||
SaltField -> maps:get(SaltField, Doc, <<>>)
|
||||
end,
|
||||
case Hash =:= hash(Algorithm, Password, Salt, SaltPosition) of
|
||||
case emqx_authn_password_hashing:check_password(Algorithm, Salt, Hash, Password) of
|
||||
true -> ok;
|
||||
false -> {error, bad_username_or_password}
|
||||
end
|
||||
|
@ -240,12 +217,3 @@ is_superuser(Doc, #{is_superuser_field := IsSuperuserField}) ->
|
|||
emqx_authn_utils:is_superuser(#{<<"is_superuser">> => IsSuperuser});
|
||||
is_superuser(_, _) ->
|
||||
emqx_authn_utils:is_superuser(#{<<"is_superuser">> => false}).
|
||||
|
||||
hash(Algorithm, Password, Salt, prefix) ->
|
||||
emqx_passwd:hash(Algorithm, <<Salt/binary, Password/binary>>);
|
||||
hash(Algorithm, Password, Salt, suffix) ->
|
||||
emqx_passwd:hash(Algorithm, <<Password/binary, Salt/binary>>).
|
||||
|
||||
to_list(L) when is_list(L) -> L;
|
||||
to_list(L) when is_binary(L) -> binary_to_list(L);
|
||||
to_list(X) -> X.
|
||||
|
|
|
@ -46,22 +46,13 @@ roots() -> [?CONF_NS].
|
|||
fields(?CONF_NS) ->
|
||||
[ {mechanism, emqx_authn_schema:mechanism('password-based')}
|
||||
, {backend, emqx_authn_schema:backend(mysql)}
|
||||
, {password_hash_algorithm, fun password_hash_algorithm/1}
|
||||
, {salt_position, fun salt_position/1}
|
||||
, {password_hash_algorithm, fun emqx_authn_password_hashing:type_ro/1}
|
||||
, {query, fun query/1}
|
||||
, {query_timeout, fun query_timeout/1}
|
||||
] ++ emqx_authn_schema:common_fields()
|
||||
++ emqx_connector_schema_lib:relational_db_fields()
|
||||
++ emqx_connector_schema_lib:ssl_fields().
|
||||
|
||||
password_hash_algorithm(type) -> {enum, [plain, md5, sha, sha256, sha512, bcrypt]};
|
||||
password_hash_algorithm(default) -> sha256;
|
||||
password_hash_algorithm(_) -> undefined.
|
||||
|
||||
salt_position(type) -> {enum, [prefix, suffix]};
|
||||
salt_position(default) -> prefix;
|
||||
salt_position(_) -> undefined.
|
||||
|
||||
query(type) -> string();
|
||||
query(_) -> undefined.
|
||||
|
||||
|
@ -80,14 +71,13 @@ create(_AuthenticatorID, Config) ->
|
|||
create(Config).
|
||||
|
||||
create(#{password_hash_algorithm := Algorithm,
|
||||
salt_position := SaltPosition,
|
||||
query := Query0,
|
||||
query_timeout := QueryTimeout
|
||||
} = Config) ->
|
||||
ok = emqx_authn_password_hashing:init(Algorithm),
|
||||
{Query, PlaceHolders} = parse_query(Query0),
|
||||
ResourceId = emqx_authn_utils:make_resource_id(?MODULE),
|
||||
State = #{password_hash_algorithm => Algorithm,
|
||||
salt_position => SaltPosition,
|
||||
query => Query,
|
||||
placeholders => PlaceHolders,
|
||||
query_timeout => QueryTimeout,
|
||||
|
@ -116,13 +106,15 @@ authenticate(#{password := Password} = Credential,
|
|||
#{placeholders := PlaceHolders,
|
||||
query := Query,
|
||||
query_timeout := Timeout,
|
||||
resource_id := ResourceId} = State) ->
|
||||
resource_id := ResourceId,
|
||||
password_hash_algorithm := Algorithm}) ->
|
||||
Params = emqx_authn_utils:replace_placeholders(PlaceHolders, Credential),
|
||||
case emqx_resource:query(ResourceId, {sql, Query, Params, Timeout}) of
|
||||
{ok, _Columns, []} -> ignore;
|
||||
{ok, Columns, [Row | _]} ->
|
||||
Selected = maps:from_list(lists:zip(Columns, Row)),
|
||||
case emqx_authn_utils:check_password(Password, Selected, State) of
|
||||
case emqx_authn_utils:check_password_from_selected_map(
|
||||
Algorithm, Selected, Password) of
|
||||
ok ->
|
||||
{ok, emqx_authn_utils:is_superuser(Selected)};
|
||||
{error, Reason} ->
|
||||
|
|
|
@ -52,21 +52,12 @@ roots() -> [?CONF_NS].
|
|||
fields(?CONF_NS) ->
|
||||
[ {mechanism, emqx_authn_schema:mechanism('password-based')}
|
||||
, {backend, emqx_authn_schema:backend(postgresql)}
|
||||
, {password_hash_algorithm, fun password_hash_algorithm/1}
|
||||
, {salt_position, fun salt_position/1}
|
||||
, {password_hash_algorithm, fun emqx_authn_password_hashing:type_ro/1}
|
||||
, {query, fun query/1}
|
||||
] ++ emqx_authn_schema:common_fields()
|
||||
++ emqx_connector_schema_lib:relational_db_fields()
|
||||
++ emqx_connector_schema_lib:ssl_fields().
|
||||
|
||||
password_hash_algorithm(type) -> {enum, [plain, md5, sha, sha256, sha512, bcrypt]};
|
||||
password_hash_algorithm(default) -> sha256;
|
||||
password_hash_algorithm(_) -> undefined.
|
||||
|
||||
salt_position(type) -> {enum, [prefix, suffix]};
|
||||
salt_position(default) -> prefix;
|
||||
salt_position(_) -> undefined.
|
||||
|
||||
query(type) -> string();
|
||||
query(_) -> undefined.
|
||||
|
||||
|
@ -81,14 +72,13 @@ create(_AuthenticatorID, Config) ->
|
|||
create(Config).
|
||||
|
||||
create(#{query := Query0,
|
||||
password_hash_algorithm := Algorithm,
|
||||
salt_position := SaltPosition} = Config) ->
|
||||
password_hash_algorithm := Algorithm} = Config) ->
|
||||
ok = emqx_authn_password_hashing:init(Algorithm),
|
||||
{Query, PlaceHolders} = parse_query(Query0),
|
||||
ResourceId = emqx_authn_utils:make_resource_id(?MODULE),
|
||||
State = #{query => Query,
|
||||
placeholders => PlaceHolders,
|
||||
password_hash_algorithm => Algorithm,
|
||||
salt_position => SaltPosition,
|
||||
resource_id => ResourceId},
|
||||
case emqx_resource:create_local(ResourceId, emqx_connector_pgsql, Config) of
|
||||
{ok, already_created} ->
|
||||
|
@ -113,14 +103,16 @@ authenticate(#{auth_method := _}, _) ->
|
|||
authenticate(#{password := Password} = Credential,
|
||||
#{query := Query,
|
||||
placeholders := PlaceHolders,
|
||||
resource_id := ResourceId} = State) ->
|
||||
resource_id := ResourceId,
|
||||
password_hash_algorithm := Algorithm}) ->
|
||||
Params = emqx_authn_utils:replace_placeholders(PlaceHolders, Credential),
|
||||
case emqx_resource:query(ResourceId, {sql, Query, Params}) of
|
||||
{ok, _Columns, []} -> ignore;
|
||||
{ok, Columns, [Row | _]} ->
|
||||
NColumns = [Name || #column{name = Name} <- Columns],
|
||||
Selected = maps:from_list(lists:zip(NColumns, erlang:tuple_to_list(Row))),
|
||||
case emqx_authn_utils:check_password(Password, Selected, State) of
|
||||
case emqx_authn_utils:check_password_from_selected_map(
|
||||
Algorithm, Selected, Password) of
|
||||
ok ->
|
||||
{ok, emqx_authn_utils:is_superuser(Selected)};
|
||||
{error, Reason} ->
|
||||
|
|
|
@ -59,21 +59,12 @@ common_fields() ->
|
|||
[ {mechanism, emqx_authn_schema:mechanism('password-based')}
|
||||
, {backend, emqx_authn_schema:backend(redis)}
|
||||
, {cmd, fun cmd/1}
|
||||
, {password_hash_algorithm, fun password_hash_algorithm/1}
|
||||
, {salt_position, fun salt_position/1}
|
||||
, {password_hash_algorithm, fun emqx_authn_password_hashing:type_ro/1}
|
||||
] ++ emqx_authn_schema:common_fields().
|
||||
|
||||
cmd(type) -> string();
|
||||
cmd(_) -> undefined.
|
||||
|
||||
password_hash_algorithm(type) -> {enum, [plain, md5, sha, sha256, sha512, bcrypt]};
|
||||
password_hash_algorithm(default) -> sha256;
|
||||
password_hash_algorithm(_) -> undefined.
|
||||
|
||||
salt_position(type) -> {enum, [prefix, suffix]};
|
||||
salt_position(default) -> prefix;
|
||||
salt_position(_) -> undefined.
|
||||
|
||||
%%------------------------------------------------------------------------------
|
||||
%% APIs
|
||||
%%------------------------------------------------------------------------------
|
||||
|
@ -89,6 +80,7 @@ create(_AuthenticatorID, Config) ->
|
|||
|
||||
create(#{cmd := Cmd,
|
||||
password_hash_algorithm := Algorithm} = Config) ->
|
||||
ok = emqx_authn_password_hashing:init(Algorithm),
|
||||
try
|
||||
NCmd = parse_cmd(Cmd),
|
||||
ok = emqx_authn_utils:ensure_apps_started(Algorithm),
|
||||
|
@ -129,13 +121,15 @@ authenticate(#{auth_method := _}, _) ->
|
|||
ignore;
|
||||
authenticate(#{password := Password} = Credential,
|
||||
#{cmd := {Command, Key, Fields},
|
||||
resource_id := ResourceId} = State) ->
|
||||
resource_id := ResourceId,
|
||||
password_hash_algorithm := Algorithm}) ->
|
||||
NKey = binary_to_list(iolist_to_binary(replace_placeholders(Key, Credential))),
|
||||
case emqx_resource:query(ResourceId, {cmd, [Command, NKey | Fields]}) of
|
||||
{ok, Values} ->
|
||||
case merge(Fields, Values) of
|
||||
#{<<"password_hash">> := _} = Selected ->
|
||||
case emqx_authn_utils:check_password(Password, Selected, State) of
|
||||
case emqx_authn_utils:check_password_from_selected_map(
|
||||
Algorithm, Selected, Password) of
|
||||
ok ->
|
||||
{ok, emqx_authn_utils:is_superuser(Selected)};
|
||||
{error, Reason} ->
|
||||
|
|
|
@ -238,22 +238,22 @@ test_is_superuser({Value, ExpectedValue}) ->
|
|||
|
||||
raw_mongo_auth_config() ->
|
||||
#{
|
||||
mechanism => <<"password-based">>,
|
||||
password_hash_algorithm => <<"plain">>,
|
||||
salt_position => <<"suffix">>,
|
||||
enable => <<"true">>,
|
||||
mechanism => <<"password-based">>,
|
||||
password_hash_algorithm => #{name => <<"plain">>,
|
||||
salt_position => <<"suffix">>},
|
||||
enable => <<"true">>,
|
||||
|
||||
backend => <<"mongodb">>,
|
||||
mongo_type => <<"single">>,
|
||||
database => <<"mqtt">>,
|
||||
collection => <<"users">>,
|
||||
server => mongo_server(),
|
||||
backend => <<"mongodb">>,
|
||||
mongo_type => <<"single">>,
|
||||
database => <<"mqtt">>,
|
||||
collection => <<"users">>,
|
||||
server => mongo_server(),
|
||||
|
||||
selector => #{<<"username">> => <<"${username}">>},
|
||||
password_hash_field => <<"password_hash">>,
|
||||
salt_field => <<"salt">>,
|
||||
is_superuser_field => <<"is_superuser">>
|
||||
}.
|
||||
selector => #{<<"username">> => <<"${username}">>},
|
||||
password_hash_field => <<"password_hash">>,
|
||||
salt_field => <<"salt">>,
|
||||
is_superuser_field => <<"is_superuser">>
|
||||
}.
|
||||
|
||||
user_seeds() ->
|
||||
[#{data => #{
|
||||
|
@ -282,8 +282,8 @@ user_seeds() ->
|
|||
password => <<"md5">>
|
||||
},
|
||||
config_params => #{
|
||||
password_hash_algorithm => <<"md5">>,
|
||||
salt_position => <<"suffix">>
|
||||
password_hash_algorithm => #{name => <<"md5">>,
|
||||
salt_position => <<"suffix">> }
|
||||
},
|
||||
result => {ok,#{is_superuser => false}}
|
||||
},
|
||||
|
@ -300,8 +300,8 @@ user_seeds() ->
|
|||
},
|
||||
config_params => #{
|
||||
selector => #{<<"username">> => <<"${clientid}">>},
|
||||
password_hash_algorithm => <<"sha256">>,
|
||||
salt_position => <<"prefix">>
|
||||
password_hash_algorithm => #{name => <<"sha256">>,
|
||||
salt_position => <<"prefix">>}
|
||||
},
|
||||
result => {ok,#{is_superuser => true}}
|
||||
},
|
||||
|
@ -317,8 +317,7 @@ user_seeds() ->
|
|||
password => <<"bcrypt">>
|
||||
},
|
||||
config_params => #{
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">> % should be ignored
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {ok,#{is_superuser => false}}
|
||||
},
|
||||
|
@ -336,8 +335,7 @@ user_seeds() ->
|
|||
config_params => #{
|
||||
% clientid variable & username credentials
|
||||
selector => #{<<"username">> => <<"${clientid}">>},
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">>
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {error,not_authorized}
|
||||
},
|
||||
|
@ -354,8 +352,7 @@ user_seeds() ->
|
|||
},
|
||||
config_params => #{
|
||||
selector => #{<<"userid">> => <<"${clientid}">>},
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">>
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {error,not_authorized}
|
||||
},
|
||||
|
@ -372,8 +369,7 @@ user_seeds() ->
|
|||
password => <<"wrongpass">>
|
||||
},
|
||||
config_params => #{
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">>
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {error,bad_username_or_password}
|
||||
}
|
||||
|
|
|
@ -204,20 +204,20 @@ t_update(_Config) ->
|
|||
|
||||
raw_mysql_auth_config() ->
|
||||
#{
|
||||
mechanism => <<"password-based">>,
|
||||
password_hash_algorithm => <<"plain">>,
|
||||
salt_position => <<"suffix">>,
|
||||
enable => <<"true">>,
|
||||
mechanism => <<"password-based">>,
|
||||
password_hash_algorithm => #{name => <<"plain">>,
|
||||
salt_position => <<"suffix">>},
|
||||
enable => <<"true">>,
|
||||
|
||||
backend => <<"mysql">>,
|
||||
database => <<"mqtt">>,
|
||||
username => <<"root">>,
|
||||
password => <<"public">>,
|
||||
backend => <<"mysql">>,
|
||||
database => <<"mqtt">>,
|
||||
username => <<"root">>,
|
||||
password => <<"public">>,
|
||||
|
||||
query => <<"SELECT password_hash, salt, is_superuser_str as is_superuser
|
||||
query => <<"SELECT password_hash, salt, is_superuser_str as is_superuser
|
||||
FROM users where username = ${username} LIMIT 1">>,
|
||||
server => mysql_server()
|
||||
}.
|
||||
server => mysql_server()
|
||||
}.
|
||||
|
||||
user_seeds() ->
|
||||
[#{data => #{
|
||||
|
@ -244,8 +244,8 @@ user_seeds() ->
|
|||
password => <<"md5">>
|
||||
},
|
||||
config_params => #{
|
||||
password_hash_algorithm => <<"md5">>,
|
||||
salt_position => <<"suffix">>
|
||||
password_hash_algorithm => #{name => <<"md5">>,
|
||||
salt_position => <<"suffix">>}
|
||||
},
|
||||
result => {ok,#{is_superuser => false}}
|
||||
},
|
||||
|
@ -263,8 +263,8 @@ user_seeds() ->
|
|||
config_params => #{
|
||||
query => <<"SELECT password_hash, salt, is_superuser_int as is_superuser
|
||||
FROM users where username = ${clientid} LIMIT 1">>,
|
||||
password_hash_algorithm => <<"sha256">>,
|
||||
salt_position => <<"prefix">>
|
||||
password_hash_algorithm => #{name => <<"sha256">>,
|
||||
salt_position => <<"prefix">>}
|
||||
},
|
||||
result => {ok,#{is_superuser => true}}
|
||||
},
|
||||
|
@ -282,8 +282,7 @@ user_seeds() ->
|
|||
config_params => #{
|
||||
query => <<"SELECT password_hash, salt, is_superuser_int as is_superuser
|
||||
FROM users where username = ${username} LIMIT 1">>,
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">> % should be ignored
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {ok,#{is_superuser => false}}
|
||||
},
|
||||
|
@ -300,8 +299,7 @@ user_seeds() ->
|
|||
config_params => #{
|
||||
query => <<"SELECT password_hash, salt, is_superuser_int as is_superuser
|
||||
FROM users where username = ${username} LIMIT 1">>,
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">> % should be ignored
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {ok,#{is_superuser => false}}
|
||||
},
|
||||
|
@ -320,8 +318,7 @@ user_seeds() ->
|
|||
% clientid variable & username credentials
|
||||
query => <<"SELECT password_hash, salt, is_superuser_int as is_superuser
|
||||
FROM users where username = ${clientid} LIMIT 1">>,
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">>
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {error,not_authorized}
|
||||
},
|
||||
|
@ -340,8 +337,7 @@ user_seeds() ->
|
|||
% Bad keys in query
|
||||
query => <<"SELECT 1 AS unknown_field
|
||||
FROM users where username = ${username} LIMIT 1">>,
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">>
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {error,not_authorized}
|
||||
},
|
||||
|
@ -358,8 +354,7 @@ user_seeds() ->
|
|||
password => <<"wrongpass">>
|
||||
},
|
||||
config_params => #{
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">>
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {error,bad_username_or_password}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,155 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2020-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_authn_password_hashing_SUITE).
|
||||
|
||||
-compile(nowarn_export_all).
|
||||
-compile(export_all).
|
||||
|
||||
-include_lib("eunit/include/eunit.hrl").
|
||||
-include_lib("common_test/include/ct.hrl").
|
||||
|
||||
-define(SIMPLE_HASHES, [plain, md5, sha, sha256, sha512]).
|
||||
|
||||
all() ->
|
||||
emqx_common_test_helpers:all(?MODULE).
|
||||
|
||||
groups() ->
|
||||
[].
|
||||
|
||||
init_per_suite(Config) ->
|
||||
{ok, _} = application:ensure_all_started(bcrypt),
|
||||
Config.
|
||||
|
||||
end_per_suite(_Config) ->
|
||||
ok.
|
||||
|
||||
t_gen_salt(_Config) ->
|
||||
Algorithms = [#{name => Type, salt_position => suffix} || Type <- ?SIMPLE_HASHES]
|
||||
++ [#{name => bcrypt, salt_rounds => 10}],
|
||||
|
||||
lists:foreach(
|
||||
fun(Algorithm) ->
|
||||
Salt = emqx_authn_password_hashing:gen_salt(Algorithm),
|
||||
ct:pal("gen_salt(~p): ~p", [Algorithm, Salt]),
|
||||
?assert(is_binary(Salt))
|
||||
end,
|
||||
Algorithms).
|
||||
|
||||
t_init(_Config) ->
|
||||
Algorithms = [#{name => Type, salt_position => suffix} || Type <- ?SIMPLE_HASHES]
|
||||
++ [#{name => bcrypt, salt_rounds => 10}],
|
||||
|
||||
lists:foreach(
|
||||
fun(Algorithm) ->
|
||||
ok = emqx_authn_password_hashing:init(Algorithm)
|
||||
end,
|
||||
Algorithms).
|
||||
|
||||
t_check_password(_Config) ->
|
||||
lists:foreach(
|
||||
fun test_check_password/1,
|
||||
hash_examples()).
|
||||
|
||||
test_check_password(#{
|
||||
password_hash := Hash,
|
||||
salt := Salt,
|
||||
password := Password,
|
||||
password_hash_algorithm := Algorithm
|
||||
} = Sample) ->
|
||||
ct:pal("t_check_password sample: ~p", [Sample]),
|
||||
true = emqx_authn_password_hashing:check_password(Algorithm, Salt, Hash, Password),
|
||||
false = emqx_authn_password_hashing:check_password(Algorithm, Salt, Hash, <<"wrongpass">>).
|
||||
|
||||
t_hash(_Config) ->
|
||||
lists:foreach(
|
||||
fun test_hash/1,
|
||||
hash_examples()).
|
||||
|
||||
test_hash(#{password := Password,
|
||||
password_hash_algorithm := Algorithm
|
||||
} = Sample) ->
|
||||
ct:pal("t_hash sample: ~p", [Sample]),
|
||||
{Hash, Salt} = emqx_authn_password_hashing:hash(Algorithm, Password),
|
||||
true = emqx_authn_password_hashing:check_password(Algorithm, Salt, Hash, Password).
|
||||
|
||||
hash_examples() ->
|
||||
[#{
|
||||
password_hash => <<"plainsalt">>,
|
||||
salt => <<"salt">>,
|
||||
password => <<"plain">>,
|
||||
password_hash_algorithm => #{name => plain,
|
||||
salt_position => suffix}
|
||||
},
|
||||
#{
|
||||
password_hash => <<"9b4d0c43d206d48279e69b9ad7132e22">>,
|
||||
salt => <<"salt">>,
|
||||
password => <<"md5">>,
|
||||
password_hash_algorithm => #{name => md5,
|
||||
salt_position => suffix}
|
||||
},
|
||||
#{
|
||||
password_hash => <<"c665d4c0a9e5498806b7d9fd0b417d272853660e">>,
|
||||
salt => <<"salt">>,
|
||||
password => <<"sha">>,
|
||||
password_hash_algorithm => #{name => sha,
|
||||
salt_position => prefix}
|
||||
},
|
||||
#{
|
||||
password_hash => <<"ac63a624e7074776d677dd61a003b8c803eb11db004d0ec6ae032a5d7c9c5caf">>,
|
||||
salt => <<"salt">>,
|
||||
password => <<"sha256">>,
|
||||
password_hash_algorithm => #{name => sha256,
|
||||
salt_position => prefix}
|
||||
},
|
||||
#{
|
||||
password_hash => <<"a1509ab67bfacbad020927b5ac9d91e9100a82e33a0ebb01459367ce921c0aa8"
|
||||
"157aa5652f94bc84fa3babc08283e44887d61c48bcf8ad7bcb3259ee7d0eafcd">>,
|
||||
salt => <<"salt">>,
|
||||
password => <<"sha512">>,
|
||||
password_hash_algorithm => #{name => sha512,
|
||||
salt_position => prefix}
|
||||
},
|
||||
#{
|
||||
password_hash => <<"$2b$12$wtY3h20mUjjmeaClpqZVveDWGlHzCGsvuThMlneGHA7wVeFYyns2u">>,
|
||||
salt => <<"$2b$12$wtY3h20mUjjmeaClpqZVve">>,
|
||||
password => <<"bcrypt">>,
|
||||
|
||||
password_hash_algorithm => #{name => bcrypt,
|
||||
salt_rounds => 10}
|
||||
},
|
||||
|
||||
#{
|
||||
password_hash => <<"01dbee7f4a9e243e988b62c73cda935d"
|
||||
"a05378b93244ec8f48a99e61ad799d86">>,
|
||||
salt => <<"ATHENA.MIT.EDUraeburn">>,
|
||||
password => <<"password">>,
|
||||
|
||||
password_hash_algorithm => #{name => pbkdf2,
|
||||
iterations => 2,
|
||||
dk_length => 32,
|
||||
mac_fun => sha}
|
||||
},
|
||||
#{
|
||||
password_hash => <<"01dbee7f4a9e243e988b62c73cda935da05378b9">>,
|
||||
salt => <<"ATHENA.MIT.EDUraeburn">>,
|
||||
password => <<"password">>,
|
||||
|
||||
password_hash_algorithm => #{name => pbkdf2,
|
||||
iterations => 2,
|
||||
mac_fun => sha}
|
||||
}
|
||||
].
|
|
@ -272,20 +272,20 @@ t_parse_query(_) ->
|
|||
|
||||
raw_pgsql_auth_config() ->
|
||||
#{
|
||||
mechanism => <<"password-based">>,
|
||||
password_hash_algorithm => <<"plain">>,
|
||||
salt_position => <<"suffix">>,
|
||||
enable => <<"true">>,
|
||||
mechanism => <<"password-based">>,
|
||||
password_hash_algorithm => #{name => <<"plain">>,
|
||||
salt_position => <<"suffix">>},
|
||||
enable => <<"true">>,
|
||||
|
||||
backend => <<"postgresql">>,
|
||||
database => <<"mqtt">>,
|
||||
username => <<"root">>,
|
||||
password => <<"public">>,
|
||||
backend => <<"postgresql">>,
|
||||
database => <<"mqtt">>,
|
||||
username => <<"root">>,
|
||||
password => <<"public">>,
|
||||
|
||||
query => <<"SELECT password_hash, salt, is_superuser_str as is_superuser
|
||||
query => <<"SELECT password_hash, salt, is_superuser_str as is_superuser
|
||||
FROM users where username = ${username} LIMIT 1">>,
|
||||
server => pgsql_server()
|
||||
}.
|
||||
server => pgsql_server()
|
||||
}.
|
||||
|
||||
user_seeds() ->
|
||||
[#{data => #{
|
||||
|
@ -312,8 +312,8 @@ user_seeds() ->
|
|||
password => <<"md5">>
|
||||
},
|
||||
config_params => #{
|
||||
password_hash_algorithm => <<"md5">>,
|
||||
salt_position => <<"suffix">>
|
||||
password_hash_algorithm => #{name => <<"md5">>,
|
||||
salt_position => <<"suffix">>}
|
||||
},
|
||||
result => {ok,#{is_superuser => false}}
|
||||
},
|
||||
|
@ -331,8 +331,8 @@ user_seeds() ->
|
|||
config_params => #{
|
||||
query => <<"SELECT password_hash, salt, is_superuser_int as is_superuser
|
||||
FROM users where username = ${clientid} LIMIT 1">>,
|
||||
password_hash_algorithm => <<"sha256">>,
|
||||
salt_position => <<"prefix">>
|
||||
password_hash_algorithm => #{name => <<"sha256">>,
|
||||
salt_position => <<"prefix">>}
|
||||
},
|
||||
result => {ok,#{is_superuser => true}}
|
||||
},
|
||||
|
@ -350,8 +350,7 @@ user_seeds() ->
|
|||
config_params => #{
|
||||
query => <<"SELECT password_hash, salt, is_superuser_int as is_superuser
|
||||
FROM users where username = ${username} LIMIT 1">>,
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">> % should be ignored
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {ok,#{is_superuser => false}}
|
||||
},
|
||||
|
@ -370,8 +369,7 @@ user_seeds() ->
|
|||
% clientid variable & username credentials
|
||||
query => <<"SELECT password_hash, salt, is_superuser_int as is_superuser
|
||||
FROM users where username = ${clientid} LIMIT 1">>,
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">>
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {error,not_authorized}
|
||||
},
|
||||
|
@ -390,8 +388,7 @@ user_seeds() ->
|
|||
% Bad keys in query
|
||||
query => <<"SELECT 1 AS unknown_field
|
||||
FROM users where username = ${username} LIMIT 1">>,
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">>
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {error,not_authorized}
|
||||
},
|
||||
|
@ -408,8 +405,7 @@ user_seeds() ->
|
|||
password => <<"wrongpass">>
|
||||
},
|
||||
config_params => #{
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">>
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {error,bad_username_or_password}
|
||||
}
|
||||
|
|
|
@ -208,137 +208,150 @@ t_update(_Config) ->
|
|||
|
||||
raw_redis_auth_config() ->
|
||||
#{
|
||||
mechanism => <<"password-based">>,
|
||||
password_hash_algorithm => <<"plain">>,
|
||||
salt_position => <<"suffix">>,
|
||||
enable => <<"true">>,
|
||||
mechanism => <<"password-based">>,
|
||||
password_hash_algorithm => #{name => <<"plain">>,
|
||||
salt_position => <<"suffix">>},
|
||||
enable => <<"true">>,
|
||||
|
||||
backend => <<"redis">>,
|
||||
cmd => <<"HMGET mqtt_user:${username} password_hash salt is_superuser">>,
|
||||
database => <<"1">>,
|
||||
password => <<"public">>,
|
||||
server => redis_server()
|
||||
}.
|
||||
backend => <<"redis">>,
|
||||
cmd => <<"HMGET mqtt_user:${username} password_hash salt is_superuser">>,
|
||||
database => <<"1">>,
|
||||
password => <<"public">>,
|
||||
server => redis_server()
|
||||
}.
|
||||
|
||||
user_seeds() ->
|
||||
[#{data => #{
|
||||
password_hash => "plainsalt",
|
||||
salt => "salt",
|
||||
is_superuser => "1"
|
||||
password_hash => <<"plainsalt">>,
|
||||
salt => <<"salt">>,
|
||||
is_superuser => <<"1">>
|
||||
},
|
||||
credentials => #{
|
||||
username => <<"plain">>,
|
||||
password => <<"plain">>},
|
||||
key => "mqtt_user:plain",
|
||||
key => <<"mqtt_user:plain">>,
|
||||
config_params => #{},
|
||||
result => {ok,#{is_superuser => true}}
|
||||
},
|
||||
|
||||
#{data => #{
|
||||
password_hash => "9b4d0c43d206d48279e69b9ad7132e22",
|
||||
salt => "salt",
|
||||
is_superuser => "0"
|
||||
password_hash => <<"9b4d0c43d206d48279e69b9ad7132e22">>,
|
||||
salt => <<"salt">>,
|
||||
is_superuser => <<"0">>
|
||||
},
|
||||
credentials => #{
|
||||
username => <<"md5">>,
|
||||
password => <<"md5">>
|
||||
},
|
||||
key => "mqtt_user:md5",
|
||||
key => <<"mqtt_user:md5">>,
|
||||
config_params => #{
|
||||
password_hash_algorithm => <<"md5">>,
|
||||
salt_position => <<"suffix">>
|
||||
password_hash_algorithm => #{name => <<"md5">>,
|
||||
salt_position => <<"suffix">>}
|
||||
},
|
||||
result => {ok,#{is_superuser => false}}
|
||||
},
|
||||
|
||||
#{data => #{
|
||||
password_hash => "ac63a624e7074776d677dd61a003b8c803eb11db004d0ec6ae032a5d7c9c5caf",
|
||||
salt => "salt",
|
||||
is_superuser => "1"
|
||||
password_hash => <<"ac63a624e7074776d677dd61a003b8c803eb11db004d0ec6ae032a5d7c9c5caf">>,
|
||||
salt => <<"salt">>,
|
||||
is_superuser => <<"1">>
|
||||
},
|
||||
credentials => #{
|
||||
clientid => <<"sha256">>,
|
||||
password => <<"sha256">>
|
||||
},
|
||||
key => "mqtt_user:sha256",
|
||||
key => <<"mqtt_user:sha256">>,
|
||||
config_params => #{
|
||||
cmd => <<"HMGET mqtt_user:${clientid} password_hash salt is_superuser">>,
|
||||
password_hash_algorithm => <<"sha256">>,
|
||||
salt_position => <<"prefix">>
|
||||
password_hash_algorithm => #{name => <<"sha256">>,
|
||||
salt_position => <<"prefix">>}
|
||||
},
|
||||
result => {ok,#{is_superuser => true}}
|
||||
},
|
||||
|
||||
#{data => #{
|
||||
password_hash => "$2b$12$wtY3h20mUjjmeaClpqZVveDWGlHzCGsvuThMlneGHA7wVeFYyns2u",
|
||||
salt => "$2b$12$wtY3h20mUjjmeaClpqZVve",
|
||||
is_superuser => "0"
|
||||
password_hash => <<"$2b$12$wtY3h20mUjjmeaClpqZVveDWGlHzCGsvuThMlneGHA7wVeFYyns2u">>,
|
||||
salt => <<"$2b$12$wtY3h20mUjjmeaClpqZVve">>,
|
||||
is_superuser => <<"0">>
|
||||
},
|
||||
credentials => #{
|
||||
username => <<"bcrypt">>,
|
||||
password => <<"bcrypt">>
|
||||
},
|
||||
key => "mqtt_user:bcrypt",
|
||||
key => <<"mqtt_user:bcrypt">>,
|
||||
config_params => #{
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">> % should be ignored
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {ok,#{is_superuser => false}}
|
||||
},
|
||||
|
||||
#{data => #{
|
||||
password_hash => "$2b$12$wtY3h20mUjjmeaClpqZVveDWGlHzCGsvuThMlneGHA7wVeFYyns2u",
|
||||
salt => "$2b$12$wtY3h20mUjjmeaClpqZVve",
|
||||
is_superuser => "0"
|
||||
password_hash => <<"01dbee7f4a9e243e988b62c73cda935da05378b9">>,
|
||||
salt => <<"ATHENA.MIT.EDUraeburn">>,
|
||||
is_superuser => <<"0">>
|
||||
},
|
||||
credentials => #{
|
||||
username => <<"pbkdf2">>,
|
||||
password => <<"password">>
|
||||
},
|
||||
key => <<"mqtt_user:pbkdf2">>,
|
||||
config_params => #{
|
||||
password_hash_algorithm => #{name => <<"pbkdf2">>,
|
||||
iterations => 2,
|
||||
mac_fun => sha
|
||||
}
|
||||
},
|
||||
result => {ok,#{is_superuser => false}}
|
||||
},
|
||||
#{data => #{
|
||||
password_hash => <<"$2b$12$wtY3h20mUjjmeaClpqZVveDWGlHzCGsvuThMlneGHA7wVeFYyns2u">>,
|
||||
salt => <<"$2b$12$wtY3h20mUjjmeaClpqZVve">>,
|
||||
is_superuser => <<"0">>
|
||||
},
|
||||
credentials => #{
|
||||
username => <<"bcrypt0">>,
|
||||
password => <<"bcrypt">>
|
||||
},
|
||||
key => "mqtt_user:bcrypt0",
|
||||
key => <<"mqtt_user:bcrypt0">>,
|
||||
config_params => #{
|
||||
% clientid variable & username credentials
|
||||
cmd => <<"HMGET mqtt_client:${clientid} password_hash salt is_superuser">>,
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">>
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {error,not_authorized}
|
||||
},
|
||||
|
||||
#{data => #{
|
||||
password_hash => "$2b$12$wtY3h20mUjjmeaClpqZVveDWGlHzCGsvuThMlneGHA7wVeFYyns2u",
|
||||
salt => "$2b$12$wtY3h20mUjjmeaClpqZVve",
|
||||
is_superuser => "0"
|
||||
password_hash => <<"$2b$12$wtY3h20mUjjmeaClpqZVveDWGlHzCGsvuThMlneGHA7wVeFYyns2u">>,
|
||||
salt => <<"$2b$12$wtY3h20mUjjmeaClpqZVve">>,
|
||||
is_superuser => <<"0">>
|
||||
},
|
||||
credentials => #{
|
||||
username => <<"bcrypt1">>,
|
||||
password => <<"bcrypt">>
|
||||
},
|
||||
key => "mqtt_user:bcrypt1",
|
||||
key => <<"mqtt_user:bcrypt1">>,
|
||||
config_params => #{
|
||||
% Bad key in cmd
|
||||
cmd => <<"HMGET badkey:${username} password_hash salt is_superuser">>,
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">>
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {error,not_authorized}
|
||||
},
|
||||
|
||||
#{data => #{
|
||||
password_hash => "$2b$12$wtY3h20mUjjmeaClpqZVveDWGlHzCGsvuThMlneGHA7wVeFYyns2u",
|
||||
salt => "$2b$12$wtY3h20mUjjmeaClpqZVve",
|
||||
is_superuser => "0"
|
||||
password_hash => <<"$2b$12$wtY3h20mUjjmeaClpqZVveDWGlHzCGsvuThMlneGHA7wVeFYyns2u">>,
|
||||
salt => <<"$2b$12$wtY3h20mUjjmeaClpqZVve">>,
|
||||
is_superuser => <<"0">>
|
||||
},
|
||||
credentials => #{
|
||||
username => <<"bcrypt2">>,
|
||||
% Wrong password
|
||||
password => <<"wrongpass">>
|
||||
},
|
||||
key => "mqtt_user:bcrypt2",
|
||||
key => <<"mqtt_user:bcrypt2">>,
|
||||
config_params => #{
|
||||
cmd => <<"HMGET mqtt_user:${username} password_hash salt is_superuser">>,
|
||||
password_hash_algorithm => <<"bcrypt">>,
|
||||
salt_position => <<"suffix">>
|
||||
password_hash_algorithm => #{name => <<"bcrypt">>}
|
||||
},
|
||||
result => {error,bad_username_or_password}
|
||||
}
|
||||
|
|
|
@ -1,24 +1,3 @@
|
|||
-type(ipaddress() :: {ipaddr, esockd_cidr:cidr_string()} |
|
||||
{ipaddrs, list(esockd_cidr:cidr_string())}).
|
||||
|
||||
-type(username() :: {username, binary()}).
|
||||
|
||||
-type(clientid() :: {clientid, binary()}).
|
||||
|
||||
-type(who() :: ipaddress() | username() | clientid() |
|
||||
{'and', [ipaddress() | username() | clientid()]} |
|
||||
{'or', [ipaddress() | username() | clientid()]} |
|
||||
all).
|
||||
|
||||
-type(action() :: subscribe | publish | all).
|
||||
|
||||
-type(permission() :: allow | deny).
|
||||
|
||||
-type(rule() :: {permission(), who(), action(), list(emqx_types:topic())}).
|
||||
-type(rules() :: [rule()]).
|
||||
|
||||
-type(sources() :: [map()]).
|
||||
|
||||
-define(APP, emqx_authz).
|
||||
|
||||
-define(ALLOW_DENY(A), ((A =:= allow) orelse (A =:= <<"allow">>) orelse
|
||||
|
@ -29,26 +8,6 @@
|
|||
(A =:= all) orelse (A =:= <<"all">>)
|
||||
)).
|
||||
|
||||
-define(ACL_SHARDED, emqx_acl_sharded).
|
||||
|
||||
-define(ACL_TABLE, emqx_acl).
|
||||
|
||||
%% To save some space, use an integer for label, 0 for 'all', {1, Username} and {2, ClientId}.
|
||||
-define(ACL_TABLE_ALL, 0).
|
||||
-define(ACL_TABLE_USERNAME, 1).
|
||||
-define(ACL_TABLE_CLIENTID, 2).
|
||||
|
||||
-record(emqx_acl, {
|
||||
who :: ?ACL_TABLE_ALL| {?ACL_TABLE_USERNAME, binary()} | {?ACL_TABLE_CLIENTID, binary()},
|
||||
rules :: [ {permission(), action(), emqx_topic:topic()} ]
|
||||
}).
|
||||
|
||||
-record(authz_metrics, {
|
||||
allow = 'client.authorize.allow',
|
||||
deny = 'client.authorize.deny',
|
||||
ignore = 'client.authorize.ignore'
|
||||
}).
|
||||
|
||||
-define(CMD_REPLACE, replace).
|
||||
-define(CMD_DELETE, delete).
|
||||
-define(CMD_PREPEND, prepend).
|
||||
|
@ -60,12 +19,6 @@
|
|||
-define(CMD_MOVE_BEFORE(Before), {<<"before">>, Before}).
|
||||
-define(CMD_MOVE_AFTER(After), {<<"after">>, After}).
|
||||
|
||||
-define(METRICS(Type), tl(tuple_to_list(#Type{}))).
|
||||
-define(METRICS(Type, K), #Type{}#Type.K).
|
||||
|
||||
-define(AUTHZ_METRICS, ?METRICS(authz_metrics)).
|
||||
-define(AUTHZ_METRICS(K), ?METRICS(authz_metrics, K)).
|
||||
|
||||
-define(CONF_KEY_PATH, [authorization, sources]).
|
||||
|
||||
-define(RE_PLACEHOLDER, "\\$\\{[a-z0-9\\-]+\\}").
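
The ?RE_PLACEHOLDER pattern above matches the ${...} placeholders used in topics and commands, such as ${username} or ${clientid}. A quick illustrative check of the pattern (subjects are made up; run in an Erlang shell):

%% Illustrative check of the placeholder pattern in the define above.
RE = "\\$\\{[a-z0-9\\-]+\\}",
{match, [<<"${username}">>]} =
    re:run(<<"t/${username}/events">>, RE, [{capture, first, binary}]),
nomatch = re:run(<<"t/plain/events">>, RE).
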
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
|
||||
-export([ register_metrics/0
|
||||
, init/0
|
||||
, deinit/0
|
||||
, lookup/0
|
||||
, lookup/1
|
||||
, move/2
|
||||
|
@ -42,9 +43,51 @@
|
|||
|
||||
-export([ph_to_re/1]).
|
||||
|
||||
-type(source() :: map()).
|
||||
|
||||
-type(match_result() :: {matched, allow} | {matched, deny} | nomatch).
|
||||
|
||||
-type(default_result() :: allow | deny).
|
||||
|
||||
-type(authz_result() :: {stop, allow} | {ok, deny}).
|
||||
|
||||
-type(sources() :: [source()]).
|
||||
|
||||
-define(METRIC_ALLOW, 'client.authorize.allow').
|
||||
-define(METRIC_DENY, 'client.authorize.deny').
|
||||
-define(METRIC_NOMATCH, 'client.authorize.nomatch').
|
||||
|
||||
-define(METRICS, [?METRIC_ALLOW, ?METRIC_DENY, ?METRIC_NOMATCH]).
|
||||
|
||||
%% Initialize authz backend.
|
||||
%% Populate the passed configuration map with necessary data,
|
||||
%% like `ResourceID`s
|
||||
-callback(init(source()) -> source()).
|
||||
|
||||
%% Get authz text description.
|
||||
-callback(description() -> string()).
|
||||
|
||||
%% Destroy authz backend.
|
||||
%% Clean up all allocated data.
|
||||
%% An authz backend will not be used after `destroy`.
|
||||
-callback(destroy(source()) -> ok).
|
||||
|
||||
%% Check if a configuration map is valid for further
|
||||
%% authz backend initialization.
|
||||
%% The callback must deallocate all resources allocated
|
||||
%% during verification.
|
||||
-callback(dry_run(source()) -> ok | {error, term()}).
|
||||
|
||||
%% Authorize client action.
|
||||
-callback(authorize(
|
||||
emqx_types:clientinfo(),
|
||||
emqx_types:pubsub(),
|
||||
emqx_types:topic(),
|
||||
source()) -> match_result()).
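
These callbacks are the whole contract a backend has to satisfy. A minimal sketch of a backend module written against the behaviour above; the module name and the hard-coded rule are illustrative, not part of this change:

%% Minimal sketch of an authz backend implementing the callbacks above.
-module(emqx_authz_example).

-behaviour(emqx_authz).

-export([description/0, init/1, destroy/1, dry_run/1, authorize/4]).

description() -> "AuthZ example backend".

%% Nothing to allocate: return the source unchanged.
init(Source) -> Source.

%% Nothing to release.
destroy(_Source) -> ok.

%% Configuration is always considered valid in this sketch.
dry_run(_Source) -> ok.

%% Deny subscriptions under "forbidden/", ignore everything else.
authorize(_Client, subscribe, <<"forbidden/", _/binary>>, _Source) ->
    {matched, deny};
authorize(_Client, _PubSub, _Topic, _Source) ->
    nomatch.
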
|
||||
|
||||
-spec(register_metrics() -> ok).
|
||||
register_metrics() ->
|
||||
lists:foreach(fun emqx_metrics:ensure/1, ?AUTHZ_METRICS).
|
||||
lists:foreach(fun emqx_metrics:ensure/1, ?METRICS).
|
||||
|
||||
init() ->
|
||||
ok = register_metrics(),
|
||||
|
@ -54,6 +97,11 @@ init() ->
|
|||
NSources = init_sources(Sources),
|
||||
ok = emqx_hooks:add('client.authorize', {?MODULE, authorize, [NSources]}, -1).
|
||||
|
||||
deinit() ->
|
||||
ok = emqx_hooks:del('client.authorize', {?MODULE, authorize}),
|
||||
emqx_conf:remove_handler(?CONF_KEY_PATH),
|
||||
emqx_authz_utils:cleanup_resources().
|
||||
|
||||
lookup() ->
|
||||
{_M, _F, [A]}= find_action_in_hooks(),
|
||||
A.
|
||||
|
@ -115,7 +163,7 @@ do_update({{?CMD_REPLACE, Type}, #{<<"enable">> := true} = Source}, Conf) when i
|
|||
NConf = Front ++ [Source | Rear],
|
||||
ok = check_dup_types(NConf),
|
||||
NConf;
|
||||
Error -> Error
|
||||
{error, _} = Error -> Error
|
||||
end;
|
||||
do_update({{?CMD_REPLACE, Type}, Source}, Conf) when is_map(Source), is_list(Conf) ->
|
||||
{_Old, Front, Rear} = take(Type, Conf),
|
||||
|
@ -178,9 +226,9 @@ do_post_update(_, NewSources) ->
|
|||
ok = emqx_authz_cache:drain_cache().
|
||||
|
||||
ensure_resource_deleted(#{enable := false}) -> ok;
|
||||
ensure_resource_deleted(#{type := file}) -> ok;
|
||||
ensure_resource_deleted(#{type := 'built-in-database'}) -> ok;
|
||||
ensure_resource_deleted(#{annotations := #{id := Id}}) -> ok = emqx_resource:remove(Id).
|
||||
ensure_resource_deleted(#{type := Type} = Source) ->
|
||||
Module = authz_module(Type),
|
||||
Module:destroy(Source).
|
||||
|
||||
check_dup_types(Sources) ->
|
||||
check_dup_types(Sources, []).
|
||||
|
@ -204,26 +252,10 @@ check_dup_types([Source | Sources], Checked) ->
|
|||
check_dup_types(Sources, [Type | Checked])
|
||||
end.
|
||||
|
||||
create_dry_run(T, Source) ->
|
||||
case is_connector_source(T) of
|
||||
true ->
|
||||
[CheckedSource] = check_sources([Source]),
|
||||
case T of
|
||||
http ->
|
||||
URIMap = maps:get(url, CheckedSource),
|
||||
NSource = maps:put(base_url, maps:remove(query, URIMap), CheckedSource)
|
||||
end,
|
||||
emqx_resource:create_dry_run(connector_module(T), NSource);
|
||||
false ->
|
||||
ok
|
||||
end.
|
||||
|
||||
is_connector_source(http) -> true;
|
||||
is_connector_source(mongodb) -> true;
|
||||
is_connector_source(mysql) -> true;
|
||||
is_connector_source(postgresql) -> true;
|
||||
is_connector_source(redis) -> true;
|
||||
is_connector_source(_) -> false.
|
||||
create_dry_run(Type, Source) ->
|
||||
[CheckedSource] = check_sources([Source]),
|
||||
Module = authz_module(Type),
|
||||
Module:dry_run(CheckedSource).
|
||||
|
||||
init_sources(Sources) ->
|
||||
{_Enabled, Disabled} = lists:partition(fun(#{enable := Enable}) -> Enable end, Sources),
|
||||
|
@ -234,54 +266,9 @@ init_sources(Sources) ->
|
|||
lists:map(fun init_source/1, Sources).
|
||||
|
||||
init_source(#{enable := false} = Source) -> Source;
|
||||
init_source(#{type := file,
|
||||
path := Path
|
||||
} = Source) ->
|
||||
Rules = case file:consult(Path) of
|
||||
{ok, Terms} ->
|
||||
[emqx_authz_rule:compile(Term) || Term <- Terms];
|
||||
{error, eacces} ->
|
||||
?SLOG(alert, #{msg => "insufficient_permissions_to_read_file", path => Path}),
|
||||
error(eacces);
|
||||
{error, enoent} ->
|
||||
?SLOG(alert, #{msg => "file_does_not_exist", path => Path}),
|
||||
error(enoent);
|
||||
{error, Reason} ->
|
||||
?SLOG(alert, #{msg => "failed_to_read_file", path => Path, reason => Reason}),
|
||||
error(Reason)
|
||||
end,
|
||||
Source#{annotations => #{rules => Rules}};
|
||||
init_source(#{type := http,
|
||||
url := Url
|
||||
} = Source) ->
|
||||
NSource= maps:put(base_url, maps:remove(query, Url), Source),
|
||||
case create_resource(NSource) of
|
||||
{error, Reason} -> error({load_config_error, Reason});
|
||||
Id -> Source#{annotations => #{id => Id}}
|
||||
end;
|
||||
init_source(#{type := 'built-in-database'
|
||||
} = Source) ->
|
||||
Source;
|
||||
init_source(#{type := DB
|
||||
} = Source) when DB =:= redis;
|
||||
DB =:= mongodb ->
|
||||
case create_resource(Source) of
|
||||
{error, Reason} -> error({load_config_error, Reason});
|
||||
Id -> Source#{annotations => #{id => Id}}
|
||||
end;
|
||||
init_source(#{type := DB,
|
||||
query := SQL
|
||||
} = Source) when DB =:= mysql;
|
||||
DB =:= postgresql ->
|
||||
Mod = authz_module(DB),
|
||||
case create_resource(Source) of
|
||||
{error, Reason} -> error({load_config_error, Reason});
|
||||
Id -> Source#{annotations =>
|
||||
#{id => Id,
|
||||
query => erlang:apply(Mod, parse_query, [SQL])
|
||||
}
|
||||
}
|
||||
end.
|
||||
init_source(#{type := Type} = Source) ->
|
||||
Module = authz_module(Type),
|
||||
Module:init(Source).
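
With this change init_source/1, ensure_resource_deleted/1 and create_dry_run/2 all reduce to the same pattern: map the source type to its backend module and call the matching behaviour callback. Roughly (a sketch, not literal code from this change):

%% Sketch of the shared dispatch pattern introduced by this refactor.
dispatch(Callback, #{type := Type} = Source) ->
    Module = authz_module(Type),   %% e.g. redis -> emqx_authz_redis
    Module:Callback(Source).       %% init/1, destroy/1 or dry_run/1
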
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% AuthZ callbacks
|
||||
|
@ -289,11 +276,11 @@ init_source(#{type := DB,
|
|||
|
||||
%% @doc Check AuthZ
|
||||
-spec(authorize( emqx_types:clientinfo()
|
||||
, emqx_types:all()
|
||||
, emqx_types:pubsub()
|
||||
, emqx_types:topic()
|
||||
, allow | deny
|
||||
, default_result()
|
||||
, sources())
|
||||
-> {stop, allow} | {ok, deny}).
|
||||
-> authz_result()).
|
||||
authorize(#{username := Username,
|
||||
peerhost := IpAddress
|
||||
} = Client, PubSub, Topic, DefaultResult, Sources) ->
|
||||
|
@ -303,14 +290,14 @@ authorize(#{username := Username,
|
|||
username => Username,
|
||||
ipaddr => IpAddress,
|
||||
topic => Topic}),
|
||||
emqx_metrics:inc(?AUTHZ_METRICS(allow)),
|
||||
emqx_metrics:inc(?METRIC_ALLOW),
|
||||
{stop, allow};
|
||||
{matched, deny} ->
|
||||
?SLOG(info, #{msg => "authorization_permission_denied",
|
||||
username => Username,
|
||||
ipaddr => IpAddress,
|
||||
topic => Topic}),
|
||||
emqx_metrics:inc(?AUTHZ_METRICS(deny)),
|
||||
emqx_metrics:inc(?METRIC_DENY),
|
||||
{stop, deny};
|
||||
nomatch ->
|
||||
?SLOG(info, #{msg => "authorization_failed_nomatch",
|
||||
|
@ -318,6 +305,7 @@ authorize(#{username := Username,
|
|||
ipaddr => IpAddress,
|
||||
topic => Topic,
|
||||
reason => "no-match rule"}),
|
||||
emqx_metrics:inc(?METRIC_NOMATCH),
|
||||
{stop, DefaultResult}
|
||||
end.
|
||||
|
||||
|
@ -325,16 +313,10 @@ do_authorize(_Client, _PubSub, _Topic, []) ->
|
|||
nomatch;
|
||||
do_authorize(Client, PubSub, Topic, [#{enable := false} | Rest]) ->
|
||||
do_authorize(Client, PubSub, Topic, Rest);
|
||||
do_authorize(Client, PubSub, Topic, [#{type := file} = F | Tail]) ->
|
||||
#{annotations := #{rules := Rules}} = F,
|
||||
case emqx_authz_rule:matches(Client, PubSub, Topic, Rules) of
|
||||
nomatch -> do_authorize(Client, PubSub, Topic, Tail);
|
||||
Matched -> Matched
|
||||
end;
|
||||
do_authorize(Client, PubSub, Topic,
|
||||
[Connector = #{type := Type} | Tail] ) ->
|
||||
Mod = authz_module(Type),
|
||||
case erlang:apply(Mod, authorize, [Client, PubSub, Topic, Connector]) of
|
||||
Module = authz_module(Type),
|
||||
case Module:authorize(Client, PubSub, Topic, Connector) of
|
||||
nomatch -> do_authorize(Client, PubSub, Topic, Tail);
|
||||
Matched -> Matched
|
||||
end.
|
||||
|
@ -367,29 +349,11 @@ find_action_in_hooks() ->
|
|||
[Action] = [Action || {callback,{?MODULE, authorize, _} = Action, _, _} <- Callbacks ],
|
||||
Action.
|
||||
|
||||
gen_id(Type) ->
|
||||
iolist_to_binary([io_lib:format("~ts_~ts",[?APP, Type])]).
|
||||
|
||||
create_resource(#{type := DB} = Source) ->
|
||||
ResourceID = gen_id(DB),
|
||||
case emqx_resource:create(ResourceID, connector_module(DB), Source) of
|
||||
{ok, already_created} -> ResourceID;
|
||||
{ok, _} -> ResourceID;
|
||||
{error, Reason} -> {error, Reason}
|
||||
end.
|
||||
|
||||
authz_module('built-in-database') ->
|
||||
emqx_authz_mnesia;
|
||||
authz_module(Type) ->
|
||||
list_to_existing_atom("emqx_authz_" ++ atom_to_list(Type)).
|
||||
|
||||
connector_module(mongodb) ->
|
||||
emqx_connector_mongo;
|
||||
connector_module(postgresql) ->
|
||||
emqx_connector_pgsql;
|
||||
connector_module(Type) ->
|
||||
list_to_existing_atom("emqx_connector_" ++ atom_to_list(Type)).
|
||||
|
||||
type(#{type := Type}) -> type(Type);
|
||||
type(#{<<"type">> := Type}) -> type(Type);
|
||||
type(file) -> file;
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
|
||||
-include("emqx_authz.hrl").
|
||||
-include_lib("emqx/include/logger.hrl").
|
||||
-include_lib("stdlib/include/ms_transform.hrl").
|
||||
-include_lib("typerefl/include/types.hrl").
|
||||
|
||||
-define(FORMAT_USERNAME_FUN, {?MODULE, format_by_username}).
|
||||
|
@ -269,39 +268,27 @@ fields(meta) ->
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
users(get, #{query_string := PageParams}) ->
|
||||
MatchSpec = ets:fun2ms(
|
||||
fun({?ACL_TABLE, {?ACL_TABLE_USERNAME, Username}, Rules}) ->
|
||||
[{username, Username}, {rules, Rules}]
|
||||
end),
|
||||
{200, emqx_mgmt_api:paginate(?ACL_TABLE, MatchSpec, PageParams, ?FORMAT_USERNAME_FUN)};
|
||||
{Table, MatchSpec} = emqx_authz_mnesia:list_username_rules(),
|
||||
{200, emqx_mgmt_api:paginate(Table, MatchSpec, PageParams, ?FORMAT_USERNAME_FUN)};
|
||||
users(post, #{body := Body}) when is_list(Body) ->
|
||||
lists:foreach(fun(#{<<"username">> := Username, <<"rules">> := Rules}) ->
|
||||
mria:dirty_write(#emqx_acl{
|
||||
who = {?ACL_TABLE_USERNAME, Username},
|
||||
rules = format_rules(Rules)
|
||||
})
|
||||
emqx_authz_mnesia:store_rules({username, Username}, format_rules(Rules))
|
||||
end, Body),
|
||||
{204}.
|
||||
|
||||
clients(get, #{query_string := PageParams}) ->
|
||||
MatchSpec = ets:fun2ms(
|
||||
fun({?ACL_TABLE, {?ACL_TABLE_CLIENTID, Clientid}, Rules}) ->
|
||||
[{clientid, Clientid}, {rules, Rules}]
|
||||
end),
|
||||
{200, emqx_mgmt_api:paginate(?ACL_TABLE, MatchSpec, PageParams, ?FORMAT_CLIENTID_FUN)};
|
||||
{Table, MatchSpec} = emqx_authz_mnesia:list_clientid_rules(),
|
||||
{200, emqx_mgmt_api:paginate(Table, MatchSpec, PageParams, ?FORMAT_CLIENTID_FUN)};
|
||||
clients(post, #{body := Body}) when is_list(Body) ->
|
||||
lists:foreach(fun(#{<<"clientid">> := Clientid, <<"rules">> := Rules}) ->
|
||||
mria:dirty_write(#emqx_acl{
|
||||
who = {?ACL_TABLE_CLIENTID, Clientid},
|
||||
rules = format_rules(Rules)
|
||||
})
|
||||
emqx_authz_mnesia:store_rules({clientid, Clientid}, format_rules(Rules))
|
||||
end, Body),
|
||||
{204}.
|
||||
|
||||
user(get, #{bindings := #{username := Username}}) ->
|
||||
case mnesia:dirty_read(?ACL_TABLE, {?ACL_TABLE_USERNAME, Username}) of
|
||||
[] -> {404, #{code => <<"NOT_FOUND">>, message => <<"Not Found">>}};
|
||||
[#emqx_acl{who = {?ACL_TABLE_USERNAME, Username}, rules = Rules}] ->
|
||||
case emqx_authz_mnesia:get_rules({username, Username}) of
|
||||
not_found -> {404, #{code => <<"NOT_FOUND">>, message => <<"Not Found">>}};
|
||||
{ok, Rules} ->
|
||||
{200, #{username => Username,
|
||||
rules => [ #{topic => Topic,
|
||||
action => Action,
|
||||
|
@ -311,19 +298,16 @@ user(get, #{bindings := #{username := Username}}) ->
|
|||
end;
|
||||
user(put, #{bindings := #{username := Username},
|
||||
body := #{<<"username">> := Username, <<"rules">> := Rules}}) ->
|
||||
mria:dirty_write(#emqx_acl{
|
||||
who = {?ACL_TABLE_USERNAME, Username},
|
||||
rules = format_rules(Rules)
|
||||
}),
|
||||
emqx_authz_mnesia:store_rules({username, Username}, format_rules(Rules)),
|
||||
{204};
|
||||
user(delete, #{bindings := #{username := Username}}) ->
|
||||
mria:dirty_delete({?ACL_TABLE, {?ACL_TABLE_USERNAME, Username}}),
|
||||
emqx_authz_mnesia:delete_rules({username, Username}),
|
||||
{204}.
|
||||
|
||||
client(get, #{bindings := #{clientid := Clientid}}) ->
|
||||
case mnesia:dirty_read(?ACL_TABLE, {?ACL_TABLE_CLIENTID, Clientid}) of
|
||||
[] -> {404, #{code => <<"NOT_FOUND">>, message => <<"Not Found">>}};
|
||||
[#emqx_acl{who = {?ACL_TABLE_CLIENTID, Clientid}, rules = Rules}] ->
|
||||
case emqx_authz_mnesia:get_rules({clientid, Clientid}) of
|
||||
not_found -> {404, #{code => <<"NOT_FOUND">>, message => <<"Not Found">>}};
|
||||
{ok, Rules} ->
|
||||
{200, #{clientid => Clientid,
|
||||
rules => [ #{topic => Topic,
|
||||
action => Action,
|
||||
|
@ -333,20 +317,17 @@ client(get, #{bindings := #{clientid := Clientid}}) ->
|
|||
end;
|
||||
client(put, #{bindings := #{clientid := Clientid},
|
||||
body := #{<<"clientid">> := Clientid, <<"rules">> := Rules}}) ->
|
||||
mria:dirty_write(#emqx_acl{
|
||||
who = {?ACL_TABLE_CLIENTID, Clientid},
|
||||
rules = format_rules(Rules)
|
||||
}),
|
||||
emqx_authz_mnesia:store_rules({clientid, Clientid}, format_rules(Rules)),
|
||||
{204};
|
||||
client(delete, #{bindings := #{clientid := Clientid}}) ->
|
||||
mria:dirty_delete({?ACL_TABLE, {?ACL_TABLE_CLIENTID, Clientid}}),
|
||||
emqx_authz_mnesia:delete_rules({clientid, Clientid}),
|
||||
{204}.
|
||||
|
||||
all(get, _) ->
|
||||
case mnesia:dirty_read(?ACL_TABLE, ?ACL_TABLE_ALL) of
|
||||
[] ->
|
||||
case emqx_authz_mnesia:get_rules(all) of
|
||||
not_found ->
|
||||
{200, #{rules => []}};
|
||||
[#emqx_acl{who = ?ACL_TABLE_ALL, rules = Rules}] ->
|
||||
{ok, Rules} ->
|
||||
{200, #{rules => [ #{topic => Topic,
|
||||
action => Action,
|
||||
permission => Permission
|
||||
|
@ -354,18 +335,13 @@ all(get, _) ->
|
|||
}
|
||||
end;
|
||||
all(put, #{body := #{<<"rules">> := Rules}}) ->
|
||||
mria:dirty_write(#emqx_acl{
|
||||
who = ?ACL_TABLE_ALL,
|
||||
rules = format_rules(Rules)
|
||||
}),
|
||||
emqx_authz_mnesia:store_rules(all, format_rules(Rules)),
|
||||
{204}.
|
||||
|
||||
purge(delete, _) ->
|
||||
case emqx_authz_api_sources:get_raw_source(<<"built-in-database">>) of
|
||||
[#{<<"enable">> := false}] ->
|
||||
ok = lists:foreach(fun(Key) ->
|
||||
ok = mria:dirty_delete(?ACL_TABLE, Key)
|
||||
end, mnesia:dirty_all_keys(?ACL_TABLE)),
|
||||
ok = emqx_authz_mnesia:purge_rules(),
|
||||
{204};
|
||||
[#{<<"enable">> := true}] ->
|
||||
{400, #{code => <<"BAD_REQUEST">>,
|
||||
|
|
|
@ -23,18 +23,14 @@
|
|||
|
||||
-behaviour(application).
|
||||
|
||||
-include("emqx_authz.hrl").
|
||||
|
||||
-export([start/2, stop/1]).
|
||||
|
||||
start(_StartType, _StartArgs) ->
|
||||
ok = mria_rlog:wait_for_shards([?ACL_SHARDED], infinity),
|
||||
ok = emqx_authz_mnesia:init_tables(),
|
||||
{ok, Sup} = emqx_authz_sup:start_link(),
|
||||
ok = emqx_authz:init(),
|
||||
{ok, Sup}.
|
||||
|
||||
stop(_State) ->
|
||||
emqx_conf:remove_handler(?CONF_KEY_PATH),
|
||||
ok = emqx_authz:deinit(),
|
||||
ok.
|
||||
|
||||
%% internal functions
|
||||
|
|
|
@ -0,0 +1,61 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2020-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_authz_file).
|
||||
|
||||
-include("emqx_authz.hrl").
|
||||
-include_lib("emqx/include/logger.hrl").
|
||||
|
||||
-behaviour(emqx_authz).
|
||||
|
||||
-ifdef(TEST).
|
||||
-compile(export_all).
|
||||
-compile(nowarn_export_all).
|
||||
-endif.
|
||||
|
||||
%% APIs
|
||||
-export([ description/0
|
||||
, init/1
|
||||
, destroy/1
|
||||
, dry_run/1
|
||||
, authorize/4
|
||||
]).
|
||||
|
||||
description() ->
|
||||
"AuthZ with static rules".
|
||||
|
||||
init(#{path := Path} = Source) ->
|
||||
Rules = case file:consult(Path) of
|
||||
{ok, Terms} ->
|
||||
[emqx_authz_rule:compile(Term) || Term <- Terms];
|
||||
{error, eacces} ->
|
||||
?SLOG(alert, #{msg => "insufficient_permissions_to_read_file", path => Path}),
|
||||
error(eacces);
|
||||
{error, enoent} ->
|
||||
?SLOG(alert, #{msg => "file_does_not_exist", path => Path}),
|
||||
error(enoent);
|
||||
{error, Reason} ->
|
||||
?SLOG(alert, #{msg => "failed_to_read_file", path => Path, reason => Reason}),
|
||||
error(Reason)
|
||||
end,
|
||||
Source#{annotations => #{rules => Rules}}.
|
||||
|
||||
destroy(_Source) -> ok.
|
||||
|
||||
dry_run(_Source) -> ok.
|
||||
|
||||
authorize(Client, PubSub, Topic, #{annotations := #{rules := Rules}}) ->
|
||||
emqx_authz_rule:matches(Client, PubSub, Topic, Rules).
|
|
@ -21,9 +21,14 @@
|
|||
-include_lib("emqx/include/logger.hrl").
|
||||
-include_lib("emqx/include/emqx_placeholder.hrl").
|
||||
|
||||
-behaviour(emqx_authz).
|
||||
|
||||
%% AuthZ Callbacks
|
||||
-export([ authorize/4
|
||||
, description/0
|
||||
-export([ description/0
|
||||
, init/1
|
||||
, destroy/1
|
||||
, dry_run/1
|
||||
, authorize/4
|
||||
, parse_url/1
|
||||
]).
|
||||
|
||||
|
@ -35,6 +40,21 @@
|
|||
description() ->
|
||||
"AuthZ with http".
|
||||
|
||||
init(#{url := Url} = Source) ->
|
||||
NSource = maps:put(base_url, maps:remove(query, Url), Source),
|
||||
case emqx_authz_utils:create_resource(emqx_connector_http, NSource) of
|
||||
{error, Reason} -> error({load_config_error, Reason});
|
||||
{ok, Id} -> Source#{annotations => #{id => Id}}
|
||||
end.
|
||||
|
||||
destroy(#{annotations := #{id := Id}}) ->
|
||||
ok = emqx_resource:remove(Id).
|
||||
|
||||
dry_run(Source) ->
|
||||
URIMap = maps:get(url, Source),
|
||||
NSource = maps:put(base_url, maps:remove(query, URIMap), Source),
|
||||
emqx_resource:create_dry_run(emqx_connector_http, NSource).
|
||||
|
||||
authorize(Client, PubSub, Topic,
|
||||
#{type := http,
|
||||
url := #{path := Path} = URL,
|
||||
|
|
|
@ -16,14 +16,51 @@
|
|||
|
||||
-module(emqx_authz_mnesia).
|
||||
|
||||
-include("emqx_authz.hrl").
|
||||
-include_lib("emqx/include/emqx.hrl").
|
||||
-include_lib("stdlib/include/ms_transform.hrl").
|
||||
-include_lib("emqx/include/logger.hrl").
|
||||
|
||||
-define(ACL_SHARDED, emqx_acl_sharded).
|
||||
|
||||
-define(ACL_TABLE, emqx_acl).
|
||||
|
||||
%% To save some space, use an integer for label, 0 for 'all', {1, Username} and {2, ClientId}.
|
||||
-define(ACL_TABLE_ALL, 0).
|
||||
-define(ACL_TABLE_USERNAME, 1).
|
||||
-define(ACL_TABLE_CLIENTID, 2).
|
||||
|
||||
-type(username() :: {username, binary()}).
|
||||
-type(clientid() :: {clientid, binary()}).
|
||||
-type(who() :: username() | clientid() | all).
|
||||
|
||||
-type(rule() :: {emqx_authz_rule:permission(), emqx_authz_rule:action(), emqx_topic:topic()}).
|
||||
-type(rules() :: [rule()]).
|
||||
|
||||
-record(emqx_acl, {
|
||||
who :: ?ACL_TABLE_ALL | {?ACL_TABLE_USERNAME, binary()} | {?ACL_TABLE_CLIENTID, binary()},
|
||||
rules :: rules()
|
||||
}).
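
To make the integer labels above concrete, the three who() variants end up stored roughly like this (identities and rules are made up):

%% Illustrative #emqx_acl{} records showing the label encoding above.
[ #emqx_acl{who = 0,                  %% ?ACL_TABLE_ALL: applies to everyone
            rules = [{deny, all, <<"#">>}]}
, #emqx_acl{who = {1, <<"alice">>},   %% ?ACL_TABLE_USERNAME
            rules = [{allow, publish, <<"t/alice/#">>}]}
, #emqx_acl{who = {2, <<"dev-001">>}, %% ?ACL_TABLE_CLIENTID
            rules = [{allow, subscribe, <<"cfg/dev-001">>}]}
].
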
|
||||
|
||||
-behaviour(emqx_authz).
|
||||
|
||||
%% AuthZ Callbacks
|
||||
-export([ mnesia/1
|
||||
-export([ description/0
|
||||
, init/1
|
||||
, destroy/1
|
||||
, dry_run/1
|
||||
, authorize/4
|
||||
, description/0
|
||||
]).
|
||||
|
||||
%% Management API
|
||||
-export([ mnesia/1
|
||||
, init_tables/0
|
||||
, store_rules/2
|
||||
, purge_rules/0
|
||||
, get_rules/1
|
||||
, delete_rules/1
|
||||
, list_clientid_rules/0
|
||||
, list_username_rules/0
|
||||
, record_count/0
|
||||
]).
|
||||
|
||||
-ifdef(TEST).
|
||||
|
@ -42,9 +79,19 @@ mnesia(boot) ->
|
|||
{attributes, record_info(fields, ?ACL_TABLE)},
|
||||
{storage_properties, [{ets, [{read_concurrency, true}]}]}]).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% emqx_authz callbacks
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
description() ->
|
||||
"AuthZ with Mnesia".
|
||||
|
||||
init(Source) -> Source.
|
||||
|
||||
destroy(_Source) -> ok.
|
||||
|
||||
dry_run(_Source) -> ok.
|
||||
|
||||
authorize(#{username := Username,
|
||||
clientid := Clientid
|
||||
} = Client, PubSub, Topic, #{type := 'built-in-database'}) ->
|
||||
|
@ -63,6 +110,78 @@ authorize(#{username := Username,
|
|||
end,
|
||||
do_authorize(Client, PubSub, Topic, Rules).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Management API
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
init_tables() ->
|
||||
ok = mria_rlog:wait_for_shards([?ACL_SHARDED], infinity).
|
||||
|
||||
-spec(store_rules(who(), rules()) -> ok).
|
||||
store_rules({username, Username}, Rules) ->
|
||||
Record = #emqx_acl{who = {?ACL_TABLE_USERNAME, Username}, rules = Rules},
|
||||
mria:dirty_write(Record);
|
||||
store_rules({clientid, Clientid}, Rules) ->
|
||||
Record = #emqx_acl{who = {?ACL_TABLE_CLIENTID, Clientid}, rules = Rules},
|
||||
mria:dirty_write(Record);
|
||||
store_rules(all, Rules) ->
|
||||
Record = #emqx_acl{who = ?ACL_TABLE_ALL, rules = Rules},
|
||||
mria:dirty_write(Record).
|
||||
|
||||
-spec(purge_rules() -> ok).
|
||||
purge_rules() ->
|
||||
ok = lists:foreach(
|
||||
fun(Key) ->
|
||||
ok = mria:dirty_delete(?ACL_TABLE, Key)
|
||||
end,
|
||||
mnesia:dirty_all_keys(?ACL_TABLE)).
|
||||
|
||||
-spec(get_rules(who()) -> {ok, rules()} | not_found).
|
||||
get_rules({username, Username}) ->
|
||||
do_get_rules({?ACL_TABLE_USERNAME, Username});
|
||||
get_rules({clientid, Clientid}) ->
|
||||
do_get_rules({?ACL_TABLE_CLIENTID, Clientid});
|
||||
get_rules(all) ->
|
||||
do_get_rules(?ACL_TABLE_ALL).
|
||||
|
||||
-spec(delete_rules(who()) -> ok).
|
||||
delete_rules({username, Username}) ->
|
||||
mria:dirty_delete(?ACL_TABLE, {?ACL_TABLE_USERNAME, Username});
|
||||
delete_rules({clientid, Clientid}) ->
|
||||
mria:dirty_delete(?ACL_TABLE, {?ACL_TABLE_CLIENTID, Clientid});
|
||||
delete_rules(all) ->
|
||||
mria:dirty_delete(?ACL_TABLE, ?ACL_TABLE_ALL).
|
||||
|
||||
-spec(list_username_rules() -> {mria:table(), ets:match_spec()}).
|
||||
list_username_rules() ->
|
||||
MatchSpec = ets:fun2ms(
|
||||
fun(#emqx_acl{who = {?ACL_TABLE_USERNAME, Username}, rules = Rules}) ->
|
||||
[{username, Username}, {rules, Rules}]
|
||||
end),
|
||||
{?ACL_TABLE, MatchSpec}.
|
||||
|
||||
-spec(list_clientid_rules() -> {mria:table(), ets:match_spec()}).
|
||||
list_clientid_rules() ->
|
||||
MatchSpec = ets:fun2ms(
|
||||
fun(#emqx_acl{who = {?ACL_TABLE_CLIENTID, Clientid}, rules = Rules}) ->
|
||||
[{clientid, Clientid}, {rules, Rules}]
|
||||
end),
|
||||
{?ACL_TABLE, MatchSpec}.
|
||||
|
||||
-spec(record_count() -> non_neg_integer()).
|
||||
record_count() ->
|
||||
mnesia:table_info(?ACL_TABLE, size).
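
Taken together these functions give a small CRUD surface over the ACL table. An illustrative round trip, assuming init_tables/0 has run and the table starts out empty:

%% Illustrative round trip through the management API above
%% (assumes an otherwise empty table; identities are made up).
ok = emqx_authz_mnesia:store_rules({username, <<"alice">>},
                                   [{allow, publish, <<"t/alice/#">>}]),
{ok, [{allow, publish, <<"t/alice/#">>}]} =
    emqx_authz_mnesia:get_rules({username, <<"alice">>}),
1  = emqx_authz_mnesia:record_count(),
ok = emqx_authz_mnesia:delete_rules({username, <<"alice">>}),
not_found = emqx_authz_mnesia:get_rules({username, <<"alice">>}),
ok = emqx_authz_mnesia:purge_rules().
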
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
do_get_rules(Key) ->
|
||||
case mnesia:dirty_read(?ACL_TABLE, Key) of
|
||||
[#emqx_acl{rules = Rules}] -> {ok, Rules};
|
||||
[] -> not_found
|
||||
end.
|
||||
|
||||
do_authorize(_Client, _PubSub, _Topic, []) -> nomatch;
|
||||
do_authorize(Client, PubSub, Topic, [ {Permission, Action, TopicFilter} | Tail]) ->
|
||||
case emqx_authz_rule:match(Client, PubSub, Topic,
|
||||
|
|
|
@ -21,9 +21,14 @@
|
|||
-include_lib("emqx/include/logger.hrl").
|
||||
-include_lib("emqx/include/emqx_placeholder.hrl").
|
||||
|
||||
-behaviour(emqx_authz).
|
||||
|
||||
%% AuthZ Callbacks
|
||||
-export([ authorize/4
|
||||
, description/0
|
||||
-export([ description/0
|
||||
, init/1
|
||||
, destroy/1
|
||||
, dry_run/1
|
||||
, authorize/4
|
||||
]).
|
||||
|
||||
-ifdef(TEST).
|
||||
|
@ -34,6 +39,18 @@
|
|||
description() ->
|
||||
"AuthZ with MongoDB".
|
||||
|
||||
init(Source) ->
|
||||
case emqx_authz_utils:create_resource(emqx_connector_mongo, Source) of
|
||||
{error, Reason} -> error({load_config_error, Reason});
|
||||
{ok, Id} -> Source#{annotations => #{id => Id}}
|
||||
end.
|
||||
|
||||
dry_run(Source) ->
|
||||
emqx_resource:create_dry_run(emqx_connector_mongo, Source).
|
||||
|
||||
destroy(#{annotations := #{id := Id}}) ->
|
||||
ok = emqx_resource:remove(Id).
|
||||
|
||||
authorize(Client, PubSub, Topic,
|
||||
#{collection := Collection,
|
||||
selector := Selector,
|
||||
|
|
|
@ -21,9 +21,13 @@
|
|||
-include_lib("emqx/include/logger.hrl").
|
||||
-include_lib("emqx/include/emqx_placeholder.hrl").
|
||||
|
||||
-behaviour(emqx_authz).
|
||||
|
||||
%% AuthZ Callbacks
|
||||
-export([ description/0
|
||||
, parse_query/1
|
||||
, init/1
|
||||
, destroy/1
|
||||
, dry_run/1
|
||||
, authorize/4
|
||||
]).
|
||||
|
||||
|
@ -35,6 +39,20 @@
|
|||
description() ->
|
||||
"AuthZ with Mysql".
|
||||
|
||||
init(#{query := SQL} = Source) ->
|
||||
case emqx_authz_utils:create_resource(emqx_connector_mysql, Source) of
|
||||
{error, Reason} -> error({load_config_error, Reason});
|
||||
{ok, Id} -> Source#{annotations =>
|
||||
#{id => Id,
|
||||
query => parse_query(SQL)}}
|
||||
end.
|
||||
|
||||
dry_run(Source) ->
|
||||
emqx_resource:create_dry_run(emqx_connector_mysql, Source).
|
||||
|
||||
destroy(#{annotations := #{id := Id}}) ->
|
||||
ok = emqx_resource:remove(Id).
|
||||
|
||||
parse_query(undefined) ->
|
||||
undefined;
|
||||
parse_query(Sql) ->
|
||||
|
|
|
@ -21,9 +21,13 @@
|
|||
-include_lib("emqx/include/logger.hrl").
|
||||
-include_lib("emqx/include/emqx_placeholder.hrl").
|
||||
|
||||
-behaviour(emqx_authz).
|
||||
|
||||
%% AuthZ Callbacks
|
||||
-export([ description/0
|
||||
, parse_query/1
|
||||
, init/1
|
||||
, destroy/1
|
||||
, dry_run/1
|
||||
, authorize/4
|
||||
]).
|
||||
|
||||
|
@ -33,7 +37,21 @@
|
|||
-endif.
|
||||
|
||||
description() ->
|
||||
"AuthZ with postgresql".
|
||||
"AuthZ with Postgresql".
|
||||
|
||||
init(#{query := SQL} = Source) ->
|
||||
case emqx_authz_utils:create_resource(emqx_connector_pgsql, Source) of
|
||||
{error, Reason} -> error({load_config_error, Reason});
|
||||
{ok, Id} -> Source#{annotations =>
|
||||
#{id => Id,
|
||||
query => parse_query(SQL)}}
|
||||
end.
|
||||
|
||||
destroy(#{annotations := #{id := Id}}) ->
|
||||
ok = emqx_resource:remove(Id).
|
||||
|
||||
dry_run(Source) ->
|
||||
emqx_resource:create_dry_run(emqx_connector_pgsql, Source).
|
||||
|
||||
parse_query(undefined) ->
|
||||
undefined;
|
||||
|
|
|
@ -21,9 +21,14 @@
|
|||
-include_lib("emqx/include/logger.hrl").
|
||||
-include_lib("emqx/include/emqx_placeholder.hrl").
|
||||
|
||||
-behaviour(emqx_authz).
|
||||
|
||||
%% AuthZ Callbacks
|
||||
-export([ authorize/4
|
||||
, description/0
|
||||
-export([ description/0
|
||||
, init/1
|
||||
, destroy/1
|
||||
, dry_run/1
|
||||
, authorize/4
|
||||
]).
|
||||
|
||||
-ifdef(TEST).
|
||||
|
@ -32,7 +37,19 @@
|
|||
-endif.
|
||||
|
||||
description() ->
|
||||
"AuthZ with redis".
|
||||
"AuthZ with Redis".
|
||||
|
||||
init(Source) ->
|
||||
case emqx_authz_utils:create_resource(emqx_connector_redis, Source) of
|
||||
{error, Reason} -> error({load_config_error, Reason});
|
||||
{ok, Id} -> Source#{annotations => #{id => Id}}
|
||||
end.
|
||||
|
||||
destroy(#{annotations := #{id := Id}}) ->
|
||||
ok = emqx_resource:remove(Id).
|
||||
|
||||
dry_run(Source) ->
|
||||
emqx_resource:create_dry_run(emqx_connector_redis, Source).
|
||||
|
||||
authorize(Client, PubSub, Topic,
|
||||
#{cmd := CMD,
|
||||
|
|
|
@ -31,7 +31,26 @@
|
|||
, compile/1
|
||||
]).
|
||||
|
||||
-export_type([rule/0]).
|
||||
-type(ipaddress() :: {ipaddr, esockd_cidr:cidr_string()} |
|
||||
{ipaddrs, list(esockd_cidr:cidr_string())}).
|
||||
|
||||
-type(username() :: {username, binary()}).
|
||||
|
||||
-type(clientid() :: {clientid, binary()}).
|
||||
|
||||
-type(who() :: ipaddress() | username() | clientid() |
|
||||
{'and', [ipaddress() | username() | clientid()]} |
|
||||
{'or', [ipaddress() | username() | clientid()]} |
|
||||
all).
|
||||
|
||||
-type(action() :: subscribe | publish | all).
|
||||
-type(permission() :: allow | deny).
|
||||
|
||||
-type(rule() :: {permission(), who(), action(), list(emqx_types:topic())}).
|
||||
|
||||
-export_type([ action/0
|
||||
, permission/0
|
||||
]).
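
Concretely, a rule() pairs a permission with a who() matcher, an action and a topic list. Some illustrative terms of the shape described by the types above (identities and topics are made up):

%% Illustrative rule() terms matching the types above (made-up values).
{allow, {username, <<"dashboard">>}, subscribe, [<<"$SYS/#">>]}.
{deny,  {ipaddr, "10.0.0.0/8"}, publish, [<<"cmd/#">>]}.
{allow, {'and', [{username, <<"gw">>}, {clientid, <<"gw-01">>}]}, all, [<<"gw/#">>]}.
{deny,  all, all, [<<"#">>]}.
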
|
||||
|
||||
compile({Permission, all})
|
||||
when ?ALLOW_DENY(Permission) -> {Permission, all, all, [compile_topic(<<"#">>)]};
|
||||
|
|
|
@ -0,0 +1,54 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_authz_utils).
|
||||
|
||||
-include_lib("emqx/include/emqx_placeholder.hrl").
|
||||
|
||||
-export([cleanup_resources/0,
|
||||
make_resource_id/1,
|
||||
create_resource/2]).
|
||||
|
||||
-define(RESOURCE_GROUP, <<"emqx_authz">>).
|
||||
|
||||
%%------------------------------------------------------------------------------
|
||||
%% APIs
|
||||
%%------------------------------------------------------------------------------
|
||||
|
||||
create_resource(Module, Config) ->
|
||||
ResourceID = make_resource_id(Module),
|
||||
case emqx_resource:create(ResourceID, Module, Config) of
|
||||
{ok, already_created} -> {ok, ResourceID};
|
||||
{ok, _} -> {ok, ResourceID};
|
||||
{error, Reason} -> {error, Reason}
|
||||
end.
|
||||
|
||||
cleanup_resources() ->
|
||||
lists:foreach(
|
||||
fun emqx_resource:remove/1,
|
||||
emqx_resource:list_group_instances(?RESOURCE_GROUP)).
|
||||
|
||||
make_resource_id(Name) ->
|
||||
NameBin = bin(Name),
|
||||
emqx_resource:generate_id(?RESOURCE_GROUP, NameBin).
|
||||
|
||||
%%------------------------------------------------------------------------------
|
||||
%% Internal functions
|
||||
%%------------------------------------------------------------------------------
|
||||
|
||||
bin(A) when is_atom(A) -> atom_to_binary(A, utf8);
|
||||
bin(L) when is_list(L) -> list_to_binary(L);
|
||||
bin(X) -> X.
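
Resource IDs are generated under the ?RESOURCE_GROUP group, which is what lets cleanup_resources/0 find and remove everything the authz application created. An illustrative lifecycle (the connector module and Config are placeholders):

%% Illustrative lifecycle built on the helpers above.
{ok, Id} = emqx_authz_utils:create_resource(emqx_connector_redis, Config),
%% ... the backend keeps Id in its annotations and queries it later ...
ok = emqx_authz_utils:cleanup_resources().  %% drops every authz-owned resource
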
|
|
@ -33,7 +33,8 @@ init_per_suite(Config) ->
|
|||
meck:new(emqx_resource, [non_strict, passthrough, no_history, no_link]),
|
||||
meck:expect(emqx_resource, create, fun(_, _, _) -> {ok, meck_data} end),
|
||||
meck:expect(emqx_resource, update, fun(_, _, _, _) -> {ok, meck_data} end),
|
||||
meck:expect(emqx_resource, remove, fun(_) -> ok end ),
|
||||
meck:expect(emqx_resource, remove, fun(_) -> ok end),
|
||||
meck:expect(emqx_resource, create_dry_run, fun(_, _) -> ok end),
|
||||
|
||||
ok = emqx_common_test_helpers:start_apps(
|
||||
[emqx_connector, emqx_conf, emqx_authz],
|
||||
|
@ -137,6 +138,13 @@ t_update_source(_) ->
|
|||
, #{type := file, enable := true}
|
||||
], emqx_conf:get([authorization, sources], [])),
|
||||
|
||||
{ok, _} = emqx_authz:update({?CMD_REPLACE, http}, ?SOURCE1#{<<"enable">> := true}),
|
||||
{ok, _} = emqx_authz:update({?CMD_REPLACE, mongodb}, ?SOURCE2#{<<"enable">> := true}),
|
||||
{ok, _} = emqx_authz:update({?CMD_REPLACE, mysql}, ?SOURCE3#{<<"enable">> := true}),
|
||||
{ok, _} = emqx_authz:update({?CMD_REPLACE, postgresql}, ?SOURCE4#{<<"enable">> := true}),
|
||||
{ok, _} = emqx_authz:update({?CMD_REPLACE, redis}, ?SOURCE5#{<<"enable">> := true}),
|
||||
{ok, _} = emqx_authz:update({?CMD_REPLACE, file}, ?SOURCE6#{<<"enable">> := true}),
|
||||
|
||||
{ok, _} = emqx_authz:update({?CMD_REPLACE, http}, ?SOURCE1#{<<"enable">> := false}),
|
||||
{ok, _} = emqx_authz:update({?CMD_REPLACE, mongodb}, ?SOURCE2#{<<"enable">> := false}),
|
||||
{ok, _} = emqx_authz:update({?CMD_REPLACE, mysql}, ?SOURCE3#{<<"enable">> := false}),
|
||||
|
|
|
@ -217,7 +217,7 @@ t_api(_) ->
|
|||
request( delete
|
||||
, uri(["authorization", "sources", "built-in-database", "purge-all"])
|
||||
, []),
|
||||
?assertEqual([], mnesia:dirty_all_keys(?ACL_TABLE)),
|
||||
?assertEqual(0, emqx_authz_mnesia:record_count()),
|
||||
ok.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
|
|
|
@ -55,24 +55,25 @@ set_special_configs(_App) ->
|
|||
ok.
|
||||
|
||||
init_per_testcase(t_authz, Config) ->
|
||||
mria:dirty_write(#emqx_acl{who = {?ACL_TABLE_USERNAME, <<"test_username">>},
|
||||
rules = [{allow, publish, <<"test/", ?PH_S_USERNAME>>},
|
||||
{allow, subscribe, <<"eq #">>}
|
||||
]
|
||||
}),
|
||||
mria:dirty_write(#emqx_acl{who = {?ACL_TABLE_CLIENTID, <<"test_clientid">>},
|
||||
rules = [{allow, publish, <<"test/", ?PH_S_CLIENTID>>},
|
||||
{deny, subscribe, <<"eq #">>}
|
||||
]
|
||||
}),
|
||||
mria:dirty_write(#emqx_acl{who = ?ACL_TABLE_ALL,
|
||||
rules = [{deny, all, <<"#">>}]
|
||||
}),
|
||||
emqx_authz_mnesia:store_rules(
|
||||
{username, <<"test_username">>},
|
||||
[{allow, publish, <<"test/", ?PH_S_USERNAME>>},
|
||||
{allow, subscribe, <<"eq #">>}]),
|
||||
|
||||
emqx_authz_mnesia:store_rules(
|
||||
{clientid, <<"test_clientid">>},
|
||||
[{allow, publish, <<"test/", ?PH_S_CLIENTID>>},
|
||||
{deny, subscribe, <<"eq #">>}]),
|
||||
|
||||
emqx_authz_mnesia:store_rules(
|
||||
all,
|
||||
[{deny, all, <<"#">>}]),
|
||||
|
||||
Config;
|
||||
init_per_testcase(_, Config) -> Config.
|
||||
|
||||
end_per_testcase(t_authz, Config) ->
|
||||
[ mria:dirty_delete(?ACL_TABLE, K) || K <- mnesia:dirty_all_keys(?ACL_TABLE)],
|
||||
ok = emqx_authz_mnesia:purge_rules(),
|
||||
Config;
|
||||
end_per_testcase(_, Config) -> Config.
|
||||
|
||||
|
|
|
@ -50,6 +50,7 @@
|
|||
, emqx_authz_schema
|
||||
, emqx_auto_subscribe_schema
|
||||
, emqx_modules_schema
|
||||
, emqx_plugins_schema
|
||||
, emqx_dashboard_schema
|
||||
, emqx_gateway_schema
|
||||
, emqx_prometheus_schema
|
||||
|
@ -58,6 +59,7 @@
|
|||
, emqx_psk_schema
|
||||
, emqx_limiter_schema
|
||||
, emqx_connector_schema
|
||||
, emqx_slow_subs_schema
|
||||
]).
|
||||
|
||||
namespace() -> undefined.
|
||||
|
@ -96,6 +98,10 @@ roots() ->
|
|||
sc(ref("db"),
|
||||
#{ desc => "Settings of the embedded database."
|
||||
})}
|
||||
, {"system_monitor",
|
||||
sc(ref("system_monitor"),
|
||||
#{ desc => "Erlang process and application monitoring."
|
||||
})}
|
||||
] ++
|
||||
emqx_schema:roots(medium) ++
|
||||
emqx_schema:roots(low) ++
|
||||
|
@ -316,6 +322,64 @@ a crash dump
|
|||
)}
|
||||
];
|
||||
|
||||
fields("system_monitor") ->
|
||||
[ {"top_num_items",
|
||||
sc(non_neg_integer(),
|
||||
#{ mapping => "system_monitor.top_num_items"
|
||||
, default => 10
|
||||
, desc => "The number of top processes per monitoring group"
|
||||
})
|
||||
}
|
||||
, {"top_sample_interval",
|
||||
sc(emqx_schema:duration(),
|
||||
#{ mapping => "system_monitor.top_sample_interval"
|
||||
, default => "2s"
|
||||
, desc => "Specifies how often process top should be collected"
|
||||
})
|
||||
}
|
||||
, {"top_max_procs",
|
||||
sc(non_neg_integer(),
|
||||
#{ mapping => "system_monitor.top_max_procs"
|
||||
, default => 200000
|
||||
, desc => "Stop collecting data when the number of processes exceeds this value"
|
||||
})
|
||||
}
|
||||
, {"db_hostname",
|
||||
sc(string(),
|
||||
#{ mapping => "system_monitor.db_hostname"
|
||||
, desc => "Hostname of the postgres database that collects the data points"
|
||||
})
|
||||
}
|
||||
, {"db_port",
|
||||
sc(integer(),
|
||||
#{ mapping => "system_monitor.db_port"
|
||||
, default => 5432
|
||||
, desc => "Port of the postgres database that collects the data points"
|
||||
})
|
||||
}
|
||||
, {"db_username",
|
||||
sc(string(),
|
||||
#{ mapping => "system_monitor.db_username"
|
||||
, default => "system_monitor"
|
||||
, desc => "EMQX user name in the postgres database"
|
||||
})
|
||||
}
|
||||
, {"db_password",
|
||||
sc(binary(),
|
||||
#{ mapping => "system_monitor.db_password"
|
||||
, default => "system_monitor_password"
|
||||
, desc => "EMQX user password in the postgres database"
|
||||
})
|
||||
}
|
||||
, {"db_name",
|
||||
sc(string(),
|
||||
#{ mapping => "system_monitor.db_name"
|
||||
, default => "postgres"
|
||||
, desc => "Postgres database name"
|
||||
})
|
||||
}
|
||||
];
|
||||
|
||||
fields("db") ->
|
||||
[ {"backend",
|
||||
sc(hoconsc:enum([mnesia, rlog]),
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
{deps, [
|
||||
{eldap2, {git, "https://github.com/emqx/eldap2", {tag, "v0.2.2"}}},
|
||||
{mysql, {git, "https://github.com/emqx/mysql-otp", {tag, "1.7.1"}}},
|
||||
{epgsql, {git, "https://github.com/epgsql/epgsql", {tag, "4.4.0"}}},
|
||||
{epgsql, {git, "https://github.com/emqx/epgsql", {tag, "4.6.0"}}},
|
||||
%% NOTE: mind poolboy version when updating mongodb-erlang version
|
||||
{mongodb, {git,"https://github.com/emqx/mongodb-erlang", {tag, "v3.0.10"}}},
|
||||
%% NOTE: mind poolboy version when updating eredis_cluster version
|
||||
|
|
|
@ -36,7 +36,9 @@
|
|||
|
||||
-export([ roots/0
|
||||
, fields/1
|
||||
, validations/0]).
|
||||
, validations/0
|
||||
, namespace/0
|
||||
]).
|
||||
|
||||
-export([ check_ssl_opts/2
|
||||
]).
|
||||
|
@ -50,6 +52,9 @@
|
|||
|
||||
%%=====================================================================
|
||||
%% Hocon schema
|
||||
|
||||
namespace() -> "connector-http".
|
||||
|
||||
roots() ->
|
||||
fields(config).
|
||||
|
||||
|
|
|
@ -224,6 +224,15 @@ destroy_token_by_username(Username, Token) ->
|
|||
%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
|
||||
hash(Password) ->
|
||||
SaltBin = emqx_dashboard_token:salt(),
|
||||
<<SaltBin/binary, (sha256(SaltBin, Password))/binary>>.
|
||||
|
||||
sha256(SaltBin, Password) ->
|
||||
crypto:hash('sha256', <<SaltBin/binary, Password/binary>>).
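
hash/1 stores the salt and the SHA-256 digest of salt ++ password in a single binary. A hedged sketch of the matching check; it assumes the salt produced by emqx_dashboard_token:salt/0 is 4 bytes, which may not hold:

%% Hedged sketch of the check that pairs with hash/1 above.
%% Assumes a 4-byte salt; adjust the pattern if salt/0 differs.
verify(Password, <<Salt:4/binary, Hash/binary>>) ->
    case crypto:hash(sha256, <<Salt/binary, Password/binary>>) of
        Hash -> ok;
        _    -> {error, bad_password}
    end.
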
|
||||
|
||||
-spec(add_default_user() -> {ok, map() | empty | default_user_exists } | {error, any()}).
|
||||
add_default_user() ->
|
||||
add_default_user(binenv(default_username), binenv(default_password)).
|
||||
|
||||
|
@ -231,7 +240,8 @@ binenv(Key) ->
|
|||
iolist_to_binary(emqx_conf:get([emqx_dashboard, Key], "")).
|
||||
|
||||
add_default_user(Username, Password) when ?EMPTY_KEY(Username) orelse ?EMPTY_KEY(Password) ->
|
||||
{ok, default_not_found};
|
||||
{ok, empty};
|
||||
|
||||
add_default_user(Username, Password) ->
|
||||
case lookup_user(Username) of
|
||||
[] -> add_user(Username, Password, <<"administrator">>);
|
||||
|
|
|
@ -29,6 +29,8 @@
|
|||
, sent
|
||||
, dropped]).
|
||||
|
||||
-define(EMPTY_COLLECTION, {0, 0, 0, 0}).
|
||||
|
||||
api_spec() ->
|
||||
{[ monitor_api()
|
||||
, monitor_nodes_api()
|
||||
|
@ -175,7 +177,7 @@ current_counters(get, _Params) ->
|
|||
{200, Response}.
|
||||
|
||||
format_current_metrics(Collects) ->
|
||||
format_current_metrics(Collects, {0,0,0,0}).
|
||||
format_current_metrics(Collects, ?EMPTY_COLLECTION).
|
||||
format_current_metrics([], Acc) ->
|
||||
Acc;
|
||||
format_current_metrics([{Received, Sent, Sub, Conn} | Collects],
|
||||
|
@ -217,7 +219,7 @@ get_collect(Node) when Node =:= node() ->
|
|||
emqx_dashboard_collection:get_collect();
|
||||
get_collect(Node) ->
|
||||
case rpc:call(Node, emqx_dashboard_collection, get_collect, []) of
|
||||
{badrpc, _Reason} -> #{};
|
||||
{badrpc, _Reason} -> ?EMPTY_COLLECTION;
|
||||
Res -> Res
|
||||
end.
|
||||
|
||||
|
|
|
@ -437,8 +437,15 @@ typename_to_spec("log_level()", _Mod) ->
|
|||
};
|
||||
typename_to_spec("rate()", _Mod) ->
|
||||
#{type => string, example => <<"10M/s">>};
|
||||
typename_to_spec("bucket_rate()", _Mod) ->
|
||||
#{type => string, example => <<"10M/s, 100M">>};
|
||||
typename_to_spec("capacity()", _Mod) ->
|
||||
#{type => string, example => <<"100M">>};
|
||||
typename_to_spec("burst_rate()", _Mod) ->
|
||||
%% 0/0s = no burst
|
||||
#{type => string, example => <<"10M/1s">>};
|
||||
typename_to_spec("failure_strategy()", _Mod) ->
|
||||
#{type => string, example => <<"force">>};
|
||||
typename_to_spec("initial()", _Mod) ->
|
||||
#{type => string, example => <<"0M">>};
|
||||
typename_to_spec(Name, Mod) ->
|
||||
Spec = range(Name),
|
||||
Spec1 = remote_module_type(Spec, Name, Mod),
|
||||
|
|
|
@ -0,0 +1,121 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2020-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_dashboard_monitor_api_SUITE).
|
||||
|
||||
-compile(nowarn_export_all).
|
||||
-compile(export_all).
|
||||
|
||||
-include_lib("eunit/include/eunit.hrl").
|
||||
-include_lib("common_test/include/ct.hrl").
|
||||
-include_lib("emqx/include/emqx.hrl").
|
||||
-include("emqx_dashboard.hrl").
|
||||
|
||||
all() ->
|
||||
emqx_common_test_helpers:all(?MODULE).
|
||||
|
||||
init_per_testcase(t_badrpc_collect, Config) ->
|
||||
Cluster = cluster_specs(2),
|
||||
Apps = [emqx_modules, emqx_dashboard],
|
||||
Nodes = [N1, N2] = lists:map(fun(Spec) -> start_slave(Spec, Apps) end, Cluster),
|
||||
%% form the cluster
|
||||
ok = rpc:call(N2, mria, join, [N1]),
|
||||
%% Wait until all nodes are healthy:
|
||||
[rpc:call(Node, mria_rlog, wait_for_shards, [[?DASHBOARD_SHARD], 5000])
|
||||
|| Node <- Nodes],
|
||||
[ {nodes, Nodes}
|
||||
, {apps, Apps}
|
||||
| Config];
|
||||
init_per_testcase(_, Config) ->
|
||||
Config.
|
||||
|
||||
end_per_testcase(t_badrpc_collect, Config) ->
|
||||
Apps = ?config(apps, Config),
|
||||
Nodes = ?config(nodes, Config),
|
||||
lists:foreach(fun(Node) -> stop_slave(Node, Apps) end, Nodes),
|
||||
ok;
|
||||
end_per_testcase(_, _Config) ->
|
||||
ok.
|
||||
|
||||
t_badrpc_collect(Config) ->
|
||||
[N1, N2] = ?config(nodes, Config),
|
||||
%% simulate badrpc on one node
|
||||
ok = rpc:call(N2, meck, new, [emqx_dashboard_collection, [no_history, no_link]]),
|
||||
%% emqx_dashboard_collection:get_collect/0 is deliberately left unmocked,
|
||||
%% so the remote call fails and provokes the `badrpc' error.
|
||||
?assertMatch(
|
||||
{200, #{nodes := 2}},
|
||||
rpc:call(N1, emqx_dashboard_monitor_api, current_counters, [get, #{}])),
|
||||
ok = rpc:call(N2, meck, unload, [emqx_dashboard_collection]),
|
||||
ok.
|
||||
|
||||
%%------------------------------------------------------------------------------
|
||||
%% Internal functions
|
||||
%%------------------------------------------------------------------------------
|
||||
|
||||
cluster_specs(NumNodes) ->
|
||||
BaseGenRpcPort = 9000,
|
||||
Specs0 = [#{ name => node_name(N)
|
||||
, num => N
|
||||
}
|
||||
|| N <- lists:seq(1, NumNodes)],
|
||||
GenRpcPorts = maps:from_list([{node_id(Name), {tcp, BaseGenRpcPort + N}}
|
||||
|| #{name := Name, num := N} <- Specs0]),
|
||||
[ Spec#{env => [ {gen_rpc, tcp_server_port, BaseGenRpcPort + N}
|
||||
, {gen_rpc, client_config_per_node, {internal, GenRpcPorts}}
|
||||
]}
|
||||
|| Spec = #{num := N} <- Specs0].
|
||||
|
||||
node_name(N) ->
|
||||
list_to_atom("n" ++ integer_to_list(N)).
|
||||
|
||||
node_id(Name) ->
|
||||
list_to_atom(lists:concat([Name, "@", host()])).
|
||||
|
||||
start_slave(Spec = #{ name := Name}, Apps) ->
|
||||
CommonBeamOpts = "+S 1:1 ", % We want VMs to only occupy a single core
|
||||
{ok, Node} = slave:start_link(host(), Name, CommonBeamOpts ++ ebin_path()),
|
||||
setup_node(Node, Spec, Apps),
|
||||
Node.
|
||||
|
||||
stop_slave(Node, Apps) ->
|
||||
ok = rpc:call(Node, emqx_common_test_helpers, stop_apps, [Apps]),
|
||||
slave:stop(Node).
|
||||
|
||||
host() ->
|
||||
[_, Host] = string:tokens(atom_to_list(node()), "@"), Host.
|
||||
|
||||
ebin_path() ->
|
||||
string:join(["-pa" | lists:filter(fun is_lib/1, code:get_path())], " ").
|
||||
|
||||
is_lib(Path) ->
|
||||
string:prefix(Path, code:lib_dir()) =:= nomatch.
|
||||
|
||||
setenv(Node, Env) ->
|
||||
[rpc:call(Node, application, set_env, [App, Key, Val]) || {App, Key, Val} <- Env].
|
||||
|
||||
setup_node(Node, _Spec = #{env := Env}, Apps) ->
|
||||
%% load these before starting ekka and such
|
||||
[rpc:call(Node, application, load, [App]) || App <- [gen_rpc, emqx_conf, emqx]],
|
||||
setenv(Node, Env),
|
||||
EnvHandler =
|
||||
fun(emqx) ->
|
||||
application:set_env(emqx, boot_modules, [router, broker]);
|
||||
(_) ->
|
||||
ok
|
||||
end,
|
||||
ok = rpc:call(Node, emqx_common_test_helpers, start_apps, [Apps, EnvHandler]),
|
||||
ok.
|
|
@ -79,9 +79,9 @@ gateway(post, Request) ->
|
|||
undefined -> error(badarg);
|
||||
_ ->
|
||||
GwConf = maps:without([<<"name">>], Body),
|
||||
case emqx_gateway_conf:load_gateway(GwName, GwConf) of
|
||||
ok ->
|
||||
{204};
|
||||
case emqx_gateway_conf:load_gateway(GwName, GwConf) of
|
||||
{ok, NGwConf} ->
|
||||
{201, NGwConf};
|
||||
{error, Reason} ->
|
||||
return_http_error(500, Reason)
|
||||
end
|
||||
|
@ -126,13 +126,15 @@ gateway_insta(get, #{bindings := #{name := Name0}}) ->
|
|||
error : badarg ->
|
||||
return_http_error(400, "Bad gateway name")
|
||||
end;
|
||||
gateway_insta(put, #{body := GwConf,
|
||||
gateway_insta(put, #{body := GwConf0,
|
||||
bindings := #{name := Name0}
|
||||
}) ->
|
||||
with_gateway(Name0, fun(GwName, _) ->
|
||||
%% XXX: Clear the unused fields
|
||||
GwConf = maps:without([<<"name">>], GwConf0),
|
||||
case emqx_gateway_conf:update_gateway(GwName, GwConf) of
|
||||
ok ->
|
||||
{204};
|
||||
{ok, Gateway} ->
|
||||
{200, Gateway};
|
||||
{error, Reason} ->
|
||||
return_http_error(500, Reason)
|
||||
end
|
||||
|
@ -151,10 +153,14 @@ schema("/gateway") ->
|
|||
#{ description => <<"Get gateway list">>
|
||||
, parameters => params_gateway_status_in_qs()
|
||||
, responses =>
|
||||
?STANDARD_RESP(#{200 => ref(gateway_overview)})
|
||||
?STANDARD_RESP(
|
||||
#{200 => emqx_dashboard_swagger:schema_with_example(
|
||||
hoconsc:array(ref(gateway_overview)),
|
||||
examples_gateway_overview())})
|
||||
},
|
||||
post =>
|
||||
#{ description => <<"Load a gateway">>
|
||||
%% TODO: distinguish create & response swagger schema
|
||||
, 'requestBody' => schema_gateways_conf()
|
||||
, responses =>
|
||||
?STANDARD_RESP(#{201 => schema_gateways_conf()})
|
||||
|
@ -177,7 +183,7 @@ schema("/gateway/:name") ->
|
|||
put =>
|
||||
#{ description => <<"Update the gateway configurations/status">>
|
||||
, parameters => params_gateway_name_in_path()
|
||||
, 'requestBody' => schema_gateways_conf()
|
||||
, 'requestBody' => schema_update_gateways_conf()
|
||||
, responses =>
|
||||
?STANDARD_RESP(#{200 => schema_gateways_conf()})
|
||||
}
|
||||
|
@ -204,15 +210,18 @@ params_gateway_name_in_path() ->
|
|||
mk(binary(),
|
||||
#{ in => path
|
||||
, desc => <<"Gateway Name">>
|
||||
, example => <<"">>
|
||||
})}
|
||||
].
|
||||
|
||||
params_gateway_status_in_qs() ->
|
||||
%% FIXME: enum in swagger ??
|
||||
[{status,
|
||||
mk(binary(),
|
||||
#{ in => query
|
||||
, nullable => true
|
||||
, desc => <<"Gateway Status">>
|
||||
, example => <<"">>
|
||||
})}
|
||||
].
|
||||
|
||||
|
@ -226,20 +235,20 @@ roots() ->
|
|||
|
||||
fields(gateway_overview) ->
|
||||
[ {name,
|
||||
mk(string(),
|
||||
mk(binary(),
|
||||
#{ desc => <<"Gateway Name">>})}
|
||||
, {status,
|
||||
mk(hoconsc:enum([running, stopped, unloaded]),
|
||||
#{ desc => <<"The Gateway status">>})}
|
||||
, {created_at,
|
||||
mk(string(),
|
||||
mk(binary(),
|
||||
#{desc => <<"The Gateway created datetime">>})}
|
||||
, {started_at,
|
||||
mk(string(),
|
||||
mk(binary(),
|
||||
#{ nullable => true
|
||||
, desc => <<"The Gateway started datetime">>})}
|
||||
, {stopped_at,
|
||||
mk(string(),
|
||||
mk(binary(),
|
||||
#{ nullable => true
|
||||
, desc => <<"The Gateway stopped datetime">>})}
|
||||
, {max_connections,
|
||||
|
@ -256,7 +265,7 @@ fields(gateway_overview) ->
|
|||
];
|
||||
fields(gateway_listener_overview) ->
|
||||
[ {id,
|
||||
mk(string(),
|
||||
mk(binary(),
|
||||
#{ desc => <<"Listener ID">>})}
|
||||
, {running,
|
||||
mk(boolean(),
|
||||
|
@ -270,21 +279,29 @@ fields(Gw) when Gw == stomp; Gw == mqttsn;
|
|||
Gw == coap; Gw == lwm2m;
|
||||
Gw == exproto ->
|
||||
[{name,
|
||||
mk(string(), #{ desc => <<"Gateway Name">>})}
|
||||
mk(hoconsc:union([Gw]), #{ desc => <<"Gateway Name">>})}
|
||||
] ++ convert_listener_struct(emqx_gateway_schema:fields(Gw));
|
||||
|
||||
fields(Gw) when Gw == update_stomp; Gw == update_mqttsn;
|
||||
Gw == update_coap; Gw == update_lwm2m;
|
||||
Gw == update_exproto ->
|
||||
"update_" ++ GwStr = atom_to_list(Gw),
|
||||
Gw1 = list_to_existing_atom(GwStr),
|
||||
remove_listener_and_authn(emqx_gateway_schema:fields(Gw1));
|
||||
|
||||
fields(Listener) when Listener == tcp_listener;
|
||||
Listener == ssl_listener;
|
||||
Listener == udp_listener;
|
||||
Listener == dtls_listener ->
|
||||
[ {id,
|
||||
mk(string(),
|
||||
mk(binary(),
|
||||
#{ nullable => true
|
||||
, desc => <<"Listener ID">>})}
|
||||
, {type,
|
||||
mk(hoconsc:union([tcp, ssl, udp, dtls]),
|
||||
#{ desc => <<"Listener type">>})}
|
||||
, {name,
|
||||
mk(string(),
|
||||
mk(binary(),
|
||||
#{ desc => <<"Listener Name">>})}
|
||||
, {running,
|
||||
mk(boolean(),
|
||||
|
@ -293,11 +310,19 @@ fields(Listener) when Listener == tcp_listener;
|
|||
] ++ emqx_gateway_schema:fields(Listener);
|
||||
|
||||
fields(gateway_stats) ->
|
||||
[{key, mk(string(), #{})}].
|
||||
[{key, mk(binary(), #{})}].
|
||||
|
||||
schema_update_gateways_conf() ->
|
||||
emqx_dashboard_swagger:schema_with_examples(
|
||||
hoconsc:union([ref(?MODULE, update_stomp),
|
||||
ref(?MODULE, update_mqttsn),
|
||||
ref(?MODULE, update_coap),
|
||||
ref(?MODULE, update_lwm2m),
|
||||
ref(?MODULE, update_exproto)]),
|
||||
examples_update_gateway_confs()
|
||||
).
|
||||
|
||||
schema_gateways_conf() ->
|
||||
%% XXX: We need to convert the emqx_gateway_schema's listener map
|
||||
%% structure to array
|
||||
emqx_dashboard_swagger:schema_with_examples(
|
||||
hoconsc:union([ref(?MODULE, stomp), ref(?MODULE, mqttsn),
|
||||
ref(?MODULE, coap), ref(?MODULE, lwm2m),
|
||||
|
@ -314,6 +339,11 @@ convert_listener_struct(Schema) ->
|
|||
}),
|
||||
lists:keystore(listeners, 1, Schema1, {listeners, ListenerSchema}).
|
||||
|
||||
remove_listener_and_authn(Schmea) ->
|
||||
lists:keydelete(
|
||||
authentication, 1,
|
||||
lists:keydelete(listeners, 1, Schmea)).
|
||||
|
||||
listeners_schema(?R_REF(_Mod, tcp_listeners)) ->
|
||||
hoconsc:array(hoconsc:union([ref(tcp_listener), ref(ssl_listener)]));
|
||||
listeners_schema(?R_REF(_Mod, udp_listeners)) ->
|
||||
|
@ -325,18 +355,202 @@ listeners_schema(?R_REF(_Mod, udp_tcp_listeners)) ->
|
|||
%%--------------------------------------------------------------------
|
||||
%% examples
|
||||
|
||||
examples_gateway_overview() ->
|
||||
[ #{ name => <<"coap">>
|
||||
, status => <<"unloaded">>
|
||||
}
|
||||
, #{ name => <<"exproto">>
|
||||
, status => <<"unloaded">>
|
||||
}
|
||||
, #{ name => <<"lwm2m">>
|
||||
, status => <<"running">>
|
||||
, current_connections => 0
|
||||
, max_connections => 1024000
|
||||
, listeners =>
|
||||
[ #{ id => <<"lwm2m:udp:default">>
|
||||
, type => <<"udp">>
|
||||
, name => <<"default">>
|
||||
, running => true
|
||||
}
|
||||
]
|
||||
, created_at => <<"2021-12-08T14:41:26.171+08:00">>
|
||||
, started_at => <<"2021-12-08T14:41:26.202+08:00">>
|
||||
}
|
||||
, #{ name => <<"mqttsn">>
|
||||
, status => <<"stopped">>
|
||||
, current_connections => 0
|
||||
, max_connections => 1024000
|
||||
, listeners =>
|
||||
[ #{ id => <<"mqttsn:udp:default">>
|
||||
, name => <<"default">>
|
||||
, running => false
|
||||
, type => <<"udp">>
|
||||
}
|
||||
]
|
||||
, created_at => <<"2021-12-08T14:41:45.071+08:00">>
|
||||
, stopped_at => <<"2021-12-08T14:56:35.576+08:00">>
|
||||
}
|
||||
, #{ name => <<"stomp">>
|
||||
, status => <<"running">>
|
||||
, current_connections => 0
|
||||
, max_connections => 1024000
|
||||
, listeners =>
|
||||
[ #{ id => <<"stomp:tcp:default">>
|
||||
, name => <<"default">>
|
||||
, running => true
|
||||
, type => <<"tcp">>
|
||||
}
|
||||
]
|
||||
, created_at => <<"2021-12-08T14:42:15.272+08:00">>
|
||||
, started_at => <<"2021-12-08T14:42:15.274+08:00">>
|
||||
}
|
||||
].
|
||||
|
||||
examples_gateway_confs() ->
|
||||
#{ stomp_gateway =>
|
||||
#{ summary => <<"A simple STOMP gateway configs">>
|
||||
, value =>
|
||||
#{ enable => true
|
||||
, name => <<"stomp">>
|
||||
, enable_stats => true
|
||||
, idle_timeout => <<"30s">>
|
||||
, mountpoint => <<"stomp/">>
|
||||
, frame =>
|
||||
#{ max_headers => 10
|
||||
, max_headers_length => 1024
|
||||
, max_body_length => 65535
|
||||
}
|
||||
, listeners =>
|
||||
[ #{ type => <<"tcp">>
|
||||
, name => <<"default">>
|
||||
, bind => <<"61613">>
|
||||
, max_connections => 1024000
|
||||
, max_conn_rate => 1000
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
, mqttsn_gateway =>
|
||||
#{ summary => <<"A simple MQTT-SN gateway configs">>
|
||||
, value =>
|
||||
#{ enable => true
|
||||
, name => <<"mqttsn">>
|
||||
, enable_stats => true
|
||||
, idle_timeout => <<"30s">>
|
||||
, mountpoint => <<"mqttsn/">>
|
||||
, gateway_id => 1
|
||||
, broadcast => true
|
||||
, enable_qos3 => true
|
||||
, predefined =>
|
||||
[ #{ id => <<"1001">>
|
||||
, topic => <<"pred/1001">>
|
||||
}
|
||||
, #{ id => <<"1002">>
|
||||
, topic => <<"pred/1002">>
|
||||
}
|
||||
]
|
||||
, listeners =>
|
||||
[ #{ type => <<"udp">>
|
||||
, name => <<"default">>
|
||||
, bind => <<"1884">>
|
||||
, max_connections => 1024000
|
||||
, max_conn_rate => 1000
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
, coap_gateway =>
|
||||
#{ summary => <<"A simple CoAP gateway configs">>
|
||||
, value =>
|
||||
#{ enable => true
|
||||
, name => <<"coap">>
|
||||
, enable_stats => true
|
||||
, idle_timeout => <<"30s">>
|
||||
, mountpoint => <<"coap/">>
|
||||
, heartbeat => <<"30s">>
|
||||
, connection_required => false
|
||||
, notify_type => <<"qos">>
|
||||
, subscribe_qos => <<"coap">>
|
||||
, publish_qos => <<"coap">>
|
||||
, listeners =>
|
||||
[ #{ type => <<"udp">>
|
||||
, name => <<"default">>
|
||||
, bind => <<"5683">>
|
||||
, max_connections => 1024000
|
||||
, max_conn_rate => 1000
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
, lwm2m_gateway =>
|
||||
#{ summary => <<"A simple LwM2M gateway configs">>
|
||||
, value =>
|
||||
#{ enable => true
|
||||
, name => <<"lwm2m">>
|
||||
, enable_stats => true
|
||||
, idle_timeout => <<"30s">>
|
||||
, mountpoint => <<"lwm2m/">>
|
||||
, xml_dir => <<"etc/lwm2m_xml">>
|
||||
, lifetime_min => <<"1s">>
|
||||
, lifetime_max => <<"86400s">>
|
||||
, qmode_time_window => <<"22s">>
|
||||
, auto_observe => false
|
||||
, update_msg_publish_condition => <<"always">>
|
||||
, translators =>
|
||||
#{ command => #{topic => <<"/dn/#">>}
|
||||
, response => #{topic => <<"/up/resp">>}
|
||||
, notify => #{topic => <<"/up/notify">>}
|
||||
, register => #{topic => <<"/up/resp">>}
|
||||
, update => #{topic => <<"/up/resp">>}
|
||||
}
|
||||
, listeners =>
|
||||
[ #{ type => <<"udp">>
|
||||
, name => <<"default">>
|
||||
, bind => <<"5783">>
|
||||
, max_connections => 1024000
|
||||
, max_conn_rate => 1000
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
, exproto_gateway =>
|
||||
#{ summary => <<"A simple ExProto gateway configs">>
|
||||
, value =>
|
||||
#{ enable => true
|
||||
, name => <<"exproto">>
|
||||
, enable_stats => true
|
||||
, idle_timeout => <<"30s">>
|
||||
, mountpoint => <<"exproto/">>
|
||||
, server =>
|
||||
#{ bind => <<"9100">>
|
||||
}
|
||||
, handler =>
|
||||
#{ address => <<"http://127.0.0.1:9001">>
|
||||
}
|
||||
, listeners =>
|
||||
[ #{ type => <<"tcp">>
|
||||
, name => <<"default">>
|
||||
, bind => <<"7993">>
|
||||
, max_connections => 1024000
|
||||
, max_conn_rate => 1000
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}.
|
||||
|
||||
examples_update_gateway_confs() ->
|
||||
#{ stomp_gateway =>
|
||||
#{ summary => <<"A simple STOMP gateway configs">>
|
||||
, value =>
|
||||
#{ enable => true
|
||||
, enable_stats => true
|
||||
, idle_timeout => <<"30s">>
|
||||
, mountpoint => <<"stomp/">>
|
||||
, mountpoint => <<"stomp2/">>
|
||||
, frame =>
|
||||
#{ max_header => 10
|
||||
, make_header_length => 1024
|
||||
, max_body_length => 65535
|
||||
#{ max_headers => 100
|
||||
, max_headers_length => 10240
|
||||
, max_body_length => 655350
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -345,6 +559,67 @@ examples_gateway_confs() ->
|
|||
, value =>
|
||||
#{ enable => true
|
||||
, enable_stats => true
|
||||
, idle_timeout => <<"30s">>
|
||||
, mountpoint => <<"mqttsn2/">>
|
||||
, gateway_id => 1
|
||||
, broadcast => true
|
||||
, enable_qos3 => false
|
||||
, predefined =>
|
||||
[ #{ id => <<"1003">>
|
||||
, topic => <<"pred/1003">>
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
, coap_gateway =>
|
||||
#{ summary => <<"A simple CoAP gateway configs">>
|
||||
, value =>
|
||||
#{ enable => true
|
||||
, enable_stats => true
|
||||
, idle_timeout => <<"30s">>
|
||||
, mountpoint => <<"coap2/">>
|
||||
, heartbeat => <<"30s">>
|
||||
, connection_required => false
|
||||
, notify_type => <<"qos">>
|
||||
, subscribe_qos => <<"coap">>
|
||||
, publish_qos => <<"coap">>
|
||||
}
|
||||
}
|
||||
, lwm2m_gateway =>
|
||||
#{ summary => <<"A simple LwM2M gateway configs">>
|
||||
, value =>
|
||||
#{ enable => true
|
||||
, enable_stats => true
|
||||
, idle_timeout => <<"30s">>
|
||||
, mountpoint => <<"lwm2m2/">>
|
||||
, xml_dir => <<"etc/lwm2m_xml">>
|
||||
, lifetime_min => <<"1s">>
|
||||
, lifetime_max => <<"86400s">>
|
||||
, qmode_time_window => <<"22s">>
|
||||
, auto_observe => false
|
||||
, update_msg_publish_condition => <<"always">>
|
||||
, translators =>
|
||||
#{ command => #{topic => <<"/dn/#">>}
|
||||
, response => #{topic => <<"/up/resp">>}
|
||||
, notify => #{topic => <<"/up/notify">>}
|
||||
, register => #{topic => <<"/up/resp">>}
|
||||
, update => #{topic => <<"/up/resp">>}
|
||||
}
|
||||
}
|
||||
}
|
||||
, exproto_gateway =>
|
||||
#{ summary => <<"A simple ExProto gateway configs">>
|
||||
, value =>
|
||||
#{ enable => true
|
||||
, enable_stats => true
|
||||
, idle_timeout => <<"30s">>
|
||||
, mountpoint => <<"exproto2/">>
|
||||
, server =>
|
||||
#{ bind => <<"9100">>
|
||||
}
|
||||
, handler =>
|
||||
#{ address => <<"http://127.0.0.1:9001">>
|
||||
}
|
||||
}
|
||||
}
|
||||
}.
|
||||
|
|
|
@ -272,6 +272,7 @@ params_gateway_name_in_path() ->
|
|||
mk(binary(),
|
||||
#{ in => path
|
||||
, desc => <<"Gateway Name">>
|
||||
, example => <<"">>
|
||||
})}
|
||||
].
|
||||
|
||||
|
@ -279,6 +280,7 @@ params_userid_in_path() ->
|
|||
[{uid, mk(binary(),
|
||||
#{ in => path
|
||||
, desc => <<"User ID">>
|
||||
, example => <<"">>
|
||||
})}
|
||||
].
|
||||
|
||||
|
@ -287,11 +289,13 @@ params_paging_in_qs() ->
|
|||
#{ in => query
|
||||
, nullable => true
|
||||
, desc => <<"Page Index">>
|
||||
, example => 1
|
||||
})},
|
||||
{limit, mk(integer(),
|
||||
#{ in => query
|
||||
, nullable => true
|
||||
, desc => <<"Page Limit">>
|
||||
, example => 100
|
||||
})}
|
||||
].
|
||||
|
||||
|
|
|
@ -412,10 +412,7 @@ schema("/gateway/:name/clients") ->
|
|||
#{ description => <<"Get the gateway client list">>
|
||||
, parameters => params_client_query()
|
||||
, responses =>
|
||||
?STANDARD_RESP(
|
||||
#{ 200 => emqx_dashboard_swagger:schema_with_examples(
|
||||
hoconsc:array(ref(client)),
|
||||
examples_client_list())})
|
||||
?STANDARD_RESP(#{200 => schema_client_list()})
|
||||
}
|
||||
};
|
||||
schema("/gateway/:name/clients/:clientid") ->
|
||||
|
@ -424,10 +421,7 @@ schema("/gateway/:name/clients/:clientid") ->
|
|||
#{ description => <<"Get the gateway client infomation">>
|
||||
, parameters => params_client_insta()
|
||||
, responses =>
|
||||
?STANDARD_RESP(
|
||||
#{ 200 => emqx_dashboard_swagger:schema_with_examples(
|
||||
ref(client),
|
||||
examples_client())})
|
||||
?STANDARD_RESP(#{200 => schema_client()})
|
||||
}
|
||||
, delete =>
|
||||
#{ description => <<"Kick out the gateway client">>
|
||||
|
@ -443,9 +437,9 @@ schema("/gateway/:name/clients/:clientid/subscriptions") ->
|
|||
, parameters => params_client_insta()
|
||||
, responses =>
|
||||
?STANDARD_RESP(
|
||||
#{ 200 => emqx_dashboard_swagger:schema_with_examples(
|
||||
hoconsc:array(ref(subscription)),
|
||||
examples_subsctiption_list())})
|
||||
#{200 => emqx_dashboard_swagger:schema_with_examples(
|
||||
hoconsc:array(ref(subscription)),
|
||||
examples_subsctiption_list())})
|
||||
}
|
||||
, post =>
|
||||
#{ description => <<"Create a subscription membership">>
|
||||
|
@ -481,7 +475,7 @@ params_client_insta() ->
|
|||
++ params_gateway_name_in_path().
|
||||
|
||||
params_client_searching_in_qs() ->
|
||||
M = #{in => query, nullable => true},
|
||||
M = #{in => query, nullable => true, example => <<"">>},
|
||||
[ {node,
|
||||
mk(binary(),
|
||||
M#{desc => <<"Match the client's node name">>})}
|
||||
|
@ -532,12 +526,16 @@ params_paging() ->
|
|||
mk(integer(),
|
||||
#{ in => query
|
||||
, nullable => true
|
||||
, desc => <<"Page Index">>})}
|
||||
, {limit,
|
||||
mk(integer(),
|
||||
#{ in => query
|
||||
, desc => <<"Page Limit">>
|
||||
, nullable => true})}
|
||||
, desc => <<"Page Index">>
|
||||
, example => 1
|
||||
})}
|
||||
, {limit,
|
||||
mk(integer(),
|
||||
#{ in => query
|
||||
, desc => <<"Page Limit">>
|
||||
, nullable => true
|
||||
, example => 100
|
||||
})}
|
||||
].
|
||||
|
||||
params_gateway_name_in_path() ->
|
||||
|
@ -567,31 +565,103 @@ params_topic_name_in_path() ->
|
|||
%%--------------------------------------------------------------------
|
||||
%% schemas
|
||||
|
||||
schema_client_list() ->
|
||||
emqx_dashboard_swagger:schema_with_examples(
|
||||
hoconsc:union([hoconsc:array(ref(?MODULE, stomp_client)),
|
||||
hoconsc:array(ref(?MODULE, mqttsn_client)),
|
||||
hoconsc:array(ref(?MODULE, coap_client)),
|
||||
hoconsc:array(ref(?MODULE, lwm2m_client)),
|
||||
hoconsc:array(ref(?MODULE, exproto_client))
|
||||
]),
|
||||
examples_client_list()
|
||||
).
|
||||
|
||||
schema_client() ->
|
||||
emqx_dashboard_swagger:schema_with_examples(
|
||||
hoconsc:union([ref(?MODULE, stomp_client),
|
||||
ref(?MODULE, mqttsn_client),
|
||||
ref(?MODULE, coap_client),
|
||||
ref(?MODULE, lwm2m_client),
|
||||
ref(?MODULE, exproto_client)
|
||||
]),
|
||||
examples_client()
|
||||
).
|
||||
|
||||
roots() ->
|
||||
[ client
|
||||
[ stomp_client
|
||||
, mqttsn_client
|
||||
, coap_client
|
||||
, lwm2m_client
|
||||
, exproto_client
|
||||
, subscription
|
||||
].
|
||||
|
||||
fields(client) ->
|
||||
%% XXX: enum for every protocol's client
|
||||
fields(test) ->
|
||||
[{key, mk(binary(), #{ desc => <<"Desc">>})}];
|
||||
|
||||
fields(stomp_client) ->
|
||||
common_client_props();
|
||||
fields(mqttsn_client) ->
|
||||
common_client_props();
|
||||
fields(coap_client) ->
|
||||
common_client_props();
|
||||
fields(lwm2m_client) ->
|
||||
[ {endpoint_name,
|
||||
mk(binary(),
|
||||
#{ desc => <<"The LwM2M client endpoint name">>})}
|
||||
, {lifetime,
|
||||
mk(integer(),
|
||||
#{ desc => <<"Life time">>})}
|
||||
] ++ common_client_props();
|
||||
fields(exproto_client) ->
|
||||
common_client_props();
|
||||
|
||||
fields(subscription) ->
|
||||
[ {topic,
|
||||
mk(binary(),
|
||||
#{ desc => <<"Topic Fillter">>})}
|
||||
, {qos,
|
||||
mk(integer(),
|
||||
#{ desc => <<"QoS level, enum: 0, 1, 2">>})}
|
||||
, {nl,
|
||||
mk(integer(), %% FIXME: why not boolean?
|
||||
#{ desc => <<"No Local option, enum: 0, 1">>})}
|
||||
, {rap,
|
||||
mk(integer(),
|
||||
#{ desc => <<"Retain as Published option, enum: 0, 1">>})}
|
||||
, {rh,
|
||||
mk(integer(),
|
||||
#{ desc => <<"Retain Handling option, enum: 0, 1, 2">>})}
|
||||
, {sub_props,
|
||||
mk(ref(extra_sub_props),
|
||||
#{desc => <<"Subscription properties">>})}
|
||||
];
|
||||
fields(extra_sub_props) ->
|
||||
[ {subid,
|
||||
mk(binary(),
|
||||
#{ desc => <<"Only stomp protocol, an uniquely identity for "
|
||||
"the subscription. range: 1-65535.">>})}
|
||||
].
|
||||
|
||||
common_client_props() ->
|
||||
[ {node,
|
||||
mk(string(),
|
||||
mk(binary(),
|
||||
#{ desc => <<"Name of the node to which the client is "
|
||||
"connected">>})}
|
||||
, {clientid,
|
||||
mk(string(),
|
||||
mk(binary(),
|
||||
#{ desc => <<"Client identifier">>})}
|
||||
, {username,
|
||||
mk(string(),
|
||||
mk(binary(),
|
||||
#{ desc => <<"Username of client when connecting">>})}
|
||||
, {proto_name,
|
||||
mk(string(),
|
||||
mk(binary(),
|
||||
#{ desc => <<"Client protocol name">>})}
|
||||
, {proto_ver,
|
||||
mk(string(),
|
||||
mk(binary(),
|
||||
#{ desc => <<"Protocol version used by the client">>})}
|
||||
, {ip_address,
|
||||
mk(string(),
|
||||
mk(binary(),
|
||||
#{ desc => <<"Client's IP address">>})}
|
||||
, {port,
|
||||
mk(integer(),
|
||||
|
@ -601,10 +671,10 @@ fields(client) ->
|
|||
#{ desc => <<"Indicates whether the client is connected via "
|
||||
"bridge">>})}
|
||||
, {connected_at,
|
||||
mk(string(),
|
||||
mk(binary(),
|
||||
#{ desc => <<"Client connection time">>})}
|
||||
, {disconnected_at,
|
||||
mk(string(),
|
||||
mk(binary(),
|
||||
#{ desc => <<"Client offline time, This field is only valid and "
|
||||
"returned when connected is false">>})}
|
||||
, {connected,
|
||||
|
@ -615,10 +685,10 @@ fields(client) ->
|
|||
%% want it
|
||||
%%
|
||||
%, {will_msg,
|
||||
% mk(string(),
|
||||
% mk(binary(),
|
||||
% #{ desc => <<"Client will message">>})}
|
||||
%, {zone,
|
||||
% mk(string(),
|
||||
% mk(binary(),
|
||||
% #{ desc => <<"Indicate the configuration group used by the "
|
||||
% "client">>})}
|
||||
, {keepalive,
|
||||
|
@ -633,7 +703,7 @@ fields(client) ->
|
|||
#{ desc => <<"Session expiration interval, with the unit of "
|
||||
"second">>})}
|
||||
, {created_at,
|
||||
mk(string(),
|
||||
mk(binary(),
|
||||
#{ desc => <<"Session creation time">>})}
|
||||
, {subscriptions_cnt,
|
||||
mk(integer(),
|
||||
|
@ -699,45 +769,114 @@ fields(client) ->
|
|||
, {reductions,
|
||||
mk(integer(),
|
||||
#{ desc => <<"Erlang reduction">>})}
|
||||
];
|
||||
fields(subscription) ->
|
||||
[ {topic,
|
||||
mk(string(),
|
||||
#{ desc => <<"Topic Fillter">>})}
|
||||
, {qos,
|
||||
mk(integer(),
|
||||
#{ desc => <<"QoS level, enum: 0, 1, 2">>})}
|
||||
, {nl,
|
||||
mk(integer(), %% FIXME: why not boolean?
|
||||
#{ desc => <<"No Local option, enum: 0, 1">>})}
|
||||
, {rap,
|
||||
mk(integer(),
|
||||
#{ desc => <<"Retain as Published option, enum: 0, 1">>})}
|
||||
, {rh,
|
||||
mk(integer(),
|
||||
#{ desc => <<"Retain Handling option, enum: 0, 1, 2">>})}
|
||||
, {sub_props,
|
||||
mk(ref(extra_sub_props),
|
||||
#{desc => <<"Subscription properties">>})}
|
||||
];
|
||||
fields(extra_sub_props) ->
|
||||
[ {subid,
|
||||
mk(string(),
|
||||
#{ desc => <<"Only stomp protocol, an uniquely identity for "
|
||||
"the subscription. range: 1-65535.">>})}
|
||||
].
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% examples
|
||||
|
||||
examples_client_list() ->
|
||||
#{}.
|
||||
#{ general_client_list =>
|
||||
#{ summary => <<"General Client List">>
|
||||
, value => [example_general_client()]
|
||||
}
|
||||
, lwm2m_client_list =>
|
||||
#{ summary => <<"LwM2M Client List">>
|
||||
, value => [example_lwm2m_client()]
|
||||
}
|
||||
}.
|
||||
|
||||
examples_client() ->
|
||||
#{}.
|
||||
#{ general_client =>
|
||||
#{ summary => <<"General Client Info">>
|
||||
, value => example_general_client()
|
||||
}
|
||||
, lwm2m_client =>
|
||||
#{ summary => <<"LwM2M Client Info">>
|
||||
, value => example_lwm2m_client()
|
||||
}
|
||||
}.
|
||||
|
||||
examples_subsctiption_list() ->
|
||||
#{}.
|
||||
#{ general_subscription_list =>
|
||||
#{ summary => <<"A General Subscription List">>
|
||||
, value => [example_general_subscription()]
|
||||
}
|
||||
, stomp_subscription_list =>
|
||||
#{ summary => <<"The Stomp Subscription List">>
|
||||
, value => [example_stomp_subscription()]
|
||||
}
|
||||
}.
|
||||
|
||||
examples_subsctiption() ->
|
||||
#{}.
|
||||
#{ general_subscription =>
|
||||
#{ summary => <<"A General Subscription">>
|
||||
, value => example_general_subscription()
|
||||
}
|
||||
, stomp_subscription =>
|
||||
#{ summary => <<"A Stomp Subscription">>
|
||||
, value => example_stomp_subscription()
|
||||
}
|
||||
}.
|
||||
|
||||
example_lwm2m_client() ->
|
||||
maps:merge(
|
||||
example_general_client(),
|
||||
#{ proto_name => <<"LwM2M">>
|
||||
, proto_ver => <<"1.0">>
|
||||
, endpoint_name => <<"urn:imei:154928475237123">>
|
||||
, lifetime => 86400
|
||||
}).
|
||||
|
||||
example_general_client() ->
|
||||
#{ clientid => <<"MzAyMzEzNTUwNzk1NDA1MzYyMzIwNzUxNjQwMTY1NzQ0NjE">>
|
||||
, username => <<"guest">>
|
||||
, node => <<"emqx@127.0.0.1">>
|
||||
, proto_name => "STOMP"
|
||||
, proto_ver => <<"1.0">>
|
||||
, ip_address => <<"127.0.0.1">>
|
||||
, port => 50675
|
||||
, clean_start => true
|
||||
, connected => true
|
||||
, is_bridge => false
|
||||
, keepalive => 0
|
||||
, expiry_interval => 0
|
||||
, subscriptions_cnt => 0
|
||||
, subscriptions_max => <<"infinity">>
|
||||
, awaiting_rel_cnt => 0
|
||||
, awaiting_rel_max => <<"infinity">>
|
||||
, mqueue_len => 0
|
||||
, mqueue_max => <<"infinity">>
|
||||
, mqueue_dropped => 0
|
||||
, inflight_cnt => 0
|
||||
, inflight_max => <<"infinity">>
|
||||
, heap_size => 4185
|
||||
, recv_oct => 56
|
||||
, recv_cnt => 1
|
||||
, recv_pkt => 1
|
||||
, recv_msg => 0
|
||||
, send_oct => 61
|
||||
, send_cnt => 1
|
||||
, send_pkt => 1
|
||||
, send_msg => 0
|
||||
, reductions => 72022
|
||||
, mailbox_len => 0
|
||||
, created_at => <<"2021-12-07T10:44:02.721+08:00">>
|
||||
, connected_at => <<"2021-12-07T10:44:02.721+08:00">>
|
||||
, disconnected_at => null
|
||||
}.
|
||||
|
||||
example_stomp_subscription() ->
|
||||
maps:merge(
|
||||
example_general_subscription(),
|
||||
#{ topic => <<"stomp/topic">>
|
||||
, sub_props => #{subid => <<"10">>}
|
||||
}).
|
||||
|
||||
example_general_subscription() ->
|
||||
#{ topic => <<"test/topic">>
|
||||
, qos => 1
|
||||
, nl => 0
|
||||
, rap => 0
|
||||
, rh => 0
|
||||
, sub_props => #{}
|
||||
}.
|
||||
|
|
|
@ -233,6 +233,8 @@ schema("/gateway/:name/listeners") ->
|
|||
post =>
|
||||
#{ description => <<"Create the gateway listener">>
|
||||
, parameters => params_gateway_name_in_path()
|
||||
%% XXX: How to distinguish the different listeners supported by
|
||||
%% different types of gateways?
|
||||
, 'requestBody' => emqx_dashboard_swagger:schema_with_examples(
|
||||
ref(listener),
|
||||
examples_listener())
|
||||
|
@ -288,7 +290,7 @@ schema("/gateway/:name/listeners/:id/authentication") ->
|
|||
, responses =>
|
||||
?STANDARD_RESP(
|
||||
#{ 200 => schema_authn()
|
||||
, 204 => <<"Authentication does not initiated">>
|
||||
, 204 => <<"Authentication or listener does not existed">>
|
||||
})
|
||||
},
|
||||
post =>
|
||||
|
@ -408,6 +410,7 @@ params_gateway_name_in_path() ->
|
|||
mk(binary(),
|
||||
#{ in => path
|
||||
, desc => <<"Gateway Name">>
|
||||
, example => <<"">>
|
||||
})}
|
||||
].
|
||||
|
||||
|
@ -416,6 +419,7 @@ params_listener_id_in_path() ->
|
|||
mk(binary(),
|
||||
#{ in => path
|
||||
, desc => <<"Listener ID">>
|
||||
, example => <<"">>
|
||||
})}
|
||||
].
|
||||
|
||||
|
@ -423,6 +427,7 @@ params_userid_in_path() ->
|
|||
[{uid, mk(binary(),
|
||||
#{ in => path
|
||||
, desc => <<"User ID">>
|
||||
, example => <<"">>
|
||||
})}
|
||||
].
|
||||
|
||||
|
@ -431,11 +436,13 @@ params_paging_in_qs() ->
|
|||
#{ in => query
|
||||
, nullable => true
|
||||
, desc => <<"Page Index">>
|
||||
, example => 1
|
||||
})},
|
||||
{limit, mk(integer(),
|
||||
#{ in => query
|
||||
, nullable => true
|
||||
, desc => <<"Page Limit">>
|
||||
, example => 100
|
||||
})}
|
||||
].
|
||||
|
||||
|
@ -487,7 +494,6 @@ fields(ssl_listener_opts) ->
|
|||
, {keyfile, binary()}
|
||||
, {verify, binary()}
|
||||
, {fail_if_no_peer_cert, boolean()}
|
||||
, {server_name_indication, boolean()}
|
||||
, {depth, integer()}
|
||||
, {password, binary()}
|
||||
, {handshake_timeout, binary()}
|
||||
|
@ -586,7 +592,9 @@ examples_listener() ->
|
|||
#{ tcp_listener=>
|
||||
#{ summary => <<"A simple tcp listener example">>
|
||||
, value =>
|
||||
#{ bind => <<"61613">>
|
||||
#{ name => <<"tcp-def">>
|
||||
, type => <<"tcp">>
|
||||
, bind => <<"22210">>
|
||||
, acceptors => 16
|
||||
, max_connections => 1024000
|
||||
, max_conn_rate => 1000
|
||||
|
@ -607,7 +615,9 @@ examples_listener() ->
|
|||
, ssl_listener =>
|
||||
#{ summary => <<"A simple ssl listener example">>
|
||||
, value =>
|
||||
#{ bind => <<"61614">>
|
||||
#{ name => <<"ssl-def">>
|
||||
, type => <<"ssl">>
|
||||
, bind => <<"22211">>
|
||||
, acceptors => 16
|
||||
, max_connections => 1024000
|
||||
, max_conn_rate => 1000
|
||||
|
@ -620,7 +630,6 @@ examples_listener() ->
|
|||
, keyfile => <<"etc/certs/key.pem">>
|
||||
, verify => <<"verify_none">>
|
||||
, fail_if_no_peer_cert => false
|
||||
, server_name_indication => disable
|
||||
}
|
||||
, tcp =>
|
||||
#{ active_n => 100
|
||||
|
@ -631,7 +640,9 @@ examples_listener() ->
|
|||
, udp_listener =>
|
||||
#{ summary => <<"A simple udp listener example">>
|
||||
, value =>
|
||||
#{ bind => <<"0.0.0.0:1884">>
|
||||
#{ name => <<"udp-def">>
|
||||
, type => udp
|
||||
, bind => <<"22212">>
|
||||
, udp =>
|
||||
#{ active_n => 100
|
||||
, recbuf => <<"10KB">>
|
||||
|
@ -644,32 +655,67 @@ examples_listener() ->
|
|||
, dtls_listener =>
|
||||
#{ summary => <<"A simple dtls listener example">>
|
||||
, value =>
|
||||
#{ bind => <<"5684">>
|
||||
#{ name => <<"dtls-def">>
|
||||
, type => <<"dtls">>
|
||||
, bind => <<"22213">>
|
||||
, acceptors => 16
|
||||
, max_connections => 1024000
|
||||
, max_conn_rate => 1000
|
||||
, access_rules => [<<"allow all">>]
|
||||
, ssl =>
|
||||
, dtls =>
|
||||
#{ versions => [<<"dtlsv1.2">>, <<"dtlsv1">>]
|
||||
, cacertfile => <<"etc/certs/cacert.pem">>
|
||||
, certfile => <<"etc/certs/cert.pem">>
|
||||
, keyfile => <<"etc/certs/key.pem">>
|
||||
, verify => <<"verify_none">>
|
||||
, fail_if_no_peer_cert => false
|
||||
, server_name_indication => disable
|
||||
}
|
||||
, tcp =>
|
||||
, udp =>
|
||||
#{ active_n => 100
|
||||
, backlog => 1024
|
||||
}
|
||||
}
|
||||
}
|
||||
, dtls_listener_with_psk_ciphers =>
|
||||
#{ summary => <<"todo">>
|
||||
#{ summary => <<"A dtls listener with PSK example">>
|
||||
, value =>
|
||||
#{}
|
||||
#{ name => <<"dtls-psk">>
|
||||
, type => <<"dtls">>
|
||||
, bind => <<"22214">>
|
||||
, acceptors => 16
|
||||
, max_connections => 1024000
|
||||
, max_conn_rate => 1000
|
||||
, dtls =>
|
||||
#{ versions => [<<"dtlsv1.2">>, <<"dtlsv1">>]
|
||||
, cacertfile => <<"etc/certs/cacert.pem">>
|
||||
, certfile => <<"etc/certs/cert.pem">>
|
||||
, keyfile => <<"etc/certs/key.pem">>
|
||||
, verify => <<"verify_none">>
|
||||
, user_lookup_fun => <<"emqx_tls_psk:lookup">>
|
||||
, ciphers =>
|
||||
<<"RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384,RSA-PSK-AES128-GCM-SHA256,"
|
||||
"RSA-PSK-AES128-CBC-SHA256,RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA">>
|
||||
, fail_if_no_peer_cert => false
|
||||
}
|
||||
}
|
||||
}
|
||||
, listener_with_authn =>
|
||||
#{ summary => <<"todo">>
|
||||
, value => #{}}
|
||||
#{ summary => <<"A tcp listener with authentication example">>
|
||||
, value =>
|
||||
#{ name => <<"tcp-with-authn">>
|
||||
, type => <<"tcp">>
|
||||
, bind => <<"22215">>
|
||||
, acceptors => 16
|
||||
, max_connections => 1024000
|
||||
, max_conn_rate => 1000
|
||||
, authentication =>
|
||||
#{ backend => <<"built-in-database">>
|
||||
, mechanism => <<"password-based">>
|
||||
, password_hash_algorithm =>
|
||||
#{ name => <<"sha256">>
|
||||
}
|
||||
, user_id_type => <<"username">>
|
||||
}
|
||||
}
|
||||
}
|
||||
}.
|
||||
|
|
|
@ -69,7 +69,7 @@ gateway(["load", Name, Conf]) ->
|
|||
bin(Name),
|
||||
emqx_json:decode(Conf, [return_maps])
|
||||
) of
|
||||
ok ->
|
||||
{ok, _} ->
|
||||
print("ok~n");
|
||||
{error, Reason} ->
|
||||
print("Error: ~p~n", [Reason])
|
||||
|
@ -88,7 +88,7 @@ gateway(["stop", Name]) ->
|
|||
bin(Name),
|
||||
#{<<"enable">> => <<"false">>}
|
||||
) of
|
||||
ok ->
|
||||
{ok, _} ->
|
||||
print("ok~n");
|
||||
{error, Reason} ->
|
||||
print("Error: ~p~n", [Reason])
|
||||
|
@ -99,7 +99,7 @@ gateway(["start", Name]) ->
|
|||
bin(Name),
|
||||
#{<<"enable">> => <<"true">>}
|
||||
) of
|
||||
ok ->
|
||||
{ok, _} ->
|
||||
print("ok~n");
|
||||
{error, Reason} ->
|
||||
print("Error: ~p~n", [Reason])
|
||||
|
|
|
@ -79,15 +79,14 @@ unload() ->
|
|||
%%--------------------------------------------------------------------
|
||||
%% APIs
|
||||
|
||||
-spec load_gateway(atom_or_bin(), map()) -> ok_or_err().
|
||||
-spec load_gateway(atom_or_bin(), map()) -> map_or_err().
|
||||
load_gateway(GwName, Conf) ->
|
||||
NConf = case maps:take(<<"listeners">>, Conf) of
|
||||
error -> Conf;
|
||||
{Ls, Conf1} ->
|
||||
Conf1#{<<"listeners">> => unconvert_listeners(Ls)}
|
||||
end,
|
||||
%% TODO:
|
||||
ret_ok_err(update({?FUNCTION_NAME, bin(GwName), NConf})).
|
||||
ret_gw(GwName, update({?FUNCTION_NAME, bin(GwName), NConf})).
|
||||
|
||||
%% @doc convert listener array to map
|
||||
unconvert_listeners(Ls) when is_list(Ls) ->
|
||||
|
@ -108,13 +107,12 @@ maps_key_take([K | Ks], M, Acc) ->
|
|||
maps_key_take(Ks, M1, [V | Acc])
|
||||
end.
|
||||
|
||||
-spec update_gateway(atom_or_bin(), map()) -> ok_or_err().
|
||||
-spec update_gateway(atom_or_bin(), map()) -> map_or_err().
|
||||
update_gateway(GwName, Conf0) ->
|
||||
Exclude0 = [listeners, ?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_ATOM],
|
||||
Exclude1 = [atom_to_binary(K, utf8) || K <- Exclude0],
|
||||
Conf = maps:without(Exclude0 ++ Exclude1, Conf0),
|
||||
|
||||
ret_ok_err(update({?FUNCTION_NAME, bin(GwName), Conf})).
|
||||
ret_gw(GwName, update({?FUNCTION_NAME, bin(GwName), Conf})).
|
||||
|
||||
%% FIXME: delete cert files ??
|
||||
|
||||
|
@ -261,6 +259,22 @@ bin(B) when is_binary(B) ->
|
|||
ret_ok_err({ok, _}) -> ok;
|
||||
ret_ok_err(Err) -> Err.
|
||||
|
||||
ret_gw(GwName, {ok, #{raw_config := GwConf}}) ->
|
||||
GwConf1 = emqx_map_lib:deep_get([bin(GwName)], GwConf),
|
||||
LsConf = emqx_map_lib:deep_get(
|
||||
[bin(GwName), <<"listeners">>],
|
||||
GwConf, #{}),
|
||||
NLsConf =
|
||||
lists:foldl(fun({LType, SubConf}, Acc) ->
|
||||
NLConfs =
|
||||
lists:map(fun({LName, LConf}) ->
|
||||
do_convert_listener2(GwName, LType, LName, LConf)
|
||||
end, maps:to_list(SubConf)),
|
||||
[NLConfs|Acc]
|
||||
end, [], maps:to_list(LsConf)),
|
||||
{ok, maps:merge(GwConf1, #{<<"listeners">> => NLsConf})};
|
||||
ret_gw(_GwName, Err) -> Err.
|
||||
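%% For illustration only: assuming a raw listener config shaped like
%%   #{<<"tcp">> => #{<<"default">> => #{<<"bind">> => <<"61613">>}}},
%% ret_gw/2 folds it into a per-type list of converted listener maps, roughly
%%   [[#{<<"name">> => <<"default">>, <<"type">> => <<"tcp">>, <<"bind">> => <<"61613">>}]]
%% (the exact extra keys, e.g. a listener id, depend on do_convert_listener2/4).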
|
||||
ret_authn(GwName, {ok, #{raw_config := GwConf}}) ->
|
||||
Authn = emqx_map_lib:deep_get(
|
||||
[bin(GwName), <<"authentication">>],
|
||||
|
|
|
@ -223,6 +223,8 @@ remove_authn(GwName, ListenerId) ->
|
|||
|
||||
confexp(ok) -> ok;
|
||||
confexp({ok, Res}) -> {ok, Res};
|
||||
confexp({error, badarg}) ->
|
||||
error({update_conf_error, badarg});
|
||||
confexp({error, not_found}) ->
|
||||
error({update_conf_error, not_found});
|
||||
confexp({error, already_exist}) ->
|
||||
|
@ -372,6 +374,8 @@ with_gateway(GwName0, Fun) ->
|
|||
lists:join(".", lists:map(fun to_list/1, Path0))),
|
||||
return_http_error(404, "Resource not found. path: " ++ Path);
|
||||
%% Exceptions from: confexp/1
|
||||
error : {update_conf_error, badarg} ->
|
||||
return_http_error(400, "Bad arguments");
|
||||
error : {update_conf_error, not_found} ->
|
||||
return_http_error(404, "Resource not found");
|
||||
error : {update_conf_error, already_exist} ->
|
||||
|
|
|
@ -221,7 +221,7 @@ fields(lwm2m) ->
|
|||
})}
|
||||
, {lifetime_min,
|
||||
sc(duration(),
|
||||
#{ default => "1s"
|
||||
#{ default => "15s"
|
||||
, desc => "Minimum value of lifetime allowed to be set by the LwM2M client"
|
||||
})}
|
||||
, {lifetime_max,
|
||||
|
|
|
@ -61,7 +61,7 @@ start_grpc_server(GwName, Options = #{bind := ListenOn}) ->
|
|||
end,
|
||||
case grpc:start_server(GwName, ListenOn, Services, SvrOptions) of
|
||||
{ok, _SvrPid} ->
|
||||
console_print("Start ~ts gRPC server on ~p successfully.",
|
||||
console_print("Start ~ts gRPC server on ~p successfully.~n",
|
||||
[GwName, ListenOn]);
|
||||
{error, Reason} ->
|
||||
?ELOG("Falied to start ~ts gRPC server on ~p, reason: ~p",
|
||||
|
|
|
@ -59,7 +59,7 @@ t_gateway(_) ->
|
|||
lists:foreach(fun assert_gw_unloaded/1, Gateways),
|
||||
{400, BadReq} = request(get, "/gateway/uname_gateway"),
|
||||
assert_bad_request(BadReq),
|
||||
{204, _} = request(post, "/gateway", #{name => <<"stomp">>}),
|
||||
{201, _} = request(post, "/gateway", #{name => <<"stomp">>}),
|
||||
{200, StompGw1} = request(get, "/gateway/stomp"),
|
||||
assert_feilds_apperence([name, status, enable, created_at, started_at],
|
||||
StompGw1),
|
||||
|
@ -81,12 +81,12 @@ t_gateway_stomp(_) ->
|
|||
#{name => <<"def">>, type => <<"tcp">>, bind => <<"61613">>}
|
||||
]
|
||||
},
|
||||
{204, _} = request(post, "/gateway", GwConf),
|
||||
{201, _} = request(post, "/gateway", GwConf),
|
||||
{200, ConfResp} = request(get, "/gateway/stomp"),
|
||||
assert_confs(GwConf, ConfResp),
|
||||
%% put
|
||||
GwConf2 = emqx_map_lib:deep_merge(GwConf, #{frame => #{max_headers => 10}}),
|
||||
{204, _} = request(put, "/gateway/stomp", maps:without([name], GwConf2)),
|
||||
{200, _} = request(put, "/gateway/stomp", maps:without([name, listeners], GwConf2)),
|
||||
{200, ConfResp2} = request(get, "/gateway/stomp"),
|
||||
assert_confs(GwConf2, ConfResp2),
|
||||
{204, _} = request(delete, "/gateway/stomp").
|
||||
|
@ -104,12 +104,12 @@ t_gateway_mqttsn(_) ->
|
|||
#{name => <<"def">>, type => <<"udp">>, bind => <<"1884">>}
|
||||
]
|
||||
},
|
||||
{204, _} = request(post, "/gateway", GwConf),
|
||||
{201, _} = request(post, "/gateway", GwConf),
|
||||
{200, ConfResp} = request(get, "/gateway/mqttsn"),
|
||||
assert_confs(GwConf, ConfResp),
|
||||
%% put
|
||||
GwConf2 = emqx_map_lib:deep_merge(GwConf, #{predefined => []}),
|
||||
{204, _} = request(put, "/gateway/mqttsn", maps:without([name], GwConf2)),
|
||||
{200, _} = request(put, "/gateway/mqttsn", maps:without([name, listeners], GwConf2)),
|
||||
{200, ConfResp2} = request(get, "/gateway/mqttsn"),
|
||||
assert_confs(GwConf2, ConfResp2),
|
||||
{204, _} = request(delete, "/gateway/mqttsn").
|
||||
|
@ -125,12 +125,12 @@ t_gateway_coap(_) ->
|
|||
#{name => <<"def">>, type => <<"udp">>, bind => <<"5683">>}
|
||||
]
|
||||
},
|
||||
{204, _} = request(post, "/gateway", GwConf),
|
||||
{201, _} = request(post, "/gateway", GwConf),
|
||||
{200, ConfResp} = request(get, "/gateway/coap"),
|
||||
assert_confs(GwConf, ConfResp),
|
||||
%% put
|
||||
GwConf2 = emqx_map_lib:deep_merge(GwConf, #{heartbeat => <<"10s">>}),
|
||||
{204, _} = request(put, "/gateway/coap", maps:without([name], GwConf2)),
|
||||
{200, _} = request(put, "/gateway/coap", maps:without([name, listeners], GwConf2)),
|
||||
{200, ConfResp2} = request(get, "/gateway/coap"),
|
||||
assert_confs(GwConf2, ConfResp2),
|
||||
{204, _} = request(delete, "/gateway/coap").
|
||||
|
@ -156,12 +156,12 @@ t_gateway_lwm2m(_) ->
|
|||
#{name => <<"def">>, type => <<"udp">>, bind => <<"5783">>}
|
||||
]
|
||||
},
|
||||
{204, _} = request(post, "/gateway", GwConf),
|
||||
{201, _} = request(post, "/gateway", GwConf),
|
||||
{200, ConfResp} = request(get, "/gateway/lwm2m"),
|
||||
assert_confs(GwConf, ConfResp),
|
||||
%% put
|
||||
GwConf2 = emqx_map_lib:deep_merge(GwConf, #{qmode_time_window => <<"10s">>}),
|
||||
{204, _} = request(put, "/gateway/lwm2m", maps:without([name], GwConf2)),
|
||||
{200, _} = request(put, "/gateway/lwm2m", maps:without([name, listeners], GwConf2)),
|
||||
{200, ConfResp2} = request(get, "/gateway/lwm2m"),
|
||||
assert_confs(GwConf2, ConfResp2),
|
||||
{204, _} = request(delete, "/gateway/lwm2m").
|
||||
|
@ -177,19 +177,19 @@ t_gateway_exproto(_) ->
|
|||
#{name => <<"def">>, type => <<"tcp">>, bind => <<"7993">>}
|
||||
]
|
||||
},
|
||||
{204, _} = request(post, "/gateway", GwConf),
|
||||
{201, _} = request(post, "/gateway", GwConf),
|
||||
{200, ConfResp} = request(get, "/gateway/exproto"),
|
||||
assert_confs(GwConf, ConfResp),
|
||||
%% put
|
||||
GwConf2 = emqx_map_lib:deep_merge(GwConf, #{server => #{bind => <<"9200">>}}),
|
||||
{204, _} = request(put, "/gateway/exproto", maps:without([name], GwConf2)),
|
||||
{200, _} = request(put, "/gateway/exproto", maps:without([name, listeners], GwConf2)),
|
||||
{200, ConfResp2} = request(get, "/gateway/exproto"),
|
||||
assert_confs(GwConf2, ConfResp2),
|
||||
{204, _} = request(delete, "/gateway/exproto").
|
||||
|
||||
t_authn(_) ->
|
||||
GwConf = #{name => <<"stomp">>},
|
||||
{204, _} = request(post, "/gateway", GwConf),
|
||||
{201, _} = request(post, "/gateway", GwConf),
|
||||
{204, _} = request(get, "/gateway/stomp/authentication"),
|
||||
|
||||
AuthConf = #{mechanism => <<"password-based">>,
|
||||
|
@ -212,7 +212,7 @@ t_authn(_) ->
|
|||
|
||||
t_authn_data_mgmt(_) ->
|
||||
GwConf = #{name => <<"stomp">>},
|
||||
{204, _} = request(post, "/gateway", GwConf),
|
||||
{201, _} = request(post, "/gateway", GwConf),
|
||||
{204, _} = request(get, "/gateway/stomp/authentication"),
|
||||
|
||||
AuthConf = #{mechanism => <<"password-based">>,
|
||||
|
@ -256,7 +256,7 @@ t_authn_data_mgmt(_) ->
|
|||
|
||||
t_listeners(_) ->
|
||||
GwConf = #{name => <<"stomp">>},
|
||||
{204, _} = request(post, "/gateway", GwConf),
|
||||
{201, _} = request(post, "/gateway", GwConf),
|
||||
{404, _} = request(get, "/gateway/stomp/listeners"),
|
||||
LisConf = #{name => <<"def">>,
|
||||
type => <<"tcp">>,
|
||||
|
@ -289,7 +289,7 @@ t_listeners_authn(_) ->
|
|||
type => <<"tcp">>,
|
||||
bind => <<"61613">>
|
||||
}]},
|
||||
{204, _} = request(post, "/gateway", GwConf),
|
||||
{201, _} = request(post, "/gateway", GwConf),
|
||||
{200, ConfResp} = request(get, "/gateway/stomp"),
|
||||
assert_confs(GwConf, ConfResp),
|
||||
|
||||
|
@ -316,7 +316,7 @@ t_listeners_authn_data_mgmt(_) ->
|
|||
type => <<"tcp">>,
|
||||
bind => <<"61613">>
|
||||
}]},
|
||||
{204, _} = request(post, "/gateway", GwConf),
|
||||
{201, _} = request(post, "/gateway", GwConf),
|
||||
{200, ConfResp} = request(get, "/gateway/stomp"),
|
||||
assert_confs(GwConf, ConfResp),
|
||||
|
||||
|
|
|
@ -244,12 +244,12 @@ t_load_unload_gateway(_) ->
|
|||
StompConf2 = compose(?CONF_STOMP_BAISC_2,
|
||||
?CONF_STOMP_AUTHN_1,
|
||||
?CONF_STOMP_LISTENER_1),
|
||||
ok = emqx_gateway_conf:load_gateway(stomp, StompConf1),
|
||||
{ok, _} = emqx_gateway_conf:load_gateway(stomp, StompConf1),
|
||||
{error, already_exist} =
|
||||
emqx_gateway_conf:load_gateway(stomp, StompConf1),
|
||||
assert_confs(StompConf1, emqx:get_raw_config([gateway, stomp])),
|
||||
|
||||
ok = emqx_gateway_conf:update_gateway(stomp, StompConf2),
|
||||
{ok, _} = emqx_gateway_conf:update_gateway(stomp, StompConf2),
|
||||
assert_confs(StompConf2, emqx:get_raw_config([gateway, stomp])),
|
||||
|
||||
ok = emqx_gateway_conf:unload_gateway(stomp),
|
||||
|
@ -265,7 +265,7 @@ t_load_unload_gateway(_) ->
|
|||
t_load_remove_authn(_) ->
|
||||
StompConf = compose_listener(?CONF_STOMP_BAISC_1, ?CONF_STOMP_LISTENER_1),
|
||||
|
||||
ok = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf),
|
||||
{ok, _} = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf),
|
||||
assert_confs(StompConf, emqx:get_raw_config([gateway, stomp])),
|
||||
|
||||
{ok, _} = emqx_gateway_conf:add_authn(<<"stomp">>, ?CONF_STOMP_AUTHN_1),
|
||||
|
@ -292,7 +292,7 @@ t_load_remove_authn(_) ->
|
|||
t_load_remove_listeners(_) ->
|
||||
StompConf = compose_authn(?CONF_STOMP_BAISC_1, ?CONF_STOMP_AUTHN_1),
|
||||
|
||||
ok = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf),
|
||||
{ok, _} = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf),
|
||||
assert_confs(StompConf, emqx:get_raw_config([gateway, stomp])),
|
||||
|
||||
{ok, _} = emqx_gateway_conf:add_listener(
|
||||
|
@ -338,7 +338,7 @@ t_load_remove_listener_authn(_) ->
|
|||
?CONF_STOMP_AUTHN_2
|
||||
),
|
||||
|
||||
ok = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf),
|
||||
{ok, _} = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf),
|
||||
assert_confs(StompConf, emqx:get_raw_config([gateway, stomp])),
|
||||
|
||||
{ok, _} = emqx_gateway_conf:add_authn(
|
||||
|
@ -368,7 +368,7 @@ t_load_gateway_with_certs_content(_) ->
|
|||
?CONF_STOMP_BAISC_1,
|
||||
?CONF_STOMP_LISTENER_SSL
|
||||
),
|
||||
ok = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf),
|
||||
{ok, _} = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf),
|
||||
assert_confs(StompConf, emqx:get_raw_config([gateway, stomp])),
|
||||
SslConf = emqx_map_lib:deep_get(
|
||||
[<<"listeners">>, <<"ssl">>, <<"default">>, <<"ssl">>],
|
||||
|
@ -388,7 +388,7 @@ t_load_gateway_with_certs_content(_) ->
|
|||
% ?CONF_STOMP_BAISC_1,
|
||||
% ?CONF_STOMP_LISTENER_SSL_PATH
|
||||
% ),
|
||||
% ok = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf),
|
||||
% {ok, _} = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf),
|
||||
% assert_confs(StompConf, emqx:get_raw_config([gateway, stomp])),
|
||||
% SslConf = emqx_map_lib:deep_get(
|
||||
% [<<"listeners">>, <<"ssl">>, <<"default">>, <<"ssl">>],
|
||||
|
@ -402,7 +402,7 @@ t_load_gateway_with_certs_content(_) ->
|
|||
|
||||
t_add_listener_with_certs_content(_) ->
|
||||
StompConf = ?CONF_STOMP_BAISC_1,
|
||||
ok = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf),
|
||||
{ok, _} = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf),
|
||||
assert_confs(StompConf, emqx:get_raw_config([gateway, stomp])),
|
||||
|
||||
{ok, _} = emqx_gateway_conf:add_listener(
|
||||
|
|
|
@ -70,13 +70,14 @@ all() ->
|
|||
|
||||
init_per_suite(Config) ->
|
||||
ok = emqx_config:init_load(emqx_gateway_schema, ?CONF_DEFAULT),
|
||||
emqx_mgmt_api_test_util:init_suite([emqx_conf, emqx_gateway]),
|
||||
application:load(emqx_gateway),
|
||||
emqx_mgmt_api_test_util:init_suite([emqx_conf]),
|
||||
Config.
|
||||
|
||||
end_per_suite(Config) ->
|
||||
timer:sleep(300),
|
||||
{ok, _} = emqx_conf:remove([<<"gateway">>,<<"lwm2m">>], #{}),
|
||||
emqx_mgmt_api_test_util:end_suite([emqx_gateway, emqx_conf]),
|
||||
emqx_mgmt_api_test_util:end_suite([emqx_conf]),
|
||||
Config.
|
||||
|
||||
init_per_testcase(_AllTestCase, Config) ->
|
||||
|
|
|
@ -1,50 +0,0 @@
|
|||
##--------------------------------------------------------------------
|
||||
## EMQ X Rate Limiter
|
||||
##--------------------------------------------------------------------
|
||||
emqx_limiter {
|
||||
bytes_in {
|
||||
global = "100KB/10s" # token generation rate
|
||||
zone.default = "100kB/10s"
|
||||
zone.external = "20kB/10s"
|
||||
bucket.tcp {
|
||||
zone = default
|
||||
aggregated = "100kB/10s,1Mb"
|
||||
per_client = "100KB/10s,10Kb"
|
||||
}
|
||||
bucket.ssl {
|
||||
zone = external
|
||||
aggregated = "100kB/10s,1Mb"
|
||||
per_client = "100KB/10s,10Kb"
|
||||
}
|
||||
}
|
||||
|
||||
message_in {
|
||||
global = "100/10s"
|
||||
zone.default = "100/10s"
|
||||
bucket.bucket1 {
|
||||
zone = default
|
||||
aggregated = "100/10s,1000"
|
||||
per_client = "100/10s,100"
|
||||
}
|
||||
}
|
||||
|
||||
connection {
|
||||
global = "100/10s"
|
||||
zone.default = "100/10s"
|
||||
bucket.bucket1 {
|
||||
zone = default
|
||||
aggregated = "100/10s,1000"
|
||||
per_client = "100/10s,100"
|
||||
}
|
||||
}
|
||||
|
||||
message_routing {
|
||||
global = "100/10s"
|
||||
zone.default = "100/10s"
|
||||
bucket.bucket1 {
|
||||
zone = default
|
||||
aggregated = "100/10s,100"
|
||||
per_client = "100/10s,10"
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,144 +0,0 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2019-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_limiter_client).
|
||||
|
||||
%% API
|
||||
-export([create/5, make_ref/3, consume/2]).
|
||||
-export_type([limiter/0]).
|
||||
|
||||
%% token bucket algorithm
|
||||
-record(limiter, { tokens :: non_neg_integer()
|
||||
, rate :: float()
|
||||
, capacity :: decimal()
|
||||
, lasttime :: millisecond()
|
||||
, ref :: ref_limiter()
|
||||
}).
|
||||
|
||||
-record(ref, { counter :: counters:counters_ref()
|
||||
, index :: index()
|
||||
, rate :: decimal()
|
||||
, obtained :: non_neg_integer()
|
||||
}).
|
||||
|
||||
%% TODO
|
||||
%% we should add a no-op limiter when all the upper layers (global, zone, and buckets) are infinity
|
||||
|
||||
-type limiter() :: #limiter{}.
|
||||
-type ref_limiter() :: #ref{}.
|
||||
-type client() :: limiter() | ref_limiter().
|
||||
-type millisecond() :: non_neg_integer().
|
||||
-type pause_result(Client) :: {pause, millisecond(), Client}.
|
||||
-type consume_result(Client) :: {ok, Client}
|
||||
| pause_result(Client).
|
||||
-type decimal() :: emqx_limiter_decimal:decimal().
|
||||
-type index() :: emqx_limiter_server:index().
|
||||
|
||||
-define(NOW, erlang:monotonic_time(millisecond)).
|
||||
-define(MINIUMN_PAUSE, 100).
|
||||
|
||||
-import(emqx_limiter_decimal, [sub/2]).
|
||||
%%--------------------------------------------------------------------
|
||||
%% API
|
||||
%%--------------------------------------------------------------------
|
||||
-spec create(float(),
|
||||
decimal(),
|
||||
counters:counters_ref(),
|
||||
index(),
|
||||
decimal()) -> limiter().
|
||||
create(Rate, Capacity, Counter, Index, CounterRate) ->
|
||||
#limiter{ tokens = Capacity
|
||||
, rate = Rate
|
||||
, capacity = Capacity
|
||||
, lasttime = ?NOW
|
||||
, ref = make_ref(Counter, Index, CounterRate)
|
||||
}.
|
||||
|
||||
-spec make_ref(counters:counters_ref(), index(), decimal()) -> ref_limiter().
|
||||
make_ref(Counter, Idx, Rate) ->
|
||||
#ref{counter = Counter, index = Idx, rate = Rate, obtained = 0}.
|
||||
|
||||
-spec consume(pos_integer(), Client) -> consume_result(Client)
|
||||
when Client :: client().
|
||||
consume(Need, #limiter{tokens = Tokens,
|
||||
capacity = Capacity} = Limiter) ->
|
||||
if Need =< Tokens ->
|
||||
try_consume_counter(Need, Limiter);
|
||||
Need > Capacity ->
|
||||
%% FIXME
|
||||
%% The client should be able to send 4KB of data even if the rate is configured as 2KB/s; it just needs 2s to complete.
|
||||
throw("too big request"); %% FIXME how to deal this?
|
||||
true ->
|
||||
try_reset(Need, Limiter)
|
||||
end;
|
||||
|
||||
consume(Need, #ref{counter = Counter,
|
||||
index = Index,
|
||||
rate = Rate,
|
||||
obtained = Obtained} = Ref) ->
|
||||
Tokens = counters:get(Counter, Index),
|
||||
if Tokens >= Need ->
|
||||
counters:sub(Counter, Index, Need),
|
||||
{ok, Ref#ref{obtained = Obtained + Need}};
|
||||
true ->
|
||||
return_pause(Need - Tokens, Rate, Ref)
|
||||
end.
|
||||
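%% Illustrative only (not part of the original module): a caller-side helper
%% showing how consume/2 results are typically handled. The blocking-sleep
%% retry strategy is an assumption made for this sketch.
consume_with_retry(Need, Client) ->
    case consume(Need, Client) of
        {ok, Client1} ->
            {ok, Client1};
        {pause, PauseMs, Client1} ->
            %% back off for the suggested pause before retrying
            timer:sleep(PauseMs),
            consume_with_retry(Need, Client1)
    end.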
|
||||
%%--------------------------------------------------------------------
|
||||
%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
||||
-spec try_consume_counter(pos_integer(), limiter()) -> consume_result(limiter()).
|
||||
try_consume_counter(Need,
|
||||
#limiter{tokens = Tokens,
|
||||
ref = #ref{counter = Counter,
|
||||
index = Index,
|
||||
obtained = Obtained,
|
||||
rate = CounterRate} = Ref} = Limiter) ->
|
||||
CT = counters:get(Counter, Index),
|
||||
if CT >= Need ->
|
||||
counters:sub(Counter, Index, Need),
|
||||
{ok, Limiter#limiter{tokens = sub(Tokens, Need),
|
||||
ref = Ref#ref{obtained = Obtained + Need}}};
|
||||
true ->
|
||||
return_pause(Need - CT, CounterRate, Limiter)
|
||||
end.
|
||||
|
||||
-spec try_reset(pos_integer(), limiter()) -> consume_result(limiter()).
|
||||
try_reset(Need,
|
||||
#limiter{tokens = Tokens,
|
||||
rate = Rate,
|
||||
lasttime = LastTime,
|
||||
capacity = Capacity} = Limiter) ->
|
||||
Now = ?NOW,
|
||||
Inc = erlang:floor((Now - LastTime) * Rate / emqx_limiter_schema:minimum_period()),
|
||||
Tokens2 = erlang:min(Tokens + Inc, Capacity),
|
||||
if Need > Tokens2 ->
|
||||
return_pause(Need, Rate, Limiter);
|
||||
true ->
|
||||
Limiter2 = Limiter#limiter{tokens = Tokens2,
|
||||
lasttime = Now},
|
||||
try_consume_counter(Need, Limiter2)
|
||||
end.
|
||||
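%% Worked example (assumed figures): with Rate = 2.0 tokens per 100ms
%% minimum period and 500ms elapsed since lasttime, the refill is
%% floor(500 * 2.0 / 100) = 10 tokens, and Tokens + 10 is capped at the
%% bucket's capacity before the shared counter is consulted again.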
|
||||
-spec return_pause(pos_integer(), decimal(), Client) -> pause_result(Client)
|
||||
when Client :: client().
|
||||
return_pause(_, infinity, Limiter) ->
|
||||
%% workaround when emqx_limiter_server's rate is infinity
|
||||
{pause, ?MINIUMN_PAUSE, Limiter};
|
||||
|
||||
return_pause(Diff, Rate, Limiter) ->
|
||||
Pause = erlang:round(Diff * emqx_limiter_schema:minimum_period() / Rate),
|
||||
{pause, erlang:max(Pause, ?MINIUMN_PAUSE), Limiter}.
|
|
@ -1,140 +0,0 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2020-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_limiter_schema).
|
||||
|
||||
-include_lib("typerefl/include/types.hrl").
|
||||
|
||||
-export([ roots/0, fields/1, to_rate/1
|
||||
, to_bucket_rate/1, minimum_period/0]).
|
||||
|
||||
-define(KILOBYTE, 1024).
|
||||
|
||||
-type limiter_type() :: bytes_in
|
||||
| message_in
|
||||
| connection
|
||||
| message_routing.
|
||||
|
||||
-type bucket_name() :: atom().
|
||||
-type zone_name() :: atom().
|
||||
-type rate() :: infinity | float().
|
||||
-type bucket_rate() :: list(infinity | number()).
|
||||
|
||||
-typerefl_from_string({rate/0, ?MODULE, to_rate}).
|
||||
-typerefl_from_string({bucket_rate/0, ?MODULE, to_bucket_rate}).
|
||||
|
||||
-reflect_type([ rate/0
|
||||
, bucket_rate/0
|
||||
]).
|
||||
|
||||
-export_type([limiter_type/0, bucket_name/0, zone_name/0]).
|
||||
|
||||
-import(emqx_schema, [sc/2, map/2]).
|
||||
|
||||
roots() -> [emqx_limiter].
|
||||
|
||||
fields(emqx_limiter) ->
|
||||
[ {bytes_in, sc(ref(limiter), #{})}
|
||||
, {message_in, sc(ref(limiter), #{})}
|
||||
, {connection, sc(ref(limiter), #{})}
|
||||
, {message_routing, sc(ref(limiter), #{})}
|
||||
];
|
||||
|
||||
fields(limiter) ->
|
||||
[ {global, sc(rate(), #{})}
|
||||
, {zone, sc(map("zone name", rate()), #{})}
|
||||
, {bucket, sc(map("bucket id", ref(bucket)),
|
||||
#{desc => "Token Buckets"})}
|
||||
];
|
||||
|
||||
fields(bucket) ->
|
||||
[ {zone, sc(atom(), #{desc => "the zone which the bucket in"})}
|
||||
, {aggregated, sc(bucket_rate(), #{})}
|
||||
, {per_client, sc(bucket_rate(), #{})}
|
||||
].
|
||||
|
||||
%% minimum period is 100ms
|
||||
minimum_period() ->
|
||||
100.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
||||
ref(Field) -> hoconsc:ref(?MODULE, Field).
|
||||
|
||||
to_rate(Str) ->
|
||||
Tokens = [string:trim(T) || T <- string:tokens(Str, "/")],
|
||||
case Tokens of
|
||||
["infinity"] ->
|
||||
{ok, infinity};
|
||||
[Quota, Interval] ->
|
||||
{ok, Val} = to_quota(Quota),
|
||||
case emqx_schema:to_duration_ms(Interval) of
|
||||
{ok, Ms} when Ms > 0 ->
|
||||
{ok, Val * minimum_period() / Ms};
|
||||
_ ->
|
||||
{error, Str}
|
||||
end;
|
||||
_ ->
|
||||
{error, Str}
|
||||
end.
|
||||
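%% Worked example: to_rate("100KB/10s") parses Quota = 100 * 1024 = 102400
%% and Interval = 10000 ms, so the result is 102400 * minimum_period() / 10000
%% = 1024.0 tokens per 100ms period (unit handling as in to_quota/apply_unit below).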
|
||||
to_bucket_rate(Str) ->
|
||||
Tokens = [string:trim(T) || T <- string:tokens(Str, "/,")],
|
||||
case Tokens of
|
||||
[Rate, Capa] ->
|
||||
{ok, infinity} = to_quota(Rate),
|
||||
{ok, CapaVal} = to_quota(Capa),
|
||||
if CapaVal =/= infinity ->
|
||||
{ok, [infinity, CapaVal]};
|
||||
true ->
|
||||
{error, Str}
|
||||
end;
|
||||
[Quota, Interval, Capacity] ->
|
||||
{ok, Val} = to_quota(Quota),
|
||||
case emqx_schema:to_duration_ms(Interval) of
|
||||
{ok, Ms} when Ms > 0 ->
|
||||
{ok, CapaVal} = to_quota(Capacity),
|
||||
{ok, [Val * minimum_period() / Ms, CapaVal]};
|
||||
_ ->
|
||||
{error, Str}
|
||||
end;
|
||||
_ ->
|
||||
{error, Str}
|
||||
end.
|
||||
|
||||
|
||||
to_quota(Str) ->
|
||||
{ok, MP} = re:compile("^\s*(?:(?:([1-9][0-9]*)([a-zA-z]*))|infinity)\s*$"),
|
||||
Result = re:run(Str, MP, [{capture, all_but_first, list}]),
|
||||
case Result of
|
||||
{match, [Quota, Unit]} ->
|
||||
Val = erlang:list_to_integer(Quota),
|
||||
Unit2 = string:to_lower(Unit),
|
||||
{ok, apply_unit(Unit2, Val)};
|
||||
{match, [Quota]} ->
|
||||
{ok, erlang:list_to_integer(Quota)};
|
||||
{match, []} ->
|
||||
{ok, infinity};
|
||||
_ ->
|
||||
{error, Str}
|
||||
end.
|
||||
|
||||
apply_unit("", Val) -> Val;
|
||||
apply_unit("kb", Val) -> Val * ?KILOBYTE;
|
||||
apply_unit("mb", Val) -> Val * ?KILOBYTE * ?KILOBYTE;
|
||||
apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE;
|
||||
apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit).
|
|
@ -1,426 +0,0 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2020-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
%% A hierarchical token bucket algorithm
|
||||
%% Note: this is not the Linux HTB algorithm (http://luxik.cdi.cz/~devik/qos/htb/manual/theory.htm)
|
||||
%% Algorithm:
|
||||
%% 1. the root node periodically generates tokens and then distributes them
|
||||
%% just like the oscillation of water waves
|
||||
%% 2. the leaf node has a counter, which is the place where the token is actually held.
|
||||
%% 3. other nodes only play the role of transmission, and the rate of the node is like a valve,
|
||||
%% limiting the oscillation transmitted from the parent node
|
||||
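%% For intuition (assumed figures, not taken from the implementation below):
%% with a root rate of 100 tokens per period, a zone rate of 50 and a bucket
%% rate of 10, at most min(100, 50, 10) = 10 tokens reach that bucket's
%% counter each period; the zone and root rates only throttle what flows
%% through them on the way down.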
|
||||
-module(emqx_limiter_server).
|
||||
|
||||
-behaviour(gen_server).
|
||||
|
||||
-include_lib("emqx/include/logger.hrl").
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
|
||||
terminate/2, code_change/3, format_status/2]).
|
||||
|
||||
-export([ start_link/1, connect/2, info/2
|
||||
, name/1]).
|
||||
|
||||
-record(root, { rate :: rate() %% number of tokens generated per period
|
||||
, period :: pos_integer() %% token generation interval(second)
|
||||
, childs :: list(node_id()) %% node children
|
||||
, consumed :: non_neg_integer()
|
||||
}).
|
||||
|
||||
-record(zone, { id :: pos_integer()
|
||||
, name :: zone_name()
|
||||
, rate :: rate()
|
||||
, obtained :: non_neg_integer() %% number of tokens obtained
|
||||
, childs :: list(node_id())
|
||||
}).
|
||||
|
||||
-record(bucket, { id :: pos_integer()
|
||||
, name :: bucket_name()
|
||||
, rate :: rate()
|
||||
, obtained :: non_neg_integer()
|
||||
, correction :: emqx_limiter_decimal:zero_or_float() %% token correction value
|
||||
, capacity :: capacity()
|
||||
, counter :: counters:counters_ref()
|
||||
, index :: index()
|
||||
}).
|
||||
|
||||
-record(state, { root :: undefined | root()
|
||||
, counter :: undefined | counters:counters_ref() %% current counter to alloc
|
||||
, index :: index()
|
||||
, zones :: #{zone_name() => node_id()}
|
||||
, nodes :: nodes()
|
||||
, type :: limiter_type()
|
||||
}).
|
||||
|
||||
%% maybe using maps would be better, but records are faster
|
||||
-define(FIELD_OBTAINED, #zone.obtained).
|
||||
-define(GET_FIELD(F, Node), element(F, Node)).
|
||||
-define(CALL(Type, Msg), gen_server:call(name(Type), {?FUNCTION_NAME, Msg})).
|
||||
|
||||
-type node_id() :: pos_integer().
|
||||
-type root() :: #root{}.
|
||||
-type zone() :: #zone{}.
|
||||
-type bucket() :: #bucket{}.
|
||||
-type node_data() :: zone() | bucket().
|
||||
-type nodes() :: #{node_id() => node_data()}.
|
||||
-type zone_name() :: emqx_limiter_schema:zone_name().
|
||||
-type limiter_type() :: emqx_limiter_schema:limiter_type().
|
||||
-type bucket_name() :: emqx_limiter_schema:bucket_name().
|
||||
-type rate() :: decimal().
|
||||
-type flow() :: decimal().
|
||||
-type capacity() :: decimal().
|
||||
-type decimal() :: emqx_limiter_decimal:decimal().
|
||||
-type state() :: #state{}.
|
||||
-type index() :: pos_integer().
|
||||
|
||||
-export_type([index/0]).
|
||||
-import(emqx_limiter_decimal, [add/2, sub/2, mul/2, add_to_counter/3, put_to_counter/3]).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% API
|
||||
%%--------------------------------------------------------------------
|
||||
-spec connect(limiter_type(), bucket_name()) -> emqx_limiter_client:client().
|
||||
connect(Type, Bucket) ->
|
||||
#{zone := Zone,
|
||||
aggregated := [Aggr, Capacity],
|
||||
per_client := [Client, ClientCapa]} = emqx:get_config([emqx_limiter, Type, bucket, Bucket]),
|
||||
case emqx_limiter_manager:find_counter(Type, Zone, Bucket) of
|
||||
{ok, Counter, Idx, Rate} ->
|
||||
if Client =/= infinity andalso (Client < Aggr orelse ClientCapa < Capacity) ->
|
||||
emqx_limiter_client:create(Client, ClientCapa, Counter, Idx, Rate);
|
||||
true ->
|
||||
emqx_limiter_client:make_ref(Counter, Idx, Rate)
|
||||
end;
|
||||
_ ->
|
||||
?LOG(error, "can't find the bucket:~p which type is:~p~n", [Bucket, Type]),
|
||||
throw("invalid bucket")
|
||||
end.
|
||||
|
||||
-spec info(limiter_type(), atom()) -> term().
|
||||
info(Type, Info) ->
|
||||
?CALL(Type, Info).
|
||||
|
||||
-spec name(limiter_type()) -> atom().
|
||||
name(Type) ->
|
||||
erlang:list_to_atom(io_lib:format("~s_~s", [?MODULE, Type])).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @doc
|
||||
%% Starts the server
|
||||
%% @end
|
||||
%%--------------------------------------------------------------------
|
||||
-spec start_link(limiter_type()) -> _.
|
||||
start_link(Type) ->
|
||||
gen_server:start_link({local, name(Type)}, ?MODULE, [Type], []).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%%% gen_server callbacks
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @private
|
||||
%% @doc
|
||||
%% Initializes the server
|
||||
%% @end
|
||||
%%--------------------------------------------------------------------
|
||||
-spec init(Args :: term()) -> {ok, State :: term()} |
|
||||
{ok, State :: term(), Timeout :: timeout()} |
|
||||
{ok, State :: term(), hibernate} |
|
||||
{stop, Reason :: term()} |
|
||||
ignore.
|
||||
init([Type]) ->
|
||||
State = #state{zones = #{},
|
||||
nodes = #{},
|
||||
type = Type,
|
||||
index = 1},
|
||||
State2 = init_tree(Type, State),
|
||||
oscillate(State2#state.root#root.period),
|
||||
{ok, State2}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @private
|
||||
%% @doc
|
||||
%% Handling call messages
|
||||
%% @end
|
||||
%%--------------------------------------------------------------------
|
||||
-spec handle_call(Request :: term(), From :: {pid(), term()}, State :: term()) ->
|
||||
{reply, Reply :: term(), NewState :: term()} |
|
||||
{reply, Reply :: term(), NewState :: term(), Timeout :: timeout()} |
|
||||
{reply, Reply :: term(), NewState :: term(), hibernate} |
|
||||
{noreply, NewState :: term()} |
|
||||
{noreply, NewState :: term(), Timeout :: timeout()} |
|
||||
{noreply, NewState :: term(), hibernate} |
|
||||
{stop, Reason :: term(), Reply :: term(), NewState :: term()} |
|
||||
{stop, Reason :: term(), NewState :: term()}.
|
||||
handle_call(Req, _From, State) ->
|
||||
?LOG(error, "Unexpected call: ~p", [Req]),
|
||||
{reply, ignored, State}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @private
|
||||
%% @doc
|
||||
%% Handling cast messages
|
||||
%% @end
|
||||
%%--------------------------------------------------------------------
|
||||
-spec handle_cast(Request :: term(), State :: term()) ->
|
||||
{noreply, NewState :: term()} |
|
||||
{noreply, NewState :: term(), Timeout :: timeout()} |
|
||||
{noreply, NewState :: term(), hibernate} |
|
||||
{stop, Reason :: term(), NewState :: term()}.
|
||||
handle_cast(Req, State) ->
|
||||
?LOG(error, "Unexpected cast: ~p", [Req]),
|
||||
{noreply, State}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @private
|
||||
%% @doc
|
||||
%% Handling all non call/cast messages
|
||||
%% @end
|
||||
%%--------------------------------------------------------------------
|
||||
-spec handle_info(Info :: timeout() | term(), State :: term()) ->
|
||||
{noreply, NewState :: term()} |
|
||||
{noreply, NewState :: term(), Timeout :: timeout()} |
|
||||
{noreply, NewState :: term(), hibernate} |
|
||||
{stop, Reason :: normal | term(), NewState :: term()}.
|
||||
handle_info(oscillate, State) ->
|
||||
{noreply, oscillation(State)};
|
||||
|
||||
handle_info(Info, State) ->
|
||||
?LOG(error, "Unexpected info: ~p", [Info]),
|
||||
{noreply, State}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @private
|
||||
%% @doc
|
||||
%% This function is called by a gen_server when it is about to
|
||||
%% terminate. It should be the opposite of Module:init/1 and do any
|
||||
%% necessary cleaning up. When it returns, the gen_server terminates
|
||||
%% with Reason. The return value is ignored.
|
||||
%% @end
|
||||
%%--------------------------------------------------------------------
|
||||
-spec terminate(Reason :: normal | shutdown | {shutdown, term()} | term(),
|
||||
State :: term()) -> any().
|
||||
terminate(_Reason, _State) ->
|
||||
ok.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @private
|
||||
%% @doc
|
||||
%% Convert process state when code is changed
|
||||
%% @end
|
||||
%%--------------------------------------------------------------------
|
||||
-spec code_change(OldVsn :: term() | {down, term()},
|
||||
State :: term(),
|
||||
Extra :: term()) -> {ok, NewState :: term()} |
|
||||
{error, Reason :: term()}.
|
||||
code_change(_OldVsn, State, _Extra) ->
|
||||
{ok, State}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% @private
|
||||
%% @doc
|
||||
%% This function is called for changing the form and appearance
|
||||
%% of gen_server status when it is returned from sys:get_status/1,2
|
||||
%% or when it appears in termination error logs.
|
||||
%% @end
|
||||
%%--------------------------------------------------------------------
|
||||
-spec format_status(Opt :: normal | terminate,
|
||||
Status :: list()) -> Status :: term().
|
||||
format_status(_Opt, Status) ->
|
||||
Status.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
||||
oscillate(Interval) ->
|
||||
erlang:send_after(Interval, self(), ?FUNCTION_NAME).
|
||||
|
||||
%% @doc generate tokens, then spread them to the leaf nodes
|
||||
-spec oscillation(state()) -> state().
|
||||
oscillation(#state{root = #root{rate = Flow,
|
||||
period = Interval,
|
||||
childs = ChildIds,
|
||||
consumed = Consumed} = Root,
|
||||
nodes = Nodes} = State) ->
|
||||
oscillate(Interval),
|
||||
Childs = get_orderd_childs(ChildIds, Nodes),
|
||||
{Alloced, Nodes2} = transverse(Childs, Flow, 0, Nodes),
|
||||
State#state{nodes = Nodes2,
|
||||
root = Root#root{consumed = Consumed + Alloced}}.
|
||||
|
||||
%% @doc horizontal spread: allocate the incoming flow across sibling nodes
|
||||
-spec transverse(list(node_data()),
|
||||
flow(),
|
||||
non_neg_integer(),
|
||||
nodes()) -> {non_neg_integer(), nodes()}.
|
||||
transverse([H | T], InFlow, Alloced, Nodes) when InFlow > 0 ->
|
||||
{NodeAlloced, Nodes2} = longitudinal(H, InFlow, Nodes),
|
||||
InFlow2 = sub(InFlow, NodeAlloced),
|
||||
Alloced2 = Alloced + NodeAlloced,
|
||||
transverse(T, InFlow2, Alloced2, Nodes2);
|
||||
|
||||
transverse(_, _, Alloced, Nodes) ->
|
||||
{Alloced, Nodes}.
|
||||
|
||||
%% @doc vertical spread: push tokens from a node down to its children
|
||||
-spec longitudinal(node_data(), flow(), nodes()) ->
|
||||
{non_neg_integer(), nodes()}.
|
||||
longitudinal(#zone{id = Id,
|
||||
rate = Rate,
|
||||
obtained = Obtained,
|
||||
childs = ChildIds} = Node, InFlow, Nodes) ->
|
||||
Flow = erlang:min(InFlow, Rate),
|
||||
|
||||
if Flow > 0 ->
|
||||
Childs = get_orderd_childs(ChildIds, Nodes),
|
||||
{Alloced, Nodes2} = transverse(Childs, Flow, 0, Nodes),
|
||||
if Alloced > 0 ->
|
||||
{Alloced,
|
||||
Nodes2#{Id => Node#zone{obtained = Obtained + Alloced}}};
|
||||
true ->
|
||||
                    %% the children are empty or all of the child counters are full
|
||||
{0, Nodes}
|
||||
end;
|
||||
true ->
|
||||
{0, Nodes}
|
||||
end;
|
||||
|
||||
longitudinal(#bucket{id = Id,
|
||||
rate = Rate,
|
||||
capacity = Capacity,
|
||||
correction = Correction,
|
||||
counter = Counter,
|
||||
index = Index,
|
||||
obtained = Obtained} = Node, InFlow, Nodes) ->
|
||||
Flow = add(erlang:min(InFlow, Rate), Correction),
|
||||
|
||||
Tokens = counters:get(Counter, Index),
|
||||
    %% the token count may be negative (tokens stolen from the future)
|
||||
Avaiable = erlang:min(if Tokens < 0 ->
|
||||
add(Capacity, Tokens);
|
||||
true ->
|
||||
sub(Capacity, Tokens)
|
||||
end, Flow),
|
||||
FixAvaiable = erlang:min(Capacity, Avaiable),
|
||||
if FixAvaiable > 0 ->
|
||||
{Alloced, Decimal} = add_to_counter(Counter, Index, FixAvaiable),
|
||||
|
||||
{Alloced,
|
||||
Nodes#{Id => Node#bucket{obtained = Obtained + Alloced,
|
||||
correction = Decimal}}};
|
||||
true ->
|
||||
{0, Nodes}
|
||||
end.
|
||||
|
||||
-spec get_orderd_childs(list(node_id()), nodes()) -> list(node_data()).
|
||||
get_orderd_childs(Ids, Nodes) ->
|
||||
Childs = [maps:get(Id, Nodes) || Id <- Ids],
|
||||
|
||||
    %% sort by obtained, to avoid starving any node
|
||||
lists:sort(fun(A, B) ->
|
||||
?GET_FIELD(?FIELD_OBTAINED, A) < ?GET_FIELD(?FIELD_OBTAINED, B)
|
||||
end,
|
||||
Childs).
|
||||
|
||||
-spec init_tree(emqx_limiter_schema:limiter_type(), state()) -> state().
|
||||
init_tree(Type, State) ->
|
||||
#{global := Global,
|
||||
zone := Zone,
|
||||
bucket := Bucket} = emqx:get_config([emqx_limiter, Type]),
|
||||
{Factor, Root} = make_root(Global, Zone),
|
||||
State2 = State#state{root = Root},
|
||||
{NodeId, State3} = make_zone(maps:to_list(Zone), Factor, 1, State2),
|
||||
State4 = State3#state{counter = counters:new(maps:size(Bucket),
|
||||
[write_concurrency])},
|
||||
make_bucket(maps:to_list(Bucket), Factor, NodeId, State4).
|
||||
|
||||
-spec make_root(decimal(), hocon:config()) -> {number(), root()}.
|
||||
make_root(Rate, Zone) ->
|
||||
ZoneNum = maps:size(Zone),
|
||||
Childs = lists:seq(1, ZoneNum),
|
||||
MiniPeriod = emqx_limiter_schema:minimum_period(),
|
||||
if Rate >= 1 ->
|
||||
{1, #root{rate = Rate,
|
||||
period = MiniPeriod,
|
||||
childs = Childs,
|
||||
consumed = 0}};
|
||||
true ->
|
||||
Factor = 1 / Rate,
|
||||
{Factor, #root{rate = 1,
|
||||
period = erlang:floor(Factor * MiniPeriod),
|
||||
childs = Childs,
|
||||
consumed = 0}}
|
||||
end.
|
||||
|
||||
make_zone([{Name, Rate} | T], Factor, NodeId, State) ->
|
||||
#state{zones = Zones, nodes = Nodes} = State,
|
||||
Zone = #zone{id = NodeId,
|
||||
name = Name,
|
||||
rate = mul(Rate, Factor),
|
||||
obtained = 0,
|
||||
childs = []},
|
||||
State2 = State#state{zones = Zones#{Name => NodeId},
|
||||
nodes = Nodes#{NodeId => Zone}},
|
||||
make_zone(T, Factor, NodeId + 1, State2);
|
||||
|
||||
make_zone([], _, NodeId, State2) ->
|
||||
{NodeId, State2}.
|
||||
|
||||
make_bucket([{Name, Conf} | T], Factor, NodeId, State) ->
|
||||
#{zone := ZoneName,
|
||||
aggregated := [Rate, Capacity]} = Conf,
|
||||
{Counter, Idx, State2} = alloc_counter(ZoneName, Name, Rate, State),
|
||||
Node = #bucket{ id = NodeId
|
||||
, name = Name
|
||||
, rate = mul(Rate, Factor)
|
||||
, obtained = 0
|
||||
, correction = 0
|
||||
, capacity = Capacity
|
||||
, counter = Counter
|
||||
, index = Idx},
|
||||
State3 = add_zone_child(NodeId, Node, ZoneName, State2),
|
||||
make_bucket(T, Factor, NodeId + 1, State3);
|
||||
|
||||
make_bucket([], _, _, State) ->
|
||||
State.
|
||||
|
||||
-spec alloc_counter(zone_name(), bucket_name(), rate(), state()) ->
|
||||
{counters:counters_ref(), pos_integer(), state()}.
|
||||
alloc_counter(Zone, Bucket, Rate,
|
||||
#state{type = Type, counter = Counter, index = Index} = State) ->
|
||||
Path = emqx_limiter_manager:make_path(Type, Zone, Bucket),
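    %% Reuse the counter slot if this path has already been registered,
    %% otherwise allocate the next free index in the shared counter array.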
|
||||
case emqx_limiter_manager:find_counter(Path) of
|
||||
undefined ->
|
||||
init_counter(Path, Counter, Index,
|
||||
Rate, State#state{index = Index + 1});
|
||||
{ok, ECounter, EIndex, _} ->
|
||||
init_counter(Path, ECounter, EIndex, Rate, State)
|
||||
end.
|
||||
|
||||
init_counter(Path, Counter, Index, Rate, State) ->
|
||||
_ = put_to_counter(Counter, Index, 0),
|
||||
emqx_limiter_manager:insert_counter(Path, Counter, Index, Rate),
|
||||
{Counter, Index, State}.
|
||||
|
||||
-spec add_zone_child(node_id(), bucket(), zone_name(), state()) -> state().
|
||||
add_zone_child(NodeId, Bucket, Name, #state{zones = Zones, nodes = Nodes} = State) ->
|
||||
ZoneId = maps:get(Name, Zones),
|
||||
#zone{childs = Childs} = Zone = maps:get(ZoneId, Nodes),
|
||||
Nodes2 = Nodes#{ZoneId => Zone#zone{childs = [NodeId | Childs]},
|
||||
NodeId => Bucket},
|
||||
State#state{nodes = Nodes2}.
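
%% A minimal usage sketch (assuming a bucket named 'bucket1' of type
%% 'message_routing' exists in the loaded limiter config, as in the test
%% suite below):
%%
%%   Client0 = emqx_limiter_server:connect(message_routing, bucket1),
%%   case emqx_limiter_client:consume(1, Client0) of
%%       {ok, _Client1}        -> ok;                %% token obtained
%%       {pause, MS, _Client1} -> {retry_after, MS}  %% bucket exhausted, retry later
%%   end.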
|
|
@ -1,272 +0,0 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2020-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_limiter_SUITE).
|
||||
|
||||
-compile(export_all).
|
||||
-compile(nowarn_export_all).
|
||||
|
||||
-define(APP, emqx_limiter).
|
||||
|
||||
-include_lib("eunit/include/eunit.hrl").
|
||||
-include_lib("common_test/include/ct.hrl").
|
||||
|
||||
-define(BASE_CONF, <<"""
|
||||
emqx_limiter {
|
||||
bytes_in {global = \"100KB/10s\"
|
||||
zone.default = \"100kB/10s\"
|
||||
zone.external = \"20kB/10s\"
|
||||
bucket.tcp {zone = default
|
||||
aggregated = \"100kB/10s,1Mb\"
|
||||
per_client = \"100KB/10s,10Kb\"}
|
||||
bucket.ssl {zone = external
|
||||
aggregated = \"100kB/10s,1Mb\"
|
||||
per_client = \"100KB/10s,10Kb\"}
|
||||
}
|
||||
|
||||
message_in {global = \"100/10s\"
|
||||
zone.default = \"100/10s\"
|
||||
bucket.bucket1 {zone = default
|
||||
aggregated = \"100/10s,1000\"
|
||||
per_client = \"100/10s,100\"}
|
||||
}
|
||||
|
||||
connection {global = \"100/10s\"
|
||||
zone.default = \"100/10s\"
|
||||
bucket.bucket1 {zone = default
|
||||
aggregated = \"100/10s,100\"
|
||||
per_client = \"100/10s,10\"
|
||||
}
|
||||
}
|
||||
|
||||
message_routing {global = \"100/10s\"
|
||||
zone.default = \"100/10s\"
|
||||
bucket.bucket1 {zone = default
|
||||
aggregated = \"100/10s,100\"
|
||||
per_client = \"100/10s,10\"
|
||||
}
|
||||
}
|
||||
}""">>).
|
||||
|
||||
-define(LOGT(Format, Args), ct:pal("TEST_SUITE: " ++ Format, Args)).
|
||||
|
||||
-record(client_options, { interval :: non_neg_integer()
|
||||
, per_cost :: non_neg_integer()
|
||||
, type :: atom()
|
||||
, bucket :: atom()
|
||||
, lifetime :: non_neg_integer()
|
||||
, rates :: list(tuple())
|
||||
}).
|
||||
|
||||
-record(client_state, { client :: emqx_limiter_client:limiter()
|
||||
, pid :: pid()
|
||||
, got :: non_neg_integer()
|
||||
, options :: #client_options{}}).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Setups
|
||||
%%--------------------------------------------------------------------
|
||||
all() -> emqx_common_test_helpers:all(?MODULE).
|
||||
|
||||
init_per_suite(Config) ->
|
||||
ok = emqx_config:init_load(emqx_limiter_schema, ?BASE_CONF),
|
||||
emqx_common_test_helpers:start_apps([?APP]),
|
||||
Config.
|
||||
|
||||
end_per_suite(_Config) ->
|
||||
emqx_common_test_helpers:stop_apps([?APP]).
|
||||
|
||||
init_per_testcase(_TestCase, Config) ->
|
||||
Config.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Test Cases
|
||||
%%--------------------------------------------------------------------
|
||||
t_un_overload(_) ->
|
||||
Conf = emqx:get_config([emqx_limiter]),
|
||||
Conn = #{global => to_rate("infinity"),
|
||||
zone => #{z1 => to_rate("1000/1s"),
|
||||
z2 => to_rate("1000/1s")},
|
||||
bucket => #{b1 => #{zone => z1,
|
||||
aggregated => to_bucket_rate("100/1s, 500"),
|
||||
per_client => to_bucket_rate("10/1s, 50")},
|
||||
b2 => #{zone => z2,
|
||||
aggregated => to_bucket_rate("500/1s, 500"),
|
||||
per_client => to_bucket_rate("100/1s, infinity")
|
||||
}}},
|
||||
Conf2 = Conf#{connection => Conn},
|
||||
emqx_config:put([emqx_limiter], Conf2),
|
||||
{ok, _} = emqx_limiter_manager:restart_server(connection),
|
||||
|
||||
timer:sleep(200),
|
||||
|
||||
B1C = #client_options{interval = 100,
|
||||
per_cost = 1,
|
||||
type = connection,
|
||||
bucket = b1,
|
||||
lifetime = timer:seconds(3),
|
||||
rates = [{fun erlang:'=<'/2, ["1000/1s", "100/1s"]},
|
||||
{fun erlang:'=:='/2, ["10/1s"]}]},
|
||||
|
||||
B2C = #client_options{interval = 100,
|
||||
per_cost = 10,
|
||||
type = connection,
|
||||
bucket = b2,
|
||||
lifetime = timer:seconds(3),
|
||||
rates = [{fun erlang:'=<'/2, ["1000/1s", "500/1s"]},
|
||||
{fun erlang:'=:='/2, ["100/1s"]}]},
|
||||
|
||||
lists:foreach(fun(_) -> start_client(B1C) end,
|
||||
lists:seq(1, 10)),
|
||||
|
||||
|
||||
lists:foreach(fun(_) -> start_client(B2C) end,
|
||||
lists:seq(1, 5)),
|
||||
|
||||
?assert(check_client_result(10 + 5)).
|
||||
|
||||
t_infinity(_) ->
|
||||
Conf = emqx:get_config([emqx_limiter]),
|
||||
Conn = #{global => to_rate("infinity"),
|
||||
zone => #{z1 => to_rate("1000/1s"),
|
||||
z2 => to_rate("infinity")},
|
||||
bucket => #{b1 => #{zone => z1,
|
||||
aggregated => to_bucket_rate("100/1s, infinity"),
|
||||
per_client => to_bucket_rate("10/1s, 100")},
|
||||
b2 => #{zone => z2,
|
||||
aggregated => to_bucket_rate("infinity, 600"),
|
||||
per_client => to_bucket_rate("100/1s, infinity")
|
||||
}}},
|
||||
Conf2 = Conf#{connection => Conn},
|
||||
emqx_config:put([emqx_limiter], Conf2),
|
||||
{ok, _} = emqx_limiter_manager:restart_server(connection),
|
||||
|
||||
timer:sleep(200),
|
||||
|
||||
B1C = #client_options{interval = 100,
|
||||
per_cost = 1,
|
||||
type = connection,
|
||||
bucket = b1,
|
||||
lifetime = timer:seconds(3),
|
||||
rates = [{fun erlang:'=<'/2, ["1000/1s", "100/1s"]},
|
||||
{fun erlang:'=:='/2, ["10/1s"]}]},
|
||||
|
||||
B2C = #client_options{interval = 100,
|
||||
per_cost = 10,
|
||||
type = connection,
|
||||
bucket = b2,
|
||||
lifetime = timer:seconds(3),
|
||||
rates = [{fun erlang:'=:='/2, ["100/1s"]}]},
|
||||
|
||||
lists:foreach(fun(_) -> start_client(B1C) end,
|
||||
lists:seq(1, 8)),
|
||||
|
||||
lists:foreach(fun(_) -> start_client(B2C) end,
|
||||
lists:seq(1, 4)),
|
||||
|
||||
?assert(check_client_result(8 + 4)).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%%% Internal functions
|
||||
%%--------------------------------------------------------------------
|
||||
start_client(Opts) ->
|
||||
Pid = self(),
|
||||
erlang:spawn(fun() -> enter_client(Opts, Pid) end).
|
||||
|
||||
enter_client(#client_options{type = Type,
|
||||
bucket = Bucket,
|
||||
lifetime = Lifetime} = Opts,
|
||||
Pid) ->
|
||||
erlang:send_after(Lifetime, self(), stop),
|
||||
erlang:send(self(), consume),
|
||||
Client = emqx_limiter_server:connect(Type, Bucket),
|
||||
client_loop(#client_state{client = Client,
|
||||
pid = Pid,
|
||||
got = 0,
|
||||
options = Opts}).
|
||||
|
||||
client_loop(#client_state{client = Client,
|
||||
got = Got,
|
||||
pid = Pid,
|
||||
options = #client_options{interval = Interval,
|
||||
per_cost = PerCost,
|
||||
lifetime = Lifetime,
|
||||
rates = Rates}} = State) ->
|
||||
receive
|
||||
consume ->
|
||||
case emqx_limiter_client:consume(PerCost, Client) of
|
||||
{ok, Client2} ->
|
||||
erlang:send_after(Interval, self(), consume),
|
||||
client_loop(State#client_state{client = Client2,
|
||||
got = Got + PerCost});
|
||||
{pause, MS, Client2} ->
|
||||
erlang:send_after(MS, self(), {resume, erlang:system_time(millisecond)}),
|
||||
client_loop(State#client_state{client = Client2})
|
||||
end;
|
||||
stop ->
|
||||
Rate = Got * emqx_limiter_schema:minimum_period() / Lifetime,
|
||||
?LOGT("Got:~p, Rate is:~p Checks:~p~n", [Got, Rate, Rate]),
|
||||
Check = check_rates(Rate, Rates),
|
||||
erlang:send(Pid, {client, Check});
|
||||
{resume, Begin} ->
|
||||
case emqx_limiter_client:consume(PerCost, Client) of
|
||||
{ok, Client2} ->
|
||||
Now = erlang:system_time(millisecond),
|
||||
Diff = erlang:max(0, Interval - (Now - Begin)),
|
||||
erlang:send_after(Diff, self(), consume),
|
||||
client_loop(State#client_state{client = Client2,
|
||||
got = Got + PerCost});
|
||||
{pause, MS, Client2} ->
|
||||
erlang:send_after(MS, self(), {resume, Begin}),
|
||||
client_loop(State#client_state{client = Client2})
|
||||
end
|
||||
end.
|
||||
|
||||
check_rates(Rate, [{Fun, Rates} | T]) ->
|
||||
case lists:all(fun(E) -> Fun(Rate, to_rate(E)) end, Rates) of
|
||||
true ->
|
||||
check_rates(Rate, T);
|
||||
false ->
|
||||
false
|
||||
end;
|
||||
check_rates(_, _) ->
|
||||
true.
|
||||
|
||||
check_client_result(0) ->
|
||||
true;
|
||||
|
||||
check_client_result(N) ->
|
||||
?LOGT("check_client_result:~p~n", [N]),
|
||||
receive
|
||||
{client, true} ->
|
||||
check_client_result(N - 1);
|
||||
{client, false} ->
|
||||
false;
|
||||
Any ->
|
||||
?LOGT(">>>> other:~p~n", [Any])
|
||||
|
||||
after 3500 ->
|
||||
?LOGT(">>>> timeout~n", []),
|
||||
false
|
||||
end.
|
||||
|
||||
to_rate(Str) ->
|
||||
{ok, Rate} = emqx_limiter_schema:to_rate(Str),
|
||||
Rate.
|
||||
|
||||
to_bucket_rate(Str) ->
|
||||
{ok, Result} = emqx_limiter_schema:to_bucket_rate(Str),
|
||||
Result.
|
|
@ -32,6 +32,7 @@ start() ->
|
|||
os:set_signal(sigterm, handle) %% default is handle
|
||||
end,
|
||||
ok = set_backtrace_depth(),
|
||||
start_sysmon(),
|
||||
ekka:start(),
|
||||
ok = print_otp_version_warning().
|
||||
|
||||
|
@ -54,3 +55,15 @@ print_otp_version_warning() ->
|
|||
?ULOG("WARNING: Running on Erlang/OTP version ~p. Recommended: 23~n",
|
||||
[?OTP_RELEASE]).
|
||||
-endif. % OTP_RELEASE > 22
|
||||
|
||||
start_sysmon() ->
|
||||
case application:get_env(system_monitor, db_hostname) of
|
||||
undefined ->
|
||||
%% If there is no sink for the events, there is no reason
|
||||
%% to run system_monitor_top, ignore it:
|
||||
ok;
|
||||
_ ->
|
||||
application:set_env(system_monitor, callback_mod, system_monitor_pg),
|
||||
_ = application:ensure_all_started(system_monitor, temporary),
|
||||
ok
|
||||
end.
|
||||
|
|
|
@ -61,10 +61,18 @@ do_paginate(Qh, Count, Params, {Module, FormatFun}) ->
|
|||
|
||||
query_handle(Table) when is_atom(Table) ->
|
||||
qlc:q([R || R <- ets:table(Table)]);
|
||||
|
||||
query_handle({Table, Opts}) when is_atom(Table) ->
|
||||
qlc:q([R || R <- ets:table(Table, Opts)]);
|
||||
|
||||
query_handle([Table]) when is_atom(Table) ->
|
||||
qlc:q([R || R <- ets:table(Table)]);
|
||||
|
||||
query_handle([{Table, Opts}]) when is_atom(Table) ->
|
||||
qlc:q([R || R <- ets:table(Table, Opts)]);
|
||||
|
||||
query_handle(Tables) ->
|
||||
qlc:append([qlc:q([E || E <- ets:table(T)]) || T <- Tables]).
|
||||
qlc:append([query_handle(T) || T <- Tables]). %
|
||||
|
||||
query_handle(Table, MatchSpec) when is_atom(Table) ->
|
||||
Options = {traverse, {select, MatchSpec}},
|
||||
|
@ -78,8 +86,16 @@ query_handle(Tables, MatchSpec) ->
|
|||
|
||||
count(Table) when is_atom(Table) ->
|
||||
ets:info(Table, size);
|
||||
|
||||
count({Table, _}) when is_atom(Table) ->
|
||||
ets:info(Table, size);
|
||||
|
||||
count([Table]) when is_atom(Table) ->
|
||||
ets:info(Table, size);
|
||||
|
||||
count([{Table, _}]) when is_atom(Table) ->
|
||||
ets:info(Table, size);
|
||||
|
||||
count(Tables) ->
|
||||
lists:sum([count(T) || T <- Tables]).
|
||||
|
||||
|
|
|
@ -177,6 +177,6 @@ t_keepalive(_Config) ->
|
|||
[Pid] = emqx_cm:lookup_channels(list_to_binary(ClientId)),
|
||||
State = sys:get_state(Pid),
|
||||
ct:pal("~p~n", [State]),
|
||||
?assertEqual(11000, element(2, element(5, element(11, State)))),
|
||||
?assertEqual(11000, element(2, element(5, element(9, State)))),
|
||||
emqtt:disconnect(C1),
|
||||
ok.
|
||||
|
|
|
@ -0,0 +1,7 @@
|
|||
plugins {
|
||||
prebuilt {
|
||||
}
|
||||
external {
|
||||
}
|
||||
install_dir = "{{ platform_plugins_dir }}"
|
||||
}
|
|
@ -0,0 +1,9 @@
|
|||
%% -*- mode: erlang -*-
|
||||
{application, emqx_plugins,
|
||||
[{description, "EMQ X Plugin Management"},
|
||||
{vsn, "0.1.0"},
|
||||
{modules, []},
|
||||
{mod, {emqx_plugins_app,[]}},
|
||||
{applications, [kernel,stdlib,emqx]},
|
||||
{env, []}
|
||||
]}.
|
|
@ -0,0 +1,8 @@
|
|||
%% -*- mode: erlang -*-
|
||||
{"0.1.0",
|
||||
[ {<<".*">>, []}
|
||||
],
|
||||
[
|
||||
{<<".*">>, []}
|
||||
]
|
||||
}.
|
|
@ -16,9 +16,8 @@
|
|||
|
||||
-module(emqx_plugins).
|
||||
|
||||
-include("emqx.hrl").
|
||||
-include("logger.hrl").
|
||||
|
||||
-include_lib("emqx/include/emqx.hrl").
|
||||
-include_lib("emqx/include/logger.hrl").
|
||||
|
||||
-export([ load/0
|
||||
, load/1
|
||||
|
@ -41,7 +40,7 @@
|
|||
%% @doc Load all plugins when the broker started.
|
||||
-spec(load() -> ok | ignore | {error, term()}).
|
||||
load() ->
|
||||
ok = load_ext_plugins(emqx:get_config([plugins, expand_plugins_dir], undefined)).
|
||||
ok = load_ext_plugins(emqx:get_config([plugins, install_dir], undefined)).
|
||||
|
||||
%% @doc Load a Plugin
|
||||
-spec(load(atom()) -> ok | {error, term()}).
|
|
@ -0,0 +1,30 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_plugins_app).
|
||||
|
||||
-behaviour(application).
|
||||
|
||||
-export([ start/2
|
||||
, stop/1
|
||||
]).
|
||||
|
||||
start(_Type, _Args) ->
|
||||
{ok, Sup} = emqx_plugins_sup:start_link(),
|
||||
{ok, Sup}.
|
||||
|
||||
stop(_State) ->
|
||||
ok.
|
|
@ -0,0 +1,99 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_plugins_schema).
|
||||
|
||||
-behaviour(hocon_schema).
|
||||
|
||||
-export([ roots/0
|
||||
, fields/1
|
||||
]).
|
||||
|
||||
-include_lib("typerefl/include/types.hrl").
|
||||
|
||||
roots() -> ["plugins"].
|
||||
|
||||
fields("plugins") ->
|
||||
#{fields => fields(),
|
||||
desc => """
|
||||
Manage EMQ X plugins.
|
||||
<br>
|
||||
Plugins can be pre-built as a part of the EMQ X package,
|
||||
or installed as a standalone package in a location specified by
|
||||
the <code>install_dir</code> config key.
|
||||
<br>
|
||||
The standalone-installed plugins are referred to as 'external' plugins.
|
||||
"""
|
||||
}.
|
||||
|
||||
fields() ->
|
||||
[ {prebuilt, fun prebuilt/1}
|
||||
, {external, fun external/1}
|
||||
, {install_dir, fun install_dir/1}
|
||||
].
|
||||
|
||||
prebuilt(type) -> hoconsc:map("name", boolean());
|
||||
prebuilt(nullable) -> true;
|
||||
prebuilt(T) when T=/= desc -> undefined;
|
||||
prebuilt(desc) -> """
|
||||
A map() from plugin name to a boolean (true | false) flag to indicate
|
||||
whether or not to enable the prebuilt plugin.
|
||||
<br>
|
||||
Most of the prebuilt plugins from 4.x have been converted into built-in features since 5.0.
|
||||
""" ++ prebuilt_plugins() ++
|
||||
"""
|
||||
<br>
|
||||
Enabled plugins are loaded (started) as a part of EMQ X node's boot sequence.
|
||||
Plugins can be loaded on the fly, and enabled from the dashboard UI and/or the CLI.
|
||||
<br>
|
||||
Example config: <code>{emqx_foo_bar: true, emqx_bazz: false}</code>
|
||||
""".
|
||||
|
||||
external(type) -> hoconsc:map("name", string());
|
||||
external(nullable) -> true;
|
||||
external(T) when T =/= desc -> undefined;
|
||||
external(desc) ->
|
||||
"""
|
||||
A map from plugin name to the version string of the plugin to enable.
|
||||
To disable an external plugin, set the value to 'false'.
|
||||
<br>
|
||||
Enabled plugins are loaded (started) as a part of EMQ X node's boot sequence.
|
||||
Plugins can be loaded on the fly, and enabled from the dashboard UI and/or the CLI.
|
||||
<br>
|
||||
Example config: <code>{emqx_extplug1: \"0.1.0\", emqx_extplug2: false}</code>
|
||||
""".
|
||||
|
||||
install_dir(type) -> string();
|
||||
install_dir(nullable) -> true;
|
||||
install_dir(default) -> "plugins"; %% runner's root dir
|
||||
install_dir(T) when T =/= desc -> undefined;
|
||||
install_dir(desc) -> """
|
||||
The directory where the external plugins are installed.
|
||||
The plugin beam files and configuration files should reside in
|
||||
a sub-directory named like <code>emqx_foo_bar-0.1.0</code>.
|
||||
<br>
|
||||
NOTE: For security reasons, this directory should **NOT** be writable
|
||||
by anyone except <code>emqx</code> (or whichever user runs EMQ X).
|
||||
""".
|
||||
|
||||
%% TODO: when we have some prebuilt plugins, change this function to:
|
||||
%% """
|
||||
%% The names should be one of
|
||||
%% - name1
|
||||
%% - name2
|
||||
%% """
|
||||
prebuilt_plugins() ->
|
||||
"So far, we do not have any prebuilt plugins".
|