Merge pull request #8211 from lafirest/fix/limiter_infinity_update

fix(limiter): set maximum value for `infinity` rate and capacity
JianBo He 2022-06-15 20:04:55 +08:00 committed by GitHub
commit 89ff67a1e6
12 changed files with 84 additions and 78 deletions
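In short: rather than carrying the atom `infinity` through rate and capacity calculations, the limiter now clamps both to a large finite sentinel (1 TB). A minimal standalone sketch of the idea; only infinity_value/0 mirrors the value this PR adds to emqx_limiter_schema, while the module name and normalize_capacity/1 are illustrative:

-module(limiter_infinity_sketch).
-export([infinity_value/0, normalize_capacity/1]).

%% the same 1 TB sentinel this PR introduces in emqx_limiter_schema
infinity_value() ->
    1099511627776.

%% any `infinity` capacity collapses to the finite sentinel,
%% so counters and capacities always hold plain integers
normalize_capacity(infinity) -> infinity_value();
normalize_capacity(N) when is_integer(N), N >= 0 -> N.

With that in place, the special-case `infinity` branches in the limiter server and bucket references can be removed, as the diffs below show.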

View File

@@ -1,13 +1,11 @@
 listeners.tcp.default {
     bind = "0.0.0.0:1883"
     max_connections = 1024000
-    limiter.connection = default
 }
 listeners.ssl.default {
     bind = "0.0.0.0:8883"
     max_connections = 512000
-    limiter.connection = default
     ssl_options {
         keyfile = "{{ platform_etc_dir }}/certs/key.pem"
         certfile = "{{ platform_etc_dir }}/certs/cert.pem"
@@ -18,14 +16,12 @@ listeners.ssl.default {
 listeners.ws.default {
     bind = "0.0.0.0:8083"
     max_connections = 1024000
-    limiter.connection = default
     websocket.mqtt_path = "/mqtt"
 }
 listeners.wss.default {
     bind = "0.0.0.0:8084"
     max_connections = 512000
-    limiter.connection = default
     websocket.mqtt_path = "/mqtt"
     ssl_options {
         keyfile = "{{ platform_etc_dir }}/certs/key.pem"
@@ -38,7 +34,6 @@ listeners.wss.default {
 # enabled = false
 # bind = "0.0.0.0:14567"
 # max_connections = 1024000
-# limiter.connection = default
 # keyfile = "{{ platform_etc_dir }}/certs/key.pem"
 # certfile = "{{ platform_etc_dir }}/certs/cert.pem"
 #}

View File

@@ -956,7 +956,7 @@ check_limiter(
         {ok, Limiter2} ->
             WhenOk(Data, Msgs, State#state{limiter = Limiter2});
         {pause, Time, Limiter2} ->
-            ?SLOG(warning, #{
+            ?SLOG(debug, #{
                 msg => "pause_time_dueto_rate_limit",
                 needs => Needs,
                 time_in_ms => Time
@@ -982,8 +982,7 @@ check_limiter(
         _ ->
             %% if there has a retry timer,
             %% cache the operation and execute it after the retry is over
-            %% TODO: maybe we need to set socket to passive if size of queue is very large
-            %% because we queue up lots of ops that checks with the limiters.
+            %% the maximum length of the cache queue is equal to the active_n
             New = #cache{need = Needs, data = Data, next = WhenOk},
             {ok, State#state{limiter_cache = queue:in(New, Cache)}}
     end;
@@ -1006,7 +1005,7 @@ retry_limiter(#state{limiter = Limiter} = State) ->
                 }
             );
         {pause, Time, Limiter2} ->
-            ?SLOG(warning, #{
+            ?SLOG(debug, #{
                 msg => "pause_time_dueto_rate_limit",
                 types => Types,
                 time_in_ms => Time
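The pattern above (mirrored in emqx_ws_connection later in this PR) is easier to read in isolation. A rough, self-contained sketch under simplifying assumptions: do_check/2 stands in for the real limiter check, and the connection state is a plain map rather than emqx_connection's #state{} record.

-module(limiter_backpressure_sketch).
-export([check_limiter/4]).

-record(cache, {need, data, next}).

%% Stand-in for the real limiter check; small requests pass, larger ones pause.
do_check(Needs, Limiter) when Needs =< 100 ->
    {ok, Limiter};
do_check(_Needs, Limiter) ->
    {pause, 100, Limiter}.

%% No retry timer running: check now; on pause, start a retry timer and
%% (as in the diff) log at debug level, since pauses are expected under load.
check_limiter(Needs, Data, WhenOk,
              #{limiter_timer := undefined, limiter := Limiter} = State) ->
    case do_check(Needs, Limiter) of
        {ok, Limiter2} ->
            WhenOk(Data, State#{limiter := Limiter2});
        {pause, Time, Limiter2} ->
            TRef = erlang:start_timer(Time, self(), limit_timeout),
            {ok, State#{limiter := Limiter2, limiter_timer := TRef}}
    end;
%% A retry timer is already running: cache the operation instead; the queue
%% holds at most active_n entries, so it stays bounded.
check_limiter(Needs, Data, WhenOk, #{limiter_cache := Cache} = State) ->
    New = #cache{need = Needs, data = Data, next = WhenOk},
    {ok, State#{limiter_cache := queue:in(New, Cache)}}.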

View File

@@ -1,11 +0,0 @@
-limiter {
-    connection {
-        rate = "1000/s"
-        bucket {
-            default {
-                rate = "1000/s"
-                capacity = 1000
-            }
-        }
-    }
-}

View File

@@ -50,13 +50,7 @@
 %%--------------------------------------------------------------------
 %% API
 %%--------------------------------------------------------------------
--spec new(
-    undefined | counters:countres_ref(),
-    undefined | index(),
-    rate()
-) -> bucket_ref().
-new(undefined, _, _) ->
-    infinity;
+-spec new(counters:countres_ref(), index(), rate()) -> bucket_ref().
 new(Counter, Index, Rate) ->
     #{
         counter => Counter,

View File

@@ -30,7 +30,8 @@
     namespace/0,
     get_bucket_cfg_path/2,
     desc/1,
-    types/0
+    types/0,
+    infinity_value/0
 ]).
 
 -define(KILOBYTE, 1024).
@@ -46,7 +47,7 @@
 -type rate() :: infinity | float().
 -type burst_rate() :: 0 | float().
 %% the capacity of the token bucket
--type capacity() :: infinity | number().
+-type capacity() :: non_neg_integer().
 %% initial capacity of the token bucket
 -type initial() :: non_neg_integer().
 -type bucket_path() :: list(atom()).
@@ -88,7 +89,7 @@ fields(limiter) ->
         {Type,
             ?HOCON(?R_REF(limiter_opts), #{
                 desc => ?DESC(Type),
-                default => #{}
+                default => make_limiter_default(Type)
             })}
      || Type <- types()
     ];
@@ -207,6 +208,18 @@ types() ->
 %% Internal functions
 %%--------------------------------------------------------------------
 
+%% `infinity` to `infinity_value` rules:
+%% 1. all infinity capacity will change to infinity_value
+%% 2. if the rate of global and bucket both are `infinity`,
+%%    use `infinity_value` as bucket rate. see `emqx_limiter_server:get_counter_rate/2`
+infinity_value() ->
+    %% 1 TB
+    1099511627776.
+
+%%--------------------------------------------------------------------
+%% Internal functions
+%%--------------------------------------------------------------------
+
 to_burst_rate(Str) ->
     to_rate(Str, false, true).
@@ -294,7 +307,7 @@ to_quota(Str, Regex) ->
         {match, [Quota, ""]} ->
             {ok, erlang:list_to_integer(Quota)};
         {match, ""} ->
-            {ok, infinity};
+            {ok, infinity_value()};
         _ ->
             {error, Str}
     end
@@ -308,3 +321,17 @@ apply_unit("kb", Val) -> Val * ?KILOBYTE;
 apply_unit("mb", Val) -> Val * ?KILOBYTE * ?KILOBYTE;
 apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE;
 apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit).
+
+make_limiter_default(connection) ->
+    #{
+        <<"rate">> => <<"1000/s">>,
+        <<"bucket">> => #{
+            <<"default">> =>
+                #{
+                    <<"rate">> => <<"1000/s">>,
+                    <<"capacity">> => 1000
+                }
+        }
+    };
+make_limiter_default(_) ->
+    #{}.
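The practical effect of the schema change can be checked from an attached Erlang shell; the expected values below are taken from the test updates later in this PR, assuming to_capacity/1 is exported as the test suite suggests:

1> emqx_limiter_schema:infinity_value().
1099511627776
2> emqx_limiter_schema:to_capacity("infinity").
{ok,1099511627776}
3> emqx_limiter_schema:to_capacity("100KB").
{ok,102400}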

View File

@@ -112,24 +112,26 @@
 %% If no bucket path is set in config, there will be no limit
 connect(_Type, undefined) ->
     {ok, emqx_htb_limiter:make_infinity_limiter()};
+%% Workaround.
+%% After API updated some config, the bucket name maybe become '' (converted from empty binary)
+connect(_Type, '') ->
+    {ok, emqx_htb_limiter:make_infinity_limiter()};
 connect(Type, BucketName) when is_atom(BucketName) ->
     case get_bucket_cfg(Type, BucketName) of
         undefined ->
             ?SLOG(error, #{msg => "bucket_config_not_found", type => Type, bucket => BucketName}),
             {error, config_not_found};
         #{
-            rate := AggrRate,
-            capacity := AggrSize,
+            rate := BucketRate,
+            capacity := BucketSize,
             per_client := #{rate := CliRate, capacity := CliSize} = Cfg
         } ->
             case emqx_limiter_manager:find_bucket(Type, BucketName) of
                 {ok, Bucket} ->
                     {ok,
                         if
-                            CliRate < AggrRate orelse CliSize < AggrSize ->
+                            CliRate < BucketRate orelse CliSize < BucketSize ->
                                 emqx_htb_limiter:make_token_bucket_limiter(Cfg, Bucket);
-                            Bucket =:= infinity ->
-                                emqx_htb_limiter:make_infinity_limiter();
                             true ->
                                 emqx_htb_limiter:make_ref_limiter(Cfg, Bucket)
                         end};
@@ -372,9 +374,6 @@ longitudinal(
     case lists:min([ShouldAlloc, Flow, Capacity]) of
         Available when Available > 0 ->
-            %% XXX if capacity is infinity, and flow always > 0, the value in
-            %% counter will be overflow at some point in the future, do we need
-            %% to deal with this situation???
             {Inc, Bucket2} = emqx_limiter_correction:add(Available, Bucket),
             counters:add(Counter, Index, Inc),
@@ -491,26 +490,14 @@ make_root(#{rate := Rate, burst := Burst}) ->
 make_bucket([{Name, Conf} | T], Type, GlobalCfg, CounterNum, DelayBuckets) ->
     Path = emqx_limiter_manager:make_path(Type, Name),
-    case get_counter_rate(Conf, GlobalCfg) of
-        infinity ->
-            Rate = infinity,
-            Capacity = infinity,
-            Initial = 0,
-            Ref = emqx_limiter_bucket_ref:new(undefined, undefined, Rate),
-            emqx_limiter_manager:insert_bucket(Path, Ref),
-            CounterNum2 = CounterNum,
-            InitFun = fun(#{name := BucketName} = Bucket, #{buckets := Buckets} = State) ->
-                State#{buckets := Buckets#{BucketName => Bucket}}
-            end;
-        Rate ->
-            #{capacity := Capacity} = Conf,
-            Initial = get_initial_val(Conf),
-            CounterNum2 = CounterNum + 1,
-            InitFun = fun(#{name := BucketName} = Bucket, #{buckets := Buckets} = State) ->
-                {Counter, Idx, State2} = alloc_counter(Path, Rate, Initial, State),
-                Bucket2 = Bucket#{counter := Counter, index := Idx},
-                State2#{buckets := Buckets#{BucketName => Bucket2}}
-            end
+    Rate = get_counter_rate(Conf, GlobalCfg),
+    #{capacity := Capacity} = Conf,
+    Initial = get_initial_val(Conf),
+    CounterNum2 = CounterNum + 1,
+    InitFun = fun(#{name := BucketName} = Bucket, #{buckets := Buckets} = State) ->
+        {Counter, Idx, State2} = alloc_counter(Path, Rate, Initial, State),
+        Bucket2 = Bucket#{counter := Counter, index := Idx},
+        State2#{buckets := Buckets#{BucketName => Bucket2}}
     end,
 
     Bucket = #{
@@ -569,22 +556,27 @@ init_counter(Path, Counter, Index, Rate, Initial, State) ->
 %% @doc find first limited node
 get_counter_rate(#{rate := Rate}, _GlobalCfg) when Rate =/= infinity ->
     Rate;
-get_counter_rate(_Cfg, #{rate := Rate}) ->
-    Rate.
+get_counter_rate(_Cfg, #{rate := Rate}) when Rate =/= infinity ->
+    Rate;
+get_counter_rate(_Cfg, _GlobalCfg) ->
+    emqx_limiter_schema:infinity_value().
 
 -spec get_initial_val(hocons:config()) -> decimal().
-get_initial_val(#{
-    initial := Initial,
-    rate := Rate,
-    capacity := Capacity
-}) ->
+get_initial_val(
+    #{
+        initial := Initial,
+        rate := Rate,
+        capacity := Capacity
+    }
+) ->
     %% initial will nevner be infinity(see the emqx_limiter_schema)
+    InfVal = emqx_limiter_schema:infinity_value(),
     if
         Initial > 0 ->
             Initial;
         Rate =/= infinity ->
            erlang:min(Rate, Capacity);
-        Capacity =/= infinity ->
+        Capacity =/= infinity andalso Capacity =/= InfVal ->
            Capacity;
         true ->
            0
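Condensed, the new counter-rate rule is: use a finite bucket rate if there is one, otherwise a finite global rate, otherwise the finite sentinel. A standalone restatement for reference; counter_rate/2 is an illustrative name, the real clause order lives in get_counter_rate/2 above.

-module(counter_rate_sketch).
-export([counter_rate/2]).

%% Prefer a finite bucket rate, then a finite global rate, then the sentinel.
counter_rate(#{rate := Rate}, _GlobalCfg) when Rate =/= infinity ->
    Rate;
counter_rate(_BucketCfg, #{rate := Rate}) when Rate =/= infinity ->
    Rate;
counter_rate(_BucketCfg, _GlobalCfg) ->
    %% both levels are `infinity`: fall back to the finite sentinel so a
    %% real counter can still be allocated for the bucket
    emqx_limiter_schema:infinity_value().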

View File

@@ -1614,7 +1614,7 @@ base_listener(Bind) ->
             map("ratelimit_name", emqx_limiter_schema:bucket_name()),
             #{
                 desc => ?DESC(base_listener_limiter),
-                default => #{}
+                default => #{<<"connection">> => <<"default">>}
             }
         )}
     ].

View File

@@ -586,7 +586,7 @@ check_limiter(
         {ok, Limiter2} ->
             WhenOk(Data, Msgs, State#state{limiter = Limiter2});
         {pause, Time, Limiter2} ->
-            ?SLOG(warning, #{
+            ?SLOG(debug, #{
                 msg => "pause_time_due_to_rate_limit",
                 needs => Needs,
                 time_in_ms => Time
@@ -634,7 +634,7 @@ retry_limiter(#state{limiter = Limiter} = State) ->
                 }
             );
         {pause, Time, Limiter2} ->
-            ?SLOG(warning, #{
+            ?SLOG(debug, #{
                 msg => "pause_time_due_to_rate_limit",
                 types => Types,
                 time_in_ms => Time

View File

@@ -468,6 +468,8 @@ t_handle_in_qos1_publish(_) ->
 t_handle_in_qos2_publish(_) ->
     ok = meck:expect(emqx_broker, publish, fun(_) -> [{node(), <<"topic">>, {ok, 1}}] end),
     Channel = channel(#{conn_state => connected, session => session()}),
+    %% waiting limiter server
+    timer:sleep(200),
     Publish1 = ?PUBLISH_PACKET(?QOS_2, <<"topic">>, 1, <<"payload">>),
     {ok, ?PUBREC_PACKET(1, ?RC_SUCCESS), Channel1} =
         emqx_channel:handle_in(Publish1, Channel),
@@ -482,6 +484,8 @@ t_handle_in_qos2_publish_with_error_return(_) ->
     ok = meck:expect(emqx_broker, publish, fun(_) -> [] end),
     Session = session(#{max_awaiting_rel => 2, awaiting_rel => #{1 => 1}}),
     Channel = channel(#{conn_state => connected, session => Session}),
+    %% waiting limiter server
+    timer:sleep(200),
     Publish1 = ?PUBLISH_PACKET(?QOS_2, <<"topic">>, 1, <<"payload">>),
     {ok, ?PUBREC_PACKET(1, ?RC_PACKET_IDENTIFIER_IN_USE), Channel} =
         emqx_channel:handle_in(Publish1, Channel),

View File

@@ -246,7 +246,8 @@ t_infinity_client(_) ->
     end,
     Case = fun() ->
         Client = connect(default),
-        ?assertEqual(infinity, Client),
+        InfVal = emqx_limiter_schema:infinity_value(),
+        ?assertMatch(#{bucket := #{rate := InfVal}}, Client),
         Result = emqx_htb_limiter:check(100000, Client),
         ?assertEqual({ok, Client}, Result)
     end,
@@ -596,7 +597,7 @@ t_schema_unit(_) ->
     ?assertMatch({error, _}, M:to_rate("100MB/1")),
     ?assertMatch({error, _}, M:to_rate("100/10x")),
-    ?assertEqual({ok, infinity}, M:to_capacity("infinity")),
+    ?assertEqual({ok, emqx_limiter_schema:infinity_value()}, M:to_capacity("infinity")),
     ?assertEqual({ok, 100}, M:to_capacity("100")),
     ?assertEqual({ok, 100 * 1024}, M:to_capacity("100KB")),
     ?assertEqual({ok, 100 * 1024 * 1024}, M:to_capacity("100MB")),

View File

@@ -201,6 +201,7 @@ init([]) ->
 handle_call({update_config, NewConf, OldConf}, _, State) ->
     State2 = update_config(State, NewConf, OldConf),
+    emqx_retainer_dispatcher:refresh_limiter(NewConf),
     {reply, ok, State2};
 handle_call(clean, _, #{context := Context} = State) ->
     clean(Context),

View File

@@ -26,6 +26,7 @@
     start_link/2,
     dispatch/2,
     refresh_limiter/0,
+    refresh_limiter/1,
     wait_dispatch_complete/1,
     worker/0
 ]).
@@ -51,13 +52,16 @@
 dispatch(Context, Topic) ->
     cast({?FUNCTION_NAME, Context, self(), Topic}).
 
-%% sometimes it is necessary to reset the client's limiter after updated the limiter's config
-%% an limiter update handler maybe added later, now this is a workaround
+%% reset the client's limiter after updated the limiter's config
 refresh_limiter() ->
+    Conf = emqx:get_config([retainer]),
+    refresh_limiter(Conf).
+
+refresh_limiter(Conf) ->
     Workers = gproc_pool:active_workers(?POOL),
     lists:foreach(
         fun({_, Pid}) ->
-            gen_server:cast(Pid, ?FUNCTION_NAME)
+            gen_server:cast(Pid, {?FUNCTION_NAME, Conf})
        end,
         Workers
     ).
@@ -150,8 +154,8 @@ handle_call(Req, _From, State) ->
 handle_cast({dispatch, Context, Pid, Topic}, #{limiter := Limiter} = State) ->
     {ok, Limiter2} = dispatch(Context, Pid, Topic, undefined, Limiter),
     {noreply, State#{limiter := Limiter2}};
-handle_cast(refresh_limiter, State) ->
-    BucketName = emqx_conf:get([retainer, flow_control, batch_deliver_limiter]),
+handle_cast({refresh_limiter, Conf}, State) ->
+    BucketName = emqx_map_lib:deep_get([flow_control, batch_deliver_limiter], Conf, undefined),
     {ok, Limiter} = emqx_limiter_server:connect(batch, BucketName),
     {noreply, State#{limiter := Limiter}};
 handle_cast(Msg, State) ->
@@ -273,7 +277,7 @@ do_deliver(Msgs, DeliverNum, Pid, Topic, Limiter) ->
             do_deliver(ToDelivers, Pid, Topic),
             do_deliver(Msgs2, DeliverNum, Pid, Topic, Limiter2);
         {drop, _} = Drop ->
-            ?SLOG(error, #{
+            ?SLOG(debug, #{
                 msg => "retained_message_dropped",
                 reason => "reached_ratelimit",
                 dropped_count => length(ToDelivers)
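Putting the retainer hunks together: a config update now hands the freshly validated map to every dispatcher worker instead of letting each worker re-read global config. A condensed sketch of what one worker ends up doing; refresh_flow/1 is an illustrative wrapper, while deep_get/3 and connect/2 are the calls used in the diff above.

-module(retainer_refresh_sketch).
-export([refresh_flow/1]).

refresh_flow(NewConf) ->
    %% the updated retainer config is passed in explicitly, so workers
    %% never race against a half-applied global config
    BucketName = emqx_map_lib:deep_get(
        [flow_control, batch_deliver_limiter], NewConf, undefined
    ),
    %% each worker reconnects its `batch` limiter against the new bucket
    emqx_limiter_server:connect(batch, BucketName).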