chore: merge upstream/master

commit dc78ecb41c

 Makefile | 3
--- a/Makefile
+++ b/Makefile
@@ -6,7 +6,7 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2
 export EMQX_DEFAULT_RUNNER = debian:11-slim
 export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
 export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
-export EMQX_DASHBOARD_VERSION ?= v1.2.1
+export EMQX_DASHBOARD_VERSION ?= v1.2.2
 export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6-beta.1
 export EMQX_REL_FORM ?= tgz
 export QUICER_DOWNLOAD_FROM_RELEASE = 1
@@ -239,7 +239,6 @@ $(foreach zt,$(ALL_DOCKERS),$(eval $(call gen-docker-target,$(zt))))
 .PHONY:
 merge-config:
 	@$(SCRIPTS)/merge-config.escript
-	@$(SCRIPTS)/merge-i18n.escript
 
 ## elixir target is to create release packages using Elixir's Mix
 .PHONY: $(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir)
@@ -1,4 +1,4 @@
-%% This additional config file is used when the config 'cluster.proto_dis' in emqx.conf is set to 'inet_tls'.
+%% This additional config file is used when the config 'cluster.proto_dist' in emqx.conf is set to 'inet_tls'.
 %% Which means the EMQX nodes will connect to each other over TLS.
 %% For more information about inter-broker security, see: https://docs.emqx.com/en/enterprise/v5.0/deploy/cluster/security.html
 
@@ -57,16 +57,16 @@
 -define(ERROR_CODES, [
     {?BAD_USERNAME_OR_PWD, <<"Bad username or password">>},
     {?BAD_API_KEY_OR_SECRET, <<"Bad API key or secret">>},
-    {'BAD_REQUEST', <<"Request parameters are not legal">>},
+    {'BAD_REQUEST', <<"Request parameters are invalid">>},
     {'NOT_MATCH', <<"Conditions are not matched">>},
     {'ALREADY_EXISTS', <<"Resource already existed">>},
-    {'BAD_CONFIG_SCHEMA', <<"Configuration data is not legal">>},
+    {'BAD_CONFIG_SCHEMA', <<"Configuration data is invalid">>},
     {'BAD_LISTENER_ID', <<"Bad listener ID">>},
     {'BAD_NODE_NAME', <<"Bad Node Name">>},
     {'BAD_RPC', <<"RPC Failed. Check the cluster status and the requested node status">>},
     {'BAD_TOPIC', <<"Topic syntax error, Topic needs to comply with the MQTT protocol standard">>},
     {'EXCEED_LIMIT', <<"Create resources that exceed the maximum limit or minimum limit">>},
-    {'INVALID_PARAMETER', <<"Request parameters is not legal and exceeds the boundary value">>},
+    {'INVALID_PARAMETER', <<"Request parameters is invalid and exceeds the boundary value">>},
     {'CONFLICT', <<"Conflicting request resources">>},
     {'NO_DEFAULT_VALUE', <<"Request parameters do not use default values">>},
     {'DEPENDENCY_EXISTS', <<"Resource is dependent by another resource">>},
@@ -29,7 +29,7 @@
     {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}},
     {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.6"}}},
     {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
-    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.38.1"}}},
+    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.2"}}},
     {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}},
     {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
     {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
@@ -30,6 +30,12 @@
     stop/0
 ]).
 
+%% Cluster API
+-export([
+    cluster_nodes/1,
+    running_nodes/0
+]).
+
 %% PubSub API
 -export([
     subscribe/1,
@@ -102,6 +108,18 @@ is_running() ->
         _ -> true
     end.
 
+%%--------------------------------------------------------------------
+%% Cluster API
+%%--------------------------------------------------------------------
+
+-spec running_nodes() -> [node()].
+running_nodes() ->
+    mria:running_nodes().
+
+-spec cluster_nodes(all | running | cores | stopped) -> [node()].
+cluster_nodes(Type) ->
+    mria:cluster_nodes(Type).
+
 %%--------------------------------------------------------------------
 %% PubSub API
 %%--------------------------------------------------------------------
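The new running_nodes/0 and cluster_nodes/1 wrappers expose mria's cluster view through the emqx facade. A minimal usage sketch (the function names and specs come from the hunk above; the surrounding function is illustrative only):

    %% Sketch: querying the cluster through the new API.
    print_cluster() ->
        Running = emqx:running_nodes(),        %% -> [node()]
        Stopped = emqx:cluster_nodes(stopped), %% all | running | cores | stopped
        io:format("running: ~p~nstopped: ~p~n", [Running, Stopped]).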
@@ -182,10 +182,8 @@
 -define(ALARM_SOCK_STATS_KEYS, [send_pend, recv_cnt, recv_oct, send_cnt, send_oct]).
 -define(ALARM_SOCK_OPTS_KEYS, [high_watermark, high_msgq_watermark, sndbuf, recbuf, buffer]).
 
-%% use macro to do compile time limiter's type check
--define(LIMITER_BYTES_IN, bytes_in).
--define(LIMITER_MESSAGE_IN, message_in).
--define(EMPTY_QUEUE, {[], []}).
+-define(LIMITER_BYTES_IN, bytes).
+-define(LIMITER_MESSAGE_IN, messages).
 
 -dialyzer({no_match, [info/2]}).
 -dialyzer(
@@ -139,7 +139,8 @@ make_token_bucket_limiter(Cfg, Bucket) ->
     Cfg#{
         tokens => emqx_limiter_server:get_initial_val(Cfg),
         lasttime => ?NOW,
-        bucket => Bucket
+        bucket => Bucket,
+        capacity => emqx_limiter_schema:calc_capacity(Cfg)
     }.
 
 %%@doc create a limiter server's reference
@@ -23,6 +23,7 @@
 %% API
 -export([
     new/3,
+    infinity_bucket/0,
     check/3,
     try_restore/2,
     available/1
@@ -58,6 +59,10 @@ new(Counter, Index, Rate) ->
         rate => Rate
     }.
 
+-spec infinity_bucket() -> bucket_ref().
+infinity_bucket() ->
+    infinity.
+
 %% @doc check tokens
 -spec check(pos_integer(), bucket_ref(), Disivisble :: boolean()) ->
     HasToken ::
@@ -31,20 +31,20 @@
     get_bucket_cfg_path/2,
     desc/1,
     types/0,
-    infinity_value/0
+    calc_capacity/1
 ]).
 
 -define(KILOBYTE, 1024).
 -define(BUCKET_KEYS, [
-    {bytes_in, bucket_infinity},
-    {message_in, bucket_infinity},
+    {bytes, bucket_infinity},
+    {messages, bucket_infinity},
     {connection, bucket_limit},
     {message_routing, bucket_infinity}
 ]).
 
 -type limiter_type() ::
-    bytes_in
-    | message_in
+    bytes
+    | messages
     | connection
     | message_routing
     %% internal limiter for unclassified resources
@@ -90,14 +90,17 @@
 
 namespace() -> limiter.
 
-roots() -> [limiter].
+roots() ->
+    [{limiter, hoconsc:mk(hoconsc:ref(?MODULE, limiter), #{importance => ?IMPORTANCE_HIDDEN})}].
 
 fields(limiter) ->
     [
         {Type,
             ?HOCON(?R_REF(node_opts), #{
                 desc => ?DESC(Type),
-                default => #{}
+                default => #{},
+                importance => ?IMPORTANCE_HIDDEN,
+                aliases => alias_of_type(Type)
             })}
      || Type <- types()
     ] ++
@@ -107,6 +110,7 @@ fields(limiter) ->
                 ?R_REF(client_fields),
                 #{
                     desc => ?DESC(client),
+                    importance => ?IMPORTANCE_HIDDEN,
                     default => maps:from_list([
                         {erlang:atom_to_binary(Type), #{}}
                      || Type <- types()
@@ -124,30 +128,50 @@ fields(node_opts) ->
         })}
     ];
 fields(client_fields) ->
-    [
-        {Type,
-            ?HOCON(?R_REF(client_opts), #{
-                desc => ?DESC(Type),
-                default => #{}
-            })}
-     || Type <- types()
-    ];
+    client_fields(types(), #{default => #{}});
 fields(bucket_infinity) ->
     [
         {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"infinity">>})},
-        {capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => <<"infinity">>})},
-        {initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})}
+        {burst,
+            ?HOCON(capacity(), #{
+                desc => ?DESC(capacity),
+                default => <<"0">>,
+                importance => ?IMPORTANCE_HIDDEN,
+                aliases => [capacity]
+            })},
+        {initial,
+            ?HOCON(initial(), #{
+                default => <<"0">>,
+                desc => ?DESC(initial),
+                importance => ?IMPORTANCE_HIDDEN
+            })}
     ];
 fields(bucket_limit) ->
     [
         {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"1000/s">>})},
-        {capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => <<"1000">>})},
-        {initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})}
+        {burst,
+            ?HOCON(capacity(), #{
+                desc => ?DESC(burst),
+                default => <<"0">>,
+                importance => ?IMPORTANCE_HIDDEN,
+                aliases => [capacity]
+            })},
+        {initial,
+            ?HOCON(initial(), #{
+                default => <<"0">>,
+                desc => ?DESC(initial),
+                importance => ?IMPORTANCE_HIDDEN
+            })}
     ];
 fields(client_opts) ->
     [
         {rate, ?HOCON(rate(), #{default => <<"infinity">>, desc => ?DESC(rate)})},
-        {initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})},
+        {initial,
+            ?HOCON(initial(), #{
+                default => <<"0">>,
+                desc => ?DESC(initial),
+                importance => ?IMPORTANCE_HIDDEN
+            })},
         %% low_watermark add for emqx_channel and emqx_session
         %% both modules consume first and then check
         %% so we need to use this value to prevent excessive consumption
@@ -157,20 +181,24 @@ fields(client_opts) ->
                 initial(),
                 #{
                     desc => ?DESC(low_watermark),
-                    default => <<"0">>
+                    default => <<"0">>,
+                    importance => ?IMPORTANCE_HIDDEN
                 }
             )},
-        {capacity,
+        {burst,
             ?HOCON(capacity(), #{
-                desc => ?DESC(client_bucket_capacity),
-                default => <<"infinity">>
+                desc => ?DESC(burst),
+                default => <<"0">>,
+                importance => ?IMPORTANCE_HIDDEN,
+                aliases => [capacity]
             })},
         {divisible,
             ?HOCON(
                 boolean(),
                 #{
                     desc => ?DESC(divisible),
-                    default => false
+                    default => false,
+                    importance => ?IMPORTANCE_HIDDEN
                 }
             )},
         {max_retry_time,
@@ -178,7 +206,8 @@ fields(client_opts) ->
                 emqx_schema:duration(),
                 #{
                     desc => ?DESC(max_retry_time),
-                    default => <<"10s">>
+                    default => <<"10s">>,
+                    importance => ?IMPORTANCE_HIDDEN
                 }
             )},
         {failure_strategy,
@@ -186,16 +215,18 @@ fields(client_opts) ->
                 failure_strategy(),
                 #{
                     desc => ?DESC(failure_strategy),
-                    default => force
+                    default => force,
+                    importance => ?IMPORTANCE_HIDDEN
                 }
             )}
     ];
 fields(listener_fields) ->
-    bucket_fields(?BUCKET_KEYS, listener_client_fields);
+    composite_bucket_fields(?BUCKET_KEYS, listener_client_fields);
 fields(listener_client_fields) ->
-    client_fields(?BUCKET_KEYS);
+    {Types, _} = lists:unzip(?BUCKET_KEYS),
+    client_fields(Types, #{required => false});
 fields(Type) ->
-    bucket_field(Type).
+    simple_bucket_field(Type).
 
 desc(limiter) ->
     "Settings for the rate limiter.";
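Because every renamed field declares `aliases => [capacity]`, configs written against the old schema keep loading. A hypothetical bucket section showing both spellings (the key names follow the schema above; the values are made up):

    bytes {
      rate = "500KB/s"
      burst = "100KB"       # new field name
      # capacity = "100KB"  # legacy name, mapped onto burst by the alias
    }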
@@ -230,19 +261,14 @@ get_bucket_cfg_path(Type, BucketName) ->
     [limiter, Type, bucket, BucketName].
 
 types() ->
-    [bytes_in, message_in, connection, message_routing, internal].
+    [bytes, messages, connection, message_routing, internal].
 
-%%--------------------------------------------------------------------
-%% Internal functions
-%%--------------------------------------------------------------------
-
-%% `infinity` to `infinity_value` rules:
-%% 1. all infinity capacity will change to infinity_value
-%% 2. if the rate of global and bucket both are `infinity`,
-%%    use `infinity_value` as bucket rate. see `emqx_limiter_server:get_counter_rate/2`
-infinity_value() ->
-    %% 1 TB
-    1099511627776.
+calc_capacity(#{rate := infinity}) ->
+    infinity;
+calc_capacity(#{burst := infinity}) ->
+    infinity;
+calc_capacity(#{rate := Rate, burst := Burst}) ->
+    erlang:floor(1000 * Rate / default_period()) + Burst.
 
 %%--------------------------------------------------------------------
 %% Internal functions
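calc_capacity/1 derives the bucket size from rate and burst rather than taking a user-supplied capacity. Assuming default_period() returns 100 (milliseconds; it is not shown in this hunk), a rate of 100 tokens per period with a burst of 50 works out as:

    1> erlang:floor(1000 * 100 / 100) + 50.
    1050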
@@ -335,7 +361,7 @@ to_quota(Str, Regex) ->
         {match, [Quota, ""]} ->
             {ok, erlang:list_to_integer(Quota)};
         {match, ""} ->
-            {ok, infinity_value()};
+            {ok, infinity};
         _ ->
             {error, Str}
     end
@@ -350,7 +376,8 @@ apply_unit("mb", Val) -> Val * ?KILOBYTE * ?KILOBYTE;
 apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE;
 apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit).
 
-bucket_field(Type) when is_atom(Type) ->
+%% A bucket with only one type
+simple_bucket_field(Type) when is_atom(Type) ->
     fields(bucket_infinity) ++
         [
             {client,
@@ -358,16 +385,22 @@ bucket_field(Type) when is_atom(Type) ->
                     ?R_REF(?MODULE, client_opts),
                     #{
                         desc => ?DESC(client),
-                        required => false
+                        required => false,
+                        importance => importance_of_type(Type),
+                        aliases => alias_of_type(Type)
                     }
                 )}
         ].
-bucket_fields(Types, ClientRef) ->
+
+%% A bucket with multi types
+composite_bucket_fields(Types, ClientRef) ->
     [
         {Type,
             ?HOCON(?R_REF(?MODULE, Opts), #{
                 desc => ?DESC(?MODULE, Type),
-                required => false
+                required => false,
+                importance => importance_of_type(Type),
+                aliases => alias_of_type(Type)
             })}
     || {Type, Opts} <- Types
    ] ++
@@ -382,12 +415,29 @@ bucket_fields(Types, ClientRef) ->
         )}
     ].
 
-client_fields(Types) ->
+client_fields(Types, Meta) ->
     [
         {Type,
-            ?HOCON(?R_REF(client_opts), #{
+            ?HOCON(?R_REF(client_opts), Meta#{
                 desc => ?DESC(Type),
-                required => false
+                importance => importance_of_type(Type),
+                aliases => alias_of_type(Type)
             })}
-     || {Type, _} <- Types
+     || Type <- Types
     ].
 
+importance_of_type(interval) ->
+    ?IMPORTANCE_HIDDEN;
+importance_of_type(message_routing) ->
+    ?IMPORTANCE_HIDDEN;
+importance_of_type(connection) ->
+    ?IMPORTANCE_HIDDEN;
+importance_of_type(_) ->
+    ?DEFAULT_IMPORTANCE.
+
+alias_of_type(messages) ->
+    [message_in];
+alias_of_type(bytes) ->
+    [bytes_in];
+alias_of_type(_) ->
+    [].
@@ -118,17 +124,24 @@ connect(_Id, _Type, undefined) ->
     {ok, emqx_htb_limiter:make_infinity_limiter()};
 connect(Id, Type, Cfg) ->
     case find_limiter_cfg(Type, Cfg) of
-        {undefined, _} ->
+        {_ClientCfg, undefined, _NodeCfg} ->
             {ok, emqx_htb_limiter:make_infinity_limiter()};
+        {#{rate := infinity}, #{rate := infinity}, #{rate := infinity}} ->
+            {ok, emqx_htb_limiter:make_infinity_limiter()};
+        {ClientCfg, #{rate := infinity}, #{rate := infinity}} ->
+            {ok,
+                emqx_htb_limiter:make_token_bucket_limiter(
+                    ClientCfg, emqx_limiter_bucket_ref:infinity_bucket()
+                )};
         {
-            #{
-                rate := BucketRate,
-                capacity := BucketSize
-            },
-            #{rate := CliRate, capacity := CliSize} = ClientCfg
+            #{rate := CliRate} = ClientCfg,
+            #{rate := BucketRate} = BucketCfg,
+            _
         } ->
             case emqx_limiter_manager:find_bucket(Id, Type) of
                 {ok, Bucket} ->
+                    BucketSize = emqx_limiter_schema:calc_capacity(BucketCfg),
+                    CliSize = emqx_limiter_schema:calc_capacity(ClientCfg),
                     {ok,
                         if
                             CliRate < BucketRate orelse CliSize < BucketSize ->
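find_limiter_cfg/2 now returns {ClientCfg, BucketCfg, NodeCfg}, and the new clauses short-circuit fully unlimited chains before any bucket is allocated. A condensed sketch of the dispatch, with clause order as in the hunk (the function and result names here are illustrative; the last shape falls through to the shared-bucket path shown above):

    %% Sketch: which limiter each {ClientCfg, BucketCfg, NodeCfg} shape produces.
    limiter_kind({_Client, undefined, _Node}) -> infinity;
    limiter_kind({#{rate := infinity}, #{rate := infinity}, #{rate := infinity}}) -> infinity;
    limiter_kind({_Client, #{rate := infinity}, #{rate := infinity}}) -> client_only_token_bucket;
    limiter_kind(_) -> shared_bucket.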
@@ -493,12 +500,14 @@ make_root(#{rate := Rate, burst := Burst}) ->
         produced => 0.0
     }.
 
-do_add_bucket(Id, #{rate := Rate, capacity := Capacity} = Cfg, #{buckets := Buckets} = State) ->
+do_add_bucket(_Id, #{rate := infinity}, #{root := #{rate := infinity}} = State) ->
+    State;
+do_add_bucket(Id, #{rate := Rate} = Cfg, #{buckets := Buckets} = State) ->
     case maps:get(Id, Buckets, undefined) of
         undefined ->
             make_bucket(Id, Cfg, State);
         Bucket ->
-            Bucket2 = Bucket#{rate := Rate, capacity := Capacity},
+            Bucket2 = Bucket#{rate := Rate, capacity := emqx_limiter_schema:calc_capacity(Cfg)},
             State#{buckets := Buckets#{Id := Bucket2}}
     end.
 
@@ -509,7 +518,7 @@ make_bucket(Id, Cfg, #{index := ?COUNTER_SIZE} = State) ->
     });
 make_bucket(
     Id,
-    #{rate := Rate, capacity := Capacity} = Cfg,
+    #{rate := Rate} = Cfg,
     #{type := Type, counter := Counter, index := Index, buckets := Buckets} = State
 ) ->
     NewIndex = Index + 1,
@@ -519,7 +528,7 @@ make_bucket(
         rate => Rate,
         obtained => Initial,
         correction => 0,
-        capacity => Capacity,
+        capacity => emqx_limiter_schema:calc_capacity(Cfg),
         counter => Counter,
         index => NewIndex
     },
@@ -541,19 +550,14 @@ do_del_bucket(Id, #{type := Type, buckets := Buckets} = State) ->
 get_initial_val(
     #{
         initial := Initial,
-        rate := Rate,
-        capacity := Capacity
+        rate := Rate
     }
 ) ->
-    %% initial will nevner be infinity(see the emqx_limiter_schema)
-    InfVal = emqx_limiter_schema:infinity_value(),
     if
         Initial > 0 ->
             Initial;
         Rate =/= infinity ->
-            erlang:min(Rate, Capacity);
-        Capacity =/= infinity andalso Capacity =/= InfVal ->
-            Capacity;
+            Rate;
         true ->
             0
     end.
@@ -568,11 +572,12 @@ call(Type, Msg) ->
     end.
 
 find_limiter_cfg(Type, #{rate := _} = Cfg) ->
-    {Cfg, find_client_cfg(Type, maps:get(client, Cfg, undefined))};
+    {find_client_cfg(Type, maps:get(client, Cfg, undefined)), Cfg, find_node_cfg(Type)};
 find_limiter_cfg(Type, Cfg) ->
     {
+        find_client_cfg(Type, emqx_utils_maps:deep_get([client, Type], Cfg, undefined)),
         maps:get(Type, Cfg, undefined),
-        find_client_cfg(Type, emqx_utils_maps:deep_get([client, Type], Cfg, undefined))
+        find_node_cfg(Type)
     }.
 
 find_client_cfg(Type, BucketCfg) ->
@@ -585,3 +590,6 @@ merge_client_cfg(NodeCfg, undefined) ->
     NodeCfg;
 merge_client_cfg(NodeCfg, BucketCfg) ->
     maps:merge(NodeCfg, BucketCfg).
+
+find_node_cfg(Type) ->
+    emqx:get_config([limiter, Type], #{rate => infinity, burst => 0}).
@@ -164,7 +164,7 @@ roots(high) ->
             }
         )},
     {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, authentication(global)},
-    %% NOTE: authorization schema here is only to keep emqx app prue
+    %% NOTE: authorization schema here is only to keep emqx app pure
     %% the full schema for EMQX node is injected in emqx_conf_schema.
     {?EMQX_AUTHORIZATION_CONFIG_ROOT_NAME,
         sc(
@@ -2225,6 +2225,7 @@ common_ssl_opts_schema(Defaults) ->
                 #{
                     default => AvailableVersions,
                     desc => ?DESC(common_ssl_opts_schema_versions),
+                    importance => ?IMPORTANCE_HIGH,
                     validator => fun(Inputs) -> validate_tls_versions(AvailableVersions, Inputs) end
                 }
             )},
@@ -2235,6 +2236,7 @@ common_ssl_opts_schema(Defaults) ->
                 #{
                     default => <<"emqx_tls_psk:lookup">>,
                     converter => fun ?MODULE:user_lookup_fun_tr/2,
+                    importance => ?IMPORTANCE_HIDDEN,
                     desc => ?DESC(common_ssl_opts_schema_user_lookup_fun)
                 }
             )},
@@ -2762,10 +2764,16 @@ str(S) when is_list(S) ->
     S.
 
 authentication(Which) ->
-    Desc =
+    {Importance, Desc} =
         case Which of
-            global -> ?DESC(global_authentication);
-            listener -> ?DESC(listener_authentication)
+            global ->
+                %% For root level authentication, it is recommended to configure
+                %% from the dashboard or API.
+                %% Hence it's considered a low-importance when it comes to
+                %% configuration importance.
+                {?IMPORTANCE_LOW, ?DESC(global_authentication)};
+            listener ->
+                {?IMPORTANCE_HIDDEN, ?DESC(listener_authentication)}
         end,
     %% poor man's dependency injection
     %% this is due to the fact that authn is implemented outside of 'emqx' app.
@@ -2781,7 +2789,7 @@ authentication(Which) ->
     hoconsc:mk(Type, #{
         desc => Desc,
         converter => fun ensure_array/2,
-        importance => ?IMPORTANCE_HIDDEN
+        importance => Importance
     }).
 
 %% the older version schema allows individual element (instead of a chain) in config
@@ -121,8 +121,8 @@
 -define(SOCK_STATS, [recv_oct, recv_cnt, send_oct, send_cnt]).
 
 -define(ENABLED(X), (X =/= undefined)).
--define(LIMITER_BYTES_IN, bytes_in).
--define(LIMITER_MESSAGE_IN, message_in).
+-define(LIMITER_BYTES_IN, bytes).
+-define(LIMITER_MESSAGE_IN, messages).
 
 -dialyzer({no_match, [info/2]}).
 -dialyzer({nowarn_function, [websocket_init/1]}).
@@ -148,6 +148,14 @@ t_run_hook(_) ->
     ?assertEqual(3, emqx:run_fold_hook(foldl_filter2_hook, [arg], 1)),
     ?assertEqual(2, emqx:run_fold_hook(foldl_filter2_hook, [arg1], 1)).
 
+t_cluster_nodes(_) ->
+    Expected = [node()],
+    ?assertEqual(Expected, emqx:running_nodes()),
+    ?assertEqual(Expected, emqx:cluster_nodes(running)),
+    ?assertEqual(Expected, emqx:cluster_nodes(all)),
+    ?assertEqual(Expected, emqx:cluster_nodes(cores)),
+    ?assertEqual([], emqx:cluster_nodes(stopped)).
+
 %%--------------------------------------------------------------------
 %% Hook fun
 %%--------------------------------------------------------------------
@@ -162,8 +162,7 @@ limiter_conf() ->
     Make = fun() ->
         #{
             burst => 0,
-            rate => infinity,
-            capacity => infinity
+            rate => infinity
         }
     end,
 
@@ -172,7 +171,7 @@ limiter_conf() ->
             Acc#{Name => Make()}
         end,
         #{},
-        [bytes_in, message_in, message_routing, connection, internal]
+        [bytes, messages, message_routing, connection, internal]
     ).
 
 stats_conf() ->
@@ -1258,7 +1257,7 @@ limiter_cfg() ->
     Client = #{
         rate => 5,
         initial => 0,
-        capacity => 5,
+        burst => 0,
         low_watermark => 1,
         divisible => false,
         max_retry_time => timer:seconds(5),
@@ -1270,7 +1269,7 @@ limiter_cfg() ->
     }.
 
 bucket_cfg() ->
-    #{rate => 10, initial => 0, capacity => 10}.
+    #{rate => 10, initial => 0, burst => 0}.
 
 add_bucket() ->
     emqx_limiter_server:add_bucket(?MODULE, message_routing, bucket_cfg()).
@@ -427,7 +427,7 @@ t_ensure_rate_limit(_) ->
         fun(_, Client) -> {pause, 3000, undefined, Client} end
     ),
     {ok, State2} = emqx_connection:check_limiter(
-        [{1000, bytes_in}],
+        [{1000, bytes}],
         [],
         WhenOk,
         [],
@@ -703,31 +703,29 @@ handle_call(Pid, Call, St) -> emqx_connection:handle_call(Pid, Call, St).
 -define(LIMITER_ID, 'tcp:default').
 
 init_limiter() ->
-    emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], limiter_cfg()).
+    emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes, messages], limiter_cfg()).
 
 limiter_cfg() ->
-    Infinity = emqx_limiter_schema:infinity_value(),
     Cfg = bucket_cfg(),
     Client = #{
-        rate => Infinity,
+        rate => infinity,
         initial => 0,
-        capacity => Infinity,
+        burst => 0,
         low_watermark => 1,
         divisible => false,
         max_retry_time => timer:seconds(5),
         failure_strategy => force
     },
-    #{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}.
+    #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}.
 
 bucket_cfg() ->
-    Infinity = emqx_limiter_schema:infinity_value(),
-    #{rate => Infinity, initial => 0, capacity => Infinity}.
+    #{rate => infinity, initial => 0, burst => 0}.
 
 add_bucket() ->
     Cfg = bucket_cfg(),
-    emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg),
-    emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg).
+    emqx_limiter_server:add_bucket(?LIMITER_ID, bytes, Cfg),
+    emqx_limiter_server:add_bucket(?LIMITER_ID, messages, Cfg).
 
 del_bucket() ->
-    emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in),
-    emqx_limiter_server:del_bucket(?LIMITER_ID, message_in).
+    emqx_limiter_server:del_bucket(?LIMITER_ID, bytes),
+    emqx_limiter_server:del_bucket(?LIMITER_ID, messages).
@@ -72,7 +72,7 @@ t_consume(_) ->
     Cfg = fun(Cfg) ->
         Cfg#{
             rate := 100,
-            capacity := 100,
+            burst := 0,
            initial := 100,
            max_retry_time := 1000,
            failure_strategy := force
@@ -89,7 +89,7 @@ t_retry(_) ->
     Cfg = fun(Cfg) ->
         Cfg#{
             rate := 50,
-            capacity := 200,
+            burst := 150,
             initial := 0,
             max_retry_time := 1000,
             failure_strategy := force
@@ -109,7 +109,7 @@ t_restore(_) ->
     Cfg = fun(Cfg) ->
         Cfg#{
             rate := 1,
-            capacity := 200,
+            burst := 199,
             initial := 50,
             max_retry_time := 100,
             failure_strategy := force
@@ -129,7 +129,7 @@ t_max_retry_time(_) ->
     Cfg = fun(Cfg) ->
         Cfg#{
             rate := 1,
-            capacity := 1,
+            burst := 0,
             max_retry_time := 500,
             failure_strategy := drop
         }
@@ -139,8 +139,12 @@ t_max_retry_time(_) ->
         Begin = ?NOW,
         Result = emqx_htb_limiter:consume(101, Client),
         ?assertMatch({drop, _}, Result),
-        Time = ?NOW - Begin,
-        ?assert(Time >= 500 andalso Time < 550)
+        End = ?NOW,
+        Time = End - Begin,
+        ?assert(
+            Time >= 500 andalso Time < 550,
+            lists:flatten(io_lib:format("Begin:~p, End:~p, Time:~p~n", [Begin, End, Time]))
+        )
     end,
     with_per_client(Cfg, Case).
 
@@ -150,7 +154,7 @@ t_divisible(_) ->
             divisible := true,
             rate := ?RATE("1000/1s"),
             initial := 600,
-            capacity := 600
+            burst := 0
         }
     end,
     Case = fun(BucketCfg) ->
@@ -176,7 +180,7 @@ t_low_watermark(_) ->
             low_watermark := 400,
             rate := ?RATE("1000/1s"),
             initial := 1000,
-            capacity := 1000
+            burst := 0
         }
     end,
     Case = fun(BucketCfg) ->
@@ -201,8 +205,7 @@ t_infinity_client(_) ->
     Fun = fun(Cfg) -> Cfg end,
     Case = fun(Cfg) ->
         Client = connect(Cfg),
-        InfVal = emqx_limiter_schema:infinity_value(),
-        ?assertMatch(#{bucket := #{rate := InfVal}}, Client),
+        ?assertMatch(infinity, Client),
         Result = emqx_htb_limiter:check(100000, Client),
         ?assertEqual({ok, Client}, Result)
     end,
@@ -212,12 +215,12 @@ t_try_restore_agg(_) ->
     Fun = fun(#{client := Cli} = Bucket) ->
         Bucket2 = Bucket#{
             rate := 1,
-            capacity := 200,
+            burst := 199,
             initial := 50
         },
         Cli2 = Cli#{
             rate := infinity,
-            capacity := infinity,
+            burst := infinity,
             divisible := true,
             max_retry_time := 100,
             failure_strategy := force
@@ -239,11 +242,11 @@ t_short_board(_) ->
         Bucket2 = Bucket#{
             rate := ?RATE("100/1s"),
             initial := 0,
-            capacity := 100
+            burst := 0
         },
         Cli2 = Cli#{
             rate := ?RATE("600/1s"),
-            capacity := 600,
+            burst := 0,
             initial := 600
         },
         Bucket2#{client := Cli2}
@@ -261,46 +264,45 @@ t_rate(_) ->
         Bucket2 = Bucket#{
             rate := ?RATE("100/100ms"),
             initial := 0,
-            capacity := infinity
+            burst := infinity
         },
         Cli2 = Cli#{
             rate := infinity,
-            capacity := infinity,
+            burst := infinity,
             initial := 0
         },
         Bucket2#{client := Cli2}
     end,
     Case = fun(Cfg) ->
+        Time = 1000,
         Client = connect(Cfg),
-        Ts1 = erlang:system_time(millisecond),
         C1 = emqx_htb_limiter:available(Client),
-        timer:sleep(1000),
-        Ts2 = erlang:system_time(millisecond),
+        timer:sleep(1100),
         C2 = emqx_htb_limiter:available(Client),
-        ShouldInc = floor((Ts2 - Ts1) / 100) * 100,
+        ShouldInc = floor(Time / 100) * 100,
         Inc = C2 - C1,
         ?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate")
     end,
     with_bucket(Fun, Case).
 
 t_capacity(_) ->
-    Capacity = 600,
+    Capacity = 1200,
     Fun = fun(#{client := Cli} = Bucket) ->
         Bucket2 = Bucket#{
             rate := ?RATE("100/100ms"),
             initial := 0,
-            capacity := 600
+            burst := 200
         },
         Cli2 = Cli#{
             rate := infinity,
-            capacity := infinity,
+            burst := infinity,
             initial := 0
         },
         Bucket2#{client := Cli2}
     end,
     Case = fun(Cfg) ->
         Client = connect(Cfg),
-        timer:sleep(1000),
+        timer:sleep(1500),
         C1 = emqx_htb_limiter:available(Client),
         ?assertEqual(Capacity, C1, "test bucket capacity")
     end,
@@ -318,11 +320,11 @@ t_collaborative_alloc(_) ->
         Bucket2 = Bucket#{
             rate := ?RATE("400/1s"),
             initial := 0,
-            capacity := 600
+            burst := 200
         },
         Cli2 = Cli#{
             rate := ?RATE("50"),
-            capacity := 100,
+            burst := 50,
             initial := 100
         },
         Bucket2#{client := Cli2}
@@ -363,11 +365,11 @@ t_burst(_) ->
         Bucket2 = Bucket#{
             rate := ?RATE("200/1s"),
             initial := 0,
-            capacity := 200
+            burst := 0
         },
         Cli2 = Cli#{
             rate := ?RATE("50/1s"),
-            capacity := 200,
+            burst := 150,
             divisible := true
         },
         Bucket2#{client := Cli2}
@@ -401,11 +403,11 @@ t_limit_global_with_unlimit_other(_) ->
         Bucket2 = Bucket#{
             rate := infinity,
             initial := 0,
-            capacity := infinity
+            burst := infinity
         },
         Cli2 = Cli#{
             rate := infinity,
-            capacity := infinity,
+            burst := infinity,
             initial := 0
         },
         Bucket2#{client := Cli2}
@@ -414,7 +416,7 @@ t_limit_global_with_unlimit_other(_) ->
     Case = fun() ->
         C1 = counters:new(1, []),
         start_client({b1, Bucket}, ?NOW + 2000, C1, 20),
-        timer:sleep(2100),
+        timer:sleep(2200),
         check_average_rate(C1, 2, 600)
     end,
 
@@ -432,7 +434,7 @@ t_check_container(_) ->
         Cfg#{
             rate := ?RATE("1000/1s"),
             initial := 1000,
-            capacity := 1000
+            burst := 0
         }
     end,
     Case = fun(#{client := Client} = BucketCfg) ->
@@ -565,7 +567,7 @@ t_schema_unit(_) ->
     ?assertMatch({error, _}, M:to_rate("100MB/1")),
     ?assertMatch({error, _}, M:to_rate("100/10x")),
 
-    ?assertEqual({ok, emqx_limiter_schema:infinity_value()}, M:to_capacity("infinity")),
+    ?assertEqual({ok, infinity}, M:to_capacity("infinity")),
     ?assertEqual({ok, 100}, M:to_capacity("100")),
     ?assertEqual({ok, 100 * 1024}, M:to_capacity("100KB")),
     ?assertEqual({ok, 100 * 1024 * 1024}, M:to_capacity("100MB")),
@@ -748,17 +750,16 @@ connect(Name, Cfg) ->
     Limiter.
 
 make_limiter_cfg() ->
-    Infinity = emqx_limiter_schema:infinity_value(),
     Client = #{
-        rate => Infinity,
+        rate => infinity,
         initial => 0,
-        capacity => Infinity,
+        burst => infinity,
         low_watermark => 0,
         divisible => false,
         max_retry_time => timer:seconds(5),
         failure_strategy => force
     },
-    #{client => Client, rate => Infinity, initial => 0, capacity => Infinity}.
+    #{client => Client, rate => infinity, initial => 0, burst => infinity}.
 
 add_bucket(Cfg) ->
     add_bucket(?MODULE, Cfg).
@@ -509,16 +509,16 @@ t_handle_timeout_emit_stats(_) ->
 t_ensure_rate_limit(_) ->
     {ok, Rate} = emqx_limiter_schema:to_rate("50MB"),
     Limiter = init_limiter(#{
-        bytes_in => bucket_cfg(),
-        message_in => bucket_cfg(),
-        client => #{bytes_in => client_cfg(Rate)}
+        bytes => bucket_cfg(),
+        messages => bucket_cfg(),
+        client => #{bytes => client_cfg(Rate)}
     }),
     St = st(#{limiter => Limiter}),
 
     %% must bigger than value in emqx_ratelimit_SUITE
     {ok, Need} = emqx_limiter_schema:to_capacity("1GB"),
     St1 = ?ws_conn:check_limiter(
-        [{Need, bytes_in}],
+        [{Need, bytes}],
         [],
         fun(_, _, S) -> S end,
         [],
@@ -699,23 +699,21 @@ init_limiter() ->
     init_limiter(limiter_cfg()).
 
 init_limiter(LimiterCfg) ->
-    emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], LimiterCfg).
+    emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes, messages], LimiterCfg).
 
 limiter_cfg() ->
     Cfg = bucket_cfg(),
     Client = client_cfg(),
-    #{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}.
+    #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}.
 
 client_cfg() ->
-    Infinity = emqx_limiter_schema:infinity_value(),
-    client_cfg(Infinity).
+    client_cfg(infinity).
 
 client_cfg(Rate) ->
-    Infinity = emqx_limiter_schema:infinity_value(),
     #{
         rate => Rate,
         initial => 0,
-        capacity => Infinity,
+        burst => 0,
         low_watermark => 1,
         divisible => false,
         max_retry_time => timer:seconds(5),
@@ -723,14 +721,13 @@ client_cfg(Rate) ->
     }.
 
 bucket_cfg() ->
-    Infinity = emqx_limiter_schema:infinity_value(),
-    #{rate => Infinity, initial => 0, capacity => Infinity}.
+    #{rate => infinity, initial => 0, burst => 0}.
 
 add_bucket() ->
     Cfg = bucket_cfg(),
-    emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg),
-    emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg).
+    emqx_limiter_server:add_bucket(?LIMITER_ID, bytes, Cfg),
+    emqx_limiter_server:add_bucket(?LIMITER_ID, messages, Cfg).
 
 del_bucket() ->
-    emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in),
-    emqx_limiter_server:del_bucket(?LIMITER_ID, message_in).
+    emqx_limiter_server:del_bucket(?LIMITER_ID, bytes),
+    emqx_limiter_server:del_bucket(?LIMITER_ID, messages).
@@ -105,14 +105,16 @@ mnesia(boot) ->
 %% Hocon Schema
 %%------------------------------------------------------------------------------
 
-namespace() -> "authn-scram-builtin_db".
+namespace() -> "authn".
 
 tags() ->
     [<<"Authentication">>].
 
-roots() -> [?CONF_NS].
+%% used for config check when the schema module is resolved
+roots() ->
+    [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, scram))}].
 
-fields(?CONF_NS) ->
+fields(scram) ->
     [
         {mechanism, emqx_authn_schema:mechanism(scram)},
         {backend, emqx_authn_schema:backend(built_in_database)},
@@ -120,7 +122,7 @@ fields(?CONF_NS) ->
         {iteration_count, fun iteration_count/1}
     ] ++ emqx_authn_schema:common_fields().
 
-desc(?CONF_NS) ->
+desc(scram) ->
     "Settings for Salted Challenge Response Authentication Mechanism\n"
     "(SCRAM) authentication.";
 desc(_) ->
@@ -141,7 +143,7 @@ iteration_count(_) -> undefined.
 %%------------------------------------------------------------------------------
 
 refs() ->
-    [hoconsc:ref(?MODULE, ?CONF_NS)].
+    [hoconsc:ref(?MODULE, scram)].
 
 create(
     AuthenticatorID,
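With namespace/0 collapsed to "authn" across the authenticator modules, roots/0 now returns a typed reference so the module can still be resolved for a standalone config check. A hedged sketch of such a check (assumes hocon_tconf:check_plain/3 from the hocon dependency pinned earlier; the module name is hypothetical):

    %% Sketch: checking a raw config map against this schema module.
    check(ConfMap) ->
        hocon_tconf:check_plain(emqx_authn_scram_mnesia, ConfMap, #{atom_key => true}).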
@@ -51,34 +51,35 @@
 %% Hocon Schema
 %%------------------------------------------------------------------------------
 
-namespace() -> "authn-http".
+namespace() -> "authn".
 
 tags() ->
     [<<"Authentication">>].
 
+%% used for config check when the schema module is resolved
 roots() ->
     [
         {?CONF_NS,
             hoconsc:mk(
-                hoconsc:union(fun union_member_selector/1),
+                hoconsc:union(fun ?MODULE:union_member_selector/1),
                 #{}
             )}
     ].
 
-fields(get) ->
+fields(http_get) ->
     [
         {method, #{type => get, required => true, desc => ?DESC(method)}},
         {headers, fun headers_no_content_type/1}
     ] ++ common_fields();
-fields(post) ->
+fields(http_post) ->
     [
         {method, #{type => post, required => true, desc => ?DESC(method)}},
         {headers, fun headers/1}
     ] ++ common_fields().
 
-desc(get) ->
+desc(http_get) ->
     ?DESC(get);
-desc(post) ->
+desc(http_post) ->
     ?DESC(post);
 desc(_) ->
     undefined.
@@ -156,8 +157,8 @@ request_timeout(_) -> undefined.
 
 refs() ->
     [
-        hoconsc:ref(?MODULE, get),
-        hoconsc:ref(?MODULE, post)
+        hoconsc:ref(?MODULE, http_get),
+        hoconsc:ref(?MODULE, http_post)
     ].
 
 union_member_selector(all_union_members) ->
@@ -166,9 +167,9 @@ union_member_selector({value, Value}) ->
     refs(Value).
 
 refs(#{<<"method">> := <<"get">>}) ->
-    [hoconsc:ref(?MODULE, get)];
+    [hoconsc:ref(?MODULE, http_get)];
 refs(#{<<"method">> := <<"post">>}) ->
-    [hoconsc:ref(?MODULE, post)];
+    [hoconsc:ref(?MODULE, http_post)];
 refs(_) ->
     throw(#{
         field_name => method,
@@ -35,18 +35,17 @@
 callback_mode() -> always_sync.
 
 on_start(InstId, Opts) ->
-    PoolName = emqx_plugin_libs_pool:pool_name(InstId),
     PoolOpts = [
         {pool_size, maps:get(pool_size, Opts, ?DEFAULT_POOL_SIZE)},
         {connector_opts, Opts}
     ],
-    case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, PoolOpts) of
-        ok -> {ok, #{pool_name => PoolName}};
+    case emqx_resource_pool:start(InstId, ?MODULE, PoolOpts) of
+        ok -> {ok, #{pool_name => InstId}};
         {error, Reason} -> {error, Reason}
     end.
 
 on_stop(_InstId, #{pool_name := PoolName}) ->
-    emqx_plugin_libs_pool:stop_pool(PoolName).
+    emqx_resource_pool:stop(PoolName).
 
 on_query(InstId, get_jwks, #{pool_name := PoolName}) ->
     Result = ecpool:pick_and_do(PoolName, {emqx_authn_jwks_client, get_jwks, []}, no_handover),
@@ -72,18 +71,17 @@ on_query(_InstId, {update, Opts}, #{pool_name := PoolName}) ->
     ok.
 
 on_get_status(_InstId, #{pool_name := PoolName}) ->
-    Func =
-        fun(Conn) ->
-            case emqx_authn_jwks_client:get_jwks(Conn) of
-                {ok, _} -> true;
-                _ -> false
-            end
-        end,
-    case emqx_plugin_libs_pool:health_check_ecpool_workers(PoolName, Func) of
+    case emqx_resource_pool:health_check_workers(PoolName, fun health_check/1) of
         true -> connected;
         false -> disconnected
     end.
 
+health_check(Conn) ->
+    case emqx_authn_jwks_client:get_jwks(Conn) of
+        {ok, _} -> true;
+        _ -> false
+    end.
+
 connect(Opts) ->
     ConnectorOpts = proplists:get_value(connector_opts, Opts),
     emqx_authn_jwks_client:start_link(ConnectorOpts).
@@ -43,36 +43,57 @@
 %% Hocon Schema
 %%------------------------------------------------------------------------------

-namespace() -> "authn-jwt".
+namespace() -> "authn".

 tags() ->
     [<<"Authentication">>].

+%% used for config check when the schema module is resolved
 roots() ->
     [
         {?CONF_NS,
             hoconsc:mk(
-                hoconsc:union(fun union_member_selector/1),
+                hoconsc:union(fun ?MODULE:union_member_selector/1),
                 #{}
             )}
     ].

-fields('hmac-based') ->
+fields(jwt_hmac) ->
     [
-        {use_jwks, sc(hoconsc:enum([false]), #{required => true, desc => ?DESC(use_jwks)})},
+        %% for hmac, it's the 'algorithm' field which selects this type
+        %% use_jwks field can be ignored (kept for backward compatibility)
+        {use_jwks,
+            sc(
+                hoconsc:enum([false]),
+                #{
+                    required => false,
+                    desc => ?DESC(use_jwks),
+                    importance => ?IMPORTANCE_HIDDEN
+                }
+            )},
         {algorithm,
             sc(hoconsc:enum(['hmac-based']), #{required => true, desc => ?DESC(algorithm)})},
         {secret, fun secret/1},
         {secret_base64_encoded, fun secret_base64_encoded/1}
     ] ++ common_fields();
-fields('public-key') ->
+fields(jwt_public_key) ->
     [
-        {use_jwks, sc(hoconsc:enum([false]), #{required => true, desc => ?DESC(use_jwks)})},
+        %% for public-key, it's the 'algorithm' field which selects this type
+        %% use_jwks field can be ignored (kept for backward compatibility)
+        {use_jwks,
+            sc(
+                hoconsc:enum([false]),
+                #{
+                    required => false,
+                    desc => ?DESC(use_jwks),
+                    importance => ?IMPORTANCE_HIDDEN
+                }
+            )},
         {algorithm,
             sc(hoconsc:enum(['public-key']), #{required => true, desc => ?DESC(algorithm)})},
         {public_key, fun public_key/1}
     ] ++ common_fields();
-fields('jwks') ->
+fields(jwt_jwks) ->
     [
         {use_jwks, sc(hoconsc:enum([true]), #{required => true, desc => ?DESC(use_jwks)})},
         {endpoint, fun endpoint/1},
@@ -85,12 +106,12 @@ fields('jwks') ->
         }}
     ] ++ common_fields().

-desc('hmac-based') ->
-    ?DESC('hmac-based');
-desc('public-key') ->
-    ?DESC('public-key');
-desc('jwks') ->
-    ?DESC('jwks');
+desc(jwt_hmac) ->
+    ?DESC(jwt_hmac);
+desc(jwt_public_key) ->
+    ?DESC(jwt_public_key);
+desc(jwt_jwks) ->
+    ?DESC(jwt_jwks);
 desc(undefined) ->
     undefined.

@@ -160,9 +181,9 @@ from(_) -> undefined.

 refs() ->
     [
-        hoconsc:ref(?MODULE, 'hmac-based'),
-        hoconsc:ref(?MODULE, 'public-key'),
-        hoconsc:ref(?MODULE, 'jwks')
+        hoconsc:ref(?MODULE, jwt_hmac),
+        hoconsc:ref(?MODULE, jwt_public_key),
+        hoconsc:ref(?MODULE, jwt_jwks)
     ].

 union_member_selector(all_union_members) ->
@@ -179,11 +200,11 @@ boolean(<<"false">>) -> false;
 boolean(Other) -> Other.

 select_ref(true, _) ->
-    [hoconsc:ref(?MODULE, 'jwks')];
+    [hoconsc:ref(?MODULE, 'jwt_jwks')];
 select_ref(false, #{<<"public_key">> := _}) ->
-    [hoconsc:ref(?MODULE, 'public-key')];
+    [hoconsc:ref(?MODULE, jwt_public_key)];
 select_ref(false, _) ->
-    [hoconsc:ref(?MODULE, 'hmac-based')];
+    [hoconsc:ref(?MODULE, jwt_hmac)];
 select_ref(_, _) ->
     throw(#{
         field_name => use_jwks,
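A detail shared by the schema hunks in this changeset: the union selector is now passed as `fun ?MODULE:union_member_selector/1` (an exported, remote fun) instead of `fun union_member_selector/1` (a local fun). A plausible reading, in line with the new "used for config check when the schema module is resolved" comments, is that a remote fun is re-resolved by module and name at call time, so it stays valid across code loads and module resolution, whereas a local fun pins the exact loaded code version. Illustrative contrast:

    %% local fun: captures the currently loaded version of this module
    Local = fun union_member_selector/1,
    %% remote fun: looked up as ?MODULE:union_member_selector/1 at each call,
    %% so a freshly loaded module version is picked up automatically
    Remote = fun ?MODULE:union_member_selector/1,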
@@ -107,14 +107,16 @@ mnesia(boot) ->
 %% Hocon Schema
 %%------------------------------------------------------------------------------

-namespace() -> "authn-builtin_db".
+namespace() -> "authn".

 tags() ->
     [<<"Authentication">>].

-roots() -> [?CONF_NS].
+%% used for config check when the schema module is resolved
+roots() ->
+    [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, builtin_db))}].

-fields(?CONF_NS) ->
+fields(builtin_db) ->
     [
         {mechanism, emqx_authn_schema:mechanism(password_based)},
         {backend, emqx_authn_schema:backend(built_in_database)},
@@ -122,8 +124,8 @@ fields(?CONF_NS) ->
         {password_hash_algorithm, fun emqx_authn_password_hashing:type_rw/1}
     ] ++ emqx_authn_schema:common_fields().

-desc(?CONF_NS) ->
-    ?DESC(?CONF_NS);
+desc(builtin_db) ->
+    ?DESC(builtin_db);
 desc(_) ->
     undefined.

@@ -138,7 +140,7 @@ user_id_type(_) -> undefined.
 %%------------------------------------------------------------------------------

 refs() ->
-    [hoconsc:ref(?MODULE, ?CONF_NS)].
+    [hoconsc:ref(?MODULE, builtin_db)].

 create(_AuthenticatorID, Config) ->
     create(Config).
@@ -44,32 +44,33 @@
 %% Hocon Schema
 %%------------------------------------------------------------------------------

-namespace() -> "authn-mongodb".
+namespace() -> "authn".

 tags() ->
     [<<"Authentication">>].

+%% used for config check when the schema module is resolved
 roots() ->
     [
         {?CONF_NS,
             hoconsc:mk(
-                hoconsc:union(fun union_member_selector/1),
+                hoconsc:union(fun ?MODULE:union_member_selector/1),
                 #{}
             )}
     ].

-fields(standalone) ->
+fields(mongo_single) ->
     common_fields() ++ emqx_connector_mongo:fields(single);
-fields('replica-set') ->
+fields(mongo_rs) ->
     common_fields() ++ emqx_connector_mongo:fields(rs);
-fields('sharded-cluster') ->
+fields(mongo_sharded) ->
     common_fields() ++ emqx_connector_mongo:fields(sharded).

-desc(standalone) ->
-    ?DESC(standalone);
-desc('replica-set') ->
+desc(mongo_single) ->
+    ?DESC(single);
+desc(mongo_rs) ->
     ?DESC('replica-set');
-desc('sharded-cluster') ->
+desc(mongo_sharded) ->
     ?DESC('sharded-cluster');
 desc(_) ->
     undefined.
@@ -126,9 +127,9 @@ is_superuser_field(_) -> undefined.

 refs() ->
     [
-        hoconsc:ref(?MODULE, standalone),
-        hoconsc:ref(?MODULE, 'replica-set'),
-        hoconsc:ref(?MODULE, 'sharded-cluster')
+        hoconsc:ref(?MODULE, mongo_single),
+        hoconsc:ref(?MODULE, mongo_rs),
+        hoconsc:ref(?MODULE, mongo_sharded)
     ].

 create(_AuthenticatorID, Config) ->
@@ -254,11 +255,11 @@ union_member_selector({value, Value}) ->
     refs(Value).

 refs(#{<<"mongo_type">> := <<"single">>}) ->
-    [hoconsc:ref(?MODULE, standalone)];
+    [hoconsc:ref(?MODULE, mongo_single)];
 refs(#{<<"mongo_type">> := <<"rs">>}) ->
-    [hoconsc:ref(?MODULE, 'replica-set')];
+    [hoconsc:ref(?MODULE, mongo_rs)];
 refs(#{<<"mongo_type">> := <<"sharded">>}) ->
-    [hoconsc:ref(?MODULE, 'sharded-cluster')];
+    [hoconsc:ref(?MODULE, mongo_sharded)];
 refs(_) ->
     throw(#{
         field_name => mongo_type,
@@ -45,14 +45,16 @@
 %% Hocon Schema
 %%------------------------------------------------------------------------------

-namespace() -> "authn-mysql".
+namespace() -> "authn".

 tags() ->
     [<<"Authentication">>].

-roots() -> [?CONF_NS].
+%% used for config check when the schema module is resolved
+roots() ->
+    [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, mysql))}].

-fields(?CONF_NS) ->
+fields(mysql) ->
     [
         {mechanism, emqx_authn_schema:mechanism(password_based)},
         {backend, emqx_authn_schema:backend(mysql)},
@@ -62,8 +64,8 @@ fields(?CONF_NS) ->
     ] ++ emqx_authn_schema:common_fields() ++
         proplists:delete(prepare_statement, emqx_connector_mysql:fields(config)).

-desc(?CONF_NS) ->
-    ?DESC(?CONF_NS);
+desc(mysql) ->
+    ?DESC(mysql);
 desc(_) ->
     undefined.

@@ -82,7 +84,7 @@ query_timeout(_) -> undefined.
 %%------------------------------------------------------------------------------

 refs() ->
-    [hoconsc:ref(?MODULE, ?CONF_NS)].
+    [hoconsc:ref(?MODULE, mysql)].

 create(_AuthenticatorID, Config) ->
     create(Config).
@@ -49,14 +49,16 @@
 %% Hocon Schema
 %%------------------------------------------------------------------------------

-namespace() -> "authn-postgresql".
+namespace() -> "authn".

 tags() ->
     [<<"Authentication">>].

-roots() -> [?CONF_NS].
+%% used for config check when the schema module is resolved
+roots() ->
+    [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, postgresql))}].

-fields(?CONF_NS) ->
+fields(postgresql) ->
     [
         {mechanism, emqx_authn_schema:mechanism(password_based)},
         {backend, emqx_authn_schema:backend(postgresql)},
@@ -66,8 +68,8 @@ fields(?CONF_NS) ->
     emqx_authn_schema:common_fields() ++
         proplists:delete(prepare_statement, emqx_connector_pgsql:fields(config)).

-desc(?CONF_NS) ->
-    ?DESC(?CONF_NS);
+desc(postgresql) ->
+    ?DESC(postgresql);
 desc(_) ->
     undefined.

@@ -81,7 +83,7 @@ query(_) -> undefined.
 %%------------------------------------------------------------------------------

 refs() ->
-    [hoconsc:ref(?MODULE, ?CONF_NS)].
+    [hoconsc:ref(?MODULE, postgresql)].

 create(_AuthenticatorID, Config) ->
     create(Config).
@@ -44,32 +44,33 @@
 %% Hocon Schema
 %%------------------------------------------------------------------------------

-namespace() -> "authn-redis".
+namespace() -> "authn".

 tags() ->
     [<<"Authentication">>].

+%% used for config check when the schema module is resolved
 roots() ->
     [
         {?CONF_NS,
             hoconsc:mk(
-                hoconsc:union(fun union_member_selector/1),
+                hoconsc:union(fun ?MODULE:union_member_selector/1),
                 #{}
             )}
     ].

-fields(standalone) ->
+fields(redis_single) ->
     common_fields() ++ emqx_connector_redis:fields(single);
-fields(cluster) ->
+fields(redis_cluster) ->
     common_fields() ++ emqx_connector_redis:fields(cluster);
-fields(sentinel) ->
+fields(redis_sentinel) ->
     common_fields() ++ emqx_connector_redis:fields(sentinel).

-desc(standalone) ->
-    ?DESC(standalone);
-desc(cluster) ->
+desc(redis_single) ->
+    ?DESC(single);
+desc(redis_cluster) ->
     ?DESC(cluster);
-desc(sentinel) ->
+desc(redis_sentinel) ->
     ?DESC(sentinel);
 desc(_) ->
     "".
@@ -93,9 +94,9 @@ cmd(_) -> undefined.

 refs() ->
     [
-        hoconsc:ref(?MODULE, standalone),
-        hoconsc:ref(?MODULE, cluster),
-        hoconsc:ref(?MODULE, sentinel)
+        hoconsc:ref(?MODULE, redis_single),
+        hoconsc:ref(?MODULE, redis_cluster),
+        hoconsc:ref(?MODULE, redis_sentinel)
     ].

 union_member_selector(all_union_members) ->
@@ -104,11 +105,11 @@ union_member_selector({value, Value}) ->
     refs(Value).

 refs(#{<<"redis_type">> := <<"single">>}) ->
-    [hoconsc:ref(?MODULE, standalone)];
+    [hoconsc:ref(?MODULE, redis_single)];
 refs(#{<<"redis_type">> := <<"cluster">>}) ->
-    [hoconsc:ref(?MODULE, cluster)];
+    [hoconsc:ref(?MODULE, redis_cluster)];
 refs(#{<<"redis_type">> := <<"sentinel">>}) ->
-    [hoconsc:ref(?MODULE, sentinel)];
+    [hoconsc:ref(?MODULE, redis_sentinel)];
 refs(_) ->
     throw(#{
         field_name => redis_type,
@@ -107,7 +107,7 @@ t_update_with_invalid_config(_Config) ->
     ?assertMatch(
         {error, #{
             kind := validation_error,
-            matched_type := "authn-postgresql:authentication",
+            matched_type := "authn:postgresql",
             path := "authentication.1.server",
             reason := required_field
         }},

@@ -162,7 +162,7 @@ t_create_invalid_config(_Config) ->
     ?assertMatch(
         {error, #{
             kind := validation_error,
-            matched_type := "authn-redis:standalone",
+            matched_type := "authn:redis_single",
             path := "authentication.1.server",
             reason := required_field
         }},

@@ -53,7 +53,7 @@ t_check_schema(_Config) ->
     ?assertThrow(
         #{
             path := "authentication.1.password_hash_algorithm.name",
-            matched_type := "authn-builtin_db:authentication/authn-hash:simple",
+            matched_type := "authn:builtin_db/authn-hash:simple",
             reason := unable_to_convert_to_enum_symbol
         },
         Check(ConfigNotOk)
@@ -72,7 +72,7 @@ t_check_schema(_Config) ->
         #{
             path := "authentication.1.password_hash_algorithm",
             reason := "algorithm_name_missing",
-            matched_type := "authn-builtin_db:authentication"
+            matched_type := "authn:builtin_db"
         },
         Check(ConfigMissingAlgoName)
     ).

@@ -32,19 +32,19 @@ union_member_selector_mongo_test_() ->
         end},
         {"single", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-mongodb:standalone"}),
+                ?ERR(#{matched_type := "authn:mongo_single"}),
                 Check("{mongo_type: single}")
             )
         end},
         {"replica-set", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-mongodb:replica-set"}),
+                ?ERR(#{matched_type := "authn:mongo_rs"}),
                 Check("{mongo_type: rs}")
             )
         end},
         {"sharded", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-mongodb:sharded-cluster"}),
+                ?ERR(#{matched_type := "authn:mongo_sharded"}),
                 Check("{mongo_type: sharded}")
             )
         end}
@@ -61,19 +61,19 @@ union_member_selector_jwt_test_() ->
         end},
         {"jwks", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-jwt:jwks"}),
+                ?ERR(#{matched_type := "authn:jwt_jwks"}),
                 Check("{use_jwks = true}")
             )
         end},
         {"publick-key", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-jwt:public-key"}),
+                ?ERR(#{matched_type := "authn:jwt_public_key"}),
                 Check("{use_jwks = false, public_key = 1}")
             )
         end},
         {"hmac-based", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-jwt:hmac-based"}),
+                ?ERR(#{matched_type := "authn:jwt_hmac"}),
                 Check("{use_jwks = false}")
             )
         end}
@@ -90,19 +90,19 @@ union_member_selector_redis_test_() ->
         end},
         {"single", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-redis:standalone"}),
+                ?ERR(#{matched_type := "authn:redis_single"}),
                 Check("{redis_type = single}")
             )
         end},
         {"cluster", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-redis:cluster"}),
+                ?ERR(#{matched_type := "authn:redis_cluster"}),
                 Check("{redis_type = cluster}")
             )
         end},
         {"sentinel", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-redis:sentinel"}),
+                ?ERR(#{matched_type := "authn:redis_sentinel"}),
                 Check("{redis_type = sentinel}")
             )
         end}
@@ -119,13 +119,13 @@ union_member_selector_http_test_() ->
         end},
         {"get", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-http:get"}),
+                ?ERR(#{matched_type := "authn:http_get"}),
                 Check("{method = get}")
             )
         end},
         {"post", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-http:post"}),
+                ?ERR(#{matched_type := "authn:http_post"}),
                 Check("{method = post}")
             )
         end}
@@ -185,7 +185,7 @@ match_who(#{peerhost := IpAddress}, {ipaddrs, CIDRs}) ->
 match_who(ClientInfo, {'and', Principals}) when is_list(Principals) ->
     lists:foldl(
         fun(Principal, Permission) ->
-            match_who(ClientInfo, Principal) andalso Permission
+            Permission andalso match_who(ClientInfo, Principal)
         end,
         true,
         Principals
@@ -193,7 +193,7 @@ match_who(ClientInfo, {'and', Principals}) when is_list(Principals) ->
 match_who(ClientInfo, {'or', Principals}) when is_list(Principals) ->
     lists:foldl(
         fun(Principal, Permission) ->
-            match_who(ClientInfo, Principal) orelse Permission
+            Permission orelse match_who(ClientInfo, Principal)
         end,
         false,
         Principals
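The operand swap in these two authz hunks is a short-circuit fix: with the fold accumulator on the left, `andalso`/`orelse` skip the `match_who/2` call entirely once the combined result is already decided (`false` for 'and', `true` for 'or'), whereas the old order evaluated `match_who/2` for every principal regardless. A standalone sketch of the same idiom (helper names here are hypothetical):

    %% Acc decides first: once it is false, Pred(I) is never called again.
    all_match(Pred, Items) ->
        lists:foldl(fun(I, Acc) -> Acc andalso Pred(I) end, true, Items).

    %% Once Acc is true, Pred(I) is never called again.
    any_match(Pred, Items) ->
        lists:foldl(fun(I, Acc) -> Acc orelse Pred(I) end, false, Items).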
@@ -54,7 +54,7 @@ type_names() ->
         file,
         http_get,
         http_post,
-        mnesia,
+        builtin_db,
         mongo_single,
         mongo_rs,
         mongo_sharded,
@@ -93,7 +93,7 @@ fields(http_post) ->
         {method, method(post)},
         {headers, fun headers/1}
     ];
-fields(mnesia) ->
+fields(builtin_db) ->
     authz_common_fields(built_in_database);
 fields(mongo_single) ->
     authz_common_fields(mongodb) ++
@@ -191,8 +191,8 @@ desc(http_get) ->
     ?DESC(http_get);
 desc(http_post) ->
     ?DESC(http_post);
-desc(mnesia) ->
-    ?DESC(mnesia);
+desc(builtin_db) ->
+    ?DESC(builtin_db);
 desc(mongo_single) ->
     ?DESC(mongo_single);
 desc(mongo_rs) ->
@@ -459,7 +459,7 @@ select_union_member(#{<<"type">> := <<"http">>} = Value) ->
             })
     end;
 select_union_member(#{<<"type">> := <<"built_in_database">>}) ->
-    ?R_REF(mnesia);
+    ?R_REF(builtin_db);
 select_union_member(#{<<"type">> := Type}) ->
     select_union_member_loop(Type, type_names());
 select_union_member(_) ->
@@ -494,7 +494,10 @@ authz_fields() ->
                 default => [],
                 desc => ?DESC(sources),
                 %% doc_lift is force a root level reference instead of nesting sub-structs
-                extra => #{doc_lift => true}
+                extra => #{doc_lift => true},
+                %% it is recommended to configure authz sources from dashboard
+                %% hance the importance level for config is low
+                importance => ?IMPORTANCE_LOW
             }
         )}
     ].
@@ -137,7 +137,7 @@ namespace() -> "bridge".
 tags() ->
     [<<"Bridge">>].

-roots() -> [{bridges, ?HOCON(?R_REF(bridges), #{importance => ?IMPORTANCE_HIDDEN})}].
+roots() -> [{bridges, ?HOCON(?R_REF(bridges), #{importance => ?IMPORTANCE_LOW})}].

 fields(bridges) ->
     [
@@ -1,12 +1,7 @@
 ## NOTE:
-## Configs in this file might be overridden by:
-## 1. Environment variables which start with 'EMQX_' prefix
-## 2. File $EMQX_NODE__DATA_DIR/configs/cluster-override.conf
-## 3. File $EMQX_NODE__DATA_DIR/configs/local-override.conf
-##
-## The *-override.conf files are overwritten at runtime when changes
-## are made from EMQX dashboard UI, management HTTP API, or CLI.
-## All configuration details can be found in emqx.conf.example
+## The EMQX configuration is prioritized (overlayed) in the following order:
+## `data/configs/cluster.hocon < etc/emqx.conf < environment variables`.

 node {
     name = "emqx@127.0.0.1"
@@ -25,9 +25,15 @@
 -export([update/3, update/4]).
 -export([remove/2, remove/3]).
 -export([reset/2, reset/3]).
--export([dump_schema/1, dump_schema/3]).
+-export([dump_schema/2]).
 -export([schema_module/0]).
--export([gen_example_conf/4]).
+-export([gen_example_conf/2]).
+
+%% TODO: move to emqx_dashboard when we stop building api schema at build time
+-export([
+    hotconf_schema_json/1,
+    bridge_schema_json/1
+]).

 %% for rpc
 -export([get_node_and_config/1]).
@@ -136,24 +142,22 @@ reset(Node, KeyPath, Opts) ->
     emqx_conf_proto_v2:reset(Node, KeyPath, Opts).

 %% @doc Called from build script.
--spec dump_schema(file:name_all()) -> ok.
-dump_schema(Dir) ->
-    I18nFile = emqx_dashboard:i18n_file(),
-    dump_schema(Dir, emqx_conf_schema, I18nFile).
-
-dump_schema(Dir, SchemaModule, I18nFile) ->
+%% TODO: move to a external escript after all refactoring is done
+dump_schema(Dir, SchemaModule) ->
+    _ = application:load(emqx_dashboard),
+    ok = emqx_dashboard_desc_cache:init(),
     lists:foreach(
         fun(Lang) ->
-            gen_config_md(Dir, I18nFile, SchemaModule, Lang),
-            gen_api_schema_json(Dir, I18nFile, Lang),
-            gen_example_conf(Dir, I18nFile, SchemaModule, Lang),
-            gen_schema_json(Dir, I18nFile, SchemaModule, Lang)
+            ok = gen_config_md(Dir, SchemaModule, Lang),
+            ok = gen_api_schema_json(Dir, Lang),
+            ok = gen_schema_json(Dir, SchemaModule, Lang)
         end,
         ["en", "zh"]
-    ).
+    ),
+    ok = gen_example_conf(Dir, SchemaModule).

 %% for scripts/spellcheck.
-gen_schema_json(Dir, I18nFile, SchemaModule, Lang) ->
+gen_schema_json(Dir, SchemaModule, Lang) ->
     SchemaJsonFile = filename:join([Dir, "schema-" ++ Lang ++ ".json"]),
     io:format(user, "===< Generating: ~s~n", [SchemaJsonFile]),
     %% EMQX_SCHEMA_FULL_DUMP is quite a hidden API
@@ -164,40 +168,62 @@ gen_schema_json(Dir, I18nFile, SchemaModule, Lang) ->
         false -> ?IMPORTANCE_LOW
     end,
     io:format(user, "===< Including fields from importance level: ~p~n", [IncludeImportance]),
-    Opts = #{desc_file => I18nFile, lang => Lang, include_importance_up_from => IncludeImportance},
+    Opts = #{
+        include_importance_up_from => IncludeImportance,
+        desc_resolver => make_desc_resolver(Lang)
+    },
     JsonMap = hocon_schema_json:gen(SchemaModule, Opts),
     IoData = emqx_utils_json:encode(JsonMap, [pretty, force_utf8]),
     ok = file:write_file(SchemaJsonFile, IoData).

-gen_api_schema_json(Dir, I18nFile, Lang) ->
-    emqx_dashboard:init_i18n(I18nFile, list_to_binary(Lang)),
+%% TODO: delete this function when we stop generating this JSON at build time.
+gen_api_schema_json(Dir, Lang) ->
     gen_api_schema_json_hotconf(Dir, Lang),
-    gen_api_schema_json_bridge(Dir, Lang),
-    emqx_dashboard:clear_i18n().
+    gen_api_schema_json_bridge(Dir, Lang).

+%% TODO: delete this function when we stop generating this JSON at build time.
 gen_api_schema_json_hotconf(Dir, Lang) ->
-    SchemaInfo = #{title => <<"EMQX Hot Conf API Schema">>, version => <<"0.1.0">>},
     File = schema_filename(Dir, "hot-config-schema-", Lang),
-    ok = do_gen_api_schema_json(File, emqx_mgmt_api_configs, SchemaInfo).
+    IoData = hotconf_schema_json(Lang),
+    ok = write_api_schema_json_file(File, IoData).

+%% TODO: delete this function when we stop generating this JSON at build time.
 gen_api_schema_json_bridge(Dir, Lang) ->
-    SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => <<"0.1.0">>},
     File = schema_filename(Dir, "bridge-api-", Lang),
-    ok = do_gen_api_schema_json(File, emqx_bridge_api, SchemaInfo).
+    IoData = bridge_schema_json(Lang),
+    ok = write_api_schema_json_file(File, IoData).
+
+%% TODO: delete this function when we stop generating this JSON at build time.
+write_api_schema_json_file(File, IoData) ->
+    io:format(user, "===< Generating: ~s~n", [File]),
+    file:write_file(File, IoData).
+
+%% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time.
+hotconf_schema_json(Lang) ->
+    SchemaInfo = #{title => <<"EMQX Hot Conf API Schema">>, version => <<"0.1.0">>},
+    gen_api_schema_json_iodata(emqx_mgmt_api_configs, SchemaInfo, Lang).
+
+%% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time.
+bridge_schema_json(Lang) ->
+    SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => <<"0.1.0">>},
+    gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo, Lang).

 schema_filename(Dir, Prefix, Lang) ->
     Filename = Prefix ++ Lang ++ ".json",
     filename:join([Dir, Filename]).

-gen_config_md(Dir, I18nFile, SchemaModule, Lang) ->
+%% TODO: remove it and also remove hocon_md.erl and friends.
+%% markdown generation from schema is a failure and we are moving to an interactive
+%% viewer like swagger UI.
+gen_config_md(Dir, SchemaModule, Lang) ->
     SchemaMdFile = filename:join([Dir, "config-" ++ Lang ++ ".md"]),
     io:format(user, "===< Generating: ~s~n", [SchemaMdFile]),
-    ok = gen_doc(SchemaMdFile, SchemaModule, I18nFile, Lang).
+    ok = gen_doc(SchemaMdFile, SchemaModule, Lang).

-gen_example_conf(Dir, I18nFile, SchemaModule, Lang) ->
-    SchemaMdFile = filename:join([Dir, "emqx.conf." ++ Lang ++ ".example"]),
+gen_example_conf(Dir, SchemaModule) ->
+    SchemaMdFile = filename:join([Dir, "emqx.conf.example"]),
     io:format(user, "===< Generating: ~s~n", [SchemaMdFile]),
-    ok = gen_example(SchemaMdFile, SchemaModule, I18nFile, Lang).
+    ok = gen_example(SchemaMdFile, SchemaModule).

 %% @doc return the root schema module.
 -spec schema_module() -> module().
@@ -211,35 +237,48 @@ schema_module() ->
 %% Internal functions
 %%--------------------------------------------------------------------

--spec gen_doc(file:name_all(), module(), file:name_all(), string()) -> ok.
-gen_doc(File, SchemaModule, I18nFile, Lang) ->
+%% @doc Make a resolver function that can be used to lookup the description by hocon_schema_json dump.
+make_desc_resolver(Lang) ->
+    fun
+        ({desc, Namespace, Id}) ->
+            emqx_dashboard_desc_cache:lookup(Lang, Namespace, Id, desc);
+        (Desc) ->
+            unicode:characters_to_binary(Desc)
+    end.
+
+-spec gen_doc(file:name_all(), module(), string()) -> ok.
+gen_doc(File, SchemaModule, Lang) ->
     Version = emqx_release:version(),
     Title =
         "# " ++ emqx_release:description() ++ " Configuration\n\n" ++
             "<!--" ++ Version ++ "-->",
     BodyFile = filename:join([rel, "emqx_conf.template." ++ Lang ++ ".md"]),
     {ok, Body} = file:read_file(BodyFile),
-    Opts = #{title => Title, body => Body, desc_file => I18nFile, lang => Lang},
+    Resolver = make_desc_resolver(Lang),
+    Opts = #{title => Title, body => Body, desc_resolver => Resolver},
     Doc = hocon_schema_md:gen(SchemaModule, Opts),
     file:write_file(File, Doc).

-gen_example(File, SchemaModule, I18nFile, Lang) ->
+gen_example(File, SchemaModule) ->
+    %% we do not generate description in example files
+    %% so there is no need for a desc_resolver
     Opts = #{
         title => <<"EMQX Configuration Example">>,
         body => <<"">>,
-        desc_file => I18nFile,
-        lang => Lang,
         include_importance_up_from => ?IMPORTANCE_MEDIUM
     },
     Example = hocon_schema_example:gen(SchemaModule, Opts),
     file:write_file(File, Example).

-%% Only gen hot_conf schema, not all configuration fields.
-do_gen_api_schema_json(File, SchemaMod, SchemaInfo) ->
-    io:format(user, "===< Generating: ~s~n", [File]),
+%% TODO: move this to emqx_dashboard when we stop generating
+%% this JSON at build time.
+gen_api_schema_json_iodata(SchemaMod, SchemaInfo, Lang) ->
     {ApiSpec0, Components0} = emqx_dashboard_swagger:spec(
         SchemaMod,
-        #{schema_converter => fun hocon_schema_to_spec/2}
+        #{
+            schema_converter => fun hocon_schema_to_spec/2,
+            i18n_lang => Lang
+        }
     ),
     ApiSpec = lists:foldl(
         fun({Path, Spec, _, _}, Acc) ->
@@ -268,22 +307,14 @@ do_gen_api_schema_json(File, SchemaMod, SchemaInfo) ->
         ApiSpec0
     ),
     Components = lists:foldl(fun(M, Acc) -> maps:merge(M, Acc) end, #{}, Components0),
-    IoData = emqx_utils_json:encode(
+    emqx_utils_json:encode(
         #{
             info => SchemaInfo,
             paths => ApiSpec,
             components => #{schemas => Components}
         },
         [pretty, force_utf8]
-    ),
-    file:write_file(File, IoData).
-
--define(INIT_SCHEMA, #{
-    fields => #{},
-    translations => #{},
-    validations => [],
-    namespace => undefined
-}).
+    ).

 -define(TO_REF(_N_, _F_), iolist_to_binary([to_bin(_N_), ".", to_bin(_F_)])).
 -define(TO_COMPONENTS_SCHEMA(_M_, _F_),
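After this refactor the build script drives everything through the 2-arity entry point, and descriptions come from `emqx_dashboard_desc_cache` via the resolver instead of a per-language i18n file. An illustrative invocation (the output directory below is a made-up example, not taken from the diff):

    %% dump the schema JSON, config markdown and example conf in one call
    ok = emqx_conf:dump_schema("_build/docgen", emqx_conf_schema).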
@@ -100,7 +100,7 @@ roots() ->
             ?R_REF("rpc"),
             #{
                 translate_to => ["gen_rpc"],
-                importance => ?IMPORTANCE_HIDDEN
+                importance => ?IMPORTANCE_LOW
             }
         )}
     ] ++
@@ -135,7 +135,7 @@ fields("cluster") ->
             )},
         {"core_nodes",
             sc(
-                emqx_schema:comma_separated_atoms(),
+                node_array(),
                 #{
                     mapping => "mria.core_nodes",
                     default => [],
@@ -203,7 +203,7 @@ fields(cluster_static) ->
     [
         {"seeds",
             sc(
-                hoconsc:array(atom()),
+                node_array(),
                 #{
                     default => [],
                     desc => ?DESC(cluster_static_seeds),
@@ -1288,7 +1288,7 @@ emqx_schema_high_prio_roots() ->
             ?R_REF("authorization"),
             #{
                 desc => ?DESC(authorization),
-                importance => ?IMPORTANCE_HIDDEN
+                importance => ?IMPORTANCE_HIGH
             }
         )},
     lists:keyreplace("authorization", 1, Roots, Authz).
@@ -1312,3 +1312,6 @@ validator_string_re(Val, RE, Error) ->
     catch
         _:_ -> {error, Error}
     end.
+
+node_array() ->
+    hoconsc:union([emqx_schema:comma_separated_atoms(), hoconsc:array(atom())]).
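The new `node_array()` union means the two cluster node-list fields accept either the legacy comma-separated string or a proper HOCON array, which is exactly what the test added below exercises. Both spellings of an illustrative config now validate:

    cluster {
      discovery_strategy = static
      ## comma-separated string (legacy form):
      static.seeds = "emqx1@127.0.0.1, emqx2@127.0.0.1"
      ## array form, equivalent:
      ## static.seeds = ["emqx1@127.0.0.1", "emqx2@127.0.0.1"]
    }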
@@ -5,6 +5,46 @@
 -module(emqx_conf_schema_tests).

 -include_lib("eunit/include/eunit.hrl").

+array_nodes_test() ->
+    ExpectNodes = ['emqx1@127.0.0.1', 'emqx2@127.0.0.1'],
+    BaseConf =
+        ""
+        "\n"
+        " node {\n"
+        " name = \"emqx1@127.0.0.1\"\n"
+        " cookie = \"emqxsecretcookie\"\n"
+        " data_dir = \"data\"\n"
+        " }\n"
+        " cluster {\n"
+        " name = emqxcl\n"
+        " discovery_strategy = static\n"
+        " static.seeds = ~p\n"
+        " core_nodes = ~p\n"
+        " }\n"
+        " "
+        "",
+    lists:foreach(
+        fun(Nodes) ->
+            ConfFile = iolist_to_binary(io_lib:format(BaseConf, [Nodes, Nodes])),
+            {ok, Conf} = hocon:binary(ConfFile, #{format => richmap}),
+            ConfList = hocon_tconf:generate(emqx_conf_schema, Conf),
+            ClusterDiscovery = proplists:get_value(
+                cluster_discovery, proplists:get_value(ekka, ConfList)
+            ),
+            ?assertEqual(
+                {static, [{seeds, ExpectNodes}]},
+                ClusterDiscovery,
+                Nodes
+            ),
+            ?assertEqual(
+                ExpectNodes,
+                proplists:get_value(core_nodes, proplists:get_value(mria, ConfList)),
+                Nodes
+            )
+        end,
+        [["emqx1@127.0.0.1", "emqx2@127.0.0.1"], "emqx1@127.0.0.1, emqx2@127.0.0.1"]
+    ),
+    ok.
+
 doc_gen_test() ->
     %% the json file too large to encode.
@@ -231,9 +231,8 @@ on_start(
         {transport_opts, NTransportOpts},
         {enable_pipelining, maps:get(enable_pipelining, Config, ?DEFAULT_PIPELINE_SIZE)}
     ],
-    PoolName = emqx_plugin_libs_pool:pool_name(InstId),
     State = #{
-        pool_name => PoolName,
+        pool_name => InstId,
         pool_type => PoolType,
        host => Host,
        port => Port,
@@ -241,7 +240,7 @@ on_start(
         base_path => BasePath,
         request => preprocess_request(maps:get(request, Config, undefined))
     },
-    case ehttpc_sup:start_pool(PoolName, PoolOpts) of
+    case ehttpc_sup:start_pool(InstId, PoolOpts) of
         {ok, _} -> {ok, State};
         {error, {already_started, _}} -> {ok, State};
         {error, Reason} -> {error, Reason}
@@ -87,20 +87,19 @@ on_start(
         {pool_size, PoolSize},
         {auto_reconnect, ?AUTO_RECONNECT_INTERVAL}
     ],
-    PoolName = emqx_plugin_libs_pool:pool_name(InstId),
-    case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ SslOpts) of
-        ok -> {ok, #{poolname => PoolName}};
+    case emqx_resource_pool:start(InstId, ?MODULE, Opts ++ SslOpts) of
+        ok -> {ok, #{pool_name => InstId}};
         {error, Reason} -> {error, Reason}
     end.

-on_stop(InstId, #{poolname := PoolName}) ->
+on_stop(InstId, #{pool_name := PoolName}) ->
     ?SLOG(info, #{
         msg => "stopping_ldap_connector",
         connector => InstId
     }),
-    emqx_plugin_libs_pool:stop_pool(PoolName).
+    emqx_resource_pool:stop(PoolName).

-on_query(InstId, {search, Base, Filter, Attributes}, #{poolname := PoolName} = State) ->
+on_query(InstId, {search, Base, Filter, Attributes}, #{pool_name := PoolName} = State) ->
     Request = {Base, Filter, Attributes},
     ?TRACE(
         "QUERY",
|
@ -182,12 +182,11 @@ on_start(
|
||||||
{options, init_topology_options(maps:to_list(Topology), [])},
|
{options, init_topology_options(maps:to_list(Topology), [])},
|
||||||
{worker_options, init_worker_options(maps:to_list(NConfig), SslOpts)}
|
{worker_options, init_worker_options(maps:to_list(NConfig), SslOpts)}
|
||||||
],
|
],
|
||||||
PoolName = emqx_plugin_libs_pool:pool_name(InstId),
|
|
||||||
Collection = maps:get(collection, Config, <<"mqtt">>),
|
Collection = maps:get(collection, Config, <<"mqtt">>),
|
||||||
case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts) of
|
case emqx_resource_pool:start(InstId, ?MODULE, Opts) of
|
||||||
ok ->
|
ok ->
|
||||||
{ok, #{
|
{ok, #{
|
||||||
poolname => PoolName,
|
pool_name => InstId,
|
||||||
type => Type,
|
type => Type,
|
||||||
collection => Collection
|
collection => Collection
|
||||||
}};
|
}};
|
||||||
|
@ -195,17 +194,17 @@ on_start(
|
||||||
{error, Reason}
|
{error, Reason}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
on_stop(InstId, #{poolname := PoolName}) ->
|
on_stop(InstId, #{pool_name := PoolName}) ->
|
||||||
?SLOG(info, #{
|
?SLOG(info, #{
|
||||||
msg => "stopping_mongodb_connector",
|
msg => "stopping_mongodb_connector",
|
||||||
connector => InstId
|
connector => InstId
|
||||||
}),
|
}),
|
||||||
emqx_plugin_libs_pool:stop_pool(PoolName).
|
emqx_resource_pool:stop(PoolName).
|
||||||
|
|
||||||
on_query(
|
on_query(
|
||||||
InstId,
|
InstId,
|
||||||
{send_message, Document},
|
{send_message, Document},
|
||||||
#{poolname := PoolName, collection := Collection} = State
|
#{pool_name := PoolName, collection := Collection} = State
|
||||||
) ->
|
) ->
|
||||||
Request = {insert, Collection, Document},
|
Request = {insert, Collection, Document},
|
||||||
?TRACE(
|
?TRACE(
|
||||||
|
@ -234,7 +233,7 @@ on_query(
|
||||||
on_query(
|
on_query(
|
||||||
InstId,
|
InstId,
|
||||||
{Action, Collection, Filter, Projector},
|
{Action, Collection, Filter, Projector},
|
||||||
#{poolname := PoolName} = State
|
#{pool_name := PoolName} = State
|
||||||
) ->
|
) ->
|
||||||
Request = {Action, Collection, Filter, Projector},
|
Request = {Action, Collection, Filter, Projector},
|
||||||
?TRACE(
|
?TRACE(
|
||||||
|
@ -263,8 +262,7 @@ on_query(
|
||||||
{ok, Result}
|
{ok, Result}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
-dialyzer({nowarn_function, [on_get_status/2]}).
|
on_get_status(InstId, #{pool_name := PoolName}) ->
|
||||||
on_get_status(InstId, #{poolname := PoolName} = _State) ->
|
|
||||||
case health_check(PoolName) of
|
case health_check(PoolName) of
|
||||||
true ->
|
true ->
|
||||||
?tp(debug, emqx_connector_mongo_health_check, #{
|
?tp(debug, emqx_connector_mongo_health_check, #{
|
||||||
|
@ -281,8 +279,10 @@ on_get_status(InstId, #{poolname := PoolName} = _State) ->
|
||||||
end.
|
end.
|
||||||
|
|
||||||
health_check(PoolName) ->
|
health_check(PoolName) ->
|
||||||
emqx_plugin_libs_pool:health_check_ecpool_workers(
|
emqx_resource_pool:health_check_workers(
|
||||||
PoolName, fun ?MODULE:check_worker_health/1, ?HEALTH_CHECK_TIMEOUT + timer:seconds(1)
|
PoolName,
|
||||||
|
fun ?MODULE:check_worker_health/1,
|
||||||
|
?HEALTH_CHECK_TIMEOUT + timer:seconds(1)
|
||||||
).
|
).
|
||||||
|
|
||||||
%% ===================================================================
|
%% ===================================================================
|
||||||
|
|
|
@@ -51,7 +51,7 @@
 -type sqls() :: #{atom() => binary()}.
 -type state() ::
     #{
-        poolname := atom(),
+        pool_name := binary(),
         prepare_statement := prepares(),
         params_tokens := params_tokens(),
         batch_inserts := sqls(),
@@ -123,13 +123,10 @@ on_start(
             {pool_size, PoolSize}
         ]
     ),
-    PoolName = emqx_plugin_libs_pool:pool_name(InstId),
-    Prepares = parse_prepare_sql(Config),
-    State = maps:merge(#{poolname => PoolName}, Prepares),
-    case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of
+    State = parse_prepare_sql(Config),
+    case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of
         ok ->
-            {ok, init_prepare(State)};
+            {ok, init_prepare(State#{pool_name => InstId})};
         {error, Reason} ->
             ?tp(
                 mysql_connector_start_failed,
@@ -143,12 +140,12 @@ maybe_add_password_opt(undefined, Options) ->
 maybe_add_password_opt(Password, Options) ->
     [{password, Password} | Options].

-on_stop(InstId, #{poolname := PoolName}) ->
+on_stop(InstId, #{pool_name := PoolName}) ->
     ?SLOG(info, #{
         msg => "stopping_mysql_connector",
         connector => InstId
     }),
-    emqx_plugin_libs_pool:stop_pool(PoolName).
+    emqx_resource_pool:stop(PoolName).

 on_query(InstId, {TypeOrKey, SQLOrKey}, State) ->
     on_query(InstId, {TypeOrKey, SQLOrKey, [], default_timeout}, State);
@@ -157,7 +154,7 @@ on_query(InstId, {TypeOrKey, SQLOrKey, Params}, State) ->
 on_query(
     InstId,
     {TypeOrKey, SQLOrKey, Params, Timeout},
-    #{poolname := PoolName, prepare_statement := Prepares} = State
+    #{pool_name := PoolName, prepare_statement := Prepares} = State
 ) ->
     MySqlFunction = mysql_function(TypeOrKey),
     {SQLOrKey2, Data} = proc_sql_params(TypeOrKey, SQLOrKey, Params, State),
@@ -216,8 +213,8 @@ mysql_function(prepared_query) ->
 mysql_function(_) ->
     mysql_function(prepared_query).

-on_get_status(_InstId, #{poolname := Pool} = State) ->
-    case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of
+on_get_status(_InstId, #{pool_name := PoolName} = State) ->
+    case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of
         true ->
             case do_check_prepares(State) of
                 ok ->
@@ -238,7 +235,7 @@ do_get_status(Conn) ->

 do_check_prepares(#{prepare_statement := Prepares}) when is_map(Prepares) ->
     ok;
-do_check_prepares(State = #{poolname := PoolName, prepare_statement := {error, Prepares}}) ->
+do_check_prepares(State = #{pool_name := PoolName, prepare_statement := {error, Prepares}}) ->
     %% retry to prepare
     case prepare_sql(Prepares, PoolName) of
         ok ->
@@ -253,7 +250,7 @@ do_check_prepares(State = #{poolname := PoolName, prepare_statement := {error, P
 connect(Options) ->
     mysql:start_link(Options).

-init_prepare(State = #{prepare_statement := Prepares, poolname := PoolName}) ->
+init_prepare(State = #{prepare_statement := Prepares, pool_name := PoolName}) ->
     case maps:size(Prepares) of
         0 ->
             State;
@@ -409,7 +406,7 @@ on_sql_query(
     SQLOrKey,
     Params,
     Timeout,
-    #{poolname := PoolName} = State
+    #{pool_name := PoolName} = State
 ) ->
     LogMeta = #{connector => InstId, sql => SQLOrKey, state => State},
     ?TRACE("QUERY", "mysql_connector_received", LogMeta),
@ -56,7 +56,7 @@
|
||||||
|
|
||||||
-type state() ::
|
-type state() ::
|
||||||
#{
|
#{
|
||||||
poolname := atom(),
|
pool_name := binary(),
|
||||||
prepare_sql := prepares(),
|
prepare_sql := prepares(),
|
||||||
params_tokens := params_tokens(),
|
params_tokens := params_tokens(),
|
||||||
prepare_statement := epgsql:statement()
|
prepare_statement := epgsql:statement()
|
||||||
|
@ -120,13 +120,10 @@ on_start(
|
||||||
{auto_reconnect, ?AUTO_RECONNECT_INTERVAL},
|
{auto_reconnect, ?AUTO_RECONNECT_INTERVAL},
|
||||||
{pool_size, PoolSize}
|
{pool_size, PoolSize}
|
||||||
],
|
],
|
||||||
PoolName = emqx_plugin_libs_pool:pool_name(InstId),
|
State = parse_prepare_sql(Config),
|
||||||
-    Prepares = parse_prepare_sql(Config),
-    InitState = #{poolname => PoolName, prepare_statement => #{}},
-    State = maps:merge(InitState, Prepares),
-    case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of
+    case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of
         ok ->
-            {ok, init_prepare(State)};
+            {ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})};
         {error, Reason} ->
             ?tp(
                 pgsql_connector_start_failed,
@@ -135,19 +132,19 @@ on_start(
             {error, Reason}
     end.
 
-on_stop(InstId, #{poolname := PoolName}) ->
+on_stop(InstId, #{pool_name := PoolName}) ->
     ?SLOG(info, #{
         msg => "stopping postgresql connector",
         connector => InstId
     }),
-    emqx_plugin_libs_pool:stop_pool(PoolName).
+    emqx_resource_pool:stop(PoolName).
 
-on_query(InstId, {TypeOrKey, NameOrSQL}, #{poolname := _PoolName} = State) ->
+on_query(InstId, {TypeOrKey, NameOrSQL}, State) ->
     on_query(InstId, {TypeOrKey, NameOrSQL, []}, State);
 on_query(
     InstId,
     {TypeOrKey, NameOrSQL, Params},
-    #{poolname := PoolName} = State
+    #{pool_name := PoolName} = State
 ) ->
     ?SLOG(debug, #{
         msg => "postgresql connector received sql query",
@@ -174,7 +171,7 @@ pgsql_query_type(_) ->
 on_batch_query(
     InstId,
     BatchReq,
-    #{poolname := PoolName, params_tokens := Tokens, prepare_statement := Sts} = State
+    #{pool_name := PoolName, params_tokens := Tokens, prepare_statement := Sts} = State
 ) ->
     case BatchReq of
         [{Key, _} = Request | _] ->
@@ -258,8 +255,8 @@ on_sql_query(InstId, PoolName, Type, NameOrSQL, Data) ->
             {error, {unrecoverable_error, invalid_request}}
     end.
 
-on_get_status(_InstId, #{poolname := Pool} = State) ->
-    case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of
+on_get_status(_InstId, #{pool_name := PoolName} = State) ->
+    case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of
         true ->
             case do_check_prepares(State) of
                 ok ->
@@ -280,7 +277,7 @@ do_get_status(Conn) ->
 
 do_check_prepares(#{prepare_sql := Prepares}) when is_map(Prepares) ->
     ok;
-do_check_prepares(State = #{poolname := PoolName, prepare_sql := {error, Prepares}}) ->
+do_check_prepares(State = #{pool_name := PoolName, prepare_sql := {error, Prepares}}) ->
     %% retry to prepare
     case prepare_sql(Prepares, PoolName) of
         {ok, Sts} ->
@@ -358,7 +355,7 @@ parse_prepare_sql([], Prepares, Tokens) ->
         params_tokens => Tokens
     }.
 
-init_prepare(State = #{prepare_sql := Prepares, poolname := PoolName}) ->
+init_prepare(State = #{prepare_sql := Prepares, pool_name := PoolName}) ->
     case maps:size(Prepares) of
         0 ->
             State;
@@ -389,17 +386,17 @@ prepare_sql(Prepares, PoolName) ->
     end.
 
 do_prepare_sql(Prepares, PoolName) ->
-    do_prepare_sql(ecpool:workers(PoolName), Prepares, PoolName, #{}).
+    do_prepare_sql(ecpool:workers(PoolName), Prepares, #{}).
 
-do_prepare_sql([{_Name, Worker} | T], Prepares, PoolName, _LastSts) ->
+do_prepare_sql([{_Name, Worker} | T], Prepares, _LastSts) ->
     {ok, Conn} = ecpool_worker:client(Worker),
     case prepare_sql_to_conn(Conn, Prepares) of
         {ok, Sts} ->
-            do_prepare_sql(T, Prepares, PoolName, Sts);
+            do_prepare_sql(T, Prepares, Sts);
         Error ->
             Error
     end;
-do_prepare_sql([], _Prepares, _PoolName, LastSts) ->
+do_prepare_sql([], _Prepares, LastSts) ->
     {ok, LastSts}.
 
 prepare_sql_to_conn(Conn, Prepares) ->
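
These PostgreSQL connector hunks drop the separate `poolname` bookkeeping: the resource instance ID now doubles as the ecpool name, and pool lifecycle plus worker health checks go through `emqx_resource_pool` instead of `emqx_plugin_libs_pool`. A minimal sketch of the resulting lifecycle, using only calls visible above; the `InstId` and `Options` arguments are hypothetical placeholders:

    %% Sketch only, not part of the commit. The instance ID is the pool name.
    pgsql_pool_lifecycle(InstId, Options) ->
        case emqx_resource_pool:start(InstId, emqx_connector_pgsql, Options) of
            ok ->
                %% health checks address the workers by the same ID
                true = emqx_resource_pool:health_check_workers(
                    InstId, fun emqx_connector_pgsql:do_get_status/1
                ),
                emqx_resource_pool:stop(InstId);
            {error, _} = Error ->
                Error
        end.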
@@ -153,11 +153,10 @@ on_start(
         false ->
             [{ssl, false}]
     end ++ [{sentinel, maps:get(sentinel, Config, undefined)}],
-    PoolName = InstId,
-    State = #{poolname => PoolName, type => Type},
+    State = #{pool_name => InstId, type => Type},
     case Type of
         cluster ->
-            case eredis_cluster:start_pool(PoolName, Opts ++ [{options, Options}]) of
+            case eredis_cluster:start_pool(InstId, Opts ++ [{options, Options}]) of
                 {ok, _} ->
                     {ok, State};
                 {ok, _, _} ->
@@ -166,22 +165,20 @@ on_start(
                     {error, Reason}
             end;
         _ ->
-            case
-                emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ [{options, Options}])
-            of
+            case emqx_resource_pool:start(InstId, ?MODULE, Opts ++ [{options, Options}]) of
                 ok -> {ok, State};
                 {error, Reason} -> {error, Reason}
             end
     end.
 
-on_stop(InstId, #{poolname := PoolName, type := Type}) ->
+on_stop(InstId, #{pool_name := PoolName, type := Type}) ->
     ?SLOG(info, #{
         msg => "stopping_redis_connector",
         connector => InstId
     }),
     case Type of
         cluster -> eredis_cluster:stop_pool(PoolName);
-        _ -> emqx_plugin_libs_pool:stop_pool(PoolName)
+        _ -> emqx_resource_pool:stop(PoolName)
     end.
 
 on_query(InstId, {cmd, _} = Query, State) ->
@@ -189,7 +186,7 @@ on_query(InstId, {cmd, _} = Query, State) ->
 on_query(InstId, {cmds, _} = Query, State) ->
     do_query(InstId, Query, State).
 
-do_query(InstId, Query, #{poolname := PoolName, type := Type} = State) ->
+do_query(InstId, Query, #{pool_name := PoolName, type := Type} = State) ->
     ?TRACE(
         "QUERY",
         "redis_connector_received",
@@ -227,7 +224,7 @@ is_unrecoverable_error({error, invalid_cluster_command}) ->
 is_unrecoverable_error(_) ->
     false.
 
-on_get_status(_InstId, #{type := cluster, poolname := PoolName}) ->
+on_get_status(_InstId, #{type := cluster, pool_name := PoolName}) ->
     case eredis_cluster:pool_exists(PoolName) of
         true ->
             Health = eredis_cluster:ping_all(PoolName),
@@ -235,8 +232,8 @@ on_get_status(_InstId, #{type := cluster, poolname := PoolName}) ->
         false ->
             disconnected
     end;
-on_get_status(_InstId, #{poolname := Pool}) ->
-    Health = emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1),
+on_get_status(_InstId, #{pool_name := PoolName}) ->
+    Health = emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1),
     status_result(Health).
 
 do_get_status(Conn) ->
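
The Redis connector follows the same rename, with one wrinkle: cluster pools stay under `eredis_cluster` while single-node and sentinel pools move to `emqx_resource_pool`, so every pool operation dispatches on the `type` field in the state. A condensed sketch of that dispatch (hypothetical helper, derived from `on_stop/2` above):

    %% Sketch: Type and PoolName come from the state built in on_start/2,
    %% where pool_name is the resource instance ID.
    stop_redis_pool(cluster, PoolName) -> eredis_cluster:stop_pool(PoolName);
    stop_redis_pool(_Single, PoolName) -> emqx_resource_pool:stop(PoolName).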
@@ -64,15 +64,15 @@ t_lifecycle(_Config) ->
         mongo_config()
     ).
 
-perform_lifecycle_check(PoolName, InitialConfig) ->
+perform_lifecycle_check(ResourceId, InitialConfig) ->
     {ok, #{config := CheckedConfig}} =
         emqx_resource:check_config(?MONGO_RESOURCE_MOD, InitialConfig),
     {ok, #{
-        state := #{poolname := ReturnedPoolName} = State,
+        state := #{pool_name := PoolName} = State,
         status := InitialStatus
     }} =
         emqx_resource:create_local(
-            PoolName,
+            ResourceId,
             ?CONNECTOR_RESOURCE_GROUP,
             ?MONGO_RESOURCE_MOD,
             CheckedConfig,
@@ -84,39 +84,39 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
         state := State,
         status := InitialStatus
     }} =
-        emqx_resource:get_instance(PoolName),
-    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
+        emqx_resource:get_instance(ResourceId),
+    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
     % % Perform query as further check that the resource is working as expected
-    ?assertMatch({ok, []}, emqx_resource:query(PoolName, test_query_find())),
-    ?assertMatch({ok, undefined}, emqx_resource:query(PoolName, test_query_find_one())),
-    ?assertEqual(ok, emqx_resource:stop(PoolName)),
+    ?assertMatch({ok, []}, emqx_resource:query(ResourceId, test_query_find())),
+    ?assertMatch({ok, undefined}, emqx_resource:query(ResourceId, test_query_find_one())),
+    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
     % Resource will be listed still, but state will be changed and healthcheck will fail
     % as the worker no longer exists.
     {ok, ?CONNECTOR_RESOURCE_GROUP, #{
         state := State,
         status := StoppedStatus
     }} =
-        emqx_resource:get_instance(PoolName),
+        emqx_resource:get_instance(ResourceId),
     ?assertEqual(stopped, StoppedStatus),
-    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)),
+    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)),
     % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
-    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
+    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
     % Can call stop/1 again on an already stopped instance
-    ?assertEqual(ok, emqx_resource:stop(PoolName)),
+    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
     % Make sure it can be restarted and the healthchecks and queries work properly
-    ?assertEqual(ok, emqx_resource:restart(PoolName)),
+    ?assertEqual(ok, emqx_resource:restart(ResourceId)),
     % async restart, need to wait resource
     timer:sleep(500),
     {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
-        emqx_resource:get_instance(PoolName),
-    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
-    ?assertMatch({ok, []}, emqx_resource:query(PoolName, test_query_find())),
-    ?assertMatch({ok, undefined}, emqx_resource:query(PoolName, test_query_find_one())),
+        emqx_resource:get_instance(ResourceId),
+    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
+    ?assertMatch({ok, []}, emqx_resource:query(ResourceId, test_query_find())),
+    ?assertMatch({ok, undefined}, emqx_resource:query(ResourceId, test_query_find_one())),
     % Stop and remove the resource in one go.
-    ?assertEqual(ok, emqx_resource:remove_local(PoolName)),
-    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
+    ?assertEqual(ok, emqx_resource:remove_local(ResourceId)),
+    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
     % Should not even be able to get the resource data out of ets now unlike just stopping.
-    ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)).
+    ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)).
 
 % %%------------------------------------------------------------------------------
 % %% Helpers
@@ -64,14 +64,14 @@ t_lifecycle(_Config) ->
         mysql_config()
     ).
 
-perform_lifecycle_check(PoolName, InitialConfig) ->
+perform_lifecycle_check(ResourceId, InitialConfig) ->
     {ok, #{config := CheckedConfig}} =
         emqx_resource:check_config(?MYSQL_RESOURCE_MOD, InitialConfig),
     {ok, #{
-        state := #{poolname := ReturnedPoolName} = State,
+        state := #{pool_name := PoolName} = State,
         status := InitialStatus
     }} = emqx_resource:create_local(
-        PoolName,
+        ResourceId,
         ?CONNECTOR_RESOURCE_GROUP,
         ?MYSQL_RESOURCE_MOD,
         CheckedConfig,
@@ -83,53 +83,53 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
         state := State,
         status := InitialStatus
     }} =
-        emqx_resource:get_instance(PoolName),
-    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
+        emqx_resource:get_instance(ResourceId),
+    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
     % % Perform query as further check that the resource is working as expected
-    ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_no_params())),
-    ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_with_params())),
+    ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_no_params())),
+    ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_with_params())),
     ?assertMatch(
         {ok, _, [[1]]},
         emqx_resource:query(
-            PoolName,
+            ResourceId,
             test_query_with_params_and_timeout()
         )
     ),
-    ?assertEqual(ok, emqx_resource:stop(PoolName)),
+    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
     % Resource will be listed still, but state will be changed and healthcheck will fail
     % as the worker no longer exists.
     {ok, ?CONNECTOR_RESOURCE_GROUP, #{
         state := State,
         status := StoppedStatus
     }} =
-        emqx_resource:get_instance(PoolName),
+        emqx_resource:get_instance(ResourceId),
     ?assertEqual(stopped, StoppedStatus),
-    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)),
+    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)),
     % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
-    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
+    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
     % Can call stop/1 again on an already stopped instance
-    ?assertEqual(ok, emqx_resource:stop(PoolName)),
+    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
     % Make sure it can be restarted and the healthchecks and queries work properly
-    ?assertEqual(ok, emqx_resource:restart(PoolName)),
+    ?assertEqual(ok, emqx_resource:restart(ResourceId)),
     % async restart, need to wait resource
     timer:sleep(500),
     {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
-        emqx_resource:get_instance(PoolName),
-    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
-    ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_no_params())),
-    ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_with_params())),
+        emqx_resource:get_instance(ResourceId),
+    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
+    ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_no_params())),
+    ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_with_params())),
     ?assertMatch(
         {ok, _, [[1]]},
         emqx_resource:query(
-            PoolName,
+            ResourceId,
             test_query_with_params_and_timeout()
         )
     ),
     % Stop and remove the resource in one go.
-    ?assertEqual(ok, emqx_resource:remove_local(PoolName)),
-    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
+    ?assertEqual(ok, emqx_resource:remove_local(ResourceId)),
+    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
     % Should not even be able to get the resource data out of ets now unlike just stopping.
-    ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)).
+    ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)).
 
 % %%------------------------------------------------------------------------------
 % %% Helpers
@@ -64,15 +64,15 @@ t_lifecycle(_Config) ->
         pgsql_config()
     ).
 
-perform_lifecycle_check(PoolName, InitialConfig) ->
+perform_lifecycle_check(ResourceId, InitialConfig) ->
     {ok, #{config := CheckedConfig}} =
         emqx_resource:check_config(?PGSQL_RESOURCE_MOD, InitialConfig),
     {ok, #{
-        state := #{poolname := ReturnedPoolName} = State,
+        state := #{pool_name := PoolName} = State,
         status := InitialStatus
     }} =
         emqx_resource:create_local(
-            PoolName,
+            ResourceId,
             ?CONNECTOR_RESOURCE_GROUP,
             ?PGSQL_RESOURCE_MOD,
             CheckedConfig,
@@ -84,39 +84,39 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
         state := State,
         status := InitialStatus
     }} =
-        emqx_resource:get_instance(PoolName),
-    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
+        emqx_resource:get_instance(ResourceId),
+    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
     % % Perform query as further check that the resource is working as expected
-    ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_no_params())),
-    ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_with_params())),
-    ?assertEqual(ok, emqx_resource:stop(PoolName)),
+    ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_no_params())),
+    ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_with_params())),
+    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
     % Resource will be listed still, but state will be changed and healthcheck will fail
     % as the worker no longer exists.
     {ok, ?CONNECTOR_RESOURCE_GROUP, #{
         state := State,
         status := StoppedStatus
     }} =
-        emqx_resource:get_instance(PoolName),
+        emqx_resource:get_instance(ResourceId),
     ?assertEqual(stopped, StoppedStatus),
-    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)),
+    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)),
     % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
-    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
+    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
     % Can call stop/1 again on an already stopped instance
-    ?assertEqual(ok, emqx_resource:stop(PoolName)),
+    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
     % Make sure it can be restarted and the healthchecks and queries work properly
-    ?assertEqual(ok, emqx_resource:restart(PoolName)),
+    ?assertEqual(ok, emqx_resource:restart(ResourceId)),
     % async restart, need to wait resource
     timer:sleep(500),
     {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
-        emqx_resource:get_instance(PoolName),
-    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
-    ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_no_params())),
-    ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_with_params())),
+        emqx_resource:get_instance(ResourceId),
+    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
+    ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_no_params())),
+    ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_with_params())),
     % Stop and remove the resource in one go.
-    ?assertEqual(ok, emqx_resource:remove_local(PoolName)),
-    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
+    ?assertEqual(ok, emqx_resource:remove_local(ResourceId)),
+    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
     % Should not even be able to get the resource data out of ets now unlike just stopping.
-    ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)).
+    ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)).
 
 % %%------------------------------------------------------------------------------
 % %% Helpers
@@ -102,14 +102,14 @@ t_sentinel_lifecycle(_Config) ->
         [<<"PING">>]
     ).
 
-perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) ->
+perform_lifecycle_check(ResourceId, InitialConfig, RedisCommand) ->
     {ok, #{config := CheckedConfig}} =
         emqx_resource:check_config(?REDIS_RESOURCE_MOD, InitialConfig),
     {ok, #{
-        state := #{poolname := ReturnedPoolName} = State,
+        state := #{pool_name := PoolName} = State,
         status := InitialStatus
     }} = emqx_resource:create_local(
-        PoolName,
+        ResourceId,
         ?CONNECTOR_RESOURCE_GROUP,
         ?REDIS_RESOURCE_MOD,
         CheckedConfig,
@@ -121,49 +121,49 @@ perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) ->
         state := State,
         status := InitialStatus
     }} =
-        emqx_resource:get_instance(PoolName),
-    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
+        emqx_resource:get_instance(ResourceId),
+    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
     % Perform query as further check that the resource is working as expected
-    ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})),
+    ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(ResourceId, {cmd, RedisCommand})),
     ?assertEqual(
         {ok, [{ok, <<"PONG">>}, {ok, <<"PONG">>}]},
-        emqx_resource:query(PoolName, {cmds, [RedisCommand, RedisCommand]})
+        emqx_resource:query(ResourceId, {cmds, [RedisCommand, RedisCommand]})
     ),
     ?assertMatch(
         {error, {unrecoverable_error, [{ok, <<"PONG">>}, {error, _}]}},
         emqx_resource:query(
-            PoolName,
+            ResourceId,
             {cmds, [RedisCommand, [<<"INVALID_COMMAND">>]]},
             #{timeout => 500}
         )
     ),
-    ?assertEqual(ok, emqx_resource:stop(PoolName)),
+    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
     % Resource will be listed still, but state will be changed and healthcheck will fail
     % as the worker no longer exists.
     {ok, ?CONNECTOR_RESOURCE_GROUP, #{
         state := State,
         status := StoppedStatus
     }} =
-        emqx_resource:get_instance(PoolName),
+        emqx_resource:get_instance(ResourceId),
     ?assertEqual(stopped, StoppedStatus),
-    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)),
+    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)),
     % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
-    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
+    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
     % Can call stop/1 again on an already stopped instance
-    ?assertEqual(ok, emqx_resource:stop(PoolName)),
+    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
     % Make sure it can be restarted and the healthchecks and queries work properly
-    ?assertEqual(ok, emqx_resource:restart(PoolName)),
+    ?assertEqual(ok, emqx_resource:restart(ResourceId)),
     % async restart, need to wait resource
     timer:sleep(500),
     {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
-        emqx_resource:get_instance(PoolName),
-    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
-    ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})),
+        emqx_resource:get_instance(ResourceId),
+    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
+    ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(ResourceId, {cmd, RedisCommand})),
     % Stop and remove the resource in one go.
-    ?assertEqual(ok, emqx_resource:remove_local(PoolName)),
-    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
+    ?assertEqual(ok, emqx_resource:remove_local(ResourceId)),
+    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
     % Should not even be able to get the resource data out of ets now unlike just stopping.
-    ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)).
+    ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)).
 
 % %%------------------------------------------------------------------------------
 % %% Helpers
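
The four connector test suites above (MongoDB, MySQL, PostgreSQL, Redis) all exercise the same lifecycle contract, and the rename makes the two roles explicit: `ResourceId` is the caller-chosen ID passed to `emqx_resource`, while `PoolName` is whatever the connector stored under `pool_name` in its state. A condensed sketch of the shared flow; the trailing start options of `create_local` are assumed to be `#{}` (they are cut off in the hunks):

    %% Sketch of the lifecycle the suites assert, not a complete test.
    lifecycle_sketch(ResourceId, Group, Mod, CheckedConfig) ->
        {ok, #{state := #{pool_name := PoolName}}} =
            emqx_resource:create_local(ResourceId, Group, Mod, CheckedConfig, #{}),
        {ok, connected} = emqx_resource:health_check(ResourceId),
        ok = emqx_resource:stop(ResourceId),
        {error, resource_is_stopped} = emqx_resource:health_check(ResourceId),
        %% the pool itself is gone, not just marked stopped
        {error, not_found} = ecpool:stop_sup_pool(PoolName),
        ok = emqx_resource:restart(ResourceId),
        %% (restart is async; the real suites sleep before re-checking)
        ok = emqx_resource:remove_local(ResourceId).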
@@ -16,22 +16,13 @@
 
 -module(emqx_dashboard).
 
--define(APP, ?MODULE).
-
 -export([
     start_listeners/0,
     start_listeners/1,
     stop_listeners/1,
     stop_listeners/0,
-    list_listeners/0
-]).
-
--export([
-    init_i18n/2,
-    init_i18n/0,
-    get_i18n/0,
-    i18n_file/0,
-    clear_i18n/0
+    list_listeners/0,
+    wait_for_listeners/0
 ]).
 
 %% Authorization
@@ -90,30 +81,34 @@ start_listeners(Listeners) ->
         dispatch => Dispatch,
         middlewares => [?EMQX_MIDDLE, cowboy_router, cowboy_handler]
     },
-    Res =
+    {OkListeners, ErrListeners} =
         lists:foldl(
-            fun({Name, Protocol, Bind, RanchOptions, ProtoOpts}, Acc) ->
+            fun({Name, Protocol, Bind, RanchOptions, ProtoOpts}, {OkAcc, ErrAcc}) ->
                 Minirest = BaseMinirest#{protocol => Protocol, protocol_options => ProtoOpts},
                 case minirest:start(Name, RanchOptions, Minirest) of
                     {ok, _} ->
                         ?ULOG("Listener ~ts on ~ts started.~n", [
                             Name, emqx_listeners:format_bind(Bind)
                         ]),
-                        Acc;
+                        {[Name | OkAcc], ErrAcc};
                     {error, _Reason} ->
                         %% Don't record the reason because minirest already does(too much logs noise).
-                        [Name | Acc]
+                        {OkAcc, [Name | ErrAcc]}
                 end
             end,
-            [],
+            {[], []},
             listeners(Listeners)
         ),
-    case Res of
-        [] -> ok;
-        _ -> {error, Res}
+    case ErrListeners of
+        [] ->
+            optvar:set(emqx_dashboard_listeners_ready, OkListeners),
+            ok;
+        _ ->
+            {error, ErrListeners}
     end.
 
 stop_listeners(Listeners) ->
+    optvar:unset(emqx_dashboard_listeners_ready),
     [
         begin
             case minirest:stop(Name) of
@@ -129,23 +124,8 @@ stop_listeners(Listeners) ->
     ],
     ok.
 
-get_i18n() ->
-    application:get_env(emqx_dashboard, i18n).
-
-init_i18n(File, Lang) when is_atom(Lang) ->
-    init_i18n(File, atom_to_binary(Lang));
-init_i18n(File, Lang) when is_binary(Lang) ->
-    Cache = hocon_schema:new_desc_cache(File),
-    application:set_env(emqx_dashboard, i18n, #{lang => Lang, cache => Cache}).
-
-clear_i18n() ->
-    case application:get_env(emqx_dashboard, i18n) of
-        {ok, #{cache := Cache}} ->
-            hocon_schema:delete_desc_cache(Cache),
-            application:unset_env(emqx_dashboard, i18n);
-        undefined ->
-            ok
-    end.
+wait_for_listeners() ->
+    optvar:read(emqx_dashboard_listeners_ready).
 
 %%--------------------------------------------------------------------
 %% internal
@@ -187,11 +167,6 @@ ip_port(error, Opts) -> {Opts#{port => 18083}, 18083};
 ip_port({Port, Opts}, _) when is_integer(Port) -> {Opts#{port => Port}, Port};
 ip_port({{IP, Port}, Opts}, _) -> {Opts#{port => Port, ip => IP}, {IP, Port}}.
 
-init_i18n() ->
-    File = i18n_file(),
-    Lang = emqx_conf:get([dashboard, i18n_lang], en),
-    init_i18n(File, Lang).
-
 ranch_opts(Options) ->
     Keys = [
         handshake_timeout,
@@ -255,12 +230,6 @@ return_unauthorized(Code, Message) ->
     },
     #{code => Code, message => Message}}.
 
-i18n_file() ->
-    case application:get_env(emqx_dashboard, i18n_file) of
-        undefined -> filename:join([code:priv_dir(emqx_dashboard), "i18n.conf"]);
-        {ok, File} -> File
-    end.
-
 listeners() ->
     emqx_conf:get([dashboard, listeners], #{}).
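
`start_listeners/1` now publishes the names of successfully started listeners through an optvar, and the new `wait_for_listeners/0` blocks on it; `stop_listeners/1` unsets the variable so late readers wait for the next start instead of seeing stale names. A sketch of that handshake, assuming only the three optvar calls used above (`dashboard_http` is a made-up listener name):

    %% Producer, on successful start:
    optvar:set(emqx_dashboard_listeners_ready, [dashboard_http]),
    %% Consumer (e.g. the dispatch regenerator): blocks until the var is
    %% set, then returns the stored value:
    [dashboard_http] = optvar:read(emqx_dashboard_listeners_ready),
    %% On stop, unset so the next read blocks until listeners are back:
    optvar:unset(emqx_dashboard_listeners_ready).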
@@ -18,7 +18,6 @@
 
 -behaviour(minirest_api).
 
--include("emqx_dashboard.hrl").
 -include_lib("hocon/include/hoconsc.hrl").
 -include_lib("emqx/include/logger.hrl").
 -include_lib("typerefl/include/types.hrl").
@@ -0,0 +1,110 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%     http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+
+%% @doc This module is used to cache the description of the configuration items.
+-module(emqx_dashboard_desc_cache).
+
+-export([init/0]).
+
+%% internal exports
+-export([load_desc/2, lookup/4, lookup/5]).
+
+-include_lib("emqx/include/logger.hrl").
+
+%% @doc Global ETS table to cache the description of the configuration items.
+%% The table is owned by the emqx_dashboard_sup the root supervisor of emqx_dashboard.
+%% The cache is initialized with the default language (English) and
+%% all the desc.<lang>.hocon files in the www/static directory (extracted from dashboard package).
+init() ->
+    ok = ensure_app_loaded(emqx_dashboard),
+    PrivDir = code:priv_dir(emqx_dashboard),
+    EngDesc = filename:join([PrivDir, "desc.en.hocon"]),
+    WwwStaticDir = filename:join([PrivDir, "www", "static"]),
+    OtherLangDesc0 = filelib:wildcard("desc.*.hocon", WwwStaticDir),
+    OtherLangDesc = lists:map(fun(F) -> filename:join([WwwStaticDir, F]) end, OtherLangDesc0),
+    Files = [EngDesc | OtherLangDesc],
+    ?MODULE = ets:new(?MODULE, [named_table, public, set, {read_concurrency, true}]),
+    ok = lists:foreach(fun(F) -> load_desc(?MODULE, F) end, Files).
+
+%% @doc Load the description of the configuration items from the file.
+%% Load is incremental, so it can be called multiple times.
+%% NOTE: no garbage collection is done, because stale entries are harmless.
+load_desc(EtsTab, File) ->
+    ?SLOG(info, #{msg => "loading desc", file => File}),
+    {ok, Descs} = hocon:load(File),
+    ["desc", Lang, "hocon"] = string:tokens(filename:basename(File), "."),
+    Insert = fun(Namespace, Id, Tag, Text) ->
+        Key = {bin(Lang), bin(Namespace), bin(Id), bin(Tag)},
+        true = ets:insert(EtsTab, {Key, bin(Text)}),
+        ok
+    end,
+    walk_ns(Insert, maps:to_list(Descs)).
+
+%% @doc Lookup the description of the configuration item from the global cache.
+lookup(Lang, Namespace, Id, Tag) ->
+    lookup(?MODULE, Lang, Namespace, Id, Tag).
+
+%% @doc Lookup the description of the configuration item from the given cache.
+lookup(EtsTab, Lang0, Namespace, Id, Tag) ->
+    Lang = bin(Lang0),
+    try ets:lookup(EtsTab, {Lang, bin(Namespace), bin(Id), bin(Tag)}) of
+        [{_, Desc}] ->
+            Desc;
+        [] when Lang =/= <<"en">> ->
+            %% fallback to English
+            lookup(EtsTab, <<"en">>, Namespace, Id, Tag);
+        _ ->
+            %% undefined but not <<>>
+            undefined
+    catch
+        error:badarg ->
+            %% schema is not initialized
+            %% most likely in test cases
+            undefined
+    end.
+
+%% The desc files are of names like:
+%% desc.en.hocon or desc.zh.hocon
+%% And with content like:
+%% namespace.id.desc = "description"
+%% namespace.id.label = "label"
+walk_ns(_Insert, []) ->
+    ok;
+walk_ns(Insert, [{Namespace, Ids} | Rest]) ->
+    walk_id(Insert, Namespace, maps:to_list(Ids)),
+    walk_ns(Insert, Rest).
+
+walk_id(_Insert, _Namespace, []) ->
+    ok;
+walk_id(Insert, Namespace, [{Id, Tags} | Rest]) ->
+    walk_tag(Insert, Namespace, Id, maps:to_list(Tags)),
+    walk_id(Insert, Namespace, Rest).
+
+walk_tag(_Insert, _Namespace, _Id, []) ->
+    ok;
+walk_tag(Insert, Namespace, Id, [{Tag, Text} | Rest]) ->
+    ok = Insert(Namespace, Id, Tag, Text),
+    walk_tag(Insert, Namespace, Id, Rest).
+
+bin(A) when is_atom(A) -> atom_to_binary(A, utf8);
+bin(B) when is_binary(B) -> B;
+bin(L) when is_list(L) -> list_to_binary(L).
+
+ensure_app_loaded(App) ->
+    case application:load(App) of
+        ok -> ok;
+        {error, {already_loaded, _}} -> ok
+    end.
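
As the trailing comments in the new module say, a `desc.<lang>.hocon` file flattens to `namespace.id.tag` entries, which the cache stores under ETS keys of the shape `{Lang, Namespace, Id, Tag}`. For illustration, a hypothetical `desc.en.hocon` entry and the lookups that would resolve against it (the `dashboard`/`i18n_lang` names here are made up):

    %% Given desc.en.hocon containing:
    %%   dashboard.i18n_lang.desc = "Dashboard language."
    %% the cache answers:
    <<"Dashboard language.">> =
        emqx_dashboard_desc_cache:lookup(<<"en">>, dashboard, i18n_lang, <<"desc">>),
    %% and a miss in another language falls back to English:
    <<"Dashboard language.">> =
        emqx_dashboard_desc_cache:lookup(<<"zh">>, dashboard, i18n_lang, <<"desc">>).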
@@ -15,9 +15,11 @@
 %%--------------------------------------------------------------------
 -module(emqx_dashboard_listener).
 
--include_lib("emqx/include/logger.hrl").
 -behaviour(emqx_config_handler).
 
+-include_lib("emqx/include/logger.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+
 %% API
 -export([add_handler/0, remove_handler/0]).
 -export([pre_config_update/3, post_config_update/5]).
@@ -54,12 +56,10 @@ init([]) ->
     {ok, undefined, {continue, regenerate_dispatch}}.
 
 handle_continue(regenerate_dispatch, _State) ->
-    NewState = regenerate_minirest_dispatch(),
-    {noreply, NewState, hibernate}.
+    %% initialize the swagger dispatches
+    ready = regenerate_minirest_dispatch(),
+    {noreply, ready, hibernate}.
 
-handle_call(is_ready, _From, retry) ->
-    NewState = regenerate_minirest_dispatch(),
-    {reply, NewState, NewState, hibernate};
 handle_call(is_ready, _From, State) ->
     {reply, State, State, hibernate};
 handle_call(_Request, _From, State) ->
@@ -68,6 +68,9 @@ handle_call(_Request, _From, State) ->
 handle_cast(_Request, State) ->
     {noreply, State, hibernate}.
 
+handle_info(i18n_lang_changed, _State) ->
+    NewState = regenerate_minirest_dispatch(),
+    {noreply, NewState, hibernate};
 handle_info({update_listeners, OldListeners, NewListeners}, _State) ->
     ok = emqx_dashboard:stop_listeners(OldListeners),
     ok = emqx_dashboard:start_listeners(NewListeners),
@@ -83,29 +86,26 @@ terminate(_Reason, _State) ->
 code_change(_OldVsn, State, _Extra) ->
     {ok, State}.
 
-%% generate dispatch is very slow.
+%% generate dispatch is very slow, takes about 1s.
 regenerate_minirest_dispatch() ->
-    try
-        emqx_dashboard:init_i18n(),
-        lists:foreach(
-            fun(Listener) ->
-                minirest:update_dispatch(element(1, Listener))
-            end,
-            emqx_dashboard:list_listeners()
-        ),
-        ready
-    catch
-        T:E:S ->
-            ?SLOG(error, #{
-                msg => "regenerate_minirest_dispatch_failed",
-                reason => E,
-                type => T,
-                stacktrace => S
-            }),
-            retry
-    after
-        emqx_dashboard:clear_i18n()
-    end.
+    %% optvar:read waits for the var to be set
+    Names = emqx_dashboard:wait_for_listeners(),
+    {Time, ok} = timer:tc(fun() -> do_regenerate_minirest_dispatch(Names) end),
+    Lang = emqx:get_config([dashboard, i18n_lang]),
+    ?tp(info, regenerate_minirest_dispatch, #{
+        elapsed => erlang:convert_time_unit(Time, microsecond, millisecond),
+        listeners => Names,
+        i18n_lang => Lang
+    }),
+    ready.
+
+do_regenerate_minirest_dispatch(Names) ->
+    lists:foreach(
+        fun(Name) ->
+            ok = minirest:update_dispatch(Name)
+        end,
+        Names
+    ).
 
 add_handler() ->
     Roots = emqx_dashboard_schema:roots(),
@@ -117,6 +117,12 @@ remove_handler() ->
     ok = emqx_config_handler:remove_handler(Roots),
     ok.
 
+pre_config_update(_Path, {change_i18n_lang, NewLang}, RawConf) ->
+    %% e.g. emqx_conf:update([dashboard], {change_i18n_lang, zh}, #{}).
+    %% TODO: check if there is such a language (all languages are cached in emqx_dashboard_desc_cache)
+    Update = #{<<"i18n_lang">> => NewLang},
+    NewConf = emqx_utils_maps:deep_merge(RawConf, Update),
+    {ok, NewConf};
 pre_config_update(_Path, UpdateConf0, RawConf) ->
     UpdateConf = remove_sensitive_data(UpdateConf0),
     NewConf = emqx_utils_maps:deep_merge(RawConf, UpdateConf),
@@ -139,6 +145,8 @@ remove_sensitive_data(Conf0) ->
         Conf1
     end.
 
+post_config_update(_, {change_i18n_lang, _}, _NewConf, _OldConf, _AppEnvs) ->
+    delay_job(i18n_lang_changed);
 post_config_update(_, _Req, NewConf, OldConf, _AppEnvs) ->
     OldHttp = get_listener(http, OldConf),
     OldHttps = get_listener(https, OldConf),
@@ -148,7 +156,12 @@ post_config_update(_, _Req, NewConf, OldConf, _AppEnvs) ->
     {StopHttps, StartHttps} = diff_listeners(https, OldHttps, NewHttps),
     Stop = maps:merge(StopHttp, StopHttps),
     Start = maps:merge(StartHttp, StartHttps),
-    _ = erlang:send_after(500, ?MODULE, {update_listeners, Stop, Start}),
+    delay_job({update_listeners, Stop, Start}).
+
+%% in post_config_update, the config is not yet persisted to persistent_term
+%% so we need to delegate the listener update to the gen_server a bit later
+delay_job(Msg) ->
+    _ = erlang:send_after(500, ?MODULE, Msg),
     ok.
 
 get_listener(Type, Conf) ->
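
With the `{change_i18n_lang, _}` clauses in place, switching the dashboard language becomes a single config update, exactly as the inline comment suggests: the new language is merged into the raw config, `delay_job/1` posts `i18n_lang_changed` to this gen_server 500 ms later, and the minirest dispatch is regenerated against the new language. A sketch of the trigger (the `{ok, _}` return shape is an assumption):

    %% From anywhere on the node; pre_config_update/3 above turns this into
    %% #{<<"i18n_lang">> => zh} and post_config_update/5 schedules the rebuild.
    {ok, _} = emqx_conf:update([dashboard], {change_i18n_lang, zh}, #{}).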
@@ -233,6 +233,8 @@ cors(required) -> false;
 cors(desc) -> ?DESC(cors);
 cors(_) -> undefined.
 
+%% TODO: change it to string type
+%% It will be up to the dashboard package which languagues to support
 i18n_lang(type) -> ?ENUM([en, zh]);
 i18n_lang(default) -> en;
 i18n_lang('readOnly') -> true;
@@ -0,0 +1,84 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%     http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+
+%% This module is for dashboard to retrieve the schema hot config and bridges.
+-module(emqx_dashboard_schema_api).
+
+-behaviour(minirest_api).
+
+-include_lib("hocon/include/hoconsc.hrl").
+
+%% minirest API
+-export([api_spec/0, paths/0, schema/1]).
+
+-export([get_schema/2]).
+
+-define(TAGS, [<<"dashboard">>]).
+-define(BAD_REQUEST, 'BAD_REQUEST').
+
+%%--------------------------------------------------------------------
+%% minirest API and schema
+%%--------------------------------------------------------------------
+
+api_spec() ->
+    emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}).
+
+paths() ->
+    ["/schemas/:name"].
+
+%% This is a rather hidden API, so we don't need to add translations for the description.
+schema("/schemas/:name") ->
+    #{
+        'operationId' => get_schema,
+        get => #{
+            parameters => [
+                {name, hoconsc:mk(hoconsc:enum([hotconf, bridges]), #{in => path})},
+                {lang,
+                    hoconsc:mk(typerefl:string(), #{
+                        in => query,
+                        default => <<"en">>,
+                        desc => <<"The language of the schema.">>
+                    })}
+            ],
+            desc => <<
+                "Get the schema JSON of the specified name. "
+                "NOTE: you should never need to make use of this API "
+                "unless you are building a multi-lang dashboaard."
+            >>,
+            tags => ?TAGS,
+            security => [],
+            responses => #{
+                200 => hoconsc:mk(binary(), #{desc => <<"The JSON schema of the specified name.">>})
+            }
+        }
+    }.
+
+%%--------------------------------------------------------------------
+%% API Handler funcs
+%%--------------------------------------------------------------------
+
+get_schema(get, #{
+    bindings := #{name := Name},
+    query_string := #{<<"lang">> := Lang}
+}) ->
+    {200, gen_schema(Name, iolist_to_binary(Lang))};
+get_schema(get, _) ->
+    {400, ?BAD_REQUEST, <<"unknown">>}.
+
+gen_schema(hotconf, Lang) ->
+    emqx_conf:hotconf_schema_json(Lang);
+gen_schema(bridges, Lang) ->
+    emqx_conf:bridge_schema_json(Lang).
@@ -28,6 +28,8 @@ start_link() ->
     supervisor:start_link({local, ?MODULE}, ?MODULE, []).
 
 init([]) ->
+    %% supervisor owns the cache table
+    ok = emqx_dashboard_desc_cache:init(),
     {ok,
         {{one_for_one, 5, 100}, [
             ?CHILD(emqx_dashboard_listener, brutal_kill),
@ -84,7 +84,8 @@
|
||||||
-type spec_opts() :: #{
|
-type spec_opts() :: #{
|
||||||
check_schema => boolean() | filter(),
|
check_schema => boolean() | filter(),
|
||||||
translate_body => boolean(),
|
translate_body => boolean(),
|
||||||
schema_converter => fun((hocon_schema:schema(), Module :: atom()) -> map())
|
schema_converter => fun((hocon_schema:schema(), Module :: atom()) -> map()),
|
||||||
|
i18n_lang => atom() | string() | binary()
|
||||||
}.
|
}.
|
||||||
|
|
||||||
-type route_path() :: string() | binary().
|
-type route_path() :: string() | binary().
|
||||||
|
@ -237,8 +238,16 @@ parse_spec_ref(Module, Path, Options) ->
|
||||||
erlang:apply(Module, schema, [Path])
|
erlang:apply(Module, schema, [Path])
|
||||||
%% better error message
|
%% better error message
|
||||||
catch
|
catch
|
||||||
error:Reason ->
|
error:Reason:Stacktrace ->
|
||||||
throw({error, #{mfa => {Module, schema, [Path]}, reason => Reason}})
|
%% raise a new error with the same stacktrace.
|
||||||
|
%% it's a bug if this happens.
|
||||||
|
%% i.e. if a path is listed in the spec but the module doesn't
|
||||||
|
%% implement it or crashes when trying to build the schema.
|
||||||
|
erlang:raise(
|
||||||
|
error,
|
||||||
|
#{mfa => {Module, schema, [Path]}, reason => Reason},
|
||||||
|
Stacktrace
|
||||||
|
)
|
||||||
end,
|
end,
|
||||||
{Specs, Refs} = maps:fold(
|
{Specs, Refs} = maps:fold(
|
||||||
fun(Method, Meta, {Acc, RefsAcc}) ->
|
fun(Method, Meta, {Acc, RefsAcc}) ->
|
||||||
|
@ -333,11 +342,11 @@ check_request_body(#{body := Body}, Spec, _Module, _CheckFun, false) when is_map
|
||||||
|
|
||||||
%% tags, description, summary, security, deprecated
|
%% tags, description, summary, security, deprecated
|
||||||
meta_to_spec(Meta, Module, Options) ->
|
meta_to_spec(Meta, Module, Options) ->
|
||||||
{Params, Refs1} = parameters(maps:get(parameters, Meta, []), Module),
|
{Params, Refs1} = parameters(maps:get(parameters, Meta, []), Module, Options),
|
||||||
{RequestBody, Refs2} = request_body(maps:get('requestBody', Meta, []), Module, Options),
|
{RequestBody, Refs2} = request_body(maps:get('requestBody', Meta, []), Module, Options),
|
||||||
{Responses, Refs3} = responses(maps:get(responses, Meta, #{}), Module, Options),
|
{Responses, Refs3} = responses(maps:get(responses, Meta, #{}), Module, Options),
|
||||||
{
|
{
|
||||||
generate_method_desc(to_spec(Meta, Params, RequestBody, Responses)),
|
generate_method_desc(to_spec(Meta, Params, RequestBody, Responses), Options),
|
||||||
lists:usort(Refs1 ++ Refs2 ++ Refs3)
|
lists:usort(Refs1 ++ Refs2 ++ Refs3)
|
||||||
}.
|
}.
|
||||||
|
|
||||||
|
@ -348,13 +357,13 @@ to_spec(Meta, Params, RequestBody, Responses) ->
|
||||||
Spec = to_spec(Meta, Params, [], Responses),
|
Spec = to_spec(Meta, Params, [], Responses),
|
||||||
maps:put('requestBody', RequestBody, Spec).
|
maps:put('requestBody', RequestBody, Spec).
|
||||||
|
|
||||||
generate_method_desc(Spec = #{desc := _Desc}) ->
|
generate_method_desc(Spec = #{desc := _Desc}, Options) ->
|
||||||
Spec1 = trans_description(maps:remove(desc, Spec), Spec),
|
Spec1 = trans_description(maps:remove(desc, Spec), Spec, Options),
|
||||||
trans_tags(Spec1);
|
trans_tags(Spec1);
|
||||||
generate_method_desc(Spec = #{description := _Desc}) ->
|
generate_method_desc(Spec = #{description := _Desc}, Options) ->
|
||||||
Spec1 = trans_description(Spec, Spec),
|
Spec1 = trans_description(Spec, Spec, Options),
|
||||||
trans_tags(Spec1);
|
trans_tags(Spec1);
|
||||||
generate_method_desc(Spec) ->
|
generate_method_desc(Spec, _Options) ->
|
||||||
trans_tags(Spec).
|
trans_tags(Spec).
|
||||||
|
|
||||||
trans_tags(Spec = #{tags := Tags}) ->
|
trans_tags(Spec = #{tags := Tags}) ->
|
||||||
|
@ -362,7 +371,7 @@ trans_tags(Spec = #{tags := Tags}) ->
|
||||||
trans_tags(Spec) ->
|
trans_tags(Spec) ->
|
||||||
Spec.
|
Spec.
|
||||||
|
|
||||||
parameters(Params, Module) ->
|
parameters(Params, Module, Options) ->
|
||||||
{SpecList, AllRefs} =
|
{SpecList, AllRefs} =
|
||||||
lists:foldl(
|
lists:foldl(
|
||||||
fun(Param, {Acc, RefsAcc}) ->
|
fun(Param, {Acc, RefsAcc}) ->
|
||||||
|
@ -388,7 +397,7 @@ parameters(Params, Module) ->
|
||||||
Type
|
Type
|
||||||
),
|
),
|
||||||
Spec1 = trans_required(Spec0, Required, In),
|
Spec1 = trans_required(Spec0, Required, In),
|
||||||
Spec2 = trans_description(Spec1, Type),
|
Spec2 = trans_description(Spec1, Type, Options),
|
||||||
{[Spec2 | Acc], Refs ++ RefsAcc}
|
{[Spec2 | Acc], Refs ++ RefsAcc}
|
||||||
end
|
end
|
||||||
end,
|
end,
|
||||||
|
@ -432,38 +441,38 @@ trans_required(Spec, true, _) -> Spec#{required => true};
|
||||||
trans_required(Spec, _, path) -> Spec#{required => true};
|
trans_required(Spec, _, path) -> Spec#{required => true};
|
||||||
trans_required(Spec, _, _) -> Spec.
|
trans_required(Spec, _, _) -> Spec.
|
||||||
|
|
||||||
trans_desc(Init, Hocon, Func, Name) ->
|
trans_desc(Init, Hocon, Func, Name, Options) ->
|
||||||
Spec0 = trans_description(Init, Hocon),
|
Spec0 = trans_description(Init, Hocon, Options),
|
||||||
case Func =:= fun hocon_schema_to_spec/2 of
|
case Func =:= fun hocon_schema_to_spec/2 of
|
||||||
true ->
|
true ->
|
||||||
Spec0;
|
Spec0;
|
||||||
false ->
|
false ->
|
||||||
Spec1 = trans_label(Spec0, Hocon, Name),
|
Spec1 = trans_label(Spec0, Hocon, Name, Options),
|
||||||
case Spec1 of
|
case Spec1 of
|
||||||
#{description := _} -> Spec1;
|
#{description := _} -> Spec1;
|
||||||
_ -> Spec1#{description => <<Name/binary, " Description">>}
|
_ -> Spec1#{description => <<Name/binary, " Description">>}
|
||||||
end
|
end
|
||||||
end.
|
end.
|
||||||
|
|
||||||
trans_description(Spec, Hocon) ->
|
trans_description(Spec, Hocon, Options) ->
|
||||||
Desc =
|
Desc =
|
||||||
case desc_struct(Hocon) of
|
case desc_struct(Hocon) of
|
||||||
undefined -> undefined;
|
undefined -> undefined;
|
||||||
?DESC(_, _) = Struct -> get_i18n(<<"desc">>, Struct, undefined);
|
?DESC(_, _) = Struct -> get_i18n(<<"desc">>, Struct, undefined, Options);
|
||||||
Struct -> to_bin(Struct)
|
Text -> to_bin(Text)
|
||||||
end,
|
end,
|
||||||
case Desc of
|
case Desc of
|
||||||
undefined ->
|
undefined ->
|
||||||
Spec;
|
Spec;
|
||||||
Desc ->
|
Desc ->
|
||||||
Desc1 = binary:replace(Desc, [<<"\n">>], <<"<br/>">>, [global]),
|
Desc1 = binary:replace(Desc, [<<"\n">>], <<"<br/>">>, [global]),
|
||||||
maybe_add_summary_from_label(Spec#{description => Desc1}, Hocon)
|
maybe_add_summary_from_label(Spec#{description => Desc1}, Hocon, Options)
|
||||||
end.
|
end.
|
||||||
|
|
||||||
maybe_add_summary_from_label(Spec, Hocon) ->
|
maybe_add_summary_from_label(Spec, Hocon, Options) ->
|
||||||
Label =
|
Label =
|
||||||
case desc_struct(Hocon) of
|
case desc_struct(Hocon) of
|
||||||
?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, undefined);
|
?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, undefined, Options);
|
||||||
_ -> undefined
|
_ -> undefined
|
||||||
end,
|
end,
|
||||||
case Label of
|
case Label of
|
||||||
|
@ -471,29 +480,44 @@ maybe_add_summary_from_label(Spec, Hocon) ->
|
||||||
_ -> Spec#{summary => Label}
|
_ -> Spec#{summary => Label}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
get_i18n(Key, Struct, Default) ->
|
get_i18n(Tag, ?DESC(Namespace, Id), Default, Options) ->
|
||||||
{ok, #{cache := Cache, lang := Lang}} = emqx_dashboard:get_i18n(),
|
Lang = get_lang(Options),
|
||||||
Desc = hocon_schema:resolve_schema(Struct, Cache),
|
case emqx_dashboard_desc_cache:lookup(Lang, Namespace, Id, Tag) of
|
||||||
emqx_utils_maps:deep_get([Key, Lang], Desc, Default).
|
undefined ->
|
||||||
|
Default;
|
||||||
|
Text ->
|
||||||
|
Text
|
||||||
|
end.
|
||||||
|
|
||||||
trans_label(Spec, Hocon, Default) ->
|
%% So far i18n_lang in options is only used at build time.
|
||||||
|
%% At runtime, it's still the global config which controls the language.
|
||||||
|
get_lang(#{i18n_lang := Lang}) -> Lang;
|
||||||
|
get_lang(_) -> emqx:get_config([dashboard, i18n_lang]).
|
||||||
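A short usage sketch of `get_lang/1` above (values illustrative): build-time callers pin the language through the options map, while runtime callers omit it and inherit the global dashboard config.

```erlang
%% Build time: the language is passed explicitly in Options.
en = get_lang(#{i18n_lang => en}),
%% Runtime: no i18n_lang key, so the global dashboard config wins.
Lang = get_lang(#{}).  %% same as emqx:get_config([dashboard, i18n_lang])
```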
|
|
||||||
|
trans_label(Spec, Hocon, Default, Options) ->
|
||||||
Label =
|
Label =
|
||||||
case desc_struct(Hocon) of
|
case desc_struct(Hocon) of
|
||||||
?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, Default);
|
?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, Default, Options);
|
||||||
_ -> Default
|
_ -> Default
|
||||||
end,
|
end,
|
||||||
Spec#{label => Label}.
|
Spec#{label => Label}.
|
||||||
|
|
||||||
desc_struct(Hocon) ->
|
desc_struct(Hocon) ->
|
||||||
case hocon_schema:field_schema(Hocon, desc) of
|
R =
|
||||||
undefined ->
|
case hocon_schema:field_schema(Hocon, desc) of
|
||||||
case hocon_schema:field_schema(Hocon, description) of
|
undefined ->
|
||||||
undefined -> get_ref_desc(Hocon);
|
case hocon_schema:field_schema(Hocon, description) of
|
||||||
Struct1 -> Struct1
|
undefined -> get_ref_desc(Hocon);
|
||||||
end;
|
Struct1 -> Struct1
|
||||||
Struct ->
|
end;
|
||||||
Struct
|
Struct ->
|
||||||
end.
|
Struct
|
||||||
|
end,
|
||||||
|
ensure_bin(R).
|
||||||
|
|
||||||
|
ensure_bin(undefined) -> undefined;
|
||||||
|
ensure_bin(?DESC(_Namespace, _Id) = Desc) -> Desc;
|
||||||
|
ensure_bin(Text) -> to_bin(Text).
|
||||||
|
|
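`ensure_bin/1` above normalizes whatever `desc_struct/1` resolved: `?DESC` references pass through untouched (they are resolved against the i18n cache later by `get_i18n/4`), plain text is coerced to a binary, and `undefined` is preserved. A small illustration (namespace/id values are made up):

```erlang
undefined = ensure_bin(undefined),
%% ?DESC records survive unchanged for later i18n resolution.
?DESC("namespace", "id") = ensure_bin(?DESC("namespace", "id")),
%% Anything else becomes a binary.
<<"plain text">> = ensure_bin("plain text").
```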
||||||
get_ref_desc(?R_REF(Mod, Name)) ->
|
get_ref_desc(?R_REF(Mod, Name)) ->
|
||||||
case erlang:function_exported(Mod, desc, 1) of
|
case erlang:function_exported(Mod, desc, 1) of
|
||||||
|
@ -524,7 +548,7 @@ responses(Responses, Module, Options) ->
|
||||||
{Spec, Refs}.
|
{Spec, Refs}.
|
||||||
|
|
||||||
response(Status, ?DESC(_Mod, _Id) = Schema, {Acc, RefsAcc, Module, Options}) ->
|
response(Status, ?DESC(_Mod, _Id) = Schema, {Acc, RefsAcc, Module, Options}) ->
|
||||||
Desc = trans_description(#{}, #{desc => Schema}),
|
Desc = trans_description(#{}, #{desc => Schema}, Options),
|
||||||
{Acc#{integer_to_binary(Status) => Desc}, RefsAcc, Module, Options};
|
{Acc#{integer_to_binary(Status) => Desc}, RefsAcc, Module, Options};
|
||||||
response(Status, Bin, {Acc, RefsAcc, Module, Options}) when is_binary(Bin) ->
|
response(Status, Bin, {Acc, RefsAcc, Module, Options}) when is_binary(Bin) ->
|
||||||
{Acc#{integer_to_binary(Status) => #{description => Bin}}, RefsAcc, Module, Options};
|
{Acc#{integer_to_binary(Status) => #{description => Bin}}, RefsAcc, Module, Options};
|
||||||
|
@ -553,7 +577,7 @@ response(Status, Schema, {Acc, RefsAcc, Module, Options}) ->
|
||||||
Hocon = hocon_schema:field_schema(Schema, type),
|
Hocon = hocon_schema:field_schema(Schema, type),
|
||||||
Examples = hocon_schema:field_schema(Schema, examples),
|
Examples = hocon_schema:field_schema(Schema, examples),
|
||||||
{Spec, Refs} = hocon_schema_to_spec(Hocon, Module),
|
{Spec, Refs} = hocon_schema_to_spec(Hocon, Module),
|
||||||
Init = trans_description(#{}, Schema),
|
Init = trans_description(#{}, Schema, Options),
|
||||||
Content = content(Spec, Examples),
|
Content = content(Spec, Examples),
|
||||||
{
|
{
|
||||||
Acc#{integer_to_binary(Status) => Init#{<<"content">> => Content}},
|
Acc#{integer_to_binary(Status) => Init#{<<"content">> => Content}},
|
||||||
|
@ -563,7 +587,7 @@ response(Status, Schema, {Acc, RefsAcc, Module, Options}) ->
|
||||||
};
|
};
|
||||||
false ->
|
false ->
|
||||||
{Props, Refs} = parse_object(Schema, Module, Options),
|
{Props, Refs} = parse_object(Schema, Module, Options),
|
||||||
Init = trans_description(#{}, Schema),
|
Init = trans_description(#{}, Schema, Options),
|
||||||
Content = Init#{<<"content">> => content(Props)},
|
Content = Init#{<<"content">> => content(Props)},
|
||||||
{Acc#{integer_to_binary(Status) => Content}, Refs ++ RefsAcc, Module, Options}
|
{Acc#{integer_to_binary(Status) => Content}, Refs ++ RefsAcc, Module, Options}
|
||||||
end.
|
end.
|
||||||
|
@ -590,7 +614,7 @@ components(Options, [{Module, Field} | Refs], SpecAcc, SubRefsAcc) ->
|
||||||
%% parameters in ref only have one value, not an array
|
%% parameters in ref only have one value, not an array
|
||||||
components(Options, [{Module, Field, parameter} | Refs], SpecAcc, SubRefsAcc) ->
|
components(Options, [{Module, Field, parameter} | Refs], SpecAcc, SubRefsAcc) ->
|
||||||
Props = hocon_schema_fields(Module, Field),
|
Props = hocon_schema_fields(Module, Field),
|
||||||
{[Param], SubRefs} = parameters(Props, Module),
|
{[Param], SubRefs} = parameters(Props, Module, Options),
|
||||||
Namespace = namespace(Module),
|
Namespace = namespace(Module),
|
||||||
NewSpecAcc = SpecAcc#{?TO_REF(Namespace, Field) => Param},
|
NewSpecAcc = SpecAcc#{?TO_REF(Namespace, Field) => Param},
|
||||||
components(Options, Refs, NewSpecAcc, SubRefs ++ SubRefsAcc).
|
components(Options, Refs, NewSpecAcc, SubRefs ++ SubRefsAcc).
|
||||||
|
@ -869,7 +893,7 @@ parse_object_loop([{Name, Hocon} | Rest], Module, Options, Props, Required, Refs
|
||||||
HoconType = hocon_schema:field_schema(Hocon, type),
|
HoconType = hocon_schema:field_schema(Hocon, type),
|
||||||
Init0 = init_prop([default | ?DEFAULT_FIELDS], #{}, Hocon),
|
Init0 = init_prop([default | ?DEFAULT_FIELDS], #{}, Hocon),
|
||||||
SchemaToSpec = schema_converter(Options),
|
SchemaToSpec = schema_converter(Options),
|
||||||
Init = trans_desc(Init0, Hocon, SchemaToSpec, NameBin),
|
Init = trans_desc(Init0, Hocon, SchemaToSpec, NameBin, Options),
|
||||||
{Prop, Refs1} = SchemaToSpec(HoconType, Module),
|
{Prop, Refs1} = SchemaToSpec(HoconType, Module),
|
||||||
NewRequiredAcc =
|
NewRequiredAcc =
|
||||||
case is_required(Hocon) of
|
case is_required(Hocon) of
|
||||||
|
|
|
@ -57,7 +57,7 @@ t_look_up_code(_) ->
|
||||||
|
|
||||||
t_description_code(_) ->
|
t_description_code(_) ->
|
||||||
{error, not_found} = emqx_dashboard_error_code:description('_____NOT_EXIST_NAME'),
|
{error, not_found} = emqx_dashboard_error_code:description('_____NOT_EXIST_NAME'),
|
||||||
{ok, <<"Request parameters are not legal">>} =
|
{ok, <<"Request parameters are invalid">>} =
|
||||||
emqx_dashboard_error_code:description('BAD_REQUEST'),
|
emqx_dashboard_error_code:description('BAD_REQUEST'),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
@ -79,7 +79,7 @@ t_api_code(_) ->
|
||||||
Url = ?SERVER ++ "/error_codes/BAD_REQUEST",
|
Url = ?SERVER ++ "/error_codes/BAD_REQUEST",
|
||||||
{ok, #{
|
{ok, #{
|
||||||
<<"code">> := <<"BAD_REQUEST">>,
|
<<"code">> := <<"BAD_REQUEST">>,
|
||||||
<<"description">> := <<"Request parameters are not legal">>
|
<<"description">> := <<"Request parameters are invalid">>
|
||||||
}} = request(Url),
|
}} = request(Url),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,51 @@
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||||
|
%%
|
||||||
|
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
%% you may not use this file except in compliance with the License.
|
||||||
|
%% You may obtain a copy of the License at
|
||||||
|
%%
|
||||||
|
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
%%
|
||||||
|
%% Unless required by applicable law or agreed to in writing, software
|
||||||
|
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
%% See the License for the specific language governing permissions and
|
||||||
|
%% limitations under the License.
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
-module(emqx_dashboard_listener_SUITE).
|
||||||
|
|
||||||
|
-compile(nowarn_export_all).
|
||||||
|
-compile(export_all).
|
||||||
|
|
||||||
|
-include_lib("eunit/include/eunit.hrl").
|
||||||
|
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||||
|
|
||||||
|
all() ->
|
||||||
|
emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
|
init_per_suite(Config) ->
|
||||||
|
emqx_mgmt_api_test_util:init_suite([emqx_conf]),
|
||||||
|
ok = change_i18n_lang(en),
|
||||||
|
Config.
|
||||||
|
|
||||||
|
end_per_suite(_Config) ->
|
||||||
|
ok = change_i18n_lang(en),
|
||||||
|
emqx_mgmt_api_test_util:end_suite([emqx_conf]).
|
||||||
|
|
||||||
|
t_change_i18n_lang(_Config) ->
|
||||||
|
?check_trace(
|
||||||
|
begin
|
||||||
|
ok = change_i18n_lang(zh),
|
||||||
|
{ok, _} = ?block_until(#{?snk_kind := regenerate_minirest_dispatch}, 10_000),
|
||||||
|
ok
|
||||||
|
end,
|
||||||
|
fun(ok, Trace) ->
|
||||||
|
?assertMatch([#{i18n_lang := zh}], ?of_kind(regenerate_minirest_dispatch, Trace))
|
||||||
|
end
|
||||||
|
),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
change_i18n_lang(Lang) ->
|
||||||
|
{ok, _} = emqx_conf:update([dashboard], {change_i18n_lang, Lang}, #{}),
|
||||||
|
ok.
|
|
@ -64,7 +64,6 @@ groups() ->
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
emqx_mgmt_api_test_util:init_suite([emqx_conf]),
|
emqx_mgmt_api_test_util:init_suite([emqx_conf]),
|
||||||
emqx_dashboard:init_i18n(),
|
|
||||||
Config.
|
Config.
|
||||||
|
|
||||||
end_per_suite(_Config) ->
|
end_per_suite(_Config) ->
|
||||||
|
|
|
@ -33,7 +33,6 @@ init_per_suite(Config) ->
|
||||||
mria:start(),
|
mria:start(),
|
||||||
application:load(emqx_dashboard),
|
application:load(emqx_dashboard),
|
||||||
emqx_common_test_helpers:start_apps([emqx_conf, emqx_dashboard], fun set_special_configs/1),
|
emqx_common_test_helpers:start_apps([emqx_conf, emqx_dashboard], fun set_special_configs/1),
|
||||||
emqx_dashboard:init_i18n(),
|
|
||||||
Config.
|
Config.
|
||||||
|
|
||||||
set_special_configs(emqx_dashboard) ->
|
set_special_configs(emqx_dashboard) ->
|
||||||
|
@ -308,8 +307,11 @@ t_nest_ref(_Config) ->
|
||||||
|
|
||||||
t_none_ref(_Config) ->
|
t_none_ref(_Config) ->
|
||||||
Path = "/ref/none",
|
Path = "/ref/none",
|
||||||
?assertThrow(
|
?assertError(
|
||||||
{error, #{mfa := {?MODULE, schema, [Path]}}},
|
#{
|
||||||
|
mfa := {?MODULE, schema, [Path]},
|
||||||
|
reason := function_clause
|
||||||
|
},
|
||||||
emqx_dashboard_swagger:parse_spec_ref(?MODULE, Path, #{})
|
emqx_dashboard_swagger:parse_spec_ref(?MODULE, Path, #{})
|
||||||
),
|
),
|
||||||
ok.
|
ok.
|
||||||
|
|
|
@ -33,7 +33,6 @@ all() -> emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
emqx_mgmt_api_test_util:init_suite([emqx_conf]),
|
emqx_mgmt_api_test_util:init_suite([emqx_conf]),
|
||||||
emqx_dashboard:init_i18n(),
|
|
||||||
Config.
|
Config.
|
||||||
|
|
||||||
end_per_suite(Config) ->
|
end_per_suite(Config) ->
|
||||||
|
@ -278,11 +277,11 @@ t_bad_ref(_Config) ->
|
||||||
|
|
||||||
t_none_ref(_Config) ->
|
t_none_ref(_Config) ->
|
||||||
Path = "/ref/none",
|
Path = "/ref/none",
|
||||||
?assertThrow(
|
?assertError(
|
||||||
{error, #{
|
#{
|
||||||
mfa := {?MODULE, schema, ["/ref/none"]},
|
mfa := {?MODULE, schema, ["/ref/none"]},
|
||||||
reason := function_clause
|
reason := function_clause
|
||||||
}},
|
},
|
||||||
validate(Path, #{}, [])
|
validate(Path, #{}, [])
|
||||||
),
|
),
|
||||||
ok.
|
ok.
|
||||||
|
|
|
@ -32,7 +32,7 @@
|
||||||
namespace() -> exhook.
|
namespace() -> exhook.
|
||||||
|
|
||||||
roots() ->
|
roots() ->
|
||||||
[{exhook, ?HOCON(?R_REF(exhook), #{importance => ?IMPORTANCE_HIDDEN})}].
|
[{exhook, ?HOCON(?R_REF(exhook), #{importance => ?IMPORTANCE_LOW})}].
|
||||||
|
|
||||||
fields(exhook) ->
|
fields(exhook) ->
|
||||||
[
|
[
|
||||||
|
|
|
@ -112,8 +112,8 @@
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
list_nodes() ->
|
list_nodes() ->
|
||||||
Running = mria:cluster_nodes(running),
|
Running = emqx:cluster_nodes(running),
|
||||||
Stopped = mria:cluster_nodes(stopped),
|
Stopped = emqx:cluster_nodes(stopped),
|
||||||
DownNodes = lists:map(fun stopped_node_info/1, Stopped),
|
DownNodes = lists:map(fun stopped_node_info/1, Stopped),
|
||||||
[{Node, Info} || #{node := Node} = Info <- node_info(Running)] ++ DownNodes.
|
[{Node, Info} || #{node := Node} = Info <- node_info(Running)] ++ DownNodes.
|
||||||
|
|
||||||
|
@ -199,7 +199,7 @@ vm_stats() ->
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
list_brokers() ->
|
list_brokers() ->
|
||||||
Running = mria:running_nodes(),
|
Running = emqx:running_nodes(),
|
||||||
[{Node, Broker} || #{node := Node} = Broker <- broker_info(Running)].
|
[{Node, Broker} || #{node := Node} = Broker <- broker_info(Running)].
|
||||||
|
|
||||||
lookup_broker(Node) ->
|
lookup_broker(Node) ->
|
||||||
|
@ -223,7 +223,7 @@ broker_info(Nodes) ->
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
get_metrics() ->
|
get_metrics() ->
|
||||||
nodes_info_count([get_metrics(Node) || Node <- mria:running_nodes()]).
|
nodes_info_count([get_metrics(Node) || Node <- emqx:running_nodes()]).
|
||||||
|
|
||||||
get_metrics(Node) ->
|
get_metrics(Node) ->
|
||||||
unwrap_rpc(emqx_proto_v1:get_metrics(Node)).
|
unwrap_rpc(emqx_proto_v1:get_metrics(Node)).
|
||||||
|
@ -238,13 +238,20 @@ get_stats() ->
|
||||||
'subscriptions.shared.count',
|
'subscriptions.shared.count',
|
||||||
'subscriptions.shared.max'
|
'subscriptions.shared.max'
|
||||||
],
|
],
|
||||||
CountStats = nodes_info_count([
|
CountStats = nodes_info_count(
|
||||||
begin
|
lists:foldl(
|
||||||
Stats = get_stats(Node),
|
fun(Node, Acc) ->
|
||||||
delete_keys(Stats, GlobalStatsKeys)
|
case get_stats(Node) of
|
||||||
end
|
{error, _} ->
|
||||||
|| Node <- mria:running_nodes()
|
Acc;
|
||||||
]),
|
Stats ->
|
||||||
|
[delete_keys(Stats, GlobalStatsKeys) | Acc]
|
||||||
|
end
|
||||||
|
end,
|
||||||
|
[],
|
||||||
|
emqx:running_nodes()
|
||||||
|
)
|
||||||
|
),
|
||||||
GlobalStats = maps:with(GlobalStatsKeys, maps:from_list(get_stats(node()))),
|
GlobalStats = maps:with(GlobalStatsKeys, maps:from_list(get_stats(node()))),
|
||||||
maps:merge(CountStats, GlobalStats).
|
maps:merge(CountStats, GlobalStats).
|
||||||
|
|
||||||
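The hunk above replaces a plain list comprehension with a fold, so a node that answers `{error, _}` over RPC is skipped instead of crashing the whole aggregation. A generalized sketch of that pattern (the helper name `safe_collect/2` is hypothetical, not part of this change):

```erlang
%% Fold over nodes, dropping per-node RPC errors instead of failing.
safe_collect(Fun, Nodes) ->
    lists:foldl(
        fun(Node, Acc) ->
            case Fun(Node) of
                {error, _} -> Acc;        %% unreachable/stopped node: skip it
                Result -> [Result | Acc]
            end
        end,
        [],
        Nodes
    ).
```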
|
@ -275,12 +282,12 @@ nodes_info_count(PropList) ->
|
||||||
lookup_client({clientid, ClientId}, FormatFun) ->
|
lookup_client({clientid, ClientId}, FormatFun) ->
|
||||||
lists:append([
|
lists:append([
|
||||||
lookup_client(Node, {clientid, ClientId}, FormatFun)
|
lookup_client(Node, {clientid, ClientId}, FormatFun)
|
||||||
|| Node <- mria:running_nodes()
|
|| Node <- emqx:running_nodes()
|
||||||
]);
|
]);
|
||||||
lookup_client({username, Username}, FormatFun) ->
|
lookup_client({username, Username}, FormatFun) ->
|
||||||
lists:append([
|
lists:append([
|
||||||
lookup_client(Node, {username, Username}, FormatFun)
|
lookup_client(Node, {username, Username}, FormatFun)
|
||||||
|| Node <- mria:running_nodes()
|
|| Node <- emqx:running_nodes()
|
||||||
]).
|
]).
|
||||||
|
|
||||||
lookup_client(Node, Key, FormatFun) ->
|
lookup_client(Node, Key, FormatFun) ->
|
||||||
|
@ -307,7 +314,7 @@ kickout_client(ClientId) ->
|
||||||
[] ->
|
[] ->
|
||||||
{error, not_found};
|
{error, not_found};
|
||||||
_ ->
|
_ ->
|
||||||
Results = [kickout_client(Node, ClientId) || Node <- mria:running_nodes()],
|
Results = [kickout_client(Node, ClientId) || Node <- emqx:running_nodes()],
|
||||||
check_results(Results)
|
check_results(Results)
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
@ -322,7 +329,7 @@ list_client_subscriptions(ClientId) ->
|
||||||
[] ->
|
[] ->
|
||||||
{error, not_found};
|
{error, not_found};
|
||||||
_ ->
|
_ ->
|
||||||
Results = [client_subscriptions(Node, ClientId) || Node <- mria:running_nodes()],
|
Results = [client_subscriptions(Node, ClientId) || Node <- emqx:running_nodes()],
|
||||||
Filter =
|
Filter =
|
||||||
fun
|
fun
|
||||||
({error, _}) ->
|
({error, _}) ->
|
||||||
|
@ -340,18 +347,18 @@ client_subscriptions(Node, ClientId) ->
|
||||||
{Node, unwrap_rpc(emqx_broker_proto_v1:list_client_subscriptions(Node, ClientId))}.
|
{Node, unwrap_rpc(emqx_broker_proto_v1:list_client_subscriptions(Node, ClientId))}.
|
||||||
|
|
||||||
clean_authz_cache(ClientId) ->
|
clean_authz_cache(ClientId) ->
|
||||||
Results = [clean_authz_cache(Node, ClientId) || Node <- mria:running_nodes()],
|
Results = [clean_authz_cache(Node, ClientId) || Node <- emqx:running_nodes()],
|
||||||
check_results(Results).
|
check_results(Results).
|
||||||
|
|
||||||
clean_authz_cache(Node, ClientId) ->
|
clean_authz_cache(Node, ClientId) ->
|
||||||
unwrap_rpc(emqx_proto_v1:clean_authz_cache(Node, ClientId)).
|
unwrap_rpc(emqx_proto_v1:clean_authz_cache(Node, ClientId)).
|
||||||
|
|
||||||
clean_authz_cache_all() ->
|
clean_authz_cache_all() ->
|
||||||
Results = [{Node, clean_authz_cache_all(Node)} || Node <- mria:running_nodes()],
|
Results = [{Node, clean_authz_cache_all(Node)} || Node <- emqx:running_nodes()],
|
||||||
wrap_results(Results).
|
wrap_results(Results).
|
||||||
|
|
||||||
clean_pem_cache_all() ->
|
clean_pem_cache_all() ->
|
||||||
Results = [{Node, clean_pem_cache_all(Node)} || Node <- mria:running_nodes()],
|
Results = [{Node, clean_pem_cache_all(Node)} || Node <- emqx:running_nodes()],
|
||||||
wrap_results(Results).
|
wrap_results(Results).
|
||||||
|
|
||||||
wrap_results(Results) ->
|
wrap_results(Results) ->
|
||||||
|
@ -379,7 +386,7 @@ set_keepalive(_ClientId, _Interval) ->
|
||||||
|
|
||||||
%% @private
|
%% @private
|
||||||
call_client(ClientId, Req) ->
|
call_client(ClientId, Req) ->
|
||||||
Results = [call_client(Node, ClientId, Req) || Node <- mria:running_nodes()],
|
Results = [call_client(Node, ClientId, Req) || Node <- emqx:running_nodes()],
|
||||||
Expected = lists:filter(
|
Expected = lists:filter(
|
||||||
fun
|
fun
|
||||||
({error, _}) -> false;
|
({error, _}) -> false;
|
||||||
|
@ -428,7 +435,7 @@ list_subscriptions(Node) ->
|
||||||
list_subscriptions_via_topic(Topic, FormatFun) ->
|
list_subscriptions_via_topic(Topic, FormatFun) ->
|
||||||
lists:append([
|
lists:append([
|
||||||
list_subscriptions_via_topic(Node, Topic, FormatFun)
|
list_subscriptions_via_topic(Node, Topic, FormatFun)
|
||||||
|| Node <- mria:running_nodes()
|
|| Node <- emqx:running_nodes()
|
||||||
]).
|
]).
|
||||||
|
|
||||||
list_subscriptions_via_topic(Node, Topic, _FormatFun = {M, F}) ->
|
list_subscriptions_via_topic(Node, Topic, _FormatFun = {M, F}) ->
|
||||||
|
@ -442,7 +449,7 @@ list_subscriptions_via_topic(Node, Topic, _FormatFun = {M, F}) ->
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
subscribe(ClientId, TopicTables) ->
|
subscribe(ClientId, TopicTables) ->
|
||||||
subscribe(mria:running_nodes(), ClientId, TopicTables).
|
subscribe(emqx:running_nodes(), ClientId, TopicTables).
|
||||||
|
|
||||||
subscribe([Node | Nodes], ClientId, TopicTables) ->
|
subscribe([Node | Nodes], ClientId, TopicTables) ->
|
||||||
case unwrap_rpc(emqx_management_proto_v3:subscribe(Node, ClientId, TopicTables)) of
|
case unwrap_rpc(emqx_management_proto_v3:subscribe(Node, ClientId, TopicTables)) of
|
||||||
|
@ -467,7 +474,7 @@ publish(Msg) ->
|
||||||
-spec unsubscribe(emqx_types:clientid(), emqx_types:topic()) ->
|
-spec unsubscribe(emqx_types:clientid(), emqx_types:topic()) ->
|
||||||
{unsubscribe, _} | {error, channel_not_found}.
|
{unsubscribe, _} | {error, channel_not_found}.
|
||||||
unsubscribe(ClientId, Topic) ->
|
unsubscribe(ClientId, Topic) ->
|
||||||
unsubscribe(mria:running_nodes(), ClientId, Topic).
|
unsubscribe(emqx:running_nodes(), ClientId, Topic).
|
||||||
|
|
||||||
-spec unsubscribe([node()], emqx_types:clientid(), emqx_types:topic()) ->
|
-spec unsubscribe([node()], emqx_types:clientid(), emqx_types:topic()) ->
|
||||||
{unsubscribe, _} | {error, channel_not_found}.
|
{unsubscribe, _} | {error, channel_not_found}.
|
||||||
|
@ -490,7 +497,7 @@ do_unsubscribe(ClientId, Topic) ->
|
||||||
-spec unsubscribe_batch(emqx_types:clientid(), [emqx_types:topic()]) ->
|
-spec unsubscribe_batch(emqx_types:clientid(), [emqx_types:topic()]) ->
|
||||||
{unsubscribe, _} | {error, channel_not_found}.
|
{unsubscribe, _} | {error, channel_not_found}.
|
||||||
unsubscribe_batch(ClientId, Topics) ->
|
unsubscribe_batch(ClientId, Topics) ->
|
||||||
unsubscribe_batch(mria:running_nodes(), ClientId, Topics).
|
unsubscribe_batch(emqx:running_nodes(), ClientId, Topics).
|
||||||
|
|
||||||
-spec unsubscribe_batch([node()], emqx_types:clientid(), [emqx_types:topic()]) ->
|
-spec unsubscribe_batch([node()], emqx_types:clientid(), [emqx_types:topic()]) ->
|
||||||
{unsubscribe_batch, _} | {error, channel_not_found}.
|
{unsubscribe_batch, _} | {error, channel_not_found}.
|
||||||
|
@ -515,7 +522,7 @@ do_unsubscribe_batch(ClientId, Topics) ->
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
get_alarms(Type) ->
|
get_alarms(Type) ->
|
||||||
[{Node, get_alarms(Node, Type)} || Node <- mria:running_nodes()].
|
[{Node, get_alarms(Node, Type)} || Node <- emqx:running_nodes()].
|
||||||
|
|
||||||
get_alarms(Node, Type) ->
|
get_alarms(Node, Type) ->
|
||||||
add_duration_field(unwrap_rpc(emqx_proto_v1:get_alarms(Node, Type))).
|
add_duration_field(unwrap_rpc(emqx_proto_v1:get_alarms(Node, Type))).
|
||||||
|
@ -524,7 +531,7 @@ deactivate(Node, Name) ->
|
||||||
unwrap_rpc(emqx_proto_v1:deactivate_alarm(Node, Name)).
|
unwrap_rpc(emqx_proto_v1:deactivate_alarm(Node, Name)).
|
||||||
|
|
||||||
delete_all_deactivated_alarms() ->
|
delete_all_deactivated_alarms() ->
|
||||||
[delete_all_deactivated_alarms(Node) || Node <- mria:running_nodes()].
|
[delete_all_deactivated_alarms(Node) || Node <- emqx:running_nodes()].
|
||||||
|
|
||||||
delete_all_deactivated_alarms(Node) ->
|
delete_all_deactivated_alarms(Node) ->
|
||||||
unwrap_rpc(emqx_proto_v1:delete_all_deactivated_alarms(Node)).
|
unwrap_rpc(emqx_proto_v1:delete_all_deactivated_alarms(Node)).
|
||||||
|
|
|
@ -163,7 +163,7 @@ cluster_query(Tab, QString, QSchema, MsFun, FmtFun) ->
|
||||||
{error, page_limit_invalid};
|
{error, page_limit_invalid};
|
||||||
Meta ->
|
Meta ->
|
||||||
{_CodCnt, NQString} = parse_qstring(QString, QSchema),
|
{_CodCnt, NQString} = parse_qstring(QString, QSchema),
|
||||||
Nodes = mria:running_nodes(),
|
Nodes = emqx:running_nodes(),
|
||||||
ResultAcc = init_query_result(),
|
ResultAcc = init_query_result(),
|
||||||
QueryState = init_query_state(Tab, NQString, MsFun, Meta),
|
QueryState = init_query_state(Tab, NQString, MsFun, Meta),
|
||||||
NResultAcc = do_cluster_query(
|
NResultAcc = do_cluster_query(
|
||||||
|
|
|
@ -101,7 +101,7 @@ cluster_info(get, _) ->
|
||||||
ClusterName = application:get_env(ekka, cluster_name, emqxcl),
|
ClusterName = application:get_env(ekka, cluster_name, emqxcl),
|
||||||
Info = #{
|
Info = #{
|
||||||
name => ClusterName,
|
name => ClusterName,
|
||||||
nodes => mria:running_nodes(),
|
nodes => emqx:running_nodes(),
|
||||||
self => node()
|
self => node()
|
||||||
},
|
},
|
||||||
{200, Info}.
|
{200, Info}.
|
||||||
|
|
|
@ -42,8 +42,6 @@
|
||||||
<<"alarm">>,
|
<<"alarm">>,
|
||||||
<<"sys_topics">>,
|
<<"sys_topics">>,
|
||||||
<<"sysmon">>,
|
<<"sysmon">>,
|
||||||
<<"limiter">>,
|
|
||||||
<<"trace">>,
|
|
||||||
<<"log">>,
|
<<"log">>,
|
||||||
<<"persistent_session_store">>,
|
<<"persistent_session_store">>,
|
||||||
<<"zones">>
|
<<"zones">>
|
||||||
|
@ -260,7 +258,7 @@ configs(get, Params, _Req) ->
|
||||||
QS = maps:get(query_string, Params, #{}),
|
QS = maps:get(query_string, Params, #{}),
|
||||||
Node = maps:get(<<"node">>, QS, node()),
|
Node = maps:get(<<"node">>, QS, node()),
|
||||||
case
|
case
|
||||||
lists:member(Node, mria:running_nodes()) andalso
|
lists:member(Node, emqx:running_nodes()) andalso
|
||||||
emqx_management_proto_v2:get_full_config(Node)
|
emqx_management_proto_v2:get_full_config(Node)
|
||||||
of
|
of
|
||||||
false ->
|
false ->
|
||||||
|
|
|
@ -483,7 +483,7 @@ err_msg_str(Reason) ->
|
||||||
io_lib:format("~p", [Reason]).
|
io_lib:format("~p", [Reason]).
|
||||||
|
|
||||||
list_listeners() ->
|
list_listeners() ->
|
||||||
[list_listeners(Node) || Node <- mria:running_nodes()].
|
[list_listeners(Node) || Node <- emqx:running_nodes()].
|
||||||
|
|
||||||
list_listeners(Node) ->
|
list_listeners(Node) ->
|
||||||
wrap_rpc(emqx_management_proto_v2:list_listeners(Node)).
|
wrap_rpc(emqx_management_proto_v2:list_listeners(Node)).
|
||||||
|
|
|
@ -59,7 +59,7 @@ metrics(get, #{query_string := Qs}) ->
|
||||||
maps:from_list(
|
maps:from_list(
|
||||||
emqx_mgmt:get_metrics(Node) ++ [{node, Node}]
|
emqx_mgmt:get_metrics(Node) ++ [{node, Node}]
|
||||||
)
|
)
|
||||||
|| Node <- mria:running_nodes()
|
|| Node <- emqx:running_nodes()
|
||||||
],
|
],
|
||||||
{200, Data}
|
{200, Data}
|
||||||
end.
|
end.
|
||||||
|
|
|
@ -127,21 +127,21 @@ list(get, #{query_string := Qs}) ->
|
||||||
true ->
|
true ->
|
||||||
{200, emqx_mgmt:get_stats()};
|
{200, emqx_mgmt:get_stats()};
|
||||||
_ ->
|
_ ->
|
||||||
Data = [
|
Data = lists:foldl(
|
||||||
maps:from_list(emqx_mgmt:get_stats(Node) ++ [{node, Node}])
|
fun(Node, Acc) ->
|
||||||
|| Node <- running_nodes()
|
case emqx_mgmt:get_stats(Node) of
|
||||||
],
|
{error, _Err} ->
|
||||||
|
Acc;
|
||||||
|
Stats when is_list(Stats) ->
|
||||||
|
Data = maps:from_list([{node, Node} | Stats]),
|
||||||
|
[Data | Acc]
|
||||||
|
end
|
||||||
|
end,
|
||||||
|
[],
|
||||||
|
emqx:running_nodes()
|
||||||
|
),
|
||||||
{200, Data}
|
{200, Data}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
%%%==============================================================================================
|
%%%==============================================================================================
|
||||||
%% Internal
|
%% Internal
|
||||||
|
|
||||||
running_nodes() ->
|
|
||||||
Nodes = erlang:nodes([visible, this]),
|
|
||||||
RpcResults = emqx_proto_v2:are_running(Nodes),
|
|
||||||
[
|
|
||||||
Node
|
|
||||||
|| {Node, IsRunning} <- lists:zip(Nodes, RpcResults),
|
|
||||||
IsRunning =:= {ok, true}
|
|
||||||
].
|
|
||||||
|
|
|
@ -390,7 +390,7 @@ trace(get, _Params) ->
|
||||||
fun(#{start_at := A}, #{start_at := B}) -> A > B end,
|
fun(#{start_at := A}, #{start_at := B}) -> A > B end,
|
||||||
emqx_trace:format(List0)
|
emqx_trace:format(List0)
|
||||||
),
|
),
|
||||||
Nodes = mria:running_nodes(),
|
Nodes = emqx:running_nodes(),
|
||||||
TraceSize = wrap_rpc(emqx_mgmt_trace_proto_v2:get_trace_size(Nodes)),
|
TraceSize = wrap_rpc(emqx_mgmt_trace_proto_v2:get_trace_size(Nodes)),
|
||||||
AllFileSize = lists:foldl(fun(F, Acc) -> maps:merge(Acc, F) end, #{}, TraceSize),
|
AllFileSize = lists:foldl(fun(F, Acc) -> maps:merge(Acc, F) end, #{}, TraceSize),
|
||||||
Now = erlang:system_time(second),
|
Now = erlang:system_time(second),
|
||||||
|
@ -464,7 +464,7 @@ format_trace(Trace0) ->
|
||||||
LogSize = lists:foldl(
|
LogSize = lists:foldl(
|
||||||
fun(Node, Acc) -> Acc#{Node => 0} end,
|
fun(Node, Acc) -> Acc#{Node => 0} end,
|
||||||
#{},
|
#{},
|
||||||
mria:running_nodes()
|
emqx:running_nodes()
|
||||||
),
|
),
|
||||||
Trace2 = maps:without([enable, filter], Trace1),
|
Trace2 = maps:without([enable, filter], Trace1),
|
||||||
Trace2#{
|
Trace2#{
|
||||||
|
@ -560,13 +560,13 @@ group_trace_file(ZipDir, TraceLog, TraceFiles) ->
|
||||||
).
|
).
|
||||||
|
|
||||||
collect_trace_file(undefined, TraceLog) ->
|
collect_trace_file(undefined, TraceLog) ->
|
||||||
Nodes = mria:running_nodes(),
|
Nodes = emqx:running_nodes(),
|
||||||
wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file(Nodes, TraceLog));
|
wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file(Nodes, TraceLog));
|
||||||
collect_trace_file(Node, TraceLog) ->
|
collect_trace_file(Node, TraceLog) ->
|
||||||
wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file([Node], TraceLog)).
|
wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file([Node], TraceLog)).
|
||||||
|
|
||||||
collect_trace_file_detail(TraceLog) ->
|
collect_trace_file_detail(TraceLog) ->
|
||||||
Nodes = mria:running_nodes(),
|
Nodes = emqx:running_nodes(),
|
||||||
wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file_detail(Nodes, TraceLog)).
|
wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file_detail(Nodes, TraceLog)).
|
||||||
|
|
||||||
wrap_rpc({GoodRes, BadNodes}) ->
|
wrap_rpc({GoodRes, BadNodes}) ->
|
||||||
|
@ -696,7 +696,7 @@ parse_node(Query, Default) ->
|
||||||
{ok, Default};
|
{ok, Default};
|
||||||
{ok, NodeBin} ->
|
{ok, NodeBin} ->
|
||||||
Node = binary_to_existing_atom(NodeBin),
|
Node = binary_to_existing_atom(NodeBin),
|
||||||
true = lists:member(Node, mria:running_nodes()),
|
true = lists:member(Node, emqx:running_nodes()),
|
||||||
{ok, Node}
|
{ok, Node}
|
||||||
end
|
end
|
||||||
catch
|
catch
|
||||||
|
|
|
@ -36,16 +36,16 @@ end_per_suite(_) ->
|
||||||
emqx_mgmt_api_test_util:end_suite([emqx_management, emqx_conf]).
|
emqx_mgmt_api_test_util:end_suite([emqx_management, emqx_conf]).
|
||||||
|
|
||||||
init_per_testcase(TestCase, Config) ->
|
init_per_testcase(TestCase, Config) ->
|
||||||
meck:expect(mria, running_nodes, 0, [node()]),
|
meck:expect(emqx, running_nodes, 0, [node()]),
|
||||||
emqx_common_test_helpers:init_per_testcase(?MODULE, TestCase, Config).
|
emqx_common_test_helpers:init_per_testcase(?MODULE, TestCase, Config).
|
||||||
|
|
||||||
end_per_testcase(TestCase, Config) ->
|
end_per_testcase(TestCase, Config) ->
|
||||||
meck:unload(mria),
|
meck:unload(emqx),
|
||||||
emqx_common_test_helpers:end_per_testcase(?MODULE, TestCase, Config).
|
emqx_common_test_helpers:end_per_testcase(?MODULE, TestCase, Config).
|
||||||
|
|
||||||
t_list_nodes(init, Config) ->
|
t_list_nodes(init, Config) ->
|
||||||
meck:expect(
|
meck:expect(
|
||||||
mria,
|
emqx,
|
||||||
cluster_nodes,
|
cluster_nodes,
|
||||||
fun
|
fun
|
||||||
(running) -> [node()];
|
(running) -> [node()];
|
||||||
|
@ -125,7 +125,7 @@ t_lookup_client(_Config) ->
|
||||||
emqx_mgmt:lookup_client({username, <<"user1">>}, ?FORMATFUN)
|
emqx_mgmt:lookup_client({username, <<"user1">>}, ?FORMATFUN)
|
||||||
),
|
),
|
||||||
?assertEqual([], emqx_mgmt:lookup_client({clientid, <<"notfound">>}, ?FORMATFUN)),
|
?assertEqual([], emqx_mgmt:lookup_client({clientid, <<"notfound">>}, ?FORMATFUN)),
|
||||||
meck:expect(mria, running_nodes, 0, [node(), 'fake@nonode']),
|
meck:expect(emqx, running_nodes, 0, [node(), 'fake@nonode']),
|
||||||
?assertMatch(
|
?assertMatch(
|
||||||
[_ | {error, nodedown}], emqx_mgmt:lookup_client({clientid, <<"client1">>}, ?FORMATFUN)
|
[_ | {error, nodedown}], emqx_mgmt:lookup_client({clientid, <<"client1">>}, ?FORMATFUN)
|
||||||
).
|
).
|
||||||
|
@ -188,7 +188,7 @@ t_clean_cache(_Config) ->
|
||||||
{error, _},
|
{error, _},
|
||||||
emqx_mgmt:clean_pem_cache_all()
|
emqx_mgmt:clean_pem_cache_all()
|
||||||
),
|
),
|
||||||
meck:expect(mria, running_nodes, 0, [node(), 'fake@nonode']),
|
meck:expect(emqx, running_nodes, 0, [node(), 'fake@nonode']),
|
||||||
?assertMatch(
|
?assertMatch(
|
||||||
{error, [{'fake@nonode', {error, _}}]},
|
{error, [{'fake@nonode', {error, _}}]},
|
||||||
emqx_mgmt:clean_authz_cache_all()
|
emqx_mgmt:clean_authz_cache_all()
|
||||||
|
|
|
@ -179,14 +179,14 @@ t_bad_rpc(_) ->
|
||||||
ClientLs1 = [start_emqtt_client(node(), I, 1883) || I <- lists:seq(1, 10)],
|
ClientLs1 = [start_emqtt_client(node(), I, 1883) || I <- lists:seq(1, 10)],
|
||||||
Path = emqx_mgmt_api_test_util:api_path(["clients?limit=2&page=2"]),
|
Path = emqx_mgmt_api_test_util:api_path(["clients?limit=2&page=2"]),
|
||||||
try
|
try
|
||||||
meck:expect(mria, running_nodes, 0, ['fake@nohost']),
|
meck:expect(emqx, running_nodes, 0, ['fake@nohost']),
|
||||||
{error, {_, 500, _}} = emqx_mgmt_api_test_util:request_api(get, Path),
|
{error, {_, 500, _}} = emqx_mgmt_api_test_util:request_api(get, Path),
|
||||||
%% good cop, bad cop
|
%% good cop, bad cop
|
||||||
meck:expect(mria, running_nodes, 0, [node(), 'fake@nohost']),
|
meck:expect(emqx, running_nodes, 0, [node(), 'fake@nohost']),
|
||||||
{error, {_, 500, _}} = emqx_mgmt_api_test_util:request_api(get, Path)
|
{error, {_, 500, _}} = emqx_mgmt_api_test_util:request_api(get, Path)
|
||||||
after
|
after
|
||||||
_ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs1),
|
_ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs1),
|
||||||
meck:unload(mria),
|
meck:unload(emqx),
|
||||||
emqx_mgmt_api_test_util:end_suite()
|
emqx_mgmt_api_test_util:end_suite()
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
|
|
@ -246,7 +246,7 @@ t_dashboard(_Config) ->
|
||||||
|
|
||||||
t_configs_node({'init', Config}) ->
|
t_configs_node({'init', Config}) ->
|
||||||
Node = node(),
|
Node = node(),
|
||||||
meck:expect(mria, running_nodes, fun() -> [Node, bad_node, other_node] end),
|
meck:expect(emqx, running_nodes, fun() -> [Node, bad_node, other_node] end),
|
||||||
meck:expect(
|
meck:expect(
|
||||||
emqx_management_proto_v2,
|
emqx_management_proto_v2,
|
||||||
get_full_config,
|
get_full_config,
|
||||||
|
@ -258,7 +258,7 @@ t_configs_node({'init', Config}) ->
|
||||||
),
|
),
|
||||||
Config;
|
Config;
|
||||||
t_configs_node({'end', _}) ->
|
t_configs_node({'end', _}) ->
|
||||||
meck:unload([mria, emqx_management_proto_v2]);
|
meck:unload([emqx, emqx_management_proto_v2]);
|
||||||
t_configs_node(_) ->
|
t_configs_node(_) ->
|
||||||
Node = atom_to_list(node()),
|
Node = atom_to_list(node()),
|
||||||
|
|
||||||
|
|
|
@ -168,8 +168,8 @@ t_api_listeners_list_not_ready(Config) when is_list(Config) ->
|
||||||
L3 = get_tcp_listeners(Node2),
|
L3 = get_tcp_listeners(Node2),
|
||||||
|
|
||||||
Comment = #{
|
Comment = #{
|
||||||
node1 => rpc:call(Node1, mria, running_nodes, []),
|
node1 => rpc:call(Node1, emqx, running_nodes, []),
|
||||||
node2 => rpc:call(Node2, mria, running_nodes, [])
|
node2 => rpc:call(Node2, emqx, running_nodes, [])
|
||||||
},
|
},
|
||||||
|
|
||||||
?assert(length(L1) > length(L2), Comment),
|
?assert(length(L1) > length(L2), Comment),
|
||||||
|
|
|
@ -24,10 +24,12 @@ all() ->
|
||||||
emqx_common_test_helpers:all(?MODULE).
|
emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
|
meck:expect(emqx, running_nodes, 0, [node(), 'fake@node']),
|
||||||
emqx_mgmt_api_test_util:init_suite(),
|
emqx_mgmt_api_test_util:init_suite(),
|
||||||
Config.
|
Config.
|
||||||
|
|
||||||
end_per_suite(_) ->
|
end_per_suite(_) ->
|
||||||
|
meck:unload(emqx),
|
||||||
emqx_mgmt_api_test_util:end_suite().
|
emqx_mgmt_api_test_util:end_suite().
|
||||||
|
|
||||||
t_stats_api(_) ->
|
t_stats_api(_) ->
|
||||||
|
|
|
@ -599,8 +599,8 @@ emqx_cluster() ->
|
||||||
].
|
].
|
||||||
|
|
||||||
emqx_cluster_data() ->
|
emqx_cluster_data() ->
|
||||||
Running = mria:cluster_nodes(running),
|
Running = emqx:cluster_nodes(running),
|
||||||
Stopped = mria:cluster_nodes(stopped),
|
Stopped = emqx:cluster_nodes(stopped),
|
||||||
[
|
[
|
||||||
{nodes_running, length(Running)},
|
{nodes_running, length(Running)},
|
||||||
{nodes_stopped, length(Stopped)}
|
{nodes_stopped, length(Stopped)}
|
||||||
|
|
|
@ -14,31 +14,27 @@
|
||||||
%% limitations under the License.
|
%% limitations under the License.
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
-module(emqx_plugin_libs_pool).
|
-module(emqx_resource_pool).
|
||||||
|
|
||||||
-export([
|
-export([
|
||||||
start_pool/3,
|
start/3,
|
||||||
stop_pool/1,
|
stop/1,
|
||||||
pool_name/1,
|
health_check_workers/2,
|
||||||
health_check_ecpool_workers/2,
|
health_check_workers/3
|
||||||
health_check_ecpool_workers/3
|
|
||||||
]).
|
]).
|
||||||
|
|
||||||
-include_lib("emqx/include/logger.hrl").
|
-include_lib("emqx/include/logger.hrl").
|
||||||
|
|
||||||
-define(HEALTH_CHECK_TIMEOUT, 15000).
|
-define(HEALTH_CHECK_TIMEOUT, 15000).
|
||||||
|
|
||||||
pool_name(ID) when is_binary(ID) ->
|
start(Name, Mod, Options) ->
|
||||||
list_to_atom(binary_to_list(ID)).
|
|
||||||
|
|
||||||
start_pool(Name, Mod, Options) ->
|
|
||||||
case ecpool:start_sup_pool(Name, Mod, Options) of
|
case ecpool:start_sup_pool(Name, Mod, Options) of
|
||||||
{ok, _} ->
|
{ok, _} ->
|
||||||
?SLOG(info, #{msg => "start_ecpool_ok", pool_name => Name}),
|
?SLOG(info, #{msg => "start_ecpool_ok", pool_name => Name}),
|
||||||
ok;
|
ok;
|
||||||
{error, {already_started, _Pid}} ->
|
{error, {already_started, _Pid}} ->
|
||||||
stop_pool(Name),
|
stop(Name),
|
||||||
start_pool(Name, Mod, Options);
|
start(Name, Mod, Options);
|
||||||
{error, Reason} ->
|
{error, Reason} ->
|
||||||
NReason = parse_reason(Reason),
|
NReason = parse_reason(Reason),
|
||||||
?SLOG(error, #{
|
?SLOG(error, #{
|
||||||
|
@ -49,7 +45,7 @@ start_pool(Name, Mod, Options) ->
|
||||||
{error, {start_pool_failed, Name, NReason}}
|
{error, {start_pool_failed, Name, NReason}}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
stop_pool(Name) ->
|
stop(Name) ->
|
||||||
case ecpool:stop_sup_pool(Name) of
|
case ecpool:stop_sup_pool(Name) of
|
||||||
ok ->
|
ok ->
|
||||||
?SLOG(info, #{msg => "stop_ecpool_ok", pool_name => Name});
|
?SLOG(info, #{msg => "stop_ecpool_ok", pool_name => Name});
|
||||||
|
@ -64,10 +60,10 @@ stop_pool(Name) ->
|
||||||
error({stop_pool_failed, Name, Reason})
|
error({stop_pool_failed, Name, Reason})
|
||||||
end.
|
end.
|
||||||
|
|
||||||
health_check_ecpool_workers(PoolName, CheckFunc) ->
|
health_check_workers(PoolName, CheckFunc) ->
|
||||||
health_check_ecpool_workers(PoolName, CheckFunc, ?HEALTH_CHECK_TIMEOUT).
|
health_check_workers(PoolName, CheckFunc, ?HEALTH_CHECK_TIMEOUT).
|
||||||
|
|
||||||
health_check_ecpool_workers(PoolName, CheckFunc, Timeout) ->
|
health_check_workers(PoolName, CheckFunc, Timeout) ->
|
||||||
Workers = [Worker || {_WorkerName, Worker} <- ecpool:workers(PoolName)],
|
Workers = [Worker || {_WorkerName, Worker} <- ecpool:workers(PoolName)],
|
||||||
DoPerWorker =
|
DoPerWorker =
|
||||||
fun(Worker) ->
|
fun(Worker) ->
|
|
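To summarize the rename above: `emqx_plugin_libs_pool` becomes `emqx_resource_pool`, and the `*_pool`/`*_ecpool_workers` function names lose their suffixes. A hedged sketch of a connector call site after the rename (the module `my_connector`, its `do_get_status/1` callback, and the pool name are illustrative):

```erlang
%% Start, health-check and stop an ecpool-backed pool with the new API.
Options = [{pool_size, 8}],  %% illustrative ecpool options
ok = emqx_resource_pool:start(<<"my_resource_id">>, my_connector, Options),
true = emqx_resource_pool:health_check_workers(
    <<"my_resource_id">>, fun my_connector:do_get_status/1
),
ok = emqx_resource_pool:stop(<<"my_resource_id">>).
```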
@ -53,7 +53,8 @@ fields("retainer") ->
|
||||||
sc(
|
sc(
|
||||||
?R_REF(flow_control),
|
?R_REF(flow_control),
|
||||||
flow_control,
|
flow_control,
|
||||||
#{}
|
#{},
|
||||||
|
?IMPORTANCE_HIDDEN
|
||||||
)},
|
)},
|
||||||
{max_payload_size,
|
{max_payload_size,
|
||||||
sc(
|
sc(
|
||||||
|
@ -125,7 +126,9 @@ desc(_) ->
|
||||||
%% hoconsc:mk(Type, #{desc => ?DESC(DescId)}).
|
%% hoconsc:mk(Type, #{desc => ?DESC(DescId)}).
|
||||||
|
|
||||||
sc(Type, DescId, Default) ->
|
sc(Type, DescId, Default) ->
|
||||||
hoconsc:mk(Type, #{default => Default, desc => ?DESC(DescId)}).
|
sc(Type, DescId, Default, ?DEFAULT_IMPORTANCE).
|
||||||
|
sc(Type, DescId, Default, Importance) ->
|
||||||
|
hoconsc:mk(Type, #{default => Default, desc => ?DESC(DescId), importance => Importance}).
|
||||||
|
|
||||||
backend_config() ->
|
backend_config() ->
|
||||||
hoconsc:mk(hoconsc:ref(?MODULE, mnesia_config), #{desc => ?DESC(backend)}).
|
hoconsc:mk(hoconsc:ref(?MODULE, mnesia_config), #{desc => ?DESC(backend)}).
|
||||||
|
|
|
@ -758,23 +758,22 @@ with_conf(ConfMod, Case) ->
|
||||||
end.
|
end.
|
||||||
|
|
||||||
make_limiter_cfg(Rate) ->
|
make_limiter_cfg(Rate) ->
|
||||||
Infinity = emqx_limiter_schema:infinity_value(),
|
|
||||||
Client = #{
|
Client = #{
|
||||||
rate => Rate,
|
rate => Rate,
|
||||||
initial => 0,
|
initial => 0,
|
||||||
capacity => Infinity,
|
burst => 0,
|
||||||
low_watermark => 1,
|
low_watermark => 1,
|
||||||
divisible => false,
|
divisible => false,
|
||||||
max_retry_time => timer:seconds(5),
|
max_retry_time => timer:seconds(5),
|
||||||
failure_strategy => force
|
failure_strategy => force
|
||||||
},
|
},
|
||||||
#{client => Client, rate => Infinity, initial => 0, capacity => Infinity}.
|
#{client => Client, rate => Rate, initial => 0, burst => 0}.
|
||||||
|
|
||||||
make_limiter_json(Rate) ->
|
make_limiter_json(Rate) ->
|
||||||
Client = #{
|
Client = #{
|
||||||
<<"rate">> => Rate,
|
<<"rate">> => Rate,
|
||||||
<<"initial">> => 0,
|
<<"initial">> => 0,
|
||||||
<<"capacity">> => <<"infinity">>,
|
<<"burst">> => <<"0">>,
|
||||||
<<"low_watermark">> => 0,
|
<<"low_watermark">> => 0,
|
||||||
<<"divisible">> => <<"false">>,
|
<<"divisible">> => <<"false">>,
|
||||||
<<"max_retry_time">> => <<"5s">>,
|
<<"max_retry_time">> => <<"5s">>,
|
||||||
|
@ -784,5 +783,5 @@ make_limiter_json(Rate) ->
|
||||||
<<"client">> => Client,
|
<<"client">> => Client,
|
||||||
<<"rate">> => <<"infinity">>,
|
<<"rate">> => <<"infinity">>,
|
||||||
<<"initial">> => 0,
|
<<"initial">> => 0,
|
||||||
<<"capacity">> => <<"infinity">>
|
<<"burst">> => <<"0">>
|
||||||
}.
|
}.
|
||||||
|
|
|
@ -38,7 +38,7 @@ namespace() -> rule_engine.
|
||||||
tags() ->
|
tags() ->
|
||||||
[<<"Rule Engine">>].
|
[<<"Rule Engine">>].
|
||||||
|
|
||||||
roots() -> [{"rule_engine", ?HOCON(?R_REF("rule_engine"), #{importance => ?IMPORTANCE_HIDDEN})}].
|
roots() -> [{"rule_engine", ?HOCON(?R_REF("rule_engine"), #{importance => ?IMPORTANCE_LOW})}].
|
||||||
|
|
||||||
fields("rule_engine") ->
|
fields("rule_engine") ->
|
||||||
rule_engine_settings() ++
|
rule_engine_settings() ++
|
||||||
|
|
|
@ -227,6 +227,7 @@
|
||||||
now_timestamp/1,
|
now_timestamp/1,
|
||||||
format_date/3,
|
format_date/3,
|
||||||
format_date/4,
|
format_date/4,
|
||||||
|
date_to_unix_ts/3,
|
||||||
date_to_unix_ts/4
|
date_to_unix_ts/4
|
||||||
]).
|
]).
|
||||||
|
|
||||||
|
@ -1085,6 +1086,9 @@ format_date(TimeUnit, Offset, FormatString, TimeEpoch) ->
|
||||||
)
|
)
|
||||||
).
|
).
|
||||||
|
|
||||||
|
date_to_unix_ts(TimeUnit, FormatString, InputString) ->
|
||||||
|
date_to_unix_ts(TimeUnit, "Z", FormatString, InputString).
|
||||||
|
|
||||||
date_to_unix_ts(TimeUnit, Offset, FormatString, InputString) ->
|
date_to_unix_ts(TimeUnit, Offset, FormatString, InputString) ->
|
||||||
emqx_rule_date:parse_date(
|
emqx_rule_date:parse_date(
|
||||||
time_unit(TimeUnit),
|
time_unit(TimeUnit),
|
||||||
|
|
|
@ -686,7 +686,6 @@ t_jq(_) ->
|
||||||
%% Got timeout as expected
|
%% Got timeout as expected
|
||||||
got_timeout
|
got_timeout
|
||||||
end,
|
end,
|
||||||
_ConfigRootKey = emqx_rule_engine_schema:namespace(),
|
|
||||||
?assertThrow(
|
?assertThrow(
|
||||||
{jq_exception, {timeout, _}},
|
{jq_exception, {timeout, _}},
|
||||||
apply_func(jq, [TOProgram, <<"-2">>])
|
apply_func(jq, [TOProgram, <<"-2">>])
|
||||||
|
@ -959,7 +958,7 @@ prop_format_date_fun() ->
|
||||||
Args1 = [<<"second">>, <<"+07:00">>, <<"%m--%d--%y---%H:%M:%S%Z">>],
|
Args1 = [<<"second">>, <<"+07:00">>, <<"%m--%d--%y---%H:%M:%S%Z">>],
|
||||||
?FORALL(
|
?FORALL(
|
||||||
S,
|
S,
|
||||||
erlang:system_time(second),
|
range(0, 4000000000),
|
||||||
S ==
|
S ==
|
||||||
apply_func(
|
apply_func(
|
||||||
date_to_unix_ts,
|
date_to_unix_ts,
|
||||||
|
@ -975,7 +974,7 @@ prop_format_date_fun() ->
|
||||||
Args2 = [<<"millisecond">>, <<"+04:00">>, <<"--%m--%d--%y---%H:%M:%S%Z">>],
|
Args2 = [<<"millisecond">>, <<"+04:00">>, <<"--%m--%d--%y---%H:%M:%S%Z">>],
|
||||||
?FORALL(
|
?FORALL(
|
||||||
S,
|
S,
|
||||||
erlang:system_time(millisecond),
|
range(0, 4000000000),
|
||||||
S ==
|
S ==
|
||||||
apply_func(
|
apply_func(
|
||||||
date_to_unix_ts,
|
date_to_unix_ts,
|
||||||
|
@ -991,7 +990,7 @@ prop_format_date_fun() ->
|
||||||
Args = [<<"second">>, <<"+08:00">>, <<"%y-%m-%d-%H:%M:%S%Z">>],
|
Args = [<<"second">>, <<"+08:00">>, <<"%y-%m-%d-%H:%M:%S%Z">>],
|
||||||
?FORALL(
|
?FORALL(
|
||||||
S,
|
S,
|
||||||
erlang:system_time(second),
|
range(0, 4000000000),
|
||||||
S ==
|
S ==
|
||||||
apply_func(
|
apply_func(
|
||||||
date_to_unix_ts,
|
date_to_unix_ts,
|
||||||
|
@ -1003,6 +1002,24 @@ prop_format_date_fun() ->
|
||||||
)
|
)
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
|
),
|
||||||
|
%% When no offset is specified, the offset should be taken from the formatted time string
|
||||||
|
ArgsNoOffset = [<<"second">>, <<"%y-%m-%d-%H:%M:%S%Z">>],
|
||||||
|
ArgsOffset = [<<"second">>, <<"+08:00">>, <<"%y-%m-%d-%H:%M:%S%Z">>],
|
||||||
|
?FORALL(
|
||||||
|
S,
|
||||||
|
range(0, 4000000000),
|
||||||
|
S ==
|
||||||
|
apply_func(
|
||||||
|
date_to_unix_ts,
|
||||||
|
ArgsNoOffset ++
|
||||||
|
[
|
||||||
|
apply_func(
|
||||||
|
format_date,
|
||||||
|
ArgsOffset ++ [S]
|
||||||
|
)
|
||||||
|
]
|
||||||
|
)
|
||||||
).
|
).
|
||||||
|
|
||||||
%%------------------------------------------------------------------------------
|
%%------------------------------------------------------------------------------
|
||||||
|
|
8
build
8
build
|
@ -117,18 +117,14 @@ make_docs() {
|
||||||
mkdir -p "$docdir" "$dashboard_www_static"
|
mkdir -p "$docdir" "$dashboard_www_static"
|
||||||
# shellcheck disable=SC2086
|
# shellcheck disable=SC2086
|
||||||
erl -noshell -pa $libs_dir1 $libs_dir2 $libs_dir3 -eval \
|
erl -noshell -pa $libs_dir1 $libs_dir2 $libs_dir3 -eval \
|
||||||
"I18nFile = filename:join([apps, emqx_dashboard, priv, 'i18n.conf']), \
|
"ok = emqx_conf:dump_schema('$docdir', $SCHEMA_MODULE), \
|
||||||
ok = emqx_conf:dump_schema('$docdir', $SCHEMA_MODULE, I18nFile), \
|
|
||||||
halt(0)."
|
halt(0)."
|
||||||
cp "$docdir"/bridge-api-*.json "$dashboard_www_static"
|
cp "$docdir"/bridge-api-*.json "$dashboard_www_static"
|
||||||
cp "$docdir"/hot-config-schema-*.json "$dashboard_www_static"
|
cp "$docdir"/hot-config-schema-*.json "$dashboard_www_static"
|
||||||
}
|
}
|
||||||
|
|
||||||
assert_no_compile_time_only_deps() {
|
assert_no_compile_time_only_deps() {
|
||||||
if [ "$("$FIND" "_build/$PROFILE/rel/emqx/lib/" -maxdepth 1 -name 'gpb-*' -type d)" != "" ]; then
|
:
|
||||||
echo "gpb should not be included in the release"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
}
|
}
|
||||||
|
|
||||||
make_rel() {
|
make_rel() {
|
||||||
|
|
|
@ -1,2 +0,0 @@
|
||||||
Hide `flapping_detect/conn_congestion/stats` configuration.
|
|
||||||
Deprecate `flapping_detect.enable`.
|
|
|
@ -1 +0,0 @@
|
||||||
Hide the `auto_subscribe` configuration items so that they can be modified later only through the HTTP API.
|
|
|
@ -1 +0,0 @@
|
||||||
Hide data items (rule_engine/bridge/authz/authn) from configuration files and documentation.
|
|
|
@ -0,0 +1,2 @@
|
||||||
|
Unify the config formats for `cluster.core_nodes` and `cluster.static.seeds`.
|
||||||
|
Now they both support either an array format `["emqx1@127.0.0.1", "emqx2@127.0.0.1"]` or a comma-separated string `"emqx1@127.0.0.1,emqx2@127.0.0.1"`.
|
|
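A minimal sketch of accepting either representation (the helper name `to_node_list/1` is hypothetical, not part of this change; HOCON array elements arrive as binaries):

```erlang
to_node_list(Nodes) when is_list(Nodes) ->
    %% Array form: ["emqx1@127.0.0.1", "emqx2@127.0.0.1"]
    [binary_to_atom(N, utf8) || N <- Nodes];
to_node_list(Bin) when is_binary(Bin) ->
    %% Comma-separated form: "emqx1@127.0.0.1,emqx2@127.0.0.1"
    [binary_to_atom(N, utf8)
     || N <- binary:split(Bin, <<",">>, [global, trim_all])].
```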
@ -1 +1,7 @@
|
||||||
hide exhook/rewrite/topic_metric/persistent_session_store/overload_protection from the docs and configuration file.
|
Hide a large number of advanced options to simplify the configuration file.
|
||||||
|
|
||||||
|
That includes `rewrite`, `topic_metric`, `persistent_session_store`, `overload_protection`,
|
||||||
|
`flapping_detect`, `conn_congestion`, `stats`, `auto_subscribe`, `broker_perf`,
|
||||||
|
`shared_subscription_group`, `slow_subs`, `ssl_options.user_lookup_fun` and some advanced items
|
||||||
|
in the `node` and `dashboard` sections, [#10358](https://github.com/emqx/emqx/pull/10358),
|
||||||
|
[#10381](https://github.com/emqx/emqx/pull/10381), [#10385](https://github.com/emqx/emqx/pull/10385).
|
||||||
|
|
|
@ -0,0 +1 @@
|
||||||
|
A new function to convert a formatted date to an integer timestamp has been added: `date_to_unix_ts/3`.
|
|
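A hedged round-trip sketch of the new arity, modeled on the property test earlier in this diff (the module is assumed to be `emqx_rule_funcs`; the timestamp is arbitrary): the 3-argument form simply defaults the offset of the existing 4-argument form to `"Z"`, taking any zone information from the formatted string itself.

```erlang
S = 1700000000,
Formatted = emqx_rule_funcs:format_date(
    <<"second">>, <<"Z">>, <<"%y-%m-%d-%H:%M:%S%Z">>, S
),
%% No explicit offset: it is read back from the %Z part of Formatted.
S = emqx_rule_funcs:date_to_unix_ts(
    <<"second">>, <<"%y-%m-%d-%H:%M:%S%Z">>, Formatted
).
```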
@ -0,0 +1,4 @@
|
||||||
|
Optimize the configuration priority mechanism to fix the issue where the configuration
|
||||||
|
changes made to `etc/emqx.conf` do not take effect after restarting EMQX.
|
||||||
|
|
||||||
|
More introduction about the new mechanism: [Configure Override Rules](https://www.emqx.io/docs/en/v5.0/configuration/configuration.html#configure-override-rules)
|
|
@ -0,0 +1,6 @@
|
||||||
|
Fix an error in the `/api/v5/monitor_current` API endpoint that happens when some EMQX nodes are down.
|
||||||
|
|
||||||
|
Prior to this fix, the request sometimes returned HTTP code 500 with the following message:
|
||||||
|
```
|
||||||
|
{"code":"INTERNAL_ERROR","message":"error, badarg, [{erlang,'++',[{error,nodedown},[{node,'emqx@10.42.0.150'}]], ...
|
||||||
|
```
|
|
@ -0,0 +1,6 @@
|
||||||
|
Simplify the configuration of the limiter feature and optimize some code
|
||||||
|
- Rename `message_in` to `messages`
|
||||||
|
- Rename `bytes_in` to `bytes`
|
||||||
|
- Use `burst` instead of `capacity`
|
||||||
|
- Hide non-important fields
|
||||||
|
- Optimize limiter instances under different rate settings
|
|
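A hedged before/after sketch of the renames in Erlang map form, mirroring the `make_limiter_cfg/1` change earlier in this diff. Key renames only; `burst` is not a drop-in numeric replacement for `capacity`, so the values are placeholders.

```erlang
%% Before: message_in/bytes_in with a capacity field.
OldCfg = #{message_in => #{rate => 1000,   capacity => 2000},
           bytes_in   => #{rate => 100000, capacity => 200000}},
%% After: messages/bytes with a burst field.
NewCfg = #{messages => #{rate => 1000,   burst => 1000},
           bytes    => #{rate => 100000, burst => 100000}}.
```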
@ -0,0 +1,2 @@
|
||||||
|
Simplify the configuration of the `retainer` feature.
|
||||||
|
- Mark `flow_control` as a non-important field.
|
|
@ -0,0 +1 @@
|
||||||
|
Add support for [Protocol Buffers](https://protobuf.dev/) schemas in Schema Registry.
|
|
@ -530,15 +530,16 @@ t_write_failure(Config) ->
|
||||||
fun(Trace0) ->
|
fun(Trace0) ->
|
||||||
ct:pal("trace: ~p", [Trace0]),
|
ct:pal("trace: ~p", [Trace0]),
|
||||||
Trace = ?of_kind(buffer_worker_flush_nack, Trace0),
|
Trace = ?of_kind(buffer_worker_flush_nack, Trace0),
|
||||||
?assertMatch([#{result := {async_return, {error, _}}} | _], Trace),
|
[#{result := Result} | _] = Trace,
|
||||||
[#{result := {async_return, {error, Error}}} | _] = Trace,
|
case Result of
|
||||||
case Error of
|
{async_return, {error, {resource_error, _}}} ->
|
||||||
{resource_error, _} ->
|
|
||||||
ok;
|
ok;
|
||||||
{recoverable_error, disconnected} ->
|
{async_return, {error, {recoverable_error, disconnected}}} ->
|
||||||
|
ok;
|
||||||
|
{error, {resource_error, _}} ->
|
||||||
ok;
|
ok;
|
||||||
_ ->
|
_ ->
|
||||||
ct:fail("unexpected error: ~p", [Error])
|
ct:fail("unexpected error: ~p", [Result])
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
),
|
),
|
||||||
|
|
|
@ -917,7 +917,7 @@ t_invalid_private_key(Config) ->
|
||||||
#{<<"private_key">> => InvalidPrivateKeyPEM}
|
#{<<"private_key">> => InvalidPrivateKeyPEM}
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
#{?snk_kind := gcp_pubsub_bridge_jwt_worker_failed_to_start},
|
#{?snk_kind := "gcp_pubsub_bridge_jwt_worker_failed_to_start"},
|
||||||
20_000
|
20_000
|
||||||
),
|
),
|
||||||
Res
|
Res
|
||||||
|
@ -928,7 +928,7 @@ t_invalid_private_key(Config) ->
|
||||||
[#{reason := Reason}] when
|
[#{reason := Reason}] when
|
||||||
Reason =:= noproc orelse
|
Reason =:= noproc orelse
|
||||||
Reason =:= {shutdown, {error, empty_key}},
|
Reason =:= {shutdown, {error, empty_key}},
|
||||||
?of_kind(gcp_pubsub_bridge_jwt_worker_failed_to_start, Trace)
|
?of_kind("gcp_pubsub_bridge_jwt_worker_failed_to_start", Trace)
|
||||||
),
|
),
|
||||||
?assertMatch(
|
?assertMatch(
|
||||||
[#{error := empty_key}],
|
[#{error := empty_key}],
|
||||||
|
@ -956,14 +956,14 @@ t_jwt_worker_start_timeout(Config) ->
|
||||||
#{<<"private_key">> => InvalidPrivateKeyPEM}
|
#{<<"private_key">> => InvalidPrivateKeyPEM}
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
#{?snk_kind := gcp_pubsub_bridge_jwt_timeout},
|
#{?snk_kind := "gcp_pubsub_bridge_jwt_timeout"},
|
||||||
20_000
|
20_000
|
||||||
),
|
),
|
||||||
Res
|
Res
|
||||||
end,
|
end,
|
||||||
fun(Res, Trace) ->
|
fun(Res, Trace) ->
|
||||||
?assertMatch({ok, _}, Res),
|
?assertMatch({ok, _}, Res),
|
||||||
?assertMatch([_], ?of_kind(gcp_pubsub_bridge_jwt_timeout, Trace)),
|
?assertMatch([_], ?of_kind("gcp_pubsub_bridge_jwt_timeout", Trace)),
|
||||||
ok
|
ok
|
||||||
end
|
end
|
||||||
),
|
),
|
||||||
|
@ -1329,7 +1329,7 @@ t_failed_to_start_jwt_worker(Config) ->
|
||||||
fun(Trace) ->
|
fun(Trace) ->
|
||||||
?assertMatch(
|
?assertMatch(
|
||||||
[#{reason := {error, restarting}}],
|
[#{reason := {error, restarting}}],
|
||||||
?of_kind(gcp_pubsub_bridge_jwt_worker_failed_to_start, Trace)
|
?of_kind("gcp_pubsub_bridge_jwt_worker_failed_to_start", Trace)
|
||||||
),
|
),
|
||||||
ok
|
ok
|
||||||
end
|
end
|
||||||
|
|
|
@ -5,8 +5,6 @@
|
||||||
|
|
||||||
-include_lib("eunit/include/eunit.hrl").
|
-include_lib("eunit/include/eunit.hrl").
|
||||||
|
|
||||||
-import(emqx_ee_bridge_influxdb, [to_influx_lines/1]).
|
|
||||||
|
|
||||||
-define(INVALID_LINES, [
|
-define(INVALID_LINES, [
|
||||||
" ",
|
" ",
|
||||||
" \n",
|
" \n",
|
||||||
|
@ -326,3 +324,13 @@ test_pairs(PairsList) ->
|
||||||
|
|
||||||
join(Sep, LinesList) ->
|
join(Sep, LinesList) ->
|
||||||
lists:flatten(lists:join(Sep, LinesList)).
|
lists:flatten(lists:join(Sep, LinesList)).
|
||||||
|
|
||||||
|
to_influx_lines(RawLines) ->
|
||||||
|
OldLevel = emqx_logger:get_primary_log_level(),
|
||||||
|
try
|
||||||
|
%% mute error logs from this call
|
||||||
|
emqx_logger:set_primary_log_level(none),
|
||||||
|
emqx_ee_bridge_influxdb:to_influx_lines(RawLines)
|
||||||
|
after
|
||||||
|
emqx_logger:set_primary_log_level(OldLevel)
|
||||||
|
end.
|
||||||
|
|
|
@ -147,6 +147,16 @@ ensure_loaded() ->
|
||||||
_ = emqx_ee_bridge:module_info(),
|
_ = emqx_ee_bridge:module_info(),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
mongo_type(Config) ->
|
||||||
|
case ?config(mongo_type, Config) of
|
||||||
|
rs ->
|
||||||
|
{rs, maps:get(<<"replica_set_name">>, ?config(mongo_config, Config))};
|
||||||
|
sharded ->
|
||||||
|
sharded;
|
||||||
|
single ->
|
||||||
|
single
|
||||||
|
end.
|
||||||
|
|
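The new `mongo_type/1` helper returns exactly the shape `mongo_api:connect/4` takes as its first argument. A hedged sketch of the resulting call for the replica-set case (host, database, collection, and set name are illustrative):

```erlang
Type = {rs, <<"rs0">>},  %% what mongo_type/1 yields for the rs case
{ok, Client} = mongo_api:connect(
    Type, ["mongo1:27017"], [], [{database, <<"mqtt">>}, {w_mode, unsafe}]
),
%% Same delete-everything pattern the rewritten clear_db/1 below uses.
{true, _} = mongo_api:delete(Client, <<"mqtt_messages">>, #{}),
mongo_api:disconnect(Client).
```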
||||||
mongo_type_bin(rs) ->
|
mongo_type_bin(rs) ->
|
||||||
<<"mongodb_rs">>;
|
<<"mongodb_rs">>;
|
||||||
mongo_type_bin(sharded) ->
|
mongo_type_bin(sharded) ->
|
||||||
|
@ -263,17 +273,14 @@ create_bridge_http(Params) ->
|
||||||
end.
|
end.
|
||||||
|
|
||||||
clear_db(Config) ->
|
clear_db(Config) ->
|
||||||
Type = mongo_type_bin(?config(mongo_type, Config)),
|
Type = mongo_type(Config),
|
||||||
Name = ?config(mongo_name, Config),
|
Host = ?config(mongo_host, Config),
|
||||||
#{<<"collection">> := Collection} = ?config(mongo_config, Config),
|
Port = ?config(mongo_port, Config),
|
||||||
ResourceID = emqx_bridge_resource:resource_id(Type, Name),
|
Server = Host ++ ":" ++ integer_to_list(Port),
|
||||||
{ok, _, #{state := #{connector_state := #{poolname := PoolName}}}} =
|
#{<<"database">> := Db, <<"collection">> := Collection} = ?config(mongo_config, Config),
|
||||||
emqx_resource:get_instance(ResourceID),
|
{ok, Client} = mongo_api:connect(Type, [Server], [], [{database, Db}, {w_mode, unsafe}]),
|
||||||
Selector = #{},
|
{true, _} = mongo_api:delete(Client, Collection, _Selector = #{}),
|
||||||
{true, _} = ecpool:pick_and_do(
|
mongo_api:disconnect(Client).
|
||||||
PoolName, {mongo_api, delete, [Collection, Selector]}, no_handover
|
|
||||||
),
|
|
||||||
ok.
|
|
||||||
|
|
||||||
find_all(Config) ->
|
find_all(Config) ->
|
||||||
Type = mongo_type_bin(?config(mongo_type, Config)),
|
Type = mongo_type_bin(?config(mongo_type, Config)),
|
||||||
|
|
|
@ -265,7 +265,7 @@ unprepare(Config, Key) ->
|
||||||
Name = ?config(mysql_name, Config),
|
Name = ?config(mysql_name, Config),
|
||||||
BridgeType = ?config(mysql_bridge_type, Config),
|
BridgeType = ?config(mysql_bridge_type, Config),
|
||||||
ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
|
ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
|
||||||
{ok, _, #{state := #{poolname := PoolName}}} = emqx_resource:get_instance(ResourceID),
|
{ok, _, #{state := #{pool_name := PoolName}}} = emqx_resource:get_instance(ResourceID),
|
||||||
[
|
[
|
||||||
begin
|
begin
|
||||||
{ok, Conn} = ecpool_worker:client(Worker),
|
{ok, Conn} = ecpool_worker:client(Worker),
|
||||||
|
|
|
@ -44,7 +44,7 @@
|
||||||
|
|
||||||
-type state() ::
|
-type state() ::
|
||||||
#{
|
#{
|
||||||
poolname := atom(),
|
pool_name := binary(),
|
||||||
prepare_cql := prepares(),
|
prepare_cql := prepares(),
|
||||||
params_tokens := params_tokens(),
|
params_tokens := params_tokens(),
|
||||||
%% returned by ecql:prepare/2
|
%% returned by ecql:prepare/2
|
||||||
|
@@ -124,14 +124,10 @@ on_start(
         false ->
             []
     end,
-    %% use InstaId of binary type as Pool name, which is supported in ecpool.
-    PoolName = InstId,
-    Prepares = parse_prepare_cql(Config),
-    InitState = #{poolname => PoolName, prepare_statement => #{}},
-    State = maps:merge(InitState, Prepares),
-    case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of
+    State = parse_prepare_cql(Config),
+    case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of
         ok ->
-            {ok, init_prepare(State)};
+            {ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})};
         {error, Reason} ->
             ?tp(
                 cassandra_connector_start_failed,
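With emqx_resource_pool, the pool is keyed directly by the resource instance id (a binary), so the intermediate PoolName = InstId alias and the merged InitState both disappear. The start/stop lifecycle reduces to the shape below; a sketch only, assuming it sits in a connector module that implements ecpool's connect/1 callback:

    %% Start the pool under the instance id, then stop it under the
    %% same id; both calls take the binary resource instance id.
    demo_lifecycle(InstId, Options) ->
        case emqx_resource_pool:start(InstId, ?MODULE, Options) of
            ok -> emqx_resource_pool:stop(InstId);
            {error, _} = Error -> Error
        end.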
@@ -140,12 +136,12 @@ on_start(
             {error, Reason}
     end.
 
-on_stop(InstId, #{poolname := PoolName}) ->
+on_stop(InstId, #{pool_name := PoolName}) ->
     ?SLOG(info, #{
         msg => "stopping_cassandra_connector",
         connector => InstId
     }),
-    emqx_plugin_libs_pool:stop_pool(PoolName).
+    emqx_resource_pool:stop(PoolName).
 
 -type request() ::
     % emqx_bridge.erl
|
@ -184,7 +180,7 @@ do_single_query(
|
||||||
InstId,
|
InstId,
|
||||||
Request,
|
Request,
|
||||||
Async,
|
Async,
|
||||||
#{poolname := PoolName} = State
|
#{pool_name := PoolName} = State
|
||||||
) ->
|
) ->
|
||||||
{Type, PreparedKeyOrSQL, Params} = parse_request_to_cql(Request),
|
{Type, PreparedKeyOrSQL, Params} = parse_request_to_cql(Request),
|
||||||
?tp(
|
?tp(
|
||||||
|
@@ -232,7 +228,7 @@ do_batch_query(
     InstId,
     Requests,
     Async,
-    #{poolname := PoolName} = State
+    #{pool_name := PoolName} = State
 ) ->
     CQLs =
         lists:map(
@@ -305,8 +301,8 @@ exec_cql_batch_query(InstId, PoolName, Async, CQLs) ->
         Result
     end.
 
-on_get_status(_InstId, #{poolname := Pool} = State) ->
-    case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of
+on_get_status(_InstId, #{pool_name := PoolName} = State) ->
+    case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of
         true ->
             case do_check_prepares(State) of
                 ok ->
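emqx_resource_pool:health_check_workers/2 replaces health_check_ecpool_workers/2 one-for-one: it applies the given fun to each pooled connection and folds the results into a single boolean. A sketch of a caller, with a hypothetical is_alive/1 placeholder in place of the real do_get_status/1 ping:

    status_example(#{pool_name := PoolName}) ->
        case emqx_resource_pool:health_check_workers(PoolName, fun is_alive/1) of
            true -> connected;
            false -> disconnected
        end.

    %% Placeholder only; a real check would ping the connection.
    is_alive(_Conn) -> true.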
@@ -327,7 +323,7 @@ do_get_status(Conn) ->
 
 do_check_prepares(#{prepare_cql := Prepares}) when is_map(Prepares) ->
     ok;
-do_check_prepares(State = #{poolname := PoolName, prepare_cql := {error, Prepares}}) ->
+do_check_prepares(State = #{pool_name := PoolName, prepare_cql := {error, Prepares}}) ->
     %% retry to prepare
     case prepare_cql(Prepares, PoolName) of
         {ok, Sts} ->
@@ -397,7 +393,7 @@ parse_prepare_cql([], Prepares, Tokens) ->
         params_tokens => Tokens
     }.
 
-init_prepare(State = #{prepare_cql := Prepares, poolname := PoolName}) ->
+init_prepare(State = #{prepare_cql := Prepares, pool_name := PoolName}) ->
     case maps:size(Prepares) of
         0 ->
             State;
@@ -429,17 +425,17 @@ prepare_cql(Prepares, PoolName) ->
     end.
 
 do_prepare_cql(Prepares, PoolName) ->
-    do_prepare_cql(ecpool:workers(PoolName), Prepares, PoolName, #{}).
+    do_prepare_cql(ecpool:workers(PoolName), Prepares, #{}).
 
-do_prepare_cql([{_Name, Worker} | T], Prepares, PoolName, _LastSts) ->
+do_prepare_cql([{_Name, Worker} | T], Prepares, _LastSts) ->
     {ok, Conn} = ecpool_worker:client(Worker),
     case prepare_cql_to_conn(Conn, Prepares) of
         {ok, Sts} ->
-            do_prepare_cql(T, Prepares, PoolName, Sts);
+            do_prepare_cql(T, Prepares, Sts);
         Error ->
             Error
     end;
-do_prepare_cql([], _Prepares, _PoolName, LastSts) ->
+do_prepare_cql([], _Prepares, LastSts) ->
     {ok, LastSts}.
 
 prepare_cql_to_conn(Conn, Prepares) ->
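Dropping the unused PoolName accumulator argument leaves what is essentially a fold over the pool's workers. An equivalent shape as a sketch (prepare_on_all_workers/2 is a hypothetical name; unlike the recursion above it carries an error through the remaining workers instead of returning early, but the final result is the same):

    prepare_on_all_workers(PoolName, Prepares) ->
        Workers = [W || {_WorkerName, W} <- ecpool:workers(PoolName)],
        lists:foldl(
            fun(Worker, {ok, _LastSts}) ->
                    {ok, Conn} = ecpool_worker:client(Worker),
                    prepare_cql_to_conn(Conn, Prepares);
               (_Worker, Error) ->
                    Error
            end,
            {ok, #{}},
            Workers
        ).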
@@ -62,7 +62,8 @@
 -type state() ::
     #{
         templates := templates(),
-        poolname := atom()
+        pool_name := binary(),
+        connect_timeout := pos_integer()
     }.
 
 -type clickhouse_config() :: map().
|
||||||
connector => InstanceID,
|
connector => InstanceID,
|
||||||
config => emqx_utils:redact(Config)
|
config => emqx_utils:redact(Config)
|
||||||
}),
|
}),
|
||||||
PoolName = emqx_plugin_libs_pool:pool_name(InstanceID),
|
|
||||||
Options = [
|
Options = [
|
||||||
{url, URL},
|
{url, URL},
|
||||||
{user, maps:get(username, Config, "default")},
|
{user, maps:get(username, Config, "default")},
|
||||||
|
@ -149,46 +149,43 @@ on_start(
|
||||||
{database, DB},
|
{database, DB},
|
||||||
{auto_reconnect, ?AUTO_RECONNECT_INTERVAL},
|
{auto_reconnect, ?AUTO_RECONNECT_INTERVAL},
|
||||||
{pool_size, PoolSize},
|
{pool_size, PoolSize},
|
||||||
{pool, PoolName}
|
{pool, InstanceID}
|
||||||
],
|
],
|
||||||
InitState = #{
|
|
||||||
poolname => PoolName,
|
|
||||||
connect_timeout => ConnectTimeout
|
|
||||||
},
|
|
||||||
try
|
try
|
||||||
Templates = prepare_sql_templates(Config),
|
Templates = prepare_sql_templates(Config),
|
||||||
State = maps:merge(InitState, #{templates => Templates}),
|
State = #{
|
||||||
case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options) of
|
pool_name => InstanceID,
|
||||||
|
templates => Templates,
|
||||||
|
connect_timeout => ConnectTimeout
|
||||||
|
},
|
||||||
|
case emqx_resource_pool:start(InstanceID, ?MODULE, Options) of
|
||||||
ok ->
|
ok ->
|
||||||
{ok, State};
|
{ok, State};
|
||||||
{error, Reason} ->
|
{error, Reason} ->
|
||||||
log_start_error(Config, Reason, none),
|
?tp(
|
||||||
|
info,
|
||||||
|
"clickhouse_connector_start_failed",
|
||||||
|
#{
|
||||||
|
error => Reason,
|
||||||
|
config => emqx_utils:redact(Config)
|
||||||
|
}
|
||||||
|
),
|
||||||
{error, Reason}
|
{error, Reason}
|
||||||
end
|
end
|
||||||
catch
|
catch
|
||||||
_:CatchReason:Stacktrace ->
|
_:CatchReason:Stacktrace ->
|
||||||
log_start_error(Config, CatchReason, Stacktrace),
|
?tp(
|
||||||
|
info,
|
||||||
|
"clickhouse_connector_start_failed",
|
||||||
|
#{
|
||||||
|
error => CatchReason,
|
||||||
|
stacktrace => Stacktrace,
|
||||||
|
config => emqx_utils:redact(Config)
|
||||||
|
}
|
||||||
|
),
|
||||||
{error, CatchReason}
|
{error, CatchReason}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
log_start_error(Config, Reason, Stacktrace) ->
|
|
||||||
StacktraceMap =
|
|
||||||
case Stacktrace of
|
|
||||||
none -> #{};
|
|
||||||
_ -> #{stacktrace => Stacktrace}
|
|
||||||
end,
|
|
||||||
LogMessage =
|
|
||||||
#{
|
|
||||||
msg => "clickhouse_connector_start_failed",
|
|
||||||
error_reason => Reason,
|
|
||||||
config => emqx_utils:redact(Config)
|
|
||||||
},
|
|
||||||
?SLOG(info, maps:merge(LogMessage, StacktraceMap)),
|
|
||||||
?tp(
|
|
||||||
clickhouse_connector_start_failed,
|
|
||||||
#{error => Reason}
|
|
||||||
).
|
|
||||||
|
|
||||||
%% Helper functions to prepare SQL tempaltes
|
%% Helper functions to prepare SQL tempaltes
|
||||||
|
|
||||||
prepare_sql_templates(#{
|
prepare_sql_templates(#{
|
||||||
|
@ -240,7 +237,7 @@ split_clickhouse_insert_sql(SQL) ->
|
||||||
end.
|
end.
|
||||||
|
|
||||||
% This is a callback for ecpool which is triggered by the call to
|
% This is a callback for ecpool which is triggered by the call to
|
||||||
% emqx_plugin_libs_pool:start_pool in on_start/2
|
% emqx_resource_pool:start in on_start/2
|
||||||
|
|
||||||
connect(Options) ->
|
connect(Options) ->
|
||||||
URL = iolist_to_binary(emqx_http_lib:normalize(proplists:get_value(url, Options))),
|
URL = iolist_to_binary(emqx_http_lib:normalize(proplists:get_value(url, Options))),
|
||||||
|
@@ -277,23 +274,20 @@ connect(Options) ->
 
 -spec on_stop(resource_id(), resource_state()) -> term().
 
-on_stop(ResourceID, #{poolname := PoolName}) ->
+on_stop(InstanceID, #{pool_name := PoolName}) ->
     ?SLOG(info, #{
         msg => "stopping clickhouse connector",
-        connector => ResourceID
+        connector => InstanceID
     }),
-    emqx_plugin_libs_pool:stop_pool(PoolName).
+    emqx_resource_pool:stop(PoolName).
 
 %% -------------------------------------------------------------------
 %% on_get_status emqx_resource callback and related functions
 %% -------------------------------------------------------------------
 
 on_get_status(
-    _InstId,
-    #{
-        poolname := PoolName,
-        connect_timeout := Timeout
-    } = State
+    _InstanceID,
+    #{pool_name := PoolName, connect_timeout := Timeout} = State
 ) ->
     case do_get_status(PoolName, Timeout) of
         ok ->
@@ -352,7 +346,7 @@ do_get_status(PoolName, Timeout) ->
 on_query(
     ResourceID,
     {RequestType, DataOrSQL},
-    #{poolname := PoolName} = State
+    #{pool_name := PoolName} = State
 ) ->
     ?SLOG(debug, #{
         msg => "clickhouse connector received sql query",
@@ -391,16 +385,11 @@ query_type(send_message) ->
 on_batch_query(
     ResourceID,
     BatchReq,
-    State
+    #{pool_name := PoolName, templates := Templates} = _State
 ) ->
     %% Currently we only support batch requests with the send_message key
     {Keys, ObjectsToInsert} = lists:unzip(BatchReq),
     ensure_keys_are_of_type_send_message(Keys),
-    %% Pick out the SQL template
-    #{
-        templates := Templates,
-        poolname := PoolName
-    } = State,
     %% Create batch insert SQL statement
     SQL = objects_to_sql(ObjectsToInsert, Templates),
     %% Do the actual query in the database
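Moving the map match into the head of on_batch_query/3 means a state lacking pool_name or templates now fails with function_clause at the function boundary rather than with badmatch halfway through the body. A minimal illustration of the difference (hypothetical function names):

    %% Head match: an incomplete state matches no clause at all.
    head_match(#{pool_name := P, templates := T} = _State) -> {P, T}.

    %% Body match: the clause is entered first, then the match crashes.
    body_match(State) ->
        #{pool_name := P, templates := T} = State,
        {P, T}.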