chore: merge upstream/master
commit dc78ecb41c

Makefile
@@ -6,7 +6,7 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2
 export EMQX_DEFAULT_RUNNER = debian:11-slim
 export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
 export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
-export EMQX_DASHBOARD_VERSION ?= v1.2.1
+export EMQX_DASHBOARD_VERSION ?= v1.2.2
 export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6-beta.1
 export EMQX_REL_FORM ?= tgz
 export QUICER_DOWNLOAD_FROM_RELEASE = 1

@@ -239,7 +239,6 @@ $(foreach zt,$(ALL_DOCKERS),$(eval $(call gen-docker-target,$(zt))))
 .PHONY:
 merge-config:
 	@$(SCRIPTS)/merge-config.escript
-	@$(SCRIPTS)/merge-i18n.escript
 
 ## elixir target is to create release packages using Elixir's Mix
 .PHONY: $(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir)
@@ -1,4 +1,4 @@
-%% This additional config file is used when the config 'cluster.proto_dis' in emqx.conf is set to 'inet_tls'.
+%% This additional config file is used when the config 'cluster.proto_dist' in emqx.conf is set to 'inet_tls'.
 %% Which means the EMQX nodes will connect to each other over TLS.
 %% For more information about inter-broker security, see: https://docs.emqx.com/en/enterprise/v5.0/deploy/cluster/security.html
 
@@ -57,16 +57,16 @@
 -define(ERROR_CODES, [
     {?BAD_USERNAME_OR_PWD, <<"Bad username or password">>},
     {?BAD_API_KEY_OR_SECRET, <<"Bad API key or secret">>},
-    {'BAD_REQUEST', <<"Request parameters are not legal">>},
+    {'BAD_REQUEST', <<"Request parameters are invalid">>},
     {'NOT_MATCH', <<"Conditions are not matched">>},
     {'ALREADY_EXISTS', <<"Resource already existed">>},
-    {'BAD_CONFIG_SCHEMA', <<"Configuration data is not legal">>},
+    {'BAD_CONFIG_SCHEMA', <<"Configuration data is invalid">>},
     {'BAD_LISTENER_ID', <<"Bad listener ID">>},
     {'BAD_NODE_NAME', <<"Bad Node Name">>},
     {'BAD_RPC', <<"RPC Failed. Check the cluster status and the requested node status">>},
     {'BAD_TOPIC', <<"Topic syntax error, Topic needs to comply with the MQTT protocol standard">>},
     {'EXCEED_LIMIT', <<"Create resources that exceed the maximum limit or minimum limit">>},
-    {'INVALID_PARAMETER', <<"Request parameters is not legal and exceeds the boundary value">>},
+    {'INVALID_PARAMETER', <<"Request parameters is invalid and exceeds the boundary value">>},
     {'CONFLICT', <<"Conflicting request resources">>},
     {'NO_DEFAULT_VALUE', <<"Request parameters do not use default values">>},
     {'DEPENDENCY_EXISTS', <<"Resource is dependent by another resource">>},
@@ -29,7 +29,7 @@
     {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}},
     {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.6"}}},
     {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
-    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.38.1"}}},
+    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.2"}}},
     {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}},
     {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
     {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
@@ -30,6 +30,12 @@
     stop/0
 ]).
 
+%% Cluster API
+-export([
+    cluster_nodes/1,
+    running_nodes/0
+]).
+
 %% PubSub API
 -export([
     subscribe/1,

@@ -102,6 +108,18 @@ is_running() ->
         _ -> true
     end.
 
+%%--------------------------------------------------------------------
+%% Cluster API
+%%--------------------------------------------------------------------
+
+-spec running_nodes() -> [node()].
+running_nodes() ->
+    mria:running_nodes().
+
+-spec cluster_nodes(all | running | cores | stopped) -> [node()].
+cluster_nodes(Type) ->
+    mria:cluster_nodes(Type).
+
 %%--------------------------------------------------------------------
 %% PubSub API
 %%--------------------------------------------------------------------
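The new cluster wrappers keep callers inside the emqx module instead of reaching into mria directly. A usage sketch, matching the single-node expectations of the t_cluster_nodes test added further down in this diff:

    %% Illustrative only; return values assume a one-node cluster.
    [node()] = emqx:running_nodes(),
    [node()] = emqx:cluster_nodes(all),
    []       = emqx:cluster_nodes(stopped).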
@@ -182,10 +182,8 @@
 -define(ALARM_SOCK_STATS_KEYS, [send_pend, recv_cnt, recv_oct, send_cnt, send_oct]).
 -define(ALARM_SOCK_OPTS_KEYS, [high_watermark, high_msgq_watermark, sndbuf, recbuf, buffer]).
 
-%% use macro to do compile time limiter's type check
--define(LIMITER_BYTES_IN, bytes_in).
--define(LIMITER_MESSAGE_IN, message_in).
+-define(EMPTY_QUEUE, {[], []}).
+-define(LIMITER_BYTES_IN, bytes).
+-define(LIMITER_MESSAGE_IN, messages).
 
 -dialyzer({no_match, [info/2]}).
 -dialyzer(
@@ -139,7 +139,8 @@ make_token_bucket_limiter(Cfg, Bucket) ->
     Cfg#{
         tokens => emqx_limiter_server:get_initial_val(Cfg),
         lasttime => ?NOW,
-        bucket => Bucket
+        bucket => Bucket,
+        capacity => emqx_limiter_schema:calc_capacity(Cfg)
     }.
 
 %%@doc create a limiter server's reference
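The bucket capacity is no longer a standalone config field; it is derived from rate and burst. A condensed sketch of the derivation (calc_capacity/1 is added to emqx_limiter_schema later in this diff; treating default_period/0 as the server's 100 ms tick is an assumption):

    calc_capacity(#{rate := infinity}) -> infinity;
    calc_capacity(#{burst := infinity}) -> infinity;
    calc_capacity(#{rate := Rate, burst := Burst}) ->
        %% tokens refilled per second, scaled to the server period, plus burst headroom
        erlang:floor(1000 * Rate / default_period()) + Burst.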
@@ -23,6 +23,7 @@
 %% API
 -export([
     new/3,
+    infinity_bucket/0,
     check/3,
     try_restore/2,
     available/1

@@ -58,6 +59,10 @@ new(Counter, Index, Rate) ->
         rate => Rate
     }.
 
+-spec infinity_bucket() -> bucket_ref().
+infinity_bucket() ->
+    infinity.
+
 %% @doc check tokens
 -spec check(pos_integer(), bucket_ref(), Disivisble :: boolean()) ->
     HasToken ::
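infinity_bucket/0 gives connect/3 (see emqx_limiter_server below) a bucket reference that needs no shared counter. A fragmentary sketch of the intended call shape, assuming ClientCfg is a limiter config map as elsewhere in this diff:

    Bucket = emqx_limiter_bucket_ref:infinity_bucket(),  %% returns the atom 'infinity'
    Limiter = emqx_htb_limiter:make_token_bucket_limiter(ClientCfg, Bucket),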
@@ -31,20 +31,20 @@
     get_bucket_cfg_path/2,
     desc/1,
     types/0,
-    infinity_value/0
+    calc_capacity/1
 ]).
 
 -define(KILOBYTE, 1024).
 -define(BUCKET_KEYS, [
-    {bytes_in, bucket_infinity},
-    {message_in, bucket_infinity},
+    {bytes, bucket_infinity},
+    {messages, bucket_infinity},
     {connection, bucket_limit},
     {message_routing, bucket_infinity}
 ]).
 
 -type limiter_type() ::
-    bytes_in
-    | message_in
+    bytes
+    | messages
     | connection
     | message_routing
     %% internal limiter for unclassified resources

@@ -90,14 +90,17 @@
 
 namespace() -> limiter.
 
-roots() -> [limiter].
+roots() ->
+    [{limiter, hoconsc:mk(hoconsc:ref(?MODULE, limiter), #{importance => ?IMPORTANCE_HIDDEN})}].
 
 fields(limiter) ->
     [
         {Type,
             ?HOCON(?R_REF(node_opts), #{
                 desc => ?DESC(Type),
-                default => #{}
+                default => #{},
+                importance => ?IMPORTANCE_HIDDEN,
+                aliases => alias_of_type(Type)
             })}
         || Type <- types()
     ] ++

@@ -107,6 +110,7 @@ fields(limiter) ->
             ?R_REF(client_fields),
             #{
                 desc => ?DESC(client),
+                importance => ?IMPORTANCE_HIDDEN,
                 default => maps:from_list([
                     {erlang:atom_to_binary(Type), #{}}
                     || Type <- types()

@@ -124,30 +128,50 @@ fields(node_opts) ->
         })}
     ];
 fields(client_fields) ->
-    [
-        {Type,
-            ?HOCON(?R_REF(client_opts), #{
-                desc => ?DESC(Type),
-                default => #{}
-            })}
-        || Type <- types()
-    ];
+    client_fields(types(), #{default => #{}});
 fields(bucket_infinity) ->
     [
         {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"infinity">>})},
-        {capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => <<"infinity">>})},
-        {initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})}
+        {burst,
+            ?HOCON(capacity(), #{
+                desc => ?DESC(capacity),
+                default => <<"0">>,
+                importance => ?IMPORTANCE_HIDDEN,
+                aliases => [capacity]
+            })},
+        {initial,
+            ?HOCON(initial(), #{
+                default => <<"0">>,
+                desc => ?DESC(initial),
+                importance => ?IMPORTANCE_HIDDEN
+            })}
     ];
 fields(bucket_limit) ->
     [
         {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"1000/s">>})},
-        {capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => <<"1000">>})},
-        {initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})}
+        {burst,
+            ?HOCON(capacity(), #{
+                desc => ?DESC(burst),
+                default => <<"0">>,
+                importance => ?IMPORTANCE_HIDDEN,
+                aliases => [capacity]
+            })},
+        {initial,
+            ?HOCON(initial(), #{
+                default => <<"0">>,
+                desc => ?DESC(initial),
+                importance => ?IMPORTANCE_HIDDEN
+            })}
     ];
 fields(client_opts) ->
     [
         {rate, ?HOCON(rate(), #{default => <<"infinity">>, desc => ?DESC(rate)})},
-        {initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})},
+        {initial,
+            ?HOCON(initial(), #{
+                default => <<"0">>,
+                desc => ?DESC(initial),
+                importance => ?IMPORTANCE_HIDDEN
+            })},
         %% low_watermark add for emqx_channel and emqx_session
         %% both modules consume first and then check
         %% so we need to use this value to prevent excessive consumption

@@ -157,20 +181,24 @@ fields(client_opts) ->
                 initial(),
                 #{
                     desc => ?DESC(low_watermark),
-                    default => <<"0">>
+                    default => <<"0">>,
+                    importance => ?IMPORTANCE_HIDDEN
                 }
             )},
-        {capacity,
+        {burst,
             ?HOCON(capacity(), #{
-                desc => ?DESC(client_bucket_capacity),
-                default => <<"infinity">>
+                desc => ?DESC(burst),
+                default => <<"0">>,
+                importance => ?IMPORTANCE_HIDDEN,
+                aliases => [capacity]
            })},
        {divisible,
            ?HOCON(
                boolean(),
                #{
                    desc => ?DESC(divisible),
-                    default => false
+                    default => false,
+                    importance => ?IMPORTANCE_HIDDEN
                }
            )},
        {max_retry_time,

@@ -178,7 +206,8 @@ fields(client_opts) ->
                 emqx_schema:duration(),
                 #{
                     desc => ?DESC(max_retry_time),
-                    default => <<"10s">>
+                    default => <<"10s">>,
+                    importance => ?IMPORTANCE_HIDDEN
                 }
             )},
         {failure_strategy,

@@ -186,16 +215,18 @@ fields(client_opts) ->
                 failure_strategy(),
                 #{
                     desc => ?DESC(failure_strategy),
-                    default => force
+                    default => force,
+                    importance => ?IMPORTANCE_HIDDEN
                 }
             )}
     ];
 fields(listener_fields) ->
-    bucket_fields(?BUCKET_KEYS, listener_client_fields);
+    composite_bucket_fields(?BUCKET_KEYS, listener_client_fields);
 fields(listener_client_fields) ->
-    client_fields(?BUCKET_KEYS);
+    {Types, _} = lists:unzip(?BUCKET_KEYS),
+    client_fields(Types, #{required => false});
 fields(Type) ->
-    bucket_field(Type).
+    simple_bucket_field(Type).
 
 desc(limiter) ->
     "Settings for the rate limiter.";

@@ -230,19 +261,14 @@ get_bucket_cfg_path(Type, BucketName) ->
     [limiter, Type, bucket, BucketName].
 
 types() ->
-    [bytes_in, message_in, connection, message_routing, internal].
+    [bytes, messages, connection, message_routing, internal].
 
-%%--------------------------------------------------------------------
-%% Internal functions
-%%--------------------------------------------------------------------
-
-%% `infinity` to `infinity_value` rules:
-%% 1. all infinity capacity will change to infinity_value
-%% 2. if the rate of global and bucket both are `infinity`,
-%%    use `infinity_value` as bucket rate. see `emqx_limiter_server:get_counter_rate/2`
-infinity_value() ->
-    %% 1 TB
-    1099511627776.
+calc_capacity(#{rate := infinity}) ->
+    infinity;
+calc_capacity(#{burst := infinity}) ->
+    infinity;
+calc_capacity(#{rate := Rate, burst := Burst}) ->
+    erlang:floor(1000 * Rate / default_period()) + Burst.
 
 %%--------------------------------------------------------------------
 %% Internal functions

@@ -335,7 +361,7 @@ to_quota(Str, Regex) ->
         {match, [Quota, ""]} ->
             {ok, erlang:list_to_integer(Quota)};
         {match, ""} ->
-            {ok, infinity_value()};
+            {ok, infinity};
         _ ->
             {error, Str}
     end

@@ -350,7 +376,8 @@ apply_unit("mb", Val) -> Val * ?KILOBYTE * ?KILOBYTE;
 apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE;
 apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit).
 
-bucket_field(Type) when is_atom(Type) ->
+%% A bucket with only one type
+simple_bucket_field(Type) when is_atom(Type) ->
     fields(bucket_infinity) ++
         [
             {client,

@@ -358,16 +385,22 @@ bucket_field(Type) when is_atom(Type) ->
                 ?R_REF(?MODULE, client_opts),
                 #{
                     desc => ?DESC(client),
-                    required => false
+                    required => false,
+                    importance => importance_of_type(Type),
+                    aliases => alias_of_type(Type)
                 }
             )}
        ].
 
-bucket_fields(Types, ClientRef) ->
+%% A bucket with multi types
+composite_bucket_fields(Types, ClientRef) ->
     [
         {Type,
             ?HOCON(?R_REF(?MODULE, Opts), #{
                 desc => ?DESC(?MODULE, Type),
-                required => false
+                required => false,
+                importance => importance_of_type(Type),
+                aliases => alias_of_type(Type)
             })}
         || {Type, Opts} <- Types
     ] ++

@@ -382,12 +415,29 @@ bucket_fields(Types, ClientRef) ->
         )}
     ].
 
-client_fields(Types) ->
+client_fields(Types, Meta) ->
     [
         {Type,
-            ?HOCON(?R_REF(client_opts), #{
+            ?HOCON(?R_REF(client_opts), Meta#{
                 desc => ?DESC(Type),
-                required => false
+                importance => importance_of_type(Type),
+                aliases => alias_of_type(Type)
             })}
-        || {Type, _} <- Types
+        || Type <- Types
     ].
+
+importance_of_type(interval) ->
+    ?IMPORTANCE_HIDDEN;
+importance_of_type(message_routing) ->
+    ?IMPORTANCE_HIDDEN;
+importance_of_type(connection) ->
+    ?IMPORTANCE_HIDDEN;
+importance_of_type(_) ->
+    ?DEFAULT_IMPORTANCE.
+
+alias_of_type(messages) ->
+    [message_in];
+alias_of_type(bytes) ->
+    [bytes_in];
+alias_of_type(_) ->
+    [].
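The renamed fields stay readable from old configs through HOCON field aliases declared via alias_of_type/1 above, and capacity likewise survives as an alias of burst. A back-compat sketch:

    %% Both spellings below resolve to the same field after schema check
    %% (the old key is accepted through the alias):
    %%   limiter.bytes_in.rate = "10MB/s"   %% pre-rename config
    %%   limiter.bytes.rate    = "10MB/s"   %% canonical form after this change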
@@ -118,17 +118,24 @@ connect(_Id, _Type, undefined) ->
     {ok, emqx_htb_limiter:make_infinity_limiter()};
 connect(Id, Type, Cfg) ->
     case find_limiter_cfg(Type, Cfg) of
-        {undefined, _} ->
+        {_ClientCfg, undefined, _NodeCfg} ->
             {ok, emqx_htb_limiter:make_infinity_limiter()};
+        {#{rate := infinity}, #{rate := infinity}, #{rate := infinity}} ->
+            {ok, emqx_htb_limiter:make_infinity_limiter()};
+        {ClientCfg, #{rate := infinity}, #{rate := infinity}} ->
+            {ok,
+                emqx_htb_limiter:make_token_bucket_limiter(
+                    ClientCfg, emqx_limiter_bucket_ref:infinity_bucket()
+                )};
         {
-            #{
-                rate := BucketRate,
-                capacity := BucketSize
-            },
-            #{rate := CliRate, capacity := CliSize} = ClientCfg
+            #{rate := CliRate} = ClientCfg,
+            #{rate := BucketRate} = BucketCfg,
+            _
         } ->
             case emqx_limiter_manager:find_bucket(Id, Type) of
                 {ok, Bucket} ->
+                    BucketSize = emqx_limiter_schema:calc_capacity(BucketCfg),
+                    CliSize = emqx_limiter_schema:calc_capacity(ClientCfg),
                     {ok,
                         if
                             CliRate < BucketRate orelse CliSize < BucketSize ->

@@ -493,12 +500,14 @@ make_root(#{rate := Rate, burst := Burst}) ->
         produced => 0.0
     }.
 
-do_add_bucket(Id, #{rate := Rate, capacity := Capacity} = Cfg, #{buckets := Buckets} = State) ->
+do_add_bucket(_Id, #{rate := infinity}, #{root := #{rate := infinity}} = State) ->
+    State;
+do_add_bucket(Id, #{rate := Rate} = Cfg, #{buckets := Buckets} = State) ->
     case maps:get(Id, Buckets, undefined) of
         undefined ->
             make_bucket(Id, Cfg, State);
         Bucket ->
-            Bucket2 = Bucket#{rate := Rate, capacity := Capacity},
+            Bucket2 = Bucket#{rate := Rate, capacity := emqx_limiter_schema:calc_capacity(Cfg)},
             State#{buckets := Buckets#{Id := Bucket2}}
     end.
 

@@ -509,7 +518,7 @@ make_bucket(Id, Cfg, #{index := ?COUNTER_SIZE} = State) ->
     });
 make_bucket(
     Id,
-    #{rate := Rate, capacity := Capacity} = Cfg,
+    #{rate := Rate} = Cfg,
     #{type := Type, counter := Counter, index := Index, buckets := Buckets} = State
 ) ->
     NewIndex = Index + 1,

@@ -519,7 +528,7 @@ make_bucket(
         rate => Rate,
         obtained => Initial,
         correction => 0,
-        capacity => Capacity,
+        capacity => emqx_limiter_schema:calc_capacity(Cfg),
         counter => Counter,
         index => NewIndex
     },

@@ -541,19 +550,14 @@ do_del_bucket(Id, #{type := Type, buckets := Buckets} = State) ->
 get_initial_val(
     #{
         initial := Initial,
-        rate := Rate,
-        capacity := Capacity
+        rate := Rate
     }
 ) ->
-    %% initial will nevner be infinity(see the emqx_limiter_schema)
-    InfVal = emqx_limiter_schema:infinity_value(),
     if
         Initial > 0 ->
             Initial;
         Rate =/= infinity ->
-            erlang:min(Rate, Capacity);
-        Capacity =/= infinity andalso Capacity =/= InfVal ->
-            Capacity;
+            Rate;
         true ->
             0
     end.

@@ -568,11 +572,12 @@ call(Type, Msg) ->
     end.
 
 find_limiter_cfg(Type, #{rate := _} = Cfg) ->
-    {Cfg, find_client_cfg(Type, maps:get(client, Cfg, undefined))};
+    {find_client_cfg(Type, maps:get(client, Cfg, undefined)), Cfg, find_node_cfg(Type)};
 find_limiter_cfg(Type, Cfg) ->
     {
+        find_client_cfg(Type, emqx_utils_maps:deep_get([client, Type], Cfg, undefined)),
         maps:get(Type, Cfg, undefined),
-        find_client_cfg(Type, emqx_utils_maps:deep_get([client, Type], Cfg, undefined))
+        find_node_cfg(Type)
     }.
 
 find_client_cfg(Type, BucketCfg) ->

@@ -585,3 +590,6 @@ merge_client_cfg(NodeCfg, undefined) ->
     NodeCfg;
 merge_client_cfg(NodeCfg, BucketCfg) ->
     maps:merge(NodeCfg, BucketCfg).
+
+find_node_cfg(Type) ->
+    emqx:get_config([limiter, Type], #{rate => infinity, burst => 0}).
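find_limiter_cfg/2 now returns a {ClientCfg, BucketCfg, NodeCfg} triple, so connect/3 can short-circuit to an infinity limiter before any bucket lookup. A condensed sketch of the clause order (illustrative function and result names, first match wins as in connect/3):

    classify({_Client, undefined, _Node}) -> infinity;                                    %% no bucket config
    classify({#{rate := infinity}, #{rate := infinity}, #{rate := infinity}}) -> infinity;
    classify({_Client, #{rate := infinity}, #{rate := infinity}}) -> client_only;         %% token bucket over the infinity ref
    classify({_Client, _Bucket, _Node}) -> token_bucket.                                  %% full token bucket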
@@ -164,7 +164,7 @@ roots(high) ->
         }
     )},
     {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, authentication(global)},
-    %% NOTE: authorization schema here is only to keep emqx app prue
+    %% NOTE: authorization schema here is only to keep emqx app pure
     %% the full schema for EMQX node is injected in emqx_conf_schema.
     {?EMQX_AUTHORIZATION_CONFIG_ROOT_NAME,
         sc(

@@ -2225,6 +2225,7 @@ common_ssl_opts_schema(Defaults) ->
                 #{
                     default => AvailableVersions,
                     desc => ?DESC(common_ssl_opts_schema_versions),
+                    importance => ?IMPORTANCE_HIGH,
                     validator => fun(Inputs) -> validate_tls_versions(AvailableVersions, Inputs) end
                 }
             )},

@@ -2235,6 +2236,7 @@ common_ssl_opts_schema(Defaults) ->
                 #{
                     default => <<"emqx_tls_psk:lookup">>,
                     converter => fun ?MODULE:user_lookup_fun_tr/2,
+                    importance => ?IMPORTANCE_HIDDEN,
                     desc => ?DESC(common_ssl_opts_schema_user_lookup_fun)
                 }
            )},

@@ -2762,10 +2764,16 @@ str(S) when is_list(S) ->
     S.
 
 authentication(Which) ->
-    Desc =
+    {Importance, Desc} =
         case Which of
-            global -> ?DESC(global_authentication);
-            listener -> ?DESC(listener_authentication)
+            global ->
+                %% For root level authentication, it is recommended to configure
+                %% from the dashboard or API.
+                %% Hence it's considered a low-importance when it comes to
+                %% configuration importance.
+                {?IMPORTANCE_LOW, ?DESC(global_authentication)};
+            listener ->
+                {?IMPORTANCE_HIDDEN, ?DESC(listener_authentication)}
         end,
     %% poor man's dependency injection
     %% this is due to the fact that authn is implemented outside of 'emqx' app.

@@ -2781,7 +2789,7 @@ authentication(Which) ->
     hoconsc:mk(Type, #{
         desc => Desc,
         converter => fun ensure_array/2,
-        importance => ?IMPORTANCE_HIDDEN
+        importance => Importance
     }).
 
 %% the older version schema allows individual element (instead of a chain) in config
@@ -121,8 +121,8 @@
 -define(SOCK_STATS, [recv_oct, recv_cnt, send_oct, send_cnt]).
 
 -define(ENABLED(X), (X =/= undefined)).
--define(LIMITER_BYTES_IN, bytes_in).
--define(LIMITER_MESSAGE_IN, message_in).
+-define(LIMITER_BYTES_IN, bytes).
+-define(LIMITER_MESSAGE_IN, messages).
 
 -dialyzer({no_match, [info/2]}).
 -dialyzer({nowarn_function, [websocket_init/1]}).
@@ -148,6 +148,14 @@ t_run_hook(_) ->
     ?assertEqual(3, emqx:run_fold_hook(foldl_filter2_hook, [arg], 1)),
     ?assertEqual(2, emqx:run_fold_hook(foldl_filter2_hook, [arg1], 1)).
 
+t_cluster_nodes(_) ->
+    Expected = [node()],
+    ?assertEqual(Expected, emqx:running_nodes()),
+    ?assertEqual(Expected, emqx:cluster_nodes(running)),
+    ?assertEqual(Expected, emqx:cluster_nodes(all)),
+    ?assertEqual(Expected, emqx:cluster_nodes(cores)),
+    ?assertEqual([], emqx:cluster_nodes(stopped)).
+
 %%--------------------------------------------------------------------
 %% Hook fun
 %%--------------------------------------------------------------------
@@ -162,8 +162,7 @@ limiter_conf() ->
     Make = fun() ->
         #{
             burst => 0,
-            rate => infinity,
-            capacity => infinity
+            rate => infinity
         }
     end,
 

@@ -172,7 +171,7 @@ limiter_conf() ->
             Acc#{Name => Make()}
         end,
         #{},
-        [bytes_in, message_in, message_routing, connection, internal]
+        [bytes, messages, message_routing, connection, internal]
     ).
 
 stats_conf() ->

@@ -1258,7 +1257,7 @@ limiter_cfg() ->
     Client = #{
         rate => 5,
         initial => 0,
-        capacity => 5,
+        burst => 0,
         low_watermark => 1,
         divisible => false,
         max_retry_time => timer:seconds(5),

@@ -1270,7 +1269,7 @@ limiter_cfg() ->
     }.
 
 bucket_cfg() ->
-    #{rate => 10, initial => 0, capacity => 10}.
+    #{rate => 10, initial => 0, burst => 0}.
 
 add_bucket() ->
     emqx_limiter_server:add_bucket(?MODULE, message_routing, bucket_cfg()).
@@ -427,7 +427,7 @@ t_ensure_rate_limit(_) ->
         fun(_, Client) -> {pause, 3000, undefined, Client} end
     ),
     {ok, State2} = emqx_connection:check_limiter(
-        [{1000, bytes_in}],
+        [{1000, bytes}],
         [],
         WhenOk,
         [],

@@ -703,31 +703,29 @@ handle_call(Pid, Call, St) -> emqx_connection:handle_call(Pid, Call, St).
 -define(LIMITER_ID, 'tcp:default').
 
 init_limiter() ->
-    emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], limiter_cfg()).
+    emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes, messages], limiter_cfg()).
 
 limiter_cfg() ->
-    Infinity = emqx_limiter_schema:infinity_value(),
     Cfg = bucket_cfg(),
     Client = #{
-        rate => Infinity,
+        rate => infinity,
         initial => 0,
-        capacity => Infinity,
+        burst => 0,
         low_watermark => 1,
         divisible => false,
         max_retry_time => timer:seconds(5),
         failure_strategy => force
     },
-    #{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}.
+    #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}.
 
 bucket_cfg() ->
-    Infinity = emqx_limiter_schema:infinity_value(),
-    #{rate => Infinity, initial => 0, capacity => Infinity}.
+    #{rate => infinity, initial => 0, burst => 0}.
 
 add_bucket() ->
     Cfg = bucket_cfg(),
-    emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg),
-    emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg).
+    emqx_limiter_server:add_bucket(?LIMITER_ID, bytes, Cfg),
+    emqx_limiter_server:add_bucket(?LIMITER_ID, messages, Cfg).
 
 del_bucket() ->
-    emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in),
-    emqx_limiter_server:del_bucket(?LIMITER_ID, message_in).
+    emqx_limiter_server:del_bucket(?LIMITER_ID, bytes),
+    emqx_limiter_server:del_bucket(?LIMITER_ID, messages).
@@ -72,7 +72,7 @@ t_consume(_) ->
     Cfg = fun(Cfg) ->
         Cfg#{
             rate := 100,
-            capacity := 100,
+            burst := 0,
             initial := 100,
             max_retry_time := 1000,
             failure_strategy := force

@@ -89,7 +89,7 @@ t_retry(_) ->
     Cfg = fun(Cfg) ->
         Cfg#{
             rate := 50,
-            capacity := 200,
+            burst := 150,
             initial := 0,
             max_retry_time := 1000,
             failure_strategy := force

@@ -109,7 +109,7 @@ t_restore(_) ->
     Cfg = fun(Cfg) ->
         Cfg#{
             rate := 1,
-            capacity := 200,
+            burst := 199,
             initial := 50,
             max_retry_time := 100,
             failure_strategy := force

@@ -129,7 +129,7 @@ t_max_retry_time(_) ->
     Cfg = fun(Cfg) ->
         Cfg#{
             rate := 1,
-            capacity := 1,
+            burst := 0,
             max_retry_time := 500,
             failure_strategy := drop
         }

@@ -139,8 +139,12 @@ t_max_retry_time(_) ->
         Begin = ?NOW,
         Result = emqx_htb_limiter:consume(101, Client),
         ?assertMatch({drop, _}, Result),
-        Time = ?NOW - Begin,
-        ?assert(Time >= 500 andalso Time < 550)
+        End = ?NOW,
+        Time = End - Begin,
+        ?assert(
+            Time >= 500 andalso Time < 550,
+            lists:flatten(io_lib:format("Begin:~p, End:~p, Time:~p~n", [Begin, End, Time]))
+        )
     end,
     with_per_client(Cfg, Case).
 

@@ -150,7 +154,7 @@ t_divisible(_) ->
             divisible := true,
             rate := ?RATE("1000/1s"),
             initial := 600,
-            capacity := 600
+            burst := 0
         }
     end,
     Case = fun(BucketCfg) ->

@@ -176,7 +180,7 @@ t_low_watermark(_) ->
             low_watermark := 400,
             rate := ?RATE("1000/1s"),
             initial := 1000,
-            capacity := 1000
+            burst := 0
         }
     end,
     Case = fun(BucketCfg) ->

@@ -201,8 +205,7 @@ t_infinity_client(_) ->
     Fun = fun(Cfg) -> Cfg end,
     Case = fun(Cfg) ->
         Client = connect(Cfg),
-        InfVal = emqx_limiter_schema:infinity_value(),
-        ?assertMatch(#{bucket := #{rate := InfVal}}, Client),
+        ?assertMatch(infinity, Client),
         Result = emqx_htb_limiter:check(100000, Client),
         ?assertEqual({ok, Client}, Result)
     end,

@@ -212,12 +215,12 @@ t_try_restore_agg(_) ->
     Fun = fun(#{client := Cli} = Bucket) ->
         Bucket2 = Bucket#{
             rate := 1,
-            capacity := 200,
+            burst := 199,
             initial := 50
         },
         Cli2 = Cli#{
             rate := infinity,
-            capacity := infinity,
+            burst := infinity,
             divisible := true,
             max_retry_time := 100,
             failure_strategy := force

@@ -239,11 +242,11 @@ t_short_board(_) ->
         Bucket2 = Bucket#{
             rate := ?RATE("100/1s"),
             initial := 0,
-            capacity := 100
+            burst := 0
         },
         Cli2 = Cli#{
             rate := ?RATE("600/1s"),
-            capacity := 600,
+            burst := 0,
             initial := 600
         },
         Bucket2#{client := Cli2}

@@ -261,46 +264,45 @@ t_rate(_) ->
         Bucket2 = Bucket#{
             rate := ?RATE("100/100ms"),
             initial := 0,
-            capacity := infinity
+            burst := infinity
         },
         Cli2 = Cli#{
             rate := infinity,
-            capacity := infinity,
+            burst := infinity,
             initial := 0
         },
         Bucket2#{client := Cli2}
     end,
     Case = fun(Cfg) ->
+        Time = 1000,
         Client = connect(Cfg),
-        Ts1 = erlang:system_time(millisecond),
         C1 = emqx_htb_limiter:available(Client),
-        timer:sleep(1000),
-        Ts2 = erlang:system_time(millisecond),
+        timer:sleep(1100),
        C2 = emqx_htb_limiter:available(Client),
-        ShouldInc = floor((Ts2 - Ts1) / 100) * 100,
+        ShouldInc = floor(Time / 100) * 100,
        Inc = C2 - C1,
        ?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate")
    end,
    with_bucket(Fun, Case).
 
 t_capacity(_) ->
-    Capacity = 600,
+    Capacity = 1200,
     Fun = fun(#{client := Cli} = Bucket) ->
         Bucket2 = Bucket#{
             rate := ?RATE("100/100ms"),
             initial := 0,
-            capacity := 600
+            burst := 200
         },
         Cli2 = Cli#{
             rate := infinity,
-            capacity := infinity,
+            burst := infinity,
             initial := 0
         },
         Bucket2#{client := Cli2}
     end,
     Case = fun(Cfg) ->
         Client = connect(Cfg),
-        timer:sleep(1000),
+        timer:sleep(1500),
         C1 = emqx_htb_limiter:available(Client),
         ?assertEqual(Capacity, C1, "test bucket capacity")
     end,

@@ -318,11 +320,11 @@ t_collaborative_alloc(_) ->
         Bucket2 = Bucket#{
             rate := ?RATE("400/1s"),
             initial := 0,
-            capacity := 600
+            burst := 200
         },
         Cli2 = Cli#{
             rate := ?RATE("50"),
-            capacity := 100,
+            burst := 50,
             initial := 100
         },
         Bucket2#{client := Cli2}

@@ -363,11 +365,11 @@ t_burst(_) ->
         Bucket2 = Bucket#{
             rate := ?RATE("200/1s"),
             initial := 0,
-            capacity := 200
+            burst := 0
         },
         Cli2 = Cli#{
             rate := ?RATE("50/1s"),
-            capacity := 200,
+            burst := 150,
             divisible := true
         },
         Bucket2#{client := Cli2}

@@ -401,11 +403,11 @@ t_limit_global_with_unlimit_other(_) ->
         Bucket2 = Bucket#{
             rate := infinity,
             initial := 0,
-            capacity := infinity
+            burst := infinity
         },
         Cli2 = Cli#{
             rate := infinity,
-            capacity := infinity,
+            burst := infinity,
             initial := 0
         },
         Bucket2#{client := Cli2}

@@ -414,7 +416,7 @@ t_limit_global_with_unlimit_other(_) ->
     Case = fun() ->
         C1 = counters:new(1, []),
         start_client({b1, Bucket}, ?NOW + 2000, C1, 20),
-        timer:sleep(2100),
+        timer:sleep(2200),
         check_average_rate(C1, 2, 600)
     end,
 

@@ -432,7 +434,7 @@ t_check_container(_) ->
         Cfg#{
             rate := ?RATE("1000/1s"),
             initial := 1000,
-            capacity := 1000
+            burst := 0
         }
     end,
     Case = fun(#{client := Client} = BucketCfg) ->

@@ -565,7 +567,7 @@ t_schema_unit(_) ->
     ?assertMatch({error, _}, M:to_rate("100MB/1")),
     ?assertMatch({error, _}, M:to_rate("100/10x")),
 
-    ?assertEqual({ok, emqx_limiter_schema:infinity_value()}, M:to_capacity("infinity")),
+    ?assertEqual({ok, infinity}, M:to_capacity("infinity")),
     ?assertEqual({ok, 100}, M:to_capacity("100")),
     ?assertEqual({ok, 100 * 1024}, M:to_capacity("100KB")),
     ?assertEqual({ok, 100 * 1024 * 1024}, M:to_capacity("100MB")),

@@ -748,17 +750,16 @@ connect(Name, Cfg) ->
     Limiter.
 
 make_limiter_cfg() ->
-    Infinity = emqx_limiter_schema:infinity_value(),
     Client = #{
-        rate => Infinity,
+        rate => infinity,
         initial => 0,
-        capacity => Infinity,
+        burst => infinity,
         low_watermark => 0,
         divisible => false,
         max_retry_time => timer:seconds(5),
         failure_strategy => force
     },
-    #{client => Client, rate => Infinity, initial => 0, capacity => Infinity}.
+    #{client => Client, rate => infinity, initial => 0, burst => infinity}.
 
 add_bucket(Cfg) ->
     add_bucket(?MODULE, Cfg).
@@ -509,16 +509,16 @@ t_handle_timeout_emit_stats(_) ->
 t_ensure_rate_limit(_) ->
     {ok, Rate} = emqx_limiter_schema:to_rate("50MB"),
     Limiter = init_limiter(#{
-        bytes_in => bucket_cfg(),
-        message_in => bucket_cfg(),
-        client => #{bytes_in => client_cfg(Rate)}
+        bytes => bucket_cfg(),
+        messages => bucket_cfg(),
+        client => #{bytes => client_cfg(Rate)}
     }),
     St = st(#{limiter => Limiter}),
 
     %% must bigger than value in emqx_ratelimit_SUITE
     {ok, Need} = emqx_limiter_schema:to_capacity("1GB"),
     St1 = ?ws_conn:check_limiter(
-        [{Need, bytes_in}],
+        [{Need, bytes}],
         [],
         fun(_, _, S) -> S end,
         [],

@@ -699,23 +699,21 @@ init_limiter() ->
     init_limiter(limiter_cfg()).
 
 init_limiter(LimiterCfg) ->
-    emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], LimiterCfg).
+    emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes, messages], LimiterCfg).
 
 limiter_cfg() ->
     Cfg = bucket_cfg(),
     Client = client_cfg(),
-    #{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}.
+    #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}.
 
 client_cfg() ->
-    Infinity = emqx_limiter_schema:infinity_value(),
-    client_cfg(Infinity).
+    client_cfg(infinity).
 
 client_cfg(Rate) ->
-    Infinity = emqx_limiter_schema:infinity_value(),
     #{
         rate => Rate,
         initial => 0,
-        capacity => Infinity,
+        burst => 0,
         low_watermark => 1,
         divisible => false,
         max_retry_time => timer:seconds(5),

@@ -723,14 +721,13 @@ client_cfg(Rate) ->
     }.
 
 bucket_cfg() ->
-    Infinity = emqx_limiter_schema:infinity_value(),
-    #{rate => Infinity, initial => 0, capacity => Infinity}.
+    #{rate => infinity, initial => 0, burst => 0}.
 
 add_bucket() ->
     Cfg = bucket_cfg(),
-    emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg),
-    emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg).
+    emqx_limiter_server:add_bucket(?LIMITER_ID, bytes, Cfg),
+    emqx_limiter_server:add_bucket(?LIMITER_ID, messages, Cfg).
 
 del_bucket() ->
-    emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in),
-    emqx_limiter_server:del_bucket(?LIMITER_ID, message_in).
+    emqx_limiter_server:del_bucket(?LIMITER_ID, bytes),
+    emqx_limiter_server:del_bucket(?LIMITER_ID, messages).
@@ -105,14 +105,16 @@ mnesia(boot) ->
 %% Hocon Schema
 %%------------------------------------------------------------------------------
 
-namespace() -> "authn-scram-builtin_db".
+namespace() -> "authn".
 
 tags() ->
     [<<"Authentication">>].
 
-roots() -> [?CONF_NS].
+%% used for config check when the schema module is resolved
+roots() ->
+    [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, scram))}].
 
-fields(?CONF_NS) ->
+fields(scram) ->
     [
         {mechanism, emqx_authn_schema:mechanism(scram)},
         {backend, emqx_authn_schema:backend(built_in_database)},

@@ -120,7 +122,7 @@ fields(?CONF_NS) ->
         {iteration_count, fun iteration_count/1}
     ] ++ emqx_authn_schema:common_fields().
 
-desc(?CONF_NS) ->
+desc(scram) ->
     "Settings for Salted Challenge Response Authentication Mechanism\n"
     "(SCRAM) authentication.";
 desc(_) ->

@@ -141,7 +143,7 @@ iteration_count(_) -> undefined.
 %%------------------------------------------------------------------------------
 
 refs() ->
-    [hoconsc:ref(?MODULE, ?CONF_NS)].
+    [hoconsc:ref(?MODULE, scram)].
 
 create(
     AuthenticatorID,
@@ -51,34 +51,35 @@
 %% Hocon Schema
 %%------------------------------------------------------------------------------
 
-namespace() -> "authn-http".
+namespace() -> "authn".
 
 tags() ->
     [<<"Authentication">>].
 
+%% used for config check when the schema module is resolved
 roots() ->
     [
         {?CONF_NS,
             hoconsc:mk(
-                hoconsc:union(fun union_member_selector/1),
+                hoconsc:union(fun ?MODULE:union_member_selector/1),
                 #{}
             )}
     ].
 
-fields(get) ->
+fields(http_get) ->
     [
         {method, #{type => get, required => true, desc => ?DESC(method)}},
         {headers, fun headers_no_content_type/1}
     ] ++ common_fields();
-fields(post) ->
+fields(http_post) ->
     [
         {method, #{type => post, required => true, desc => ?DESC(method)}},
         {headers, fun headers/1}
     ] ++ common_fields().
 
-desc(get) ->
+desc(http_get) ->
     ?DESC(get);
-desc(post) ->
+desc(http_post) ->
     ?DESC(post);
 desc(_) ->
     undefined.

@@ -156,8 +157,8 @@ request_timeout(_) -> undefined.
 
 refs() ->
     [
-        hoconsc:ref(?MODULE, get),
-        hoconsc:ref(?MODULE, post)
+        hoconsc:ref(?MODULE, http_get),
+        hoconsc:ref(?MODULE, http_post)
     ].
 
 union_member_selector(all_union_members) ->

@@ -166,9 +167,9 @@ union_member_selector({value, Value}) ->
     refs(Value).
 
 refs(#{<<"method">> := <<"get">>}) ->
-    [hoconsc:ref(?MODULE, get)];
+    [hoconsc:ref(?MODULE, http_get)];
 refs(#{<<"method">> := <<"post">>}) ->
-    [hoconsc:ref(?MODULE, post)];
+    [hoconsc:ref(?MODULE, http_post)];
 refs(_) ->
     throw(#{
         field_name => method,
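With the member names changed, dispatch still works for old configs because selection is driven purely by the method field. A selection sketch (values as in refs/1 above):

    %%   #{<<"method">> := <<"get">>}  -> [hoconsc:ref(?MODULE, http_get)]
    %%   #{<<"method">> := <<"post">>} -> [hoconsc:ref(?MODULE, http_post)]
    %% Any other value throws #{field_name => method, ...} as a schema error.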
@@ -35,18 +35,17 @@
 callback_mode() -> always_sync.
 
 on_start(InstId, Opts) ->
-    PoolName = emqx_plugin_libs_pool:pool_name(InstId),
     PoolOpts = [
         {pool_size, maps:get(pool_size, Opts, ?DEFAULT_POOL_SIZE)},
         {connector_opts, Opts}
     ],
-    case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, PoolOpts) of
-        ok -> {ok, #{pool_name => PoolName}};
+    case emqx_resource_pool:start(InstId, ?MODULE, PoolOpts) of
+        ok -> {ok, #{pool_name => InstId}};
         {error, Reason} -> {error, Reason}
     end.
 
 on_stop(_InstId, #{pool_name := PoolName}) ->
-    emqx_plugin_libs_pool:stop_pool(PoolName).
+    emqx_resource_pool:stop(PoolName).
 
 on_query(InstId, get_jwks, #{pool_name := PoolName}) ->
     Result = ecpool:pick_and_do(PoolName, {emqx_authn_jwks_client, get_jwks, []}, no_handover),

@@ -72,16 +71,15 @@ on_query(_InstId, {update, Opts}, #{pool_name := PoolName}) ->
     ok.
 
 on_get_status(_InstId, #{pool_name := PoolName}) ->
-    Func =
-        fun(Conn) ->
+    case emqx_resource_pool:health_check_workers(PoolName, fun health_check/1) of
+        true -> connected;
+        false -> disconnected
+    end.
+
+health_check(Conn) ->
     case emqx_authn_jwks_client:get_jwks(Conn) of
         {ok, _} -> true;
         _ -> false
     end
-        end,
-    case emqx_plugin_libs_pool:health_check_ecpool_workers(PoolName, Func) of
-        true -> connected;
-        false -> disconnected
-    end.
 
 connect(Opts) ->
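The health probe is now a named predicate so emqx_resource_pool can check each ecpool worker. A sketch of the per-worker behaviour (Conn is an ecpool worker as elsewhere in this module):

    %% health_check(Conn) is true only when emqx_authn_jwks_client:get_jwks(Conn)
    %% returns {ok, _}; any other result marks the resource disconnected.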
@@ -43,36 +43,57 @@
 %% Hocon Schema
 %%------------------------------------------------------------------------------
 
-namespace() -> "authn-jwt".
+namespace() -> "authn".
 
 tags() ->
     [<<"Authentication">>].
 
+%% used for config check when the schema module is resolved
 roots() ->
     [
         {?CONF_NS,
             hoconsc:mk(
-                hoconsc:union(fun union_member_selector/1),
+                hoconsc:union(fun ?MODULE:union_member_selector/1),
                 #{}
             )}
     ].
 
-fields('hmac-based') ->
+fields(jwt_hmac) ->
     [
-        {use_jwks, sc(hoconsc:enum([false]), #{required => true, desc => ?DESC(use_jwks)})},
+        %% for hmac, it's the 'algorithm' field which selects this type
+        %% use_jwks field can be ignored (kept for backward compatibility)
+        {use_jwks,
+            sc(
+                hoconsc:enum([false]),
+                #{
+                    required => false,
+                    desc => ?DESC(use_jwks),
+                    importance => ?IMPORTANCE_HIDDEN
+                }
+            )},
         {algorithm,
             sc(hoconsc:enum(['hmac-based']), #{required => true, desc => ?DESC(algorithm)})},
         {secret, fun secret/1},
         {secret_base64_encoded, fun secret_base64_encoded/1}
     ] ++ common_fields();
-fields('public-key') ->
+fields(jwt_public_key) ->
     [
-        {use_jwks, sc(hoconsc:enum([false]), #{required => true, desc => ?DESC(use_jwks)})},
+        %% for public-key, it's the 'algorithm' field which selects this type
+        %% use_jwks field can be ignored (kept for backward compatibility)
+        {use_jwks,
+            sc(
+                hoconsc:enum([false]),
+                #{
+                    required => false,
+                    desc => ?DESC(use_jwks),
+                    importance => ?IMPORTANCE_HIDDEN
+                }
+            )},
         {algorithm,
             sc(hoconsc:enum(['public-key']), #{required => true, desc => ?DESC(algorithm)})},
         {public_key, fun public_key/1}
     ] ++ common_fields();
-fields('jwks') ->
+fields(jwt_jwks) ->
     [
         {use_jwks, sc(hoconsc:enum([true]), #{required => true, desc => ?DESC(use_jwks)})},
         {endpoint, fun endpoint/1},

@@ -85,12 +106,12 @@ fields('jwks') ->
         }}
     ] ++ common_fields().
 
-desc('hmac-based') ->
-    ?DESC('hmac-based');
-desc('public-key') ->
-    ?DESC('public-key');
-desc('jwks') ->
-    ?DESC('jwks');
+desc(jwt_hmac) ->
+    ?DESC(jwt_hmac);
+desc(jwt_public_key) ->
+    ?DESC(jwt_public_key);
+desc(jwt_jwks) ->
+    ?DESC(jwt_jwks);
 desc(undefined) ->
     undefined.
 

@@ -160,9 +181,9 @@ from(_) -> undefined.
 
 refs() ->
     [
-        hoconsc:ref(?MODULE, 'hmac-based'),
-        hoconsc:ref(?MODULE, 'public-key'),
-        hoconsc:ref(?MODULE, 'jwks')
+        hoconsc:ref(?MODULE, jwt_hmac),
+        hoconsc:ref(?MODULE, jwt_public_key),
+        hoconsc:ref(?MODULE, jwt_jwks)
     ].
 
 union_member_selector(all_union_members) ->

@@ -179,11 +200,11 @@ boolean(<<"false">>) -> false;
 boolean(Other) -> Other.
 
 select_ref(true, _) ->
-    [hoconsc:ref(?MODULE, 'jwks')];
+    [hoconsc:ref(?MODULE, 'jwt_jwks')];
 select_ref(false, #{<<"public_key">> := _}) ->
-    [hoconsc:ref(?MODULE, 'public-key')];
+    [hoconsc:ref(?MODULE, jwt_public_key)];
 select_ref(false, _) ->
-    [hoconsc:ref(?MODULE, 'hmac-based')];
+    [hoconsc:ref(?MODULE, jwt_hmac)];
 select_ref(_, _) ->
     throw(#{
         field_name => use_jwks,
@@ -107,14 +107,16 @@ mnesia(boot) ->
 %% Hocon Schema
 %%------------------------------------------------------------------------------
 
-namespace() -> "authn-builtin_db".
+namespace() -> "authn".
 
 tags() ->
     [<<"Authentication">>].
 
-roots() -> [?CONF_NS].
+%% used for config check when the schema module is resolved
+roots() ->
+    [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, builtin_db))}].
 
-fields(?CONF_NS) ->
+fields(builtin_db) ->
     [
         {mechanism, emqx_authn_schema:mechanism(password_based)},
         {backend, emqx_authn_schema:backend(built_in_database)},

@@ -122,8 +124,8 @@ fields(?CONF_NS) ->
         {password_hash_algorithm, fun emqx_authn_password_hashing:type_rw/1}
     ] ++ emqx_authn_schema:common_fields().
 
-desc(?CONF_NS) ->
-    ?DESC(?CONF_NS);
+desc(builtin_db) ->
+    ?DESC(builtin_db);
 desc(_) ->
     undefined.
 

@@ -138,7 +140,7 @@ user_id_type(_) -> undefined.
 %%------------------------------------------------------------------------------
 
 refs() ->
-    [hoconsc:ref(?MODULE, ?CONF_NS)].
+    [hoconsc:ref(?MODULE, builtin_db)].
 
 create(_AuthenticatorID, Config) ->
     create(Config).
@@ -44,32 +44,33 @@
 %% Hocon Schema
 %%------------------------------------------------------------------------------
 
-namespace() -> "authn-mongodb".
+namespace() -> "authn".
 
 tags() ->
     [<<"Authentication">>].
 
+%% used for config check when the schema module is resolved
 roots() ->
     [
         {?CONF_NS,
             hoconsc:mk(
-                hoconsc:union(fun union_member_selector/1),
+                hoconsc:union(fun ?MODULE:union_member_selector/1),
                 #{}
             )}
     ].
 
-fields(standalone) ->
+fields(mongo_single) ->
     common_fields() ++ emqx_connector_mongo:fields(single);
-fields('replica-set') ->
+fields(mongo_rs) ->
     common_fields() ++ emqx_connector_mongo:fields(rs);
-fields('sharded-cluster') ->
+fields(mongo_sharded) ->
     common_fields() ++ emqx_connector_mongo:fields(sharded).
 
-desc(standalone) ->
-    ?DESC(standalone);
-desc('replica-set') ->
+desc(mongo_single) ->
+    ?DESC(single);
+desc(mongo_rs) ->
     ?DESC('replica-set');
-desc('sharded-cluster') ->
+desc(mongo_sharded) ->
     ?DESC('sharded-cluster');
 desc(_) ->
     undefined.

@@ -126,9 +127,9 @@ is_superuser_field(_) -> undefined.
 
 refs() ->
     [
-        hoconsc:ref(?MODULE, standalone),
-        hoconsc:ref(?MODULE, 'replica-set'),
-        hoconsc:ref(?MODULE, 'sharded-cluster')
+        hoconsc:ref(?MODULE, mongo_single),
+        hoconsc:ref(?MODULE, mongo_rs),
+        hoconsc:ref(?MODULE, mongo_sharded)
     ].
 
 create(_AuthenticatorID, Config) ->

@@ -254,11 +255,11 @@ union_member_selector({value, Value}) ->
     refs(Value).
 
 refs(#{<<"mongo_type">> := <<"single">>}) ->
-    [hoconsc:ref(?MODULE, standalone)];
+    [hoconsc:ref(?MODULE, mongo_single)];
 refs(#{<<"mongo_type">> := <<"rs">>}) ->
-    [hoconsc:ref(?MODULE, 'replica-set')];
+    [hoconsc:ref(?MODULE, mongo_rs)];
 refs(#{<<"mongo_type">> := <<"sharded">>}) ->
-    [hoconsc:ref(?MODULE, 'sharded-cluster')];
+    [hoconsc:ref(?MODULE, mongo_sharded)];
 refs(_) ->
     throw(#{
         field_name => mongo_type,
@@ -45,14 +45,16 @@
 %% Hocon Schema
 %%------------------------------------------------------------------------------
 
-namespace() -> "authn-mysql".
+namespace() -> "authn".
 
 tags() ->
     [<<"Authentication">>].
 
-roots() -> [?CONF_NS].
+%% used for config check when the schema module is resolved
+roots() ->
+    [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, mysql))}].
 
-fields(?CONF_NS) ->
+fields(mysql) ->
     [
         {mechanism, emqx_authn_schema:mechanism(password_based)},
         {backend, emqx_authn_schema:backend(mysql)},

@@ -62,8 +64,8 @@ fields(?CONF_NS) ->
     ] ++ emqx_authn_schema:common_fields() ++
         proplists:delete(prepare_statement, emqx_connector_mysql:fields(config)).
 
-desc(?CONF_NS) ->
-    ?DESC(?CONF_NS);
+desc(mysql) ->
+    ?DESC(mysql);
 desc(_) ->
     undefined.
 

@@ -82,7 +84,7 @@ query_timeout(_) -> undefined.
 %%------------------------------------------------------------------------------
 
 refs() ->
-    [hoconsc:ref(?MODULE, ?CONF_NS)].
+    [hoconsc:ref(?MODULE, mysql)].
 
 create(_AuthenticatorID, Config) ->
     create(Config).
@@ -49,14 +49,16 @@
 %% Hocon Schema
 %%------------------------------------------------------------------------------
 
-namespace() -> "authn-postgresql".
+namespace() -> "authn".
 
 tags() ->
     [<<"Authentication">>].
 
-roots() -> [?CONF_NS].
+%% used for config check when the schema module is resolved
+roots() ->
+    [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, postgresql))}].
 
-fields(?CONF_NS) ->
+fields(postgresql) ->
     [
         {mechanism, emqx_authn_schema:mechanism(password_based)},
         {backend, emqx_authn_schema:backend(postgresql)},

@@ -66,8 +68,8 @@ fields(?CONF_NS) ->
     emqx_authn_schema:common_fields() ++
         proplists:delete(prepare_statement, emqx_connector_pgsql:fields(config)).
 
-desc(?CONF_NS) ->
-    ?DESC(?CONF_NS);
+desc(postgresql) ->
+    ?DESC(postgresql);
 desc(_) ->
     undefined.
 

@@ -81,7 +83,7 @@ query(_) -> undefined.
 %%------------------------------------------------------------------------------
 
 refs() ->
-    [hoconsc:ref(?MODULE, ?CONF_NS)].
+    [hoconsc:ref(?MODULE, postgresql)].
 
 create(_AuthenticatorID, Config) ->
     create(Config).
@@ -44,32 +44,33 @@
 %% Hocon Schema
 %%------------------------------------------------------------------------------
 
-namespace() -> "authn-redis".
+namespace() -> "authn".
 
 tags() ->
     [<<"Authentication">>].
 
+%% used for config check when the schema module is resolved
 roots() ->
     [
         {?CONF_NS,
             hoconsc:mk(
-                hoconsc:union(fun union_member_selector/1),
+                hoconsc:union(fun ?MODULE:union_member_selector/1),
                 #{}
             )}
     ].
 
-fields(standalone) ->
+fields(redis_single) ->
     common_fields() ++ emqx_connector_redis:fields(single);
-fields(cluster) ->
+fields(redis_cluster) ->
     common_fields() ++ emqx_connector_redis:fields(cluster);
-fields(sentinel) ->
+fields(redis_sentinel) ->
     common_fields() ++ emqx_connector_redis:fields(sentinel).
 
-desc(standalone) ->
-    ?DESC(standalone);
-desc(cluster) ->
+desc(redis_single) ->
+    ?DESC(single);
+desc(redis_cluster) ->
     ?DESC(cluster);
-desc(sentinel) ->
+desc(redis_sentinel) ->
     ?DESC(sentinel);
 desc(_) ->
     "".

@@ -93,9 +94,9 @@ cmd(_) -> undefined.
 
 refs() ->
     [
-        hoconsc:ref(?MODULE, standalone),
-        hoconsc:ref(?MODULE, cluster),
-        hoconsc:ref(?MODULE, sentinel)
+        hoconsc:ref(?MODULE, redis_single),
+        hoconsc:ref(?MODULE, redis_cluster),
+        hoconsc:ref(?MODULE, redis_sentinel)
     ].
 
 union_member_selector(all_union_members) ->

@@ -104,11 +105,11 @@ union_member_selector({value, Value}) ->
     refs(Value).
 
 refs(#{<<"redis_type">> := <<"single">>}) ->
-    [hoconsc:ref(?MODULE, standalone)];
+    [hoconsc:ref(?MODULE, redis_single)];
 refs(#{<<"redis_type">> := <<"cluster">>}) ->
-    [hoconsc:ref(?MODULE, cluster)];
+    [hoconsc:ref(?MODULE, redis_cluster)];
 refs(#{<<"redis_type">> := <<"sentinel">>}) ->
-    [hoconsc:ref(?MODULE, sentinel)];
+    [hoconsc:ref(?MODULE, redis_sentinel)];
 refs(_) ->
     throw(#{
         field_name => redis_type,
@@ -107,7 +107,7 @@ t_update_with_invalid_config(_Config) ->
     ?assertMatch(
         {error, #{
             kind := validation_error,
-            matched_type := "authn-postgresql:authentication",
+            matched_type := "authn:postgresql",
             path := "authentication.1.server",
             reason := required_field
         }},

@@ -162,7 +162,7 @@ t_create_invalid_config(_Config) ->
     ?assertMatch(
         {error, #{
             kind := validation_error,
-            matched_type := "authn-redis:standalone",
+            matched_type := "authn:redis_single",
             path := "authentication.1.server",
             reason := required_field
         }},
@@ -53,7 +53,7 @@ t_check_schema(_Config) ->
     ?assertThrow(
         #{
             path := "authentication.1.password_hash_algorithm.name",
-            matched_type := "authn-builtin_db:authentication/authn-hash:simple",
+            matched_type := "authn:builtin_db/authn-hash:simple",
             reason := unable_to_convert_to_enum_symbol
         },
         Check(ConfigNotOk)

@@ -72,7 +72,7 @@ t_check_schema(_Config) ->
         #{
             path := "authentication.1.password_hash_algorithm",
             reason := "algorithm_name_missing",
-            matched_type := "authn-builtin_db:authentication"
+            matched_type := "authn:builtin_db"
         },
         Check(ConfigMissingAlgoName)
     ).
@@ -32,19 +32,19 @@ union_member_selector_mongo_test_() ->
         end},
         {"single", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-mongodb:standalone"}),
+                ?ERR(#{matched_type := "authn:mongo_single"}),
                 Check("{mongo_type: single}")
             )
         end},
         {"replica-set", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-mongodb:replica-set"}),
+                ?ERR(#{matched_type := "authn:mongo_rs"}),
                 Check("{mongo_type: rs}")
             )
        end},
        {"sharded", fun() ->
            ?assertMatch(
-                ?ERR(#{matched_type := "authn-mongodb:sharded-cluster"}),
+                ?ERR(#{matched_type := "authn:mongo_sharded"}),
                Check("{mongo_type: sharded}")
            )
        end}

@@ -61,19 +61,19 @@ union_member_selector_jwt_test_() ->
         end},
         {"jwks", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-jwt:jwks"}),
+                ?ERR(#{matched_type := "authn:jwt_jwks"}),
                 Check("{use_jwks = true}")
             )
         end},
         {"publick-key", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-jwt:public-key"}),
+                ?ERR(#{matched_type := "authn:jwt_public_key"}),
                 Check("{use_jwks = false, public_key = 1}")
             )
        end},
        {"hmac-based", fun() ->
            ?assertMatch(
-                ?ERR(#{matched_type := "authn-jwt:hmac-based"}),
+                ?ERR(#{matched_type := "authn:jwt_hmac"}),
                Check("{use_jwks = false}")
            )
        end}

@@ -90,19 +90,19 @@ union_member_selector_redis_test_() ->
         end},
         {"single", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-redis:standalone"}),
+                ?ERR(#{matched_type := "authn:redis_single"}),
                 Check("{redis_type = single}")
             )
         end},
         {"cluster", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-redis:cluster"}),
+                ?ERR(#{matched_type := "authn:redis_cluster"}),
                 Check("{redis_type = cluster}")
            )
        end},
        {"sentinel", fun() ->
            ?assertMatch(
-                ?ERR(#{matched_type := "authn-redis:sentinel"}),
+                ?ERR(#{matched_type := "authn:redis_sentinel"}),
                Check("{redis_type = sentinel}")
            )
        end}

@@ -119,13 +119,13 @@ union_member_selector_http_test_() ->
         end},
         {"get", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-http:get"}),
+                ?ERR(#{matched_type := "authn:http_get"}),
                 Check("{method = get}")
             )
         end},
         {"post", fun() ->
             ?assertMatch(
-                ?ERR(#{matched_type := "authn-http:post"}),
+                ?ERR(#{matched_type := "authn:http_post"}),
                 Check("{method = post}")
             )
         end}
@@ -185,7 +185,7 @@ match_who(#{peerhost := IpAddress}, {ipaddrs, CIDRs}) ->
 match_who(ClientInfo, {'and', Principals}) when is_list(Principals) ->
     lists:foldl(
         fun(Principal, Permission) ->
-            match_who(ClientInfo, Principal) andalso Permission
+            Permission andalso match_who(ClientInfo, Principal)
         end,
         true,
         Principals

@@ -193,7 +193,7 @@ match_who(ClientInfo, {'and', Principals}) when is_list(Principals) ->
 match_who(ClientInfo, {'or', Principals}) when is_list(Principals) ->
     lists:foldl(
         fun(Principal, Permission) ->
-            match_who(ClientInfo, Principal) orelse Permission
+            Permission orelse match_who(ClientInfo, Principal)
         end,
         false,
         Principals
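Swapping the operands makes the accumulator the left argument of andalso/orelse, so once the combined result is decided the remaining match_who/2 calls are skipped (the fold still walks the list, but does no per-principal work). A self-contained sketch of the same pattern, in a hypothetical demo module:

    -module(fold_demo).
    -export([all/2, any/2]).

    %% Pred is no longer evaluated once Acc is false (all) / true (any).
    all(Pred, Xs) -> lists:foldl(fun(X, Acc) -> Acc andalso Pred(X) end, true, Xs).
    any(Pred, Xs) -> lists:foldl(fun(X, Acc) -> Acc orelse Pred(X) end, false, Xs).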
@@ -54,7 +54,7 @@ type_names() ->
     file,
     http_get,
     http_post,
-    mnesia,
+    builtin_db,
     mongo_single,
     mongo_rs,
     mongo_sharded,

@@ -93,7 +93,7 @@ fields(http_post) ->
         {method, method(post)},
         {headers, fun headers/1}
     ];
-fields(mnesia) ->
+fields(builtin_db) ->
     authz_common_fields(built_in_database);
 fields(mongo_single) ->
     authz_common_fields(mongodb) ++

@@ -191,8 +191,8 @@ desc(http_get) ->
     ?DESC(http_get);
 desc(http_post) ->
     ?DESC(http_post);
-desc(mnesia) ->
-    ?DESC(mnesia);
+desc(builtin_db) ->
+    ?DESC(builtin_db);
 desc(mongo_single) ->
     ?DESC(mongo_single);
 desc(mongo_rs) ->

@@ -459,7 +459,7 @@ select_union_member(#{<<"type">> := <<"http">>} = Value) ->
         })
     end;
 select_union_member(#{<<"type">> := <<"built_in_database">>}) ->
-    ?R_REF(mnesia);
+    ?R_REF(builtin_db);
 select_union_member(#{<<"type">> := Type}) ->
     select_union_member_loop(Type, type_names());
 select_union_member(_) ->

@@ -494,7 +494,10 @@ authz_fields() ->
                 default => [],
                 desc => ?DESC(sources),
                 %% doc_lift forces a root-level reference instead of nesting sub-structs
-                extra => #{doc_lift => true}
+                extra => #{doc_lift => true},
+                %% it is recommended to configure authz sources from the dashboard,
+                %% hence the importance level for this config is low
+                importance => ?IMPORTANCE_LOW
             }
         )}
     ].
@@ -137,7 +137,7 @@ namespace() -> "bridge".
 tags() ->
     [<<"Bridge">>].
 
-roots() -> [{bridges, ?HOCON(?R_REF(bridges), #{importance => ?IMPORTANCE_HIDDEN})}].
+roots() -> [{bridges, ?HOCON(?R_REF(bridges), #{importance => ?IMPORTANCE_LOW})}].
 
 fields(bridges) ->
     [
@@ -1,12 +1,7 @@
 ## NOTE:
-## Configs in this file might be overridden by:
-## 1. Environment variables which start with 'EMQX_' prefix
-## 2. File $EMQX_NODE__DATA_DIR/configs/cluster-override.conf
-## 3. File $EMQX_NODE__DATA_DIR/configs/local-override.conf
-##
-## The *-override.conf files are overwritten at runtime when changes
-## are made from EMQX dashboard UI, management HTTP API, or CLI.
-## All configuration details can be found in emqx.conf.example
+## The EMQX configuration is prioritized (overlaid) in the following order:
+## `data/configs/cluster.hocon < etc/emqx.conf < environment variables`.
 
 
 node {
     name = "emqx@127.0.0.1"
@@ -25,9 +25,15 @@
 -export([update/3, update/4]).
 -export([remove/2, remove/3]).
 -export([reset/2, reset/3]).
--export([dump_schema/1, dump_schema/3]).
+-export([dump_schema/2]).
 -export([schema_module/0]).
--export([gen_example_conf/4]).
+-export([gen_example_conf/2]).
+
+%% TODO: move to emqx_dashboard when we stop building the api schema at build time
+-export([
+    hotconf_schema_json/1,
+    bridge_schema_json/1
+]).
 
 %% for rpc
 -export([get_node_and_config/1]).

@@ -136,24 +142,22 @@ reset(Node, KeyPath, Opts) ->
     emqx_conf_proto_v2:reset(Node, KeyPath, Opts).
 
 %% @doc Called from the build script.
--spec dump_schema(file:name_all()) -> ok.
-dump_schema(Dir) ->
-    I18nFile = emqx_dashboard:i18n_file(),
-    dump_schema(Dir, emqx_conf_schema, I18nFile).
-
-dump_schema(Dir, SchemaModule, I18nFile) ->
+%% TODO: move to an external escript after all refactoring is done
+dump_schema(Dir, SchemaModule) ->
+    _ = application:load(emqx_dashboard),
+    ok = emqx_dashboard_desc_cache:init(),
     lists:foreach(
         fun(Lang) ->
-            gen_config_md(Dir, I18nFile, SchemaModule, Lang),
-            gen_api_schema_json(Dir, I18nFile, Lang),
-            gen_example_conf(Dir, I18nFile, SchemaModule, Lang),
-            gen_schema_json(Dir, I18nFile, SchemaModule, Lang)
+            ok = gen_config_md(Dir, SchemaModule, Lang),
+            ok = gen_api_schema_json(Dir, Lang),
+            ok = gen_schema_json(Dir, SchemaModule, Lang)
         end,
         ["en", "zh"]
-    ).
+    ),
+    ok = gen_example_conf(Dir, SchemaModule).
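With the i18n file dropped from the signatures, the build-time entry points shrink to `dump_schema/2` and `gen_example_conf/2`. A hedged usage sketch (the output directory is illustrative):

%% Typically driven by the build scripts:
ok = emqx_conf:dump_schema("_build/docgen", emqx_conf_schema).
%% dump_schema/2 ends by calling gen_example_conf/2 itself, but the
%% 2-arity function is also exported for direct use:
ok = emqx_conf:gen_example_conf("_build/docgen", emqx_conf_schema).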
 %% for scripts/spellcheck.
-gen_schema_json(Dir, I18nFile, SchemaModule, Lang) ->
+gen_schema_json(Dir, SchemaModule, Lang) ->
     SchemaJsonFile = filename:join([Dir, "schema-" ++ Lang ++ ".json"]),
     io:format(user, "===< Generating: ~s~n", [SchemaJsonFile]),
     %% EMQX_SCHEMA_FULL_DUMP is quite a hidden API

@@ -164,40 +168,62 @@ gen_schema_json(Dir, I18nFile, SchemaModule, Lang) ->
         false -> ?IMPORTANCE_LOW
     end,
     io:format(user, "===< Including fields from importance level: ~p~n", [IncludeImportance]),
-    Opts = #{desc_file => I18nFile, lang => Lang, include_importance_up_from => IncludeImportance},
+    Opts = #{
+        include_importance_up_from => IncludeImportance,
+        desc_resolver => make_desc_resolver(Lang)
+    },
     JsonMap = hocon_schema_json:gen(SchemaModule, Opts),
     IoData = emqx_utils_json:encode(JsonMap, [pretty, force_utf8]),
     ok = file:write_file(SchemaJsonFile, IoData).
 
-gen_api_schema_json(Dir, I18nFile, Lang) ->
-    emqx_dashboard:init_i18n(I18nFile, list_to_binary(Lang)),
+%% TODO: delete this function when we stop generating this JSON at build time.
+gen_api_schema_json(Dir, Lang) ->
     gen_api_schema_json_hotconf(Dir, Lang),
-    gen_api_schema_json_bridge(Dir, Lang),
-    emqx_dashboard:clear_i18n().
+    gen_api_schema_json_bridge(Dir, Lang).
 
+%% TODO: delete this function when we stop generating this JSON at build time.
 gen_api_schema_json_hotconf(Dir, Lang) ->
-    SchemaInfo = #{title => <<"EMQX Hot Conf API Schema">>, version => <<"0.1.0">>},
     File = schema_filename(Dir, "hot-config-schema-", Lang),
-    ok = do_gen_api_schema_json(File, emqx_mgmt_api_configs, SchemaInfo).
+    IoData = hotconf_schema_json(Lang),
+    ok = write_api_schema_json_file(File, IoData).
 
+%% TODO: delete this function when we stop generating this JSON at build time.
 gen_api_schema_json_bridge(Dir, Lang) ->
-    SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => <<"0.1.0">>},
     File = schema_filename(Dir, "bridge-api-", Lang),
-    ok = do_gen_api_schema_json(File, emqx_bridge_api, SchemaInfo).
+    IoData = bridge_schema_json(Lang),
+    ok = write_api_schema_json_file(File, IoData).
+
+%% TODO: delete this function when we stop generating this JSON at build time.
+write_api_schema_json_file(File, IoData) ->
+    io:format(user, "===< Generating: ~s~n", [File]),
+    file:write_file(File, IoData).
+
+%% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time.
+hotconf_schema_json(Lang) ->
+    SchemaInfo = #{title => <<"EMQX Hot Conf API Schema">>, version => <<"0.1.0">>},
+    gen_api_schema_json_iodata(emqx_mgmt_api_configs, SchemaInfo, Lang).
+
+%% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time.
+bridge_schema_json(Lang) ->
+    SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => <<"0.1.0">>},
+    gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo, Lang).
 
 schema_filename(Dir, Prefix, Lang) ->
     Filename = Prefix ++ Lang ++ ".json",
     filename:join([Dir, Filename]).
 
-gen_config_md(Dir, I18nFile, SchemaModule, Lang) ->
+%% TODO: remove it and also remove hocon_md.erl and friends.
+%% Markdown generation from the schema is a failure, and we are moving to an
+%% interactive viewer like swagger UI.
+gen_config_md(Dir, SchemaModule, Lang) ->
     SchemaMdFile = filename:join([Dir, "config-" ++ Lang ++ ".md"]),
     io:format(user, "===< Generating: ~s~n", [SchemaMdFile]),
-    ok = gen_doc(SchemaMdFile, SchemaModule, I18nFile, Lang).
+    ok = gen_doc(SchemaMdFile, SchemaModule, Lang).
 
-gen_example_conf(Dir, I18nFile, SchemaModule, Lang) ->
-    SchemaMdFile = filename:join([Dir, "emqx.conf." ++ Lang ++ ".example"]),
+gen_example_conf(Dir, SchemaModule) ->
+    SchemaMdFile = filename:join([Dir, "emqx.conf.example"]),
     io:format(user, "===< Generating: ~s~n", [SchemaMdFile]),
-    ok = gen_example(SchemaMdFile, SchemaModule, I18nFile, Lang).
+    ok = gen_example(SchemaMdFile, SchemaModule).
 
 %% @doc Return the root schema module.
 -spec schema_module() -> module().
@@ -211,35 +237,48 @@ schema_module() ->
 %% Internal functions
 %%--------------------------------------------------------------------
 
--spec gen_doc(file:name_all(), module(), file:name_all(), string()) -> ok.
-gen_doc(File, SchemaModule, I18nFile, Lang) ->
+%% @doc Make a resolver function that hocon_schema_json can use to look up
+%% descriptions while dumping the schema.
+make_desc_resolver(Lang) ->
+    fun
+        ({desc, Namespace, Id}) ->
+            emqx_dashboard_desc_cache:lookup(Lang, Namespace, Id, desc);
+        (Desc) ->
+            unicode:characters_to_binary(Desc)
+    end.
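The resolver closes over the language and is handed to the hocon dump as `desc_resolver`; it receives either a structured `{desc, Namespace, Id}` pointer or a literal string. A hedged sketch of calling it directly (the namespace and id are illustrative and may not exist in the cache):

Resolver = make_desc_resolver("en"),
%% structured pointers go through the desc cache (undefined if absent):
_Desc = Resolver({desc, emqx_schema, listener_bind}),
%% plain strings are only normalized to UTF-8 binaries:
<<"inline text">> = Resolver("inline text").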
+
+-spec gen_doc(file:name_all(), module(), string()) -> ok.
+gen_doc(File, SchemaModule, Lang) ->
     Version = emqx_release:version(),
     Title =
         "# " ++ emqx_release:description() ++ " Configuration\n\n" ++
             "<!--" ++ Version ++ "-->",
     BodyFile = filename:join([rel, "emqx_conf.template." ++ Lang ++ ".md"]),
     {ok, Body} = file:read_file(BodyFile),
-    Opts = #{title => Title, body => Body, desc_file => I18nFile, lang => Lang},
+    Resolver = make_desc_resolver(Lang),
+    Opts = #{title => Title, body => Body, desc_resolver => Resolver},
     Doc = hocon_schema_md:gen(SchemaModule, Opts),
     file:write_file(File, Doc).
 
-gen_example(File, SchemaModule, I18nFile, Lang) ->
+gen_example(File, SchemaModule) ->
+    %% we do not generate descriptions in example files,
+    %% so there is no need for a desc_resolver
     Opts = #{
         title => <<"EMQX Configuration Example">>,
         body => <<"">>,
-        desc_file => I18nFile,
-        lang => Lang,
         include_importance_up_from => ?IMPORTANCE_MEDIUM
     },
     Example = hocon_schema_example:gen(SchemaModule, Opts),
     file:write_file(File, Example).
 
-%% Only gen hot_conf schema, not all configuration fields.
-do_gen_api_schema_json(File, SchemaMod, SchemaInfo) ->
-    io:format(user, "===< Generating: ~s~n", [File]),
+%% TODO: move this to emqx_dashboard when we stop generating
+%% this JSON at build time.
+gen_api_schema_json_iodata(SchemaMod, SchemaInfo, Lang) ->
     {ApiSpec0, Components0} = emqx_dashboard_swagger:spec(
         SchemaMod,
-        #{schema_converter => fun hocon_schema_to_spec/2}
+        #{
+            schema_converter => fun hocon_schema_to_spec/2,
+            i18n_lang => Lang
+        }
     ),
     ApiSpec = lists:foldl(
         fun({Path, Spec, _, _}, Acc) ->

@@ -268,22 +307,14 @@ do_gen_api_schema_json(File, SchemaMod, SchemaInfo) ->
         ApiSpec0
     ),
     Components = lists:foldl(fun(M, Acc) -> maps:merge(M, Acc) end, #{}, Components0),
-    IoData = emqx_utils_json:encode(
+    emqx_utils_json:encode(
         #{
             info => SchemaInfo,
             paths => ApiSpec,
             components => #{schemas => Components}
         },
         [pretty, force_utf8]
-    ),
-    file:write_file(File, IoData).
-
--define(INIT_SCHEMA, #{
-    fields => #{},
-    translations => #{},
-    validations => [],
-    namespace => undefined
-}).
+    ).
 
 -define(TO_REF(_N_, _F_), iolist_to_binary([to_bin(_N_), ".", to_bin(_F_)])).
 -define(TO_COMPONENTS_SCHEMA(_M_, _F_),
@@ -100,7 +100,7 @@ roots() ->
             ?R_REF("rpc"),
             #{
                 translate_to => ["gen_rpc"],
-                importance => ?IMPORTANCE_HIDDEN
+                importance => ?IMPORTANCE_LOW
             }
         )}
     ] ++

@@ -135,7 +135,7 @@ fields("cluster") ->
         )},
     {"core_nodes",
         sc(
-            emqx_schema:comma_separated_atoms(),
+            node_array(),
             #{
                 mapping => "mria.core_nodes",
                 default => [],

@@ -203,7 +203,7 @@ fields(cluster_static) ->
     [
         {"seeds",
            sc(
-                hoconsc:array(atom()),
+                node_array(),
                #{
                    default => [],
                    desc => ?DESC(cluster_static_seeds),

@@ -1288,7 +1288,7 @@ emqx_schema_high_prio_roots() ->
             ?R_REF("authorization"),
             #{
                 desc => ?DESC(authorization),
-                importance => ?IMPORTANCE_HIDDEN
+                importance => ?IMPORTANCE_HIGH
             }
         )},
     lists:keyreplace("authorization", 1, Roots, Authz).

@@ -1312,3 +1312,6 @@ validator_string_re(Val, RE, Error) ->
     catch
         _:_ -> {error, Error}
     end.
+
+node_array() ->
+    hoconsc:union([emqx_schema:comma_separated_atoms(), hoconsc:array(atom())]).
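`node_array()` widens `seeds` and `core_nodes` to accept either a comma-separated string or a proper array; the `array_nodes_test` below exercises both spellings. A quick parsing sketch of the two accepted forms (assuming hocon's `binary/1` parser):

%% both shapes parse, and either satisfies the node_array() union:
{ok, _} = hocon:binary(<<"seeds = \"emqx1@127.0.0.1, emqx2@127.0.0.1\"">>),
{ok, _} = hocon:binary(<<"seeds = [\"emqx1@127.0.0.1\", \"emqx2@127.0.0.1\"]">>).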
@@ -5,6 +5,46 @@
 -module(emqx_conf_schema_tests).
 
 -include_lib("eunit/include/eunit.hrl").
+
+array_nodes_test() ->
+    ExpectNodes = ['emqx1@127.0.0.1', 'emqx2@127.0.0.1'],
+    BaseConf =
+        ""
+        "\n"
+        "    node {\n"
+        "        name = \"emqx1@127.0.0.1\"\n"
+        "        cookie = \"emqxsecretcookie\"\n"
+        "        data_dir = \"data\"\n"
+        "    }\n"
+        "    cluster {\n"
+        "        name = emqxcl\n"
+        "        discovery_strategy = static\n"
+        "        static.seeds = ~p\n"
+        "        core_nodes = ~p\n"
+        "    }\n"
+        "    "
+        "",
+    lists:foreach(
+        fun(Nodes) ->
+            ConfFile = iolist_to_binary(io_lib:format(BaseConf, [Nodes, Nodes])),
+            {ok, Conf} = hocon:binary(ConfFile, #{format => richmap}),
+            ConfList = hocon_tconf:generate(emqx_conf_schema, Conf),
+            ClusterDiscovery = proplists:get_value(
+                cluster_discovery, proplists:get_value(ekka, ConfList)
+            ),
+            ?assertEqual(
+                {static, [{seeds, ExpectNodes}]},
+                ClusterDiscovery,
+                Nodes
+            ),
+            ?assertEqual(
+                ExpectNodes,
+                proplists:get_value(core_nodes, proplists:get_value(mria, ConfList)),
+                Nodes
+            )
+        end,
+        [["emqx1@127.0.0.1", "emqx2@127.0.0.1"], "emqx1@127.0.0.1, emqx2@127.0.0.1"]
+    ),
+    ok.
 
 doc_gen_test() ->
     %% the JSON file is too large to encode.
@@ -231,9 +231,8 @@ on_start(
         {transport_opts, NTransportOpts},
         {enable_pipelining, maps:get(enable_pipelining, Config, ?DEFAULT_PIPELINE_SIZE)}
     ],
-    PoolName = emqx_plugin_libs_pool:pool_name(InstId),
     State = #{
-        pool_name => PoolName,
+        pool_name => InstId,
         pool_type => PoolType,
         host => Host,
         port => Port,

@@ -241,7 +240,7 @@ on_start(
         base_path => BasePath,
         request => preprocess_request(maps:get(request, Config, undefined))
     },
-    case ehttpc_sup:start_pool(PoolName, PoolOpts) of
+    case ehttpc_sup:start_pool(InstId, PoolOpts) of
         {ok, _} -> {ok, State};
         {error, {already_started, _}} -> {ok, State};
         {error, Reason} -> {error, Reason}
@@ -87,20 +87,19 @@ on_start(
         {pool_size, PoolSize},
         {auto_reconnect, ?AUTO_RECONNECT_INTERVAL}
     ],
-    PoolName = emqx_plugin_libs_pool:pool_name(InstId),
-    case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ SslOpts) of
-        ok -> {ok, #{poolname => PoolName}};
+    case emqx_resource_pool:start(InstId, ?MODULE, Opts ++ SslOpts) of
+        ok -> {ok, #{pool_name => InstId}};
         {error, Reason} -> {error, Reason}
     end.
 
-on_stop(InstId, #{poolname := PoolName}) ->
+on_stop(InstId, #{pool_name := PoolName}) ->
     ?SLOG(info, #{
         msg => "stopping_ldap_connector",
         connector => InstId
     }),
-    emqx_plugin_libs_pool:stop_pool(PoolName).
+    emqx_resource_pool:stop(PoolName).
 
-on_query(InstId, {search, Base, Filter, Attributes}, #{poolname := PoolName} = State) ->
+on_query(InstId, {search, Base, Filter, Attributes}, #{pool_name := PoolName} = State) ->
     Request = {Base, Filter, Attributes},
     ?TRACE(
         "QUERY",
@@ -182,12 +182,11 @@ on_start(
         {options, init_topology_options(maps:to_list(Topology), [])},
         {worker_options, init_worker_options(maps:to_list(NConfig), SslOpts)}
     ],
-    PoolName = emqx_plugin_libs_pool:pool_name(InstId),
     Collection = maps:get(collection, Config, <<"mqtt">>),
-    case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts) of
+    case emqx_resource_pool:start(InstId, ?MODULE, Opts) of
         ok ->
             {ok, #{
-                poolname => PoolName,
+                pool_name => InstId,
                 type => Type,
                 collection => Collection
             }};

@@ -195,17 +194,17 @@ on_start(
             {error, Reason}
     end.
 
-on_stop(InstId, #{poolname := PoolName}) ->
+on_stop(InstId, #{pool_name := PoolName}) ->
     ?SLOG(info, #{
         msg => "stopping_mongodb_connector",
         connector => InstId
     }),
-    emqx_plugin_libs_pool:stop_pool(PoolName).
+    emqx_resource_pool:stop(PoolName).
 
 on_query(
     InstId,
     {send_message, Document},
-    #{poolname := PoolName, collection := Collection} = State
+    #{pool_name := PoolName, collection := Collection} = State
 ) ->
     Request = {insert, Collection, Document},
     ?TRACE(

@@ -234,7 +233,7 @@ on_query(
 on_query(
     InstId,
     {Action, Collection, Filter, Projector},
-    #{poolname := PoolName} = State
+    #{pool_name := PoolName} = State
 ) ->
     Request = {Action, Collection, Filter, Projector},
     ?TRACE(

@@ -263,8 +262,7 @@ on_query(
         {ok, Result}
     end.
 
--dialyzer({nowarn_function, [on_get_status/2]}).
-on_get_status(InstId, #{poolname := PoolName} = _State) ->
+on_get_status(InstId, #{pool_name := PoolName}) ->
     case health_check(PoolName) of
         true ->
             ?tp(debug, emqx_connector_mongo_health_check, #{

@@ -281,8 +279,10 @@ on_get_status(InstId, #{poolname := PoolName} = _State) ->
     end.
 
 health_check(PoolName) ->
-    emqx_plugin_libs_pool:health_check_ecpool_workers(
-        PoolName, fun ?MODULE:check_worker_health/1, ?HEALTH_CHECK_TIMEOUT + timer:seconds(1)
+    emqx_resource_pool:health_check_workers(
+        PoolName,
+        fun ?MODULE:check_worker_health/1,
+        ?HEALTH_CHECK_TIMEOUT + timer:seconds(1)
     ).
 
 %% ===================================================================
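The MongoDB changes above follow the same pattern as the other connectors in this patch: `emqx_plugin_libs_pool` calls become `emqx_resource_pool` calls, and the instance ID doubles as the ecpool name. A minimal, self-contained sketch of that start/health-check/stop sequence, assuming the `emqx_resource_pool` functions used above (the dummy `connect/1` stands in for a real driver callback):

-module(pool_lifecycle_sketch).
-export([demo/1, connect/1]).

%% ecpool calls back into connect/1 to open each worker's connection;
%% a throwaway process keeps this sketch self-contained.
connect(_Opts) ->
    {ok, spawn_link(fun() -> receive stop -> ok end end)}.

demo(InstId) ->
    Opts = [{pool_size, 2}, {auto_reconnect, 2}],
    ok = emqx_resource_pool:start(InstId, ?MODULE, Opts),
    true = emqx_resource_pool:health_check_workers(InstId, fun(_Conn) -> true end),
    ok = emqx_resource_pool:stop(InstId).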
@@ -51,7 +51,7 @@
 -type sqls() :: #{atom() => binary()}.
 -type state() ::
     #{
-        poolname := atom(),
+        pool_name := binary(),
         prepare_statement := prepares(),
         params_tokens := params_tokens(),
         batch_inserts := sqls(),

@@ -123,13 +123,10 @@ on_start(
             {pool_size, PoolSize}
         ]
     ),
-
-    PoolName = emqx_plugin_libs_pool:pool_name(InstId),
-    Prepares = parse_prepare_sql(Config),
-    State = maps:merge(#{poolname => PoolName}, Prepares),
-    case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of
+    State = parse_prepare_sql(Config),
+    case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of
         ok ->
-            {ok, init_prepare(State)};
+            {ok, init_prepare(State#{pool_name => InstId})};
         {error, Reason} ->
             ?tp(
                 mysql_connector_start_failed,

@@ -143,12 +140,12 @@ maybe_add_password_opt(undefined, Options) ->
 maybe_add_password_opt(Password, Options) ->
     [{password, Password} | Options].
 
-on_stop(InstId, #{poolname := PoolName}) ->
+on_stop(InstId, #{pool_name := PoolName}) ->
     ?SLOG(info, #{
         msg => "stopping_mysql_connector",
         connector => InstId
     }),
-    emqx_plugin_libs_pool:stop_pool(PoolName).
+    emqx_resource_pool:stop(PoolName).
 
 on_query(InstId, {TypeOrKey, SQLOrKey}, State) ->
     on_query(InstId, {TypeOrKey, SQLOrKey, [], default_timeout}, State);

@@ -157,7 +154,7 @@ on_query(InstId, {TypeOrKey, SQLOrKey, Params}, State) ->
 on_query(
     InstId,
     {TypeOrKey, SQLOrKey, Params, Timeout},
-    #{poolname := PoolName, prepare_statement := Prepares} = State
+    #{pool_name := PoolName, prepare_statement := Prepares} = State
 ) ->
     MySqlFunction = mysql_function(TypeOrKey),
     {SQLOrKey2, Data} = proc_sql_params(TypeOrKey, SQLOrKey, Params, State),

@@ -216,8 +213,8 @@ mysql_function(prepared_query) ->
 mysql_function(_) ->
     mysql_function(prepared_query).
 
-on_get_status(_InstId, #{poolname := Pool} = State) ->
-    case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of
+on_get_status(_InstId, #{pool_name := PoolName} = State) ->
+    case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of
         true ->
             case do_check_prepares(State) of
                 ok ->

@@ -238,7 +235,7 @@ do_get_status(Conn) ->
 
 do_check_prepares(#{prepare_statement := Prepares}) when is_map(Prepares) ->
     ok;
-do_check_prepares(State = #{poolname := PoolName, prepare_statement := {error, Prepares}}) ->
+do_check_prepares(State = #{pool_name := PoolName, prepare_statement := {error, Prepares}}) ->
     %% retry to prepare
     case prepare_sql(Prepares, PoolName) of
         ok ->

@@ -253,7 +250,7 @@ do_check_prepares(State = #{poolname := PoolName, prepare_statement := {error, P
 connect(Options) ->
     mysql:start_link(Options).
 
-init_prepare(State = #{prepare_statement := Prepares, poolname := PoolName}) ->
+init_prepare(State = #{prepare_statement := Prepares, pool_name := PoolName}) ->
     case maps:size(Prepares) of
         0 ->
             State;

@@ -409,7 +406,7 @@ on_sql_query(
     SQLOrKey,
     Params,
     Timeout,
-    #{poolname := PoolName} = State
+    #{pool_name := PoolName} = State
 ) ->
     LogMeta = #{connector => InstId, sql => SQLOrKey, state => State},
     ?TRACE("QUERY", "mysql_connector_received", LogMeta),
@@ -56,7 +56,7 @@
 
 -type state() ::
     #{
-        poolname := atom(),
+        pool_name := binary(),
         prepare_sql := prepares(),
         params_tokens := params_tokens(),
         prepare_statement := epgsql:statement()

@@ -120,13 +120,10 @@ on_start(
         {auto_reconnect, ?AUTO_RECONNECT_INTERVAL},
         {pool_size, PoolSize}
     ],
-    PoolName = emqx_plugin_libs_pool:pool_name(InstId),
-    Prepares = parse_prepare_sql(Config),
-    InitState = #{poolname => PoolName, prepare_statement => #{}},
-    State = maps:merge(InitState, Prepares),
-    case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of
+    State = parse_prepare_sql(Config),
+    case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of
         ok ->
-            {ok, init_prepare(State)};
+            {ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})};
         {error, Reason} ->
             ?tp(
                 pgsql_connector_start_failed,

@@ -135,19 +132,19 @@ on_start(
             {error, Reason}
     end.
 
-on_stop(InstId, #{poolname := PoolName}) ->
+on_stop(InstId, #{pool_name := PoolName}) ->
     ?SLOG(info, #{
         msg => "stopping postgresql connector",
         connector => InstId
     }),
-    emqx_plugin_libs_pool:stop_pool(PoolName).
+    emqx_resource_pool:stop(PoolName).
 
-on_query(InstId, {TypeOrKey, NameOrSQL}, #{poolname := _PoolName} = State) ->
+on_query(InstId, {TypeOrKey, NameOrSQL}, State) ->
     on_query(InstId, {TypeOrKey, NameOrSQL, []}, State);
 on_query(
     InstId,
     {TypeOrKey, NameOrSQL, Params},
-    #{poolname := PoolName} = State
+    #{pool_name := PoolName} = State
 ) ->
     ?SLOG(debug, #{
         msg => "postgresql connector received sql query",

@@ -174,7 +171,7 @@ pgsql_query_type(_) ->
 on_batch_query(
     InstId,
     BatchReq,
-    #{poolname := PoolName, params_tokens := Tokens, prepare_statement := Sts} = State
+    #{pool_name := PoolName, params_tokens := Tokens, prepare_statement := Sts} = State
 ) ->
     case BatchReq of
         [{Key, _} = Request | _] ->

@@ -258,8 +255,8 @@ on_sql_query(InstId, PoolName, Type, NameOrSQL, Data) ->
         {error, {unrecoverable_error, invalid_request}}
     end.
 
-on_get_status(_InstId, #{poolname := Pool} = State) ->
-    case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of
+on_get_status(_InstId, #{pool_name := PoolName} = State) ->
+    case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of
         true ->
             case do_check_prepares(State) of
                 ok ->

@@ -280,7 +277,7 @@ do_get_status(Conn) ->
 
 do_check_prepares(#{prepare_sql := Prepares}) when is_map(Prepares) ->
     ok;
-do_check_prepares(State = #{poolname := PoolName, prepare_sql := {error, Prepares}}) ->
+do_check_prepares(State = #{pool_name := PoolName, prepare_sql := {error, Prepares}}) ->
     %% retry to prepare
     case prepare_sql(Prepares, PoolName) of
         {ok, Sts} ->

@@ -358,7 +355,7 @@ parse_prepare_sql([], Prepares, Tokens) ->
         params_tokens => Tokens
     }.
 
-init_prepare(State = #{prepare_sql := Prepares, poolname := PoolName}) ->
+init_prepare(State = #{prepare_sql := Prepares, pool_name := PoolName}) ->
     case maps:size(Prepares) of
         0 ->
             State;

@@ -389,17 +386,17 @@ prepare_sql(Prepares, PoolName) ->
     end.
 
 do_prepare_sql(Prepares, PoolName) ->
-    do_prepare_sql(ecpool:workers(PoolName), Prepares, PoolName, #{}).
+    do_prepare_sql(ecpool:workers(PoolName), Prepares, #{}).
 
-do_prepare_sql([{_Name, Worker} | T], Prepares, PoolName, _LastSts) ->
+do_prepare_sql([{_Name, Worker} | T], Prepares, _LastSts) ->
     {ok, Conn} = ecpool_worker:client(Worker),
     case prepare_sql_to_conn(Conn, Prepares) of
         {ok, Sts} ->
-            do_prepare_sql(T, Prepares, PoolName, Sts);
+            do_prepare_sql(T, Prepares, Sts);
         Error ->
             Error
     end;
-do_prepare_sql([], _Prepares, _PoolName, LastSts) ->
+do_prepare_sql([], _Prepares, LastSts) ->
     {ok, LastSts}.
 
 prepare_sql_to_conn(Conn, Prepares) ->
@@ -153,11 +153,10 @@ on_start(
             false ->
                 [{ssl, false}]
         end ++ [{sentinel, maps:get(sentinel, Config, undefined)}],
-    PoolName = InstId,
-    State = #{poolname => PoolName, type => Type},
+    State = #{pool_name => InstId, type => Type},
     case Type of
         cluster ->
-            case eredis_cluster:start_pool(PoolName, Opts ++ [{options, Options}]) of
+            case eredis_cluster:start_pool(InstId, Opts ++ [{options, Options}]) of
                 {ok, _} ->
                     {ok, State};
                 {ok, _, _} ->

@@ -166,22 +165,20 @@ on_start(
                     {error, Reason}
             end;
         _ ->
-            case
-                emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ [{options, Options}])
-            of
+            case emqx_resource_pool:start(InstId, ?MODULE, Opts ++ [{options, Options}]) of
                 ok -> {ok, State};
                 {error, Reason} -> {error, Reason}
             end
     end.
 
-on_stop(InstId, #{poolname := PoolName, type := Type}) ->
+on_stop(InstId, #{pool_name := PoolName, type := Type}) ->
     ?SLOG(info, #{
         msg => "stopping_redis_connector",
         connector => InstId
     }),
     case Type of
         cluster -> eredis_cluster:stop_pool(PoolName);
-        _ -> emqx_plugin_libs_pool:stop_pool(PoolName)
+        _ -> emqx_resource_pool:stop(PoolName)
     end.
 
 on_query(InstId, {cmd, _} = Query, State) ->

@@ -189,7 +186,7 @@ on_query(InstId, {cmd, _} = Query, State) ->
 on_query(InstId, {cmds, _} = Query, State) ->
     do_query(InstId, Query, State).
 
-do_query(InstId, Query, #{poolname := PoolName, type := Type} = State) ->
+do_query(InstId, Query, #{pool_name := PoolName, type := Type} = State) ->
     ?TRACE(
         "QUERY",
         "redis_connector_received",

@@ -227,7 +224,7 @@ is_unrecoverable_error({error, invalid_cluster_command}) ->
 is_unrecoverable_error(_) ->
     false.
 
-on_get_status(_InstId, #{type := cluster, poolname := PoolName}) ->
+on_get_status(_InstId, #{type := cluster, pool_name := PoolName}) ->
     case eredis_cluster:pool_exists(PoolName) of
         true ->
             Health = eredis_cluster:ping_all(PoolName),

@@ -235,8 +232,8 @@ on_get_status(_InstId, #{type := cluster, poolname := PoolName}) ->
         false ->
             disconnected
     end;
-on_get_status(_InstId, #{poolname := Pool}) ->
-    Health = emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1),
+on_get_status(_InstId, #{pool_name := PoolName}) ->
+    Health = emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1),
     status_result(Health).
 
 do_get_status(Conn) ->
@@ -64,15 +64,15 @@ t_lifecycle(_Config) ->
         mongo_config()
     ).
 
-perform_lifecycle_check(PoolName, InitialConfig) ->
+perform_lifecycle_check(ResourceId, InitialConfig) ->
     {ok, #{config := CheckedConfig}} =
         emqx_resource:check_config(?MONGO_RESOURCE_MOD, InitialConfig),
     {ok, #{
-        state := #{poolname := ReturnedPoolName} = State,
+        state := #{pool_name := PoolName} = State,
         status := InitialStatus
     }} =
         emqx_resource:create_local(
-            PoolName,
+            ResourceId,
             ?CONNECTOR_RESOURCE_GROUP,
             ?MONGO_RESOURCE_MOD,
             CheckedConfig,

@@ -84,39 +84,39 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
         state := State,
         status := InitialStatus
     }} =
-        emqx_resource:get_instance(PoolName),
-    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
+        emqx_resource:get_instance(ResourceId),
+    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
     % % Perform query as further check that the resource is working as expected
-    ?assertMatch({ok, []}, emqx_resource:query(PoolName, test_query_find())),
-    ?assertMatch({ok, undefined}, emqx_resource:query(PoolName, test_query_find_one())),
-    ?assertEqual(ok, emqx_resource:stop(PoolName)),
+    ?assertMatch({ok, []}, emqx_resource:query(ResourceId, test_query_find())),
+    ?assertMatch({ok, undefined}, emqx_resource:query(ResourceId, test_query_find_one())),
+    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
     % Resource will be listed still, but state will be changed and healthcheck will fail
     % as the worker no longer exists.
     {ok, ?CONNECTOR_RESOURCE_GROUP, #{
         state := State,
         status := StoppedStatus
     }} =
-        emqx_resource:get_instance(PoolName),
+        emqx_resource:get_instance(ResourceId),
     ?assertEqual(stopped, StoppedStatus),
-    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)),
+    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)),
     % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
-    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
+    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
     % Can call stop/1 again on an already stopped instance
-    ?assertEqual(ok, emqx_resource:stop(PoolName)),
+    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
     % Make sure it can be restarted and the healthchecks and queries work properly
-    ?assertEqual(ok, emqx_resource:restart(PoolName)),
+    ?assertEqual(ok, emqx_resource:restart(ResourceId)),
     % async restart, need to wait resource
     timer:sleep(500),
     {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
-        emqx_resource:get_instance(PoolName),
-    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
-    ?assertMatch({ok, []}, emqx_resource:query(PoolName, test_query_find())),
-    ?assertMatch({ok, undefined}, emqx_resource:query(PoolName, test_query_find_one())),
+        emqx_resource:get_instance(ResourceId),
+    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
+    ?assertMatch({ok, []}, emqx_resource:query(ResourceId, test_query_find())),
+    ?assertMatch({ok, undefined}, emqx_resource:query(ResourceId, test_query_find_one())),
     % Stop and remove the resource in one go.
-    ?assertEqual(ok, emqx_resource:remove_local(PoolName)),
-    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
+    ?assertEqual(ok, emqx_resource:remove_local(ResourceId)),
+    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
     % Should not even be able to get the resource data out of ets now unlike just stopping.
-    ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)).
+    ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)).
 
 % %%------------------------------------------------------------------------------
 % %% Helpers
@@ -64,14 +64,14 @@ t_lifecycle(_Config) ->
         mysql_config()
     ).
 
-perform_lifecycle_check(PoolName, InitialConfig) ->
+perform_lifecycle_check(ResourceId, InitialConfig) ->
     {ok, #{config := CheckedConfig}} =
         emqx_resource:check_config(?MYSQL_RESOURCE_MOD, InitialConfig),
     {ok, #{
-        state := #{poolname := ReturnedPoolName} = State,
+        state := #{pool_name := PoolName} = State,
         status := InitialStatus
     }} = emqx_resource:create_local(
-        PoolName,
+        ResourceId,
         ?CONNECTOR_RESOURCE_GROUP,
         ?MYSQL_RESOURCE_MOD,
         CheckedConfig,

@@ -83,53 +83,53 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
         state := State,
         status := InitialStatus
     }} =
-        emqx_resource:get_instance(PoolName),
-    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
+        emqx_resource:get_instance(ResourceId),
+    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
     % % Perform query as further check that the resource is working as expected
-    ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_no_params())),
-    ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_with_params())),
+    ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_no_params())),
+    ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_with_params())),
     ?assertMatch(
         {ok, _, [[1]]},
         emqx_resource:query(
-            PoolName,
+            ResourceId,
             test_query_with_params_and_timeout()
         )
     ),
-    ?assertEqual(ok, emqx_resource:stop(PoolName)),
+    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
     % Resource will be listed still, but state will be changed and healthcheck will fail
     % as the worker no longer exists.
     {ok, ?CONNECTOR_RESOURCE_GROUP, #{
         state := State,
         status := StoppedStatus
     }} =
-        emqx_resource:get_instance(PoolName),
+        emqx_resource:get_instance(ResourceId),
     ?assertEqual(stopped, StoppedStatus),
-    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)),
+    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)),
     % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
-    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
+    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
     % Can call stop/1 again on an already stopped instance
-    ?assertEqual(ok, emqx_resource:stop(PoolName)),
+    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
     % Make sure it can be restarted and the healthchecks and queries work properly
-    ?assertEqual(ok, emqx_resource:restart(PoolName)),
+    ?assertEqual(ok, emqx_resource:restart(ResourceId)),
     % async restart, need to wait resource
     timer:sleep(500),
     {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
-        emqx_resource:get_instance(PoolName),
-    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
-    ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_no_params())),
-    ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_with_params())),
+        emqx_resource:get_instance(ResourceId),
+    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
+    ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_no_params())),
+    ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_with_params())),
     ?assertMatch(
         {ok, _, [[1]]},
         emqx_resource:query(
-            PoolName,
+            ResourceId,
             test_query_with_params_and_timeout()
        )
    ),
     % Stop and remove the resource in one go.
-    ?assertEqual(ok, emqx_resource:remove_local(PoolName)),
-    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
+    ?assertEqual(ok, emqx_resource:remove_local(ResourceId)),
+    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
     % Should not even be able to get the resource data out of ets now unlike just stopping.
-    ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)).
+    ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)).
 
 % %%------------------------------------------------------------------------------
 % %% Helpers
@@ -64,15 +64,15 @@ t_lifecycle(_Config) ->
         pgsql_config()
     ).
 
-perform_lifecycle_check(PoolName, InitialConfig) ->
+perform_lifecycle_check(ResourceId, InitialConfig) ->
     {ok, #{config := CheckedConfig}} =
         emqx_resource:check_config(?PGSQL_RESOURCE_MOD, InitialConfig),
     {ok, #{
-        state := #{poolname := ReturnedPoolName} = State,
+        state := #{pool_name := PoolName} = State,
         status := InitialStatus
     }} =
         emqx_resource:create_local(
-            PoolName,
+            ResourceId,
             ?CONNECTOR_RESOURCE_GROUP,
             ?PGSQL_RESOURCE_MOD,
             CheckedConfig,

@@ -84,39 +84,39 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
         state := State,
         status := InitialStatus
     }} =
-        emqx_resource:get_instance(PoolName),
-    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
+        emqx_resource:get_instance(ResourceId),
+    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
     % % Perform query as further check that the resource is working as expected
-    ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_no_params())),
-    ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_with_params())),
-    ?assertEqual(ok, emqx_resource:stop(PoolName)),
+    ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_no_params())),
+    ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_with_params())),
+    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
     % Resource will be listed still, but state will be changed and healthcheck will fail
     % as the worker no longer exists.
     {ok, ?CONNECTOR_RESOURCE_GROUP, #{
         state := State,
         status := StoppedStatus
     }} =
-        emqx_resource:get_instance(PoolName),
+        emqx_resource:get_instance(ResourceId),
     ?assertEqual(stopped, StoppedStatus),
-    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)),
+    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)),
     % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
-    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
+    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
     % Can call stop/1 again on an already stopped instance
-    ?assertEqual(ok, emqx_resource:stop(PoolName)),
+    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
     % Make sure it can be restarted and the healthchecks and queries work properly
-    ?assertEqual(ok, emqx_resource:restart(PoolName)),
+    ?assertEqual(ok, emqx_resource:restart(ResourceId)),
     % async restart, need to wait resource
     timer:sleep(500),
     {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
-        emqx_resource:get_instance(PoolName),
-    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
-    ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_no_params())),
-    ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_with_params())),
+        emqx_resource:get_instance(ResourceId),
+    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
+    ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_no_params())),
+    ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_with_params())),
     % Stop and remove the resource in one go.
-    ?assertEqual(ok, emqx_resource:remove_local(PoolName)),
-    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
+    ?assertEqual(ok, emqx_resource:remove_local(ResourceId)),
+    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
     % Should not even be able to get the resource data out of ets now unlike just stopping.
-    ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)).
+    ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)).
 
 % %%------------------------------------------------------------------------------
 % %% Helpers
@@ -102,14 +102,14 @@ t_sentinel_lifecycle(_Config) ->
         [<<"PING">>]
     ).
 
-perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) ->
+perform_lifecycle_check(ResourceId, InitialConfig, RedisCommand) ->
     {ok, #{config := CheckedConfig}} =
         emqx_resource:check_config(?REDIS_RESOURCE_MOD, InitialConfig),
     {ok, #{
-        state := #{poolname := ReturnedPoolName} = State,
+        state := #{pool_name := PoolName} = State,
         status := InitialStatus
     }} = emqx_resource:create_local(
-        PoolName,
+        ResourceId,
         ?CONNECTOR_RESOURCE_GROUP,
         ?REDIS_RESOURCE_MOD,
         CheckedConfig,

@@ -121,49 +121,49 @@ perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) ->
         state := State,
         status := InitialStatus
     }} =
-        emqx_resource:get_instance(PoolName),
-    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
+        emqx_resource:get_instance(ResourceId),
+    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
     % Perform query as further check that the resource is working as expected
-    ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})),
+    ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(ResourceId, {cmd, RedisCommand})),
     ?assertEqual(
         {ok, [{ok, <<"PONG">>}, {ok, <<"PONG">>}]},
-        emqx_resource:query(PoolName, {cmds, [RedisCommand, RedisCommand]})
+        emqx_resource:query(ResourceId, {cmds, [RedisCommand, RedisCommand]})
     ),
     ?assertMatch(
         {error, {unrecoverable_error, [{ok, <<"PONG">>}, {error, _}]}},
         emqx_resource:query(
-            PoolName,
+            ResourceId,
             {cmds, [RedisCommand, [<<"INVALID_COMMAND">>]]},
             #{timeout => 500}
        )
    ),
-    ?assertEqual(ok, emqx_resource:stop(PoolName)),
+    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
     % Resource will be listed still, but state will be changed and healthcheck will fail
     % as the worker no longer exists.
     {ok, ?CONNECTOR_RESOURCE_GROUP, #{
         state := State,
         status := StoppedStatus
     }} =
-        emqx_resource:get_instance(PoolName),
+        emqx_resource:get_instance(ResourceId),
     ?assertEqual(stopped, StoppedStatus),
-    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)),
+    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)),
     % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
-    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
+    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
     % Can call stop/1 again on an already stopped instance
-    ?assertEqual(ok, emqx_resource:stop(PoolName)),
+    ?assertEqual(ok, emqx_resource:stop(ResourceId)),
     % Make sure it can be restarted and the healthchecks and queries work properly
-    ?assertEqual(ok, emqx_resource:restart(PoolName)),
+    ?assertEqual(ok, emqx_resource:restart(ResourceId)),
     % async restart, need to wait resource
     timer:sleep(500),
     {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
-        emqx_resource:get_instance(PoolName),
-    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
-    ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})),
+        emqx_resource:get_instance(ResourceId),
+    ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)),
+    ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(ResourceId, {cmd, RedisCommand})),
     % Stop and remove the resource in one go.
-    ?assertEqual(ok, emqx_resource:remove_local(PoolName)),
-    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
+    ?assertEqual(ok, emqx_resource:remove_local(ResourceId)),
+    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)),
     % Should not even be able to get the resource data out of ets now unlike just stopping.
-    ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)).
+    ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)).
 
 % %%------------------------------------------------------------------------------
 % %% Helpers
@@ -16,22 +16,13 @@
 
 -module(emqx_dashboard).
 
 -define(APP, ?MODULE).
 
 -export([
     start_listeners/0,
     start_listeners/1,
     stop_listeners/1,
     stop_listeners/0,
-    list_listeners/0
-]).
-
--export([
-    init_i18n/2,
-    init_i18n/0,
-    get_i18n/0,
-    i18n_file/0,
-    clear_i18n/0
+    list_listeners/0,
+    wait_for_listeners/0
 ]).
 
 %% Authorization

@@ -90,30 +81,34 @@ start_listeners(Listeners) ->
         dispatch => Dispatch,
         middlewares => [?EMQX_MIDDLE, cowboy_router, cowboy_handler]
     },
-    Res =
+    {OkListeners, ErrListeners} =
         lists:foldl(
-            fun({Name, Protocol, Bind, RanchOptions, ProtoOpts}, Acc) ->
+            fun({Name, Protocol, Bind, RanchOptions, ProtoOpts}, {OkAcc, ErrAcc}) ->
                 Minirest = BaseMinirest#{protocol => Protocol, protocol_options => ProtoOpts},
                 case minirest:start(Name, RanchOptions, Minirest) of
                     {ok, _} ->
                         ?ULOG("Listener ~ts on ~ts started.~n", [
                             Name, emqx_listeners:format_bind(Bind)
                        ]),
-                        Acc;
+                        {[Name | OkAcc], ErrAcc};
                     {error, _Reason} ->
                         %% Don't record the reason: minirest already does (it would add too much log noise).
-                        [Name | Acc]
+                        {OkAcc, [Name | ErrAcc]}
                 end
            end,
-            [],
+            {[], []},
            listeners(Listeners)
        ),
-    case Res of
-        [] -> ok;
-        _ -> {error, Res}
+    case ErrListeners of
+        [] ->
+            optvar:set(emqx_dashboard_listeners_ready, OkListeners),
+            ok;
+        _ ->
+            {error, ErrListeners}
     end.
 
 stop_listeners(Listeners) ->
+    optvar:unset(emqx_dashboard_listeners_ready),
     [
         begin
             case minirest:stop(Name) of

@@ -129,23 +124,8 @@ stop_listeners(Listeners) ->
     ],
     ok.
 
-get_i18n() ->
-    application:get_env(emqx_dashboard, i18n).
-
-init_i18n(File, Lang) when is_atom(Lang) ->
-    init_i18n(File, atom_to_binary(Lang));
-init_i18n(File, Lang) when is_binary(Lang) ->
-    Cache = hocon_schema:new_desc_cache(File),
-    application:set_env(emqx_dashboard, i18n, #{lang => Lang, cache => Cache}).
-
-clear_i18n() ->
-    case application:get_env(emqx_dashboard, i18n) of
-        {ok, #{cache := Cache}} ->
-            hocon_schema:delete_desc_cache(Cache),
-            application:unset_env(emqx_dashboard, i18n);
-        undefined ->
-            ok
-    end.
+wait_for_listeners() ->
+    optvar:read(emqx_dashboard_listeners_ready).
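`start_listeners/1` now publishes the successfully started listener names through an optvar, `stop_listeners/1` unsets it, and `wait_for_listeners/0` blocks until it is set. A small sketch of that handshake, using only the optvar calls that appear in this patch (`set/2`, `read/1`):

%% the reader blocks in optvar:read/1 until the writer publishes:
demo() ->
    Key = emqx_dashboard_listeners_ready,
    spawn(fun() ->
        Names = optvar:read(Key),
        io:format("listeners ready: ~p~n", [Names])
    end),
    optvar:set(Key, [http, https]),  %% unblocks the reader
    ok.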
 
 %%--------------------------------------------------------------------
 %% internal

@@ -187,11 +167,6 @@ ip_port(error, Opts) -> {Opts#{port => 18083}, 18083};
 ip_port({Port, Opts}, _) when is_integer(Port) -> {Opts#{port => Port}, Port};
 ip_port({{IP, Port}, Opts}, _) -> {Opts#{port => Port, ip => IP}, {IP, Port}}.
 
-init_i18n() ->
-    File = i18n_file(),
-    Lang = emqx_conf:get([dashboard, i18n_lang], en),
-    init_i18n(File, Lang).
-
 ranch_opts(Options) ->
     Keys = [
         handshake_timeout,

@@ -255,12 +230,6 @@ return_unauthorized(Code, Message) ->
     },
     #{code => Code, message => Message}}.
 
-i18n_file() ->
-    case application:get_env(emqx_dashboard, i18n_file) of
-        undefined -> filename:join([code:priv_dir(emqx_dashboard), "i18n.conf"]);
-        {ok, File} -> File
-    end.
-
 listeners() ->
     emqx_conf:get([dashboard, listeners], #{}).
@@ -18,7 +18,6 @@
 
 -behaviour(minirest_api).
 
--include("emqx_dashboard.hrl").
 -include_lib("hocon/include/hoconsc.hrl").
 -include_lib("emqx/include/logger.hrl").
 -include_lib("typerefl/include/types.hrl").
@@ -0,0 +1,110 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%     http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+
+%% @doc This module is used to cache the descriptions of the configuration items.
+-module(emqx_dashboard_desc_cache).
+
+-export([init/0]).
+
+%% internal exports
+-export([load_desc/2, lookup/4, lookup/5]).
+
+-include_lib("emqx/include/logger.hrl").
+
+%% @doc Global ETS table that caches the descriptions of the configuration items.
+%% The table is owned by emqx_dashboard_sup, the root supervisor of emqx_dashboard.
+%% The cache is initialized with the default language (English) and all the
+%% desc.<lang>.hocon files in the www/static directory (extracted from the dashboard package).
+init() ->
+    ok = ensure_app_loaded(emqx_dashboard),
+    PrivDir = code:priv_dir(emqx_dashboard),
+    EngDesc = filename:join([PrivDir, "desc.en.hocon"]),
+    WwwStaticDir = filename:join([PrivDir, "www", "static"]),
+    OtherLangDesc0 = filelib:wildcard("desc.*.hocon", WwwStaticDir),
+    OtherLangDesc = lists:map(fun(F) -> filename:join([WwwStaticDir, F]) end, OtherLangDesc0),
+    Files = [EngDesc | OtherLangDesc],
+    ?MODULE = ets:new(?MODULE, [named_table, public, set, {read_concurrency, true}]),
+    ok = lists:foreach(fun(F) -> load_desc(?MODULE, F) end, Files).
+
+%% @doc Load the descriptions of the configuration items from the given file.
+%% Loading is incremental, so it can be called multiple times.
+%% NOTE: no garbage collection is done, because stale entries are harmless.
+load_desc(EtsTab, File) ->
+    ?SLOG(info, #{msg => "loading desc", file => File}),
+    {ok, Descs} = hocon:load(File),
+    ["desc", Lang, "hocon"] = string:tokens(filename:basename(File), "."),
+    Insert = fun(Namespace, Id, Tag, Text) ->
+        Key = {bin(Lang), bin(Namespace), bin(Id), bin(Tag)},
+        true = ets:insert(EtsTab, {Key, bin(Text)}),
+        ok
+    end,
+    walk_ns(Insert, maps:to_list(Descs)).
+
+%% @doc Look up the description of a configuration item in the global cache.
+lookup(Lang, Namespace, Id, Tag) ->
+    lookup(?MODULE, Lang, Namespace, Id, Tag).
+
+%% @doc Look up the description of a configuration item in the given cache.
+lookup(EtsTab, Lang0, Namespace, Id, Tag) ->
+    Lang = bin(Lang0),
+    try ets:lookup(EtsTab, {Lang, bin(Namespace), bin(Id), bin(Tag)}) of
+        [{_, Desc}] ->
+            Desc;
+        [] when Lang =/= <<"en">> ->
+            %% fallback to English
+            lookup(EtsTab, <<"en">>, Namespace, Id, Tag);
+        _ ->
+            %% undefined but not <<>>
+            undefined
+    catch
+        error:badarg ->
+            %% schema is not initialized
+            %% most likely in test cases
+            undefined
+    end.
+
+%% The desc files are of names like:
+%% desc.en.hocon or desc.zh.hocon
+%% And with content like:
+%% namespace.id.desc = "description"
+%% namespace.id.label = "label"
+walk_ns(_Insert, []) ->
+    ok;
+walk_ns(Insert, [{Namespace, Ids} | Rest]) ->
+    walk_id(Insert, Namespace, maps:to_list(Ids)),
+    walk_ns(Insert, Rest).
+
+walk_id(_Insert, _Namespace, []) ->
+    ok;
+walk_id(Insert, Namespace, [{Id, Tags} | Rest]) ->
+    walk_tag(Insert, Namespace, Id, maps:to_list(Tags)),
+    walk_id(Insert, Namespace, Rest).
+
+walk_tag(_Insert, _Namespace, _Id, []) ->
+    ok;
+walk_tag(Insert, Namespace, Id, [{Tag, Text} | Rest]) ->
+    ok = Insert(Namespace, Id, Tag, Text),
+    walk_tag(Insert, Namespace, Id, Rest).
+
+bin(A) when is_atom(A) -> atom_to_binary(A, utf8);
+bin(B) when is_binary(B) -> B;
+bin(L) when is_list(L) -> list_to_binary(L).
+
+ensure_app_loaded(App) ->
+    case application:load(App) of
+        ok -> ok;
+        {error, {already_loaded, _}} -> ok
+    end.
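A hedged usage sketch for the cache above (the namespace and id are illustrative and may not exist): `lookup/4` tries the requested language first, falls back to English, and returns `undefined` when neither language has the entry or the table has not been created yet.

ok = emqx_dashboard_desc_cache:init(),
%% tries the {<<"zh">>, ...} key first, then the <<"en">> fallback:
Desc = emqx_dashboard_desc_cache:lookup(zh, emqx_schema, sys_msg_interval, desc),
true = (Desc =:= undefined) orelse is_binary(Desc).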
@ -15,9 +15,11 @@
|
|||
%%--------------------------------------------------------------------
|
||||
-module(emqx_dashboard_listener).
|
||||
|
||||
-include_lib("emqx/include/logger.hrl").
|
||||
-behaviour(emqx_config_handler).
|
||||
|
||||
-include_lib("emqx/include/logger.hrl").
|
||||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||
|
||||
%% API
|
||||
-export([add_handler/0, remove_handler/0]).
|
||||
-export([pre_config_update/3, post_config_update/5]).
|
||||
|
@ -54,12 +56,10 @@ init([]) ->
|
|||
{ok, undefined, {continue, regenerate_dispatch}}.
|
||||
|
||||
handle_continue(regenerate_dispatch, _State) ->
|
||||
NewState = regenerate_minirest_dispatch(),
|
||||
{noreply, NewState, hibernate}.
|
||||
%% initialize the swagger dispatches
|
||||
ready = regenerate_minirest_dispatch(),
|
||||
{noreply, ready, hibernate}.
handle_call(is_ready, _From, retry) ->
    NewState = regenerate_minirest_dispatch(),
    {reply, NewState, NewState, hibernate};
handle_call(is_ready, _From, State) ->
    {reply, State, State, hibernate};
handle_call(_Request, _From, State) ->

@ -68,6 +68,9 @@ handle_call(_Request, _From, State) ->
handle_cast(_Request, State) ->
    {noreply, State, hibernate}.

handle_info(i18n_lang_changed, _State) ->
    NewState = regenerate_minirest_dispatch(),
    {noreply, NewState, hibernate};
handle_info({update_listeners, OldListeners, NewListeners}, _State) ->
    ok = emqx_dashboard:stop_listeners(OldListeners),
    ok = emqx_dashboard:start_listeners(NewListeners),

@ -83,29 +86,26 @@ terminate(_Reason, _State) ->
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%% generate dispatch is very slow.
%% generate dispatch is very slow, takes about 1s.
regenerate_minirest_dispatch() ->
    try
        emqx_dashboard:init_i18n(),
        lists:foreach(
            fun(Listener) ->
                minirest:update_dispatch(element(1, Listener))
            end,
            emqx_dashboard:list_listeners()
        ),
        ready
    catch
        T:E:S ->
            ?SLOG(error, #{
                msg => "regenerate_minirest_dispatch_failed",
                reason => E,
                type => T,
                stacktrace => S
    %% optvar:read waits for the var to be set
    Names = emqx_dashboard:wait_for_listeners(),
    {Time, ok} = timer:tc(fun() -> do_regenerate_minirest_dispatch(Names) end),
    Lang = emqx:get_config([dashboard, i18n_lang]),
    ?tp(info, regenerate_minirest_dispatch, #{
        elapsed => erlang:convert_time_unit(Time, microsecond, millisecond),
        listeners => Names,
        i18n_lang => Lang
            }),
            retry
    after
        emqx_dashboard:clear_i18n()
    end.
    }),
    ready.

do_regenerate_minirest_dispatch(Names) ->
    lists:foreach(
        fun(Name) ->
            ok = minirest:update_dispatch(Name)
        end,
        Names
    ).

add_handler() ->
    Roots = emqx_dashboard_schema:roots(),

@ -117,6 +117,12 @@ remove_handler() ->
    ok = emqx_config_handler:remove_handler(Roots),
    ok.

pre_config_update(_Path, {change_i18n_lang, NewLang}, RawConf) ->
    %% e.g. emqx_conf:update([dashboard], {change_i18n_lang, zh}, #{}).
    %% TODO: check if there is such a language (all languages are cached in emqx_dashboard_desc_cache)
    Update = #{<<"i18n_lang">> => NewLang},
    NewConf = emqx_utils_maps:deep_merge(RawConf, Update),
    {ok, NewConf};
pre_config_update(_Path, UpdateConf0, RawConf) ->
    UpdateConf = remove_sensitive_data(UpdateConf0),
    NewConf = emqx_utils_maps:deep_merge(RawConf, UpdateConf),

@ -139,6 +145,8 @@ remove_sensitive_data(Conf0) ->
            Conf1
    end.

post_config_update(_, {change_i18n_lang, _}, _NewConf, _OldConf, _AppEnvs) ->
    delay_job(i18n_lang_changed);
post_config_update(_, _Req, NewConf, OldConf, _AppEnvs) ->
    OldHttp = get_listener(http, OldConf),
    OldHttps = get_listener(https, OldConf),

@ -148,7 +156,12 @@ post_config_update(_, _Req, NewConf, OldConf, _AppEnvs) ->
    {StopHttps, StartHttps} = diff_listeners(https, OldHttps, NewHttps),
    Stop = maps:merge(StopHttp, StopHttps),
    Start = maps:merge(StartHttp, StartHttps),
    _ = erlang:send_after(500, ?MODULE, {update_listeners, Stop, Start}),
    delay_job({update_listeners, Stop, Start}).

%% in post_config_update, the config is not yet persisted to persistent_term
%% so we need to delegate the listener update to the gen_server a bit later
delay_job(Msg) ->
    _ = erlang:send_after(500, ?MODULE, Msg),
    ok.

get_listener(Type, Conf) ->

@ -233,6 +233,8 @@ cors(required) -> false;
cors(desc) -> ?DESC(cors);
cors(_) -> undefined.

%% TODO: change it to string type
%% It will be up to the dashboard package which languages to support
i18n_lang(type) -> ?ENUM([en, zh]);
i18n_lang(default) -> en;
i18n_lang('readOnly') -> true;

@ -0,0 +1,84 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------

%% This module is for the dashboard to retrieve the schema of hot config and bridges.
-module(emqx_dashboard_schema_api).

-behaviour(minirest_api).

-include_lib("hocon/include/hoconsc.hrl").

%% minirest API
-export([api_spec/0, paths/0, schema/1]).

-export([get_schema/2]).

-define(TAGS, [<<"dashboard">>]).
-define(BAD_REQUEST, 'BAD_REQUEST').

%%--------------------------------------------------------------------
%% minirest API and schema
%%--------------------------------------------------------------------

api_spec() ->
    emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}).

paths() ->
    ["/schemas/:name"].

%% This is a rather hidden API, so we don't need to add translations for the description.
schema("/schemas/:name") ->
    #{
        'operationId' => get_schema,
        get => #{
            parameters => [
                {name, hoconsc:mk(hoconsc:enum([hotconf, bridges]), #{in => path})},
                {lang,
                    hoconsc:mk(typerefl:string(), #{
                        in => query,
                        default => <<"en">>,
                        desc => <<"The language of the schema.">>
                    })}
            ],
            desc => <<
                "Get the schema JSON of the specified name. "
                "NOTE: you should never need to make use of this API "
                "unless you are building a multi-lang dashboard."
            >>,
            tags => ?TAGS,
            security => [],
            responses => #{
                200 => hoconsc:mk(binary(), #{desc => <<"The JSON schema of the specified name.">>})
            }
        }
    }.

%%--------------------------------------------------------------------
%% API Handler funcs
%%--------------------------------------------------------------------

get_schema(get, #{
    bindings := #{name := Name},
    query_string := #{<<"lang">> := Lang}
}) ->
    {200, gen_schema(Name, iolist_to_binary(Lang))};
get_schema(get, _) ->
    {400, ?BAD_REQUEST, <<"unknown">>}.

gen_schema(hotconf, Lang) ->
    emqx_conf:hotconf_schema_json(Lang);
gen_schema(bridges, Lang) ->
    emqx_conf:bridge_schema_json(Lang).
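
%% Illustrative request (default dashboard listener and API root assumed):
%%   curl "http://127.0.0.1:18083/api/v5/schemas/hotconf?lang=zh"
%% returns the hot-config schema JSON rendered with zh descriptions.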

@ -28,6 +28,8 @@ start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

init([]) ->
    %% supervisor owns the cache table
    ok = emqx_dashboard_desc_cache:init(),
    {ok,
        {{one_for_one, 5, 100}, [
            ?CHILD(emqx_dashboard_listener, brutal_kill),

@ -84,7 +84,8 @@
-type spec_opts() :: #{
    check_schema => boolean() | filter(),
    translate_body => boolean(),
    schema_converter => fun((hocon_schema:schema(), Module :: atom()) -> map())
    schema_converter => fun((hocon_schema:schema(), Module :: atom()) -> map()),
    i18n_lang => atom() | string() | binary()
}.

-type route_path() :: string() | binary().

@ -237,8 +238,16 @@ parse_spec_ref(Module, Path, Options) ->
            erlang:apply(Module, schema, [Path])
        %% better error message
        catch
            error:Reason ->
                throw({error, #{mfa => {Module, schema, [Path]}, reason => Reason}})
            error:Reason:Stacktrace ->
                %% raise a new error with the same stacktrace.
                %% it's a bug if this happens.
                %% i.e. if a path is listed in the spec but the module doesn't
                %% implement it or crashes when trying to build the schema.
                erlang:raise(
                    error,
                    #{mfa => {Module, schema, [Path]}, reason => Reason},
                    Stacktrace
                )
        end,
    {Specs, Refs} = maps:fold(
        fun(Method, Meta, {Acc, RefsAcc}) ->

@ -333,11 +342,11 @@ check_request_body(#{body := Body}, Spec, _Module, _CheckFun, false) when is_map

%% tags, description, summary, security, deprecated
meta_to_spec(Meta, Module, Options) ->
    {Params, Refs1} = parameters(maps:get(parameters, Meta, []), Module),
    {Params, Refs1} = parameters(maps:get(parameters, Meta, []), Module, Options),
    {RequestBody, Refs2} = request_body(maps:get('requestBody', Meta, []), Module, Options),
    {Responses, Refs3} = responses(maps:get(responses, Meta, #{}), Module, Options),
    {
        generate_method_desc(to_spec(Meta, Params, RequestBody, Responses)),
        generate_method_desc(to_spec(Meta, Params, RequestBody, Responses), Options),
        lists:usort(Refs1 ++ Refs2 ++ Refs3)
    }.

@ -348,13 +357,13 @@ to_spec(Meta, Params, RequestBody, Responses) ->
    Spec = to_spec(Meta, Params, [], Responses),
    maps:put('requestBody', RequestBody, Spec).

generate_method_desc(Spec = #{desc := _Desc}) ->
    Spec1 = trans_description(maps:remove(desc, Spec), Spec),
generate_method_desc(Spec = #{desc := _Desc}, Options) ->
    Spec1 = trans_description(maps:remove(desc, Spec), Spec, Options),
    trans_tags(Spec1);
generate_method_desc(Spec = #{description := _Desc}) ->
    Spec1 = trans_description(Spec, Spec),
generate_method_desc(Spec = #{description := _Desc}, Options) ->
    Spec1 = trans_description(Spec, Spec, Options),
    trans_tags(Spec1);
generate_method_desc(Spec) ->
generate_method_desc(Spec, _Options) ->
    trans_tags(Spec).

trans_tags(Spec = #{tags := Tags}) ->

@ -362,7 +371,7 @@ trans_tags(Spec = #{tags := Tags}) ->
trans_tags(Spec) ->
    Spec.

parameters(Params, Module) ->
parameters(Params, Module, Options) ->
    {SpecList, AllRefs} =
        lists:foldl(
            fun(Param, {Acc, RefsAcc}) ->

@ -388,7 +397,7 @@ parameters(Params, Module) ->
                        Type
                    ),
                    Spec1 = trans_required(Spec0, Required, In),
                    Spec2 = trans_description(Spec1, Type),
                    Spec2 = trans_description(Spec1, Type, Options),
                    {[Spec2 | Acc], Refs ++ RefsAcc}
                end
            end,

@ -432,38 +441,38 @@ trans_required(Spec, true, _) -> Spec#{required => true};
trans_required(Spec, _, path) -> Spec#{required => true};
trans_required(Spec, _, _) -> Spec.

trans_desc(Init, Hocon, Func, Name) ->
    Spec0 = trans_description(Init, Hocon),
trans_desc(Init, Hocon, Func, Name, Options) ->
    Spec0 = trans_description(Init, Hocon, Options),
    case Func =:= fun hocon_schema_to_spec/2 of
        true ->
            Spec0;
        false ->
            Spec1 = trans_label(Spec0, Hocon, Name),
            Spec1 = trans_label(Spec0, Hocon, Name, Options),
            case Spec1 of
                #{description := _} -> Spec1;
                _ -> Spec1#{description => <<Name/binary, " Description">>}
            end
    end.

trans_description(Spec, Hocon) ->
trans_description(Spec, Hocon, Options) ->
    Desc =
        case desc_struct(Hocon) of
            undefined -> undefined;
            ?DESC(_, _) = Struct -> get_i18n(<<"desc">>, Struct, undefined);
            Struct -> to_bin(Struct)
            ?DESC(_, _) = Struct -> get_i18n(<<"desc">>, Struct, undefined, Options);
            Text -> to_bin(Text)
        end,
    case Desc of
        undefined ->
            Spec;
        Desc ->
            Desc1 = binary:replace(Desc, [<<"\n">>], <<"<br/>">>, [global]),
            maybe_add_summary_from_label(Spec#{description => Desc1}, Hocon)
            maybe_add_summary_from_label(Spec#{description => Desc1}, Hocon, Options)
    end.

maybe_add_summary_from_label(Spec, Hocon) ->
maybe_add_summary_from_label(Spec, Hocon, Options) ->
    Label =
        case desc_struct(Hocon) of
            ?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, undefined);
            ?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, undefined, Options);
            _ -> undefined
        end,
    case Label of

@ -471,20 +480,30 @@ maybe_add_summary_from_label(Spec, Hocon) ->
        _ -> Spec#{summary => Label}
    end.

get_i18n(Key, Struct, Default) ->
    {ok, #{cache := Cache, lang := Lang}} = emqx_dashboard:get_i18n(),
    Desc = hocon_schema:resolve_schema(Struct, Cache),
    emqx_utils_maps:deep_get([Key, Lang], Desc, Default).
get_i18n(Tag, ?DESC(Namespace, Id), Default, Options) ->
    Lang = get_lang(Options),
    case emqx_dashboard_desc_cache:lookup(Lang, Namespace, Id, Tag) of
        undefined ->
            Default;
        Text ->
            Text
    end.

trans_label(Spec, Hocon, Default) ->
%% So far i18n_lang in options is only used at build time.
%% At runtime, it's still the global config which controls the language.
get_lang(#{i18n_lang := Lang}) -> Lang;
get_lang(_) -> emqx:get_config([dashboard, i18n_lang]).
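
%% For illustration, a build-time caller could pin the language via the
%% spec options shown in spec_opts() above:
%%   emqx_dashboard_swagger:spec(Module, #{check_schema => true, i18n_lang => <<"zh">>})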

trans_label(Spec, Hocon, Default, Options) ->
    Label =
        case desc_struct(Hocon) of
            ?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, Default);
            ?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, Default, Options);
            _ -> Default
        end,
    Spec#{label => Label}.

desc_struct(Hocon) ->
    R =
        case hocon_schema:field_schema(Hocon, desc) of
            undefined ->
                case hocon_schema:field_schema(Hocon, description) of

@ -493,7 +512,12 @@ desc_struct(Hocon) ->
                end;
            Struct ->
                Struct
        end.
        end,
    ensure_bin(R).

ensure_bin(undefined) -> undefined;
ensure_bin(?DESC(_Namespace, _Id) = Desc) -> Desc;
ensure_bin(Text) -> to_bin(Text).

get_ref_desc(?R_REF(Mod, Name)) ->
    case erlang:function_exported(Mod, desc, 1) of

@ -524,7 +548,7 @@ responses(Responses, Module, Options) ->
    {Spec, Refs}.

response(Status, ?DESC(_Mod, _Id) = Schema, {Acc, RefsAcc, Module, Options}) ->
    Desc = trans_description(#{}, #{desc => Schema}),
    Desc = trans_description(#{}, #{desc => Schema}, Options),
    {Acc#{integer_to_binary(Status) => Desc}, RefsAcc, Module, Options};
response(Status, Bin, {Acc, RefsAcc, Module, Options}) when is_binary(Bin) ->
    {Acc#{integer_to_binary(Status) => #{description => Bin}}, RefsAcc, Module, Options};

@ -553,7 +577,7 @@ response(Status, Schema, {Acc, RefsAcc, Module, Options}) ->
            Hocon = hocon_schema:field_schema(Schema, type),
            Examples = hocon_schema:field_schema(Schema, examples),
            {Spec, Refs} = hocon_schema_to_spec(Hocon, Module),
            Init = trans_description(#{}, Schema),
            Init = trans_description(#{}, Schema, Options),
            Content = content(Spec, Examples),
            {
                Acc#{integer_to_binary(Status) => Init#{<<"content">> => Content}},

@ -563,7 +587,7 @@ response(Status, Schema, {Acc, RefsAcc, Module, Options}) ->
            };
        false ->
            {Props, Refs} = parse_object(Schema, Module, Options),
            Init = trans_description(#{}, Schema),
            Init = trans_description(#{}, Schema, Options),
            Content = Init#{<<"content">> => content(Props)},
            {Acc#{integer_to_binary(Status) => Content}, Refs ++ RefsAcc, Module, Options}
    end.

@ -590,7 +614,7 @@ components(Options, [{Module, Field} | Refs], SpecAcc, SubRefsAcc) ->
%% parameters in ref only have one value, not array
components(Options, [{Module, Field, parameter} | Refs], SpecAcc, SubRefsAcc) ->
    Props = hocon_schema_fields(Module, Field),
    {[Param], SubRefs} = parameters(Props, Module),
    {[Param], SubRefs} = parameters(Props, Module, Options),
    Namespace = namespace(Module),
    NewSpecAcc = SpecAcc#{?TO_REF(Namespace, Field) => Param},
    components(Options, Refs, NewSpecAcc, SubRefs ++ SubRefsAcc).

@ -869,7 +893,7 @@ parse_object_loop([{Name, Hocon} | Rest], Module, Options, Props, Required, Refs
    HoconType = hocon_schema:field_schema(Hocon, type),
    Init0 = init_prop([default | ?DEFAULT_FIELDS], #{}, Hocon),
    SchemaToSpec = schema_converter(Options),
    Init = trans_desc(Init0, Hocon, SchemaToSpec, NameBin),
    Init = trans_desc(Init0, Hocon, SchemaToSpec, NameBin, Options),
    {Prop, Refs1} = SchemaToSpec(HoconType, Module),
    NewRequiredAcc =
        case is_required(Hocon) of

@ -57,7 +57,7 @@ t_look_up_code(_) ->

t_description_code(_) ->
    {error, not_found} = emqx_dashboard_error_code:description('_____NOT_EXIST_NAME'),
    {ok, <<"Request parameters are not legal">>} =
    {ok, <<"Request parameters are invalid">>} =
        emqx_dashboard_error_code:description('BAD_REQUEST'),
    ok.

@ -79,7 +79,7 @@ t_api_code(_) ->
    Url = ?SERVER ++ "/error_codes/BAD_REQUEST",
    {ok, #{
        <<"code">> := <<"BAD_REQUEST">>,
        <<"description">> := <<"Request parameters are not legal">>
        <<"description">> := <<"Request parameters are invalid">>
    }} = request(Url),
    ok.

@ -0,0 +1,51 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_dashboard_listener_SUITE).

-compile(nowarn_export_all).
-compile(export_all).

-include_lib("eunit/include/eunit.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").

all() ->
    emqx_common_test_helpers:all(?MODULE).

init_per_suite(Config) ->
    emqx_mgmt_api_test_util:init_suite([emqx_conf]),
    ok = change_i18n_lang(en),
    Config.

end_per_suite(_Config) ->
    ok = change_i18n_lang(en),
    emqx_mgmt_api_test_util:end_suite([emqx_conf]).

t_change_i18n_lang(_Config) ->
    ?check_trace(
        begin
            ok = change_i18n_lang(zh),
            {ok, _} = ?block_until(#{?snk_kind := regenerate_minirest_dispatch}, 10_000),
            ok
        end,
        fun(ok, Trace) ->
            ?assertMatch([#{i18n_lang := zh}], ?of_kind(regenerate_minirest_dispatch, Trace))
        end
    ),
    ok.

change_i18n_lang(Lang) ->
    {ok, _} = emqx_conf:update([dashboard], {change_i18n_lang, Lang}, #{}),
    ok.

@ -64,7 +64,6 @@ groups() ->

init_per_suite(Config) ->
    emqx_mgmt_api_test_util:init_suite([emqx_conf]),
    emqx_dashboard:init_i18n(),
    Config.

end_per_suite(_Config) ->

@ -33,7 +33,6 @@ init_per_suite(Config) ->
    mria:start(),
    application:load(emqx_dashboard),
    emqx_common_test_helpers:start_apps([emqx_conf, emqx_dashboard], fun set_special_configs/1),
    emqx_dashboard:init_i18n(),
    Config.

set_special_configs(emqx_dashboard) ->

@ -308,8 +307,11 @@ t_nest_ref(_Config) ->

t_none_ref(_Config) ->
    Path = "/ref/none",
    ?assertThrow(
        {error, #{mfa := {?MODULE, schema, [Path]}}},
    ?assertError(
        #{
            mfa := {?MODULE, schema, [Path]},
            reason := function_clause
        },
        emqx_dashboard_swagger:parse_spec_ref(?MODULE, Path, #{})
    ),
    ok.

@ -33,7 +33,6 @@ all() -> emqx_common_test_helpers:all(?MODULE).

init_per_suite(Config) ->
    emqx_mgmt_api_test_util:init_suite([emqx_conf]),
    emqx_dashboard:init_i18n(),
    Config.

end_per_suite(Config) ->

@ -278,11 +277,11 @@ t_bad_ref(_Config) ->

t_none_ref(_Config) ->
    Path = "/ref/none",
    ?assertThrow(
        {error, #{
    ?assertError(
        #{
            mfa := {?MODULE, schema, ["/ref/none"]},
            reason := function_clause
        }},
        },
        validate(Path, #{}, [])
    ),
    ok.

@ -32,7 +32,7 @@
namespace() -> exhook.

roots() ->
    [{exhook, ?HOCON(?R_REF(exhook), #{importance => ?IMPORTANCE_HIDDEN})}].
    [{exhook, ?HOCON(?R_REF(exhook), #{importance => ?IMPORTANCE_LOW})}].

fields(exhook) ->
    [

@ -112,8 +112,8 @@
%%--------------------------------------------------------------------

list_nodes() ->
    Running = mria:cluster_nodes(running),
    Stopped = mria:cluster_nodes(stopped),
    Running = emqx:cluster_nodes(running),
    Stopped = emqx:cluster_nodes(stopped),
    DownNodes = lists:map(fun stopped_node_info/1, Stopped),
    [{Node, Info} || #{node := Node} = Info <- node_info(Running)] ++ DownNodes.

@ -199,7 +199,7 @@ vm_stats() ->
%%--------------------------------------------------------------------

list_brokers() ->
    Running = mria:running_nodes(),
    Running = emqx:running_nodes(),
    [{Node, Broker} || #{node := Node} = Broker <- broker_info(Running)].

lookup_broker(Node) ->

@ -223,7 +223,7 @@ broker_info(Nodes) ->
%%--------------------------------------------------------------------

get_metrics() ->
    nodes_info_count([get_metrics(Node) || Node <- mria:running_nodes()]).
    nodes_info_count([get_metrics(Node) || Node <- emqx:running_nodes()]).

get_metrics(Node) ->
    unwrap_rpc(emqx_proto_v1:get_metrics(Node)).

@ -238,13 +238,20 @@ get_stats() ->
        'subscriptions.shared.count',
        'subscriptions.shared.max'
    ],
    CountStats = nodes_info_count([
        begin
            Stats = get_stats(Node),
            delete_keys(Stats, GlobalStatsKeys)
    CountStats = nodes_info_count(
        lists:foldl(
            fun(Node, Acc) ->
                case get_stats(Node) of
                    {error, _} ->
                        Acc;
                    Stats ->
                        [delete_keys(Stats, GlobalStatsKeys) | Acc]
                end
        || Node <- mria:running_nodes()
    ]),
            end,
            [],
            emqx:running_nodes()
        )
    ),
    GlobalStats = maps:with(GlobalStatsKeys, maps:from_list(get_stats(node()))),
    maps:merge(CountStats, GlobalStats).
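
%% Nodes that returned {error, _} from get_stats/1 are skipped in the fold
%% above, so a stopped or unreachable node no longer breaks the aggregate.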

@ -275,12 +282,12 @@ nodes_info_count(PropList) ->
lookup_client({clientid, ClientId}, FormatFun) ->
    lists:append([
        lookup_client(Node, {clientid, ClientId}, FormatFun)
        || Node <- mria:running_nodes()
        || Node <- emqx:running_nodes()
    ]);
lookup_client({username, Username}, FormatFun) ->
    lists:append([
        lookup_client(Node, {username, Username}, FormatFun)
        || Node <- mria:running_nodes()
        || Node <- emqx:running_nodes()
    ]).

lookup_client(Node, Key, FormatFun) ->

@ -307,7 +314,7 @@ kickout_client(ClientId) ->
        [] ->
            {error, not_found};
        _ ->
            Results = [kickout_client(Node, ClientId) || Node <- mria:running_nodes()],
            Results = [kickout_client(Node, ClientId) || Node <- emqx:running_nodes()],
            check_results(Results)
    end.

@ -322,7 +329,7 @@ list_client_subscriptions(ClientId) ->
        [] ->
            {error, not_found};
        _ ->
            Results = [client_subscriptions(Node, ClientId) || Node <- mria:running_nodes()],
            Results = [client_subscriptions(Node, ClientId) || Node <- emqx:running_nodes()],
            Filter =
                fun
                    ({error, _}) ->

@ -340,18 +347,18 @@ client_subscriptions(Node, ClientId) ->
    {Node, unwrap_rpc(emqx_broker_proto_v1:list_client_subscriptions(Node, ClientId))}.

clean_authz_cache(ClientId) ->
    Results = [clean_authz_cache(Node, ClientId) || Node <- mria:running_nodes()],
    Results = [clean_authz_cache(Node, ClientId) || Node <- emqx:running_nodes()],
    check_results(Results).

clean_authz_cache(Node, ClientId) ->
    unwrap_rpc(emqx_proto_v1:clean_authz_cache(Node, ClientId)).

clean_authz_cache_all() ->
    Results = [{Node, clean_authz_cache_all(Node)} || Node <- mria:running_nodes()],
    Results = [{Node, clean_authz_cache_all(Node)} || Node <- emqx:running_nodes()],
    wrap_results(Results).

clean_pem_cache_all() ->
    Results = [{Node, clean_pem_cache_all(Node)} || Node <- mria:running_nodes()],
    Results = [{Node, clean_pem_cache_all(Node)} || Node <- emqx:running_nodes()],
    wrap_results(Results).

wrap_results(Results) ->

@ -379,7 +386,7 @@ set_keepalive(_ClientId, _Interval) ->

%% @private
call_client(ClientId, Req) ->
    Results = [call_client(Node, ClientId, Req) || Node <- mria:running_nodes()],
    Results = [call_client(Node, ClientId, Req) || Node <- emqx:running_nodes()],
    Expected = lists:filter(
        fun
            ({error, _}) -> false;

@ -428,7 +435,7 @@ list_subscriptions(Node) ->
list_subscriptions_via_topic(Topic, FormatFun) ->
    lists:append([
        list_subscriptions_via_topic(Node, Topic, FormatFun)
        || Node <- mria:running_nodes()
        || Node <- emqx:running_nodes()
    ]).

list_subscriptions_via_topic(Node, Topic, _FormatFun = {M, F}) ->

@ -442,7 +449,7 @@ list_subscriptions_via_topic(Node, Topic, _FormatFun = {M, F}) ->
%%--------------------------------------------------------------------

subscribe(ClientId, TopicTables) ->
    subscribe(mria:running_nodes(), ClientId, TopicTables).
    subscribe(emqx:running_nodes(), ClientId, TopicTables).

subscribe([Node | Nodes], ClientId, TopicTables) ->
    case unwrap_rpc(emqx_management_proto_v3:subscribe(Node, ClientId, TopicTables)) of

@ -467,7 +474,7 @@ publish(Msg) ->
-spec unsubscribe(emqx_types:clientid(), emqx_types:topic()) ->
    {unsubscribe, _} | {error, channel_not_found}.
unsubscribe(ClientId, Topic) ->
    unsubscribe(mria:running_nodes(), ClientId, Topic).
    unsubscribe(emqx:running_nodes(), ClientId, Topic).

-spec unsubscribe([node()], emqx_types:clientid(), emqx_types:topic()) ->
    {unsubscribe, _} | {error, channel_not_found}.

@ -490,7 +497,7 @@ do_unsubscribe(ClientId, Topic) ->
-spec unsubscribe_batch(emqx_types:clientid(), [emqx_types:topic()]) ->
    {unsubscribe, _} | {error, channel_not_found}.
unsubscribe_batch(ClientId, Topics) ->
    unsubscribe_batch(mria:running_nodes(), ClientId, Topics).
    unsubscribe_batch(emqx:running_nodes(), ClientId, Topics).

-spec unsubscribe_batch([node()], emqx_types:clientid(), [emqx_types:topic()]) ->
    {unsubscribe_batch, _} | {error, channel_not_found}.

@ -515,7 +522,7 @@ do_unsubscribe_batch(ClientId, Topics) ->
%%--------------------------------------------------------------------

get_alarms(Type) ->
    [{Node, get_alarms(Node, Type)} || Node <- mria:running_nodes()].
    [{Node, get_alarms(Node, Type)} || Node <- emqx:running_nodes()].

get_alarms(Node, Type) ->
    add_duration_field(unwrap_rpc(emqx_proto_v1:get_alarms(Node, Type))).

@ -524,7 +531,7 @@ deactivate(Node, Name) ->
    unwrap_rpc(emqx_proto_v1:deactivate_alarm(Node, Name)).

delete_all_deactivated_alarms() ->
    [delete_all_deactivated_alarms(Node) || Node <- mria:running_nodes()].
    [delete_all_deactivated_alarms(Node) || Node <- emqx:running_nodes()].

delete_all_deactivated_alarms(Node) ->
    unwrap_rpc(emqx_proto_v1:delete_all_deactivated_alarms(Node)).

@ -163,7 +163,7 @@ cluster_query(Tab, QString, QSchema, MsFun, FmtFun) ->
            {error, page_limit_invalid};
        Meta ->
            {_CodCnt, NQString} = parse_qstring(QString, QSchema),
            Nodes = mria:running_nodes(),
            Nodes = emqx:running_nodes(),
            ResultAcc = init_query_result(),
            QueryState = init_query_state(Tab, NQString, MsFun, Meta),
            NResultAcc = do_cluster_query(

@ -101,7 +101,7 @@ cluster_info(get, _) ->
    ClusterName = application:get_env(ekka, cluster_name, emqxcl),
    Info = #{
        name => ClusterName,
        nodes => mria:running_nodes(),
        nodes => emqx:running_nodes(),
        self => node()
    },
    {200, Info}.

@ -42,8 +42,6 @@
    <<"alarm">>,
    <<"sys_topics">>,
    <<"sysmon">>,
    <<"limiter">>,
    <<"trace">>,
    <<"log">>,
    <<"persistent_session_store">>,
    <<"zones">>

@ -260,7 +258,7 @@ configs(get, Params, _Req) ->
    QS = maps:get(query_string, Params, #{}),
    Node = maps:get(<<"node">>, QS, node()),
    case
        lists:member(Node, mria:running_nodes()) andalso
        lists:member(Node, emqx:running_nodes()) andalso
            emqx_management_proto_v2:get_full_config(Node)
    of
        false ->

@ -483,7 +483,7 @@ err_msg_str(Reason) ->
    io_lib:format("~p", [Reason]).

list_listeners() ->
    [list_listeners(Node) || Node <- mria:running_nodes()].
    [list_listeners(Node) || Node <- emqx:running_nodes()].

list_listeners(Node) ->
    wrap_rpc(emqx_management_proto_v2:list_listeners(Node)).

@ -59,7 +59,7 @@ metrics(get, #{query_string := Qs}) ->
                maps:from_list(
                    emqx_mgmt:get_metrics(Node) ++ [{node, Node}]
                )
                || Node <- mria:running_nodes()
                || Node <- emqx:running_nodes()
            ],
            {200, Data}
    end.

@ -127,21 +127,21 @@ list(get, #{query_string := Qs}) ->
        true ->
            {200, emqx_mgmt:get_stats()};
        _ ->
            Data = [
                maps:from_list(emqx_mgmt:get_stats(Node) ++ [{node, Node}])
                || Node <- running_nodes()
            ],
            Data = lists:foldl(
                fun(Node, Acc) ->
                    case emqx_mgmt:get_stats(Node) of
                        {error, _Err} ->
                            Acc;
                        Stats when is_list(Stats) ->
                            Data = maps:from_list([{node, Node} | Stats]),
                            [Data | Acc]
                    end
                end,
                [],
                emqx:running_nodes()
            ),
            {200, Data}
    end.

%%%==============================================================================================
%% Internal

running_nodes() ->
    Nodes = erlang:nodes([visible, this]),
    RpcResults = emqx_proto_v2:are_running(Nodes),
    [
        Node
        || {Node, IsRunning} <- lists:zip(Nodes, RpcResults),
            IsRunning =:= {ok, true}
    ].

@ -390,7 +390,7 @@ trace(get, _Params) ->
        fun(#{start_at := A}, #{start_at := B}) -> A > B end,
        emqx_trace:format(List0)
    ),
    Nodes = mria:running_nodes(),
    Nodes = emqx:running_nodes(),
    TraceSize = wrap_rpc(emqx_mgmt_trace_proto_v2:get_trace_size(Nodes)),
    AllFileSize = lists:foldl(fun(F, Acc) -> maps:merge(Acc, F) end, #{}, TraceSize),
    Now = erlang:system_time(second),

@ -464,7 +464,7 @@ format_trace(Trace0) ->
    LogSize = lists:foldl(
        fun(Node, Acc) -> Acc#{Node => 0} end,
        #{},
        mria:running_nodes()
        emqx:running_nodes()
    ),
    Trace2 = maps:without([enable, filter], Trace1),
    Trace2#{

@ -560,13 +560,13 @@ group_trace_file(ZipDir, TraceLog, TraceFiles) ->
    ).

collect_trace_file(undefined, TraceLog) ->
    Nodes = mria:running_nodes(),
    Nodes = emqx:running_nodes(),
    wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file(Nodes, TraceLog));
collect_trace_file(Node, TraceLog) ->
    wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file([Node], TraceLog)).

collect_trace_file_detail(TraceLog) ->
    Nodes = mria:running_nodes(),
    Nodes = emqx:running_nodes(),
    wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file_detail(Nodes, TraceLog)).

wrap_rpc({GoodRes, BadNodes}) ->

@ -696,7 +696,7 @@ parse_node(Query, Default) ->
            {ok, Default};
        {ok, NodeBin} ->
            Node = binary_to_existing_atom(NodeBin),
            true = lists:member(Node, mria:running_nodes()),
            true = lists:member(Node, emqx:running_nodes()),
            {ok, Node}
    end
catch

@ -36,16 +36,16 @@ end_per_suite(_) ->
    emqx_mgmt_api_test_util:end_suite([emqx_management, emqx_conf]).

init_per_testcase(TestCase, Config) ->
    meck:expect(mria, running_nodes, 0, [node()]),
    meck:expect(emqx, running_nodes, 0, [node()]),
    emqx_common_test_helpers:init_per_testcase(?MODULE, TestCase, Config).

end_per_testcase(TestCase, Config) ->
    meck:unload(mria),
    meck:unload(emqx),
    emqx_common_test_helpers:end_per_testcase(?MODULE, TestCase, Config).

t_list_nodes(init, Config) ->
    meck:expect(
        mria,
        emqx,
        cluster_nodes,
        fun
            (running) -> [node()];

@ -125,7 +125,7 @@ t_lookup_client(_Config) ->
        emqx_mgmt:lookup_client({username, <<"user1">>}, ?FORMATFUN)
    ),
    ?assertEqual([], emqx_mgmt:lookup_client({clientid, <<"notfound">>}, ?FORMATFUN)),
    meck:expect(mria, running_nodes, 0, [node(), 'fake@nonode']),
    meck:expect(emqx, running_nodes, 0, [node(), 'fake@nonode']),
    ?assertMatch(
        [_ | {error, nodedown}], emqx_mgmt:lookup_client({clientid, <<"client1">>}, ?FORMATFUN)
    ).

@ -188,7 +188,7 @@ t_clean_cache(_Config) ->
        {error, _},
        emqx_mgmt:clean_pem_cache_all()
    ),
    meck:expect(mria, running_nodes, 0, [node(), 'fake@nonode']),
    meck:expect(emqx, running_nodes, 0, [node(), 'fake@nonode']),
    ?assertMatch(
        {error, [{'fake@nonode', {error, _}}]},
        emqx_mgmt:clean_authz_cache_all()

@ -179,14 +179,14 @@ t_bad_rpc(_) ->
    ClientLs1 = [start_emqtt_client(node(), I, 1883) || I <- lists:seq(1, 10)],
    Path = emqx_mgmt_api_test_util:api_path(["clients?limit=2&page=2"]),
    try
        meck:expect(mria, running_nodes, 0, ['fake@nohost']),
        meck:expect(emqx, running_nodes, 0, ['fake@nohost']),
        {error, {_, 500, _}} = emqx_mgmt_api_test_util:request_api(get, Path),
        %% good cop, bad cop
        meck:expect(mria, running_nodes, 0, [node(), 'fake@nohost']),
        meck:expect(emqx, running_nodes, 0, [node(), 'fake@nohost']),
        {error, {_, 500, _}} = emqx_mgmt_api_test_util:request_api(get, Path)
    after
        _ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs1),
        meck:unload(mria),
        meck:unload(emqx),
        emqx_mgmt_api_test_util:end_suite()
    end.

@ -246,7 +246,7 @@ t_dashboard(_Config) ->

t_configs_node({'init', Config}) ->
    Node = node(),
    meck:expect(mria, running_nodes, fun() -> [Node, bad_node, other_node] end),
    meck:expect(emqx, running_nodes, fun() -> [Node, bad_node, other_node] end),
    meck:expect(
        emqx_management_proto_v2,
        get_full_config,

@ -258,7 +258,7 @@ t_configs_node({'init', Config}) ->
    ),
    Config;
t_configs_node({'end', _}) ->
    meck:unload([mria, emqx_management_proto_v2]);
    meck:unload([emqx, emqx_management_proto_v2]);
t_configs_node(_) ->
    Node = atom_to_list(node()),

@ -168,8 +168,8 @@ t_api_listeners_list_not_ready(Config) when is_list(Config) ->
    L3 = get_tcp_listeners(Node2),

    Comment = #{
        node1 => rpc:call(Node1, mria, running_nodes, []),
        node2 => rpc:call(Node2, mria, running_nodes, [])
        node1 => rpc:call(Node1, emqx, running_nodes, []),
        node2 => rpc:call(Node2, emqx, running_nodes, [])
    },

    ?assert(length(L1) > length(L2), Comment),

@ -24,10 +24,12 @@ all() ->
    emqx_common_test_helpers:all(?MODULE).

init_per_suite(Config) ->
    meck:expect(emqx, running_nodes, 0, [node(), 'fake@node']),
    emqx_mgmt_api_test_util:init_suite(),
    Config.

end_per_suite(_) ->
    meck:unload(emqx),
    emqx_mgmt_api_test_util:end_suite().

t_stats_api(_) ->

@ -599,8 +599,8 @@ emqx_cluster() ->
    ].

emqx_cluster_data() ->
    Running = mria:cluster_nodes(running),
    Stopped = mria:cluster_nodes(stopped),
    Running = emqx:cluster_nodes(running),
    Stopped = emqx:cluster_nodes(stopped),
    [
        {nodes_running, length(Running)},
        {nodes_stopped, length(Stopped)}

@ -14,31 +14,27 @@
%% limitations under the License.
%%--------------------------------------------------------------------

-module(emqx_plugin_libs_pool).
-module(emqx_resource_pool).

-export([
    start_pool/3,
    stop_pool/1,
    pool_name/1,
    health_check_ecpool_workers/2,
    health_check_ecpool_workers/3
    start/3,
    stop/1,
    health_check_workers/2,
    health_check_workers/3
]).

-include_lib("emqx/include/logger.hrl").

-define(HEALTH_CHECK_TIMEOUT, 15000).

pool_name(ID) when is_binary(ID) ->
    list_to_atom(binary_to_list(ID)).

start_pool(Name, Mod, Options) ->
start(Name, Mod, Options) ->
    case ecpool:start_sup_pool(Name, Mod, Options) of
        {ok, _} ->
            ?SLOG(info, #{msg => "start_ecpool_ok", pool_name => Name}),
            ok;
        {error, {already_started, _Pid}} ->
            stop_pool(Name),
            start_pool(Name, Mod, Options);
            stop(Name),
            start(Name, Mod, Options);
        {error, Reason} ->
            NReason = parse_reason(Reason),
            ?SLOG(error, #{

@ -49,7 +45,7 @@ start_pool(Name, Mod, Options) ->
            {error, {start_pool_failed, Name, NReason}}
    end.
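
%% Illustrative call from a connector's on_start/2 (the resource id is made
%% up; real callers pass their own instance id, as in the connectors below):
%%   ok = emqx_resource_pool:start(<<"bridge:mysql:my_db">>, ?MODULE, Options)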

stop_pool(Name) ->
stop(Name) ->
    case ecpool:stop_sup_pool(Name) of
        ok ->
            ?SLOG(info, #{msg => "stop_ecpool_ok", pool_name => Name});

@ -64,10 +60,10 @@ stop_pool(Name) ->
            error({stop_pool_failed, Name, Reason})
    end.

health_check_ecpool_workers(PoolName, CheckFunc) ->
    health_check_ecpool_workers(PoolName, CheckFunc, ?HEALTH_CHECK_TIMEOUT).
health_check_workers(PoolName, CheckFunc) ->
    health_check_workers(PoolName, CheckFunc, ?HEALTH_CHECK_TIMEOUT).

health_check_ecpool_workers(PoolName, CheckFunc, Timeout) ->
health_check_workers(PoolName, CheckFunc, Timeout) ->
    Workers = [Worker || {_WorkerName, Worker} <- ecpool:workers(PoolName)],
    DoPerWorker =
        fun(Worker) ->

@ -53,7 +53,8 @@ fields("retainer") ->
            sc(
                ?R_REF(flow_control),
                flow_control,
                #{}
                #{},
                ?IMPORTANCE_HIDDEN
            )},
        {max_payload_size,
            sc(

@ -125,7 +126,9 @@ desc(_) ->
%% hoconsc:mk(Type, #{desc => ?DESC(DescId)}).

sc(Type, DescId, Default) ->
    hoconsc:mk(Type, #{default => Default, desc => ?DESC(DescId)}).
    sc(Type, DescId, Default, ?DEFAULT_IMPORTANCE).
sc(Type, DescId, Default, Importance) ->
    hoconsc:mk(Type, #{default => Default, desc => ?DESC(DescId), importance => Importance}).

backend_config() ->
    hoconsc:mk(hoconsc:ref(?MODULE, mnesia_config), #{desc => ?DESC(backend)}).

@ -758,23 +758,22 @@ with_conf(ConfMod, Case) ->
    end.

make_limiter_cfg(Rate) ->
    Infinity = emqx_limiter_schema:infinity_value(),
    Client = #{
        rate => Rate,
        initial => 0,
        capacity => Infinity,
        burst => 0,
        low_watermark => 1,
        divisible => false,
        max_retry_time => timer:seconds(5),
        failure_strategy => force
    },
    #{client => Client, rate => Infinity, initial => 0, capacity => Infinity}.
    #{client => Client, rate => Rate, initial => 0, burst => 0}.

make_limiter_json(Rate) ->
    Client = #{
        <<"rate">> => Rate,
        <<"initial">> => 0,
        <<"capacity">> => <<"infinity">>,
        <<"burst">> => <<"0">>,
        <<"low_watermark">> => 0,
        <<"divisible">> => <<"false">>,
        <<"max_retry_time">> => <<"5s">>,

@ -784,5 +783,5 @@ make_limiter_json(Rate) ->
        <<"client">> => Client,
        <<"rate">> => <<"infinity">>,
        <<"initial">> => 0,
        <<"capacity">> => <<"infinity">>
        <<"burst">> => <<"0">>
    }.
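
%% NOTE: 'burst' replaces the old 'capacity' field in these test configs
%% (see the limiter changelog entry later in this changeset).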

@ -38,7 +38,7 @@ namespace() -> rule_engine.
tags() ->
    [<<"Rule Engine">>].

roots() -> [{"rule_engine", ?HOCON(?R_REF("rule_engine"), #{importance => ?IMPORTANCE_HIDDEN})}].
roots() -> [{"rule_engine", ?HOCON(?R_REF("rule_engine"), #{importance => ?IMPORTANCE_LOW})}].

fields("rule_engine") ->
    rule_engine_settings() ++

@ -227,6 +227,7 @@
    now_timestamp/1,
    format_date/3,
    format_date/4,
    date_to_unix_ts/3,
    date_to_unix_ts/4
]).

@ -1085,6 +1086,9 @@ format_date(TimeUnit, Offset, FormatString, TimeEpoch) ->
        )
    ).

date_to_unix_ts(TimeUnit, FormatString, InputString) ->
    date_to_unix_ts(TimeUnit, "Z", FormatString, InputString).

date_to_unix_ts(TimeUnit, Offset, FormatString, InputString) ->
    emqx_rule_date:parse_date(
        time_unit(TimeUnit),

@ -686,7 +686,6 @@ t_jq(_) ->
            %% Got timeout as expected
            got_timeout
    end,
    _ConfigRootKey = emqx_rule_engine_schema:namespace(),
    ?assertThrow(
        {jq_exception, {timeout, _}},
        apply_func(jq, [TOProgram, <<"-2">>])

@ -959,7 +958,7 @@ prop_format_date_fun() ->
    Args1 = [<<"second">>, <<"+07:00">>, <<"%m--%d--%y---%H:%M:%S%Z">>],
    ?FORALL(
        S,
        erlang:system_time(second),
        range(0, 4000000000),
        S ==
            apply_func(
                date_to_unix_ts,

@ -975,7 +974,7 @@ prop_format_date_fun() ->
    Args2 = [<<"millisecond">>, <<"+04:00">>, <<"--%m--%d--%y---%H:%M:%S%Z">>],
    ?FORALL(
        S,
        erlang:system_time(millisecond),
        range(0, 4000000000),
        S ==
            apply_func(
                date_to_unix_ts,

@ -991,7 +990,7 @@ prop_format_date_fun() ->
    Args = [<<"second">>, <<"+08:00">>, <<"%y-%m-%d-%H:%M:%S%Z">>],
    ?FORALL(
        S,
        erlang:system_time(second),
        range(0, 4000000000),
        S ==
            apply_func(
                date_to_unix_ts,

@ -1003,6 +1002,24 @@ prop_format_date_fun() ->
                    )
                ]
            )
    ),
    %% When no offset is specified, the offset should be taken from the formatted time string
    ArgsNoOffset = [<<"second">>, <<"%y-%m-%d-%H:%M:%S%Z">>],
    ArgsOffset = [<<"second">>, <<"+08:00">>, <<"%y-%m-%d-%H:%M:%S%Z">>],
    ?FORALL(
        S,
        range(0, 4000000000),
        S ==
            apply_func(
                date_to_unix_ts,
                ArgsNoOffset ++
                    [
                        apply_func(
                            format_date,
                            ArgsOffset ++ [S]
                        )
                    ]
            )
    ).

%%------------------------------------------------------------------------------

8
build
8
build

@ -117,18 +117,14 @@ make_docs() {
    mkdir -p "$docdir" "$dashboard_www_static"
    # shellcheck disable=SC2086
    erl -noshell -pa $libs_dir1 $libs_dir2 $libs_dir3 -eval \
        "I18nFile = filename:join([apps, emqx_dashboard, priv, 'i18n.conf']), \
        ok = emqx_conf:dump_schema('$docdir', $SCHEMA_MODULE, I18nFile), \
        "ok = emqx_conf:dump_schema('$docdir', $SCHEMA_MODULE), \
        halt(0)."
    cp "$docdir"/bridge-api-*.json "$dashboard_www_static"
    cp "$docdir"/hot-config-schema-*.json "$dashboard_www_static"
}
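
# Illustrative sanity check after make_docs runs (exact file names may vary;
# paths come from the variables used above):
#   ls "$docdir" "$dashboard_www_static"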

assert_no_compile_time_only_deps() {
    if [ "$("$FIND" "_build/$PROFILE/rel/emqx/lib/" -maxdepth 1 -name 'gpb-*' -type d)" != "" ]; then
        echo "gpb should not be included in the release"
        exit 1
    fi
    :
}

make_rel() {

@ -1,2 +0,0 @@
Hide `flapping_detect/conn_congestion/stats` configuration.
Deprecate `flapping_detect.enable`.

@ -1 +0,0 @@
Hide the `auto_subscribe` configuration items so that they can be modified later only through the HTTP API.

@ -1 +0,0 @@
Hide data items (rule_engine/bridge/authz/authn) from configuration files and documentation.

@ -0,0 +1,2 @@
Unify the config formats for `cluster.core_nodes` and `cluster.static.seeds`.
Now they both support formats in array `["emqx1@127.0.0.1", "emqx2@127.0.0.1"]` or comma-separated string `"emqx1@127.0.0.1,emqx2@127.0.0.1"`.

@ -1 +1,7 @@
hide exhook/rewrite/topic_metric/persistent_session_store/overload_protection from the docs and configuration file.
Hide a large number of advanced options to simplify the configuration file.

That includes `rewrite`, `topic_metric`, `persistent_session_store`, `overload_protection`,
`flapping_detect`, `conn_congestion`, `stats`, `auto_subscribe`, `broker_perf`,
`shared_subscription_group`, `slow_subs`, `ssl_options.user_lookup_fun` and some advanced items
in the `node` and `dashboard` sections, [#10358](https://github.com/emqx/emqx/pull/10358),
[#10381](https://github.com/emqx/emqx/pull/10381), [#10385](https://github.com/emqx/emqx/pull/10385).

@ -0,0 +1 @@
A new function to convert a formatted date to an integer timestamp has been added: `date_to_unix_ts/3`.
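
A usage sketch (illustrative values; `%Z` lets the offset be taken from the input string itself):

```erlang
emqx_rule_funcs:date_to_unix_ts(<<"second">>, <<"%y-%m-%d-%H:%M:%S%Z">>, <<"2023-05-01-12:00:00+08:00">>).
```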

@ -0,0 +1,4 @@
Optimize the configuration priority mechanism to fix the issue where the configuration
changes made to `etc/emqx.conf` do not take effect after restarting EMQX.

More details about the new mechanism: [Configure Override Rules](https://www.emqx.io/docs/en/v5.0/configuration/configuration.html#configure-override-rules)

@ -0,0 +1,6 @@
Fix error in `/api/v5/monitor_current` API endpoint that happens when some EMQX nodes are down.

Prior to this fix, sometimes the request returned HTTP code 500 and the following message:
```
{"code":"INTERNAL_ERROR","message":"error, badarg, [{erlang,'++',[{error,nodedown},[{node,'emqx@10.42.0.150'}]], ...
```

@ -0,0 +1,6 @@
Simplify the configuration of the limiter feature and optimize some of the code.
- Rename `message_in` to `messages`
- Rename `bytes_in` to `bytes`
- Use `burst` instead of `capacity`
- Hide non-important fields
- Optimize limiter instances in different rate settings

@ -0,0 +1,2 @@
Simplify the configuration of the `retainer` feature.
- Mark `flow_control` as a non-important field.

@ -0,0 +1 @@
Add support for [Protocol Buffers](https://protobuf.dev/) schemas in Schema Registry.

@ -530,15 +530,16 @@ t_write_failure(Config) ->
        fun(Trace0) ->
            ct:pal("trace: ~p", [Trace0]),
            Trace = ?of_kind(buffer_worker_flush_nack, Trace0),
            ?assertMatch([#{result := {async_return, {error, _}}} | _], Trace),
            [#{result := {async_return, {error, Error}}} | _] = Trace,
            case Error of
                {resource_error, _} ->
            [#{result := Result} | _] = Trace,
            case Result of
                {async_return, {error, {resource_error, _}}} ->
                    ok;
                {recoverable_error, disconnected} ->
                {async_return, {error, {recoverable_error, disconnected}}} ->
                    ok;
                {error, {resource_error, _}} ->
                    ok;
                _ ->
                    ct:fail("unexpected error: ~p", [Error])
                    ct:fail("unexpected error: ~p", [Result])
            end
        end
    ),

@ -917,7 +917,7 @@ t_invalid_private_key(Config) ->
                    #{<<"private_key">> => InvalidPrivateKeyPEM}
                }
            ),
            #{?snk_kind := gcp_pubsub_bridge_jwt_worker_failed_to_start},
            #{?snk_kind := "gcp_pubsub_bridge_jwt_worker_failed_to_start"},
            20_000
        ),
        Res

@ -928,7 +928,7 @@ t_invalid_private_key(Config) ->
                [#{reason := Reason}] when
                    Reason =:= noproc orelse
                        Reason =:= {shutdown, {error, empty_key}},
                ?of_kind(gcp_pubsub_bridge_jwt_worker_failed_to_start, Trace)
                ?of_kind("gcp_pubsub_bridge_jwt_worker_failed_to_start", Trace)
            ),
            ?assertMatch(
                [#{error := empty_key}],

@ -956,14 +956,14 @@ t_jwt_worker_start_timeout(Config) ->
                    #{<<"private_key">> => InvalidPrivateKeyPEM}
                }
            ),
            #{?snk_kind := gcp_pubsub_bridge_jwt_timeout},
            #{?snk_kind := "gcp_pubsub_bridge_jwt_timeout"},
            20_000
        ),
        Res
    end,
    fun(Res, Trace) ->
        ?assertMatch({ok, _}, Res),
        ?assertMatch([_], ?of_kind(gcp_pubsub_bridge_jwt_timeout, Trace)),
        ?assertMatch([_], ?of_kind("gcp_pubsub_bridge_jwt_timeout", Trace)),
        ok
    end
),

@ -1329,7 +1329,7 @@ t_failed_to_start_jwt_worker(Config) ->
    fun(Trace) ->
        ?assertMatch(
            [#{reason := {error, restarting}}],
            ?of_kind(gcp_pubsub_bridge_jwt_worker_failed_to_start, Trace)
            ?of_kind("gcp_pubsub_bridge_jwt_worker_failed_to_start", Trace)
        ),
        ok
    end

@ -5,8 +5,6 @@

-include_lib("eunit/include/eunit.hrl").

-import(emqx_ee_bridge_influxdb, [to_influx_lines/1]).

-define(INVALID_LINES, [
    " ",
    " \n",

@ -326,3 +324,13 @@ test_pairs(PairsList) ->

join(Sep, LinesList) ->
    lists:flatten(lists:join(Sep, LinesList)).

to_influx_lines(RawLines) ->
    OldLevel = emqx_logger:get_primary_log_level(),
    try
        %% mute error logs from this call
        emqx_logger:set_primary_log_level(none),
        emqx_ee_bridge_influxdb:to_influx_lines(RawLines)
    after
        emqx_logger:set_primary_log_level(OldLevel)
    end.

@ -147,6 +147,16 @@ ensure_loaded() ->
    _ = emqx_ee_bridge:module_info(),
    ok.

mongo_type(Config) ->
    case ?config(mongo_type, Config) of
        rs ->
            {rs, maps:get(<<"replica_set_name">>, ?config(mongo_config, Config))};
        sharded ->
            sharded;
        single ->
            single
    end.

mongo_type_bin(rs) ->
    <<"mongodb_rs">>;
mongo_type_bin(sharded) ->

@ -263,17 +273,14 @@ create_bridge_http(Params) ->
    end.

clear_db(Config) ->
    Type = mongo_type_bin(?config(mongo_type, Config)),
    Name = ?config(mongo_name, Config),
    #{<<"collection">> := Collection} = ?config(mongo_config, Config),
    ResourceID = emqx_bridge_resource:resource_id(Type, Name),
    {ok, _, #{state := #{connector_state := #{poolname := PoolName}}}} =
        emqx_resource:get_instance(ResourceID),
    Selector = #{},
    {true, _} = ecpool:pick_and_do(
        PoolName, {mongo_api, delete, [Collection, Selector]}, no_handover
    ),
    ok.
    Type = mongo_type(Config),
    Host = ?config(mongo_host, Config),
    Port = ?config(mongo_port, Config),
    Server = Host ++ ":" ++ integer_to_list(Port),
    #{<<"database">> := Db, <<"collection">> := Collection} = ?config(mongo_config, Config),
    {ok, Client} = mongo_api:connect(Type, [Server], [], [{database, Db}, {w_mode, unsafe}]),
    {true, _} = mongo_api:delete(Client, Collection, _Selector = #{}),
    mongo_api:disconnect(Client).
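
%% The short-lived client above is intentional: connect, wipe the collection,
%% disconnect, without going through the bridge's own pool.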
|
||||
|
||||
find_all(Config) ->
|
||||
Type = mongo_type_bin(?config(mongo_type, Config)),
|
||||
|
|
|
@ -265,7 +265,7 @@ unprepare(Config, Key) ->
|
|||
Name = ?config(mysql_name, Config),
|
||||
BridgeType = ?config(mysql_bridge_type, Config),
|
||||
ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
|
||||
{ok, _, #{state := #{poolname := PoolName}}} = emqx_resource:get_instance(ResourceID),
|
||||
{ok, _, #{state := #{pool_name := PoolName}}} = emqx_resource:get_instance(ResourceID),
|
||||
[
|
||||
begin
|
||||
{ok, Conn} = ecpool_worker:client(Worker),
|
||||
|
|
|
@ -44,7 +44,7 @@
|
|||
|
||||
-type state() ::
|
||||
#{
|
||||
poolname := atom(),
|
||||
pool_name := binary(),
|
||||
prepare_cql := prepares(),
|
||||
params_tokens := params_tokens(),
|
||||
%% returned by ecql:prepare/2
|
||||
|
@ -124,14 +124,10 @@ on_start(
|
|||
false ->
|
||||
[]
|
||||
end,
|
||||
%% use InstaId of binary type as Pool name, which is supported in ecpool.
|
||||
PoolName = InstId,
|
||||
Prepares = parse_prepare_cql(Config),
|
||||
InitState = #{poolname => PoolName, prepare_statement => #{}},
|
||||
State = maps:merge(InitState, Prepares),
|
||||
case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of
|
||||
State = parse_prepare_cql(Config),
|
||||
case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of
|
||||
ok ->
|
||||
{ok, init_prepare(State)};
|
||||
{ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})};
|
||||
{error, Reason} ->
|
||||
?tp(
|
||||
cassandra_connector_start_failed,
|
||||
|
@ -140,12 +136,12 @@ on_start(
|
|||
{error, Reason}
|
||||
end.
|
||||
|
||||
on_stop(InstId, #{poolname := PoolName}) ->
|
||||
on_stop(InstId, #{pool_name := PoolName}) ->
|
||||
?SLOG(info, #{
|
||||
msg => "stopping_cassandra_connector",
|
||||
connector => InstId
|
||||
}),
|
||||
emqx_plugin_libs_pool:stop_pool(PoolName).
|
||||
emqx_resource_pool:stop(PoolName).
|
||||
|
||||
-type request() ::
|
||||
% emqx_bridge.erl
|
||||
|
@ -184,7 +180,7 @@ do_single_query(
|
|||
InstId,
|
||||
Request,
|
||||
Async,
|
||||
#{poolname := PoolName} = State
|
||||
#{pool_name := PoolName} = State
|
||||
) ->
|
||||
{Type, PreparedKeyOrSQL, Params} = parse_request_to_cql(Request),
|
||||
?tp(
|
||||
|
@ -232,7 +228,7 @@ do_batch_query(
|
|||
InstId,
|
||||
Requests,
|
||||
Async,
|
||||
#{poolname := PoolName} = State
|
||||
#{pool_name := PoolName} = State
|
||||
) ->
|
||||
CQLs =
|
||||
lists:map(
|
||||
|
@ -305,8 +301,8 @@ exec_cql_batch_query(InstId, PoolName, Async, CQLs) ->
|
|||
Result
|
||||
end.
|
||||
|
||||
on_get_status(_InstId, #{poolname := Pool} = State) ->
|
||||
case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of
|
||||
on_get_status(_InstId, #{pool_name := PoolName} = State) ->
|
||||
case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of
|
||||
true ->
|
||||
case do_check_prepares(State) of
|
||||
ok ->
|
||||
|
@ -327,7 +323,7 @@ do_get_status(Conn) ->
|
|||
|
||||
do_check_prepares(#{prepare_cql := Prepares}) when is_map(Prepares) ->
|
||||
ok;
|
||||
do_check_prepares(State = #{poolname := PoolName, prepare_cql := {error, Prepares}}) ->
|
||||
do_check_prepares(State = #{pool_name := PoolName, prepare_cql := {error, Prepares}}) ->
|
||||
%% retry to prepare
|
||||
case prepare_cql(Prepares, PoolName) of
|
||||
{ok, Sts} ->
|
||||
|
@ -397,7 +393,7 @@ parse_prepare_cql([], Prepares, Tokens) ->
|
|||
params_tokens => Tokens
|
||||
}.
|
||||
|
||||
init_prepare(State = #{prepare_cql := Prepares, poolname := PoolName}) ->
|
||||
init_prepare(State = #{prepare_cql := Prepares, pool_name := PoolName}) ->
|
||||
case maps:size(Prepares) of
|
||||
0 ->
|
||||
State;
|
||||
|
@@ -429,17 +425,17 @@ prepare_cql(Prepares, PoolName) ->
     end.
 
 do_prepare_cql(Prepares, PoolName) ->
-    do_prepare_cql(ecpool:workers(PoolName), Prepares, PoolName, #{}).
+    do_prepare_cql(ecpool:workers(PoolName), Prepares, #{}).
 
-do_prepare_cql([{_Name, Worker} | T], Prepares, PoolName, _LastSts) ->
+do_prepare_cql([{_Name, Worker} | T], Prepares, _LastSts) ->
     {ok, Conn} = ecpool_worker:client(Worker),
     case prepare_cql_to_conn(Conn, Prepares) of
         {ok, Sts} ->
-            do_prepare_cql(T, Prepares, PoolName, Sts);
+            do_prepare_cql(T, Prepares, Sts);
         Error ->
             Error
     end;
-do_prepare_cql([], _Prepares, _PoolName, LastSts) ->
+do_prepare_cql([], _Prepares, LastSts) ->
     {ok, LastSts}.
 
 prepare_cql_to_conn(Conn, Prepares) ->
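The arity change just drops a `PoolName` argument that was threaded through but never used. The underlying pattern is worth naming: run one setup call against each worker's client connection, stop at the first error, otherwise keep the last result. A generic sketch, with `Setup` standing in for `prepare_cql_to_conn/2`:

%% Sketch only: apply Setup to every pool worker's client.
setup_each_worker(PoolName, Setup) ->
    do_setup(ecpool:workers(PoolName), Setup, {ok, #{}}).

do_setup([{_Name, Worker} | Rest], Setup, _Last) ->
    %% each ecpool worker owns one client connection
    {ok, Conn} = ecpool_worker:client(Worker),
    case Setup(Conn) of
        {ok, _} = Ok -> do_setup(Rest, Setup, Ok);
        Error -> Error
    end;
do_setup([], _Setup, Last) ->
    Last.

The hunks below move on to the ClickHouse connector; the file header naming it is not visible in this truncated view.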
@@ -62,7 +62,8 @@
 -type state() ::
     #{
         templates := templates(),
-        poolname := atom()
+        pool_name := binary(),
+        connect_timeout := pos_integer()
     }.
 
 -type clickhouse_config() :: map().
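The spec records the same shift at the type level: the pool name is now the binary resource instance id rather than an atom derived from it, which presumably also avoids minting a new atom for every dynamically created resource. A caller-side sketch of the tightened type (names are illustrative):

%% Sketch: specs can now insist on a binary pool name.
-type demo_state() :: #{pool_name := binary(), connect_timeout := pos_integer()}.

-spec pool_of(demo_state()) -> binary().
pool_of(#{pool_name := PoolName}) when is_binary(PoolName) ->
    PoolName.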
@@ -141,7 +142,6 @@ on_start(
         connector => InstanceID,
         config => emqx_utils:redact(Config)
     }),
-    PoolName = emqx_plugin_libs_pool:pool_name(InstanceID),
     Options = [
         {url, URL},
         {user, maps:get(username, Config, "default")},
@@ -149,46 +149,43 @@ on_start(
         {database, DB},
         {auto_reconnect, ?AUTO_RECONNECT_INTERVAL},
         {pool_size, PoolSize},
-        {pool, PoolName}
+        {pool, InstanceID}
     ],
-    InitState = #{
-        poolname => PoolName,
-        connect_timeout => ConnectTimeout
-    },
     try
         Templates = prepare_sql_templates(Config),
-        State = maps:merge(InitState, #{templates => Templates}),
-        case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options) of
+        State = #{
+            pool_name => InstanceID,
+            templates => Templates,
+            connect_timeout => ConnectTimeout
+        },
+        case emqx_resource_pool:start(InstanceID, ?MODULE, Options) of
             ok ->
                 {ok, State};
             {error, Reason} ->
                 log_start_error(Config, Reason, none),
                 ?tp(
                     info,
                     "clickhouse_connector_start_failed",
                     #{
                         error => Reason,
                         config => emqx_utils:redact(Config)
                     }
                 ),
                 {error, Reason}
         end
     catch
         _:CatchReason:Stacktrace ->
             log_start_error(Config, CatchReason, Stacktrace),
             ?tp(
                 info,
                 "clickhouse_connector_start_failed",
                 #{
                     error => CatchReason,
                     stacktrace => Stacktrace,
                     config => emqx_utils:redact(Config)
                 }
             ),
             {error, CatchReason}
     end.
 
 log_start_error(Config, Reason, Stacktrace) ->
     StacktraceMap =
         case Stacktrace of
             none -> #{};
             _ -> #{stacktrace => Stacktrace}
         end,
     LogMessage =
         #{
             msg => "clickhouse_connector_start_failed",
             error_reason => Reason,
             config => emqx_utils:redact(Config)
         },
     ?SLOG(info, maps:merge(LogMessage, StacktraceMap)),
     ?tp(
         clickhouse_connector_start_failed,
         #{error => Reason}
     ).
 
 %% Helper functions to prepare SQL templates
 
 prepare_sql_templates(#{
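Besides the rename, the restructuring builds the whole state map in one expression next to the pool start instead of `maps:merge/2`-ing an earlier `InitState`, so nothing references the deleted `PoolName` binding. The surrounding try/catch-plus-structured-logging pattern, distilled as a sketch; names are illustrative, `demo_connect_mod` stands in for the module exporting the ecpool `connect/1` callback, and plain `logger:info/1` replaces the `?SLOG` macro so the sketch needs no emqx headers:

%% Sketch of start-with-logging, not the connector's exact code.
start_logged(InstanceID, Options, Config) ->
    try
        case emqx_resource_pool:start(InstanceID, demo_connect_mod, Options) of
            ok ->
                {ok, #{pool_name => InstanceID}};
            {error, Reason} ->
                log_failure(Config, Reason, none),
                {error, Reason}
        end
    catch
        _:CaughtReason:Stacktrace ->
            log_failure(Config, CaughtReason, Stacktrace),
            {error, CaughtReason}
    end.

log_failure(Config, Reason, Stacktrace) ->
    Extra =
        case Stacktrace of
            none -> #{};
            _ -> #{stacktrace => Stacktrace}
        end,
    %% the real code redacts Config before logging it
    logger:info(maps:merge(
        #{msg => "start_failed", error_reason => Reason, config => Config},
        Extra
    )).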
@@ -240,7 +237,7 @@ split_clickhouse_insert_sql(SQL) ->
     end.
 
 % This is a callback for ecpool which is triggered by the call to
-% emqx_plugin_libs_pool:start_pool in on_start/2
+% emqx_resource_pool:start in on_start/2
 
 connect(Options) ->
     URL = iolist_to_binary(emqx_http_lib:normalize(proplists:get_value(url, Options))),
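The corrected comment names the actual control flow: `emqx_resource_pool:start/3` hands `?MODULE` to ecpool, and ecpool then calls that module's `connect/1` once per worker to open a client connection. The callback contract, sketched; `demo_ecpool_conn` and `demo_client:start_link/1` are placeholders for the real module and driver:

%% Sketch of an ecpool connect/1 callback: it receives the worker's
%% option proplist and must return {ok, ClientPid} | {error, Reason}.
-module(demo_ecpool_conn).
-export([connect/1]).

connect(Options) ->
    Host = proplists:get_value(host, Options, "localhost"),
    demo_client:start_link(Host).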
@@ -277,23 +274,20 @@ connect(Options) ->
 
 -spec on_stop(resource_id(), resource_state()) -> term().
 
-on_stop(ResourceID, #{poolname := PoolName}) ->
+on_stop(InstanceID, #{pool_name := PoolName}) ->
     ?SLOG(info, #{
         msg => "stopping clickhouse connector",
-        connector => ResourceID
+        connector => InstanceID
     }),
-    emqx_plugin_libs_pool:stop_pool(PoolName).
+    emqx_resource_pool:stop(PoolName).
 
 %% -------------------------------------------------------------------
 %% on_get_status emqx_resource callback and related functions
 %% -------------------------------------------------------------------
 
 on_get_status(
-    _InstId,
-    #{
-        poolname := PoolName,
-        connect_timeout := Timeout
-    } = State
+    _InstanceID,
+    #{pool_name := PoolName, connect_timeout := Timeout} = State
 ) ->
     case do_get_status(PoolName, Timeout) of
         ok ->
@@ -352,7 +346,7 @@ do_get_status(PoolName, Timeout) ->
 on_query(
     ResourceID,
     {RequestType, DataOrSQL},
-    #{poolname := PoolName} = State
+    #{pool_name := PoolName} = State
 ) ->
     ?SLOG(debug, #{
         msg => "clickhouse connector received sql query",
@@ -391,16 +385,11 @@ query_type(send_message) ->
 on_batch_query(
     ResourceID,
     BatchReq,
-    State
+    #{pool_name := PoolName, templates := Templates} = _State
 ) ->
     %% Currently we only support batch requests with the send_message key
     {Keys, ObjectsToInsert} = lists:unzip(BatchReq),
     ensure_keys_are_of_type_send_message(Keys),
-    %% Pick out the SQL template
-    #{
-        templates := Templates,
-        poolname := PoolName
-    } = State,
     %% Create batch insert SQL statement
     SQL = objects_to_sql(ObjectsToInsert, Templates),
     %% Do the actual query in the database
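Moving the match into the function head removes the mid-body destructuring of `State` without changing behaviour. The batch path itself stays as before: unzip the `{send_message, Object}` pairs, assert the keys, then render one multi-row INSERT from the stored template. Schematically, with `render_rows/2` as a hypothetical stand-in for the module's `objects_to_sql/2`, which this diff does not show:

%% Sketch only: shape of the batch-to-SQL step.
batch_to_sql(BatchReq, Templates) ->
    {Keys, Objects} = lists:unzip(BatchReq),
    %% every request key in a batch must be send_message
    true = lists:all(fun(K) -> K =:= send_message end, Keys),
    %% render_rows/2 is hypothetical; stands in for objects_to_sql/2
    render_rows(Objects, Templates).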
Some files were not shown because too many files have changed in this diff.