From d3ccd8a65dcbcdf8f5d4845bcec78d4636f64303 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Thu, 13 Apr 2023 15:20:04 +0200 Subject: [PATCH 001/263] feat: add date_to_unix_ts/3 function to the rule engine Fixes: https://emqx.atlassian.net/browse/EMQX-9245 --- apps/emqx_rule_engine/src/emqx_rule_funcs.erl | 9 +++++++++ .../test/emqx_rule_funcs_SUITE.erl | 18 ++++++++++++++++++ changes/ce/feat-10392.en.md | 1 + 3 files changed, 28 insertions(+) create mode 100644 changes/ce/feat-10392.en.md diff --git a/apps/emqx_rule_engine/src/emqx_rule_funcs.erl b/apps/emqx_rule_engine/src/emqx_rule_funcs.erl index b8bfeb84c..7f7662b1b 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_funcs.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_funcs.erl @@ -227,6 +227,7 @@ now_timestamp/1, format_date/3, format_date/4, + date_to_unix_ts/3, date_to_unix_ts/4 ]). @@ -1085,6 +1086,14 @@ format_date(TimeUnit, Offset, FormatString, TimeEpoch) -> ) ). +date_to_unix_ts(TimeUnit, FormatString, InputString) -> + emqx_rule_date:parse_date( + time_unit(TimeUnit), + "Z", + emqx_plugin_libs_rule:str(FormatString), + emqx_plugin_libs_rule:str(InputString) + ). + date_to_unix_ts(TimeUnit, Offset, FormatString, InputString) -> emqx_rule_date:parse_date( time_unit(TimeUnit), diff --git a/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl b/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl index 94adb3506..2bce5a1b4 100644 --- a/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl +++ b/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl @@ -1003,6 +1003,24 @@ prop_format_date_fun() -> ) ] ) + ), + %% When no offset is specified, the offset should be taken from the formatted time string + ArgsNoOffset = [<<"second">>, <<"%y-%m-%d-%H:%M:%S%Z">>], + ArgsOffset = [<<"second">>, <<"+08:00">>, <<"%y-%m-%d-%H:%M:%S%Z">>], + ?FORALL( + S, + erlang:system_time(second), + S == + apply_func( + date_to_unix_ts, + ArgsNoOffset ++ + [ + apply_func( + format_date, + ArgsOffset ++ [S] + ) + ] + ) ). %%------------------------------------------------------------------------------ diff --git a/changes/ce/feat-10392.en.md b/changes/ce/feat-10392.en.md new file mode 100644 index 000000000..04c6c85cc --- /dev/null +++ b/changes/ce/feat-10392.en.md @@ -0,0 +1 @@ +A new function to convert a formatted date to an integer timestamp has been added: date_to_unix_ts/3 From a66d01d6f016af6e603cb72862178309b64a8ad8 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Fri, 14 Apr 2023 12:48:03 +0200 Subject: [PATCH 002/263] style: remove code duplication Co-authored-by: Thales Macedo Garitezi --- apps/emqx_rule_engine/src/emqx_rule_funcs.erl | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/apps/emqx_rule_engine/src/emqx_rule_funcs.erl b/apps/emqx_rule_engine/src/emqx_rule_funcs.erl index 7f7662b1b..dfbcdefed 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_funcs.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_funcs.erl @@ -1087,12 +1087,7 @@ format_date(TimeUnit, Offset, FormatString, TimeEpoch) -> ). date_to_unix_ts(TimeUnit, FormatString, InputString) -> - emqx_rule_date:parse_date( - time_unit(TimeUnit), - "Z", - emqx_plugin_libs_rule:str(FormatString), - emqx_plugin_libs_rule:str(InputString) - ). + date_to_unix_ts(TimeUnit, "Z", FormatString, InputString). 
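%% A minimal round-trip sketch of the new 3-arity variant, mirroring the
%% property test added above (the module-qualified calls and the variable
%% name Ts are illustrative): when no offset argument is passed, the offset
%% embedded in the %Z part of the input string is used, so formatting at
%% +08:00 and parsing back without an explicit offset returns the original
%% integer timestamp.
%%
%%   Formatted = emqx_rule_funcs:format_date(
%%       <<"second">>, <<"+08:00">>, <<"%y-%m-%d-%H:%M:%S%Z">>, Ts),
%%   Ts = emqx_rule_funcs:date_to_unix_ts(
%%       <<"second">>, <<"%y-%m-%d-%H:%M:%S%Z">>, Formatted).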
date_to_unix_ts(TimeUnit, Offset, FormatString, InputString) -> emqx_rule_date:parse_date( From 3a511c6229b56ec34f5f987961ee811b4ba82473 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Sat, 15 Apr 2023 19:41:07 +0800 Subject: [PATCH 003/263] fix(gw): load emqx applications before hocon configs checking --- apps/emqx_gateway/src/emqx_gateway_app.erl | 2 +- apps/emqx_gateway/src/emqx_gateway_schema.erl | 2 +- apps/emqx_gateway/src/emqx_gateway_utils.erl | 6 +++--- bin/nodetool | 16 +++++++++++++++- 4 files changed, 20 insertions(+), 6 deletions(-) diff --git a/apps/emqx_gateway/src/emqx_gateway_app.erl b/apps/emqx_gateway/src/emqx_gateway_app.erl index 01a1aaddd..f0406bcaa 100644 --- a/apps/emqx_gateway/src/emqx_gateway_app.erl +++ b/apps/emqx_gateway/src/emqx_gateway_app.erl @@ -45,7 +45,7 @@ load_default_gateway_applications() -> fun(Def) -> load_gateway_application(Def) end, - emqx_gateway_utils:find_gateway_definations() + emqx_gateway_utils:find_gateway_definitions() ). load_gateway_application( diff --git a/apps/emqx_gateway/src/emqx_gateway_schema.erl b/apps/emqx_gateway/src/emqx_gateway_schema.erl index f0e65627f..8c80fc1fa 100644 --- a/apps/emqx_gateway/src/emqx_gateway_schema.erl +++ b/apps/emqx_gateway/src/emqx_gateway_schema.erl @@ -75,7 +75,7 @@ fields(gateway) -> } )} end, - emqx_gateway_utils:find_gateway_definations() + emqx_gateway_utils:find_gateway_definitions() ); fields(clientinfo_override) -> [ diff --git a/apps/emqx_gateway/src/emqx_gateway_utils.erl b/apps/emqx_gateway/src/emqx_gateway_utils.erl index 07185ef42..7a0188387 100644 --- a/apps/emqx_gateway/src/emqx_gateway_utils.erl +++ b/apps/emqx_gateway/src/emqx_gateway_utils.erl @@ -47,7 +47,7 @@ listener_chain/3, make_deprecated_paths/1, make_compatible_schema/2, - find_gateway_definations/0 + find_gateway_definitions/0 ]). -export([stringfy/1]). @@ -564,8 +564,8 @@ make_compatible_schema2(Path, SchemaFun) -> Schema ). --spec find_gateway_definations() -> list(gateway_def()). -find_gateway_definations() -> +-spec find_gateway_definitions() -> list(gateway_def()). +find_gateway_definitions() -> lists:flatten( lists:map( fun(App) -> diff --git a/bin/nodetool b/bin/nodetool index 9a5d5e069..9f32f9b3b 100755 --- a/bin/nodetool +++ b/bin/nodetool @@ -362,6 +362,20 @@ add_libs_dir() -> add_lib_dir(RootDir, Name, Vsn) -> LibDir = filename:join([RootDir, lib, atom_to_list(Name) ++ "-" ++ Vsn, ebin]), case code:add_patha(LibDir) of - true -> ok; + true -> + %% load all applications into application controller, before performing + %% the configuration check of HOCON + %% + %% It helps to implement the feature of dynamically searching schema. + %% See `emqx_gateway_schema:fields(gateway)` + is_emqx_application(Name) andalso application:load(Name), + ok; {error, _} -> error(LibDir) end. + +is_emqx_application(Name) when is_atom(Name) -> + is_emqx_application(atom_to_list(Name)); +is_emqx_application("emqx_" ++ _Rest) -> + true; +is_emqx_application(_) -> + false. From 73c15d9668e552d4b1bf3caad0b04eca8b2b20b9 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Sat, 15 Apr 2023 21:18:37 +0800 Subject: [PATCH 004/263] chore: update changes --- bin/nodetool | 11 +++++++++-- changes/ce/fix-10410.en.md | 2 ++ 2 files changed, 11 insertions(+), 2 deletions(-) create mode 100644 changes/ce/fix-10410.en.md diff --git a/bin/nodetool b/bin/nodetool index 9f32f9b3b..8170e68e2 100755 --- a/bin/nodetool +++ b/bin/nodetool @@ -272,7 +272,7 @@ chkconfig(File) -> end. 
check_license(Config) -> - ok = application:load(emqx_license), + ok = ensure_application_load(emqx_license), %% This checks formal license validity to ensure %% that the node can successfully start with the given license. @@ -368,7 +368,7 @@ add_lib_dir(RootDir, Name, Vsn) -> %% %% It helps to implement the feature of dynamically searching schema. %% See `emqx_gateway_schema:fields(gateway)` - is_emqx_application(Name) andalso application:load(Name), + is_emqx_application(Name) andalso ensure_application_load(Name), ok; {error, _} -> error(LibDir) end. @@ -379,3 +379,10 @@ is_emqx_application("emqx_" ++ _Rest) -> true; is_emqx_application(_) -> false. + +ensure_application_load(Name) -> + case application:load(Name) of + ok -> ok; + {error, {already_loaded, _}} -> ok; + {error, Reason} -> error({failed_to_load_application, Name, Reason}) + end. diff --git a/changes/ce/fix-10410.en.md b/changes/ce/fix-10410.en.md new file mode 100644 index 000000000..48b55ea31 --- /dev/null +++ b/changes/ce/fix-10410.en.md @@ -0,0 +1,2 @@ +Fix EMQX starting failed once any gateways configured in emqx.conf. +This issue was first introduced in v5.0.22 via [#10278](https://github.com/emqx/emqx/pull/10278). From 55376144ce183bb39669cd4f1bdd87bf8ab5e658 Mon Sep 17 00:00:00 2001 From: firest Date: Mon, 17 Apr 2023 10:06:36 +0800 Subject: [PATCH 005/263] fix(limiter): simplify the configuration of the limiter --- apps/emqx/src/emqx_connection.erl | 6 +- .../src/emqx_limiter/src/emqx_htb_limiter.erl | 3 +- .../src/emqx_limiter_bucket_ref.erl | 5 + .../emqx_limiter/src/emqx_limiter_schema.erl | 152 ++++++++++++------ .../emqx_limiter/src/emqx_limiter_server.erl | 46 +++--- apps/emqx/src/emqx_ws_connection.erl | 4 +- apps/emqx/test/emqx_ratelimiter_SUITE.erl | 75 ++++----- rel/i18n/emqx_limiter_schema.hocon | 34 +--- 8 files changed, 183 insertions(+), 142 deletions(-) diff --git a/apps/emqx/src/emqx_connection.erl b/apps/emqx/src/emqx_connection.erl index 8d47f033c..27b6f3e84 100644 --- a/apps/emqx/src/emqx_connection.erl +++ b/apps/emqx/src/emqx_connection.erl @@ -182,10 +182,8 @@ -define(ALARM_SOCK_STATS_KEYS, [send_pend, recv_cnt, recv_oct, send_cnt, send_oct]). -define(ALARM_SOCK_OPTS_KEYS, [high_watermark, high_msgq_watermark, sndbuf, recbuf, buffer]). -%% use macro to do compile time limiter's type check --define(LIMITER_BYTES_IN, bytes_in). --define(LIMITER_MESSAGE_IN, message_in). --define(EMPTY_QUEUE, {[], []}). +-define(LIMITER_BYTES_IN, bytes). +-define(LIMITER_MESSAGE_IN, messages). -dialyzer({no_match, [info/2]}). -dialyzer( diff --git a/apps/emqx/src/emqx_limiter/src/emqx_htb_limiter.erl b/apps/emqx/src/emqx_limiter/src/emqx_htb_limiter.erl index bbebd9460..53f26deb5 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_htb_limiter.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_htb_limiter.erl @@ -139,7 +139,8 @@ make_token_bucket_limiter(Cfg, Bucket) -> Cfg#{ tokens => emqx_limiter_server:get_initial_val(Cfg), lasttime => ?NOW, - bucket => Bucket + bucket => Bucket, + capacity => emqx_limiter_schema:calc_capacity(Cfg) }. 
%%@doc create a limiter server's reference diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_bucket_ref.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_bucket_ref.erl index fe30e41e9..139564df7 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_bucket_ref.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_bucket_ref.erl @@ -23,6 +23,7 @@ %% API -export([ new/3, + infinity_bucket/0, check/3, try_restore/2, available/1 @@ -58,6 +59,10 @@ new(Counter, Index, Rate) -> rate => Rate }. +-spec infinity_bucket() -> bucket_ref(). +infinity_bucket() -> + infinity. + %% @doc check tokens -spec check(pos_integer(), bucket_ref(), Disivisble :: boolean()) -> HasToken :: diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl index f45fc55b6..f59ddc35b 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl @@ -31,20 +31,20 @@ get_bucket_cfg_path/2, desc/1, types/0, - infinity_value/0 + calc_capacity/1 ]). -define(KILOBYTE, 1024). -define(BUCKET_KEYS, [ - {bytes_in, bucket_infinity}, - {message_in, bucket_infinity}, + {bytes, bucket_infinity}, + {messages, bucket_infinity}, {connection, bucket_limit}, {message_routing, bucket_infinity} ]). -type limiter_type() :: - bytes_in - | message_in + bytes + | messages | connection | message_routing %% internal limiter for unclassified resources @@ -90,14 +90,17 @@ namespace() -> limiter. -roots() -> [limiter]. +roots() -> + [{limiter, hoconsc:mk(hoconsc:ref(?MODULE, limiter), #{importance => ?IMPORTANCE_HIDDEN})}]. fields(limiter) -> [ {Type, ?HOCON(?R_REF(node_opts), #{ desc => ?DESC(Type), - default => #{} + default => #{}, + importance => ?IMPORTANCE_HIDDEN, + aliases => alias_of_type(Type) })} || Type <- types() ] ++ @@ -107,6 +110,7 @@ fields(limiter) -> ?R_REF(client_fields), #{ desc => ?DESC(client), + importance => ?IMPORTANCE_HIDDEN, default => maps:from_list([ {erlang:atom_to_binary(Type), #{}} || Type <- types() @@ -124,30 +128,50 @@ fields(node_opts) -> })} ]; fields(client_fields) -> - [ - {Type, - ?HOCON(?R_REF(client_opts), #{ - desc => ?DESC(Type), - default => #{} - })} - || Type <- types() - ]; + client_fields(types(), #{default => #{}}); fields(bucket_infinity) -> [ {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"infinity">>})}, - {capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => <<"infinity">>})}, - {initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})} + {burst, + ?HOCON(capacity(), #{ + desc => ?DESC(capacity), + default => <<"0">>, + importance => ?IMPORTANCE_HIDDEN, + aliases => [capacity] + })}, + {initial, + ?HOCON(initial(), #{ + default => <<"0">>, + desc => ?DESC(initial), + importance => ?IMPORTANCE_HIDDEN + })} ]; fields(bucket_limit) -> [ {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"1000/s">>})}, - {capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => <<"1000">>})}, - {initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})} + {burst, + ?HOCON(capacity(), #{ + desc => ?DESC(burst), + default => <<"0">>, + importance => ?IMPORTANCE_HIDDEN, + aliases => [capacity] + })}, + {initial, + ?HOCON(initial(), #{ + default => <<"0">>, + desc => ?DESC(initial), + importance => ?IMPORTANCE_HIDDEN + })} ]; fields(client_opts) -> [ {rate, ?HOCON(rate(), #{default => <<"infinity">>, desc => ?DESC(rate)})}, - {initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})}, + {initial, 
+ ?HOCON(initial(), #{ + default => <<"0">>, + desc => ?DESC(initial), + importance => ?IMPORTANCE_HIDDEN + })}, %% low_watermark add for emqx_channel and emqx_session %% both modules consume first and then check %% so we need to use this value to prevent excessive consumption @@ -157,20 +181,24 @@ fields(client_opts) -> initial(), #{ desc => ?DESC(low_watermark), - default => <<"0">> + default => <<"0">>, + importance => ?IMPORTANCE_HIDDEN } )}, - {capacity, + {burst, ?HOCON(capacity(), #{ - desc => ?DESC(client_bucket_capacity), - default => <<"infinity">> + desc => ?DESC(burst), + default => <<"0">>, + importance => ?IMPORTANCE_HIDDEN, + aliases => [capacity] })}, {divisible, ?HOCON( boolean(), #{ desc => ?DESC(divisible), - default => false + default => false, + importance => ?IMPORTANCE_HIDDEN } )}, {max_retry_time, @@ -178,7 +206,8 @@ fields(client_opts) -> emqx_schema:duration(), #{ desc => ?DESC(max_retry_time), - default => <<"10s">> + default => <<"10s">>, + importance => ?IMPORTANCE_HIDDEN } )}, {failure_strategy, @@ -186,16 +215,18 @@ fields(client_opts) -> failure_strategy(), #{ desc => ?DESC(failure_strategy), - default => force + default => force, + importance => ?IMPORTANCE_HIDDEN } )} ]; fields(listener_fields) -> - bucket_fields(?BUCKET_KEYS, listener_client_fields); + composite_bucket_fields(?BUCKET_KEYS, listener_client_fields); fields(listener_client_fields) -> - client_fields(?BUCKET_KEYS); + {Types, _} = lists:unzip(?BUCKET_KEYS), + client_fields(Types, #{required => false}); fields(Type) -> - bucket_field(Type). + simple_bucket_field(Type). desc(limiter) -> "Settings for the rate limiter."; @@ -230,19 +261,14 @@ get_bucket_cfg_path(Type, BucketName) -> [limiter, Type, bucket, BucketName]. types() -> - [bytes_in, message_in, connection, message_routing, internal]. + [bytes, messages, connection, message_routing, internal]. -%%-------------------------------------------------------------------- -%% Internal functions -%%-------------------------------------------------------------------- - -%% `infinity` to `infinity_value` rules: -%% 1. all infinity capacity will change to infinity_value -%% 2. if the rate of global and bucket both are `infinity`, -%% use `infinity_value` as bucket rate. see `emqx_limiter_server:get_counter_rate/2` -infinity_value() -> - %% 1 TB - 1099511627776. +calc_capacity(#{rate := infinity}) -> + infinity; +calc_capacity(#{burst := infinity}) -> + infinity; +calc_capacity(#{rate := Rate, burst := Burst}) -> + erlang:floor(1000 * Rate / default_period()) + Burst. %%-------------------------------------------------------------------- %% Internal functions @@ -335,7 +361,7 @@ to_quota(Str, Regex) -> {match, [Quota, ""]} -> {ok, erlang:list_to_integer(Quota)}; {match, ""} -> - {ok, infinity_value()}; + {ok, infinity}; _ -> {error, Str} end @@ -350,7 +376,8 @@ apply_unit("mb", Val) -> Val * ?KILOBYTE * ?KILOBYTE; apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE; apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit). -bucket_field(Type) when is_atom(Type) -> +%% A bucket with only one type +simple_bucket_field(Type) when is_atom(Type) -> fields(bucket_infinity) ++ [ {client, @@ -358,16 +385,22 @@ bucket_field(Type) when is_atom(Type) -> ?R_REF(?MODULE, client_opts), #{ desc => ?DESC(client), - required => false + required => false, + importance => importance_of_type(Type), + aliases => alias_of_type(Type) } )} ]. 
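%% A worked example of the capacity now being derived rather than configured
%% (illustrative, and assuming the limiter's default period is 100 ms, which
%% is not shown in this diff): with rate = "100/100ms", i.e. 100 tokens per
%% period, and burst = 200, calc_capacity/1 above yields
%% floor(1000 * 100 / 100) + 200 = 1200, the value the updated t_capacity
%% case below expects in place of an explicit `capacity` setting.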
-bucket_fields(Types, ClientRef) -> + +%% A bucket with multi types +composite_bucket_fields(Types, ClientRef) -> [ {Type, ?HOCON(?R_REF(?MODULE, Opts), #{ desc => ?DESC(?MODULE, Type), - required => false + required => false, + importance => importance_of_type(Type), + aliases => alias_of_type(Type) })} || {Type, Opts} <- Types ] ++ @@ -382,12 +415,29 @@ bucket_fields(Types, ClientRef) -> )} ]. -client_fields(Types) -> +client_fields(Types, Meta) -> [ {Type, - ?HOCON(?R_REF(client_opts), #{ + ?HOCON(?R_REF(client_opts), Meta#{ desc => ?DESC(Type), - required => false + importance => importance_of_type(Type), + aliases => alias_of_type(Type) })} - || {Type, _} <- Types + || Type <- Types ]. + +importance_of_type(interval) -> + ?IMPORTANCE_HIDDEN; +importance_of_type(message_routing) -> + ?IMPORTANCE_HIDDEN; +importance_of_type(connection) -> + ?IMPORTANCE_HIDDEN; +importance_of_type(_) -> + ?DEFAULT_IMPORTANCE. + +alias_of_type(messages) -> + [message_in]; +alias_of_type(bytess) -> + [bytes_in]; +alias_of_type(_) -> + []. diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl index f1daeaaeb..58db66f82 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl @@ -118,17 +118,24 @@ connect(_Id, _Type, undefined) -> {ok, emqx_htb_limiter:make_infinity_limiter()}; connect(Id, Type, Cfg) -> case find_limiter_cfg(Type, Cfg) of - {undefined, _} -> + {_ClientCfg, undefined, _NodeCfg} -> {ok, emqx_htb_limiter:make_infinity_limiter()}; + {#{rate := infinity}, #{rate := infinity}, #{rate := infinity}} -> + {ok, emqx_htb_limiter:make_infinity_limiter()}; + {ClientCfg, #{rate := infinity}, #{rate := infinity}} -> + {ok, + emqx_htb_limiter:make_token_bucket_limiter( + ClientCfg, emqx_limiter_bucket_ref:infinity_bucket() + )}; { - #{ - rate := BucketRate, - capacity := BucketSize - }, - #{rate := CliRate, capacity := CliSize} = ClientCfg + #{rate := CliRate} = ClientCfg, + #{rate := BucketRate} = BucketCfg, + _ } -> case emqx_limiter_manager:find_bucket(Id, Type) of {ok, Bucket} -> + BucketSize = emqx_limiter_schema:calc_capacity(BucketCfg), + CliSize = emqx_limiter_schema:calc_capacity(ClientCfg), {ok, if CliRate < BucketRate orelse CliSize < BucketSize -> @@ -493,12 +500,14 @@ make_root(#{rate := Rate, burst := Burst}) -> produced => 0.0 }. -do_add_bucket(Id, #{rate := Rate, capacity := Capacity} = Cfg, #{buckets := Buckets} = State) -> +do_add_bucket(_Id, #{rate := infinity}, #{root := #{rate := infinity}} = State) -> + State; +do_add_bucket(Id, #{rate := Rate} = Cfg, #{buckets := Buckets} = State) -> case maps:get(Id, Buckets, undefined) of undefined -> make_bucket(Id, Cfg, State); Bucket -> - Bucket2 = Bucket#{rate := Rate, capacity := Capacity}, + Bucket2 = Bucket#{rate := Rate, capacity := emqx_limiter_schema:calc_capacity(Cfg)}, State#{buckets := Buckets#{Id := Bucket2}} end. 
@@ -509,7 +518,7 @@ make_bucket(Id, Cfg, #{index := ?COUNTER_SIZE} = State) -> }); make_bucket( Id, - #{rate := Rate, capacity := Capacity} = Cfg, + #{rate := Rate} = Cfg, #{type := Type, counter := Counter, index := Index, buckets := Buckets} = State ) -> NewIndex = Index + 1, @@ -519,7 +528,7 @@ make_bucket( rate => Rate, obtained => Initial, correction => 0, - capacity => Capacity, + capacity => emqx_limiter_schema:calc_capacity(Cfg), counter => Counter, index => NewIndex }, @@ -541,19 +550,14 @@ do_del_bucket(Id, #{type := Type, buckets := Buckets} = State) -> get_initial_val( #{ initial := Initial, - rate := Rate, - capacity := Capacity + rate := Rate } ) -> - %% initial will nevner be infinity(see the emqx_limiter_schema) - InfVal = emqx_limiter_schema:infinity_value(), if Initial > 0 -> Initial; Rate =/= infinity -> - erlang:min(Rate, Capacity); - Capacity =/= infinity andalso Capacity =/= InfVal -> - Capacity; + Rate; true -> 0 end. @@ -568,11 +572,12 @@ call(Type, Msg) -> end. find_limiter_cfg(Type, #{rate := _} = Cfg) -> - {Cfg, find_client_cfg(Type, maps:get(client, Cfg, undefined))}; + {find_client_cfg(Type, maps:get(client, Cfg, undefined)), Cfg, find_node_cfg(Type)}; find_limiter_cfg(Type, Cfg) -> { + find_client_cfg(Type, emqx_utils_maps:deep_get([client, Type], Cfg, undefined)), maps:get(Type, Cfg, undefined), - find_client_cfg(Type, emqx_utils_maps:deep_get([client, Type], Cfg, undefined)) + find_node_cfg(Type) }. find_client_cfg(Type, BucketCfg) -> @@ -585,3 +590,6 @@ merge_client_cfg(NodeCfg, undefined) -> NodeCfg; merge_client_cfg(NodeCfg, BucketCfg) -> maps:merge(NodeCfg, BucketCfg). + +find_node_cfg(Type) -> + emqx:get_config([limiter, Type], #{rate => infinity, burst => 0}). diff --git a/apps/emqx/src/emqx_ws_connection.erl b/apps/emqx/src/emqx_ws_connection.erl index 20962809f..faf62f98d 100644 --- a/apps/emqx/src/emqx_ws_connection.erl +++ b/apps/emqx/src/emqx_ws_connection.erl @@ -121,8 +121,8 @@ -define(SOCK_STATS, [recv_oct, recv_cnt, send_oct, send_cnt]). -define(ENABLED(X), (X =/= undefined)). --define(LIMITER_BYTES_IN, bytes_in). --define(LIMITER_MESSAGE_IN, message_in). +-define(LIMITER_BYTES_IN, bytes). +-define(LIMITER_MESSAGE_IN, messages). -dialyzer({no_match, [info/2]}). -dialyzer({nowarn_function, [websocket_init/1]}). 
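A minimal sketch of how the renamed types and fields read in a listener's limiter configuration (illustrative only; the exact listener path is assumed, the values shown are the schema defaults from this patch, and the old names remain accepted through the `aliases` added in emqx_limiter_schema above):

    limiter.messages.rate = "1000/s"     # was limiter.message_in
    limiter.bytes.rate    = "infinity"   # was limiter.bytes_in
    limiter.bytes.burst   = "0"          # `burst` replaces `capacity`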
diff --git a/apps/emqx/test/emqx_ratelimiter_SUITE.erl b/apps/emqx/test/emqx_ratelimiter_SUITE.erl index f3b97d517..7288dcf7c 100644 --- a/apps/emqx/test/emqx_ratelimiter_SUITE.erl +++ b/apps/emqx/test/emqx_ratelimiter_SUITE.erl @@ -72,7 +72,7 @@ t_consume(_) -> Cfg = fun(Cfg) -> Cfg#{ rate := 100, - capacity := 100, + burst := 0, initial := 100, max_retry_time := 1000, failure_strategy := force @@ -89,7 +89,7 @@ t_retry(_) -> Cfg = fun(Cfg) -> Cfg#{ rate := 50, - capacity := 200, + burst := 150, initial := 0, max_retry_time := 1000, failure_strategy := force @@ -109,7 +109,7 @@ t_restore(_) -> Cfg = fun(Cfg) -> Cfg#{ rate := 1, - capacity := 200, + burst := 199, initial := 50, max_retry_time := 100, failure_strategy := force @@ -129,7 +129,7 @@ t_max_retry_time(_) -> Cfg = fun(Cfg) -> Cfg#{ rate := 1, - capacity := 1, + burst := 0, max_retry_time := 500, failure_strategy := drop } @@ -139,8 +139,12 @@ t_max_retry_time(_) -> Begin = ?NOW, Result = emqx_htb_limiter:consume(101, Client), ?assertMatch({drop, _}, Result), - Time = ?NOW - Begin, - ?assert(Time >= 500 andalso Time < 550) + End = ?NOW, + Time = End - Begin, + ?assert( + Time >= 500 andalso Time < 550, + lists:flatten(io_lib:format("Begin:~p, End:~p, Time:~p~n", [Begin, End, Time])) + ) end, with_per_client(Cfg, Case). @@ -150,7 +154,7 @@ t_divisible(_) -> divisible := true, rate := ?RATE("1000/1s"), initial := 600, - capacity := 600 + burst := 0 } end, Case = fun(BucketCfg) -> @@ -176,7 +180,7 @@ t_low_watermark(_) -> low_watermark := 400, rate := ?RATE("1000/1s"), initial := 1000, - capacity := 1000 + burst := 0 } end, Case = fun(BucketCfg) -> @@ -201,8 +205,7 @@ t_infinity_client(_) -> Fun = fun(Cfg) -> Cfg end, Case = fun(Cfg) -> Client = connect(Cfg), - InfVal = emqx_limiter_schema:infinity_value(), - ?assertMatch(#{bucket := #{rate := InfVal}}, Client), + ?assertMatch(infinity, Client), Result = emqx_htb_limiter:check(100000, Client), ?assertEqual({ok, Client}, Result) end, @@ -212,12 +215,12 @@ t_try_restore_agg(_) -> Fun = fun(#{client := Cli} = Bucket) -> Bucket2 = Bucket#{ rate := 1, - capacity := 200, + burst := 199, initial := 50 }, Cli2 = Cli#{ rate := infinity, - capacity := infinity, + burst := infinity, divisible := true, max_retry_time := 100, failure_strategy := force @@ -239,11 +242,11 @@ t_short_board(_) -> Bucket2 = Bucket#{ rate := ?RATE("100/1s"), initial := 0, - capacity := 100 + burst := 0 }, Cli2 = Cli#{ rate := ?RATE("600/1s"), - capacity := 600, + burst := 0, initial := 600 }, Bucket2#{client := Cli2} @@ -261,46 +264,45 @@ t_rate(_) -> Bucket2 = Bucket#{ rate := ?RATE("100/100ms"), initial := 0, - capacity := infinity + burst := infinity }, Cli2 = Cli#{ rate := infinity, - capacity := infinity, + burst := infinity, initial := 0 }, Bucket2#{client := Cli2} end, Case = fun(Cfg) -> + Time = 1000, Client = connect(Cfg), - Ts1 = erlang:system_time(millisecond), C1 = emqx_htb_limiter:available(Client), - timer:sleep(1000), - Ts2 = erlang:system_time(millisecond), + timer:sleep(1100), C2 = emqx_htb_limiter:available(Client), - ShouldInc = floor((Ts2 - Ts1) / 100) * 100, + ShouldInc = floor(Time / 100) * 100, Inc = C2 - C1, ?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate") end, with_bucket(Fun, Case). 
t_capacity(_) -> - Capacity = 600, + Capacity = 1200, Fun = fun(#{client := Cli} = Bucket) -> Bucket2 = Bucket#{ rate := ?RATE("100/100ms"), initial := 0, - capacity := 600 + burst := 200 }, Cli2 = Cli#{ rate := infinity, - capacity := infinity, + burst := infinity, initial := 0 }, Bucket2#{client := Cli2} end, Case = fun(Cfg) -> Client = connect(Cfg), - timer:sleep(1000), + timer:sleep(1500), C1 = emqx_htb_limiter:available(Client), ?assertEqual(Capacity, C1, "test bucket capacity") end, @@ -318,11 +320,11 @@ t_collaborative_alloc(_) -> Bucket2 = Bucket#{ rate := ?RATE("400/1s"), initial := 0, - capacity := 600 + burst := 200 }, Cli2 = Cli#{ rate := ?RATE("50"), - capacity := 100, + burst := 50, initial := 100 }, Bucket2#{client := Cli2} @@ -363,11 +365,11 @@ t_burst(_) -> Bucket2 = Bucket#{ rate := ?RATE("200/1s"), initial := 0, - capacity := 200 + burst := 0 }, Cli2 = Cli#{ rate := ?RATE("50/1s"), - capacity := 200, + burst := 150, divisible := true }, Bucket2#{client := Cli2} @@ -401,11 +403,11 @@ t_limit_global_with_unlimit_other(_) -> Bucket2 = Bucket#{ rate := infinity, initial := 0, - capacity := infinity + burst := infinity }, Cli2 = Cli#{ rate := infinity, - capacity := infinity, + burst := infinity, initial := 0 }, Bucket2#{client := Cli2} @@ -414,7 +416,7 @@ t_limit_global_with_unlimit_other(_) -> Case = fun() -> C1 = counters:new(1, []), start_client({b1, Bucket}, ?NOW + 2000, C1, 20), - timer:sleep(2100), + timer:sleep(2200), check_average_rate(C1, 2, 600) end, @@ -432,7 +434,7 @@ t_check_container(_) -> Cfg#{ rate := ?RATE("1000/1s"), initial := 1000, - capacity := 1000 + burst := 0 } end, Case = fun(#{client := Client} = BucketCfg) -> @@ -565,7 +567,7 @@ t_schema_unit(_) -> ?assertMatch({error, _}, M:to_rate("100MB/1")), ?assertMatch({error, _}, M:to_rate("100/10x")), - ?assertEqual({ok, emqx_limiter_schema:infinity_value()}, M:to_capacity("infinity")), + ?assertEqual({ok, infinity}, M:to_capacity("infinity")), ?assertEqual({ok, 100}, M:to_capacity("100")), ?assertEqual({ok, 100 * 1024}, M:to_capacity("100KB")), ?assertEqual({ok, 100 * 1024 * 1024}, M:to_capacity("100MB")), @@ -748,17 +750,16 @@ connect(Name, Cfg) -> Limiter. make_limiter_cfg() -> - Infinity = emqx_limiter_schema:infinity_value(), Client = #{ - rate => Infinity, + rate => infinity, initial => 0, - capacity => Infinity, + burst => infinity, low_watermark => 0, divisible => false, max_retry_time => timer:seconds(5), failure_strategy => force }, - #{client => Client, rate => Infinity, initial => 0, capacity => Infinity}. + #{client => Client, rate => infinity, initial => 0, burst => infinity}. add_bucket(Cfg) -> add_bucket(?MODULE, Cfg). diff --git a/rel/i18n/emqx_limiter_schema.hocon b/rel/i18n/emqx_limiter_schema.hocon index 3657df694..2874999a5 100644 --- a/rel/i18n/emqx_limiter_schema.hocon +++ b/rel/i18n/emqx_limiter_schema.hocon @@ -33,28 +33,6 @@ emqx_limiter_schema { } } - client_bucket_capacity { - desc { - en: """The capacity of per user.""" - zh: """每个使用者的令牌容量上限""" - } - label: { - en: """Capacity""" - zh: """容量""" - } - } - - capacity { - desc { - en: """The capacity of this token bucket.""" - zh: """该令牌桶的容量""" - } - label: { - en: """Capacity""" - zh: """容量""" - } - } - low_watermark { desc { en: """If the remaining tokens are lower than this value, @@ -152,30 +130,30 @@ Once the limit is reached, new connections will be refused""" } } - message_in { + messages { desc { - en: """The message in limiter. + en: """The messages limiter. 
This is used to limit the inbound message numbers for this EMQX node Once the limit is reached, the restricted client will be slow down even be hung for a while.""" zh: """流入速率控制器。 这个用来控制当前节点上的消息流入速率,当达到最大速率后,会话将会被限速甚至被强制挂起一小段时间""" } label: { - en: """Message In""" + en: """Messages""" zh: """消息流入速率""" } } - bytes_in { + bytes { desc { - en: """The bytes_in limiter. + en: """The bytes limiter. This is used to limit the inbound bytes rate for this EMQX node. Once the limit is reached, the restricted client will be slow down even be hung for a while.""" zh: """流入字节率控制器。 这个是用来控制当前节点上的数据流入的字节率,每条消息将会消耗和其二进制大小等量的令牌,当达到最大速率后,会话将会被限速甚至被强制挂起一小段时间""" } label: { - en: """Bytes In""" + en: """Bytes""" zh: """流入字节率""" } } From 2a54d93c7e05851b994396d23214a33a5a600002 Mon Sep 17 00:00:00 2001 From: firest Date: Wed, 12 Apr 2023 17:42:23 +0800 Subject: [PATCH 006/263] chore: update changes --- changes/ce/perf-10376.en.md | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 changes/ce/perf-10376.en.md diff --git a/changes/ce/perf-10376.en.md b/changes/ce/perf-10376.en.md new file mode 100644 index 000000000..d585ad5b2 --- /dev/null +++ b/changes/ce/perf-10376.en.md @@ -0,0 +1,6 @@ +Simplify the configuration of the limiter feature and optimize some codes +- Rename `message_in` to `messages` +- Rename `bytes_in` to `bytes` +- Use `burst` instead of `capacity` +- Hide non-importance fields +- Optimize limiter instances in different rate settings From 02f8d073f8dc125675bde9cc67bd5b8c8d013a17 Mon Sep 17 00:00:00 2001 From: firest Date: Thu, 13 Apr 2023 11:45:51 +0800 Subject: [PATCH 007/263] test(limiter): fix test errors and make spellcheck happy --- .../emqx_limiter/src/emqx_limiter_schema.erl | 2 +- apps/emqx/test/emqx_channel_SUITE.erl | 9 +++--- apps/emqx/test/emqx_connection_SUITE.erl | 22 +++++++------- apps/emqx/test/emqx_ws_connection_SUITE.erl | 29 +++++++++---------- .../test/emqx_retainer_SUITE.erl | 9 +++--- rel/i18n/emqx_limiter_schema.hocon | 4 +-- 6 files changed, 34 insertions(+), 41 deletions(-) diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl index f59ddc35b..730559f80 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl @@ -437,7 +437,7 @@ importance_of_type(_) -> alias_of_type(messages) -> [message_in]; -alias_of_type(bytess) -> +alias_of_type(bytes) -> [bytes_in]; alias_of_type(_) -> []. diff --git a/apps/emqx/test/emqx_channel_SUITE.erl b/apps/emqx/test/emqx_channel_SUITE.erl index 29f8b1503..eccb5c865 100644 --- a/apps/emqx/test/emqx_channel_SUITE.erl +++ b/apps/emqx/test/emqx_channel_SUITE.erl @@ -162,8 +162,7 @@ limiter_conf() -> Make = fun() -> #{ burst => 0, - rate => infinity, - capacity => infinity + rate => infinity } end, @@ -172,7 +171,7 @@ limiter_conf() -> Acc#{Name => Make()} end, #{}, - [bytes_in, message_in, message_routing, connection, internal] + [bytes, messages, message_routing, connection, internal] ). stats_conf() -> @@ -1258,7 +1257,7 @@ limiter_cfg() -> Client = #{ rate => 5, initial => 0, - capacity => 5, + burst => 0, low_watermark => 1, divisible => false, max_retry_time => timer:seconds(5), @@ -1270,7 +1269,7 @@ limiter_cfg() -> }. bucket_cfg() -> - #{rate => 10, initial => 0, capacity => 10}. + #{rate => 10, initial => 0, burst => 0}. add_bucket() -> emqx_limiter_server:add_bucket(?MODULE, message_routing, bucket_cfg()). 
diff --git a/apps/emqx/test/emqx_connection_SUITE.erl b/apps/emqx/test/emqx_connection_SUITE.erl index 21ed45119..f24c1c895 100644 --- a/apps/emqx/test/emqx_connection_SUITE.erl +++ b/apps/emqx/test/emqx_connection_SUITE.erl @@ -427,7 +427,7 @@ t_ensure_rate_limit(_) -> fun(_, Client) -> {pause, 3000, undefined, Client} end ), {ok, State2} = emqx_connection:check_limiter( - [{1000, bytes_in}], + [{1000, bytes}], [], WhenOk, [], @@ -703,31 +703,29 @@ handle_call(Pid, Call, St) -> emqx_connection:handle_call(Pid, Call, St). -define(LIMITER_ID, 'tcp:default'). init_limiter() -> - emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], limiter_cfg()). + emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes, messages], limiter_cfg()). limiter_cfg() -> - Infinity = emqx_limiter_schema:infinity_value(), Cfg = bucket_cfg(), Client = #{ - rate => Infinity, + rate => infinity, initial => 0, - capacity => Infinity, + burst => 0, low_watermark => 1, divisible => false, max_retry_time => timer:seconds(5), failure_strategy => force }, - #{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}. + #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}. bucket_cfg() -> - Infinity = emqx_limiter_schema:infinity_value(), - #{rate => Infinity, initial => 0, capacity => Infinity}. + #{rate => infinity, initial => 0, burst => 0}. add_bucket() -> Cfg = bucket_cfg(), - emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg), - emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg). + emqx_limiter_server:add_bucket(?LIMITER_ID, bytes, Cfg), + emqx_limiter_server:add_bucket(?LIMITER_ID, messages, Cfg). del_bucket() -> - emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in), - emqx_limiter_server:del_bucket(?LIMITER_ID, message_in). + emqx_limiter_server:del_bucket(?LIMITER_ID, bytes), + emqx_limiter_server:del_bucket(?LIMITER_ID, messages). diff --git a/apps/emqx/test/emqx_ws_connection_SUITE.erl b/apps/emqx/test/emqx_ws_connection_SUITE.erl index de8b1c9af..1ae23361e 100644 --- a/apps/emqx/test/emqx_ws_connection_SUITE.erl +++ b/apps/emqx/test/emqx_ws_connection_SUITE.erl @@ -509,16 +509,16 @@ t_handle_timeout_emit_stats(_) -> t_ensure_rate_limit(_) -> {ok, Rate} = emqx_limiter_schema:to_rate("50MB"), Limiter = init_limiter(#{ - bytes_in => bucket_cfg(), - message_in => bucket_cfg(), - client => #{bytes_in => client_cfg(Rate)} + bytes => bucket_cfg(), + messages => bucket_cfg(), + client => #{bytes => client_cfg(Rate)} }), St = st(#{limiter => Limiter}), %% must bigger than value in emqx_ratelimit_SUITE {ok, Need} = emqx_limiter_schema:to_capacity("1GB"), St1 = ?ws_conn:check_limiter( - [{Need, bytes_in}], + [{Need, bytes}], [], fun(_, _, S) -> S end, [], @@ -699,23 +699,21 @@ init_limiter() -> init_limiter(limiter_cfg()). init_limiter(LimiterCfg) -> - emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], LimiterCfg). + emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes, messages], LimiterCfg). limiter_cfg() -> Cfg = bucket_cfg(), Client = client_cfg(), - #{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}. + #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}. client_cfg() -> - Infinity = emqx_limiter_schema:infinity_value(), - client_cfg(Infinity). + client_cfg(infinity). 
client_cfg(Rate) -> - Infinity = emqx_limiter_schema:infinity_value(), #{ rate => Rate, initial => 0, - capacity => Infinity, + burst => 0, low_watermark => 1, divisible => false, max_retry_time => timer:seconds(5), @@ -723,14 +721,13 @@ client_cfg(Rate) -> }. bucket_cfg() -> - Infinity = emqx_limiter_schema:infinity_value(), - #{rate => Infinity, initial => 0, capacity => Infinity}. + #{rate => infinity, initial => 0, burst => 0}. add_bucket() -> Cfg = bucket_cfg(), - emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg), - emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg). + emqx_limiter_server:add_bucket(?LIMITER_ID, bytes, Cfg), + emqx_limiter_server:add_bucket(?LIMITER_ID, messages, Cfg). del_bucket() -> - emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in), - emqx_limiter_server:del_bucket(?LIMITER_ID, message_in). + emqx_limiter_server:del_bucket(?LIMITER_ID, bytes), + emqx_limiter_server:del_bucket(?LIMITER_ID, messages). diff --git a/apps/emqx_retainer/test/emqx_retainer_SUITE.erl b/apps/emqx_retainer/test/emqx_retainer_SUITE.erl index 09d1f77da..c90ec6b2b 100644 --- a/apps/emqx_retainer/test/emqx_retainer_SUITE.erl +++ b/apps/emqx_retainer/test/emqx_retainer_SUITE.erl @@ -758,23 +758,22 @@ with_conf(ConfMod, Case) -> end. make_limiter_cfg(Rate) -> - Infinity = emqx_limiter_schema:infinity_value(), Client = #{ rate => Rate, initial => 0, - capacity => Infinity, + burst => 0, low_watermark => 1, divisible => false, max_retry_time => timer:seconds(5), failure_strategy => force }, - #{client => Client, rate => Infinity, initial => 0, capacity => Infinity}. + #{client => Client, rate => Rate, initial => 0, burst => 0}. make_limiter_json(Rate) -> Client = #{ <<"rate">> => Rate, <<"initial">> => 0, - <<"capacity">> => <<"infinity">>, + <<"burst">> => <<"0">>, <<"low_watermark">> => 0, <<"divisible">> => <<"false">>, <<"max_retry_time">> => <<"5s">>, @@ -784,5 +783,5 @@ make_limiter_json(Rate) -> <<"client">> => Client, <<"rate">> => <<"infinity">>, <<"initial">> => 0, - <<"capacity">> => <<"infinity">> + <<"burst">> => <<"0">> }. diff --git a/rel/i18n/emqx_limiter_schema.hocon b/rel/i18n/emqx_limiter_schema.hocon index 2874999a5..37eb4ee1e 100644 --- a/rel/i18n/emqx_limiter_schema.hocon +++ b/rel/i18n/emqx_limiter_schema.hocon @@ -132,7 +132,7 @@ Once the limit is reached, new connections will be refused""" messages { desc { - en: """The messages limiter. + en: """The `messages` limiter. This is used to limit the inbound message numbers for this EMQX node Once the limit is reached, the restricted client will be slow down even be hung for a while.""" zh: """流入速率控制器。 @@ -146,7 +146,7 @@ Once the limit is reached, the restricted client will be slow down even be hung bytes { desc { - en: """The bytes limiter. + en: """The `bytes` limiter. This is used to limit the inbound bytes rate for this EMQX node. 
Once the limit is reached, the restricted client will be slow down even be hung for a while.""" zh: """流入字节率控制器。 From e9e0ae7f0a55eb2ea31a10be2ff12a777381adec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9F=90=E6=96=87?= Date: Mon, 17 Apr 2023 17:23:39 +0800 Subject: [PATCH 008/263] chore: When matching authz's and/or rules, check the simple ones first to improve efficiency --- apps/emqx/etc/ssl_dist.conf | 2 +- apps/emqx_authz/src/emqx_authz_rule.erl | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apps/emqx/etc/ssl_dist.conf b/apps/emqx/etc/ssl_dist.conf index af1c7506d..b4c16e2cc 100644 --- a/apps/emqx/etc/ssl_dist.conf +++ b/apps/emqx/etc/ssl_dist.conf @@ -1,4 +1,4 @@ -%% This additional config file is used when the config 'cluster.proto_dis' in emqx.conf is set to 'inet_tls'. +%% This additional config file is used when the config 'cluster.proto_dist' in emqx.conf is set to 'inet_tls'. %% Which means the EMQX nodes will connect to each other over TLS. %% For more information about inter-broker security, see: https://docs.emqx.com/en/enterprise/v5.0/deploy/cluster/security.html diff --git a/apps/emqx_authz/src/emqx_authz_rule.erl b/apps/emqx_authz/src/emqx_authz_rule.erl index 306ca9433..bdd0904f7 100644 --- a/apps/emqx_authz/src/emqx_authz_rule.erl +++ b/apps/emqx_authz/src/emqx_authz_rule.erl @@ -185,7 +185,7 @@ match_who(#{peerhost := IpAddress}, {ipaddrs, CIDRs}) -> match_who(ClientInfo, {'and', Principals}) when is_list(Principals) -> lists:foldl( fun(Principal, Permission) -> - match_who(ClientInfo, Principal) andalso Permission + Permission andalso match_who(ClientInfo, Principal) end, true, Principals @@ -193,7 +193,7 @@ match_who(ClientInfo, {'and', Principals}) when is_list(Principals) -> match_who(ClientInfo, {'or', Principals}) when is_list(Principals) -> lists:foldl( fun(Principal, Permission) -> - match_who(ClientInfo, Principal) orelse Permission + Permission orelse match_who(ClientInfo, Principal) end, false, Principals From 549283ae75941a5ec9d361fa32d0a10f9fe8dd99 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Mon, 17 Apr 2023 17:35:50 +0800 Subject: [PATCH 009/263] chore: update changes/ce/fix-10410.en.md Co-authored-by: Zaiming (Stone) Shi --- changes/ce/fix-10410.en.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/changes/ce/fix-10410.en.md b/changes/ce/fix-10410.en.md index 48b55ea31..aa219c96e 100644 --- a/changes/ce/fix-10410.en.md +++ b/changes/ce/fix-10410.en.md @@ -1,2 +1,2 @@ -Fix EMQX starting failed once any gateways configured in emqx.conf. -This issue was first introduced in v5.0.22 via [#10278](https://github.com/emqx/emqx/pull/10278). +Fix config check failed when gateways are configured in emqx.conf. +This issue was first introduced in v5.0.22 via [#10278](https://github.com/emqx/emqx/pull/10278), the boot-time config check was missing. 
From 37f42a486c990f6151efa51dea2e7c2aa2a8a283 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Mon, 17 Apr 2023 12:03:13 +0200 Subject: [PATCH 010/263] test: improve proper test so it generates more cases --- apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl b/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl index 2bce5a1b4..4dc9280da 100644 --- a/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl +++ b/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl @@ -959,7 +959,7 @@ prop_format_date_fun() -> Args1 = [<<"second">>, <<"+07:00">>, <<"%m--%d--%y---%H:%M:%S%Z">>], ?FORALL( S, - erlang:system_time(second), + range(0, 4000000000), S == apply_func( date_to_unix_ts, @@ -975,7 +975,7 @@ prop_format_date_fun() -> Args2 = [<<"millisecond">>, <<"+04:00">>, <<"--%m--%d--%y---%H:%M:%S%Z">>], ?FORALL( S, - erlang:system_time(millisecond), + range(0, 4000000000), S == apply_func( date_to_unix_ts, @@ -991,7 +991,7 @@ prop_format_date_fun() -> Args = [<<"second">>, <<"+08:00">>, <<"%y-%m-%d-%H:%M:%S%Z">>], ?FORALL( S, - erlang:system_time(second), + range(0, 4000000000), S == apply_func( date_to_unix_ts, @@ -1009,7 +1009,7 @@ prop_format_date_fun() -> ArgsOffset = [<<"second">>, <<"+08:00">>, <<"%y-%m-%d-%H:%M:%S%Z">>], ?FORALL( S, - erlang:system_time(second), + range(0, 4000000000), S == apply_func( date_to_unix_ts, From 334058eeec5532aedba75a1f540f19672f32f736 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Wed, 5 Apr 2023 12:23:19 +0200 Subject: [PATCH 011/263] build: add a script to split en and zh descriptions --- scripts/split-i18n-files.escript | 78 ++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100755 scripts/split-i18n-files.escript diff --git a/scripts/split-i18n-files.escript b/scripts/split-i18n-files.escript new file mode 100755 index 000000000..5910db667 --- /dev/null +++ b/scripts/split-i18n-files.escript @@ -0,0 +1,78 @@ +#!/usr/bin/env escript + +%% This script is for one-time use. +%% will be deleted after the migration is done. + +-mode(compile). + +main([]) -> + %% we need to parse hocon + %% so we'll just add all compiled libs to path + code:add_pathsz(find_ebin_paths("_build/default/lib/*")), + Files = filelib:wildcard("rel/i18n/*.hocon"), + ok = lists:foreach(fun split_file/1, Files), + ok. + +find_ebin_paths(DirPattern) -> + LibDirs = filelib:wildcard(DirPattern), + lists:filtermap(fun add_ebin/1, LibDirs). + +add_ebin(Dir) -> + EbinDir = filename:join(Dir, "ebin"), + case filelib:is_dir(EbinDir) of + true -> {true, EbinDir}; + false -> false + end. + +split_file(Path) -> + {ok, DescMap} = hocon:load(Path), + [{Module, Descs}] = maps:to_list(DescMap), + ok = split(Path, Module, <<"en">>, Descs), + ok = split(Path, Module, <<"zh">>, Descs), + ok. + +split(Path, Module, Lang, Fields) when is_map(Fields) -> + split(Path, Module, Lang, maps:to_list(Fields)); +split(Path, Module, Lang, Fields) when is_list(Fields) -> + Split = lists:map(fun({Name, Desc})-> do_split(Path, Name, Lang, Desc) end, Fields), + IoData = [Module, " {\n\n", Split, "}\n"], + %% assert it's a valid HOCON object + {ok, _} = hocon:binary(IoData), + %io:format(user, "~s", [IoData]). + WritePath = case Lang of + <<"en">> -> + Path; + <<"zh">> -> + rename(Path, "zh") + end, + ok = filelib:ensure_dir(WritePath), + ok = file:write_file(WritePath, IoData), + ok. 
+ +rename(FilePath, Lang) -> + Dir = filename:dirname(FilePath), + BaseName = filename:basename(FilePath), + filename:join([Dir, Lang, BaseName]). + +do_split(Path, Name, Lang, #{<<"desc">> := Desc} = D) -> + try + Label = maps:get(<<"label">>, D, #{}), + DescL = maps:get(Lang, Desc), + LabelL = maps:get(Lang, Label, undefined), + [fmt([Name, ".desc:\n"], DescL), + fmt([Name, ".label:\n"], LabelL) + ] + catch + C : E : S-> + erlang:raise(C, {Path, Name, E}, S) + end. + + +tq() -> + "\"\"\"". + +fmt(_Key, undefined) -> + []; +fmt(Key, Content) -> + [Key, tq(), Content, tq(), "\n\n"]. + From b9a43ead393403b5b66aa75864bce5d17e08a3d1 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Mon, 17 Apr 2023 13:35:33 +0200 Subject: [PATCH 012/263] refactor: Split i18n files --- rel/i18n/emqx_authn_api.hocon | 247 +- rel/i18n/emqx_authn_http.hocon | 108 +- rel/i18n/emqx_authn_jwt.hocon | 297 +- rel/i18n/emqx_authn_mnesia.hocon | 23 +- rel/i18n/emqx_authn_mongodb.hocon | 106 +- rel/i18n/emqx_authn_mysql.hocon | 40 +- rel/i18n/emqx_authn_pgsql.hocon | 25 +- rel/i18n/emqx_authn_redis.hocon | 41 +- rel/i18n/emqx_authn_schema.hocon | 332 +- rel/i18n/emqx_authn_user_import_api.hocon | 16 +- rel/i18n/emqx_authz_api_cache.hocon | 10 +- rel/i18n/emqx_authz_api_mnesia.hocon | 218 +- rel/i18n/emqx_authz_api_schema.hocon | 227 +- rel/i18n/emqx_authz_api_settings.hocon | 18 +- rel/i18n/emqx_authz_api_sources.hocon | 136 +- rel/i18n/emqx_authz_schema.hocon | 778 ++-- rel/i18n/emqx_auto_subscribe_api.hocon | 25 +- rel/i18n/emqx_auto_subscribe_schema.hocon | 107 +- rel/i18n/emqx_bridge_api.hocon | 242 +- rel/i18n/emqx_bridge_kafka.hocon | 1020 ++--- rel/i18n/emqx_bridge_mqtt_schema.hocon | 47 +- rel/i18n/emqx_bridge_schema.hocon | 420 +- rel/i18n/emqx_bridge_webhook_schema.hocon | 224 +- rel/i18n/emqx_coap_api.hocon | 65 +- rel/i18n/emqx_coap_schema.hocon | 82 +- rel/i18n/emqx_conf_schema.hocon | 2027 +++------ rel/i18n/emqx_connector_api.hocon | 110 +- rel/i18n/emqx_connector_http.hocon | 183 +- rel/i18n/emqx_connector_ldap.hocon | 44 +- rel/i18n/emqx_connector_mongo.hocon | 375 +- rel/i18n/emqx_connector_mqtt.hocon | 46 +- rel/i18n/emqx_connector_mqtt_schema.hocon | 429 +- rel/i18n/emqx_connector_mysql.hocon | 17 +- rel/i18n/emqx_connector_pgsql.hocon | 17 +- rel/i18n/emqx_connector_redis.hocon | 110 +- rel/i18n/emqx_connector_schema_lib.hocon | 105 +- rel/i18n/emqx_dashboard_api.hocon | 168 +- rel/i18n/emqx_dashboard_schema.hocon | 346 +- rel/i18n/emqx_delayed_api.hocon | 184 +- rel/i18n/emqx_ee_bridge_cassa.hocon | 97 +- rel/i18n/emqx_ee_bridge_clickhouse.hocon | 110 +- rel/i18n/emqx_ee_bridge_dynamo.hocon | 93 +- rel/i18n/emqx_ee_bridge_gcp_pubsub.hocon | 191 +- rel/i18n/emqx_ee_bridge_hstreamdb.hocon | 127 +- rel/i18n/emqx_ee_bridge_influxdb.hocon | 105 +- rel/i18n/emqx_ee_bridge_mongodb.hocon | 137 +- rel/i18n/emqx_ee_bridge_mysql.hocon | 93 +- rel/i18n/emqx_ee_bridge_pgsql.hocon | 93 +- rel/i18n/emqx_ee_bridge_redis.hocon | 103 +- rel/i18n/emqx_ee_bridge_rocketmq.hocon | 93 +- rel/i18n/emqx_ee_bridge_sqlserver.hocon | 112 +- rel/i18n/emqx_ee_bridge_tdengine.hocon | 93 +- rel/i18n/emqx_ee_connector_cassa.hocon | 33 +- rel/i18n/emqx_ee_connector_clickhouse.hocon | 30 +- rel/i18n/emqx_ee_connector_dynamo.hocon | 15 +- rel/i18n/emqx_ee_connector_hstreamdb.hocon | 111 +- rel/i18n/emqx_ee_connector_influxdb.hocon | 177 +- rel/i18n/emqx_ee_connector_rocketmq.hocon | 79 +- rel/i18n/emqx_ee_connector_sqlserver.hocon | 17 +- rel/i18n/emqx_ee_connector_tdengine.hocon | 17 +- 
.../emqx_ee_schema_registry_http_api.hocon | 94 +- rel/i18n/emqx_ee_schema_registry_schema.hocon | 107 +- rel/i18n/emqx_exhook_api.hocon | 205 +- rel/i18n/emqx_exhook_schema.hocon | 118 +- rel/i18n/emqx_exproto_schema.hocon | 60 +- rel/i18n/emqx_gateway_api.hocon | 197 +- rel/i18n/emqx_gateway_api_authn.hocon | 114 +- rel/i18n/emqx_gateway_api_clients.hocon | 679 +-- rel/i18n/emqx_gateway_api_listeners.hocon | 173 +- rel/i18n/emqx_gateway_schema.hocon | 326 +- rel/i18n/emqx_license_http_api.hocon | 32 +- rel/i18n/emqx_license_schema.hocon | 78 +- rel/i18n/emqx_limiter_schema.hocon | 238 +- rel/i18n/emqx_lwm2m_api.hocon | 65 +- rel/i18n/emqx_lwm2m_schema.hocon | 145 +- rel/i18n/emqx_mgmt_api_alarms.hocon | 95 +- rel/i18n/emqx_mgmt_api_banned.hocon | 136 +- rel/i18n/emqx_mgmt_api_key_schema.hocon | 36 +- rel/i18n/emqx_mgmt_api_publish.hocon | 185 +- rel/i18n/emqx_mgmt_api_status.hocon | 61 +- rel/i18n/emqx_modules_schema.hocon | 93 +- rel/i18n/emqx_mqttsn_schema.hocon | 73 +- rel/i18n/emqx_plugins_schema.hocon | 126 +- rel/i18n/emqx_prometheus_schema.hocon | 118 +- rel/i18n/emqx_psk_schema.hocon | 61 +- rel/i18n/emqx_resource_schema.hocon | 282 +- rel/i18n/emqx_retainer_api.hocon | 160 +- rel/i18n/emqx_retainer_schema.hocon | 138 +- rel/i18n/emqx_rewrite_api.hocon | 25 +- rel/i18n/emqx_rule_api_schema.hocon | 945 ++-- rel/i18n/emqx_rule_engine_api.hocon | 237 +- rel/i18n/emqx_rule_engine_schema.hocon | 514 +-- rel/i18n/emqx_schema.hocon | 4048 ++++++----------- rel/i18n/emqx_slow_subs_api.hocon | 72 +- rel/i18n/emqx_slow_subs_schema.hocon | 40 +- rel/i18n/emqx_statsd_api.hocon | 17 +- rel/i18n/emqx_statsd_schema.hocon | 73 +- rel/i18n/emqx_stomp_schema.hocon | 38 +- rel/i18n/emqx_telemetry_api.hocon | 137 +- rel/i18n/emqx_topic_metrics_api.hocon | 273 +- rel/i18n/zh/emqx_authn_api.hocon | 96 + rel/i18n/zh/emqx_authn_http.hocon | 45 + rel/i18n/zh/emqx_authn_jwt.hocon | 118 + rel/i18n/zh/emqx_authn_mnesia.hocon | 12 + rel/i18n/zh/emqx_authn_mongodb.hocon | 45 + rel/i18n/zh/emqx_authn_mysql.hocon | 18 + rel/i18n/zh/emqx_authn_pgsql.hocon | 12 + rel/i18n/zh/emqx_authn_redis.hocon | 18 + rel/i18n/zh/emqx_authn_schema.hocon | 135 + rel/i18n/zh/emqx_authn_user_import_api.hocon | 9 + rel/i18n/zh/emqx_authz_api_cache.hocon | 6 + rel/i18n/zh/emqx_authz_api_mnesia.hocon | 87 + rel/i18n/zh/emqx_authz_api_schema.hocon | 90 + rel/i18n/zh/emqx_authz_api_settings.hocon | 9 + rel/i18n/zh/emqx_authz_api_sources.hocon | 48 + rel/i18n/zh/emqx_authz_schema.hocon | 285 ++ rel/i18n/zh/emqx_auto_subscribe_api.hocon | 12 + rel/i18n/zh/emqx_auto_subscribe_schema.hocon | 48 + rel/i18n/zh/emqx_bridge_api.hocon | 100 + rel/i18n/zh/emqx_bridge_kafka.hocon | 354 ++ rel/i18n/zh/emqx_bridge_mqtt_schema.hocon | 21 + rel/i18n/zh/emqx_bridge_schema.hocon | 158 + rel/i18n/zh/emqx_bridge_webhook_schema.hocon | 87 + rel/i18n/zh/emqx_coap_api.hocon | 27 + rel/i18n/zh/emqx_coap_schema.hocon | 37 + rel/i18n/zh/emqx_conf_schema.hocon | 774 ++++ rel/i18n/zh/emqx_connector_api.hocon | 46 + rel/i18n/zh/emqx_connector_http.hocon | 77 + rel/i18n/zh/emqx_connector_ldap.hocon | 21 + rel/i18n/zh/emqx_connector_mongo.hocon | 152 + rel/i18n/zh/emqx_connector_mqtt.hocon | 21 + rel/i18n/zh/emqx_connector_mqtt_schema.hocon | 170 + rel/i18n/zh/emqx_connector_mysql.hocon | 11 + rel/i18n/zh/emqx_connector_pgsql.hocon | 11 + rel/i18n/zh/emqx_connector_redis.hocon | 50 + rel/i18n/zh/emqx_connector_schema_lib.hocon | 45 + rel/i18n/zh/emqx_dashboard_api.hocon | 66 + rel/i18n/zh/emqx_dashboard_schema.hocon | 130 + rel/i18n/zh/emqx_delayed_api.hocon | 72 
+ rel/i18n/zh/emqx_ee_bridge_cassa.hocon | 40 + rel/i18n/zh/emqx_ee_bridge_clickhouse.hocon | 46 + rel/i18n/zh/emqx_ee_bridge_dynamo.hocon | 40 + rel/i18n/zh/emqx_ee_bridge_gcp_pubsub.hocon | 77 + rel/i18n/zh/emqx_ee_bridge_hstreamdb.hocon | 52 + rel/i18n/zh/emqx_ee_bridge_influxdb.hocon | 47 + rel/i18n/zh/emqx_ee_bridge_mongodb.hocon | 57 + rel/i18n/zh/emqx_ee_bridge_mysql.hocon | 40 + rel/i18n/zh/emqx_ee_bridge_pgsql.hocon | 40 + rel/i18n/zh/emqx_ee_bridge_redis.hocon | 41 + rel/i18n/zh/emqx_ee_bridge_rocketmq.hocon | 40 + rel/i18n/zh/emqx_ee_bridge_sqlserver.hocon | 46 + rel/i18n/zh/emqx_ee_bridge_tdengine.hocon | 40 + rel/i18n/zh/emqx_ee_connector_cassa.hocon | 17 + .../zh/emqx_ee_connector_clickhouse.hocon | 15 + rel/i18n/zh/emqx_ee_connector_dynamo.hocon | 9 + rel/i18n/zh/emqx_ee_connector_hstreamdb.hocon | 45 + rel/i18n/zh/emqx_ee_connector_influxdb.hocon | 71 + rel/i18n/zh/emqx_ee_connector_rocketmq.hocon | 35 + rel/i18n/zh/emqx_ee_connector_sqlserver.hocon | 11 + rel/i18n/zh/emqx_ee_connector_tdengine.hocon | 11 + .../zh/emqx_ee_schema_registry_http_api.hocon | 39 + .../zh/emqx_ee_schema_registry_schema.hocon | 45 + rel/i18n/zh/emqx_exhook_api.hocon | 81 + rel/i18n/zh/emqx_exhook_schema.hocon | 43 + rel/i18n/zh/emqx_exproto_schema.hocon | 26 + rel/i18n/zh/emqx_gateway_api.hocon | 73 + rel/i18n/zh/emqx_gateway_api_authn.hocon | 45 + rel/i18n/zh/emqx_gateway_api_clients.hocon | 207 + rel/i18n/zh/emqx_gateway_api_listeners.hocon | 65 + rel/i18n/zh/emqx_gateway_schema.hocon | 112 + rel/i18n/zh/emqx_license_http_api.hocon | 15 + rel/i18n/zh/emqx_license_schema.hocon | 29 + rel/i18n/zh/emqx_limiter_schema.hocon | 89 + rel/i18n/zh/emqx_lwm2m_api.hocon | 27 + rel/i18n/zh/emqx_lwm2m_schema.hocon | 56 + rel/i18n/zh/emqx_mgmt_api_alarms.hocon | 37 + rel/i18n/zh/emqx_mgmt_api_banned.hocon | 54 + rel/i18n/zh/emqx_mgmt_api_key_schema.hocon | 19 + rel/i18n/zh/emqx_mgmt_api_publish.hocon | 81 + rel/i18n/zh/emqx_mgmt_api_status.hocon | 22 + rel/i18n/zh/emqx_modules_schema.hocon | 45 + rel/i18n/zh/emqx_mqttsn_schema.hocon | 30 + rel/i18n/zh/emqx_plugins_schema.hocon | 46 + rel/i18n/zh/emqx_prometheus_schema.hocon | 47 + rel/i18n/zh/emqx_psk_schema.hocon | 28 + rel/i18n/zh/emqx_resource_schema.hocon | 112 + rel/i18n/zh/emqx_retainer_api.hocon | 63 + rel/i18n/zh/emqx_retainer_schema.hocon | 46 + rel/i18n/zh/emqx_rewrite_api.hocon | 12 + rel/i18n/zh/emqx_rule_api_schema.hocon | 381 ++ rel/i18n/zh/emqx_rule_engine_api.hocon | 93 + rel/i18n/zh/emqx_rule_engine_schema.hocon | 184 + rel/i18n/zh/emqx_schema.hocon | 1473 ++++++ rel/i18n/zh/emqx_slow_subs_api.hocon | 30 + rel/i18n/zh/emqx_slow_subs_schema.hocon | 18 + rel/i18n/zh/emqx_statsd_api.hocon | 9 + rel/i18n/zh/emqx_statsd_schema.hocon | 30 + rel/i18n/zh/emqx_stomp_schema.hocon | 15 + rel/i18n/zh/emqx_telemetry_api.hocon | 54 + rel/i18n/zh/emqx_topic_metrics_api.hocon | 105 + 200 files changed, 15283 insertions(+), 14601 deletions(-) create mode 100644 rel/i18n/zh/emqx_authn_api.hocon create mode 100644 rel/i18n/zh/emqx_authn_http.hocon create mode 100644 rel/i18n/zh/emqx_authn_jwt.hocon create mode 100644 rel/i18n/zh/emqx_authn_mnesia.hocon create mode 100644 rel/i18n/zh/emqx_authn_mongodb.hocon create mode 100644 rel/i18n/zh/emqx_authn_mysql.hocon create mode 100644 rel/i18n/zh/emqx_authn_pgsql.hocon create mode 100644 rel/i18n/zh/emqx_authn_redis.hocon create mode 100644 rel/i18n/zh/emqx_authn_schema.hocon create mode 100644 rel/i18n/zh/emqx_authn_user_import_api.hocon create mode 100644 rel/i18n/zh/emqx_authz_api_cache.hocon create mode 100644 
rel/i18n/zh/emqx_authz_api_mnesia.hocon create mode 100644 rel/i18n/zh/emqx_authz_api_schema.hocon create mode 100644 rel/i18n/zh/emqx_authz_api_settings.hocon create mode 100644 rel/i18n/zh/emqx_authz_api_sources.hocon create mode 100644 rel/i18n/zh/emqx_authz_schema.hocon create mode 100644 rel/i18n/zh/emqx_auto_subscribe_api.hocon create mode 100644 rel/i18n/zh/emqx_auto_subscribe_schema.hocon create mode 100644 rel/i18n/zh/emqx_bridge_api.hocon create mode 100644 rel/i18n/zh/emqx_bridge_kafka.hocon create mode 100644 rel/i18n/zh/emqx_bridge_mqtt_schema.hocon create mode 100644 rel/i18n/zh/emqx_bridge_schema.hocon create mode 100644 rel/i18n/zh/emqx_bridge_webhook_schema.hocon create mode 100644 rel/i18n/zh/emqx_coap_api.hocon create mode 100644 rel/i18n/zh/emqx_coap_schema.hocon create mode 100644 rel/i18n/zh/emqx_conf_schema.hocon create mode 100644 rel/i18n/zh/emqx_connector_api.hocon create mode 100644 rel/i18n/zh/emqx_connector_http.hocon create mode 100644 rel/i18n/zh/emqx_connector_ldap.hocon create mode 100644 rel/i18n/zh/emqx_connector_mongo.hocon create mode 100644 rel/i18n/zh/emqx_connector_mqtt.hocon create mode 100644 rel/i18n/zh/emqx_connector_mqtt_schema.hocon create mode 100644 rel/i18n/zh/emqx_connector_mysql.hocon create mode 100644 rel/i18n/zh/emqx_connector_pgsql.hocon create mode 100644 rel/i18n/zh/emqx_connector_redis.hocon create mode 100644 rel/i18n/zh/emqx_connector_schema_lib.hocon create mode 100644 rel/i18n/zh/emqx_dashboard_api.hocon create mode 100644 rel/i18n/zh/emqx_dashboard_schema.hocon create mode 100644 rel/i18n/zh/emqx_delayed_api.hocon create mode 100644 rel/i18n/zh/emqx_ee_bridge_cassa.hocon create mode 100644 rel/i18n/zh/emqx_ee_bridge_clickhouse.hocon create mode 100644 rel/i18n/zh/emqx_ee_bridge_dynamo.hocon create mode 100644 rel/i18n/zh/emqx_ee_bridge_gcp_pubsub.hocon create mode 100644 rel/i18n/zh/emqx_ee_bridge_hstreamdb.hocon create mode 100644 rel/i18n/zh/emqx_ee_bridge_influxdb.hocon create mode 100644 rel/i18n/zh/emqx_ee_bridge_mongodb.hocon create mode 100644 rel/i18n/zh/emqx_ee_bridge_mysql.hocon create mode 100644 rel/i18n/zh/emqx_ee_bridge_pgsql.hocon create mode 100644 rel/i18n/zh/emqx_ee_bridge_redis.hocon create mode 100644 rel/i18n/zh/emqx_ee_bridge_rocketmq.hocon create mode 100644 rel/i18n/zh/emqx_ee_bridge_sqlserver.hocon create mode 100644 rel/i18n/zh/emqx_ee_bridge_tdengine.hocon create mode 100644 rel/i18n/zh/emqx_ee_connector_cassa.hocon create mode 100644 rel/i18n/zh/emqx_ee_connector_clickhouse.hocon create mode 100644 rel/i18n/zh/emqx_ee_connector_dynamo.hocon create mode 100644 rel/i18n/zh/emqx_ee_connector_hstreamdb.hocon create mode 100644 rel/i18n/zh/emqx_ee_connector_influxdb.hocon create mode 100644 rel/i18n/zh/emqx_ee_connector_rocketmq.hocon create mode 100644 rel/i18n/zh/emqx_ee_connector_sqlserver.hocon create mode 100644 rel/i18n/zh/emqx_ee_connector_tdengine.hocon create mode 100644 rel/i18n/zh/emqx_ee_schema_registry_http_api.hocon create mode 100644 rel/i18n/zh/emqx_ee_schema_registry_schema.hocon create mode 100644 rel/i18n/zh/emqx_exhook_api.hocon create mode 100644 rel/i18n/zh/emqx_exhook_schema.hocon create mode 100644 rel/i18n/zh/emqx_exproto_schema.hocon create mode 100644 rel/i18n/zh/emqx_gateway_api.hocon create mode 100644 rel/i18n/zh/emqx_gateway_api_authn.hocon create mode 100644 rel/i18n/zh/emqx_gateway_api_clients.hocon create mode 100644 rel/i18n/zh/emqx_gateway_api_listeners.hocon create mode 100644 rel/i18n/zh/emqx_gateway_schema.hocon create mode 100644 
rel/i18n/zh/emqx_license_http_api.hocon create mode 100644 rel/i18n/zh/emqx_license_schema.hocon create mode 100644 rel/i18n/zh/emqx_limiter_schema.hocon create mode 100644 rel/i18n/zh/emqx_lwm2m_api.hocon create mode 100644 rel/i18n/zh/emqx_lwm2m_schema.hocon create mode 100644 rel/i18n/zh/emqx_mgmt_api_alarms.hocon create mode 100644 rel/i18n/zh/emqx_mgmt_api_banned.hocon create mode 100644 rel/i18n/zh/emqx_mgmt_api_key_schema.hocon create mode 100644 rel/i18n/zh/emqx_mgmt_api_publish.hocon create mode 100644 rel/i18n/zh/emqx_mgmt_api_status.hocon create mode 100644 rel/i18n/zh/emqx_modules_schema.hocon create mode 100644 rel/i18n/zh/emqx_mqttsn_schema.hocon create mode 100644 rel/i18n/zh/emqx_plugins_schema.hocon create mode 100644 rel/i18n/zh/emqx_prometheus_schema.hocon create mode 100644 rel/i18n/zh/emqx_psk_schema.hocon create mode 100644 rel/i18n/zh/emqx_resource_schema.hocon create mode 100644 rel/i18n/zh/emqx_retainer_api.hocon create mode 100644 rel/i18n/zh/emqx_retainer_schema.hocon create mode 100644 rel/i18n/zh/emqx_rewrite_api.hocon create mode 100644 rel/i18n/zh/emqx_rule_api_schema.hocon create mode 100644 rel/i18n/zh/emqx_rule_engine_api.hocon create mode 100644 rel/i18n/zh/emqx_rule_engine_schema.hocon create mode 100644 rel/i18n/zh/emqx_schema.hocon create mode 100644 rel/i18n/zh/emqx_slow_subs_api.hocon create mode 100644 rel/i18n/zh/emqx_slow_subs_schema.hocon create mode 100644 rel/i18n/zh/emqx_statsd_api.hocon create mode 100644 rel/i18n/zh/emqx_statsd_schema.hocon create mode 100644 rel/i18n/zh/emqx_stomp_schema.hocon create mode 100644 rel/i18n/zh/emqx_telemetry_api.hocon create mode 100644 rel/i18n/zh/emqx_topic_metrics_api.hocon diff --git a/rel/i18n/emqx_authn_api.hocon b/rel/i18n/emqx_authn_api.hocon index a068edee2..07f9c6c3e 100644 --- a/rel/i18n/emqx_authn_api.hocon +++ b/rel/i18n/emqx_authn_api.hocon @@ -1,217 +1,96 @@ emqx_authn_api { - authentication_get { - desc { - en: """List authenticators for global authentication.""" - zh: """列出全局认证链上的认证器。""" - } - } +authentication_get.desc: +"""List authenticators for global authentication.""" - authentication_post { - desc { - en: """Create authenticator for global authentication.""" - zh: """为全局认证链创建认证器。""" - } - } +authentication_id_delete.desc: +"""Delete authenticator from global authentication chain.""" - authentication_id_get { - desc { - en: """Get authenticator from global authentication chain.""" - zh: """获取全局认证链上的指定认证器。""" - } - } +authentication_id_get.desc: +"""Get authenticator from global authentication chain.""" - authentication_id_put { - desc { - en: """Update authenticator from global authentication chain.""" - zh: """更新全局认证链上的指定认证器。""" - } - } +authentication_id_position_put.desc: +"""Move authenticator in global authentication chain.""" - authentication_id_delete { - desc { - en: """Delete authenticator from global authentication chain.""" - zh: """删除全局认证链上的指定认证器。""" - } - } +authentication_id_put.desc: +"""Update authenticator from global authentication chain.""" - authentication_id_status_get { - desc { - en: """Get authenticator status from global authentication chain.""" - zh: """获取全局认证链上指定认证器的状态。""" - } - } +authentication_id_status_get.desc: +"""Get authenticator status from global authentication chain.""" - listeners_listener_id_authentication_get { - desc { - en: """List authenticators for listener authentication.""" - zh: """列出监听器认证链上的认证器。""" - } - } +authentication_id_users_get.desc: +"""List users in authenticator in global authentication chain.""" - 
listeners_listener_id_authentication_post { - desc { - en: """Create authenticator for listener authentication.""" - zh: """在监听器认证链上创建认证器。""" - } - } +authentication_id_users_post.desc: +"""Create users for authenticator in global authentication chain.""" - listeners_listener_id_authentication_id_get { - desc { - en: """Get authenticator from listener authentication chain.""" - zh: """获取监听器认证链上的指定认证器。""" - } - } +authentication_id_users_user_id_delete.desc: +"""Delete user in authenticator in global authentication chain.""" - listeners_listener_id_authentication_id_put { - desc { - en: """Update authenticator from listener authentication chain.""" - zh: """更新监听器认证链上的指定认证器。""" - } - } +authentication_id_users_user_id_get.desc: +"""Get user from authenticator in global authentication chain.""" - listeners_listener_id_authentication_id_delete { - desc { - en: """Delete authenticator from listener authentication chain.""" - zh: """删除监听器认证链上的指定认证器。""" - } - } +authentication_id_users_user_id_put.desc: +"""Update user in authenticator in global authentication chain.""" - listeners_listener_id_authentication_id_status_get { - desc { - en: """Get authenticator status from listener authentication chain.""" - zh: """获取监听器认证链上指定认证器的状态。""" - } - } +authentication_post.desc: +"""Create authenticator for global authentication.""" - authentication_id_position_put { - desc { - en: """Move authenticator in global authentication chain.""" - zh: """更改全局认证链上指定认证器的顺序。""" - } - } +is_superuser.desc: +"""Is superuser""" - listeners_listener_id_authentication_id_position_put { - desc { - en: """Move authenticator in listener authentication chain.""" - zh: """更改监听器认证链上指定认证器的顺序。""" - } - } +like_user_id.desc: +"""Fuzzy search user_id (username or clientid).""" - authentication_id_users_post { - desc { - en: """Create users for authenticator in global authentication chain.""" - zh: """为全局认证链上的指定认证器创建用户数据。""" - } - } +like_user_id.label: +"""like_user_id""" - authentication_id_users_get { - desc { - en: """List users in authenticator in global authentication chain.""" - zh: """获取全局认证链上指定认证器中的用户数据。""" - } - } +listeners_listener_id_authentication_get.desc: +"""List authenticators for listener authentication.""" - listeners_listener_id_authentication_id_users_post { - desc { - en: """Create users for authenticator in listener authentication chain.""" - zh: """为监听器认证链上的指定认证器创建用户数据。""" - } - } +listeners_listener_id_authentication_id_delete.desc: +"""Delete authenticator from listener authentication chain.""" - listeners_listener_id_authentication_id_users_get { - desc { - en: """List users in authenticator in listener authentication chain.""" - zh: """列出监听器认证链上指定认证器中的用户数据。""" - } - } +listeners_listener_id_authentication_id_get.desc: +"""Get authenticator from listener authentication chain.""" - authentication_id_users_user_id_get { - desc { - en: """Get user from authenticator in global authentication chain.""" - zh: """获取全局认证链上指定认证器中的指定用户数据。""" - } - } +listeners_listener_id_authentication_id_position_put.desc: +"""Move authenticator in listener authentication chain.""" - authentication_id_users_user_id_put { - desc { - en: """Update user in authenticator in global authentication chain.""" - zh: """更新全局认证链上指定认证器中的指定用户数据。""" - } - } +listeners_listener_id_authentication_id_put.desc: +"""Update authenticator from listener authentication chain.""" - authentication_id_users_user_id_delete { - desc { - en: """Delete user in authenticator in global authentication chain.""" - zh: """删除全局认证链上指定认证器中的指定用户数据。""" - } - } 
+listeners_listener_id_authentication_id_status_get.desc: +"""Get authenticator status from listener authentication chain.""" - listeners_listener_id_authentication_id_users_user_id_get { - desc { - en: """Get user from authenticator in listener authentication chain.""" - zh: """获取监听器认证链上指定认证器中的指定用户数据。""" - } - } +listeners_listener_id_authentication_id_users_get.desc: +"""List users in authenticator in listener authentication chain.""" - listeners_listener_id_authentication_id_users_user_id_put { - desc { - en: """Update user in authenticator in listener authentication chain.""" - zh: """更新监听器认证链上指定认证器中的指定用户数据。""" - } - } +listeners_listener_id_authentication_id_users_post.desc: +"""Create users for authenticator in listener authentication chain.""" - listeners_listener_id_authentication_id_users_user_id_delete { - desc { - en: """Delete user in authenticator in listener authentication chain.""" - zh: """删除监听器认证链上指定认证器中的指定用户数据。""" - } - } +listeners_listener_id_authentication_id_users_user_id_delete.desc: +"""Delete user in authenticator in listener authentication chain.""" - param_auth_id { - desc { - en: """Authenticator ID.""" - zh: """认证器 ID。""" - } - } +listeners_listener_id_authentication_id_users_user_id_get.desc: +"""Get user from authenticator in listener authentication chain.""" - param_listener_id { - desc { - en: """Listener ID.""" - zh: """监听器 ID。""" - } - } +listeners_listener_id_authentication_id_users_user_id_put.desc: +"""Update user in authenticator in listener authentication chain.""" - param_user_id { - desc { - en: """User ID.""" - zh: """用户 ID。""" - } - } +listeners_listener_id_authentication_post.desc: +"""Create authenticator for listener authentication.""" - param_position { - desc { - en: """Position of authenticator in chain. Possible values are 'front', 'rear', 'before:{other_authenticator}', 'after:{other_authenticator}'.""" - zh: """认证者在链中的位置。可能的值是 'front', 'rear', 'before:{other_authenticator}', 'after:{other_authenticator}'""" - } - } +param_auth_id.desc: +"""Authenticator ID.""" - like_user_id { - desc { - en: """Fuzzy search user_id (username or clientid).""" - zh: """使用用户 ID (username 或 clientid)模糊查询。""" - } - label { - en: """like_user_id""" - zh: """like_user_id""" - } - } +param_listener_id.desc: +"""Listener ID.""" - is_superuser { - desc { - en: """Is superuser""" - zh: """是否是超级用户""" - } - } +param_position.desc: +"""Position of authenticator in chain. 
Possible values are 'front', 'rear', 'before:{other_authenticator}', 'after:{other_authenticator}'.""" + +param_user_id.desc: +"""User ID.""" } diff --git a/rel/i18n/emqx_authn_http.hocon b/rel/i18n/emqx_authn_http.hocon index 129db5054..582bacea8 100644 --- a/rel/i18n/emqx_authn_http.hocon +++ b/rel/i18n/emqx_authn_http.hocon @@ -1,81 +1,45 @@ emqx_authn_http { - get { - desc { - en: """Configuration of authenticator using HTTP Server as authentication service (Using GET request).""" - zh: """使用 HTTP Server 作为认证服务的认证器的配置项 (使用 GET 请求)。""" - } - } - post { - desc { - en: """Configuration of authenticator using HTTP Server as authentication service (Using POST request).""" - zh: """使用 HTTP Server 作为认证服务的认证器的配置项 (使用 POST 请求)。""" - } - } +body.desc: +"""HTTP request body.""" - method { - desc { - en: """HTTP request method.""" - zh: """HTTP 请求方法。""" - } - label { - en: """Request Method""" - zh: """请求方法""" - } - } +body.label: +"""Request Body""" - url { - desc { - en: """URL of the HTTP server.""" - zh: """认证 HTTP 服务器地址。""" - } - label { - en: """URL""" - zh: """URL""" - } - } +get.desc: +"""Configuration of authenticator using HTTP Server as authentication service (Using GET request).""" - headers { - desc { - en: """List of HTTP Headers.""" - zh: """HTTP Headers 列表""" - } - label { - en: """Headers""" - zh: """请求头""" - } - } +headers.desc: +"""List of HTTP Headers.""" - headers_no_content_type { - desc { - en: """List of HTTP headers (without content-type).""" - zh: """HTTP Headers 列表 (无 content-type) 。""" - } - label { - en: """headers_no_content_type""" - zh: """请求头(无 content-type)""" - } - } +headers.label: +"""Headers""" - body { - desc { - en: """HTTP request body.""" - zh: """HTTP request body。""" - } - label { - en: """Request Body""" - zh: """Request Body""" - } - } +headers_no_content_type.desc: +"""List of HTTP headers (without content-type).""" + +headers_no_content_type.label: +"""headers_no_content_type""" + +method.desc: +"""HTTP request method.""" + +method.label: +"""Request Method""" + +post.desc: +"""Configuration of authenticator using HTTP Server as authentication service (Using POST request).""" + +request_timeout.desc: +"""HTTP request timeout.""" + +request_timeout.label: +"""Request Timeout""" + +url.desc: +"""URL of the HTTP server.""" + +url.label: +"""URL""" - request_timeout { - desc { - en: """HTTP request timeout.""" - zh: """HTTP 请求超时时长。""" - } - label { - en: """Request Timeout""" - zh: """请求超时时间""" - } - } } diff --git a/rel/i18n/emqx_authn_jwt.hocon b/rel/i18n/emqx_authn_jwt.hocon index 6a4a1e2d4..e28213e37 100644 --- a/rel/i18n/emqx_authn_jwt.hocon +++ b/rel/i18n/emqx_authn_jwt.hocon @@ -1,219 +1,118 @@ emqx_authn_jwt { - use_jwks { - desc { - en: """Whether to use JWKS.""" - zh: """是否使用 JWKS。""" - } - label { - en: """Whether to Use JWKS""" - zh: """是否使用 JWKS""" - } - } - algorithm { - desc { - en: """JWT signing algorithm, Supports HMAC (configured as hmac-based) and RSA, ECDSA (configured as public-key).""" - zh: """JWT 签名算法,支持 HMAC (配置为 hmac-based)和 RSA、ECDSA (配置为 public-key)。""" - } - label { - en: """JWT Signing Algorithm""" - zh: """JWT 签名算法""" - } - } +acl_claim_name.desc: +"""JWT claim name to use for getting ACL rules.""" - public_key { - desc { - en: """The public key used to verify the JWT.""" - zh: """用于验证 JWT 的公钥。""" - } - label { - en: """Public Key""" - zh: """公钥""" - } - } +acl_claim_name.label: +"""ACL claim name""" - secret_base64_encoded { - desc { - en: """Whether secret is base64 encoded.""" - zh: """密钥是否为 Base64 编码。""" - } - label { 
- en: """Whether Secret is Base64 Encoded""" - zh: """密钥是否为 Base64 编码""" - } - } +algorithm.desc: +"""JWT signing algorithm, Supports HMAC (configured as hmac-based) and RSA, ECDSA (configured as public-key).""" - secret { - desc { - en: """The key to verify the JWT using HMAC algorithm.""" - zh: """使用 HMAC 算法时用于验证 JWT 的密钥""" - } - label { - en: """Secret""" - zh: """Secret""" - } - } +algorithm.label: +"""JWT Signing Algorithm""" - endpoint { - desc { - en: """JWKS endpoint, it's a read-only endpoint that returns the server's public key set in the JWKS format.""" - zh: """JWKS 端点, 它是一个以 JWKS 格式返回服务端的公钥集的只读端点。""" - } - label { - en: """JWKS Endpoint""" - zh: """JWKS Endpoint""" - } - } +cacertfile.desc: +"""Path to a file containing PEM-encoded CA certificates.""" - refresh_interval { - desc { - en: """JWKS refresh interval.""" - zh: """JWKS 刷新间隔。""" - } - label { - en: """JWKS Refresh Interval""" - zh: """JWKS 刷新间隔""" - } - } +cacertfile.label: +"""CA Certificate File""" - cacertfile { - desc { - en: """Path to a file containing PEM-encoded CA certificates.""" - zh: """包含 PEM 编码的 CA 证书的文件的路径。""" - } - label { - en: """CA Certificate File""" - zh: """CA 证书文件""" - } - } +certfile.desc: +"""Path to a file containing the user certificate.""" - certfile { - desc { - en: """Path to a file containing the user certificate.""" - zh: """包含用户证书的文件的路径。""" - } - label { - en: """Certificate File""" - zh: """证书文件""" - } - } +certfile.label: +"""Certificate File""" - keyfile { - desc { - en: """Path to a file containing the user's private PEM-encoded key.""" - zh: """包含 PEM 编码的用户私钥的文件的路径。""" - } - label { - en: """Key File""" - zh: """私钥文件""" - } - } +enable.desc: +"""Enable/disable SSL.""" - verify { - desc { - en: """Enable or disable SSL peer verification.""" - zh: """指定握手过程中是否校验对端证书。""" - } - label { - en: """Verify""" - zh: """Verify""" - } - } +enable.label: +"""Enable/disable SSL""" - server_name_indication { - desc { - en: """Server Name Indication (SNI).""" - zh: """服务器名称指示(SNI)。""" - } - label { - en: """Server Name Indication""" - zh: """服务器名称指示""" - } - } +endpoint.desc: +"""JWKS endpoint, it's a read-only endpoint that returns the server's public key set in the JWKS format.""" - verify_claims { - desc { - en: """A list of custom claims to validate, which is a list of name/value pairs. 
+endpoint.label: +"""JWKS Endpoint""" + +from.desc: +"""Field to take JWT from.""" + +from.label: +"""From Field""" + +hmac-based.desc: +"""Configuration when the JWT for authentication is issued using the HMAC algorithm.""" + +jwks.desc: +"""Configuration when JWTs used for authentication need to be fetched from the JWKS endpoint.""" + +keyfile.desc: +"""Path to a file containing the user's private PEM-encoded key.""" + +keyfile.label: +"""Key File""" + +public-key.desc: +"""Configuration when the JWT for authentication is issued using RSA or ECDSA algorithm.""" + +public_key.desc: +"""The public key used to verify the JWT.""" + +public_key.label: +"""Public Key""" + +refresh_interval.desc: +"""JWKS refresh interval.""" + +refresh_interval.label: +"""JWKS Refresh Interval""" + +secret.desc: +"""The key to verify the JWT using HMAC algorithm.""" + +secret.label: +"""Secret""" + +secret_base64_encoded.desc: +"""Whether secret is base64 encoded.""" + +secret_base64_encoded.label: +"""Whether Secret is Base64 Encoded""" + +server_name_indication.desc: +"""Server Name Indication (SNI).""" + +server_name_indication.label: +"""Server Name Indication""" + +ssl.desc: +"""SSL options.""" + +ssl.label: +"""SSL Options""" + +use_jwks.desc: +"""Whether to use JWKS.""" + +use_jwks.label: +"""Whether to Use JWKS""" + +verify.desc: +"""Enable or disable SSL peer verification.""" + +verify.label: +"""Verify""" + +verify_claims.desc: +"""A list of custom claims to validate, which is a list of name/value pairs. Values can use the following placeholders: - ${username}: Will be replaced at runtime with Username used by the client when connecting - ${clientid}: Will be replaced at runtime with Client ID used by the client when connecting Authentication will verify that the value of claims in the JWT (taken from the Password field) matches what is required in verify_claims.""" - zh: """需要验证的自定义声明列表,它是一个名称/值对列表。 -值可以使用以下占位符: -- ${username}: 将在运行时被替换为客户端连接时使用的用户名 -- ${clientid}: 将在运行时被替换为客户端连接时使用的客户端标识符 -认证时将验证 JWT(取自 Password 字段)中 claims 的值是否与 verify_claims 中要求的相匹配。""" - } - label { - en: """Verify Claims""" - zh: """Verify Claims""" - } - } - - ssl { - desc { - en: """SSL options.""" - zh: """SSL 选项。""" - } - label { - en: """SSL Options""" - zh: """SSL 选项""" - } - } - - enable { - desc { - en: """Enable/disable SSL.""" - zh: """启用/禁用 SSL。""" - } - label { - en: """Enable/disable SSL""" - zh: """启用/禁用 SSL""" - } - } - - hmac-based { - desc { - en: """Configuration when the JWT for authentication is issued using the HMAC algorithm.""" - zh: """用于认证的 JWT 使用 HMAC 算法签发时的配置。""" - } - } - - public-key { - desc { - en: """Configuration when the JWT for authentication is issued using RSA or ECDSA algorithm.""" - zh: """用于认证的 JWT 使用 RSA 或 ECDSA 算法签发时的配置。""" - } - } - - jwks { - desc { - en: """Configuration when JWTs used for authentication need to be fetched from the JWKS endpoint.""" - zh: """用于认证的 JWTs 需要从 JWKS 端点获取时的配置。""" - } - } - - acl_claim_name { - desc { - en: """JWT claim name to use for getting ACL rules.""" - zh: """JWT claim name to use for getting ACL rules.""" - } - label { - en: """ACL claim name""" - zh: """ACL claim name""" - } - } - - from { - desc { - en: """Field to take JWT from.""" - zh: """要从中获取 JWT 的字段。""" - } - label { - en: """From Field""" - zh: """源字段""" - } - } +verify_claims.label: +"""Verify Claims""" } diff --git a/rel/i18n/emqx_authn_mnesia.hocon b/rel/i18n/emqx_authn_mnesia.hocon index 0d07217d9..4cfab9493 100644 --- a/rel/i18n/emqx_authn_mnesia.hocon +++ 
b/rel/i18n/emqx_authn_mnesia.hocon @@ -1,21 +1,12 @@ emqx_authn_mnesia { - authentication { - desc { - en: """Configuration of authenticator using built-in database as data source.""" - zh: """使用内置数据库作为认证数据源的认证器的配置项。""" - } - } - user_id_type { - desc { - en: """Specify whether to use `clientid` or `username` for authentication.""" - zh: """指定使用客户端ID `clientid` 还是用户名 `username` 进行认证。""" - } +authentication.desc: +"""Configuration of authenticator using built-in database as data source.""" - label: { - en: """Authentication ID Type""" - zh: """认证 ID 类型""" - } - } +user_id_type.desc: +"""Specify whether to use `clientid` or `username` for authentication.""" + +user_id_type.label: +"""Authentication ID Type""" } diff --git a/rel/i18n/emqx_authn_mongodb.hocon b/rel/i18n/emqx_authn_mongodb.hocon index 80d6473ed..97311f751 100644 --- a/rel/i18n/emqx_authn_mongodb.hocon +++ b/rel/i18n/emqx_authn_mongodb.hocon @@ -1,83 +1,45 @@ emqx_authn_mongodb { - standalone { - desc { - en: """Configuration of authenticator using MongoDB (Standalone) as authentication data source.""" - zh: """使用 MongoDB (Standalone) 作为认证数据源的认证器的配置项。""" - } - } - replica-set { - desc { - en: """Configuration of authenticator using MongoDB (Replica Set) as authentication data source.""" - zh: """使用 MongoDB (Replica Set) 作为认证数据源的认证器的配置项。""" - } - } +collection.desc: +"""Collection used to store authentication data.""" - sharded-cluster { - desc { - en: """Configuration of authenticator using MongoDB (Sharded Cluster) as authentication data source.""" - zh: """使用 MongoDB (Sharded Cluster) 作为认证数据源的认证器的配置项。""" - } - } +collection.label: +"""Collection""" - collection { - desc { - en: """Collection used to store authentication data.""" - zh: """存储认证数据的集合。""" - } - label: { - en: """Collection""" - zh: """集合""" - } - } - - filter { - desc { - en: """Conditional expression that defines the filter condition in the query. +filter.desc: +"""Conditional expression that defines the filter condition in the query. 
Filter supports the following placeholders: - ${username}: Will be replaced at runtime with Username used by the client when connecting - ${clientid}: Will be replaced at runtime with Client ID used by the client when connecting""" - zh: """在查询中定义过滤条件的条件表达式。 -过滤器支持如下占位符: -- ${username}: 将在运行时被替换为客户端连接时使用的用户名 -- ${clientid}: 将在运行时被替换为客户端连接时使用的客户端标识符""" - } - label: { - en: """Filter""" - zh: """过滤器""" - } - } - password_hash_field { - desc { - en: """Document field that contains password hash.""" - zh: """文档中用于存放密码散列的字段。""" - } - label: { - en: """Password Hash Field""" - zh: """密码散列字段""" - } - } +filter.label: +"""Filter""" - salt_field { - desc { - en: """Document field that contains the password salt.""" - zh: """文档中用于存放盐值的字段。""" - } - label: { - en: """Salt Field""" - zh: """盐值字段""" - } - } +is_superuser_field.desc: +"""Document field that defines if the user has superuser privileges.""" + +is_superuser_field.label: +"""Is Superuser Field""" + +password_hash_field.desc: +"""Document field that contains password hash.""" + +password_hash_field.label: +"""Password Hash Field""" + +replica-set.desc: +"""Configuration of authenticator using MongoDB (Replica Set) as authentication data source.""" + +salt_field.desc: +"""Document field that contains the password salt.""" + +salt_field.label: +"""Salt Field""" + +sharded-cluster.desc: +"""Configuration of authenticator using MongoDB (Sharded Cluster) as authentication data source.""" + +standalone.desc: +"""Configuration of authenticator using MongoDB (Standalone) as authentication data source.""" - is_superuser_field { - desc { - en: """Document field that defines if the user has superuser privileges.""" - zh: """文档中用于定义用户是否具有超级用户权限的字段。""" - } - label: { - en: """Is Superuser Field""" - zh: """超级用户字段""" - } - } } diff --git a/rel/i18n/emqx_authn_mysql.hocon b/rel/i18n/emqx_authn_mysql.hocon index 4ddfd5701..45634a4ad 100644 --- a/rel/i18n/emqx_authn_mysql.hocon +++ b/rel/i18n/emqx_authn_mysql.hocon @@ -1,30 +1,18 @@ emqx_authn_mysql { - authentication { - desc { - en: """Configuration of authenticator using MySQL as authentication data source.""" - zh: """使用 MySQL 作为认证数据源的认证器的配置项。""" - } - } - query { - desc { - en: """SQL used to query data for authentication, such as password hash.""" - zh: """用于查询密码散列等用于认证的数据的 SQL 语句。""" - } - label: { - en: """Query""" - zh: """查询语句""" - } - } +authentication.desc: +"""Configuration of authenticator using MySQL as authentication data source.""" + +query.desc: +"""SQL used to query data for authentication, such as password hash.""" + +query.label: +"""Query""" + +query_timeout.desc: +"""Timeout for the SQL query.""" + +query_timeout.label: +"""Query Timeout""" - query_timeout { - desc { - en: """Timeout for the SQL query.""" - zh: """SQL 查询的超时时间。""" - } - label: { - en: """Query Timeout""" - zh: """查询超时""" - } - } } diff --git a/rel/i18n/emqx_authn_pgsql.hocon b/rel/i18n/emqx_authn_pgsql.hocon index 298e38774..a9d727785 100644 --- a/rel/i18n/emqx_authn_pgsql.hocon +++ b/rel/i18n/emqx_authn_pgsql.hocon @@ -1,19 +1,12 @@ emqx_authn_pgsql { - authentication { - desc { - en: """Configuration of authenticator using PostgreSQL as authentication data source.""" - zh: """使用 PostgreSQL 作为认证数据源的认证器的配置项。""" - } - } - query { - desc { - en: """SQL used to query data for authentication, such as password hash.""" - zh: """用于查询密码散列等用于认证的数据的 SQL 语句。""" - } - label: { - en: """Query""" - zh: """查询语句""" - } - } +authentication.desc: +"""Configuration of authenticator using PostgreSQL as authentication data source.""" + 
+query.desc: +"""SQL used to query data for authentication, such as password hash.""" + +query.label: +"""Query""" + } diff --git a/rel/i18n/emqx_authn_redis.hocon b/rel/i18n/emqx_authn_redis.hocon index a9cd4a414..f39d54061 100644 --- a/rel/i18n/emqx_authn_redis.hocon +++ b/rel/i18n/emqx_authn_redis.hocon @@ -1,33 +1,18 @@ emqx_authn_redis { - standalone { - desc { - en: """Configuration of authenticator using Redis (Standalone) as authentication data source.""" - zh: """使用 Redis (Standalone) 作为认证数据源的认证器的配置项。""" - } - } - cluster { - desc { - en: """Configuration of authenticator using Redis (Cluster) as authentication data source.""" - zh: """使用 Redis (Cluster) 作为认证数据源的认证器的配置项。""" - } - } +cluster.desc: +"""Configuration of authenticator using Redis (Cluster) as authentication data source.""" - sentinel { - desc { - en: """Configuration of authenticator using Redis (Sentinel) as authentication data source.""" - zh: """使用 Redis (Sentinel) 作为认证数据源的认证器的配置项。""" - } - } +cmd.desc: +"""The Redis Command used to query data for authentication such as password hash, currently only supports HGET and HMGET.""" + +cmd.label: +"""Command""" + +sentinel.desc: +"""Configuration of authenticator using Redis (Sentinel) as authentication data source.""" + +standalone.desc: +"""Configuration of authenticator using Redis (Standalone) as authentication data source.""" - cmd { - desc { - en: """The Redis Command used to query data for authentication such as password hash, currently only supports HGET and HMGET.""" - zh: """用于查询密码散列等用于认证的数据的 Redis Command,目前仅支持 HGETHMGET。""" - } - label: { - en: """Command""" - zh: """Command""" - } - } } diff --git a/rel/i18n/emqx_authn_schema.hocon b/rel/i18n/emqx_authn_schema.hocon index 917554af0..98263ca49 100644 --- a/rel/i18n/emqx_authn_schema.hocon +++ b/rel/i18n/emqx_authn_schema.hocon @@ -1,243 +1,135 @@ emqx_authn_schema { - enable { - desc { - en: """Set to true or false to disable this auth provider.""" - zh: """设为 truefalse 以启用或禁用此认证数据源。""" - } - label: { - en: """Enable""" - zh: """启用""" - } - } - mechanism { - desc { - en: """Authentication mechanism.""" - zh: """认证机制。""" - } - label: { - en: """Authentication Mechanism""" - zh: """认证机制""" - } - } +backend.desc: +"""Backend type.""" - backend { - desc { - en: """Backend type.""" - zh: """后端类型。""" - } - label: { - en: """Backend Type""" - zh: """后端类型""" - } - } +backend.label: +"""Backend Type""" - metrics { - desc { - en: """The metrics of the resource.""" - zh: """资源统计指标。""" - } - label: { - en: """Metrics""" - zh: """指标""" - } - } +enable.desc: +"""Set to true or false to disable this auth provider.""" - node_metrics { - desc { - en: """The metrics of the resource for each node.""" - zh: """每个节点上资源的统计指标。""" - } - label: { - en: """Resource Metrics in Node""" - zh: """节点资源指标""" - } - } +enable.label: +"""Enable""" - status { - desc { - en: """The status of the resource.""" - zh: """资源状态。""" - } - label: { - en: """Status""" - zh: """状态""" - } - } +failed.desc: +"""Count of query failed.""" - node_status { - desc { - en: """The status of the resource for each node.""" - zh: """每个节点上资源的状态。""" - } - label: { - en: """Resource Status in Node""" - zh: """节点资源状态""" - } - } +failed.label: +"""Failed""" - node_error { - desc { - en: """The error of node.""" - zh: """节点上产生的错误。""" - } - label: { - en: """Error in Node""" - zh: """节点产生的错误""" - } - } +matched.desc: +"""Count of this resource is queried.""" - matched { - desc { - en: """Count of this resource is queried.""" - zh: """请求命中次数。""" - } - label: { - en: 
"""Matched""" - zh: """已命中""" - } - } +matched.label: +"""Matched""" - success { - desc { - en: """Count of query success.""" - zh: """请求成功次数。""" - } - label: { - en: """Success""" - zh: """成功""" - } - } +mechanism.desc: +"""Authentication mechanism.""" - failed { - desc { - en: """Count of query failed.""" - zh: """请求失败次数。""" - } - label: { - en: """Failed""" - zh: """失败""" - } - } +mechanism.label: +"""Authentication Mechanism""" - rate { - desc { - en: """The rate of matched, times/second.""" - zh: """命中速率,单位:次/秒。""" - } - label: { - en: """Rate""" - zh: """速率""" - } - } +metrics.desc: +"""The metrics of the resource.""" - rate_max { - desc { - en: """The max rate of matched, times/second.""" - zh: """最大命中速率,单位:次/秒。""" - } - label: { - en: """Max Rate""" - zh: """最大速率""" - } - } +metrics.label: +"""Metrics""" - rate_last5m { - desc { - en: """The average rate of matched in the last 5 minutes, times/second.""" - zh: """5分钟内平均命中速率,单位:次/秒。""" - } - label: { - en: """Rate in Last 5min""" - zh: """5分钟内速率""" - } - } +metrics_failed.desc: +"""The required authentication information is found in the current instance, and the instance returns authentication failure.""" - node { - desc { - en: """Node name.""" - zh: """节点名称。""" - } - label: { - en: """Node Name.""" - zh: """节点名称。""" - } - } +metrics_failed.label: +"""Authentication Failed Times""" - metrics_nomatch { - desc { - en: """The number of times the instance was ignored when the required authentication information was not found in the current instance.""" - zh: """在当前实例中没有找到需要的认证信息,实例被忽略的次数。""" - } - label: { - en: """Nomatch Times""" - zh: """实例被忽略的次数""" - } - } +metrics_nomatch.desc: +"""The number of times the instance was ignored when the required authentication information was not found in the current instance.""" - metrics_total { - desc { - en: """The total number of times the current instance was triggered.""" - zh: """当前实例被触发的总次数。""" - } - label: { - en: """Total Triggered Times""" - zh: """当前实例被触发的总次数""" - } - } +metrics_nomatch.label: +"""Nomatch Times""" - metrics_success { - desc { - en: """The required authentication information is found in the current instance, and the instance returns authentication success.""" - zh: """在当前实例中找到需要的认证信息,并且实例返回认证成功的次数。""" - } - label: { - en: """Authentication Success Times""" - zh: """实例认证成功的次数""" - } - } +metrics_rate.desc: +"""The total rate at which instances are triggered, times/second.""" - metrics_failed { - desc { - en: """The required authentication information is found in the current instance, and the instance returns authentication failure.""" - zh: """在当前实例中找到需要的认证信息,并且实例返回认证失败的次数。""" - } - label: { - en: """Authentication Failed Times""" - zh: """实例认证失败的次数""" - } - } +metrics_rate.label: +"""Total Triggered Rate""" - metrics_rate { - desc { - en: """The total rate at which instances are triggered, times/second.""" - zh: """实例被触发的速率。触发速率等于匹配速率 + 忽略速率,单位:次/秒。""" - } - label: { - en: """Total Triggered Rate""" - zh: """实例被触发的速率""" - } - } +metrics_rate_last5m.desc: +"""The average trigger rate of the instance within 5 minutes, times/second.""" - metrics_rate_max { - desc { - en: """The highest trigger rate the instance has ever reached, times/second.""" - zh: """实例曾经达到的最高触发速率,单位:次/秒。""" - } - label: { - en: """Highest Triggered Rate""" - zh: """实例曾经达到的最高触发速率""" - } - } +metrics_rate_last5m.label: +"""Average Triggered Rate in Last 5min""" + +metrics_rate_max.desc: +"""The highest trigger rate the instance has ever reached, times/second.""" + +metrics_rate_max.label: +"""Highest 
Triggered Rate""" + +metrics_success.desc: +"""The required authentication information is found in the current instance, and the instance returns authentication success.""" + +metrics_success.label: +"""Authentication Success Times""" + +metrics_total.desc: +"""The total number of times the current instance was triggered.""" + +metrics_total.label: +"""Total Triggered Times""" + +node.desc: +"""Node name.""" + +node.label: +"""Node Name.""" + +node_error.desc: +"""The error of node.""" + +node_error.label: +"""Error in Node""" + +node_metrics.desc: +"""The metrics of the resource for each node.""" + +node_metrics.label: +"""Resource Metrics in Node""" + +node_status.desc: +"""The status of the resource for each node.""" + +node_status.label: +"""Resource Status in Node""" + +rate.desc: +"""The rate of matched, times/second.""" + +rate.label: +"""Rate""" + +rate_last5m.desc: +"""The average rate of matched in the last 5 minutes, times/second.""" + +rate_last5m.label: +"""Rate in Last 5min""" + +rate_max.desc: +"""The max rate of matched, times/second.""" + +rate_max.label: +"""Max Rate""" + +status.desc: +"""The status of the resource.""" + +status.label: +"""Status""" + +success.desc: +"""Count of query success.""" + +success.label: +"""Success""" - metrics_rate_last5m { - desc { - en: """The average trigger rate of the instance within 5 minutes, times/second.""" - zh: """实例5分钟内平均触发速率,单位:次/秒。""" - } - label: { - en: """Average Triggered Rate in Last 5min""" - zh: """实例5分钟内平均触发速率""" - } - } } diff --git a/rel/i18n/emqx_authn_user_import_api.hocon b/rel/i18n/emqx_authn_user_import_api.hocon index 294897ec1..f8fb1757c 100644 --- a/rel/i18n/emqx_authn_user_import_api.hocon +++ b/rel/i18n/emqx_authn_user_import_api.hocon @@ -1,17 +1,9 @@ emqx_authn_user_import_api { - authentication_id_import_users_post { - desc { - en: """Import users into authenticator in global authentication chain.""" - zh: """为全局认证链上的指定认证器导入用户数据。""" - } - } +authentication_id_import_users_post.desc: +"""Import users into authenticator in global authentication chain.""" - listeners_listener_id_authentication_id_import_users_post { - desc { - en: """Import users into authenticator in listener authentication chain.""" - zh: """为监听器认证链上的指定认证器导入用户数据。""" - } - } +listeners_listener_id_authentication_id_import_users_post.desc: +"""Import users into authenticator in listener authentication chain.""" } diff --git a/rel/i18n/emqx_authz_api_cache.hocon b/rel/i18n/emqx_authz_api_cache.hocon index 9c620a22d..0789c1e71 100644 --- a/rel/i18n/emqx_authz_api_cache.hocon +++ b/rel/i18n/emqx_authz_api_cache.hocon @@ -1,8 +1,6 @@ emqx_authz_api_cache { - authorization_cache_delete { - desc { - en: """Clean all authorization cache in the cluster.""" - zh: """清除集群中所有授权数据缓存。""" - } - } + +authorization_cache_delete.desc: +"""Clean all authorization cache in the cluster.""" + } diff --git a/rel/i18n/emqx_authz_api_mnesia.hocon b/rel/i18n/emqx_authz_api_mnesia.hocon index 6d318d02b..4cfed2970 100644 --- a/rel/i18n/emqx_authz_api_mnesia.hocon +++ b/rel/i18n/emqx_authz_api_mnesia.hocon @@ -1,177 +1,87 @@ emqx_authz_api_mnesia { - users_username_get { - desc { - en: """Show the list of rules for users""" - zh: """获取内置数据库中所有用户名类型的规则记录""" - } - } - users_username_post { - desc { - en: """Add new rule for 'username'""" - zh: """添加内置数据库中用户名类型的规则记录""" - } - } +action.desc: +"""Authorized action (pub/sub/all)""" - users_clientid_get { - desc { - en: """Show the list of rules for clients""" - zh: """获取内置数据库中所有客户端标识符类型的规则记录""" - } - } +action.label: 
+"""action""" - users_clientid_post { - desc { - en: """Add new rule for 'clientid'""" - zh: """添加内置数据库中客户端标识符类型的规则记录""" - } - } +clientid.desc: +"""ClientID""" +clientid.label: +"""clientid""" - user_username_get { - desc { - en: """Get rule for 'username'""" - zh: """获取内置数据库中指定用户名类型的规则记录""" - } - } +fuzzy_clientid.desc: +"""Fuzzy search `clientid` as substring""" - user_username_put { - desc { - en: """Set rule for 'username'""" - zh: """更新内置数据库中指定用户名类型的规则记录""" - } - } +fuzzy_clientid.label: +"""fuzzy_clientid""" - user_username_delete { - desc { - en: """Delete rule for 'username'""" - zh: """删除内置数据库中指定用户名类型的规则记录""" - } - } +fuzzy_username.desc: +"""Fuzzy search `username` as substring""" - user_clientid_get { - desc { - en: """Get rule for 'clientid'""" - zh: """获取内置数据库中指定客户端标识符类型的规则记录""" - } - } +fuzzy_username.label: +"""fuzzy_username""" - user_clientid_put { - desc { - en: """Set rule for 'clientid'""" - zh: """更新内置数据库中指定客户端标识符类型的规则记录""" - } - } +permission.desc: +"""Permission""" - user_clientid_delete { - desc { - en: """Delete rule for 'clientid'""" - zh: """删除内置数据库中指定客户端标识符类型的规则记录""" - } - } +permission.label: +"""permission""" - rules_all_get { - desc { - en: """Show the list of rules for 'all'""" - zh: """列出为所有客户端启用的规则列表""" - } - } +rules_all_delete.desc: +"""Delete rules for 'all'""" - rules_all_post { - desc { - en: """Create/Update the list of rules for 'all'.""" - zh: """创建/更新 为所有客户端启用的规则列表。""" - } - } +rules_all_get.desc: +"""Show the list of rules for 'all'""" - rules_all_delete { - desc { - en: """Delete rules for 'all'""" - zh: """删除 `all` 规则""" - } - } +rules_all_post.desc: +"""Create/Update the list of rules for 'all'.""" - rules_delete { - desc { - en: """Delete all rules for all 'users', 'clients' and 'all'""" - zh: """清除内置数据库中的所有类型('users' 、'clients' 、'all')的所有规则""" - } - } +rules_delete.desc: +"""Delete all rules for all 'users', 'clients' and 'all'""" - fuzzy_username { - desc { - en: """Fuzzy search `username` as substring""" - zh: """使用字串匹配模糊搜索用户名""" - } - label { - en: """fuzzy_username""" - zh: """用户名子串""" - } - } +topic.desc: +"""Rule on specific topic""" - fuzzy_clientid { - desc { - en: """Fuzzy search `clientid` as substring""" - zh: """使用字串匹配模糊搜索客户端标识符""" - } - label { - en: """fuzzy_clientid""" - zh: """客户端标识符子串""" - } - } +topic.label: +"""topic""" - topic { - desc { - en: """Rule on specific topic""" - zh: """在指定主题上的规则""" - } - label { - en: """topic""" - zh: """主题""" - } - } +user_clientid_delete.desc: +"""Delete rule for 'clientid'""" - permission { - desc { - en: """Permission""" - zh: """权限""" - } - label { - en: """permission""" - zh: """权限""" - } - } +user_clientid_get.desc: +"""Get rule for 'clientid'""" - action { - desc { - en: """Authorized action (pub/sub/all)""" - zh: """被授权的行为 (发布/订阅/所有)""" - } - label { - en: """action""" - zh: """行为""" - } - } +user_clientid_put.desc: +"""Set rule for 'clientid'""" - clientid { - desc { - en: """ClientID""" - zh: """客户端标识符""" - } - label { - en: """clientid""" - zh: """客户端标识符""" - } - } +user_username_delete.desc: +"""Delete rule for 'username'""" + +user_username_get.desc: +"""Get rule for 'username'""" + +user_username_put.desc: +"""Set rule for 'username'""" + +username.desc: +"""Username""" + +username.label: +"""username""" + +users_clientid_get.desc: +"""Show the list of rules for clients""" + +users_clientid_post.desc: +"""Add new rule for 'clientid'""" + +users_username_get.desc: +"""Show the list of rules for users""" + +users_username_post.desc: +"""Add new rule for 'username'""" - username 
{ - desc { - en: """Username""" - zh: """用户名""" - } - label { - en: """username""" - zh: """用户名""" - } - } } diff --git a/rel/i18n/emqx_authz_api_schema.hocon b/rel/i18n/emqx_authz_api_schema.hocon index d649dd5a0..2edfc8fcb 100644 --- a/rel/i18n/emqx_authz_api_schema.hocon +++ b/rel/i18n/emqx_authz_api_schema.hocon @@ -1,185 +1,90 @@ emqx_authz_api_schema { - enable { - desc { - en: """Set to true or false to disable this ACL provider.""" - zh: """设为 truefalse 以启用或禁用此访问控制数据源。""" - } - label { - en: """enable""" - zh: """enable""" - } - } - type { - desc { - en: """Backend type.""" - zh: """数据后端类型。""" - } - label { - en: """type""" - zh: """type""" - } - } +body.desc: +"""HTTP request body.""" -#==== authz_file +body.label: +"""body""" - rules { - desc { - en: """Authorization static file rules.""" - zh: """静态授权文件规则。""" - } - label { - en: """rules""" - zh: """规则""" - } - } +cmd.desc: +"""Database query used to retrieve authorization data.""" -#==== authz_http +cmd.label: +"""cmd""" - method { - desc { - en: """HTTP method.""" - zh: """HTTP 请求方法。""" - } - label { - en: """method""" - zh: """method""" - } - } +collection.desc: +"""`MongoDB` collection containing the authorization data.""" - url { - desc { - en: """URL of the auth server.""" - zh: """认证服务器 URL。""" - } - label { - en: """url""" - zh: """url""" - } - } +collection.label: +"""collection""" - headers { - desc { - en: """List of HTTP Headers.""" - zh: """HTTP Headers 列表""" - } - label { - en: """Headers""" - zh: """请求头""" - } - } +enable.desc: +"""Set to true or false to disable this ACL provider.""" - headers_no_content_type { - desc { - en: """List of HTTP headers (without content-type).""" - zh: """HTTP Headers 列表(无 content-type)。""" - } - label { - en: """headers_no_content_type""" - zh: """请求头(无 content-type)""" - } - } +enable.label: +"""enable""" - body { - desc { - en: """HTTP request body.""" - zh: """HTTP 请求体。""" - } - label { - en: """body""" - zh: """请求体""" - } - } - - request_timeout { - desc { - en: """Request timeout.""" - zh: """请求超时时间。""" - } - label { - en: """request_timeout""" - zh: """请求超时""" - } - } - -#==== authz_mnesia - -# only common fields(`enable` and `type`) - -#==== authz_mongo - - collection { - desc { - en: """`MongoDB` collection containing the authorization data.""" - zh: """`MongoDB` 授权数据集。""" - } - label { - en: """collection""" - zh: """数据集""" - } - } - - filter { - desc { - en: """Conditional expression that defines the filter condition in the query. +filter.desc: +"""Conditional expression that defines the filter condition in the query. 
Filter supports the following placeholders: - ${username}: Will be replaced at runtime with Username used by the client when connecting; - ${clientid}: Will be replaced at runtime with Client ID used by the client when connecting.""" - zh: """在查询中定义过滤条件的条件表达式。 -过滤器支持如下占位符: -- ${username}: 将在运行时被替换为客户端连接时使用的用户名 -- ${clientid}: 将在运行时被替换为客户端连接时使用的客户端标识符""" - } - label { - en: """Filter""" - zh: """过滤器""" - } - } -#==== authz_mysql +filter.label: +"""Filter""" -# `query`, is common field +headers.desc: +"""List of HTTP Headers.""" -#==== authz_pgsql +headers.label: +"""Headers""" -# `query`, is common field +headers_no_content_type.desc: +"""List of HTTP headers (without content-type).""" -#==== authz_redis +headers_no_content_type.label: +"""headers_no_content_type""" - cmd { - desc { - en: """Database query used to retrieve authorization data.""" - zh: """访问控制数据查询命令。""" - } - label { - en: """cmd""" - zh: """查询命令""" - } - } +method.desc: +"""HTTP method.""" -#==== common field for DBs (except mongodb and redis) +method.label: +"""method""" - query { - desc { - en: """Database query used to retrieve authorization data.""" - zh: """访问控制数据查询语句。""" - } - label { - en: """query""" - zh: """查询语句""" - } - } +position.desc: +"""Where to place the source.""" -#==== fields +position.label: +"""position""" + +query.desc: +"""Database query used to retrieve authorization data.""" + +query.label: +"""query""" + +request_timeout.desc: +"""Request timeout.""" + +request_timeout.label: +"""request_timeout""" + +rules.desc: +"""Authorization static file rules.""" + +rules.label: +"""rules""" + +type.desc: +"""Backend type.""" + +type.label: +"""type""" + +url.desc: +"""URL of the auth server.""" + +url.label: +"""url""" - position { - desc { - en: """Where to place the source.""" - zh: """认证数据源位置。""" - } - label { - en: """position""" - zh: """位置""" - } - } } diff --git a/rel/i18n/emqx_authz_api_settings.hocon b/rel/i18n/emqx_authz_api_settings.hocon index b44580b34..1840848bd 100644 --- a/rel/i18n/emqx_authz_api_settings.hocon +++ b/rel/i18n/emqx_authz_api_settings.hocon @@ -1,15 +1,9 @@ emqx_authz_api_settings { - authorization_settings_get { - desc { - en: """Get authorization settings""" - zh: """获取授权配置""" - } - } - authorization_settings_put { - desc { - en: """Update authorization settings""" - zh: """更新授权配置""" - } - } +authorization_settings_get.desc: +"""Get authorization settings""" + +authorization_settings_put.desc: +"""Update authorization settings""" + } diff --git a/rel/i18n/emqx_authz_api_sources.hocon b/rel/i18n/emqx_authz_api_sources.hocon index c5f0eaad4..5d2dda69e 100644 --- a/rel/i18n/emqx_authz_api_sources.hocon +++ b/rel/i18n/emqx_authz_api_sources.hocon @@ -1,116 +1,48 @@ emqx_authz_api_sources { - authorization_sources_get { - desc { - en: """List all authorization sources""" - zh: """列出所有授权数据源""" - } - } - authorization_sources_post { - desc { - en: """Add a new source""" - zh: """添加授权数据源""" - } - } +authorization_sources_get.desc: +"""List all authorization sources""" - authorization_sources_type_get { - desc { - en: """Get a authorization source""" - zh: """获取指定类型的授权数据源""" - } - } +authorization_sources_post.desc: +"""Add a new source""" - authorization_sources_type_put { - desc { - en: """Update source""" - zh: """更新指定类型的授权数据源""" - } - } +authorization_sources_type_delete.desc: +"""Delete source""" - authorization_sources_type_delete { - desc { - en: """Delete source""" - zh: """删除指定类型的授权数据源""" - } - } +authorization_sources_type_get.desc: +"""Get a authorization source""" - 
authorization_sources_type_status_get { - desc { - en: """Get a authorization source""" - zh: """获取指定授权数据源的状态""" - } - } +authorization_sources_type_move_post.desc: +"""Change the exection order of sources""" - authorization_sources_type_move_post { - desc { - en: """Change the exection order of sources""" - zh: """更新授权数据源的优先执行顺序""" - } - } +authorization_sources_type_put.desc: +"""Update source""" - sources { - desc { - en: """Authorization source""" - zh: """授权数据源列表""" - } - label { - en: """sources""" - zh: """数据源列表""" - } - } +authorization_sources_type_status_get.desc: +"""Get a authorization source""" - sources { - desc { - en: """Authorization sources""" - zh: """授权数据源列表""" - } - label { - en: """sources""" - zh: """数据源列表""" - } - } +source.desc: +"""Authorization source""" - source_config { - desc { - en: """Source config""" - zh: """数据源配置""" - } - label { - en: """source_config""" - zh: """数据源配置""" - } - } +source.label: +"""source""" - source { - desc { - en: """Authorization source""" - zh: """授权数据源""" - } - label { - en: """source""" - zh: """数据源""" - } - } +source_config.desc: +"""Source config""" - source_config { - desc { - en: """Source config""" - zh: """数据源配置""" - } - label { - en: """source_config""" - zh: """数据源配置""" - } - } +source_config.label: +"""source_config""" + +source_type.desc: +"""Authorization type""" + +source_type.label: +"""source_type""" + +sources.desc: +"""Authorization sources""" + +sources.label: +"""sources""" - source_type { - desc { - en: """Authorization type""" - zh: """数据源类型""" - } - label { - en: """source_type""" - zh: """数据源类型""" - } - } } diff --git a/rel/i18n/emqx_authz_schema.hocon b/rel/i18n/emqx_authz_schema.hocon index e48c64b89..9e5339b2c 100644 --- a/rel/i18n/emqx_authz_schema.hocon +++ b/rel/i18n/emqx_authz_schema.hocon @@ -1,7 +1,251 @@ emqx_authz_schema { - sources { - desc { - en: """Authorization data sources.
+ +deny.desc: +"""The number of authentication failures.""" + +deny.label: +"""The Number of Authentication Failures""" + +redis_sentinel.desc: +"""Authorization using a Redis Sentinel.""" + +redis_sentinel.label: +"""redis_sentinel""" + +rate.desc: +"""The rate of matched, times/second.""" + +rate.label: +"""Rate""" + +status.desc: +"""The status of the resource.""" + +status.label: +"""Status""" + +method.desc: +"""HTTP method.""" + +method.label: +"""method""" + +query.desc: +"""Database query used to retrieve authorization data.""" + +query.label: +"""query""" + +metrics_total.desc: +"""The total number of times the authorization rule was triggered.""" + +metrics_total.label: +"""The Total Number of Times the Authorization Rule was Triggered""" + +redis_cluster.desc: +"""Authorization using a Redis cluster.""" + +redis_cluster.label: +"""redis_cluster""" + +mysql.desc: +"""Authorization using a MySQL database.""" + +mysql.label: +"""mysql""" + +postgresql.desc: +"""Authorization using a PostgreSQL database.""" + +postgresql.label: +"""postgresql""" + +mongo_rs.desc: +"""Authorization using a MongoDB replica set.""" + +mongo_rs.label: +"""mongo_rs""" + +type.desc: +"""Backend type.""" + +type.label: +"""type""" + +mongo_sharded.desc: +"""Authorization using a sharded MongoDB cluster.""" + +mongo_sharded.label: +"""mongo_sharded""" + +body.desc: +"""HTTP request body.""" + +body.label: +"""Request Body""" + +url.desc: +"""URL of the auth server.""" + +url.label: +"""URL""" + +node.desc: +"""Node name.""" + +node.label: +"""Node Name.""" + +headers.desc: +"""List of HTTP Headers.""" + +headers.label: +"""Headers""" + +rate_last5m.desc: +"""The average rate of matched in the last 5 minutes, times/second.""" + +rate_last5m.label: +"""Rate in Last 5min""" + +headers_no_content_type.desc: +"""List of HTTP headers (without content-type).""" + +headers_no_content_type.label: +"""headers_no_content_type""" + +node_error.desc: +"""The error of node.""" + +node_error.label: +"""Error in Node""" + +mnesia.desc: +"""Authorization using a built-in database (mnesia).""" + +mnesia.label: +"""mnesia""" + +enable.desc: +"""Set to true or false to disable this ACL provider""" + +enable.label: +"""enable""" + +matched.desc: +"""Count of this resource is queried.""" + +matched.label: +"""Matched""" + +node_status.desc: +"""The status of the resource for each node.""" + +node_status.label: +"""Resource Status in Node""" + +rate_max.desc: +"""The max rate of matched, times/second.""" + +rate_max.label: +"""Max Rate""" + +filter.desc: +"""Conditional expression that defines the filter condition in the query. +Filter supports the following placeholders
+ - ${username}: Will be replaced at runtime with Username used by the client when connecting
+ - ${clientid}: Will be replaced at runtime with Client ID used by the client when connecting""" + +filter.label: +"""Filter""" + +path.desc: +"""Path to the file which contains the ACL rules. +If the file provisioned before starting EMQX node, +it can be placed anywhere as long as EMQX has read access to it. +That is, EMQX will treat it as read only. + +In case the rule-set is created or updated from EMQX Dashboard or HTTP API, +a new file will be created and placed in `authz` subdirectory inside EMQX's `data_dir`, +and the old file will not be used anymore.""" + +path.label: +"""path""" + +redis_single.desc: +"""Authorization using a single Redis instance.""" + +redis_single.label: +"""redis_single""" + +failed.desc: +"""Count of query failed.""" + +failed.label: +"""Failed""" + +metrics.desc: +"""The metrics of the resource.""" + +metrics.label: +"""Metrics""" + +authorization.desc: +"""Configuration related to the client authorization.""" + +authorization.label: +"""authorization""" + +collection.desc: +"""`MongoDB` collection containing the authorization data.""" + +collection.label: +"""collection""" + +mongo_single.desc: +"""Authorization using a single MongoDB instance.""" + +mongo_single.label: +"""mongo_single""" + +file.desc: +"""Authorization using a static file.""" + +file.label: +"""file""" + +http_post.desc: +"""Authorization using an external HTTP server (via POST requests).""" + +http_post.label: +"""http_post""" + +request_timeout.desc: +"""HTTP request timeout.""" + +request_timeout.label: +"""Request Timeout""" + +allow.desc: +"""The number of times the authentication was successful.""" + +allow.label: +"""The Number of Times the Authentication was Successful""" + +cmd.desc: +"""Database query used to retrieve authorization data.""" + +cmd.label: +"""cmd""" + +nomatch.desc: +"""The number of times that no authorization rules were matched.""" + +nomatch.label: +"""The Number of Times that no Authorization Rules were Matched""" + +sources.desc: +"""Authorization data sources.
An array of authorization (ACL) data providers. It is designed as an array, not a hash-map, so the sources can be ordered to form a chain of access controls.
@@ -19,525 +263,25 @@ NOTE: The source elements are identified by their 'type'. It is NOT allowed to configure two or more sources of the same type.""" - zh: """授权数据源。
-授权(ACL)数据源的列表。 -它被设计为一个数组,而不是一个散列映射, -所以可以作为链式访问控制。
+sources.label: +"""sources""" -当授权一个 'publish' 或 'subscribe' 行为时, -该配置列表中的所有数据源将按顺序进行检查。 -如果在某个客户端未找到时(使用 ClientID 或 Username)。 -将会移动到下一个数据源。直至得到 'allow' 或 'deny' 的结果。
+node_metrics.desc: +"""The metrics of the resource for each node.""" -如果在任何数据源中都未找到对应的客户端信息。 -配置的默认行为 ('authorization.no_match') 将生效。
+node_metrics.label: +"""Resource Metrics in Node""" -注意: -数据源使用 'type' 进行标识。 -使用同一类型的数据源多于一次不被允许。""" - } - label { - en: """sources""" - zh: """数据源""" - } - } +success.desc: +"""Count of query success.""" - authorization { - desc { - en: """Configuration related to the client authorization.""" - zh: """客户端授权相关配置""" - } - label { - en: """authorization""" - zh: """授权""" - } - } +success.label: +"""Success""" - enable { - desc { - en: """Set to true or false to disable this ACL provider""" - zh: """设为 truefalse 以启用或禁用此访问控制数据源""" - } - label { - en: """enable""" - zh: """enable""" - } - } +http_get.desc: +"""Authorization using an external HTTP server (via GET requests).""" - type { - desc { - en: """Backend type.""" - zh: """数据后端类型""" - } - label { - en: """type""" - zh: """type""" - } - } +http_get.label: +"""http_get""" -#==== authz_file - - file { - desc { - en: """Authorization using a static file.""" - zh: """使用静态文件授权""" - } - label { - en: """file""" - zh: """文件""" - } - } - - path { - desc { - en: """Path to the file which contains the ACL rules. -If the file provisioned before starting EMQX node, -it can be placed anywhere as long as EMQX has read access to it. -That is, EMQX will treat it as read only. - -In case the rule-set is created or updated from EMQX Dashboard or HTTP API, -a new file will be created and placed in `authz` subdirectory inside EMQX's `data_dir`, -and the old file will not be used anymore.""" - zh: """包含 ACL 规则的文件路径。 -如果在启动 EMQX 节点前预先配置该路径, -那么可以将该文件置于任何 EMQX 可以访问到的位置。 - -如果从 EMQX Dashboard 或 HTTP API 创建或修改了规则集, -那么EMQX将会生成一个新的文件并将它存放在 `data_dir` 下的 `authz` 子目录中, -并从此弃用旧的文件。""" - } - label { - en: """path""" - zh: """path""" - } - } - -#==== authz_http - - http_get { - desc { - en: """Authorization using an external HTTP server (via GET requests).""" - zh: """使用外部 HTTP 服务器授权(GET 请求)。""" - } - label { - en: """http_get""" - zh: """http_get""" - } - } - - http_post { - desc { - en: """Authorization using an external HTTP server (via POST requests).""" - zh: """使用外部 HTTP 服务器授权(POST 请求)。""" - } - label { - en: """http_post""" - zh: """http_post""" - } - } - - method { - desc { - en: """HTTP method.""" - zh: """HTTP 请求方法""" - } - label { - en: """method""" - zh: """method""" - } - } - - url { - desc { - en: """URL of the auth server.""" - zh: """授权 HTTP 服务器地址。""" - } - label { - en: """URL""" - zh: """URL""" - } - } - - headers { - desc { - en: """List of HTTP Headers.""" - zh: """HTTP Headers 列表""" - } - label { - en: """Headers""" - zh: """请求头""" - } - } - - headers_no_content_type { - desc { - en: """List of HTTP headers (without content-type).""" - zh: """HTTP Headers 列表 (无 content-type) 。""" - } - label { - en: """headers_no_content_type""" - zh: """请求头(无 content-type)""" - } - } - - body { - desc { - en: """HTTP request body.""" - zh: """HTTP request body。""" - } - label { - en: """Request Body""" - zh: """Request Body""" - } - } - - request_timeout { - desc { - en: """HTTP request timeout.""" - zh: """HTTP 请求超时时长。""" - } - label { - en: """Request Timeout""" - zh: """请求超时时间""" - } - } - -#==== authz_mnesia - - mnesia { - desc { - en: """Authorization using a built-in database (mnesia).""" - zh: """使用内部数据库授权(mnesia)。""" - } - label { - en: """mnesia""" - zh: """mnesia""" - } - } - -#==== authz_mongo - - mongo_single { - desc { - en: """Authorization using a single MongoDB instance.""" - zh: """使用 MongoDB 授权(单实例)。""" - } - label { - en: """mongo_single""" - zh: """mongo_single""" - } - } - - mongo_rs { - desc { - en: """Authorization using a MongoDB replica 
set.""" - zh: """使用 MongoDB 授权(副本集模式)""" - } - label { - en: """mongo_rs""" - zh: """mongo_rs""" - } - } - - mongo_sharded { - desc { - en: """Authorization using a sharded MongoDB cluster.""" - zh: """使用 MongoDB 授权(分片集群模式)。""" - } - label { - en: """mongo_sharded""" - zh: """mongo_sharded""" - } - } - - collection { - desc { - en: """`MongoDB` collection containing the authorization data.""" - zh: """`MongoDB` 授权数据集。""" - } - label { - en: """collection""" - zh: """数据集""" - } - } - - filter { - desc { - en: """Conditional expression that defines the filter condition in the query. -Filter supports the following placeholders
- - ${username}: Will be replaced at runtime with Username used by the client when connecting
- - ${clientid}: Will be replaced at runtime with Client ID used by the client when connecting""" - zh: """在查询中定义过滤条件的条件表达式。 -过滤器支持如下占位符:
-- ${username}:将在运行时被替换为客户端连接时使用的用户名
-- ${clientid}:将在运行时被替换为客户端连接时使用的客户端标识符""" - } - label { - en: """Filter""" - zh: """过滤器""" - } - } - -#==== authz_mysql - - mysql { - desc { - en: """Authorization using a MySQL database.""" - zh: """使用 MySOL 数据库授权""" - } - label { - en: """mysql""" - zh: """mysql""" - } - } - -#==== authz_pgsql - - postgresql { - desc { - en: """Authorization using a PostgreSQL database.""" - zh: """使用 PostgreSQL 数据库授权""" - } - label { - en: """postgresql""" - zh: """postgresql""" - } - } - -#==== authz_redis - - redis_single { - desc { - en: """Authorization using a single Redis instance.""" - zh: """使用 Redis 授权(单实例)。""" - } - label { - en: """redis_single""" - zh: """redis_single""" - } - } - - redis_sentinel { - desc { - en: """Authorization using a Redis Sentinel.""" - zh: """使用 Redis 授权(哨兵模式)。""" - } - label { - en: """redis_sentinel""" - zh: """redis_sentinel""" - } - } - - redis_cluster { - desc { - en: """Authorization using a Redis cluster.""" - zh: """使用 Redis 授权(集群模式)。""" - } - label { - en: """redis_cluster""" - zh: """redis_cluster""" - } - } - - cmd { - desc { - en: """Database query used to retrieve authorization data.""" - zh: """访问控制数据查查询命令""" - } - label { - en: """cmd""" - zh: """查询命令""" - } - } - -#==== common field for DBs (except redis) - - query { - desc { - en: """Database query used to retrieve authorization data.""" - zh: """访问控制数据查询语句/查询命令。""" - } - label { - en: """query""" - zh: """查询语句""" - } - } - -#==== metrics field - - metrics { - desc { - en: """The metrics of the resource.""" - zh: """资源统计指标。""" - } - label: { - en: """Metrics""" - zh: """指标""" - } - } - - node_metrics { - desc { - en: """The metrics of the resource for each node.""" - zh: """每个节点上资源的统计指标。""" - } - label: { - en: """Resource Metrics in Node""" - zh: """节点资源指标""" - } - } - - status { - desc { - en: """The status of the resource.""" - zh: """资源状态。""" - } - label: { - en: """Status""" - zh: """状态""" - } - } - - node_status { - desc { - en: """The status of the resource for each node.""" - zh: """每个节点上资源的状态。""" - } - label: { - en: """Resource Status in Node""" - zh: """节点资源状态""" - } - } - - node_error { - desc { - en: """The error of node.""" - zh: """节点上产生的错误。""" - } - label: { - en: """Error in Node""" - zh: """节点产生的错误""" - } - } - - matched { - desc { - en: """Count of this resource is queried.""" - zh: """请求命中次数。""" - } - label: { - en: """Matched""" - zh: """已命中""" - } - } - - success { - desc { - en: """Count of query success.""" - zh: """请求成功次数。""" - } - label: { - en: """Success""" - zh: """成功""" - } - } - - failed { - desc { - en: """Count of query failed.""" - zh: """请求失败次数。""" - } - label: { - en: """Failed""" - zh: """失败""" - } - } - - rate { - desc { - en: """The rate of matched, times/second.""" - zh: """命中速率,单位:次/秒。""" - } - label: { - en: """Rate""" - zh: """速率""" - } - } - - rate_max { - desc { - en: """The max rate of matched, times/second.""" - zh: """最大命中速率,单位:次/秒。""" - } - label: { - en: """Max Rate""" - zh: """最大速率""" - } - } - - rate_last5m { - desc { - en: """The average rate of matched in the last 5 minutes, times/second.""" - zh: """5分钟内平均命中速率,单位:次/秒。""" - } - label: { - en: """Rate in Last 5min""" - zh: """5分钟内速率""" - } - } - - node { - desc { - en: """Node name.""" - zh: """节点名称。""" - } - label: { - en: """Node Name.""" - zh: """节点名称。""" - } - } - - metrics_total { - desc { - en: """The total number of times the authorization rule was triggered.""" - zh: """授权实例被触发的总次数。""" - } - label: { - en: """The Total Number of Times the Authorization Rule was Triggered""" - zh: 
"""授权实例被触发的总次数""" - } - } - - nomatch { - desc { - en: """The number of times that no authorization rules were matched.""" - zh: """没有匹配到任何授权规则的次数。""" - } - label: { - en: """The Number of Times that no Authorization Rules were Matched""" - zh: """没有匹配到任何授权规则的次数""" - } - } - - allow { - desc { - en: """The number of times the authentication was successful.""" - zh: """授权成功的次数。""" - } - label: { - en: """The Number of Times the Authentication was Successful""" - zh: """授权成功次数""" - } - } - - deny { - desc { - en: """The number of authentication failures.""" - zh: """授权失败的次数。""" - } - label: { - en: """The Number of Authentication Failures""" - zh: """授权失败次数""" - } - } } diff --git a/rel/i18n/emqx_auto_subscribe_api.hocon b/rel/i18n/emqx_auto_subscribe_api.hocon index b8d043e9a..df8e87e1a 100644 --- a/rel/i18n/emqx_auto_subscribe_api.hocon +++ b/rel/i18n/emqx_auto_subscribe_api.hocon @@ -1,23 +1,12 @@ emqx_auto_subscribe_api { - list_auto_subscribe_api { - desc { - en: """Get auto subscribe topic list""" - zh: """获取自动订阅主题列表""" - } - } - update_auto_subscribe_api { - desc { - en: """Update auto subscribe topic list""" - zh: """更新自动订阅主题列表""" - } - } +list_auto_subscribe_api.desc: +"""Get auto subscribe topic list""" - update_auto_subscribe_api_response409 { - desc { - en: """Auto Subscribe topics max limit""" - zh: """超出自定订阅主题列表长度限制""" - } - } +update_auto_subscribe_api.desc: +"""Update auto subscribe topic list""" + +update_auto_subscribe_api_response409.desc: +"""Auto Subscribe topics max limit""" } diff --git a/rel/i18n/emqx_auto_subscribe_schema.hocon b/rel/i18n/emqx_auto_subscribe_schema.hocon index ae8c40303..e26ed2546 100644 --- a/rel/i18n/emqx_auto_subscribe_schema.hocon +++ b/rel/i18n/emqx_auto_subscribe_schema.hocon @@ -1,85 +1,48 @@ emqx_auto_subscribe_schema { - auto_subscribe { - desc { - en: """After the device logs in successfully, the subscription is automatically completed for the device through the pre-defined subscription representation. Supports the use of placeholders.""" - zh: """设备登录成功之后,通过预设的订阅表示符,为设备自动完成订阅。支持使用占位符。""" - } - label { - en: """Auto Subscribe""" - zh: """自动订阅""" - } - } - topic { - desc { - en: """Topic name, placeholders are supported. For example: client/${clientid}/username/${username}/host/${host}/port/${port} -Required field, and cannot be empty string""" - zh: """订阅标识符,支持使用占位符,例如 client/${clientid}/username/${username}/host/${host}/port/${port} -必填,且不可为空字符串""" - } - label { - en: """Topic""" - zh: """订阅标识符""" - } - } +auto_subscribe.desc: +"""After the device logs in successfully, the subscription is automatically completed for the device through the pre-defined subscription representation. Supports the use of placeholders.""" - qos { - desc { - en: """Default value 0. Quality of service. +auto_subscribe.label: +"""Auto Subscribe""" + +nl.desc: +"""Default value 0. +MQTT v3.1.1: if you subscribe to the topic published by yourself, you will receive all messages that you published. +MQTT v5: if you set this option as 1 when subscribing, the server will not forward the message you published to you.""" + +nl.label: +"""No Local""" + +qos.desc: +"""Default value 0. Quality of service. At most once (0) At least once (1) Exactly once (2)""" - zh: """缺省值为 0,服务质量, -QoS 0:消息最多传递一次,如果当时客户端不可用,则会丢失该消息。 -QoS 1:消息传递至少 1 次。 -QoS 2:消息仅传送一次。""" - } - label { - en: """Quality of Service""" - zh: """服务质量""" - } - } - rh { - desc { - en: """Default value 0. 
This option is used to specify whether the server forwards the retained message to the client when establishing a subscription. +qos.label: +"""Quality of Service""" + +rap.desc: +"""Default value 0. This option is used to specify whether the server retains the RETAIN mark when forwarding messages to the client, and this option does not affect the RETAIN mark in the retained message. Therefore, when the option Retain As Publish is set to 0, the client will directly distinguish whether this is a normal forwarded message or a retained message according to the RETAIN mark in the message, instead of judging whether this message is the first received after subscribing(the forwarded message may be sent before the retained message, which depends on the specific implementation of different brokers).""" + +rap.label: +"""Retain As Publish""" + +rh.desc: +"""Default value 0. This option is used to specify whether the server forwards the retained message to the client when establishing a subscription. Retain Handling is equal to 0, as long as the client successfully subscribes, the server will send the retained message. Retain Handling is equal to 1, if the client successfully subscribes and this subscription does not exist previously, the server sends the retained message. After all, sometimes the client re-initiate the subscription just to change the QoS, but it does not mean that it wants to receive the reserved messages again. Retain Handling is equal to 2, even if the client successfully subscribes, the server does not send the retained message.""" - zh: """指定订阅建立时服务端是否向客户端发送保留消息, -可选值 0:只要客户端订阅成功,服务端就发送保留消息。 -可选值 1:客户端订阅成功且该订阅此前不存在,服务端才发送保留消息。毕竟有些时候客户端重新发起订阅可能只是为了改变一下 QoS,并不意味着它想再次接收保留消息。 -可选值 2:即便客户订阅成功,服务端也不会发送保留消息。""" - } - label { - en: """Retain Handling""" - zh: """Retain Handling""" - } - } - rap { - desc { - en: """Default value 0. This option is used to specify whether the server retains the RETAIN mark when forwarding messages to the client, and this option does not affect the RETAIN mark in the retained message. Therefore, when the option Retain As Publish is set to 0, the client will directly distinguish whether this is a normal forwarded message or a retained message according to the RETAIN mark in the message, instead of judging whether this message is the first received after subscribing(the forwarded message may be sent before the retained message, which depends on the specific implementation of different brokers).""" - zh: """缺省值为 0,这一选项用来指定服务端向客户端转发消息时是否要保留其中的 RETAIN 标识,注意这一选项不会影响保留消息中的 RETAIN 标识。因此当 Retain As Publish 选项被设置为 0 时,客户端直接依靠消息中的 RETAIN 标识来区分这是一个正常的转发消息还是一个保留消息,而不是去判断消息是否是自己订阅后收到的第一个消息(转发消息甚至可能会先于保留消息被发送,视不同 Broker 的具体实现而定)。""" - } - label { - en: """Retain As Publish""" - zh: """Retain As Publish""" - } - } +rh.label: +"""Retain Handling""" + +topic.desc: +"""Topic name, placeholders are supported. For example: client/${clientid}/username/${username}/host/${host}/port/${port} +Required field, and cannot be empty string""" + +topic.label: +"""Topic""" - nl { - desc { - en: """Default value 0. -MQTT v3.1.1: if you subscribe to the topic published by yourself, you will receive all messages that you published. 
-MQTT v5: if you set this option as 1 when subscribing, the server will not forward the message you published to you.""" - zh: """缺省值为0, -MQTT v3.1.1:如果设备订阅了自己发布消息的主题,那么将收到自己发布的所有消息。 -MQTT v5:如果设备在订阅时将此选项设置为 1,那么服务端将不会向设备转发自己发布的消息""" - } - label { - en: """No Local""" - zh: """No Local""" - } - } } diff --git a/rel/i18n/emqx_bridge_api.hocon b/rel/i18n/emqx_bridge_api.hocon index 66960619a..8b7950cdc 100644 --- a/rel/i18n/emqx_bridge_api.hocon +++ b/rel/i18n/emqx_bridge_api.hocon @@ -1,180 +1,100 @@ emqx_bridge_api { - desc_param_path_operation_cluster { - desc { - en: """Operations can be one of: stop, restart""" - zh: """集群可用操作:停止、重新启动""" - } - label: { - en: "Cluster Operation" - zh: "集群可用操作" - } - } +desc_api1.desc: +"""List all created bridges""" - desc_param_path_operation_on_node { - desc { - en: """Operations can be one of: stop, restart""" - zh: """节点可用操作:停止、重新启动""" - } - label: { - en: "Node Operation " - zh: "节点可用操作" - } - } +desc_api1.label: +"""List All Bridges""" - desc_param_path_node { - desc { - en: """The node name, e.g. emqx@127.0.0.1""" - zh: """节点名,比如 emqx@127.0.0.1""" - } - label: { - en: "The node name" - zh: "节点名" - } - } +desc_api2.desc: +"""Create a new bridge by type and name""" - desc_param_path_id { - desc { - en: """The bridge Id. Must be of format {type}:{name}""" - zh: """Bridge ID , 格式为 {type}:{name}""" - } - label: { - en: "Bridge ID" - zh: "Bridge ID" - } - } +desc_api2.label: +"""Create Bridge""" - desc_param_path_enable { - desc { - en: """Whether to enable this bridge""" - zh: """是否启用桥接""" - } - label: { - en: "Enable bridge" - zh: "启用桥接" - } - } - desc_api1 { - desc { - en: """List all created bridges""" - zh: """列出所有 Bridge""" - } - label: { - en: "List All Bridges" - zh: "列出所有 Bridge" - } - } +desc_api3.desc: +"""Get a bridge by Id""" - desc_api2 { - desc { - en: """Create a new bridge by type and name""" - zh: """通过类型和名字创建 Bridge""" - } - label: { - en: "Create Bridge" - zh: "创建 Bridge" - } - } +desc_api3.label: +"""Get Bridge""" - desc_api3 { - desc { - en: """Get a bridge by Id""" - zh: """通过 ID 获取 Bridge""" - } - label: { - en: "Get Bridge" - zh: "获取 Bridge" - } - } +desc_api4.desc: +"""Update a bridge by Id""" - desc_api4 { - desc { - en: """Update a bridge by Id""" - zh: """通过 ID 更新 Bridge""" - } - label: { - en: "Update Bridge" - zh: "更新 Bridge" - } - } +desc_api4.label: +"""Update Bridge""" - desc_api5 { - desc { - en: """Delete a bridge by Id""" - zh: """通过 ID 删除 Bridge""" - } - label: { - en: "Delete Bridge" - zh: "删除 Bridge" - } - } +desc_api5.desc: +"""Delete a bridge by Id""" - desc_api6 { - desc { - en: """Reset a bridge metrics by Id""" - zh: """通过 ID 重置 Bridge 的计数""" - } - label: { - en: "Reset Bridge Metrics" - zh: "重置 Bridge 计数" - } - } +desc_api5.label: +"""Delete Bridge""" - desc_api7 { - desc { - en: """Stop/Restart bridges on all nodes in the cluster.""" - zh: """停止或启用所有节点上的桥接""" - } - label: { - en: "Cluster Bridge Operate" - zh: "集群 Bridge 操作" - } - } +desc_api6.desc: +"""Reset a bridge metrics by Id""" - desc_api8 { - desc { - en: """Stop/Restart bridges on a specific node.""" - zh: """在某个节点上停止/重新启动 Bridge。""" - } - label: { - en: "Node Bridge Operate" - zh: "单节点 Bridge 操作" - } - } +desc_api6.label: +"""Reset Bridge Metrics""" - desc_api9 { - desc { - en: """Test creating a new bridge by given ID
+desc_api7.desc: +"""Stop/Restart bridges on all nodes in the cluster.""" + +desc_api7.label: +"""Cluster Bridge Operate""" + +desc_api8.desc: +"""Stop/Restart bridges on a specific node.""" + +desc_api8.label: +"""Node Bridge Operate""" + +desc_api9.desc: +"""Test creating a new bridge by given ID
The ID must be of format '{type}:{name}'""" - zh: """通过给定的 ID 测试创建一个新的桥接。
-ID 的格式必须为 ’{type}:{name}”""" - } - label: { - en: "Test Bridge Creation" - zh: "测试桥接创建" - } - } - desc_bridge_metrics { - desc { - en: """Get bridge metrics by Id""" - zh: """通过 Id 来获取桥接的指标信息""" - } - label: { - en: "Get Bridge Metrics" - zh: "获取桥接的指标" - } - } +desc_api9.label: +"""Test Bridge Creation""" - desc_enable_bridge { - desc { - en: """Enable or Disable bridges on all nodes in the cluster.""" - zh: """启用或禁用所有节点上的桥接""" - } - label: { - en: "Cluster Bridge Enable" - zh: "是否启用集群内的桥接" - } - } +desc_bridge_metrics.desc: +"""Get bridge metrics by Id""" + +desc_bridge_metrics.label: +"""Get Bridge Metrics""" + +desc_enable_bridge.desc: +"""Enable or Disable bridges on all nodes in the cluster.""" + +desc_enable_bridge.label: +"""Cluster Bridge Enable""" + +desc_param_path_enable.desc: +"""Whether to enable this bridge""" + +desc_param_path_enable.label: +"""Enable bridge""" + +desc_param_path_id.desc: +"""The bridge Id. Must be of format {type}:{name}""" + +desc_param_path_id.label: +"""Bridge ID""" + +desc_param_path_node.desc: +"""The node name, e.g. emqx@127.0.0.1""" + +desc_param_path_node.label: +"""The node name""" + +desc_param_path_operation_cluster.desc: +"""Operations can be one of: stop, restart""" + +desc_param_path_operation_cluster.label: +"""Cluster Operation""" + +desc_param_path_operation_on_node.desc: +"""Operations can be one of: stop, restart""" + +desc_param_path_operation_on_node.label: +"""Node Operation """ } diff --git a/rel/i18n/emqx_bridge_kafka.hocon b/rel/i18n/emqx_bridge_kafka.hocon index 2f1811269..ef2e27972 100644 --- a/rel/i18n/emqx_bridge_kafka.hocon +++ b/rel/i18n/emqx_bridge_kafka.hocon @@ -1,667 +1,361 @@ emqx_bridge_kafka { - config_enable { - desc { - en: "Enable (true) or disable (false) this Kafka bridge." - zh: "启用(true)或停用该(false)Kafka 数据桥接。" - } - label { - en: "Enable or Disable" - zh: "启用或停用" - } - } - desc_config { - desc { - en: """Configuration for a Kafka bridge.""" - zh: """Kafka 桥接配置""" - } - label { - en: "Kafka Bridge Configuration" - zh: "Kafka 桥接配置" - } - } - desc_type { - desc { - en: """The Bridge Type""" - zh: """桥接类型""" - } - label { - en: "Bridge Type" - zh: "桥接类型" - } - } - desc_name { - desc { - en: """Bridge name, used as a human-readable description of the bridge.""" - zh: """桥接名字,可读描述""" - } - label { - en: "Bridge Name" - zh: "桥接名字" - } - } - kafka_producer { - desc { - en: "Kafka Producer configuration." - zh: "Kafka Producer 配置。" - } - label { - en: "Kafka Producer" - zh: "Kafka Producer" - } - } - producer_opts { - desc { - en: "Local MQTT data source and Kafka bridge configs." - zh: "本地 MQTT 数据源和 Kafka 桥接的配置。" - } - label { - en: "MQTT to Kafka" - zh: "MQTT 到 Kafka" - } - } - mqtt_topic { - desc { - en: "MQTT topic or topic filter as data source (bridge input). If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in Kafka." - zh: "MQTT 主题数据源由桥接指定,或留空由规则动作指定。" - } - label { - en: "Source MQTT Topic" - zh: "源 MQTT 主题" - } - } - producer_kafka_opts { - desc { - en: "Kafka producer configs." - zh: "Kafka 生产者参数。" - } - label { - en: "Kafka Producer" - zh: "生产者参数" - } - } - bootstrap_hosts { - desc { - en: "A comma separated list of Kafka host[:port] endpoints to bootstrap the client. Default port number is 9092." - zh: "用逗号分隔的 host[:port] 主机列表。默认端口号为 9092。" - } - label { - en: "Bootstrap Hosts" - zh: "主机列表" - } - } - connect_timeout { - desc { - en: "Maximum wait time for TCP connection establishment (including authentication time if enabled)." 
- zh: "建立 TCP 连接时的最大等待时长(若启用认证,这个等待时长也包含完成认证所需时间)。" - } - label { - en: "Connect Timeout" - zh: "连接超时" - } - } - min_metadata_refresh_interval { - desc { - en: "Minimum time interval the client has to wait before refreshing Kafka broker and topic metadata. " - "Setting too small value may add extra load on Kafka." - zh: "刷新 Kafka broker 和 Kafka 主题元数据段最短时间间隔。设置太小可能会增加 Kafka 压力。" - } - label { - en: "Min Metadata Refresh Interval" - zh: "元数据刷新最小间隔" - } - } - metadata_request_timeout { - desc { - en: "Maximum wait time when fetching metadata from Kafka." - zh: "刷新元数据时最大等待时长。" - } - label { - en: "Metadata Request Timeout" - zh: "元数据请求超时" - } - } - authentication { - desc { - en: "Authentication configs." - zh: "认证参数。" - } - label { - en: "Authentication" - zh: "认证" - } - } - socket_opts { - desc { - en: "Extra socket options." - zh: "更多 Socket 参数设置。" - } - label { - en: "Socket Options" - zh: "Socket 参数" - } - } - auth_sasl_mechanism { - desc { - en: "SASL authentication mechanism." - zh: "SASL 认证方法名称。" - } - label { - en: "Mechanism" - zh: "认证方法" - } - } - auth_sasl_username { - desc { - en: "SASL authentication username." - zh: "SASL 认证的用户名。" - } - label { - en: "Username" - zh: "用户名" - } - } - auth_sasl_password { - desc { - en: "SASL authentication password." - zh: "SASL 认证的密码。" - } - label { - en: "Password" - zh: "密码" - } - } - auth_kerberos_principal { - desc { - en: "SASL GSSAPI authentication Kerberos principal. " - "For example client_name@MY.KERBEROS.REALM.MYDOMAIN.COM, " - "NOTE: The realm in use has to be configured in /etc/krb5.conf in EMQX nodes." - zh: "SASL GSSAPI 认证方法的 Kerberos principal," - "例如 client_name@MY.KERBEROS.REALM.MYDOMAIN.COM" - "注意:这里使用的 realm 需要配置在 EMQX 服务器的 /etc/krb5.conf 中" - } - label { - en: "Kerberos Principal" - zh: "Kerberos Principal" - } - } - auth_kerberos_keytab_file { - desc { - en: "SASL GSSAPI authentication Kerberos keytab file path. " - "NOTE: This file has to be placed in EMQX nodes, and the EMQX service runner user requires read permission." - zh: "SASL GSSAPI 认证方法的 Kerberos keytab 文件。" - "注意:该文件需要上传到 EMQX 服务器中,且运行 EMQX 服务的系统账户需要有读取权限。" - } - label { - en: "Kerberos keytab file" - zh: "Kerberos keytab 文件" - } - } - socket_send_buffer { - desc { - en: "Fine tune the socket send buffer. The default value is tuned for high throughput." - zh: "TCP socket 的发送缓存调优。默认值是针对高吞吐量的一个推荐值。" - } - label { - en: "Socket Send Buffer Size" - zh: "Socket 发送缓存大小" - } - } - socket_receive_buffer { - desc { - en: "Fine tune the socket receive buffer. The default value is tuned for high throughput." - zh: "TCP socket 的收包缓存调优。默认值是针对高吞吐量的一个推荐值。" - } - label { - en: "Socket Receive Buffer Size" - zh: "Socket 收包缓存大小" - } - } - # hidden - socket_nodelay { - desc { - en: "When set to 'true', TCP buffer is sent as soon as possible. " - "Otherwise, the OS kernel may buffer small TCP packets for a while (40 ms by default)." - zh: "设置‘true’让系统内核立即发送。否则当需要发送的内容很少时,可能会有一定延迟(默认 40 毫秒)。" - } - label { - en: "No Delay" - zh: "是否关闭延迟发送" - } - } - kafka_topic { - desc { - en: "Kafka topic name" - zh: "Kafka 主题名称" - } - label { - en: "Kafka Topic Name" - zh: "Kafka 主题名称" - } - } - kafka_message { - desc { - en: "Template to render a Kafka message." - zh: "用于生成 Kafka 消息的模版。" - } - label { - en: "Kafka Message Template" - zh: "Kafka 消息模版" - } - } - kafka_message_key { - desc { - en: "Template to render Kafka message key. " - "If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) " - "then Kafka's NULL (but not empty string) is used." 
- zh: "生成 Kafka 消息 Key 的模版。如果模版生成后为空值," - "则会使用 Kafka 的 NULL ,而非空字符串。" - } - label { - en: "Message Key" - zh: "消息的 Key" - } - } - kafka_message_value { - desc { - en: "Template to render Kafka message value. " - "If the template is rendered " - "into a NULL value (i.e. there is no such data field in Rule Engine context) " - "then Kafka's NULL (but not empty string) is used." - zh: "生成 Kafka 消息 Value 的模版。如果模版生成后为空值," - "则会使用 Kafka 的 NULL,而非空字符串。" - } - label { - en: "Message Value" - zh: "消息的 Value" - } - } - kafka_message_timestamp { - desc { - en: "Which timestamp to use. " - "The timestamp is expected to be a millisecond precision Unix epoch " - "which can be in string format, e.g. 1661326462115 or " - "'1661326462115'. " - "When the desired data field for this template is not found, " - "or if the found data is not a valid integer, " - "the current system timestamp will be used." - zh: "生成 Kafka 消息时间戳的模版。" - "该时间必需是一个整型数值(可以是字符串格式)例如 1661326462115 " - "或 '1661326462115'。" - "当所需的输入字段不存在,或不是一个整型时," - "则会使用当前系统时间。" - } - label { - en: "Message Timestamp" - zh: "消息的时间戳" - } - } - max_batch_bytes { - desc { - en: "Maximum bytes to collect in a Kafka message batch. " - "Most of the Kafka brokers default to a limit of 1 MB batch size. " - "EMQX's default value is less than 1 MB in order to compensate " - "Kafka message encoding overheads (especially when each individual message is very small). " - "When a single message is over the limit, it is still sent (as a single element batch)." - zh: "最大消息批量字节数。" - "大多数 Kafka 环境的默认最低值是 1 MB,EMQX 的默认值比 1 MB 更小是因为需要" - "补偿 Kafka 消息编码所需要的额外字节(尤其是当每条消息都很小的情况下)。" - "当单个消息的大小超过该限制时,它仍然会被发送,(相当于该批量中只有单个消息)。" - } - label { - en: "Max Batch Bytes" - zh: "最大批量字节数" - } - } - compression { - desc { - en: "Compression method." - zh: "压缩方法。" - } - label { - en: "Compression" - zh: "压缩" - } - } - partition_strategy { - desc { - en: "Partition strategy is to tell the producer how to dispatch messages to Kafka partitions.\n\n" - "random: Randomly pick a partition for each message\n" - "key_dispatch: Hash Kafka message key to a partition number" - zh: "设置消息发布时应该如何选择 Kafka 分区。\n\n" - "random: 为每个消息随机选择一个分区。\n" - "key_dispatch: Hash Kafka message key to a partition number" - } - label { - en: "Partition Strategy" - zh: "分区选择策略" - } - } - required_acks { - desc { - en: "Required acknowledgements for Kafka partition leader to wait for its followers " - "before it sends back the acknowledgement to EMQX Kafka producer\n\n" - "all_isr: Require all in-sync replicas to acknowledge.\n" - "leader_only: Require only the partition-leader's acknowledgement.\n" - "none: No need for Kafka to acknowledge at all." - zh: "设置 Kafka leader 在返回给 EMQX 确认之前需要等待多少个 follower 的确认。\n\n" - "all_isr: 需要所有的在线复制者都确认。\n" - "leader_only: 仅需要分区 leader 确认。\n" - "none: 无需 Kafka 回复任何确认。" - } - label { - en: "Required Acks" - zh: "Kafka 确认数量" - } - } - partition_count_refresh_interval { - desc { - en: "The time interval for Kafka producer to discover increased number of partitions.\n" - "After the number of partitions is increased in Kafka, EMQX will start taking the \n" - "discovered partitions into account when dispatching messages per partition_strategy." - zh: "配置 Kafka 刷新分区数量的时间间隔。\n" - "EMQX 发现 Kafka 分区数量增加后,会开始按 partition_strategy 配置,把消息发送到新的分区中。" - } - label { - en: "Partition Count Refresh Interval" - zh: "分区数量刷新间隔" - } - } - max_inflight { - desc { - en: "Maximum number of batches allowed for Kafka producer (per-partition) to send before receiving acknowledgement from Kafka. 
" - "Greater value typically means better throughput. However, there can be a risk of message reordering when this " - "value is greater than 1." - zh: "设置 Kafka 生产者(每个分区一个)在收到 Kafka 的确认前最多发送多少个请求(批量)。" - "调大这个值通常可以增加吞吐量,但是,当该值设置大于 1 时存在消息乱序的风险。" - } - label { - en: "Max Inflight" - zh: "飞行窗口" - } - } - producer_buffer { - desc { - en: "Configure producer message buffer.\n\n" - "Tell Kafka producer how to buffer messages when EMQX has more messages to send than " - "Kafka can keep up, or when Kafka is down." - zh: "配置消息缓存的相关参数。\n\n" - "当 EMQX 需要发送的消息超过 Kafka 处理能力,或者当 Kafka 临时下线时,EMQX 内部会将消息缓存起来。" - } - label { - en: "Message Buffer" - zh: "消息缓存" - } - } - buffer_mode { - desc { - en: "Message buffer mode.\n\n" - "memory: Buffer all messages in memory. The messages will be lost in case of EMQX node restart\n" - "disk: Buffer all messages on disk. The messages on disk are able to survive EMQX node restart.\n" - "hybrid: Buffer message in memory first, when up to certain limit " - "(see segment_bytes config for more information), then start offloading " - "messages to disk, Like memory mode, the messages will be lost in case of " - "EMQX node restart." - zh: "消息缓存模式。\n" - "memory: 所有的消息都缓存在内存里。如果 EMQX 服务重启,缓存的消息会丢失。\n" - "disk: 缓存到磁盘上。EMQX 重启后会继续发送重启前未发送完成的消息。\n" - "hybrid: 先将消息缓存在内存中,当内存中的消息堆积超过一定限制" - "(配置项 segment_bytes 描述了该限制)后,后续的消息会缓存到磁盘上。" - "与 memory 模式一样,如果 EMQX 服务重启,缓存的消息会丢失。" - } - label { - en: "Buffer Mode" - zh: "缓存模式" - } - } - buffer_per_partition_limit { - desc { - en: "Number of bytes allowed to buffer for each Kafka partition. " - "When this limit is exceeded, old messages will be dropped in a trade for credits " - "for new messages to be buffered." - zh: "为每个 Kafka 分区设置的最大缓存字节数。当超过这个上限之后,老的消息会被丢弃," - "为新的消息腾出空间。" - } - label { - en: "Per-partition Buffer Limit" - zh: "Kafka 分区缓存上限" - } - } - buffer_segment_bytes { - desc { - en: "Applicable when buffer mode is set to disk or hybrid.\n" - "This value is to specify the size of each on-disk buffer file." - zh: "当缓存模式是 diskhybrid 时适用。" - "该配置用于指定缓存到磁盘上的文件的大小。" - } - label { - en: "Segment File Bytes" - zh: "缓存文件大小" - } - } - buffer_memory_overload_protection { - desc { - en: "Applicable when buffer mode is set to memory\n" - "EMQX will drop old buffered messages under high memory pressure. " - "The high memory threshold is defined in config sysmon.os.sysmem_high_watermark. " - "NOTE: This config only works on Linux." - zh: "缓存模式是 memoryhybrid 时适用。" - "当系统处于高内存压力时,从队列中丢弃旧的消息以减缓内存增长。" - "内存压力值由配置项 sysmon.os.sysmem_high_watermark 决定。" - "注意,该配置仅在 Linux 系统中有效。" - } - label { - en: "Memory Overload Protection" - zh: "内存过载保护" - } - } - auth_username_password { - desc { - en: "Username/password based authentication." - zh: "基于用户名密码的认证。" - } - label { - en: "Username/password Auth" - zh: "用户名密码认证" - } - } - auth_gssapi_kerberos { - desc { - en: "Use GSSAPI/Kerberos authentication." - zh: "使用 GSSAPI/Kerberos 认证。" - } - label { - en: "GSSAPI/Kerberos" - zh: "GSSAPI/Kerberos" - } - } - kafka_consumer { - desc { - en: "Kafka Consumer configuration." - zh: "Kafka 消费者配置。" - } - label { - en: "Kafka Consumer" - zh: "Kafka 消费者" - } - } - consumer_opts { - desc { - en: "Local MQTT publish and Kafka consumer configs." - zh: "本地 MQTT 转发 和 Kafka 消费者配置。" - } - label { - en: "MQTT to Kafka" - zh: "MQTT 到 Kafka" - } - } - consumer_kafka_opts { - desc { - en: "Kafka consumer configs." - zh: "Kafka消费者配置。" - } - label { - en: "Kafka Consumer" - zh: "Kafka 消费者" - } - } - consumer_mqtt_opts { - desc { - en: "Local MQTT message publish." 
- zh: "本地 MQTT 消息转发。" - } - label { - en: "MQTT publish" - zh: "MQTT 转发" - } - } - consumer_mqtt_topic { - desc { - en: "Local topic to which consumed Kafka messages should be published to." - zh: "设置 Kafka 消息向哪个本地 MQTT 主题转发消息。" - } - label { - en: "MQTT Topic" - zh: "MQTT主题" - } - } - consumer_mqtt_qos { - desc { - en: "MQTT QoS used to publish messages consumed from Kafka." - zh: "转发 MQTT 消息时使用的 QoS。" - } - label { - en: "QoS" - zh: "QoS" - } - } - consumer_mqtt_payload { - desc { - en: "The template for transforming the incoming Kafka message." - " By default, it will use JSON format to serialize" - " inputs from the Kafka message. Such fields are:\n" - "headers: an object containing string key-value pairs.\n" - "key: Kafka message key (uses the chosen key encoding).\n" - "offset: offset for the message.\n" - "topic: Kafka topic.\n" - "ts: message timestamp.\n" - "ts_type: message timestamp type, which is one of" - " create, append or undefined.\n" - "value: Kafka message value (uses the chosen value encoding)." - zh: "用于转换收到的 Kafka 消息的模板。 " - "默认情况下,它将使用 JSON 格式来序列化来自 Kafka 的所有字段。 " - "这些字段包括:" - "headers:一个包含字符串键值对的 JSON 对象。\n" - "key:Kafka 消息的键(使用选择的编码方式编码)。\n" - "offset:消息的偏移量。\n" - "topic:Kafka 主题。\n" - "ts: 消息的时间戳。\n" - "ts_type:消息的时间戳类型,值可能是:" - " createappendundefined。\n" - "value: Kafka 消息值(使用选择的编码方式编码)。" +connect_timeout.desc: +"""Maximum wait time for TCP connection establishment (including authentication time if enabled).""" + +connect_timeout.label: +"""Connect Timeout""" + +producer_opts.desc: +"""Local MQTT data source and Kafka bridge configs.""" + +producer_opts.label: +"""MQTT to Kafka""" + +min_metadata_refresh_interval.desc: +"""Minimum time interval the client has to wait before refreshing Kafka broker and topic metadata. Setting too small value may add extra load on Kafka.""" + +min_metadata_refresh_interval.label: +"""Min Metadata Refresh Interval""" + +kafka_producer.desc: +"""Kafka Producer configuration.""" + +kafka_producer.label: +"""Kafka Producer""" + +producer_buffer.desc: +"""Configure producer message buffer. + +Tell Kafka producer how to buffer messages when EMQX has more messages to send than Kafka can keep up, or when Kafka is down.""" + +producer_buffer.label: +"""Message Buffer""" + +socket_send_buffer.desc: +"""Fine tune the socket send buffer. The default value is tuned for high throughput.""" + +socket_send_buffer.label: +"""Socket Send Buffer Size""" + +desc_name.desc: +"""Bridge name, used as a human-readable description of the bridge.""" + +desc_name.label: +"""Bridge Name""" + +consumer_offset_commit_interval_seconds.desc: +"""Defines the time interval between two offset commit requests sent for each consumer group.""" + +consumer_offset_commit_interval_seconds.label: +"""Offset Commit Interval""" + +consumer_max_batch_bytes.desc: +"""Set how many bytes to pull from Kafka in each fetch request. Please note that if the configured value is smaller than the message size in Kafka, it may negatively impact the fetch performance.""" + +consumer_max_batch_bytes.label: +"""Fetch Bytes""" + +socket_receive_buffer.desc: +"""Fine tune the socket receive buffer. The default value is tuned for high throughput.""" + +socket_receive_buffer.label: +"""Socket Receive Buffer Size""" + +consumer_topic_mapping.desc: +"""Defines the mapping between Kafka topics and MQTT topics. 
Must contain at least one item.""" + +consumer_topic_mapping.label: +"""Topic Mapping""" + +producer_kafka_opts.desc: +"""Kafka producer configs.""" + +producer_kafka_opts.label: +"""Kafka Producer""" + +kafka_topic.desc: +"""Kafka topic name""" + +kafka_topic.label: +"""Kafka Topic Name""" + +consumer_kafka_topic.desc: +"""Kafka topic to consume from.""" + +consumer_kafka_topic.label: +"""Kafka Topic""" + +auth_username_password.desc: +"""Username/password based authentication.""" + +auth_username_password.label: +"""Username/password Auth""" + +auth_sasl_password.desc: +"""SASL authentication password.""" + +auth_sasl_password.label: +"""Password""" + +kafka_message_timestamp.desc: +"""Which timestamp to use. The timestamp is expected to be a millisecond precision Unix epoch which can be in string format, e.g. 1661326462115 or '1661326462115'. When the desired data field for this template is not found, or if the found data is not a valid integer, the current system timestamp will be used.""" + +kafka_message_timestamp.label: +"""Message Timestamp""" + +buffer_mode.desc: +"""Message buffer mode. + +memory: Buffer all messages in memory. The messages will be lost in case of EMQX node restart +disk: Buffer all messages on disk. The messages on disk are able to survive EMQX node restart. +hybrid: Buffer message in memory first, when up to certain limit (see segment_bytes config for more information), then start offloading messages to disk, Like memory mode, the messages will be lost in case of EMQX node restart.""" + +buffer_mode.label: +"""Buffer Mode""" + +consumer_mqtt_qos.desc: +"""MQTT QoS used to publish messages consumed from Kafka.""" + +consumer_mqtt_qos.label: +"""QoS""" + +consumer_key_encoding_mode.desc: +"""Defines how the key from the Kafka message is encoded before being forwarded via MQTT. +none Uses the key from the Kafka message unchanged. Note: in this case, the key must be a valid UTF-8 string. +base64 Uses base-64 encoding on the received key.""" + +consumer_key_encoding_mode.label: +"""Key Encoding Mode""" + +auth_gssapi_kerberos.desc: +"""Use GSSAPI/Kerberos authentication.""" + +auth_gssapi_kerberos.label: +"""GSSAPI/Kerberos""" + +consumer_mqtt_opts.desc: +"""Local MQTT message publish.""" + +consumer_mqtt_opts.label: +"""MQTT publish""" + +auth_kerberos_principal.desc: +"""SASL GSSAPI authentication Kerberos principal. For example client_name@MY.KERBEROS.REALM.MYDOMAIN.COM, NOTE: The realm in use has to be configured in /etc/krb5.conf in EMQX nodes.""" + +auth_kerberos_principal.label: +"""Kerberos Principal""" + +socket_opts.desc: +"""Extra socket options.""" + +socket_opts.label: +"""Socket Options""" + +consumer_mqtt_topic.desc: +"""Local topic to which consumed Kafka messages should be published to.""" + +consumer_mqtt_topic.label: +"""MQTT Topic""" + +consumer_offset_reset_policy.desc: +"""Defines from which offset a consumer should start fetching when there is no commit history or when the commit history becomes invalid.""" + +consumer_offset_reset_policy.label: +"""Offset Reset Policy""" + +partition_count_refresh_interval.desc: +"""The time interval for Kafka producer to discover increased number of partitions. +After the number of partitions is increased in Kafka, EMQX will start taking the +discovered partitions into account when dispatching messages per partition_strategy.""" + +partition_count_refresh_interval.label: +"""Partition Count Refresh Interval""" + +max_batch_bytes.desc: +"""Maximum bytes to collect in a Kafka message batch. 
Most of the Kafka brokers default to a limit of 1 MB batch size. EMQX's default value is less than 1 MB in order to compensate Kafka message encoding overheads (especially when each individual message is very small). When a single message is over the limit, it is still sent (as a single element batch).""" + +max_batch_bytes.label: +"""Max Batch Bytes""" + +required_acks.desc: +"""Required acknowledgements for Kafka partition leader to wait for its followers before it sends back the acknowledgement to EMQX Kafka producer + +all_isr: Require all in-sync replicas to acknowledge. +leader_only: Require only the partition-leader's acknowledgement. +none: No need for Kafka to acknowledge at all.""" + +required_acks.label: +"""Required Acks""" + +metadata_request_timeout.desc: +"""Maximum wait time when fetching metadata from Kafka.""" + +metadata_request_timeout.label: +"""Metadata Request Timeout""" + +desc_type.desc: +"""The Bridge Type""" + +desc_type.label: +"""Bridge Type""" + +socket_nodelay.desc: +"""When set to 'true', TCP buffer is sent as soon as possible. Otherwise, the OS kernel may buffer small TCP packets for a while (40 ms by default).""" + +socket_nodelay.label: +"""No Delay""" + +authentication.desc: +"""Authentication configs.""" + +authentication.label: +"""Authentication""" + +buffer_memory_overload_protection.desc: +"""Applicable when buffer mode is set to memory +EMQX will drop old buffered messages under high memory pressure. The high memory threshold is defined in config sysmon.os.sysmem_high_watermark. NOTE: This config only works on Linux.""" + +buffer_memory_overload_protection.label: +"""Memory Overload Protection""" + +auth_sasl_mechanism.desc: +"""SASL authentication mechanism.""" + +auth_sasl_mechanism.label: +"""Mechanism""" + +config_enable.desc: +"""Enable (true) or disable (false) this Kafka bridge.""" + +config_enable.label: +"""Enable or Disable""" + +consumer_mqtt_payload.desc: +"""The template for transforming the incoming Kafka message. By default, it will use JSON format to serialize inputs from the Kafka message. Such fields are: +headers: an object containing string key-value pairs. +key: Kafka message key (uses the chosen key encoding). +offset: offset for the message. +topic: Kafka topic. +ts: message timestamp. +ts_type: message timestamp type, which is one of create, append or undefined. +value: Kafka message value (uses the chosen value encoding).""" + +consumer_mqtt_payload.label: +"""MQTT Payload Template""" + +consumer_opts.desc: +"""Local MQTT publish and Kafka consumer configs.""" + +consumer_opts.label: +"""MQTT to Kafka""" + +kafka_consumer.desc: +"""Kafka Consumer configuration.""" + +kafka_consumer.label: +"""Kafka Consumer""" + +desc_config.desc: +"""Configuration for a Kafka bridge.""" + +desc_config.label: +"""Kafka Bridge Configuration""" + +consumer_value_encoding_mode.desc: +"""Defines how the value from the Kafka message is encoded before being forwarded via MQTT. +none Uses the value from the Kafka message unchanged. Note: in this case, the value must be a valid UTF-8 string. +base64 Uses base-64 encoding on the received value.""" + +consumer_value_encoding_mode.label: +"""Value Encoding Mode""" + +buffer_per_partition_limit.desc: +"""Number of bytes allowed to buffer for each Kafka partition. 
When this limit is exceeded, old messages will be dropped in a trade for credits for new messages to be buffered.""" + +buffer_per_partition_limit.label: +"""Per-partition Buffer Limit""" + +bootstrap_hosts.desc: +"""A comma separated list of Kafka host[:port] endpoints to bootstrap the client. Default port number is 9092.""" + +bootstrap_hosts.label: +"""Bootstrap Hosts""" + +consumer_max_rejoin_attempts.desc: +"""Maximum number of times allowed for a member to re-join the group. If the consumer group can not reach balance after this configured number of attempts, the consumer group member will restart after a delay.""" + +consumer_max_rejoin_attempts.label: +"""Max Rejoin Attempts""" + +kafka_message_key.desc: +"""Template to render Kafka message key. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then Kafka's NULL (but not empty string) is used.""" + +kafka_message_key.label: +"""Message Key""" + +kafka_message.desc: +"""Template to render a Kafka message.""" + +kafka_message.label: +"""Kafka Message Template""" + +mqtt_topic.desc: +"""MQTT topic or topic filter as data source (bridge input). If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in Kafka.""" + +mqtt_topic.label: +"""Source MQTT Topic""" + +kafka_message_value.desc: +"""Template to render Kafka message value. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then Kafka's NULL (but not empty string) is used.""" + +kafka_message_value.label: +"""Message Value""" + +partition_strategy.desc: +"""Partition strategy is to tell the producer how to dispatch messages to Kafka partitions. + +random: Randomly pick a partition for each message +key_dispatch: Hash Kafka message key to a partition number""" + +partition_strategy.label: +"""Partition Strategy""" + +buffer_segment_bytes.desc: +"""Applicable when buffer mode is set to disk or hybrid. +This value is to specify the size of each on-disk buffer file.""" + +buffer_segment_bytes.label: +"""Segment File Bytes""" + +consumer_kafka_opts.desc: +"""Kafka consumer configs.""" + +consumer_kafka_opts.label: +"""Kafka Consumer""" + +max_inflight.desc: +"""Maximum number of batches allowed for Kafka producer (per-partition) to send before receiving acknowledgement from Kafka. Greater value typically means better throughput. However, there can be a risk of message reordering when this value is greater than 1.""" + +max_inflight.label: +"""Max Inflight""" + +auth_sasl_username.desc: +"""SASL authentication username.""" + +auth_sasl_username.label: +"""Username""" + +auth_kerberos_keytab_file.desc: +"""SASL GSSAPI authentication Kerberos keytab file path. NOTE: This file has to be placed in EMQX nodes, and the EMQX service runner user requires read permission.""" + +auth_kerberos_keytab_file.label: +"""Kerberos keytab file""" + +compression.desc: +"""Compression method.""" + +compression.label: +"""Compression""" - } - label { - en: "MQTT Payload Template" - zh: "MQTT Payload Template" - } - } - consumer_kafka_topic { - desc { - en: "Kafka topic to consume from." - zh: "指定从哪个 Kafka 主题消费消息。" - } - label { - en: "Kafka Topic" - zh: "Kafka 主题" - } - } - consumer_max_batch_bytes { - desc { - en: "Set how many bytes to pull from Kafka in each fetch request. " - "Please note that if the configured value is smaller than the message size in Kafka, it may negatively impact the fetch performance." 
- zh: "设置每次从 Kafka 拉取数据的字节数。" - "如该配置小于 Kafka 消息的大小,可能会影响消费性能。" - } - label { - en: "Fetch Bytes" - zh: "拉取字节数" - } - } - # hidden - consumer_max_rejoin_attempts { - desc { - en: "Maximum number of times allowed for a member to re-join the group. If the consumer group can not reach balance after this configured number of attempts, the consumer group member will restart after a delay." - zh: "消费组成员允许重新加入小组的最大次数。如超过该配置次数后仍未能成功加入消费组,则会在等待一段时间后重试。" - } - label { - en: "Max Rejoin Attempts" - zh: "最大的重新加入尝试" - } - } - consumer_offset_reset_policy { - desc { - en: "Defines from which offset a consumer should start fetching when there " - "is no commit history or when the commit history becomes invalid." - zh: "如不存在偏移量历史记录或历史记录失效,消费者应使用哪个偏移量开始消费。" - } - label { - en: "Offset Reset Policy" - zh: "偏移重置策略" - } - } - consumer_offset_commit_interval_seconds { - desc { - en: "Defines the time interval between two offset commit requests sent for each consumer group." - zh: "指定 Kafka 消费组偏移量提交的时间间隔。" - } - label { - en: "Offset Commit Interval" - zh: "偏移提交间隔" - } - } - consumer_topic_mapping { - desc { - en: "Defines the mapping between Kafka topics and MQTT topics. Must contain at least one item." - zh: "指定 Kafka 主题和 MQTT 主题之间的映射关系。 应至少包含一项。" - } - label { - en: "Topic Mapping" - zh: "主题映射关系" - } - } - consumer_key_encoding_mode { - desc { - en: "Defines how the key from the Kafka message is" - " encoded before being forwarded via MQTT.\n" - "none Uses the key from the Kafka message unchanged." - " Note: in this case, the key must be a valid UTF-8 string.\n" - "base64 Uses base-64 encoding on the received key." - zh: "通过 MQTT 转发之前,如何处理 Kafka 消息的 Key。" - "none 使用 Kafka 消息中的 Key 原始值,不进行编码。" - " 注意:在这种情况下,Key 必须是一个有效的 UTF-8 字符串。\n" - "base64 对收到的密钥或值使用 base-64 编码。" - } - label { - en: "Key Encoding Mode" - zh: "Key 编码模式" - } - } - consumer_value_encoding_mode { - desc { - en: "Defines how the value from the Kafka message is" - " encoded before being forwarded via MQTT.\n" - "none Uses the value from the Kafka message unchanged." - " Note: in this case, the value must be a valid UTF-8 string.\n" - "base64 Uses base-64 encoding on the received value." 
- zh: "通过 MQTT 转发之前,如何处理 Kafka 消息的 Value。" - "none 使用 Kafka 消息中的 Value 原始值,不进行编码。" - " 注意:在这种情况下,Value 必须是一个有效的 UTF-8 字符串。\n" - "base64 对收到的 Value 使用 base-64 编码。" - } - label { - en: "Value Encoding Mode" - zh: "Value 编码模式" - } - } } diff --git a/rel/i18n/emqx_bridge_mqtt_schema.hocon b/rel/i18n/emqx_bridge_mqtt_schema.hocon index b935b360c..e05c4fb0a 100644 --- a/rel/i18n/emqx_bridge_mqtt_schema.hocon +++ b/rel/i18n/emqx_bridge_mqtt_schema.hocon @@ -1,34 +1,21 @@ emqx_bridge_mqtt_schema { - config { - desc { - en: """The config for MQTT Bridges.""" - zh: """MQTT Bridge 的配置。""" - } - label: { - en: "Config" - zh: "配置" - } - } - desc_type { - desc { - en: """The bridge type.""" - zh: """Bridge 的类型""" - } - label: { - en: "Bridge Type" - zh: "Bridge 类型" - } - } - desc_name { - desc { - en: """Bridge name, used as a human-readable description of the bridge.""" - zh: """Bridge 名字,Bridge 的可读描述""" - } - label: { - en: "Bridge Name" - zh: "Bridge 名字" - } - } +config.desc: +"""The config for MQTT Bridges.""" + +config.label: +"""Config""" + +desc_name.desc: +"""Bridge name, used as a human-readable description of the bridge.""" + +desc_name.label: +"""Bridge Name""" + +desc_type.desc: +"""The bridge type.""" + +desc_type.label: +"""Bridge Type""" } diff --git a/rel/i18n/emqx_bridge_schema.hocon b/rel/i18n/emqx_bridge_schema.hocon index de4ceb0d5..1d3053f73 100644 --- a/rel/i18n/emqx_bridge_schema.hocon +++ b/rel/i18n/emqx_bridge_schema.hocon @@ -1,324 +1,158 @@ emqx_bridge_schema { - desc_enable { - desc { - en: """Enable or disable this bridge""" - zh: """启用/禁用 Bridge""" - } - label: { - en: "Enable Or Disable Bridge" - zh: "启用/禁用 Bridge" - } - } +bridges_mqtt.desc: +"""MQTT bridges to/from another MQTT broker""" - desc_metrics { - desc { - en: """The metrics of the bridge""" - zh: """Bridge 的计数""" - } - label: { - en: "Bridge Metrics" - zh: "Bridge 计数" - } - } +bridges_mqtt.label: +"""MQTT Bridge""" - desc_node_metrics { - desc { - en: """The metrics of the bridge for each node""" - zh: """每个节点的 Bridge 计数""" - } - label: { - en: "Each Node Bridge Metircs" - zh: "每个节点的 Bridge 计数" - } - } +bridges_webhook.desc: +"""WebHook to an HTTP server.""" - desc_status { - desc { - en: """The status of the bridge
+bridges_webhook.label: +"""WebHook""" + +desc_bridges.desc: +"""Configuration for MQTT bridges.""" + +desc_bridges.label: +"""MQTT Bridge Configuration""" + +desc_enable.desc: +"""Enable or disable this bridge""" + +desc_enable.label: +"""Enable Or Disable Bridge""" + +desc_metrics.desc: +"""Bridge metrics.""" + +desc_metrics.label: +"""Bridge Metrics""" + +desc_node_metrics.desc: +"""Node metrics.""" + +desc_node_metrics.label: +"""Node Metrics""" + +desc_node_name.desc: +"""The node name.""" + +desc_node_name.label: +"""Node Name""" + +desc_node_status.desc: +"""Node status.""" + +desc_node_status.label: +"""Node Status""" + +desc_status.desc: +"""The status of the bridge
- connecting: the initial state before any health probes were made.
- connected: when the bridge passes the health probes.
- disconnected: when the bridge cannot pass health probes.<br/>
- stopped: when the bridge resource is requested to be stopped.
- inconsistent: when not all the nodes are at the same status.""" - zh: """Bridge 的连接状态
-- connecting: 启动时的初始状态。
-- connected: 桥接驱动健康检查正常。
-- disconnected: 当桥接无法通过健康检查。
-- stopped: 桥接处于停用状态。
-- inconsistent: 集群中有各节点汇报的状态不一致。""" - } - label: { - en: "Bridge Status" - zh: "Bridge 状态" - } - } - desc_status_reason { - desc { - en: "This is the reason given in case a bridge is failing to connect." - zh: "桥接连接失败的原因。" - } - label: { - en: "Failure reason" - zh: "失败原因" - } - } +desc_status.label: +"""Bridge Status""" - desc_node_status { - desc { - en: """The status of the bridge for each node. -- connecting: the initial state before any health probes were made.
-- connected: when the bridge passes the health probes.
- disconnected: when the bridge cannot pass health probes.<br/>
-- stopped: when the bridge resource is requested to be stopped.""" - zh: """每个节点的 Bridge 状态 -- connecting: 启动时的初始状态。
-- connected: 桥接驱动健康检查正常。
-- disconnected: 当桥接无法通过健康检查。
-- stopped: 桥接处于停用状态。""" - } - label: { - en: "Node Bridge Status" - zh: "每个节点的 Bridge 状态" - } - } +desc_status_reason.desc: +"""This is the reason given in case a bridge is failing to connect.""" - bridges_webhook { - desc { - en: """WebHook to an HTTP server.""" - zh: """转发消息到 HTTP 服务器的 WebHook""" - } - label: { - en: "WebHook" - zh: "WebHook" - } - } +desc_status_reason.label: +"""Failure reason""" +metric_dropped.desc: +"""Count of messages dropped.""" - bridges_mqtt { - desc { - en: """MQTT bridges to/from another MQTT broker""" - zh: """桥接到另一个 MQTT Broker 的 MQTT Bridge""" - } - label: { - en: "MQTT Bridge" - zh: "MQTT Bridge" - } - } +metric_dropped.label: +"""Dropped""" - metric_dropped { - desc { - en: """Count of messages dropped.""" - zh: """被丢弃的消息个数。""" - } - label: { - en: "Dropped" - zh: "丢弃" - } - } +metric_dropped_other.desc: +"""Count of messages dropped due to other reasons.""" - metric_dropped_other { - desc { - en: """Count of messages dropped due to other reasons.""" - zh: """因为其他原因被丢弃的消息个数。""" - } - label: { - en: "Dropped Other" - zh: "其他丢弃" - } - } - metric_dropped_queue_full { - desc { - en: """Count of messages dropped due to the queue is full.""" - zh: """因为队列已满被丢弃的消息个数。""" - } - label: { - en: "Dropped Queue Full" - zh: "队列已满被丢弃" - } - } - metric_dropped_resource_not_found { - desc { - en: """Count of messages dropped due to the resource is not found.""" - zh: """因为资源不存在被丢弃的消息个数。""" - } - label: { - en: "Dropped Resource NotFound" - zh: "资源不存在被丢弃" - } - } - metric_dropped_resource_stopped { - desc { - en: """Count of messages dropped due to the resource is stopped.""" - zh: """因为资源已停用被丢弃的消息个数。""" - } - label: { - en: "Dropped Resource Stopped" - zh: "资源停用被丢弃" - } - } - metric_matched { - desc { - en: """Count of this bridge is matched and queried.""" - zh: """Bridge 被匹配到(被请求)的次数。""" - } - label: { - en: "Matched" - zh: "匹配次数" - } - } +metric_dropped_other.label: +"""Dropped Other""" - metric_queuing { - desc { - en: """Count of messages that are currently queuing.""" - zh: """当前被缓存到磁盘队列的消息个数。""" - } - label: { - en: "Queued" - zh: "被缓存" - } - } - metric_retried { - desc { - en: """Times of retried.""" - zh: """重试的次数。""" - } - label: { - en: "Retried" - zh: "已重试" - } - } +metric_dropped_queue_full.desc: +"""Count of messages dropped due to the queue is full.""" - metric_sent_failed { - desc { - en: """Count of messages that sent failed.""" - zh: """发送失败的消息个数。""" - } - label: { - en: "Sent Failed" - zh: "发送失败" - } - } +metric_dropped_queue_full.label: +"""Dropped Queue Full""" - metric_inflight { - desc { - en: """Count of messages that were sent asynchronously but ACKs are not yet received.""" - zh: """已异步地发送但没有收到 ACK 的消息个数。""" - } - label: { - en: "Sent Inflight" - zh: "已发送未确认" - } - } - metric_sent_success { - desc { - en: """Count of messages that sent successfully.""" - zh: """已经发送成功的消息个数。""" - } - label: { - en: "Sent Success" - zh: "发送成功" - } - } +metric_dropped_resource_not_found.desc: +"""Count of messages dropped due to the resource is not found.""" - metric_rate { - desc { - en: """The rate of matched, times/second""" - zh: """执行操作的速率,次/秒""" - } - label: { - en: "Rate" - zh: "速率" - } - } +metric_dropped_resource_not_found.label: +"""Dropped Resource NotFound""" - metric_rate_max { - desc { - en: """The max rate of matched, times/second""" - zh: """执行操作的最大速率,次/秒""" - } - label: { - en: "Max Rate Of Matched" - zh: "执行操作的最大速率" - } - } +metric_dropped_resource_stopped.desc: +"""Count of messages dropped due to the resource is stopped.""" - metric_rate_last5m 
{ - desc { - en: """The average rate of matched in the last 5 minutes, times/second""" - zh: """5 分钟平均速率,次/秒""" - } - label: { - en: "Last 5 Minutes Rate" - zh: "5 分钟平均速率" - } - } +metric_dropped_resource_stopped.label: +"""Dropped Resource Stopped""" - metric_received { - desc { - en: """Count of messages that is received from the remote system.""" - zh: """从远程系统收到的消息个数。""" - } - label: { - en: "Received" - zh: "已接收" - } - } +metric_inflight.desc: +"""Count of messages that were sent asynchronously but ACKs are not yet received.""" - desc_bridges { - desc { - en: """Configuration for MQTT bridges.""" - zh: """MQTT Bridge 配置""" - } - label: { - en: "MQTT Bridge Configuration" - zh: "MQTT Bridge 配置" - } - } +metric_inflight.label: +"""Sent Inflight""" - desc_metrics { - desc { - en: """Bridge metrics.""" - zh: """Bridge 计数""" - } - label: { - en: "Bridge Metrics" - zh: "Bridge 计数" - } - } +metric_matched.desc: +"""Count of this bridge is matched and queried.""" - desc_node_metrics { - desc { - en: """Node metrics.""" - zh: """节点的计数器""" - } - label: { - en: "Node Metrics" - zh: "节点的计数器" - } - } +metric_matched.label: +"""Matched""" - desc_node_status { - desc { - en: """Node status.""" - zh: """节点的状态""" - } - label: { - en: "Node Status" - zh: "节点的状态" - } - } +metric_queuing.desc: +"""Count of messages that are currently queuing.""" - desc_node_name { - desc { - en: """The node name.""" - zh: """节点的名字""" - } - label: { - en: "Node Name" - zh: "节点名字" - } - } +metric_queuing.label: +"""Queued""" + +metric_rate.desc: +"""The rate of matched, times/second""" + +metric_rate.label: +"""Rate""" + +metric_rate_last5m.desc: +"""The average rate of matched in the last 5 minutes, times/second""" + +metric_rate_last5m.label: +"""Last 5 Minutes Rate""" + +metric_rate_max.desc: +"""The max rate of matched, times/second""" + +metric_rate_max.label: +"""Max Rate Of Matched""" + +metric_received.desc: +"""Count of messages that is received from the remote system.""" + +metric_received.label: +"""Received""" + +metric_retried.desc: +"""Times of retried.""" + +metric_retried.label: +"""Retried""" + +metric_sent_failed.desc: +"""Count of messages that sent failed.""" + +metric_sent_failed.label: +"""Sent Failed""" + +metric_sent_success.desc: +"""Count of messages that sent successfully.""" + +metric_sent_success.label: +"""Sent Success""" } diff --git a/rel/i18n/emqx_bridge_webhook_schema.hocon b/rel/i18n/emqx_bridge_webhook_schema.hocon index cf86b63e0..4037893bd 100644 --- a/rel/i18n/emqx_bridge_webhook_schema.hocon +++ b/rel/i18n/emqx_bridge_webhook_schema.hocon @@ -1,162 +1,92 @@ emqx_bridge_webhook_schema { - config_enable { - desc { - en: """Enable or disable this bridge""" - zh: """启用/禁用 Bridge""" - } - label: { - en: "Enable Or Disable Bridge" - zh: "启用/禁用 Bridge" - } - } - config_direction { - desc { - en: """Deprecated, The direction of this bridge, MUST be 'egress'""" - zh: """已废弃,Bridge 的方向,必须是 egress""" - } - label: { - en: "Bridge Direction" - zh: "Bridge 方向" - } - } - config_url { - desc { - en: """The URL of the HTTP Bridge.
-Template with variables is allowed in the path, but variables cannot be used in the scheme, host, -or port part.
-For example, http://localhost:9901/${topic} is allowed, but - http://${host}:9901/message or http://localhost:${port}/message -is not allowed.""" - zh: """HTTP Bridge 的 URL。
-路径中允许使用带变量的模板,但是 host, port 不允许使用变量模板。
-例如, http://localhost:9901/${topic} 是允许的, -但是 http://${host}:9901/message -或 http://localhost:${port}/message -不允许。""" - } - label: { - en: "HTTP Bridge" - zh: "HTTP Bridge" - } - } - - config_local_topic { - desc { - en: """The MQTT topic filter to be forwarded to the HTTP server. All MQTT 'PUBLISH' messages with the topic -matching the local_topic will be forwarded.
-NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is -configured, then both the data got from the rule and the MQTT messages that match local_topic -will be forwarded.""" - zh: """发送到 'local_topic' 的消息都会转发到 HTTP 服务器。
-注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 HTTP 服务器。""" - } - label: { - en: "Local Topic" - zh: "本地 Topic" - } - } - - config_method { - desc { - en: """The method of the HTTP request. All the available methods are: post, put, get, delete.
-Template with variables is allowed.""" - zh: """HTTP 请求的方法。 所有可用的方法包括:post、put、get、delete。
-允许使用带有变量的模板。""" - } - label: { - en: "HTTP Method" - zh: "HTTP 请求方法" - } - } - - config_headers { - desc { - en: """The headers of the HTTP request.
-Template with variables is allowed.""" - zh: """HTTP 请求的标头。
-允许使用带有变量的模板。""" - } - label: { - en: "HTTP Header" - zh: "HTTP 请求标头" - } - } - - config_body { - desc { - en: """The body of the HTTP request.
+config_body.desc: +"""The body of the HTTP request.
If not provided, the body will be a JSON object of all the available fields.
There, 'all the available fields' means the context of an MQTT message when this webhook is triggered by receiving an MQTT message (the `local_topic` is set), or the context of the event when this webhook is triggered by a rule (i.e. this webhook is used as an action of a rule).<br/>
Template with variables is allowed.""" - zh: """HTTP 请求的正文。
-如果没有设置该字段,请求正文将是包含所有可用字段的 JSON object。
-如果该 webhook 是由于收到 MQTT 消息触发的,'所有可用字段' 将是 MQTT 消息的 -上下文信息;如果该 webhook 是由于规则触发的,'所有可用字段' 则为触发事件的上下文信息。
-允许使用带有变量的模板。""" - } - label: { - en: "HTTP Body" - zh: "HTTP 请求正文" - } - } - config_request_timeout { - desc { - en: """HTTP request timeout.""" - zh: """HTTP 请求超时""" - } - label: { - en: "HTTP Request Timeout" - zh: "HTTP 请求超时" - } - } +config_body.label: +"""HTTP Body""" - config_max_retries { - desc { - en: """HTTP request max retry times if failed.""" - zh: """HTTP 请求失败最大重试次数""" - } - label: { - en: "HTTP Request Max Retries" - zh: "HTTP 请求重试次数" - } - } +config_direction.desc: +"""Deprecated, The direction of this bridge, MUST be 'egress'""" - desc_type { - desc { - en: """The Bridge Type""" - zh: """Bridge 类型""" - } - label: { - en: "Bridge Type" - zh: "Bridge 类型" - } - } +config_direction.label: +"""Bridge Direction""" - desc_name { - desc { - en: """Bridge name, used as a human-readable description of the bridge.""" - zh: """Bridge 名字,Bridge 的可读描述""" - } - label: { - en: "Bridge Name" - zh: "Bridge 名字" - } - } +config_enable.desc: +"""Enable or disable this bridge""" - desc_config { - desc { - en: """Configuration for an HTTP bridge.""" - zh: """HTTP Bridge 配置""" - } - label: { - en: "HTTP Bridge Configuration" - zh: "HTTP Bridge 配置" - } - } +config_enable.label: +"""Enable Or Disable Bridge""" + +config_headers.desc: +"""The headers of the HTTP request.
+Template with variables is allowed.""" + +config_headers.label: +"""HTTP Header""" + +config_local_topic.desc: +"""The MQTT topic filter to be forwarded to the HTTP server. All MQTT 'PUBLISH' messages with the topic +matching the local_topic will be forwarded.
+NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is +configured, then both the data got from the rule and the MQTT messages that match local_topic +will be forwarded.""" + +config_local_topic.label: +"""Local Topic""" + +config_max_retries.desc: +"""HTTP request max retry times if failed.""" + +config_max_retries.label: +"""HTTP Request Max Retries""" + +config_method.desc: +"""The method of the HTTP request. All the available methods are: post, put, get, delete.
+Template with variables is allowed.""" + +config_method.label: +"""HTTP Method""" + +config_request_timeout.desc: +"""HTTP request timeout.""" + +config_request_timeout.label: +"""HTTP Request Timeout""" + +config_url.desc: +"""The URL of the HTTP Bridge.
+Template with variables is allowed in the path, but variables cannot be used in the scheme, host, +or port part.
+For example, http://localhost:9901/${topic} is allowed, but + http://${host}:9901/message or http://localhost:${port}/message +is not allowed.""" + +config_url.label: +"""HTTP Bridge""" + +desc_config.desc: +"""Configuration for an HTTP bridge.""" + +desc_config.label: +"""HTTP Bridge Configuration""" + +desc_name.desc: +"""Bridge name, used as a human-readable description of the bridge.""" + +desc_name.label: +"""Bridge Name""" + +desc_type.desc: +"""The Bridge Type""" + +desc_type.label: +"""Bridge Type""" } diff --git a/rel/i18n/emqx_coap_api.hocon b/rel/i18n/emqx_coap_api.hocon index 77ca40c00..14f644a87 100644 --- a/rel/i18n/emqx_coap_api.hocon +++ b/rel/i18n/emqx_coap_api.hocon @@ -1,58 +1,27 @@ emqx_coap_api { - send_coap_request { - desc { - en: """Send a CoAP request message to the client""" - zh: """发送 CoAP 消息到指定客户端""" - } - } +content_type.desc: +"""Payload type""" - token { - desc { - en: """Message token, can be empty""" - zh: """消息 Token, 可以为空""" - } - } +message_id.desc: +"""Message ID""" - method { - desc { - en: """Request method type""" - zh: """请求 Method 类型""" - } - } +method.desc: +"""Request method type""" - timeout { - desc { - en: """Timespan for response""" - zh: """请求超时时间""" - } - } +payload.desc: +"""The content of the payload""" - content_type { - desc { - en: """Payload type""" - zh: """Payload 类型""" - } - } +response_code.desc: +"""Response code""" - payload { - desc { - en: """The content of the payload""" - zh: """Payload 内容""" - } - } +send_coap_request.desc: +"""Send a CoAP request message to the client""" - message_id { - desc { - en: """Message ID""" - zh: """消息 ID""" - } - } +timeout.desc: +"""Timespan for response""" + +token.desc: +"""Message token, can be empty""" - response_code { - desc { - en: """Response code""" - zh: """应答码""" - } - } } diff --git a/rel/i18n/emqx_coap_schema.hocon b/rel/i18n/emqx_coap_schema.hocon index 1e6452e49..322003a3f 100644 --- a/rel/i18n/emqx_coap_schema.hocon +++ b/rel/i18n/emqx_coap_schema.hocon @@ -1,80 +1,38 @@ emqx_coap_schema { - coap { - desc { - en: """The CoAP Gateway configuration. + +coap.desc: +"""The CoAP Gateway configuration. This gateway is implemented based on RFC-7252 and https://core-wg.github.io/coap-pubsub/draft-ietf-core-pubsub.html""" - zh: """CoAP 网关配置。 -该网关的实现基于 RFC-7252 和 https://core-wg.github.io/coap-pubsub/draft-ietf-core-pubsub.html""" - } - } - coap_heartbeat { - desc { - en: """The gateway server required minimum heartbeat interval. -When connection mode is enabled, this parameter is used to set the minimum heartbeat interval for the connection to be alive""" - zh: """CoAP 网关要求客户端的最小心跳间隔时间。 -当 connection_required 开启后,该参数用于检查客户端连接是否存活""" - } - } - - coap_connection_required { - desc { - en: """Enable or disable connection mode. +coap_connection_required.desc: +"""Enable or disable connection mode. Connection mode is a feature of non-standard protocols. When connection mode is enabled, it is necessary to maintain the creation, authentication and alive of connection resources""" - zh: """是否开启连接模式。 -连接模式是非标准协议的功能。它维护 CoAP 客户端上线、认证、和连接状态的保持""" - } - } - coap_notify_type { - desc { - en: """The Notification Message will be delivered to the CoAP client if a new message received on an observed topic. +coap_heartbeat.desc: +"""The gateway server required minimum heartbeat interval. 
+When connection mode is enabled, this parameter is used to set the minimum heartbeat interval for the connection to be alive""" + +coap_notify_type.desc: +"""The Notification Message will be delivered to the CoAP client if a new message received on an observed topic. The type of delivered coap message can be set to:
- non: Non-confirmable;
- con: Confirmable;
- qos: Mapping from QoS type of received message, QoS0 -> non, QoS1,2 -> con""" - zh: """投递给 CoAP 客户端的通知消息类型。当客户端 Observe 一个资源(或订阅某个主题)时,网关会向客户端推送新产生的消息。其消息类型可设置为:
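The coap_* entries above (connection mode, heartbeat, notify type and the publish/subscribe QoS indicators) roughly correspond to a gateway configuration such as the following sketch; every value shown is an illustrative assumption rather than part of this patch.

```hocon
# Illustrative CoAP gateway settings; values are assumptions.
gateway.coap {
  connection_required = true   # enable connection mode
  heartbeat = "30s"            # minimum heartbeat while connection mode is on
  notify_type = qos            # non | con | qos
  subscribe_qos = coap         # qos0 | qos1 | qos2 | coap
  publish_qos = coap
  listeners.udp.default {
    bind = "5683"
  }
}
```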
- - non: 不需要客户端返回确认消息;
- - con: 需要客户端返回一个确认消息;
- - qos: 取决于消息的 QoS 等级; QoS 0 会以 `non` 类型下发,QoS 1/2 会以 `con` 类型下发""" - } - } - coap_subscribe_qos { - desc { - en: """The Default QoS Level indicator for subscribe request. -This option specifies the QoS level for the CoAP Client when establishing a subscription membership, if the subscribe request is not carried `qos` option. The indicator can be set to:
- - qos0, qos1, qos2: Fixed default QoS level
- - coap: Dynamic QoS level by the message type of subscribe request
- * qos0: If the subscribe request is non-confirmable
- * qos1: If the subscribe request is confirmable""" - - zh: """客户端订阅请求的默认 QoS 等级。 -当 CoAP 客户端发起订阅请求时,如果未携带 `qos` 参数则会使用该默认值。默认值可设置为:
- - qos0、 qos1、qos2: 设置为固定的 QoS 等级
- - coap: 依据订阅操作的 CoAP 报文类型来动态决定
- * 当订阅请求为 `non-confirmable` 类型时,取值为 qos0
- * 当订阅请求为 `confirmable` 类型时,取值为 qos1""" - } - } - - coap_publish_qos { - desc { - en: """The Default QoS Level indicator for publish request. +coap_publish_qos.desc: +"""The Default QoS Level indicator for publish request. This option specifies the QoS level for the CoAP Client when publishing a message to EMQX PUB/SUB system, if the publish request is not carried `qos` option. The indicator can be set to:
- qos0, qos1, qos2: Fixed default QoS level
- coap: Dynamic QoS level by the message type of publish request
* qos0: If the publish request is non-confirmable
* qos1: If the publish request is confirmable""" - zh: """客户端发布请求的默认 QoS 等级。 -当 CoAP 客户端发起发布请求时,如果未携带 `qos` 参数则会使用该默认值。默认值可设置为:
- - qos0、qos1、qos2: 设置为固定的 QoS 等级
- - coap: 依据发布操作的 CoAP 报文类型来动态决定
- * 当发布请求为 `non-confirmable` 类型时,取值为 qos0
- * 当发布请求为 `confirmable` 类型时,取值为 qos1""" - } - } - +coap_subscribe_qos.desc: +"""The Default QoS Level indicator for subscribe request. +This option specifies the QoS level for the CoAP Client when establishing a subscription membership, if the subscribe request is not carried `qos` option. The indicator can be set to:
+ - qos0, qos1, qos2: Fixed default QoS level
+ - coap: Dynamic QoS level by the message type of subscribe request
+ * qos0: If the subscribe request is non-confirmable
+ * qos1: If the subscribe request is confirmable""" } diff --git a/rel/i18n/emqx_conf_schema.hocon b/rel/i18n/emqx_conf_schema.hocon index b252353f8..f4016d32e 100644 --- a/rel/i18n/emqx_conf_schema.hocon +++ b/rel/i18n/emqx_conf_schema.hocon @@ -1,411 +1,456 @@ emqx_conf_schema { - cluster_name { - desc { - en: """Human-friendly name of the EMQX cluster.""" - zh: """EMQX集群名称。每个集群都有一个唯一的名称。服务发现时会用于做路径的一部分。""" - } - label { - en: "Cluster Name" - zh: "集群名称" - } - } +common_handler_drop_mode_qlen.desc: +"""When the number of buffered log events is larger than this value, the new log events are dropped. +When drop mode is activated or deactivated, a message is printed in the logs.""" - process_limit { - desc { - en: """Maximum number of simultaneously existing processes for this Erlang system. -The actual maximum chosen may be much larger than the Number passed. -For more information, see: https://www.erlang.org/doc/man/erl.html""" +common_handler_drop_mode_qlen.label: +"""Queue Length before Entering Drop Mode""" - zh: """Erlang系统同时存在的最大进程数。 -实际选择的最大值可能比设置的数字大得多。 -参考: https://www.erlang.org/doc/man/erl.html""" - } - label { - en: "Erlang Process Limit" - zh: "Erlang 最大进程数" - } - } +cluster_mcast_addr.desc: +"""Multicast IPv4 address.""" - max_ports { - desc { - en: """Maximum number of simultaneously existing ports for this Erlang system. -The actual maximum chosen may be much larger than the Number passed. -For more information, see: https://www.erlang.org/doc/man/erl.html""" +cluster_mcast_addr.label: +"""Cluster Multicast Address""" - zh: """Erlang系统同时存在的最大端口数。 -实际选择的最大值可能比设置的数字大得多。 -参考: https://www.erlang.org/doc/man/erl.html""" - } - label { - en: "Erlang Port Limit" - zh: "Erlang 最大端口数" - } - } +desc_cluster_dns.desc: +"""Service discovery via DNS SRV records.""" - dist_buffer_size { - desc { - en: """Erlang's distribution buffer busy limit in kilobytes.""" - zh: """Erlang分布式缓冲区的繁忙阈值,单位是KB。""" - } - label { - en: "Erlang's dist buffer size(KB)" - zh: "Erlang分布式缓冲区的繁忙阈值(KB)" - } - } +desc_cluster_dns.label: +"""Cluster DNS""" - max_ets_tables { - desc { - en: """Max number of ETS tables""" - zh: """Erlang ETS 表的最大数量""" - } - label { - en: "Max number of ETS tables" - zh: "Erlang 表的最大数量" - } - } +cluster_dns_name.desc: +"""The domain name from which to discover peer EMQX nodes' IP addresses. +Applicable when cluster.discovery_strategy = dns""" - cluster_discovery_strategy { - desc { - en: """Service discovery method for the cluster nodes. Possible values are: -- manual: Use emqx ctl cluster command to manage cluster.
-- static: Configure static nodes list by setting seeds in config file.
-- dns: Use DNS A record to discover peer nodes.
-- etcd: Use etcd to discover peer nodes.
-- k8s: Use Kubernetes API to discover peer pods.""" +cluster_dns_name.label: +"""Cluster Dns Name""" - zh: """集群节点发现方式。可选值为: -- manual: 使用 emqx ctl cluster 命令管理集群。
-- static: 配置静态节点。配置几个固定的节点,新节点通过连接固定节点中的某一个来加入集群。
-- dns: 使用 DNS A 记录的方式发现节点。
-- etcd: 使用 etcd 发现节点。
-- k8s: 使用 Kubernetes API 发现节点。""" - } - label { - en: "Cluster Discovery Strategy" - zh: "集群服务发现策略" - } - } +rpc_keyfile.desc: +"""Path to the private key file for the rpc.certfile.
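The rpc.certfile, rpc.keyfile and rpc.cacertfile entries nearby describe TLS for inter-broker RPC; a hedged sketch follows, with placeholder certificate paths that are not part of this patch.

```hocon
# Illustrative inter-broker RPC over TLS; file paths are placeholders.
rpc {
  driver = ssl
  certfile   = "etc/certs/rpc_node.pem"
  keyfile    = "etc/certs/rpc_node.key"   # secret key file, keep permissions at 600
  cacertfile = "etc/certs/rpc_ca.pem"     # every node's cert must be signed by the same CA
}
```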
+Note: contents of this file are secret, so it's necessary to set permissions to 600.""" - cluster_autoclean { - desc { - en: """Remove disconnected nodes from the cluster after this interval.""" - zh: """指定多久之后从集群中删除离线节点。""" - } - label { - en: "Cluster Auto Clean" - zh: "自动删除离线节点时间" - } - } +rpc_keyfile.label: +"""RPC Keyfile""" - cluster_autoheal { - desc { - en: """If true, the node will try to heal network partitions automatically.""" - zh: """集群脑裂自动恢复机制开关。""" - } - label { - en: "Cluster Auto Heal" - zh: "节点脑裂自动修复机制" - } - } +cluster_mcast_recbuf.desc: +"""Size of the kernel-level buffer for incoming datagrams.""" - cluster_proto_dist { - desc { - en: """The Erlang distribution protocol for the cluster.
+cluster_mcast_recbuf.label: +"""Cluster Muticast Sendbuf""" + +cluster_autoheal.desc: +"""If true, the node will try to heal network partitions automatically.""" + +cluster_autoheal.label: +"""Cluster Auto Heal""" + +log_overload_kill_enable.desc: +"""Enable log handler overload kill feature.""" + +log_overload_kill_enable.label: +"""Log Handler Overload Kill""" + +node_etc_dir.desc: +"""etc dir for the node""" + +node_etc_dir.label: +"""Etc Dir""" + +cluster_proto_dist.desc: +"""The Erlang distribution protocol for the cluster.
- inet_tcp: IPv4 TCP
- inet_tls: IPv4 TLS, works together with etc/ssl_dist.conf""" - zh: """分布式 Erlang 集群协议类型。可选值为:
-- inet_tcp: 使用 IPv4
-- inet_tls: 使用 TLS,需要配合 etc/ssl_dist.conf 一起使用。""" - } - label { - en: "Cluster Protocol Distribution" - zh: "集群内部通信协议" - } - } - cluster_static_seeds { - desc { - en: """List EMQX node names in the static cluster. See node.name.""" - zh: """集群中的EMQX节点名称列表, -指定固定的节点列表,多个节点间使用逗号 , 分隔。 -当 cluster.discovery_strategy 为 static 时,此配置项才有效。 -适合于节点数量较少且固定的集群。""" - } - label { - en: "Cluster Static Seeds" - zh: "集群静态节点" - } - } +cluster_proto_dist.label: +"""Cluster Protocol Distribution""" - cluster_mcast_addr { - desc { - en: """Multicast IPv4 address.""" - zh: """指定多播 IPv4 地址。 -当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。""" - } - label { - en: "Cluster Multicast Address" - zh: "多播地址" - } - } +log_burst_limit_enable.desc: +"""Enable log burst control feature.""" - cluster_mcast_ports { - desc { - en: """List of UDP ports used for service discovery.
-Note: probe messages are broadcast to all the specified ports.""" +log_burst_limit_enable.label: +"""Enable Burst""" - zh: """指定多播端口。如有多个端口使用逗号 , 分隔。 -当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。""" - } - label { - en: "Cluster Multicast Ports" - zh: "多播端口" - } - } +dist_buffer_size.desc: +"""Erlang's distribution buffer busy limit in kilobytes.""" - cluster_mcast_iface { - desc { - en: """Local IP address the node discovery service needs to bind to.""" - zh: """指定节点发现服务需要绑定到本地 IP 地址。 -当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。""" - } - label { - en: "Cluster Multicast Interface" - zh: "多播绑定地址" - } - } +dist_buffer_size.label: +"""Erlang's dist buffer size(KB)""" - cluster_mcast_ttl { - desc { - en: """Time-to-live (TTL) for the outgoing UDP datagrams.""" - zh: """指定多播的 Time-To-Live 值。 -当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。""" - } - label { - en: "Cluster Multicast TTL" - zh: "多播TTL" - } - } +common_handler_max_depth.desc: +"""Maximum depth for Erlang term log formatting and Erlang process message queue inspection.""" - cluster_mcast_loop { - desc { - en: """If true, loop UDP datagrams back to the local socket.""" - zh: """设置多播的报文是否投递到本地回环地址。 -当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。""" - } - label { - en: "Cluster Multicast Loop" - zh: "多播回环开关" - } - } +common_handler_max_depth.label: +"""Max Depth""" - cluster_mcast_sndbuf { - desc { - en: """Size of the kernel-level buffer for outgoing datagrams.""" - zh: """外发数据报的内核级缓冲区的大小。 -当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。""" - } - label { - en: "Cluster Muticast Sendbuf" - zh: "多播发送缓存区" - } - } +desc_log.desc: +"""EMQX logging supports multiple sinks for the log events. +Each sink is represented by a _log handler_, which can be configured independently.""" - cluster_mcast_recbuf { - desc { - en: """Size of the kernel-level buffer for incoming datagrams.""" - zh: """接收数据报的内核级缓冲区的大小。 -当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。""" - } - label { - en: "Cluster Muticast Sendbuf" - zh: "多播接收数据缓冲区" - } - } +desc_log.label: +"""Log""" - cluster_mcast_buffer { - desc { - en: """Size of the user-level buffer.""" - zh: """用户级缓冲区的大小。 -当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。""" - } - label { - en: "Cluster Muticast Buffer" - zh: "多播用户级缓冲区" - } - } +common_handler_flush_qlen.desc: +"""If the number of buffered log events grows larger than this threshold, a flush (delete) operation takes place. +To flush events, the handler discards the buffered log messages without logging.""" - cluster_dns_name { - desc { - en: """The domain name from which to discover peer EMQX nodes' IP addresses. -Applicable when cluster.discovery_strategy = dns""" - zh: """指定 DNS A 记录的名字。emqx 会通过访问这个 DNS A 记录来获取 IP 地址列表。 -当cluster.discovery_strategydns 时有效。""" - } - label { - en: "Cluster Dns Name" - zh: "DNS名称" - } - } +common_handler_flush_qlen.label: +"""Flush Threshold""" - cluster_dns_record_type { - desc { - en: """DNS record type.""" - zh: """DNS 记录类型。""" - } - label { - en: "DNS Record Type" - zh: "DNS记录类型" - } - } +common_handler_chars_limit.desc: +"""Set the maximum length of a single log message. If this length is exceeded, the log message will be truncated. 
+NOTE: Restrict char limiter if formatter is JSON , it will get a truncated incomplete JSON data, which is not recommended.""" - cluster_etcd_server { - desc { - en: """List of endpoint URLs of the etcd cluster""" - zh: """指定 etcd 服务的地址。如有多个服务使用逗号 , 分隔。 -当 cluster.discovery_strategy 为 etcd 时,此配置项才有效。""" - } - label { - en: "Cluster Etcd Server" - zh: "Etcd 服务器地址" - } - } +common_handler_chars_limit.label: +"""Single Log Max Length""" - cluster_etcd_prefix { - desc { - en: """Key prefix used for EMQX service discovery.""" - zh: """指定 etcd 路径的前缀。每个节点在 etcd 中都会创建一个路径: -v2/keys///
-当 cluster.discovery_strategy 为 etcd 时,此配置项才有效。""" - } - label { - en: "Cluster Etcd Prefix" - zh: "Etcd 路径前缀" - } - } +cluster_k8s_namespace.desc: +"""Kubernetes namespace.""" - cluster_etcd_node_ttl { - desc { - en: """Expiration time of the etcd key associated with the node. -It is refreshed automatically, as long as the node is alive.""" +cluster_k8s_namespace.label: +"""K8s Namespace""" - zh: """指定 etcd 中节点信息的过期时间。 -当 cluster.discovery_strategy 为 etcd 时,此配置项才有效。""" - } - label { - en: "Cluster Etcd Node TTL" - zh: "Etcd 节点过期时间" - } - } +node_name.desc: +"""Unique name of the EMQX node. It must follow %name%@FQDN or +%name%@IPv4 format.""" - cluster_etcd_ssl { - desc { - en: """Options for the TLS connection to the etcd cluster.""" - zh: """当使用 TLS 连接 etcd 时的配置选项。 -当 cluster.discovery_strategy 为 etcd 时,此配置项才有效。""" - } - label { - en: "Cluster Etcd SSL Option" - zh: "Etcd SSL 选项" - } - } +node_name.label: +"""Node Name""" - cluster_k8s_apiserver { - desc { - en: """Kubernetes API endpoint URL.""" - zh: """指定 Kubernetes API Server。如有多个 Server 使用逗号 , 分隔。 -当 cluster.discovery_strategy 为 k8s 时,此配置项才有效。""" - } - label { - en: "Cluster k8s ApiServer" - zh: "K8s 服务地址" - } - } +rpc_port_discovery.desc: +"""manual: discover ports by tcp_server_port.
+stateless: discover ports in a stateless manner, using the following algorithm. +If node name is emqxN@127.0.0.1, where the N is an integer, +then the listening port will be 5370 + N.""" - cluster_k8s_service_name { - desc { - en: """EMQX broker service name.""" - zh: """指定 Kubernetes 中 EMQX 的服务名。 -当 cluster.discovery_strategy 为 k8s 时,此配置项才有效。""" - } - label { - en: "K8s Service Name" - zh: "K8s 服务别名" - } - } +rpc_port_discovery.label: +"""RRC Port Discovery""" - cluster_k8s_address_type { - desc { - en: """Address type used for connecting to the discovered nodes. +log_overload_kill_restart_after.desc: +"""The handler restarts automatically after a delay in the event of termination, unless the value `infinity` is set, which blocks any subsequent restarts.""" + +log_overload_kill_restart_after.label: +"""Handler Restart Timer""" + +log_file_handler_max_size.desc: +"""This parameter controls log file rotation. The value `infinity` means the log file will grow indefinitely, otherwise the log file will be rotated once it reaches `max_size` in bytes.""" + +log_file_handler_max_size.label: +"""Rotation Size""" + +desc_log_file_handler.desc: +"""Log handler that prints log events to files.""" + +desc_log_file_handler.label: +"""Files Log Handler""" + +rpc_socket_keepalive_count.desc: +"""How many times the keepalive probe message can fail to receive a reply +until the RPC connection is considered lost.""" + +rpc_socket_keepalive_count.label: +"""RPC Socket Keepalive Count""" + +cluster_etcd_server.desc: +"""List of endpoint URLs of the etcd cluster""" + +cluster_etcd_server.label: +"""Cluster Etcd Server""" + +db_backend.desc: +"""Select the backend for the embedded database.
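A small worked example of the port discovery rule described above, plus the manual alternative; node names and ports are assumptions.

```hocon
# Illustrative sketch of the two port discovery modes described above.
# With stateless discovery, emqx2@127.0.0.1 listens on 5370 + 2 = 5372.
node { name = "emqx2@127.0.0.1" }
rpc  { port_discovery = stateless }

# Alternatively, pin the port manually (tcp_server_port only applies in this mode):
# rpc {
#   port_discovery  = manual
#   tcp_server_port = 5369
# }
```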
+rlog is the default backend,
+that is suitable for very large clusters.<br/>
+mnesia is a backend that offers decent performance in small clusters.""" + +db_backend.label: +"""DB Backend""" + +desc_authorization.desc: +"""Settings that control client authorization.""" + +desc_authorization.label: +"""Authorization""" + +cluster_etcd_ssl.desc: +"""Options for the TLS connection to the etcd cluster.""" + +cluster_etcd_ssl.label: +"""Cluster Etcd SSL Option""" + +rpc_insecure_fallback.desc: +"""Enable compatibility with old RPC authentication.""" + +rpc_insecure_fallback.label: +"""RPC insecure fallback""" + +cluster_mcast_buffer.desc: +"""Size of the user-level buffer.""" + +cluster_mcast_buffer.label: +"""Cluster Muticast Buffer""" + +rpc_authentication_timeout.desc: +"""Timeout for the remote node authentication.""" + +rpc_authentication_timeout.label: +"""RPC Authentication Timeout""" + +cluster_call_retry_interval.desc: +"""Time interval to retry after a failed call.""" + +cluster_call_retry_interval.label: +"""Cluster Call Retry Interval""" + +cluster_mcast_sndbuf.desc: +"""Size of the kernel-level buffer for outgoing datagrams.""" + +cluster_mcast_sndbuf.label: +"""Cluster Muticast Sendbuf""" + +rpc_driver.desc: +"""Transport protocol used for inter-broker communication""" + +rpc_driver.label: +"""RPC dirver""" + +max_ets_tables.desc: +"""Max number of ETS tables""" + +max_ets_tables.label: +"""Max number of ETS tables""" + +desc_db.desc: +"""Settings for the embedded database.""" + +desc_db.label: +"""Database""" + +desc_cluster_etcd.desc: +"""Service discovery using 'etcd' service.""" + +desc_cluster_etcd.label: +"""Cluster Etcd""" + +cluster_name.desc: +"""Human-friendly name of the EMQX cluster.""" + +cluster_name.label: +"""Cluster Name""" + +log_rotation_enable.desc: +"""Enable log rotation feature.""" + +log_rotation_enable.label: +"""Rotation Enable""" + +cluster_call_cleanup_interval.desc: +"""Time interval to clear completed but stale transactions. +Ensure that the number of completed transactions is less than the max_history.""" + +cluster_call_cleanup_interval.label: +"""Clean Up Interval""" + +desc_cluster_static.desc: +"""Service discovery via static nodes. +The new node joins the cluster by connecting to one of the bootstrap nodes.""" + +desc_cluster_static.label: +"""Cluster Static""" + +db_default_shard_transport.desc: +"""Defines the default transport for pushing transaction logs.
+This may be overridden on a per-shard basis in db.shard_transports. +gen_rpc uses the gen_rpc library, +distr uses the Erlang distribution.""" + +db_default_shard_transport.label: +"""Default Shard Transport""" + +cluster_static_seeds.desc: +"""List EMQX node names in the static cluster. See node.name.""" + +cluster_static_seeds.label: +"""Cluster Static Seeds""" + +log_overload_kill_qlen.desc: +"""Maximum allowed queue length.""" + +log_overload_kill_qlen.label: +"""Max Queue Length""" + +node_backtrace_depth.desc: +"""Maximum depth of the call stack printed in error messages and +process_info.""" + +node_backtrace_depth.label: +"""BackTrace Depth""" + +desc_log_burst_limit.desc: +"""Large bursts of log events produced in a short time can potentially cause problems, such as: + - Log files grow very large + - Log files are rotated too quickly, and useful information gets overwritten + - Overall performance impact on the system + +Log burst limit feature can temporarily disable logging to avoid these issues.""" + +desc_log_burst_limit.label: +"""Log Burst Limit""" + +common_handler_enable.desc: +"""Enable this log handler.""" + +common_handler_enable.label: +"""Enable Log Handler""" + +cluster_k8s_service_name.desc: +"""EMQX broker service name.""" + +cluster_k8s_service_name.label: +"""K8s Service Name""" + +log_rotation_count.desc: +"""Maximum number of log files.""" + +log_rotation_count.label: +"""Max Log Files Number""" + +node_cookie.desc: +"""Secret cookie is a random string that should be the same on all nodes in +the given EMQX cluster, but unique per EMQX cluster. It is used to prevent EMQX nodes that +belong to different clusters from accidentally connecting to each other.""" + +node_cookie.label: +"""Node Cookie""" + +db_role.desc: +"""Select a node role.
+core nodes provide durability of the data, and take care of writes.
+It is recommended to place core nodes in different racks or different availability zones.<br/>
+replicant nodes are ephemeral worker nodes. Removing them from the cluster
+doesn't affect database redundancy.<br/>
+It is recommended to have more replicant nodes than core nodes.
+Note: this parameter only takes effect when the backend is set +to rlog.""" + +db_role.label: +"""DB Role""" + +rpc_tcp_server_port.desc: +"""Listening port used by RPC local service.
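Putting the db backend, role and core_nodes descriptions above together, a replicant node could be sketched as follows; node names are placeholders and the snippet is illustrative only.

```hocon
# Illustrative only; node names are placeholders.
db {
  backend = rlog
  role    = replicant
  # Needed for manual/static discovery; may be omitted with etcd or k8s discovery.
  core_nodes = ["emqx@192.168.0.1", "emqx@192.168.0.2"]
}
```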
+Note that this config only takes effect when rpc.port_discovery is set to manual.""" + +rpc_tcp_server_port.label: +"""RPC TCP Server Port""" + +desc_console_handler.desc: +"""Log handler that prints log events to the EMQX console.""" + +desc_console_handler.label: +"""Console Handler""" + +node_applications.desc: +"""List of Erlang applications that shall be rebooted when the EMQX broker joins the cluster.""" + +node_applications.label: +"""Application""" + +log_burst_limit_max_count.desc: +"""Maximum number of log events to handle within a `window_time` interval. After the limit is reached, successive events are dropped until the end of the `window_time`.""" + +log_burst_limit_max_count.label: +"""Events Number""" + +rpc_tcp_client_num.desc: +"""Set the maximum number of RPC communication channels initiated by this node to each remote node.""" + +rpc_tcp_client_num.label: +"""RPC TCP Client Num""" + +cluster_k8s_address_type.desc: +"""Address type used for connecting to the discovered nodes. Setting cluster.k8s.address_type to ip will make EMQX to discover IP addresses of peer nodes from Kubernetes API.""" - zh: """当使用 k8s 方式集群时,address_type 用来从 Kubernetes 接口的应答里获取什么形式的 Host 列表。 -指定 cluster.k8s.address_typeip,则将从 Kubernetes 接口中获取集群中其他节点 -的IP地址。""" - } - label { - en: "K8s Address Type" - zh: "K8s 地址类型" - } - } +cluster_k8s_address_type.label: +"""K8s Address Type""" - cluster_k8s_namespace { - desc { - en: """Kubernetes namespace.""" - zh: """当使用 k8s 方式并且 cluster.k8s.address_type 指定为 dns 类型时, -可设置 emqx 节点名的命名空间。与 cluster.k8s.suffix 一起使用用以拼接得到节点名列表。""" - } - label { - en: "K8s Namespace" - zh: "K8s 命名空间" - } - } +rpc_socket_sndbuf.desc: +"""TCP tuning parameters. TCP sending buffer size.""" - cluster_k8s_suffix { - desc { - en: """Node name suffix.
-Note: this parameter is only relevant when address_type is dns -or hostname.""" - zh: """当使用 k8s 方式并且 cluster.k8s.address_type 指定为 dns 类型时,可设置 emqx 节点名的后缀。 -与 cluster.k8s.namespace 一起使用用以拼接得到节点名列表。""" - } - label { - en: "K8s Suffix" - zh: "K8s 前缀" - } - } +rpc_socket_sndbuf.label: +"""RPC Socket Sndbuf""" - node_name { - desc { - en: """Unique name of the EMQX node. It must follow %name%@FQDN or -%name%@IPv4 format.""" - zh: """节点名。格式为 \@\。其中 可以是 IP 地址,也可以是 FQDN。 -详见 http://erlang.org/doc/reference_manual/distributed.html。""" - } - label { - en: "Node Name" - zh: "节点名" - } - } +cluster_mcast_ttl.desc: +"""Time-to-live (TTL) for the outgoing UDP datagrams.""" - node_cookie { - desc { - en: """Secret cookie is a random string that should be the same on all nodes in -the given EMQX cluster, but unique per EMQX cluster. It is used to prevent EMQX nodes that -belong to different clusters from accidentally connecting to each other.""" - zh: """分布式 Erlang 集群使用的 cookie 值。集群间保持一致""" - } - label { - en: "Node Cookie" - zh: "节点 Cookie" - } - } +cluster_mcast_ttl.label: +"""Cluster Multicast TTL""" - node_data_dir { - desc { - en: """Path to the persistent data directory.
+db_core_nodes.desc: +"""List of core nodes that the replicant will connect to.
+Note: this parameter only takes effect when the backend is set +to rlog and the role is set to replicant.
+This value needs to be defined for manual or static cluster discovery mechanisms.
+If an automatic cluster discovery mechanism is being used (such as etcd), +there is no need to set this value.""" + +db_core_nodes.label: +"""Db Core Node""" + +log_file_handler_file.desc: +"""Name the log file.""" + +log_file_handler_file.label: +"""Log File Name""" + +node_dist_net_ticktime.desc: +"""This is the approximate time an EMQX node may be unresponsive until it is considered down and thereby disconnected.""" + +node_dist_net_ticktime.label: +"""Dist Net TickTime""" + +desc_cluster_k8s.desc: +"""Service discovery via Kubernetes API server.""" + +desc_cluster_k8s.label: +"""Cluster Kubernetes""" + +desc_cluster_mcast.desc: +"""Service discovery via UDP multicast.""" + +desc_cluster_mcast.label: +"""Cluster Multicast""" + +rpc_cacertfile.desc: +"""Path to certification authority TLS certificate file used to validate rpc.certfile.
+Note: certificates of all nodes in the cluster must be signed by the same CA.""" + +rpc_cacertfile.label: +"""RPC Cacertfile""" + +desc_node.desc: +"""Node name, cookie, config & data directories and the Erlang virtual machine (BEAM) boot parameters.""" + +desc_node.label: +"""Node""" + +cluster_k8s_apiserver.desc: +"""Kubernetes API endpoint URL.""" + +cluster_k8s_apiserver.label: +"""Cluster k8s ApiServer""" + +common_handler_supervisor_reports.desc: +"""Type of supervisor reports that are logged. Defaults to error
+ - error: only log errors in the Erlang processes
. + - progress: log process startup.""" + +common_handler_supervisor_reports.label: +"""Report Type""" + +node_data_dir.desc: +"""Path to the persistent data directory.
Possible auto-created subdirectories are:
- `mnesia/`: EMQX's built-in database directory.
For example, `mnesia/emqx@127.0.0.1`.
@@ -418,822 +463,85 @@ the old dir should be deleted first.
**NOTE**: One data dir cannot be shared by two or more EMQX nodes.""" - zh: """节点数据存放目录,可能会自动创建的子目录如下:
-- `mnesia/`。EMQX的内置数据库目录。例如,`mnesia/emqx@127.0.0.1`。
-如果节点要被重新命名(例如,`emqx@10.0.1.1`)。旧目录应该首先被删除。
-- `configs`。在启动时生成的配置,以及集群/本地覆盖的配置。
-- `patches`: 热补丁文件将被放在这里。
-- `trace`: 日志跟踪文件。
+node_data_dir.label: +"""Node Data Dir""" -**注意**: 一个数据dir不能被两个或更多的EMQX节点同时使用。""" +cluster_k8s_suffix.desc: +"""Node name suffix.
+Note: this parameter is only relevant when address_type is dns +or hostname.""" - } - label { - en: "Node Data Dir" - zh: "节点数据目录" - } - } +cluster_k8s_suffix.label: +"""K8s Suffix""" - node_global_gc_interval { - desc { - en: """Periodic garbage collection interval. Set to disabled to have it disabled.""" - zh: """系统调优参数,设置节点运行多久强制进行一次全局垃圾回收。禁用设置为 disabled。""" - } - label { - en: "Global GC Interval" - zh: "全局垃圾回收" - } - } +db_rpc_module.desc: +"""Protocol used for pushing transaction logs to the replicant nodes.""" - node_crash_dump_file { - desc { - en: """Location of the crash dump file.""" - zh: """设置 Erlang crash_dump 文件的存储路径和文件名。""" - } - label { - en: "Crash Dump File" - zh: "节点崩溃时的Dump文件" - } - } +db_rpc_module.label: +"""RPC Module""" - node_crash_dump_seconds { - desc { - en: """This variable gives the number of seconds that the emulator is allowed to spend writing a crash dump. When the given number of seconds have elapsed, the emulator is terminated.
-- If setting to 0 seconds, the runtime system does not even attempt to write the crash dump file. It only terminates.
-- If setting to a positive value S, wait for S seconds to complete the crash dump file and then terminates the runtime system with a SIGALRM signal.
-- A negative value causes the termination of the runtime system to wait indefinitely until the crash dump file has been completely written.""" +cluster_etcd_prefix.desc: +"""Key prefix used for EMQX service discovery.""" - zh: """该配置给出了运行时系统允许花费的写入崩溃转储的秒数。当给定的秒数已经过去,运行时系统将被终止。
-- 如果设置为0秒,运行时会立即终止,不会尝试写入崩溃转储文件。
-- 如果设置为一个正数 S,节点会等待 S 秒来完成崩溃转储文件,然后用SIGALRM信号终止运行时系统。
-- 如果设置为一个负值导致运行时系统的终止等待无限期地直到崩溃转储文件已经完全写入。""" - } - label { - en: "Crash Dump Seconds" - zh: "保存崩溃文件最长时间" - } - } +cluster_etcd_prefix.label: +"""Cluster Etcd Prefix""" - node_crash_dump_bytes { - desc { - en: """This variable sets the maximum size of a crash dump file in bytes. -The crash dump will be truncated if this limit is exceeded. -If setting it to 0, the runtime system does not even attempt to write a crash dump file.""" +cluster_mcast_iface.desc: +"""Local IP address the node discovery service needs to bind to.""" - zh: """限制崩溃文件的大小,当崩溃时节点内存太大, -如果为了保存现场,需要全部存到崩溃文件中,此处限制最多能保存多大的文件。 -如果超过此限制,崩溃转储将被截断。如果设置为0,系统不会尝试写入崩溃转储文件。""" - } - label { - en: "Crash Dump Bytes" - zh: "崩溃文件最大容量" - } - } +cluster_mcast_iface.label: +"""Cluster Multicast Interface""" - node_dist_net_ticktime { - desc { - en: """This is the approximate time an EMQX node may be unresponsive until it is considered down and thereby disconnected.""" - zh: """系统调优参数,此配置将覆盖 vm.args 文件里的 -kernel net_ticktime 参数。当一个节点持续无响应多久之后,认为其已经宕机并断开连接。""" - } - label { - en: "Dist Net TickTime" - zh: "节点间心跳间隔" - } - } +log_burst_limit_window_time.desc: +"""See max_count.""" - node_backtrace_depth { - desc { - en: """Maximum depth of the call stack printed in error messages and -process_info.""" - zh: """错误信息中打印的最大堆栈层数""" - } - label { - en: "BackTrace Depth" - zh: "最大堆栈导数" - } - } +log_burst_limit_window_time.label: +"""Window Time""" - # TODO: check if this is still needed - node_applications { - desc { - en: """List of Erlang applications that shall be rebooted when the EMQX broker joins the cluster.""" - zh: """当新EMQX 加入集群时,应重启的Erlang应用程序的列表。""" - } - label { - en: "Application" - zh: "应用" - } - } +cluster_dns_record_type.desc: +"""DNS record type.""" - # deprecated, TODO: remove - node_etc_dir { - desc { - en: """etc dir for the node""" - zh: """etc 存放目录""" - } - label { - en: "Etc Dir" - zh: "Etc 目录" - } - } +cluster_dns_record_type.label: +"""DNS Record Type""" - db_backend { - desc { - en: """Select the backend for the embedded database.
-rlog is the default backend, -that is suitable for very large clusters.
-mnesia is a backend that offers decent performance in small clusters.""" - zh: """配置后端数据库驱动,默认值为 rlog 它适用于大规模的集群。 -mnesia 是备选数据库,适合中小集群。""" - } - label { - en: "DB Backend" - zh: "内置数据库" - } - } +cluster_autoclean.desc: +"""Remove disconnected nodes from the cluster after this interval.""" - db_role { - desc { - en: """Select a node role.
-core nodes provide durability of the data, and take care of writes. -It is recommended to place core nodes in different racks or different availability zones.
-replicant nodes are ephemeral worker nodes. Removing them from the cluster -doesn't affect database redundancy
-It is recommended to have more replicant nodes than core nodes.
-Note: this parameter only takes effect when the backend is set -to rlog.""" - zh: """选择节点的角色。
-core 节点提供数据的持久性,并负责写入。建议将核心节点放置在不同的机架或不同的可用区。
-repliant 节点是临时工作节点。 从集群中删除它们,不影响数据库冗余
-建议复制节点多于核心节点。
-注意:该参数仅在设置backend时生效到 rlog。""" - } - label { - en: "DB Role" - zh: "数据库角色" - } - } +cluster_autoclean.label: +"""Cluster Auto Clean""" - db_core_nodes { - desc { - en: """List of core nodes that the replicant will connect to.
-Note: this parameter only takes effect when the backend is set -to rlog and the role is set to replicant.
-This value needs to be defined for manual or static cluster discovery mechanisms.
-If an automatic cluster discovery mechanism is being used (such as etcd), -there is no need to set this value.""" - zh: """当前节点连接的核心节点列表。
-注意:该参数仅在设置backend时生效到 rlog -并且设置rolereplicant时生效。
-该值需要在手动或静态集群发现机制下设置。
-如果使用了自动集群发现机制(如etcd),则不需要设置该值。""" - } - label { - en: "Db Core Node" - zh: "数据库核心节点" - } - } +process_limit.desc: +"""Maximum number of simultaneously existing processes for this Erlang system. +The actual maximum chosen may be much larger than the Number passed. +For more information, see: https://www.erlang.org/doc/man/erl.html""" - db_rpc_module { - desc { - en: """Protocol used for pushing transaction logs to the replicant nodes.""" - zh: """集群间推送事务日志到复制节点使用的协议。""" - } - label { - en: "RPC Module" - zh: "RPC协议" - } - } +process_limit.label: +"""Erlang Process Limit""" - db_tlog_push_mode { - desc { - en: """In sync mode the core node waits for an ack from the replicant nodes before sending the next -transaction log entry.""" - zh: """同步模式下,核心节点等待复制节点的确认信息,然后再发送下一条事务日志。""" - } - label { - en: "Tlog Push Mode" - zh: "Tlog推送模式" - } - } +max_ports.desc: +"""Maximum number of simultaneously existing ports for this Erlang system. +The actual maximum chosen may be much larger than the Number passed. +For more information, see: https://www.erlang.org/doc/man/erl.html""" - db_default_shard_transport { - desc { - en: """Defines the default transport for pushing transaction logs.
-This may be overridden on a per-shard basis in db.shard_transports. -gen_rpc uses the gen_rpc library, -distr uses the Erlang distribution.""" - zh: """定义用于推送事务日志的默认传输。
-这可以在 db.shard_transports 中基于每个分片被覆盖。 -gen_rpc 使用 gen_rpc 库, -distr 使用 Erlang 发行版。""" - } - label { - en: "Default Shard Transport" - zh: "事务日志传输默认协议" - } - } +max_ports.label: +"""Erlang Port Limit""" - db_shard_transports { - desc { - en: """Allows to tune the transport method used for transaction log replication, on a per-shard basis.
-gen_rpc uses the gen_rpc library, -distr uses the Erlang distribution.
If not specified, -the default is to use the value set in db.default_shard_transport.""" - zh: """允许为每个 shard 下的事务日志复制操作的传输方法进行调优。
-gen_rpc 使用 gen_rpc 库, -distr 使用 Erlang 自带的 rpc 库。
如果未指定, -默认是使用 db.default_shard_transport 中设置的值。""" - } - label { - en: "Shard Transports" - zh: "事务日志传输协议" - } - } +desc_log_rotation.desc: +"""By default, the logs are stored in `./log` directory (for installation from zip file) or in `/var/log/emqx` (for binary installation).
+This section of the configuration controls the number of files kept for each log handler.""" - cluster_call_retry_interval { - desc { - en: """Time interval to retry after a failed call.""" - zh: """当集群间调用出错时,多长时间重试一次。""" - } - label { - en: "Cluster Call Retry Interval" - zh: "重试时间间隔" - } - } +desc_log_rotation.label: +"""Log Rotation""" - cluster_call_max_history { - desc { - en: """Retain the maximum number of completed transactions (for queries).""" - zh: """集群间调用最多保留的历史记录数。只用于排错时查看。""" - } - label { - en: "Cluster Call Max History" - zh: "最大历史记录" - } - } +desc_log_overload_kill.desc: +"""Log overload kill features an overload protection that activates when the log handlers use too much memory or have too many buffered log messages.
+When the overload is detected, the log handler is terminated and restarted after a cooldown period.""" - cluster_call_cleanup_interval { - desc { - en: """Time interval to clear completed but stale transactions. -Ensure that the number of completed transactions is less than the max_history.""" - zh: """清理过期事务的时间间隔""" - } - label { - en: "Clean Up Interval" - zh: "清理间隔" - } - } +desc_log_overload_kill.label: +"""Log Overload Kill""" - rpc_mode { - desc { - en: """In sync mode the sending side waits for the ack from the receiving side.""" - zh: """在 sync 模式下,发送端等待接收端的 ack信号。""" - } - label { - en: "RPC Mode" - zh: "RPC 模式" - } - } - - rpc_driver { - desc { - en: """Transport protocol used for inter-broker communication""" - zh: """集群间通信使用的传输协议。""" - } - label { - en: "RPC dirver" - zh: "RPC 驱动" - } - } - - rpc_async_batch_size { - desc { - en: """The maximum number of batch messages sent in asynchronous mode. - Note that this configuration does not work in synchronous mode.""" - zh: """异步模式下,发送的批量消息的最大数量。""" - } - label { - en: "Async Batch Size" - zh: "异步模式下的批量消息数量" - } - } - - rpc_port_discovery { - desc { - en: """manual: discover ports by tcp_server_port.
-stateless: discover ports in a stateless manner, using the following algorithm. -If node name is emqxN@127.0.0.1, where the N is an integer, -then the listening port will be 5370 + N.""" - zh: """manual: 通过 tcp_server_port 来发现端口。 -
stateless: 使用无状态的方式来发现端口,使用如下算法。如果节点名称是 -emqxN@127.0.0.1, N 是一个数字,那么监听端口就是 5370 + N。""" - } - label { - en: "RRC Port Discovery" - zh: "RPC 端口发现策略" - } - } - - rpc_tcp_server_port { - desc { - en: """Listening port used by RPC local service.
-Note that this config only takes effect when rpc.port_discovery is set to manual.""" - zh: """RPC 本地服务使用的 TCP 端口。
-只有当 rpc.port_discovery 设置为 manual 时,此配置才会生效。""" - } - label { - en: "RPC TCP Server Port" - zh: "RPC TCP 服务监听端口" - } - } - - rpc_ssl_server_port { - desc { - en: """Listening port used by RPC local service.
-Note that this config only takes effect when rpc.port_discovery is set to manual -and driver is set to ssl.""" - zh: """RPC 本地服务使用的监听SSL端口。
-只有当 rpc.port_discovery 设置为 manual 且 dirver 设置为 ssl, -此配置才会生效。""" - } - label { - en: "RPC SSL Server Port" - zh: "RPC SSL 服务监听端口" - } - } - - rpc_tcp_client_num { - desc { - en: """Set the maximum number of RPC communication channels initiated by this node to each remote node.""" - zh: """设置本节点与远程节点之间的 RPC 通信通道的最大数量。""" - } - label { - en: "RPC TCP Client Num" - zh: "RPC TCP 客户端数量" - } - } - - rpc_connect_timeout { - desc { - en: """Timeout for establishing an RPC connection.""" - zh: """建立 RPC 连接的超时时间。""" - } - label { - en: "RPC Connect Timeout" - zh: "RPC 连接超时时间" - } - } - - rpc_certfile { - desc { - en: """Path to TLS certificate file used to validate identity of the cluster nodes. -Note that this config only takes effect when rpc.driver is set to ssl.""" - - zh: """TLS 证书文件的路径,用于验证集群节点的身份。 -只有当 rpc.driver 设置为 ssl 时,此配置才会生效。""" - } - label { - en: "RPC Certfile" - zh: "RPC 证书文件" - } - } - - rpc_keyfile { - desc { - en: """Path to the private key file for the rpc.certfile.
-Note: contents of this file are secret, so it's necessary to set permissions to 600.""" - zh: """rpc.certfile 的私钥文件的路径。
-注意:此文件内容是私钥,所以需要设置权限为 600。""" - } - label { - en: "RPC Keyfile" - zh: "RPC 私钥文件" - } - } - - rpc_cacertfile { - desc { - en: """Path to certification authority TLS certificate file used to validate rpc.certfile.
-Note: certificates of all nodes in the cluster must be signed by the same CA.""" - zh: """验证 rpc.certfile 的 CA 证书文件的路径。
-注意:集群中所有节点的证书必须使用同一个 CA 签发。""" - } - label { - en: "RPC Cacertfile" - zh: "RPC CA 证书文件" - } - } - - rpc_send_timeout { - desc { - en: """Timeout for sending the RPC request.""" - zh: """发送 RPC 请求的超时时间。""" - } - label { - en: "RPC Send Timeout" - zh: "RPC 发送超时时间" - } - } - - rpc_authentication_timeout { - desc { - en: """Timeout for the remote node authentication.""" - zh: """远程节点认证的超时时间。""" - } - label { - en: "RPC Authentication Timeout" - zh: "RPC 认证超时时间" - } - } - - rpc_call_receive_timeout { - desc { - en: """Timeout for the reply to a synchronous RPC.""" - zh: """同步 RPC 的回复超时时间。""" - } - label { - en: "RPC Call Receive Timeout" - zh: "RPC 调用接收超时时间" - } - } - - rpc_socket_keepalive_idle { - desc { - en: """How long the connections between the brokers should remain open after the last message is sent.""" - zh: """broker 之间的连接在最后一条消息发送后保持打开的时间。""" - } - label { - en: "RPC Socket Keepalive Idle" - zh: "RPC Socket Keepalive Idle" - } - } - - rpc_socket_keepalive_interval { - desc { - en: """The interval between keepalive messages.""" - zh: """keepalive 消息的间隔。""" - } - label { - en: "RPC Socket Keepalive Interval" - zh: "RPC Socket Keepalive 间隔" - } - } - - rpc_socket_keepalive_count { - desc { - en: """How many times the keepalive probe message can fail to receive a reply -until the RPC connection is considered lost.""" - zh: """keepalive 探测消息发送失败的次数,直到 RPC 连接被认为已经断开。""" - } - label { - en: "RPC Socket Keepalive Count" - zh: "RPC Socket Keepalive 次数" - } - } - - rpc_socket_sndbuf { - desc { - en: """TCP tuning parameters. TCP sending buffer size.""" - zh: """TCP 调节参数。TCP 发送缓冲区大小。""" - } - label { - en: "RPC Socket Sndbuf" - zh: "RPC 套接字发送缓冲区大小" - } - } - - rpc_socket_recbuf { - desc { - en: """TCP tuning parameters. TCP receiving buffer size.""" - zh: """TCP 调节参数。TCP 接收缓冲区大小。""" - } - label { - en: "RPC Socket Recbuf" - zh: "RPC 套接字接收缓冲区大小" - } - } - - rpc_socket_buffer { - desc { - en: """TCP tuning parameters. Socket buffer size in user mode.""" - zh: """TCP 调节参数。用户模式套接字缓冲区大小。""" - } - label { - en: "RPC Socket Buffer" - zh: "RPC 套接字缓冲区大小" - } - } - - rpc_insecure_fallback { - desc { - en: """Enable compatibility with old RPC authentication.""" - zh: """兼容旧的无鉴权模式""" - } - label { - en: "RPC insecure fallback" - zh: "向后兼容旧的无鉴权模式" - } - } - - log_file_handlers { - desc { - en: """File-based log handlers.""" - zh: """输出到文件的日志处理进程列表""" - } - label { - en: "File Handler" - zh: "File Handler" - } - } - - common_handler_enable { - desc { - en: """Enable this log handler.""" - zh: """启用此日志处理进程。""" - } - label { - en: "Enable Log Handler" - zh: "启用日志处理进程" - } - } - - common_handler_level { - desc { - en: """The log level for the current log handler. -Defaults to warning.""" - zh: """当前日志处理进程的日志级别。 -默认为 warning 级别。""" - } - label { - en: "Log Level" - zh: "日志级别" - } - } - - common_handler_time_offset { - desc { - en: """The time offset to be used when formatting the timestamp. -Can be one of: - - system: the time offset used by the local system - - utc: the UTC time offset - - +-[hh]:[mm]: user specified time offset, such as "-02:00" or "+00:00" -Defaults to: system.""" - zh: """日志中的时间戳使用的时间偏移量。 -可选值为: - - system: 本地系统使用的时区偏移量 - - utc: 0 时区的偏移量 - - +-[hh]:[mm]: 自定义偏移量,比如 "-02:00" 或者 "+00:00" -默认值为本地系统的时区偏移量:system。""" - } - label { - en: "Time Offset" - zh: "时间偏移量" - } - } - - common_handler_chars_limit { - desc { - en: """Set the maximum length of a single log message. If this length is exceeded, the log message will be truncated. 
-NOTE: Restrict char limiter if formatter is JSON , it will get a truncated incomplete JSON data, which is not recommended.""" - zh: """设置单个日志消息的最大长度。 如果超过此长度,则日志消息将被截断。最小可设置的长度为100。 -注意:如果日志格式为 JSON,限制字符长度可能会导致截断不完整的 JSON 数据。""" - } - label { - en: "Single Log Max Length" - zh: "单条日志长度限制" - } - } - - common_handler_formatter { - desc { - en: """Choose log formatter. text for free text, and json for structured logging.""" - zh: """选择日志格式类型。 text 用于纯文本,json 用于结构化日志记录。""" - } - label { - en: "Log Formatter" - zh: "日志格式类型" - } - } - - common_handler_single_line { - desc { - en: """Print logs in a single line if set to true. Otherwise, log messages may span multiple lines.""" - zh: """如果设置为 true,则单行打印日志。 否则,日志消息可能跨越多行。""" - } - label { - en: "Single Line Mode" - zh: "单行模式" - } - } - - common_handler_sync_mode_qlen { - desc { - en: """As long as the number of buffered log events is lower than this value, -all log events are handled asynchronously. This means that the client process sending the log event, -by calling a log function in the Logger API, does not wait for a response from the handler -but continues executing immediately after the event is sent. -It is not affected by the time it takes the handler to print the event to the log device. -If the message queue grows larger than this value, -the handler starts handling log events synchronously instead, -meaning that the client process sending the event must wait for a response. -When the handler reduces the message queue to a level below the sync_mode_qlen threshold, -asynchronous operation is resumed.""" - zh: """只要缓冲的日志事件的数量低于这个值,所有的日志事件都会被异步处理。 -这意味着,日志落地速度不会影响正常的业务进程,因为它们不需要等待日志处理进程的响应。 -如果消息队列的增长超过了这个值,处理程序开始同步处理日志事件。也就是说,发送事件的客户进程必须等待响应。 -当处理程序将消息队列减少到低于sync_mode_qlen阈值的水平时,异步操作就会恢复。 -默认为100条信息,当等待的日志事件大于100条时,就开始同步处理日志。""" - } - label { - en: "Queue Length before Entering Sync Mode" - zh: "进入异步模式的队列长度" - } - } - - common_handler_drop_mode_qlen { - desc { - en: """When the number of buffered log events is larger than this value, the new log events are dropped. -When drop mode is activated or deactivated, a message is printed in the logs.""" - zh: """当缓冲的日志事件数大于此值时,新的日志事件将被丢弃。起到过载保护的功能。 -为了使过载保护算法正常工作必须要: sync_mode_qlen =< drop_mode_qlen =< flush_qlen 且 drop_mode_qlen > 1 -要禁用某些模式,请执行以下操作。 -- 如果sync_mode_qlen被设置为0,所有的日志事件都被同步处理。也就是说,异步日志被禁用。 -- 如果sync_mode_qlen被设置为与drop_mode_qlen相同的值,同步模式被禁用。也就是说,处理程序总是以异步模式运行,除非调用drop或flushing。 -- 如果drop_mode_qlen被设置为与flush_qlen相同的值,则drop模式被禁用,永远不会发生。""" - } - label { - en: "Queue Length before Entering Drop Mode" - zh: "进入丢弃模式的队列长度" - } - } - - common_handler_flush_qlen { - desc { - en: """If the number of buffered log events grows larger than this threshold, a flush (delete) operation takes place. -To flush events, the handler discards the buffered log messages without logging.""" - zh: """如果缓冲日志事件的数量增长大于此阈值,则会发生冲刷(删除)操作。 日志处理进程会丢弃缓冲的日志消息。 -来缓解自身不会由于内存瀑涨而影响其它业务进程。日志内容会提醒有多少事件被删除。""" - } - label { - en: "Flush Threshold" - zh: "冲刷阈值" - } - } - - common_handler_supervisor_reports { - desc { - en: """Type of supervisor reports that are logged. Defaults to error
- - error: only log errors in the Erlang processes
. - - progress: log process startup.""" - - zh: """Supervisor 报告的类型。默认为 error 类型。
- - error:仅记录 Erlang 进程中的错误。 - - progress:除了 error 信息外,还需要记录进程启动的详细信息。""" - } - label { - en: "Report Type" - zh: "报告类型" - } - } - - common_handler_max_depth { - desc { - en: """Maximum depth for Erlang term log formatting and Erlang process message queue inspection.""" - zh: """Erlang 内部格式日志格式化和 Erlang 进程消息队列检查的最大深度。""" - } - label { - en: "Max Depth" - zh: "最大深度" - } - } - - log_file_handler_file { - desc { - en: """Name the log file.""" - zh: """日志文件路径及名字。""" - } - label { - en: "Log File Name" - zh: "日志文件名字" - } - } - - log_file_handler_max_size { - desc { - en: """This parameter controls log file rotation. The value `infinity` means the log file will grow indefinitely, otherwise the log file will be rotated once it reaches `max_size` in bytes.""" - zh: """此参数控制日志文件轮换。 `infinity` 意味着日志文件将无限增长,否则日志文件将在达到 `max_size`(以字节为单位)时进行轮换。 -与 rotation count配合使用。如果 counter 为 10,则是10个文件轮换。""" - } - label { - en: "Rotation Size" - zh: "日志文件轮换大小" - } - } - - log_rotation_enable { - desc { - en: """Enable log rotation feature.""" - zh: """启用日志轮换功能。启动后生成日志文件后缀会加上对应的索引数字,比如:log/emqx.log.1。 -系统会默认生成*.siz/*.idx用于记录日志位置,请不要手动修改这两个文件。""" - } - label { - en: "Rotation Enable" - zh: "日志轮换" - } - } - - log_rotation_count { - desc { - en: """Maximum number of log files.""" - zh: """轮换的最大日志文件数。""" - } - label { - en: "Max Log Files Number" - zh: "最大日志文件数" - } - } - - log_overload_kill_enable { - desc { - en: """Enable log handler overload kill feature.""" - zh: """日志处理进程过载时为保护自己节点其它的业务能正常,强制杀死日志处理进程。""" - } - label { - en: "Log Handler Overload Kill" - zh: "日志处理进程过载保护" - } - } - - log_overload_kill_mem_size { - desc { - en: """Maximum memory size that the log handler process is allowed to use.""" - zh: """日志处理进程允许使用的最大内存。""" - } - label { - en: "Log Handler Max Memory Size" - zh: "日志处理进程允许使用的最大内存" - } - } - - log_overload_kill_qlen { - desc { - en: """Maximum allowed queue length.""" - zh: """允许的最大队列长度。""" - } - label { - en: "Max Queue Length" - zh: "最大队列长度" - } - } - - log_overload_kill_restart_after { - desc { - en: """The handler restarts automatically after a delay in the event of termination, unless the value `infinity` is set, which blocks any subsequent restarts.""" - zh: """处理进程停止后,会在该延迟时间后自动重新启动。除非该值设置为 infinity,这会阻止任何后续的重启。""" - } - label { - en: "Handler Restart Timer" - zh: "处理进程重启延迟" - } - } - - log_burst_limit_enable { - desc { - en: """Enable log burst control feature.""" - zh: """启用日志限流保护机制。""" - } - label { - en: "Enable Burst" - zh: "日志限流保护" - } - } - - log_burst_limit_max_count { - desc { - en: """Maximum number of log events to handle within a `window_time` interval. After the limit is reached, successive events are dropped until the end of the `window_time`.""" - zh: """在 `window_time` 间隔内处理的最大日志事件数。 达到限制后,将丢弃连续事件,直到 `window_time` 结束。""" - } - label { - en: "Events Number" - zh: "日志事件数" - } - } - - log_burst_limit_window_time { - desc { - en: """See max_count.""" - zh: """参考 max_count。""" - } - label { - en: "Window Time" - zh: "Window Time" - } - } - - authorization { - desc { - en: """Authorization a.k.a. ACL.
+authorization.desc: +"""Authorization a.k.a. ACL.
In EMQX, MQTT client access control is extremely flexible.
An out-of-the-box set of authorization data sources is supported. For example,<br/>
@@ -1243,221 +551,238 @@ natively in the EMQX node;
'http' source to make EMQX call an external HTTP API to make the decision;
'PostgreSQL' etc. to look up clients or rules from external databases""" - zh: """授权(ACL)。EMQX 支持完整的客户端访问控制(ACL)。""" - } - label { - en: "Authorization" - zh: "授权" - } - } +authorization.label: +"""Authorization""" - desc_cluster { - desc { - en: """EMQX nodes can form a cluster to scale up the total capacity.
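To ground the authorization description above, here is a hedged sketch that chains a file source and an HTTP source; the URL and the ACL file path are placeholders, not part of this patch.

```hocon
# Illustrative authorization chain; URL and file path are placeholders.
authorization {
  no_match    = allow
  deny_action = ignore
  sources = [
    {type = file, enable = true, path = "etc/acl.conf"},
    {type = http, enable = true, method = post, url = "http://127.0.0.1:8080/authz"}
  ]
}
```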
- Here holds the configs to instruct how individual nodes can discover each other.""" - zh: """EMQX 节点可以组成一个集群,以提高总容量。
这里指定了节点之间如何连接。""" - } - label { - en: "Cluster" - zh: "集群" - } - } +rpc_socket_keepalive_idle.desc: +"""How long the connections between the brokers should remain open after the last message is sent.""" - desc_cluster_static { - desc { - en: """Service discovery via static nodes. -The new node joins the cluster by connecting to one of the bootstrap nodes.""" - zh: """静态节点服务发现。新节点通过连接一个节点来加入集群。""" - } - label { - en: "Cluster Static" - zh: "静态节点服务发现" - } - } +rpc_socket_keepalive_idle.label: +"""RPC Socket Keepalive Idle""" - desc_cluster_mcast { - desc { - en: """Service discovery via UDP multicast.""" - zh: """UDP 组播服务发现。""" - } - label { - en: "Cluster Multicast" - zh: "UDP 组播服务发现" - } - } +desc_cluster_call.desc: +"""Options for the 'cluster call' feature that allows to execute a callback on all nodes in the cluster.""" - desc_cluster_dns { - desc { - en: """Service discovery via DNS SRV records.""" - zh: """DNS SRV 记录服务发现。""" - } - label { - en: "Cluster DNS" - zh: "DNS SRV 记录服务发现" - } - } +desc_cluster_call.label: +"""Cluster Call""" - desc_cluster_etcd { - desc { - en: """Service discovery using 'etcd' service.""" - zh: """使用 'etcd' 服务的服务发现。""" - } - label { - en: "Cluster Etcd" - zh: "'etcd' 服务的服务发现" - } - } +cluster_mcast_ports.desc: +"""List of UDP ports used for service discovery.
+Note: probe messages are broadcast to all the specified ports.""" - desc_cluster_k8s { - desc { - en: """Service discovery via Kubernetes API server.""" - zh: """Kubernetes 服务发现。""" - } - label { - en: "Cluster Kubernetes" - zh: "Kubernetes 服务发现" - } - } +cluster_mcast_ports.label: +"""Cluster Multicast Ports""" - desc_node { - desc { - en: """Node name, cookie, config & data directories and the Erlang virtual machine (BEAM) boot parameters.""" - zh: """节点名称、Cookie、配置文件、数据目录和 Erlang 虚拟机(BEAM)启动参数。""" - } - label { - en: "Node" - zh: "节点" - } - } +log_overload_kill_mem_size.desc: +"""Maximum memory size that the log handler process is allowed to use.""" - desc_db { - desc { - en: """Settings for the embedded database.""" - zh: """内置数据库的配置。""" - } - label { - en: "Database" - zh: "数据库" - } - } +log_overload_kill_mem_size.label: +"""Log Handler Max Memory Size""" - desc_cluster_call { - desc { - en: """Options for the 'cluster call' feature that allows to execute a callback on all nodes in the cluster.""" - zh: """集群调用功能的选项。""" - } - label { - en: "Cluster Call" - zh: "集群调用" - } - } +rpc_connect_timeout.desc: +"""Timeout for establishing an RPC connection.""" - desc_rpc { - desc { - en: """EMQX uses a library called gen_rpc for inter-broker communication.
+rpc_connect_timeout.label: +"""RPC Connect Timeout""" + +cluster_etcd_node_ttl.desc: +"""Expiration time of the etcd key associated with the node. +It is refreshed automatically, as long as the node is alive.""" + +cluster_etcd_node_ttl.label: +"""Cluster Etcd Node TTL""" + +rpc_call_receive_timeout.desc: +"""Timeout for the reply to a synchronous RPC.""" + +rpc_call_receive_timeout.label: +"""RPC Call Receive Timeout""" + +rpc_socket_recbuf.desc: +"""TCP tuning parameters. TCP receiving buffer size.""" + +rpc_socket_recbuf.label: +"""RPC Socket Recbuf""" + +db_tlog_push_mode.desc: +"""In sync mode the core node waits for an ack from the replicant nodes before sending the next +transaction log entry.""" + +db_tlog_push_mode.label: +"""Tlog Push Mode""" + +node_crash_dump_bytes.desc: +"""This variable sets the maximum size of a crash dump file in bytes. +The crash dump will be truncated if this limit is exceeded. +If setting it to 0, the runtime system does not even attempt to write a crash dump file.""" + +node_crash_dump_bytes.label: +"""Crash Dump Bytes""" + +rpc_certfile.desc: +"""Path to TLS certificate file used to validate identity of the cluster nodes. +Note that this config only takes effect when rpc.driver is set to ssl.""" + +rpc_certfile.label: +"""RPC Certfile""" + +node_crash_dump_seconds.desc: +"""This variable gives the number of seconds that the emulator is allowed to spend writing a crash dump. When the given number of seconds have elapsed, the emulator is terminated.
+- If setting to 0 seconds, the runtime system does not even attempt to write the crash dump file. It only terminates.
+- If setting to a positive value S, wait for S seconds to complete the crash dump file and then terminates the runtime system with a SIGALRM signal.
+- A negative value causes the termination of the runtime system to wait indefinitely until the crash dump file has been completely written.""" + +node_crash_dump_seconds.label: +"""Crash Dump Seconds""" + +log_file_handlers.desc: +"""File-based log handlers.""" + +log_file_handlers.label: +"""File Handler""" + +node_global_gc_interval.desc: +"""Periodic garbage collection interval. Set to disabled to have it disabled.""" + +node_global_gc_interval.label: +"""Global GC Interval""" + +common_handler_time_offset.desc: +"""The time offset to be used when formatting the timestamp. +Can be one of: + - system: the time offset used by the local system + - utc: the UTC time offset + - +-[hh]:[mm]: user specified time offset, such as "-02:00" or "+00:00" +Defaults to: system.""" + +common_handler_time_offset.label: +"""Time Offset""" + +rpc_mode.desc: +"""In sync mode the sending side waits for the ack from the receiving side.""" + +rpc_mode.label: +"""RPC Mode""" + +node_crash_dump_file.desc: +"""Location of the crash dump file.""" + +node_crash_dump_file.label: +"""Crash Dump File""" + +cluster_mcast_loop.desc: +"""If true, loop UDP datagrams back to the local socket.""" + +cluster_mcast_loop.label: +"""Cluster Multicast Loop""" + +rpc_socket_keepalive_interval.desc: +"""The interval between keepalive messages.""" + +rpc_socket_keepalive_interval.label: +"""RPC Socket Keepalive Interval""" + +common_handler_level.desc: +"""The log level for the current log handler. +Defaults to warning.""" + +common_handler_level.label: +"""Log Level""" + +desc_rpc.desc: +"""EMQX uses a library called gen_rpc for inter-broker communication.
Most of the time the default config should work, but in case you need to do performance fine-tuning or experiment a bit, this is where to look.""" - zh: """EMQX 使用 gen_rpc 库来实现跨节点通信。
-大多数情况下,默认的配置应该可以工作,但如果你需要做一些性能优化或者实验,可以尝试调整这些参数。""" - } - label { - en: "RPC" - zh: "RPC" - } - } - desc_log { - desc { - en: """EMQX logging supports multiple sinks for the log events. -Each sink is represented by a _log handler_, which can be configured independently.""" - zh: """EMQX 日志记录支持日志事件的多个接收器。 每个接收器由一个_log handler_表示,可以独立配置。""" - } - label { - en: "Log" - zh: "日志" - } - } +desc_rpc.label: +"""RPC""" - desc_console_handler { - desc { - en: """Log handler that prints log events to the EMQX console.""" - zh: """日志处理进程将日志事件打印到 EMQX 控制台。""" - } - label { - en: "Console Handler" - zh: "Console Handler" - } - } +rpc_ssl_server_port.desc: +"""Listening port used by RPC local service.
+Note that this config only takes effect when rpc.port_discovery is set to manual +and driver is set to ssl.""" - desc_log_file_handler { - desc { - en: """Log handler that prints log events to files.""" - zh: """日志处理进程将日志事件打印到文件。""" - } - label { - en: "Files Log Handler" - zh: "文件日志处理进程" - } - } +rpc_ssl_server_port.label: +"""RPC SSL Server Port""" - desc_log_rotation { - desc { - en: """By default, the logs are stored in `./log` directory (for installation from zip file) or in `/var/log/emqx` (for binary installation).
-This section of the configuration controls the number of files kept for each log handler.""" +desc_cluster.desc: +"""EMQX nodes can form a cluster to scale up the total capacity.
+ Here holds the configs to instruct how individual nodes can discover each other.""" - zh: """默认情况下,日志存储在 `./log` 目录(用于从 zip 文件安装)或 `/var/log/emqx`(用于二进制安装)。
-这部分配置,控制每个日志处理进程保留的文件数量。""" - } - label { - en: "Log Rotation" - zh: "日志轮换" - } - } +desc_cluster.label: +"""Cluster""" - desc_log_overload_kill { - desc { - en: """Log overload kill features an overload protection that activates when the log handlers use too much memory or have too many buffered log messages.
-When the overload is detected, the log handler is terminated and restarted after a cooldown period.""" +common_handler_sync_mode_qlen.desc: +"""As long as the number of buffered log events is lower than this value, +all log events are handled asynchronously. This means that the client process sending the log event, +by calling a log function in the Logger API, does not wait for a response from the handler +but continues executing immediately after the event is sent. +It is not affected by the time it takes the handler to print the event to the log device. +If the message queue grows larger than this value, +the handler starts handling log events synchronously instead, +meaning that the client process sending the event must wait for a response. +When the handler reduces the message queue to a level below the sync_mode_qlen threshold, +asynchronous operation is resumed.""" - zh: """日志过载终止,具有过载保护功能。当日志处理进程使用过多内存,或者缓存的日志消息过多时该功能被激活。
-检测到过载时,日志处理进程将终止,并在冷却期后重新启动。""" - } - label { - en: "Log Overload Kill" - zh: "日志过载保护" - } - } +common_handler_sync_mode_qlen.label: +"""Queue Length before Entering Sync Mode""" - desc_log_burst_limit { - desc { - en: """Large bursts of log events produced in a short time can potentially cause problems, such as: - - Log files grow very large - - Log files are rotated too quickly, and useful information gets overwritten - - Overall performance impact on the system +common_handler_formatter.desc: +"""Choose log formatter. text for free text, and json for structured logging.""" -Log burst limit feature can temporarily disable logging to avoid these issues.""" - zh: """短时间内产生的大量日志事件可能会导致问题,例如: - - 日志文件变得非常大 - - 日志文件轮换过快,有用信息被覆盖 - - 对系统的整体性能影响 +common_handler_formatter.label: +"""Log Formatter""" -日志突发限制功能可以暂时禁用日志记录以避免这些问题。""" - } - label { - en: "Log Burst Limit" - zh: "日志突发限制" - } - } +rpc_async_batch_size.desc: +"""The maximum number of batch messages sent in asynchronous mode. + Note that this configuration does not work in synchronous mode.""" + +rpc_async_batch_size.label: +"""Async Batch Size""" + +cluster_call_max_history.desc: +"""Retain the maximum number of completed transactions (for queries).""" + +cluster_call_max_history.label: +"""Cluster Call Max History""" + +cluster_discovery_strategy.desc: +"""Service discovery method for the cluster nodes. Possible values are: +- manual: Use emqx ctl cluster command to manage cluster.
+- static: Configure static nodes list by setting seeds in config file.
+- dns: Use DNS A record to discover peer nodes.
+- etcd: Use etcd to discover peer nodes.
+- k8s: Use Kubernetes API to discover peer pods.""" + +cluster_discovery_strategy.label: +"""Cluster Discovery Strategy""" + +rpc_send_timeout.desc: +"""Timeout for sending the RPC request.""" + +rpc_send_timeout.label: +"""RPC Send Timeout""" + +common_handler_single_line.desc: +"""Print logs in a single line if set to true. Otherwise, log messages may span multiple lines.""" + +common_handler_single_line.label: +"""Single Line Mode""" + +rpc_socket_buffer.desc: +"""TCP tuning parameters. Socket buffer size in user mode.""" + +rpc_socket_buffer.label: +"""RPC Socket Buffer""" + +db_shard_transports.desc: +"""Allows to tune the transport method used for transaction log replication, on a per-shard basis.
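As an illustration of the strategies listed above, a static-discovery cluster section could be written along these lines (cluster name, node names and addresses are placeholders):

    cluster {
      name = emqxcl
      discovery_strategy = static
      static.seeds = ["emqx@192.168.0.10", "emqx@192.168.0.11"]
    }
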
+gen_rpc uses the gen_rpc library, +distr uses the Erlang distribution.
If not specified, +the default is to use the value set in db.default_shard_transport.""" + +db_shard_transports.label: +"""Shard Transports""" - desc_authorization { - desc { - en: """Settings that control client authorization.""" - zh: """授权相关""" - } - label { - en: "Authorization" - zh: "授权" - } - } } diff --git a/rel/i18n/emqx_connector_api.hocon b/rel/i18n/emqx_connector_api.hocon index 8575f01aa..4942ce981 100644 --- a/rel/i18n/emqx_connector_api.hocon +++ b/rel/i18n/emqx_connector_api.hocon @@ -1,82 +1,46 @@ emqx_connector_api { - id { - desc { - en: "The connector ID. Must be of format {type}:{name}" - zh: "连接器 ID, 格式必须为 {type}:{name}" - } - label: { - en: "Connector ID" - zh: "连接器 ID" - } - } +conn_get.desc: +"""List all connectors""" - conn_test_post { - desc { - en: """Test creating a new connector by given ID
+conn_get.label: +"""List All Connectors""" + +conn_id_delete.desc: +"""Delete a connector by ID""" + +conn_id_delete.label: +"""Delete Connector""" + +conn_id_get.desc: +"""Get the connector by ID""" + +conn_id_get.label: +"""Get Connector""" + +conn_id_put.desc: +"""Update an existing connector by ID""" + +conn_id_put.label: +"""Update Connector""" + +conn_post.desc: +"""Create a new connector""" + +conn_post.label: +"""Create Connector""" + +conn_test_post.desc: +"""Test creating a new connector by given ID
The ID must be of format '{type}:{name}'""" - zh: """通过给定的 ID 测试创建一个新的连接器
-ID 的格式必须为“{type}:{name}”""" - } - label: { - en: "Create Test Connector" - zh: "创建测试连接器" - } - } - conn_get { - desc { - en: "List all connectors" - zh: "列出所有连接器" - } - label: { - en: "List All Connectors" - zh: "列出所有连接器" - } - } +conn_test_post.label: +"""Create Test Connector""" - conn_post { - desc { - en: "Create a new connector" - zh: "创建一个新的连接器" - } - label: { - en: "Create Connector" - zh: "创建连接器" - } - } +id.desc: +"""The connector ID. Must be of format {type}:{name}""" - conn_id_get { - desc { - en: "Get the connector by ID" - zh: "通过 ID 获取连接器" - } - label: { - en: "Get Connector" - zh: "获取连接器" - } - } - - conn_id_put { - desc { - en: "Update an existing connector by ID" - zh: "通过 ID 更新一个连接器" - } - label: { - en: "Update Connector" - zh: "更新连接器" - } - } - - conn_id_delete { - desc { - en: "Delete a connector by ID" - zh: "通过 ID 删除一个连接器" - } - label: { - en: "Delete Connector" - zh: "删除连接器" - } - } +id.label: +"""Connector ID""" } diff --git a/rel/i18n/emqx_connector_http.hocon b/rel/i18n/emqx_connector_http.hocon index c6efd03ca..70c644e33 100644 --- a/rel/i18n/emqx_connector_http.hocon +++ b/rel/i18n/emqx_connector_http.hocon @@ -1,139 +1,78 @@ emqx_connector_http { - base_url { - desc { - en: """The base URL is the URL includes only the scheme, host and port.
+ +base_url.desc: +"""The base URL is the URL includes only the scheme, host and port.
When sending an HTTP request, the real URL to be used is the concatenation of the base URL and the path parameter
For example: `http://localhost:9901/`""" - zh: """base URL 只包含host和port。
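To make the base URL and path concatenation concrete, a hypothetical connector built from the fields described in this file could look like the sketch below; the effective request URL would then be http://localhost:9901/api/v1/messages (path, headers and body are illustrative):

    base_url = "http://localhost:9901/"
    connect_timeout = "15s"
    pool_type = random
    pool_size = 8
    enable_pipelining = 100
    request {
      method = post
      path = "api/v1/messages"
      headers {"content-type" = "application/json"}
      body = "${payload}"
      request_timeout = "15s"
    }
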
-发送HTTP请求时,真实的URL是由base URL 和 path parameter连接而成。
-示例:`http://localhost:9901/`""" - } - label: { - en: "Base Url" - zh: "Base Url" - } - } - connect_timeout { - desc { - en: "The timeout when connecting to the HTTP server." - zh: "连接HTTP服务器的超时时间。" - } - label: { - en: "Connect Timeout" - zh: "连接超时" - } - } +base_url.label: +"""Base Url""" - max_retries { - desc { - en: "Max retry times if error on sending request." - zh: "请求出错时的最大重试次数。" - } - label: { - en: "Max Retries" - zh: "最大重试次数" - } - } +body.desc: +"""HTTP request body.""" - pool_type { - desc { - en: "The type of the pool. Can be one of `random`, `hash`." - zh: "连接池的类型,可用类型有`random`, `hash`。" - } - label: { - en: "Pool Type" - zh: "连接池类型" - } - } +body.label: +"""HTTP Body""" - pool_size { - desc { - en: "The pool size." - zh: "连接池大小。" - } - label: { - en: "Pool Size" - zh: "连接池大小" - } - } +connect_timeout.desc: +"""The timeout when connecting to the HTTP server.""" - enable_pipelining { - desc { - en: "A positive integer. Whether to send HTTP requests continuously, when set to 1, it means that after each HTTP request is sent, you need to wait for the server to return and then continue to send the next request." - zh: "正整数,设置最大可发送的异步 HTTP 请求数量。当设置为 1 时,表示每次发送完成 HTTP 请求后都需要等待服务器返回,再继续发送下一个请求。" - } - label: { - en: "HTTP Pipelineing" - zh: "HTTP 管道" - } - } +connect_timeout.label: +"""Connect Timeout""" - request { - desc { - en: """Configure HTTP request parameters.""" - zh: """设置 HTTP 请求的参数。""" - } - label: { - en: "Request" - zh: "HTTP 请求" - } - } +enable_pipelining.desc: +"""A positive integer. Whether to send HTTP requests continuously, when set to 1, it means that after each HTTP request is sent, you need to wait for the server to return and then continue to send the next request.""" - method { - desc { - en: "HTTP method." - zh: "HTTP 请求方法。" - } - label: { - en: "HTTP Method" - zh: "HTTP 请求方法" - } - } +enable_pipelining.label: +"""HTTP Pipelineing""" - path { - desc { - en: "URL path." - zh: "HTTP请求路径。" - } - label: { - en: "URL Path" - zh: "HTTP请求路径" - } - } +headers.desc: +"""List of HTTP headers.""" - body { - desc { - en: "HTTP request body." - zh: "HTTP请求报文主体。" - } - label: { - en: "HTTP Body" - zh: "HTTP请求报文主体" - } - } +headers.label: +"""HTTP Headers""" - headers { - desc { - en: "List of HTTP headers." - zh: "HTTP 头字段列表。" - } - label: { - en: "HTTP Headers" - zh: "HTTP 头字段列表" - } - } +max_retries.desc: +"""Max retry times if error on sending request.""" - request_timeout { - desc { - en: "HTTP request timeout." - zh: "HTTP 请求超时。" - } - label: { - en: "Request Timeout" - zh: "HTTP 请求超时" - } - } +max_retries.label: +"""Max Retries""" + +method.desc: +"""HTTP method.""" + +method.label: +"""HTTP Method""" + +path.desc: +"""URL path.""" + +path.label: +"""URL Path""" + +pool_size.desc: +"""The pool size.""" + +pool_size.label: +"""Pool Size""" + +pool_type.desc: +"""The type of the pool. 
Can be one of `random`, `hash`.""" + +pool_type.label: +"""Pool Type""" + +request.desc: +"""Configure HTTP request parameters.""" + +request.label: +"""Request""" + +request_timeout.desc: +"""HTTP request timeout.""" + +request_timeout.label: +"""Request Timeout""" } diff --git a/rel/i18n/emqx_connector_ldap.hocon b/rel/i18n/emqx_connector_ldap.hocon index 0bcb4869e..64a953816 100644 --- a/rel/i18n/emqx_connector_ldap.hocon +++ b/rel/i18n/emqx_connector_ldap.hocon @@ -1,37 +1,21 @@ emqx_connector_ldap { - bind_dn { - desc { - en: """LDAP's Binding Distinguished Name (DN)""" - zh: """LDAP 绑定的 DN 的值""" - } - label: { - en: "Bind DN" - zh: "Bind DN" - } - } +bind_dn.desc: +"""LDAP's Binding Distinguished Name (DN)""" - port { - desc { - en: """LDAP Port""" - zh: """LDAP 端口""" - } - label: { - en: "Port" - zh: "端口" - } - } +bind_dn.label: +"""Bind DN""" +port.desc: +"""LDAP Port""" - timeout { - desc { - en: """LDAP's query timeout""" - zh: """LDAP 查询超时时间""" - } - label: { - en: "timeout" - zh: "超时时间" - } - } +port.label: +"""Port""" + +timeout.desc: +"""LDAP's query timeout""" + +timeout.label: +"""timeout""" } diff --git a/rel/i18n/emqx_connector_mongo.hocon b/rel/i18n/emqx_connector_mongo.hocon index 6a2511ec8..bba26d736 100644 --- a/rel/i18n/emqx_connector_mongo.hocon +++ b/rel/i18n/emqx_connector_mongo.hocon @@ -1,277 +1,152 @@ emqx_connector_mongo { - single_mongo_type { - desc { - en: "Standalone instance. Must be set to 'single' when MongoDB server is running in standalone mode." - zh: "Standalone 模式。当 MongoDB 服务运行在 standalone 模式下,该配置必须设置为 'single'。" - } - label: { - en: "Standalone instance" - zh: "Standalone 模式" - } - } +auth_source.desc: +"""Database name associated with the user's credentials.""" - rs_mongo_type { - desc { - en: "Replica set. Must be set to 'rs' when MongoDB server is running in 'replica set' mode." - zh: "Replica set模式。当 MongoDB 服务运行在 replica-set 模式下,该配置必须设置为 'rs'。" - } - label: { - en: "Replica set" - zh: "Replica set 模式" - } - } +auth_source.label: +"""Auth Source""" - sharded_mongo_type { - desc { - en: "Sharded cluster. Must be set to 'sharded' when MongoDB server is running in 'sharded' mode." - zh: "Sharded cluster模式。当 MongoDB 服务运行在 sharded 模式下,该配置必须设置为 'sharded'。" - } - label: { - en: "Sharded cluster" - zh: "Sharded cluster 模式" - } - } +connect_timeout.desc: +"""The duration to attempt a connection before timing out.""" - auth_source { - desc { - en: "Database name associated with the user's credentials." - zh: "与用户证书关联的数据库名称。" - } - label: { - en: "Auth Source" - zh: "认证源" - } - } +connect_timeout.label: +"""Connect Timeout""" - server { - desc { - en: """The IPv4 or IPv6 address or the hostname to connect to.
+desc_rs.desc: +"""Settings for replica set.""" + +desc_rs.label: +"""Setting Replica Set""" + +desc_sharded.desc: +"""Settings for sharded cluster.""" + +desc_sharded.label: +"""Setting Sharded Cluster""" + +desc_single.desc: +"""Settings for a single MongoDB instance.""" + +desc_single.label: +"""Setting Single MongoDB""" + +desc_topology.desc: +"""Topology of MongoDB.""" + +desc_topology.label: +"""Setting Topology""" + +heartbeat_period.desc: +"""Controls when the driver checks the state of the MongoDB deployment. Specify the interval between checks, counted from the end of the previous check until the beginning of the next one. If the number of connections is increased (which will happen, for example, if you increase the pool size), you may need to increase this period as well to avoid creating too many log entries in the MongoDB log file.""" + +heartbeat_period.label: +"""Heartbeat period""" + +local_threshold.desc: +"""The size of the latency window for selecting among multiple suitable MongoDB instances.""" + +local_threshold.label: +"""Local Threshold""" + +max_overflow.desc: +"""Max Overflow.""" + +max_overflow.label: +"""Max Overflow""" + +min_heartbeat_period.desc: +"""Controls the minimum amount of time to wait between heartbeats.""" + +min_heartbeat_period.label: +"""Minimum Heartbeat Period""" + +overflow_check_period.desc: +"""Period for checking if there are more workers than configured ("overflow").""" + +overflow_check_period.label: +"""Overflow Check Period""" + +overflow_ttl.desc: +"""Period of time before workers that exceed the configured pool size ("overflow") to be terminated.""" + +overflow_ttl.label: +"""Overflow TTL""" + +r_mode.desc: +"""Read mode.""" + +r_mode.label: +"""Read Mode""" + +replica_set_name.desc: +"""Name of the replica set.""" + +replica_set_name.label: +"""Replica Set Name""" + +rs_mongo_type.desc: +"""Replica set. Must be set to 'rs' when MongoDB server is running in 'replica set' mode.""" + +rs_mongo_type.label: +"""Replica set""" + +server.desc: +"""The IPv4 or IPv6 address or the hostname to connect to.
A host entry has the following form: `Host[:Port]`.
The MongoDB default port 27017 is used if `[:Port]` is not specified.""" - zh: """将要连接的 IPv4 或 IPv6 地址,或者主机名。
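Combining the host format with the replica-set fields documented here, an illustrative connection section might be (hosts, set name and auth source are placeholders):

    mongo_type = rs
    servers = "mongo1.example.com:27017,mongo2.example.com:27017"
    replica_set_name = rs0
    w_mode = safe
    r_mode = master
    auth_source = admin
    srv_record = false
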
-主机名具有以下形式:`Host[:Port]`。
-如果未指定 `[:Port]`,则使用 MongoDB 默认端口 27017。""" - } - label: { - en: "Server Host" - zh: "服务器地址" - } - } - servers { - desc { - en: """A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node].` +server.label: +"""Server Host""" + +server_selection_timeout.desc: +"""Specifies how long to block for server selection before throwing an exception.""" + +server_selection_timeout.label: +"""Server Selection Timeout""" + +servers.desc: +"""A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node].` For each Node should be: The IPv4 or IPv6 address or the hostname to connect to. A host entry has the following form: `Host[:Port]`. The MongoDB default port 27017 is used if `[:Port]` is not specified.""" - zh: """集群将要连接的节点列表。 节点之间用逗号分隔,如:`Node[,Node].` -每个节点的配置为:将要连接的 IPv4 或 IPv6 地址或主机名。 -主机名具有以下形式:`Host[:Port]`。 -如果未指定 `[:Port]`,则使用 MongoDB 默认端口 27017。""" - } - label: { - en: "Servers" - zh: "服务器列表" - } - } - w_mode { - desc { - en: "Write mode." - zh: "写模式。" - } - label: { - en: "Write Mode" - zh: "写模式" - } - } +servers.label: +"""Servers""" - r_mode { - desc { - en: "Read mode." - zh: "读模式。" - } - label: { - en: "Read Mode" - zh: "读模式" - } - } +sharded_mongo_type.desc: +"""Sharded cluster. Must be set to 'sharded' when MongoDB server is running in 'sharded' mode.""" - overflow_ttl { - desc { - en: "Period of time before workers that exceed the configured pool size (\"overflow\") to be terminated." - zh: "当池内工人太多时,等待多久清除多余工人。" - } - label { - en: "Overflow TTL" - zh: "溢出TTL" - } - } +sharded_mongo_type.label: +"""Sharded cluster""" - overflow_check_period { - desc { - en: "Period for checking if there are more workers than configured (\"overflow\")." - zh: "检查是否有超过配置的工人的周期(\"溢出\")。" - } - label { - en: "Overflow Check Period" - zh: "溢出检查周期" - } - } +single_mongo_type.desc: +"""Standalone instance. Must be set to 'single' when MongoDB server is running in standalone mode.""" - local_threshold { - desc { - en: "The size of the latency window for selecting among multiple suitable MongoDB instances." - zh: "在多个合适的MongoDB实例中进行选择的延迟窗口的大小。" - } - label { - en: "Local Threshold" - zh: "本地阈值" - } - } +single_mongo_type.label: +"""Standalone instance""" - connect_timeout { - desc { - en: "The duration to attempt a connection before timing out." - zh: "超时重连的等待时间。" - } - label { - en: "Connect Timeout" - zh: "连接超时" - } - } +socket_timeout.desc: +"""The duration to attempt to send or to receive on a socket before the attempt times out.""" - socket_timeout { - desc { - en: "The duration to attempt to send or to receive on a socket before the attempt times out." - zh: "在尝试超时之前,在套接字上尝试发送或接收的持续时间。" - } - label { - en: "Socket Timeout" - zh: "套接字操作超时" - } - } +socket_timeout.label: +"""Socket Timeout""" - server_selection_timeout { - desc { - en: "Specifies how long to block for server selection before throwing an exception." - zh: "指定在抛出异常之前为服务器选择阻断多长时间。" - } - label { - en: "Server Selection Timeout" - zh: "服务器选择超时" - } - } +srv_record.desc: +"""Use DNS SRV record.""" - wait_queue_timeout { - desc { - en: "The maximum duration that a worker can wait for a connection to become available." - zh: "工作者等待连接可用的最长时间。" - } - label { - en: "Wait Queue Timeout" - zh: "等待队列超时" - } - } +srv_record.label: +"""Srv Record""" - heartbeat_period { - desc { - en: "Controls when the driver checks the state of the MongoDB deployment. 
Specify the interval between checks, counted from the end of the previous check until the beginning of the next one. If the number of connections is increased (which will happen, for example, if you increase the pool size), you may need to increase this period as well to avoid creating too many log entries in the MongoDB log file." - zh: "控制驱动程序何时检查MongoDB部署的状态。指定检查的间隔时间,从上一次检查结束到下一次检查开始计算。如果连接数增加(例如,如果你增加池子的大小,就会发生这种情况),你可能也需要增加这个周期,以避免在MongoDB日志文件中创建太多的日志条目。" - } - label { - en: "Heartbeat period" - zh: "心跳期" - } - } +w_mode.desc: +"""Write mode.""" - min_heartbeat_period { - desc { - en: "Controls the minimum amount of time to wait between heartbeats." - zh: "心跳间的最小间隙" - } - label { - en: "Minimum Heartbeat Period" - zh: "最小心跳周期" - } - } +w_mode.label: +"""Write Mode""" - max_overflow { - desc { - en: "Max Overflow." - zh: "最大溢出。" - } - label: { - en: "Max Overflow" - zh: "最大溢出" - } - } +wait_queue_timeout.desc: +"""The maximum duration that a worker can wait for a connection to become available.""" - replica_set_name { - desc { - en: "Name of the replica set." - zh: "副本集的名称。" - } - label: { - en: "Replica Set Name" - zh: "副本集名称" - } - } - - srv_record { - desc { - en: "Use DNS SRV record." - zh: "使用 DNS SRV 记录。" - } - label: { - en: "Srv Record" - zh: "SRV 记录" - } - } - - desc_single { - desc { - en: """Settings for a single MongoDB instance.""" - zh: """配置 Single 模式""" - } - label: { - en: "Setting Single MongoDB" - zh: "配置 Single 模式" - } - } - - desc_rs { - desc { - en: """Settings for replica set.""" - zh: """配置 Replica Set""" - } - label: { - en: "Setting Replica Set" - zh: "配置 Replica Set" - } - } - - desc_sharded { - desc { - en: """Settings for sharded cluster.""" - zh: """配置 Sharded Cluster""" - } - label: { - en: "Setting Sharded Cluster" - zh: "配置 Sharded Cluster" - } - } - - desc_topology { - desc { - en: """Topology of MongoDB.""" - zh: """配置 Topology""" - } - label: { - en: "Setting Topology" - zh: "配置 Topology" - } - } +wait_queue_timeout.label: +"""Wait Queue Timeout""" } diff --git a/rel/i18n/emqx_connector_mqtt.hocon b/rel/i18n/emqx_connector_mqtt.hocon index 5ade54670..80303b825 100644 --- a/rel/i18n/emqx_connector_mqtt.hocon +++ b/rel/i18n/emqx_connector_mqtt.hocon @@ -1,35 +1,21 @@ emqx_connector_mqtt { - num_of_bridges { - desc { - en: "The current number of bridges that are using this connector." - zh: "当前使用此连接器的网桥数量。" - } - label: { - en: "Num of Bridges" - zh: "网桥数量" - } - } - type { - desc { - en: "The Connector Type." - zh: "连接器类型。" - } - label: { - en: "Connector Type" - zh: "连接器类型" - } - } +name.desc: +"""Connector name, used as a human-readable description of the connector.""" - name { - desc { - en: "Connector name, used as a human-readable description of the connector." - zh: "连接器名称,人类可读的连接器描述。" - } - label: { - en: "Connector Name" - zh: "连接器名称" - } - } +name.label: +"""Connector Name""" + +num_of_bridges.desc: +"""The current number of bridges that are using this connector.""" + +num_of_bridges.label: +"""Num of Bridges""" + +type.desc: +"""The Connector Type.""" + +type.label: +"""Connector Type""" } diff --git a/rel/i18n/emqx_connector_mqtt_schema.hocon b/rel/i18n/emqx_connector_mqtt_schema.hocon index 0de97d84b..e37e87e49 100644 --- a/rel/i18n/emqx_connector_mqtt_schema.hocon +++ b/rel/i18n/emqx_connector_mqtt_schema.hocon @@ -1,319 +1,178 @@ emqx_connector_mqtt_schema { - ingress_desc { - desc { - en: """The ingress config defines how this bridge receive messages from the remote MQTT broker, and then + +bridge_mode.desc: +"""If enable bridge mode. 
+NOTE: This setting is only for MQTT protocol version older than 5.0, and the remote MQTT +broker MUST support this feature. +If bridge_mode is set to true, the bridge will indicate to the remote broker that it is a bridge not an ordinary client. +This means that loop detection will be more effective and that retained messages will be propagated correctly.""" + +bridge_mode.label: +"""Bridge Mode""" + +clean_start.desc: +"""Whether to start a clean session when reconnecting a remote broker for ingress bridge""" + +clean_start.label: +"""Clean Session""" + +clientid_prefix.desc: +"""Optional prefix to prepend to the clientid used by egress bridges.""" + +clientid_prefix.label: +"""Clientid Prefix""" + +egress_desc.desc: +"""The egress config defines how this bridge forwards messages from the local broker to the remote broker.
+Template with variables is allowed in 'remote.topic', 'local.qos', 'local.retain', 'local.payload'.
+NOTE: if this bridge is used as the action of a rule, and also 'local.topic' +is configured, then both the data got from the rule and the MQTT messages that matches +'local.topic' will be forwarded.""" + +egress_desc.label: +"""Egress Configs""" + +egress_local.desc: +"""The configs about receiving messages from local broker.""" + +egress_local.label: +"""Local Configs""" + +egress_local_topic.desc: +"""The local topic to be forwarded to the remote broker""" + +egress_local_topic.label: +"""Local Topic""" + +egress_remote.desc: +"""The configs about sending message to the remote broker.""" + +egress_remote.label: +"""Remote Configs""" + +egress_remote_qos.desc: +"""The QoS of the MQTT message to be sent.
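A sketch of an egress section built from these fields, forwarding a local topic to the remote broker with templated topic and payload (topic names are placeholders):

    egress {
      local {topic = "local/out/#"}
      remote {
        topic = "from-emqx/${topic}"
        qos = 1
        retain = false
        payload = "${payload}"
      }
    }
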
+Template with variables is allowed.""" + +egress_remote_qos.label: +"""Remote QoS""" + +egress_remote_topic.desc: +"""Forward to which topic of the remote broker.
+Template with variables is allowed.""" + +egress_remote_topic.label: +"""Remote Topic""" + +ingress_desc.desc: +"""The ingress config defines how this bridge receive messages from the remote MQTT broker, and then send them to the local broker.
Template with variables is allowed in 'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'.
NOTE: if this bridge is used as the input of a rule, and also 'local.topic' is configured, then messages got from the remote broker will be sent to both the 'local.topic' and the rule.""" - zh: """入口配置定义了该桥接如何从远程 MQTT Broker 接收消息,然后将消息发送到本地 Broker。
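A corresponding ingress sketch, subscribing on the remote broker and republishing locally through the templates described above (topic names are placeholders):

    ingress {
      remote {
        topic = "cloud/in/#"
        qos = 1
      }
      local {
        topic = "from-cloud/${topic}"
        qos = "${qos}"
        retain = "${retain}"
        payload = "${payload}"
      }
    }
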
- 以下字段中允许使用带有变量的模板:'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'。
- 注意:如果此桥接被用作规则的输入,并且配置了 'local.topic',则从远程代理获取的消息将同时被发送到 'local.topic' 和规则。""" - } - label: { - en: "Ingress Configs" - zh: "入方向配置" - } - } - egress_desc { - desc { - en: """The egress config defines how this bridge forwards messages from the local broker to the remote broker.
-Template with variables is allowed in 'remote.topic', 'local.qos', 'local.retain', 'local.payload'.
-NOTE: if this bridge is used as the action of a rule, and also 'local.topic' -is configured, then both the data got from the rule and the MQTT messages that matches -'local.topic' will be forwarded.""" - zh: """出口配置定义了该桥接如何将消息从本地 Broker 转发到远程 Broker。 -以下字段中允许使用带有变量的模板:'remote.topic', 'local.qos', 'local.retain', 'local.payload'。
-注意:如果此桥接被用作规则的动作,并且配置了 'local.topic',则从规则输出的数据以及匹配到 'local.topic' 的 MQTT 消息都会被转发。""" - } - label: { - en: "Egress Configs" - zh: "出方向配置" - } - } +ingress_desc.label: +"""Ingress Configs""" - ingress_remote { - desc { - en: """The configs about subscribing to the remote broker.""" - zh: """订阅远程 Broker 相关的配置。""" - } - label: { - en: "Remote Configs" - zh: "远程配置" - } - } +ingress_local.desc: +"""The configs about sending message to the local broker.""" - ingress_local { - desc { - en: """The configs about sending message to the local broker.""" - zh: """发送消息到本地 Broker 相关的配置。""" - } - label: { - en: "Local Configs" - zh: "本地配置" - } - } +ingress_local.label: +"""Local Configs""" - egress_remote { - desc { - en: """The configs about sending message to the remote broker.""" - zh: """发送消息到远程 Broker 相关的配置。""" - } - label: { - en: "Remote Configs" - zh: "远程配置" - } - } +ingress_local_qos.desc: +"""The QoS of the MQTT message to be sent.
+Template with variables is allowed.""" - egress_local { - desc { - en: """The configs about receiving messages from local broker.""" - zh: """如何从本地 Broker 接收消息相关的配置。""" - } - label: { - en: "Local Configs" - zh: "本地配置" - } - } +ingress_local_qos.label: +"""Local QoS""" - mode { - desc { - en: """The mode of the MQTT Bridge.
+ingress_local_topic.desc: +"""Send messages to which topic of the local broker.
+Template with variables is allowed.""" + +ingress_local_topic.label: +"""Local Topic""" + +ingress_remote.desc: +"""The configs about subscribing to the remote broker.""" + +ingress_remote.label: +"""Remote Configs""" + +ingress_remote_qos.desc: +"""The QoS level to be used when subscribing to the remote broker""" + +ingress_remote_qos.label: +"""Remote QoS""" + +ingress_remote_topic.desc: +"""Receive messages from which topic of the remote broker""" + +ingress_remote_topic.label: +"""Remote Topic""" + +max_inflight.desc: +"""Max inflight (sent, but un-acked) messages of the MQTT protocol""" + +max_inflight.label: +"""Max Inflight Message""" + +mode.desc: +"""The mode of the MQTT Bridge.
- cluster_shareload: create an MQTT connection on each node in the emqx cluster.
In 'cluster_shareload' mode, the incoming load from the remote broker is shared by using shared subscription.
Note that the 'clientid' is suffixed by the node name, this is to avoid clientid conflicts between different nodes. And we can only use shared subscription topic filters for remote.topic of ingress connections.""" - zh: """MQTT 桥的模式。
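For the cluster_shareload mode described here, the remote ingress topic is typically written as a shared-subscription filter, for example (group name and topic are placeholders):

    mode = cluster_shareload
    ingress.remote.topic = "$share/bridge-group/cloud/in/#"
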
-- cluster_shareload:在 emqx 集群的每个节点上创建一个 MQTT 连接。
-在“cluster_shareload”模式下,来自远程代理的传入负载通过共享订阅的方式接收。
-请注意,clientid 以节点名称为后缀,这是为了避免不同节点之间的 clientid 冲突。 -而且对于入口连接的 remote.topic,我们只能使用共享订阅主题过滤器。""" - } - label: { - en: "MQTT Bridge Mode" - zh: "MQTT 桥接模式" - } - } - server { - desc { - en: "The host and port of the remote MQTT broker" - zh: "远程 MQTT Broker的主机和端口。" - } - label: { - en: "Broker IP And Port" - zh: "Broker主机和端口" - } - } +mode.label: +"""MQTT Bridge Mode""" - bridge_mode { - desc { - en: """If enable bridge mode. -NOTE: This setting is only for MQTT protocol version older than 5.0, and the remote MQTT -broker MUST support this feature. -If bridge_mode is set to true, the bridge will indicate to the remote broker that it is a bridge not an ordinary client. -This means that loop detection will be more effective and that retained messages will be propagated correctly.""" - zh: """是否启用 Bridge Mode。 -注意:此设置只针对 MQTT 协议版本 < 5.0 有效,并且需要远程 MQTT Broker 支持 Bridge Mode。 -如果设置为 true ,桥接会告诉远端服务器当前连接是一个桥接而不是一个普通的客户端。 -这意味着消息回环检测会更加高效,并且远端服务器收到的保留消息的标志位会透传给本地。""" - } - label { - en: "Bridge Mode" - zh: "Bridge 模式" - } - } +password.desc: +"""The password of the MQTT protocol""" - proto_ver { - desc { - en: "The MQTT protocol version" - zh: "MQTT 协议版本" - } - label: { - en: "Protocol Version" - zh: "协议版本" - } - } +password.label: +"""Password""" - username { - desc { - en: "The username of the MQTT protocol" - zh: "MQTT 协议的用户名" - } - label: { - en: "Username" - zh: "用户名" - } - } - - password { - desc { - en: "The password of the MQTT protocol" - zh: "MQTT 协议的密码" - } - label: { - en: "Password" - zh: "密码" - } - } - - clean_start { - desc { - en: "Whether to start a clean session when reconnecting a remote broker for ingress bridge" - zh: "与 ingress MQTT 桥的远程服务器重连时是否清除老的 MQTT 会话。" - } - label: { - en: "Clean Session" - zh: "清除会话" - } - } - - max_inflight { - desc { - en: "Max inflight (sent, but un-acked) messages of the MQTT protocol" - zh: "MQTT 协议的最大飞行(已发送但未确认)消息" - } - label: { - en: "Max Inflight Message" - zh: "最大飞行消息" - } - } - - ingress_remote_topic { - desc { - en: "Receive messages from which topic of the remote broker" - zh: "从远程broker的哪个topic接收消息" - } - label: { - en: "Remote Topic" - zh: "远程主题" - } - } - - ingress_remote_qos { - desc { - en: "The QoS level to be used when subscribing to the remote broker" - zh: "订阅远程borker时要使用的 QoS 级别" - } - label: { - en: "Remote QoS" - zh: "远程 QoS" - } - } - - ingress_local_topic { - desc { - en: """Send messages to which topic of the local broker.
+payload.desc: +"""The payload of the MQTT message to be sent.
Template with variables is allowed.""" - zh: """向本地broker的哪个topic发送消息。
-允许使用带有变量的模板。""" - } - label: { - en: "Local Topic" - zh: "本地主题" - } - } - ingress_local_qos { - desc { - en: """The QoS of the MQTT message to be sent.
+payload.label: +"""Payload""" + +proto_ver.desc: +"""The MQTT protocol version""" + +proto_ver.label: +"""Protocol Version""" + +retain.desc: +"""The 'retain' flag of the MQTT message to be sent.
Template with variables is allowed.""" - zh: """待发送 MQTT 消息的 QoS。
-允许使用带有变量的模板。""" - } - label: { - en: "Local QoS" - zh: "本地 QoS" - } - } - egress_local_topic { - desc { - en: "The local topic to be forwarded to the remote broker" - zh: "要转发到远程broker的本地主题" - } - label: { - en: "Local Topic" - zh: "本地主题" - } - } +retain.label: +"""Retain Flag""" - egress_remote_topic { - desc { - en: """Forward to which topic of the remote broker.
-Template with variables is allowed.""" - zh: """转发到远程broker的哪个topic。
-允许使用带有变量的模板。""" - } - label: { - en: "Remote Topic" - zh: "远程主题" - } - } +server.desc: +"""The host and port of the remote MQTT broker""" - egress_remote_qos { - desc { - en: """The QoS of the MQTT message to be sent.
-Template with variables is allowed.""" - zh: """待发送 MQTT 消息的 QoS。
-允许使用带有变量的模板。""" - } - label: { - en: "Remote QoS" - zh: "远程 QoS" - } - } +server.label: +"""Broker IP And Port""" - retain { - desc { - en: """The 'retain' flag of the MQTT message to be sent.
-Template with variables is allowed.""" - zh: """要发送的 MQTT 消息的“保留”标志。
-允许使用带有变量的模板。""" - } - label: { - en: "Retain Flag" - zh: "保留消息标志" - } - } +server_configs.desc: +"""Configs related to the server.""" - payload { - desc { - en: """The payload of the MQTT message to be sent.
-Template with variables is allowed.""" - zh: """要发送的 MQTT 消息的负载。
-允许使用带有变量的模板。""" - } - label: { - en: "Payload" - zh: "消息负载" - } - } +server_configs.label: +"""Server Configs""" - server_configs { - desc { - en: """Configs related to the server.""" - zh: """服务器相关的配置。""" - } - label: { - en: "Server Configs" - zh: "服务配置。" - } - } +username.desc: +"""The username of the MQTT protocol""" - clientid_prefix { - desc { - en: """Optional prefix to prepend to the clientid used by egress bridges.""" - zh: """可选的前缀,用于在出口网桥使用的clientid前加上前缀。""" - } - label: { - en: "Clientid Prefix" - zh: "客户ID前缀" - } - } +username.label: +"""Username""" } diff --git a/rel/i18n/emqx_connector_mysql.hocon b/rel/i18n/emqx_connector_mysql.hocon index 89e95534b..dd86b62c8 100644 --- a/rel/i18n/emqx_connector_mysql.hocon +++ b/rel/i18n/emqx_connector_mysql.hocon @@ -1,18 +1,11 @@ emqx_connector_mysql { - server { - desc { - en: """The IPv4 or IPv6 address or the hostname to connect to.
+server.desc: +"""The IPv4 or IPv6 address or the hostname to connect to.
A host entry has the following form: `Host[:Port]`.
The MySQL default port 3306 is used if `[:Port]` is not specified.""" - zh: """将要连接的 IPv4 或 IPv6 地址,或者主机名。
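For example, both of the following forms are accepted by this field; the second one falls back to the default port 3306 (hostnames are placeholders):

    server = "db.example.com:3306"
    server = "db.example.com"
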
-主机名具有以下形式:`Host[:Port]`。
-如果未指定 `[:Port]`,则使用 MySQL 默认端口 3306。""" - } - label: { - en: "Server Host" - zh: "服务器地址" - } - } + +server.label: +"""Server Host""" } diff --git a/rel/i18n/emqx_connector_pgsql.hocon b/rel/i18n/emqx_connector_pgsql.hocon index 33246c844..485e666a0 100644 --- a/rel/i18n/emqx_connector_pgsql.hocon +++ b/rel/i18n/emqx_connector_pgsql.hocon @@ -1,18 +1,11 @@ emqx_connector_pgsql { - server { - desc { - en: """The IPv4 or IPv6 address or the hostname to connect to.
+server.desc: +"""The IPv4 or IPv6 address or the hostname to connect to.
A host entry has the following form: `Host[:Port]`.
The PostgreSQL default port 5432 is used if `[:Port]` is not specified.""" - zh: """将要连接的 IPv4 或 IPv6 地址,或者主机名。
-主机名具有以下形式:`Host[:Port]`。
-如果未指定 `[:Port]`,则使用 PostgreSQL 默认端口 5432。""" - } - label: { - en: "Server Host" - zh: "服务器地址" - } - } + +server.label: +"""Server Host""" } diff --git a/rel/i18n/emqx_connector_redis.hocon b/rel/i18n/emqx_connector_redis.hocon index 5915725a2..5dc887b72 100644 --- a/rel/i18n/emqx_connector_redis.hocon +++ b/rel/i18n/emqx_connector_redis.hocon @@ -1,90 +1,50 @@ emqx_connector_redis { - single { - desc { - en: "Single mode. Must be set to 'single' when Redis server is running in single mode." - zh: "单机模式。当 Redis 服务运行在单机模式下,该配置必须设置为 'single'。" - } - label: { - en: "Single Mode" - zh: "单机模式" - } - } +cluster.desc: +"""Cluster mode. Must be set to 'cluster' when Redis server is running in clustered mode.""" - cluster { - desc { - en: "Cluster mode. Must be set to 'cluster' when Redis server is running in clustered mode." - zh: "集群模式。当 Redis 服务运行在集群模式下,该配置必须设置为 'cluster'。" - } - label: { - en: "Cluster Mode" - zh: "集群模式" - } - } +cluster.label: +"""Cluster Mode""" - sentinel { - desc { - en: "Sentinel mode. Must be set to 'sentinel' when Redis server is running in sentinel mode." - zh: "哨兵模式。当 Redis 服务运行在哨兵模式下,该配置必须设置为 'sentinel'。" - } - label: { - en: "Sentinel Mode" - zh: "哨兵模式" - } - } +database.desc: +"""Redis database ID.""" - sentinel_desc { - desc { - en: "The cluster name in Redis sentinel mode." - zh: "Redis 哨兵模式下的集群名称。" - } - label: { - en: "Cluster Name" - zh: "集群名称" - } - } +database.label: +"""Database ID""" - server { - desc { - en: """The IPv4 or IPv6 address or the hostname to connect to.
+sentinel.desc: +"""Sentinel mode. Must be set to 'sentinel' when Redis server is running in sentinel mode.""" + +sentinel.label: +"""Sentinel Mode""" + +sentinel_desc.desc: +"""The cluster name in Redis sentinel mode.""" + +sentinel_desc.label: +"""Cluster Name""" + +server.desc: +"""The IPv4 or IPv6 address or the hostname to connect to.
A host entry has the following form: `Host[:Port]`.
The Redis default port 6379 is used if `[:Port]` is not specified.""" - zh: """将要连接的 IPv4 或 IPv6 地址,或者主机名。
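An illustrative cluster-mode connection using the node-list form described above (addresses are placeholders):

    redis_type = cluster
    servers = "10.0.0.1:6379,10.0.0.2:6379,10.0.0.3:6379"
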
-主机名具有以下形式:`Host[:Port]`。
-如果未指定 `[:Port]`,则使用 Redis 默认端口 6379。""" - } - label: { - en: "Server Host" - zh: "服务器地址" - } - } - servers { - desc { - en: """A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node].` +server.label: +"""Server Host""" + +servers.desc: +"""A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node].` For each Node should be: The IPv4 or IPv6 address or the hostname to connect to. A host entry has the following form: `Host[:Port]`. The Redis default port 6379 is used if `[:Port]` is not specified.""" - zh: """集群将要连接的节点列表。 节点之间用逗号分隔,如:`Node[,Node].` -每个节点的配置为:将要连接的 IPv4 或 IPv6 地址或主机名。 -主机名具有以下形式:`Host[:Port]`。 -如果未指定 `[:Port]`,则使用 Redis 默认端口 6379。""" - } - label: { - en: "Servers" - zh: "服务器列表" - } - } - database { - desc { - en: "Redis database ID." - zh: "Redis 数据库 ID。" - } - label: { - en: "Database ID" - zh: "数据库 ID" - } - } +servers.label: +"""Servers""" + +single.desc: +"""Single mode. Must be set to 'single' when Redis server is running in single mode.""" + +single.label: +"""Single Mode""" } diff --git a/rel/i18n/emqx_connector_schema_lib.hocon b/rel/i18n/emqx_connector_schema_lib.hocon index 1bc45c36d..0e8a2e9a3 100644 --- a/rel/i18n/emqx_connector_schema_lib.hocon +++ b/rel/i18n/emqx_connector_schema_lib.hocon @@ -1,80 +1,45 @@ emqx_connector_schema_lib { - ssl { - desc { - en: "SSL connection settings." - zh: "启用 SSL 连接。" - } - label: { - en: "Enable SSL" - zh: "启用SSL" - } - } +auto_reconnect.desc: +"""Deprecated. Enable automatic reconnect to the database.""" - prepare_statement { - desc { - en: "Key-value list of SQL prepared statements." - zh: "SQL 预处理语句列表。" - } - label: { - en: "SQL Prepared Statements List" - zh: "SQL 预处理语句列表" - } - } +auto_reconnect.label: +"""Deprecated. Auto Reconnect Database""" - database_desc { - desc { - en: "Database name." - zh: "数据库名字。" - } - label: { - en: "Database Name" - zh: "数据库名字" - } - } +database_desc.desc: +"""Database name.""" - pool_size { - desc { - en: "Size of the connection pool towards the bridge target service." - zh: "桥接远端服务时使用的连接池大小。" - } - label: { - en: "Connection Pool Size" - zh: "连接池大小" - } - } +database_desc.label: +"""Database Name""" - username { - desc { - en: "EMQX's username in the external database." - zh: "内部数据库的用户名。" - } - label: { - en: "Username" - zh: "用户名" - } - } +password.desc: +"""EMQX's password in the external database.""" - password { - desc { - en: "EMQX's password in the external database." - zh: "内部数据库密码。" - } - label: { - en: "Password" - zh: "密码" - } - } +password.label: +"""Password""" - auto_reconnect { - desc { - en: "Deprecated. Enable automatic reconnect to the database." - zh: "已弃用。自动重连数据库。" - } - label: { - en: "Deprecated. 
Auto Reconnect Database" - zh: "已弃用。自动重连数据库" - } - } +pool_size.desc: +"""Size of the connection pool towards the bridge target service.""" + +pool_size.label: +"""Connection Pool Size""" + +prepare_statement.desc: +"""Key-value list of SQL prepared statements.""" + +prepare_statement.label: +"""SQL Prepared Statements List""" + +ssl.desc: +"""SSL connection settings.""" + +ssl.label: +"""Enable SSL""" + +username.desc: +"""EMQX's username in the external database.""" + +username.label: +"""Username""" } diff --git a/rel/i18n/emqx_dashboard_api.hocon b/rel/i18n/emqx_dashboard_api.hocon index 9a6390a02..3e5bb6239 100644 --- a/rel/i18n/emqx_dashboard_api.hocon +++ b/rel/i18n/emqx_dashboard_api.hocon @@ -1,150 +1,66 @@ emqx_dashboard_api { - token { - desc { - en: """Dashboard Auth Token""" - zh: """Dashboard 认证 Token""" - } - } +change_pwd_api.desc: +"""Change dashboard user password""" - username { - desc { - en: """Dashboard Username""" - zh: """Dashboard 用户名""" - } - } +create_user_api.desc: +"""Create dashboard user""" - user_description { - desc { - en: """Dashboard User Description""" - zh: """Dashboard 用户描述""" - } - } +create_user_api_success.desc: +"""Create dashboard user success""" - password { - desc { - en: """Dashboard Password""" - zh: """Dashboard 密码""" - } - } +delete_user_api.desc: +"""Delete dashboard user""" - license { - desc { - en: """EMQX License. opensource or enterprise""" - zh: """EMQX 许可类型。可为 opensource 或 enterprise""" - } - } +license.desc: +"""EMQX License. opensource or enterprise""" - version { - desc { - en: """EMQX Version""" - zh: """EMQX 版本""" - } - } +list_users_api.desc: +"""Dashboard list users""" - login_api { - desc { - en: """Get Dashboard Auth Token.""" - zh: """获取 Dashboard 认证 Token。""" - } - } +login_api.desc: +"""Get Dashboard Auth Token.""" - login_success { - desc { - en: """Dashboard Auth Success""" - zh: """Dashboard 认证成功""" - } - } +login_failed401.desc: +"""Login failed. Bad username or password""" - login_failed401 { - desc { - en: """Login failed. Bad username or password""" - zh: """登录失败。用户名或密码错误""" - } - } +login_failed_response400.desc: +"""Login failed. 
Bad username or password""" - logout_api { - desc { - en: """Dashboard user logout""" - zh: """Dashboard 用户登出""" - } - } +login_success.desc: +"""Dashboard Auth Success""" - list_users_api { - desc { - en: """Dashboard list users""" - zh: """Dashboard 用户列表""" - } - } +logout_api.desc: +"""Dashboard user logout""" - create_user_api { - desc { - en: """Create dashboard user""" - zh: """创建 Dashboard 用户""" - } - } +new_pwd.desc: +"""New password""" - create_user_api_success { - desc { - en: """Create dashboard user success""" - zh: """创建 Dashboard 用户成功""" - } - } +old_pwd.desc: +"""Old password""" - update_user_api { - desc { - en: """Update dashboard user description""" - zh: """更新 Dashboard 用户描述""" - } - } +password.desc: +"""Dashboard Password""" - update_user_api200 { - desc { - en: """Update dashboard user success""" - zh: """更新 Dashboard 用户成功""" - } - } +token.desc: +"""Dashboard Auth Token""" - delete_user_api { - desc { - en: """Delete dashboard user""" - zh: """删除 Dashboard 用户""" - } - } +update_user_api.desc: +"""Update dashboard user description""" - users_api404 { - desc { - en: """Dashboard user not found""" - zh: """Dashboard 用户不存在""" - } - } +update_user_api200.desc: +"""Update dashboard user success""" - change_pwd_api { - desc { - en: """Change dashboard user password""" - zh: """更改 Dashboard 用户密码""" - } - } +user_description.desc: +"""Dashboard User Description""" - old_pwd { - desc { - en: """Old password""" - zh: """旧密码""" - } - } +username.desc: +"""Dashboard Username""" - new_pwd { - desc { - en: """New password""" - zh: """新密码""" - } - } +users_api404.desc: +"""Dashboard user not found""" - login_failed_response400 { - desc { - en: """Login failed. Bad username or password""" - zh: """登录失败。用户名或密码错误""" - } - } +version.desc: +"""EMQX Version""" } diff --git a/rel/i18n/emqx_dashboard_schema.hocon b/rel/i18n/emqx_dashboard_schema.hocon index f81816e63..6bd6ab016 100644 --- a/rel/i18n/emqx_dashboard_schema.hocon +++ b/rel/i18n/emqx_dashboard_schema.hocon @@ -1,225 +1,139 @@ emqx_dashboard_schema { - listeners { - desc { - en: """HTTP(s) listeners are identified by their protocol type and are + +backlog.desc: +"""Defines the maximum length that the queue of pending connections can grow to.""" + +backlog.label: +"""Backlog""" + +bind.desc: +"""Port without IP(18083) or port with specified IP(127.0.0.1:18083).""" + +bind.label: +"""Bind""" + +bootstrap_users_file.desc: +"""Deprecated, use api_key.bootstrap_file.""" + +bootstrap_users_file.label: +"""Deprecated""" + +cors.desc: +"""Support Cross-Origin Resource Sharing (CORS). +Allows a server to indicate any origins (domain, scheme, or port) other than +its own from which a browser should permit loading resources.""" + +cors.label: +"""CORS""" + +default_password.desc: +"""The initial default password for dashboard 'admin' user. +For safety, it should be changed as soon as possible. 
+This value is not valid when you log in to Dashboard for the first time via the web +and change to a complex password as prompted.""" + +default_password.label: +"""Default password""" + +default_username.desc: +"""The default username of the automatically created dashboard user.""" + +default_username.label: +"""Default username""" + +desc_dashboard.desc: +"""Configuration for EMQX dashboard.""" + +desc_dashboard.label: +"""Dashboard""" + +desc_http.desc: +"""Configuration for the dashboard listener (plaintext).""" + +desc_http.label: +"""HTTP""" + +desc_https.desc: +"""Configuration for the dashboard listener (TLS).""" + +desc_https.label: +"""HTTPS""" + +desc_listeners.desc: +"""Configuration for the dashboard listener.""" + +desc_listeners.label: +"""Listeners""" + +i18n_lang.desc: +"""Internationalization language support.""" + +i18n_lang.label: +"""I18n language""" + +inet6.desc: +"""Enable IPv6 support, default is false, which means IPv4 only.""" + +inet6.label: +"""IPv6""" + +ipv6_v6only.desc: +"""Disable IPv4-to-IPv6 mapping for the listener. +The configuration is only valid when the inet6 is true.""" + +ipv6_v6only.label: +"""IPv6 only""" + +listener_enable.desc: +"""Ignore or enable this listener""" + +listener_enable.label: +"""Enable""" + +listeners.desc: +"""HTTP(s) listeners are identified by their protocol type and are used to serve dashboard UI and restful HTTP API. Listeners must have a unique combination of port number and IP address. For example, an HTTP listener can listen on all configured IP addresses on a given port for a machine by specifying the IP address 0.0.0.0. Alternatively, the HTTP listener can specify a unique IP address for each listener, but use the same port.""" - zh: """Dashboard 监听器设置。监听器必须有唯一的端口号和IP地址的组合。 -例如,可以通过指定IP地址 0.0.0.0 来监听机器上给定端口上的所有配置的IP地址。 -或者,可以为每个监听器指定唯一的IP地址,但使用相同的端口。""" - } - label { - en: "Listeners" - zh: "监听器" - } - } - sample_interval { - desc { - en: """How often to update metrics displayed in the dashboard. + +listeners.label: +"""Listeners""" + +max_connections.desc: +"""Maximum number of simultaneous connections.""" + +max_connections.label: +"""Maximum connections""" + +num_acceptors.desc: +"""Socket acceptor pool size for TCP protocols. Default is the number of schedulers online""" + +num_acceptors.label: +"""Number of acceptors""" + +proxy_header.desc: +"""Enable support for `HAProxy` header. Be aware once enabled regular HTTP requests can't be handled anymore.""" + +proxy_header.label: +"""Enable support for HAProxy header""" + +sample_interval.desc: +"""How often to update metrics displayed in the dashboard. Note: `sample_interval` should be a divisor of 60, default is 10s.""" - zh: """Dashboard 中图表指标的时间间隔。必须小于60,且被60的整除,默认设置 10s。""" - } - } - token_expired_time { - desc { - en: "JWT token expiration time. Default is 60 minutes" - zh: "JWT token 过期时间。默认设置为 60 分钟。" - } - label { - en: "Token expired time" - zh: "JWT 过期时间" - } - } - num_acceptors { - desc { - en: "Socket acceptor pool size for TCP protocols. Default is the number of schedulers online" - zh: "TCP协议的Socket acceptor池大小, 默认设置在线的调度器数量(通常为 CPU 核数)" - } - label { - en: "Number of acceptors" - zh: "Acceptor 数量" - } - } - max_connections { - desc { - en: "Maximum number of simultaneous connections." - zh: "同时处理的最大连接数。" - } - label { - en: "Maximum connections" - zh: "最大连接数" - } - } - backlog { - desc { - en: "Defines the maximum length that the queue of pending connections can grow to." 
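Put together, a minimal dashboard section of the kind these options document might look like this (bind address and timings are illustrative):

    dashboard {
      listeners.http {
        bind = "0.0.0.0:18083"
        max_connections = 512
      }
      default_username = "admin"
      sample_interval = "10s"
      token_expired_time = "60m"
    }
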
- zh: "排队等待连接的队列的最大长度。" - } - label { - en: "Backlog" - zh: "排队长度" - } - } - send_timeout { - desc { - en: "Send timeout for the socket." - zh: "Socket发送超时时间。" - } - label { - en: "Send timeout" - zh: "发送超时时间" - } - } - inet6 { - desc { - en: "Enable IPv6 support, default is false, which means IPv4 only." - zh: "启用IPv6, 如果机器不支持IPv6,请关闭此选项,否则会导致 Dashboard 无法使用。" - } - label { - en: "IPv6" - zh: "IPv6" - } - } - ipv6_v6only { - desc { - en: """Disable IPv4-to-IPv6 mapping for the listener. -The configuration is only valid when the inet6 is true.""" - zh: "当开启 inet6 功能的同时禁用 IPv4-to-IPv6 映射。该配置仅在 inet6 功能开启时有效。" - } - label { - en: "IPv6 only" - zh: "IPv6 only" - } - } - proxy_header { - desc { - en: "Enable support for `HAProxy` header. Be aware once enabled regular HTTP requests can't be handled anymore." - zh: "开启对 `HAProxy` 的支持,注意:一旦开启了这个功能,就无法再处理普通的 HTTP 请求了。" - } - label: { - en: "Enable support for HAProxy header" - zh: "开启对 `HAProxy` 的支持" - } - } - desc_dashboard { - desc { - en: "Configuration for EMQX dashboard." - zh: "EMQX Dashboard 配置。" - } - label { - en: "Dashboard" - zh: "Dashboard" - } - } - desc_listeners { - desc { - en: "Configuration for the dashboard listener." - zh: "Dashboard 监听器配置。" - } - label { - en: "Listeners" - zh: "监听器" - } - } - desc_http { - desc { - en: "Configuration for the dashboard listener (plaintext)." - zh: "Dashboard 监听器(HTTP)配置。" - } - label { - en: "HTTP" - zh: "HTTP" - } - } - desc_https { - desc { - en: "Configuration for the dashboard listener (TLS)." - zh: "Dashboard 监听器(HTTPS)配置。" - } - label { - en: "HTTPS" - zh: "HTTPS" - } - } - listener_enable { - desc { - en: "Ignore or enable this listener" - zh: "忽略或启用该监听器。" - } - label { - en: "Enable" - zh: "启用" - } - } - bind { - desc { - en: "Port without IP(18083) or port with specified IP(127.0.0.1:18083)." - zh: "监听地址和端口,热更新此配置时,会重启 Dashboard 服务。" - } - label { - en: "Bind" - zh: "绑定端口" - } - } - default_username { - desc { - en: "The default username of the automatically created dashboard user." - zh: "Dashboard 的默认用户名。" - } - label { - en: "Default username" - zh: "默认用户名" - } - } - default_password { - desc { - en: """The initial default password for dashboard 'admin' user. -For safety, it should be changed as soon as possible. -This value is not valid when you log in to Dashboard for the first time via the web -and change to a complex password as prompted.""" - zh: """Dashboard 的默认密码,为了安全,应该尽快修改密码。 -当通过网页首次登录 Dashboard 并按提示修改成复杂密码后,此值就会失效。""" - } - label { - en: "Default password" - zh: "默认密码" - } - } - cors { - desc { - en: """Support Cross-Origin Resource Sharing (CORS). -Allows a server to indicate any origins (domain, scheme, or port) other than -its own from which a browser should permit loading resources.""" - zh: """支持跨域资源共享(CORS), -允许服务器指示任何来源(域名、协议或端口),除了本服务器之外的任何浏览器应允许加载资源。""" - } - label { - en: "CORS" - zh: "跨域资源共享" - } - } - i18n_lang { - desc { - en: "Internationalization language support." - zh: "设置 Swagger 多语言的版本,可为 en 或 zh。" - } - label { - en: "I18n language" - zh: "多语言支持" - } - } - bootstrap_users_file { - desc { - en: "Deprecated, use api_key.bootstrap_file." - zh: "已废弃,请使用 api_key.bootstrap_file。" - } - label { - en: """Deprecated""" - zh: """已废弃""" - } - } + +send_timeout.desc: +"""Send timeout for the socket.""" + +send_timeout.label: +"""Send timeout""" + +token_expired_time.desc: +"""JWT token expiration time. 
Default is 60 minutes""" + +token_expired_time.label: +"""Token expired time""" + } diff --git a/rel/i18n/emqx_delayed_api.hocon b/rel/i18n/emqx_delayed_api.hocon index d16d61ccf..62e0fd775 100644 --- a/rel/i18n/emqx_delayed_api.hocon +++ b/rel/i18n/emqx_delayed_api.hocon @@ -1,164 +1,72 @@ emqx_delayed_api { - view_status_api { - desc { - en: "Get delayed status" - zh: "查看慢订阅状态" - } - } +bad_msgid_format.desc: +"""Bad Message ID format""" - update_api { - desc { - en: "Enable or disable delayed, set max delayed messages" - zh: "开启或者关闭功能,或者设置延迟消息数量上限" - } - } +count.desc: +"""Count of delayed messages""" - update_success { - desc { - en: "Enable or disable delayed successfully" - zh: "开启或者关闭功能操作成功" - } - } +delayed_interval.desc: +"""Delayed interval(second)""" - illegality_limit { - desc { - en: "Max limit illegality" - zh: "数量上限不合法" - } - } +delayed_remaining.desc: +"""Delayed remaining(second)""" - get_message_api { - desc { - en: "View delayed message" - zh: "查看延迟消息" - } - } +delete_api.desc: +"""Delete delayed message""" - node { - desc { - en: "The node where message from" - zh: "消息的来源节点" - } - } +expected_at.desc: +"""Expect publish time, in RFC 3339 format""" - msgid { - desc { - en: "Delayed Message ID" - zh: "延迟消息 ID" - } - } +from_clientid.desc: +"""From ClientID""" - bad_msgid_format { - desc { - en: "Bad Message ID format" - zh: "消息 ID 格式错误" - } - } +from_username.desc: +"""From Username""" - msgid_not_found { - desc { - en: "Message ID not found" - zh: "未找到对应消息" - } - } +get_message_api.desc: +"""View delayed message""" - delete_api { - desc { - en: "Delete delayed message" - zh: "删除延迟消息" - } - } +illegality_limit.desc: +"""Max limit illegality""" - list_api { - desc { - en: "List delayed messages" - zh: "查看延迟消息列表" - } - } +list_api.desc: +"""List delayed messages""" - view_page { - desc { - en: "View page" - zh: "查看的页数" - } - } +msgid.desc: +"""Delayed Message ID""" - view_limit { - desc { - en: "Page limit" - zh: "每页数量" - } - } +msgid_not_found.desc: +"""Message ID not found""" - count { - desc { - en: "Count of delayed messages" - zh: "延迟消息总数" - } - } +node.desc: +"""The node where message from""" - publish_at { - desc { - en: "Clinet publish message time, in RFC 3339 format" - zh: "客户端发送时间, RFC 3339 格式" - } - } +payload.desc: +"""Payload, base64 encoded. Payload will be set to 'PAYLOAD_TO_LARGE' if its length is larger than 2048 bytes""" - delayed_interval { - desc { - en: "Delayed interval(second)" - zh: "延迟时间(秒)" - } - } +publish_at.desc: +"""Clinet publish message time, in RFC 3339 format""" - delayed_remaining { - desc { - en: "Delayed remaining(second)" - zh: "剩余时间(秒)" - } - } +qos.desc: +"""QoS""" - expected_at { - desc { - en: "Expect publish time, in RFC 3339 format" - zh: "期望的发送时间, RFC 3339 格式" - } - } +topic.desc: +"""Topic""" - topic { - desc { - en: "Topic" - zh: "主题" - } - } +update_api.desc: +"""Enable or disable delayed, set max delayed messages""" - qos { - desc { - en: "QoS" - zh: "QoS" - } - } +update_success.desc: +"""Enable or disable delayed successfully""" - from_clientid { - desc { - en: "From ClientID" - zh: "消息的 ClientID" - } - } +view_limit.desc: +"""Page limit""" - from_username { - desc { - en: "From Username" - zh: "消息的 Username" - } - } +view_page.desc: +"""View page""" - payload { - desc { - en: "Payload, base64 encoded. 
Payload will be set to 'PAYLOAD_TO_LARGE' if its length is larger than 2048 bytes" - zh: "消息内容, base64 格式。如果消息的大小超过 2048 字节,则消息内容会被设置为 'PAYLOAD_TO_LARGE'" - } - } +view_status_api.desc: +"""Get delayed status""" } diff --git a/rel/i18n/emqx_ee_bridge_cassa.hocon b/rel/i18n/emqx_ee_bridge_cassa.hocon index 3bbac6658..d86c95a5f 100644 --- a/rel/i18n/emqx_ee_bridge_cassa.hocon +++ b/rel/i18n/emqx_ee_bridge_cassa.hocon @@ -1,72 +1,43 @@ emqx_ee_bridge_cassa { - local_topic { - desc { - en: """The MQTT topic filter to be forwarded to Cassandra. All MQTT 'PUBLISH' messages with the topic +config_enable.desc: +"""Enable or disable this bridge""" + +config_enable.label: +"""Enable Or Disable Bridge""" + +cql_template.desc: +"""CQL Template""" + +cql_template.label: +"""CQL Template""" + +desc_config.desc: +"""Configuration for a Cassandra bridge.""" + +desc_config.label: +"""Cassandra Bridge Configuration""" + +desc_name.desc: +"""Bridge name.""" + +desc_name.label: +"""Bridge Name""" + +desc_type.desc: +"""The Bridge Type""" + +desc_type.label: +"""Bridge Type""" + +local_topic.desc: +"""The MQTT topic filter to be forwarded to Cassandra. All MQTT 'PUBLISH' messages with the topic matching the local_topic will be forwarded.
NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that match local_topic will be forwarded.""" - zh: """发送到 'local_topic' 的消息都会转发到 Cassandra。
-注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" - } - label { - en: "Local Topic" - zh: "本地 Topic" - } - } - cql_template { - desc { - en: """CQL Template""" - zh: """CQL 模板""" - } - label { - en: "CQL Template" - zh: "CQL 模板" - } - } - config_enable { - desc { - en: """Enable or disable this bridge""" - zh: """启用/禁用桥接""" - } - label { - en: "Enable Or Disable Bridge" - zh: "启用/禁用桥接" - } - } +local_topic.label: +"""Local Topic""" - desc_config { - desc { - en: """Configuration for a Cassandra bridge.""" - zh: """Cassandra 桥接配置""" - } - label: { - en: "Cassandra Bridge Configuration" - zh: "Cassandra 桥接配置" - } - } - - desc_type { - desc { - en: """The Bridge Type""" - zh: """Bridge 类型""" - } - label { - en: "Bridge Type" - zh: "桥接类型" - } - } - - desc_name { - desc { - en: """Bridge name.""" - zh: """桥接名字""" - } - label { - en: "Bridge Name" - zh: "桥接名字" - } - } } diff --git a/rel/i18n/emqx_ee_bridge_clickhouse.hocon b/rel/i18n/emqx_ee_bridge_clickhouse.hocon index b54f4dc70..6735aee22 100644 --- a/rel/i18n/emqx_ee_bridge_clickhouse.hocon +++ b/rel/i18n/emqx_ee_bridge_clickhouse.hocon @@ -1,81 +1,49 @@ emqx_ee_bridge_clickhouse { - local_topic { - desc { - en: """The MQTT topic filter to be forwarded to Clickhouse. All MQTT 'PUBLISH' messages with the topic +batch_value_separator.desc: +"""The default value ',' works for the VALUES format. You can also use other separator if other format is specified. See [INSERT INTO Statement](https://clickhouse.com/docs/en/sql-reference/statements/insert-into).""" + +batch_value_separator.label: +"""Batch Value Separator""" + +config_enable.desc: +"""Enable or disable this bridge""" + +config_enable.label: +"""Enable Or Disable Bridge""" + +desc_config.desc: +"""Configuration for a Clickhouse bridge.""" + +desc_config.label: +"""Clickhouse Bridge Configuration""" + +desc_name.desc: +"""Bridge name.""" + +desc_name.label: +"""Bridge Name""" + +desc_type.desc: +"""The Bridge Type""" + +desc_type.label: +"""Bridge Type""" + +local_topic.desc: +"""The MQTT topic filter to be forwarded to Clickhouse. All MQTT 'PUBLISH' messages with the topic matching the local_topic will be forwarded.
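The cql_template above accepts ${...} placeholders taken from the message context; a hypothetical template for an assumed mqtt_msg table could be:

    cql_template = "insert into mqtt_msg(msgid, topic, qos, payload, arrived) values (${id}, ${topic}, ${qos}, ${payload}, ${timestamp})"
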
NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that match local_topic will be forwarded.""" - zh: """发送到 'local_topic' 的消息都会转发到 Clickhouse。
-注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" - } - label { - en: "Local Topic" - zh: "本地 Topic" - } - } - sql_template { - desc { - en: """The template string can contain ${field} placeholders for message metadata and payload field. Make sure that the inserted values are formatted and escaped correctly. [Prepared Statement](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridges.html#Prepared-Statement) is not supported.""" - zh: """可以使用 ${field} 占位符来引用消息与客户端上下文中的变量,请确保对应字段存在且数据格式符合预期。此处不支持 [SQL 预处理](https://docs.emqx.com/zh/enterprise/v5.0/data-integration/data-bridges.html#sql-预处理)。""" - } - label { - en: "SQL Template" - zh: "SQL 模板" - } - } - batch_value_separator { - desc { - en: """The default value ',' works for the VALUES format. You can also use other separator if other format is specified. See [INSERT INTO Statement](https://clickhouse.com/docs/en/sql-reference/statements/insert-into).""" - zh: """默认为逗号 ',',适用于 VALUE 格式。您也可以使用其他分隔符, 请参考 [INSERT INTO 语句](https://clickhouse.com/docs/en/sql-reference/statements/insert-into)。""" - } - label { - en: "Batch Value Separator" - zh: "分隔符" - } - } - config_enable { - desc { - en: """Enable or disable this bridge""" - zh: """启用/禁用桥接""" - } - label { - en: "Enable Or Disable Bridge" - zh: "启用/禁用桥接" - } - } - desc_config { - desc { - en: """Configuration for a Clickhouse bridge.""" - zh: """Clickhouse 桥接配置""" - } - label: { - en: "Clickhouse Bridge Configuration" - zh: "Clickhouse 桥接配置" - } - } +local_topic.label: +"""Local Topic""" - desc_type { - desc { - en: """The Bridge Type""" - zh: """Bridge 类型""" - } - label { - en: "Bridge Type" - zh: "桥接类型" - } - } +sql_template.desc: +"""The template string can contain ${field} placeholders for message metadata and payload field. Make sure that the inserted values are formatted and escaped correctly. [Prepared Statement](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridges.html#Prepared-Statement) is not supported.""" + +sql_template.label: +"""SQL Template""" - desc_name { - desc { - en: """Bridge name.""" - zh: """桥接名字""" - } - label { - en: "Bridge Name" - zh: "桥接名字" - } - } } diff --git a/rel/i18n/emqx_ee_bridge_dynamo.hocon b/rel/i18n/emqx_ee_bridge_dynamo.hocon index b93b12166..7725130eb 100644 --- a/rel/i18n/emqx_ee_bridge_dynamo.hocon +++ b/rel/i18n/emqx_ee_bridge_dynamo.hocon @@ -1,72 +1,43 @@ emqx_ee_bridge_dynamo { - local_topic { - desc { - en: """The MQTT topic filter to be forwarded to DynamoDB. All MQTT `PUBLISH` messages with the topic +config_enable.desc: +"""Enable or disable this bridge""" + +config_enable.label: +"""Enable Or Disable Bridge""" + +desc_config.desc: +"""Configuration for a DynamoDB bridge.""" + +desc_config.label: +"""DynamoDB Bridge Configuration""" + +desc_name.desc: +"""Bridge name.""" + +desc_name.label: +"""Bridge Name""" + +desc_type.desc: +"""The Bridge Type""" + +desc_type.label: +"""Bridge Type""" + +local_topic.desc: +"""The MQTT topic filter to be forwarded to DynamoDB. All MQTT `PUBLISH` messages with the topic matching the `local_topic` will be forwarded.
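As an illustration of the ${field} placeholders mentioned for sql_template, a template for a hypothetical mqtt_messages table might read (VALUES format with the default ',' separator assumed):

    sql_template = "INSERT INTO mqtt_messages(clientid, topic, payload, arrived) VALUES ('${clientid}', '${topic}', '${payload}', ${timestamp})"
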
NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also `local_topic` is configured, then both the data got from the rule and the MQTT messages that match `local_topic` will be forwarded.""" - zh: """发送到 'local_topic' 的消息都会转发到 DynamoDB。
-注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" - } - label { - en: "Local Topic" - zh: "本地 Topic" - } - } - template { - desc { - en: """Template, the default value is empty. When this value is empty the whole message will be stored in the database""" - zh: """模板, 默认为空,为空时将会将整个消息存入数据库""" - } - label { - en: "Template" - zh: "模板" - } - } - config_enable { - desc { - en: """Enable or disable this bridge""" - zh: """启用/禁用桥接""" - } - label { - en: "Enable Or Disable Bridge" - zh: "启用/禁用桥接" - } - } +local_topic.label: +"""Local Topic""" - desc_config { - desc { - en: """Configuration for a DynamoDB bridge.""" - zh: """DynamoDB 桥接配置""" - } - label: { - en: "DynamoDB Bridge Configuration" - zh: "DynamoDB 桥接配置" - } - } +template.desc: +"""Template, the default value is empty. When this value is empty the whole message will be stored in the database""" - desc_type { - desc { - en: """The Bridge Type""" - zh: """Bridge 类型""" - } - label { - en: "Bridge Type" - zh: "桥接类型" - } - } +template.label: +"""Template""" - desc_name { - desc { - en: """Bridge name.""" - zh: """桥接名字""" - } - label { - en: "Bridge Name" - zh: "桥接名字" - } - } } diff --git a/rel/i18n/emqx_ee_bridge_gcp_pubsub.hocon b/rel/i18n/emqx_ee_bridge_gcp_pubsub.hocon index b8fa3b43a..6f864a524 100644 --- a/rel/i18n/emqx_ee_bridge_gcp_pubsub.hocon +++ b/rel/i18n/emqx_ee_bridge_gcp_pubsub.hocon @@ -1,145 +1,80 @@ emqx_ee_bridge_gcp_pubsub { - desc_config { - desc { - en: """Configuration for a GCP PubSub bridge.""" - zh: """GCP PubSub 桥接配置""" - } - label { - en: "GCP PubSub Bridge Configuration" - zh: "GCP PubSub 桥接配置" - } - } - desc_type { - desc { - en: """The Bridge Type""" - zh: """桥接类型""" - } - label { - en: "Bridge Type" - zh: "桥接类型" - } - } - desc_name { - desc { - en: """Bridge name, used as a human-readable description of the bridge.""" - zh: """桥接名字,可读描述""" - } - label { - en: "Bridge Name" - zh: "桥接名字" - } - } +connect_timeout.desc: +"""The timeout when connecting to the HTTP server.""" - connect_timeout { - desc { - en: "The timeout when connecting to the HTTP server." - zh: "连接 HTTP 服务器的超时时间。" - } - label: { - en: "Connect Timeout" - zh: "连接超时" - } - } +connect_timeout.label: +"""Connect Timeout""" - max_retries { - desc { - en: "Max retry times if an error occurs when sending a request." - zh: "请求出错时的最大重试次数。" - } - label: { - en: "Max Retries" - zh: "最大重试次数" - } - } +desc_config.desc: +"""Configuration for a GCP PubSub bridge.""" - pool_size { - desc { - en: "The pool size." - zh: "连接池大小。" - } - label: { - en: "Pool Size" - zh: "连接池大小" - } - } +desc_config.label: +"""GCP PubSub Bridge Configuration""" - pipelining { - desc { - en: "A positive integer. Whether to send HTTP requests continuously, when set to 1, it means that after each HTTP request is sent, you need to wait for the server to return and then continue to send the next request." - zh: "正整数,设置最大可发送的异步 HTTP 请求数量。当设置为 1 时,表示每次发送完成 HTTP 请求后都需要等待服务器返回,再继续发送下一个请求。" - } - label: { - en: "HTTP Pipelineing" - zh: "HTTP 流水线" - } - } +desc_name.desc: +"""Bridge name, used as a human-readable description of the bridge.""" - request_timeout { - desc { - en: "Deprecated: Configure the request timeout in the buffer settings." - zh: "废弃的。在缓冲区设置中配置请求超时。" - } - label: { - en: "Request Timeout" - zh: "HTTP 请求超时" - } - } +desc_name.label: +"""Bridge Name""" - payload_template { - desc { - en: "The template for formatting the outgoing messages. If undefined, will send all the available context in JSON format." 
- zh: "用于格式化外发信息的模板。 如果未定义,将以JSON格式发送所有可用的上下文。" - } - label: { - en: "Payload template" - zh: "HTTP 请求消息体模板" - } - } +desc_type.desc: +"""The Bridge Type""" - local_topic { - desc { - en: """The MQTT topic filter to be forwarded to GCP PubSub. All MQTT 'PUBLISH' messages with the topic +desc_type.label: +"""Bridge Type""" + +local_topic.desc: +"""The MQTT topic filter to be forwarded to GCP PubSub. All MQTT 'PUBLISH' messages with the topic matching `local_topic` will be forwarded.
NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that match local_topic will be forwarded.""" - zh: """发送到 'local_topic' 的消息都会转发到 GCP PubSub。
-注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 GCP PubSub。""" - } - label { - en: "Local Topic" - zh: "本地 Topic" - } - } - pubsub_topic { - desc { - en: "The GCP PubSub topic to publish messages to." - zh: "要发布消息的GCP PubSub主题。" - } - label: { - en: "GCP PubSub Topic" - zh: "GCP PubSub 主题" - } - } +local_topic.label: +"""Local Topic""" - service_account_json { - desc { - en: "JSON containing the GCP Service Account credentials to be used with PubSub.\n" - "When a GCP Service Account is created " - "(as described in https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount), " - "you have the option of downloading the credentials in JSON form. That's the " - "file needed." - zh: "包含将与 PubSub 一起使用的 GCP 服务账户凭证的 JSON。\n" - "当创建GCP服务账户时" - "(如https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount)," - "可以选择下载 JSON 形式的凭证,然后在该配置项中使用。" - } - label: { - en: "GCP Service Account Credentials" - zh: "GCP 服务账户凭证" - } - } +max_retries.desc: +"""Max retry times if an error occurs when sending a request.""" + +max_retries.label: +"""Max Retries""" + +payload_template.desc: +"""The template for formatting the outgoing messages. If undefined, will send all the available context in JSON format.""" + +payload_template.label: +"""Payload template""" + +pipelining.desc: +"""A positive integer. Whether to send HTTP requests continuously, when set to 1, it means that after each HTTP request is sent, you need to wait for the server to return and then continue to send the next request.""" + +pipelining.label: +"""HTTP Pipelineing""" + +pool_size.desc: +"""The pool size.""" + +pool_size.label: +"""Pool Size""" + +pubsub_topic.desc: +"""The GCP PubSub topic to publish messages to.""" + +pubsub_topic.label: +"""GCP PubSub Topic""" + +request_timeout.desc: +"""Deprecated: Configure the request timeout in the buffer settings.""" + +request_timeout.label: +"""Request Timeout""" + +service_account_json.desc: +"""JSON containing the GCP Service Account credentials to be used with PubSub. +When a GCP Service Account is created (as described in https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount), you have the option of downloading the credentials in JSON form. That's the file needed.""" + +service_account_json.label: +"""GCP Service Account Credentials""" } diff --git a/rel/i18n/emqx_ee_bridge_hstreamdb.hocon b/rel/i18n/emqx_ee_bridge_hstreamdb.hocon index dce40aa85..cb43d483a 100644 --- a/rel/i18n/emqx_ee_bridge_hstreamdb.hocon +++ b/rel/i18n/emqx_ee_bridge_hstreamdb.hocon @@ -1,90 +1,55 @@ emqx_ee_bridge_hstreamdb { - local_topic { - desc { - en: """The MQTT topic filter to be forwarded to the HStreamDB. 
All MQTT 'PUBLISH' messages with the topic + +config_direction.desc: +"""The direction of this bridge, MUST be 'egress'""" + +config_direction.label: +"""Bridge Direction""" + +config_enable.desc: +"""Enable or disable this bridge""" + +config_enable.label: +"""Enable Or Disable Bridge""" + +desc_config.desc: +"""Configuration for an HStreamDB bridge.""" + +desc_config.label: +"""HStreamDB Bridge Configuration""" + +desc_connector.desc: +"""Generic configuration for the connector.""" + +desc_connector.label: +"""Connector Generic Configuration""" + +desc_name.desc: +"""Bridge name, used as a human-readable description of the bridge.""" + +desc_name.label: +"""Bridge Name""" + +desc_type.desc: +"""The Bridge Type""" + +desc_type.label: +"""Bridge Type""" + +local_topic.desc: +"""The MQTT topic filter to be forwarded to the HStreamDB. All MQTT 'PUBLISH' messages with the topic matching the local_topic will be forwarded.
NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that match local_topic will be forwarded.""" - zh: """发送到 'local_topic' 的消息都会转发到 HStreamDB。
-注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 HStreamDB。""" - } - label { - en: "Local Topic" - zh: "本地 Topic" - } - } - payload { - desc { - en: """The payload to be forwarded to the HStreamDB. Placeholders supported.""" - zh: """要转发到 HStreamDB 的数据内容,支持占位符""" - } - label { - en: "Payload" - zh: "消息内容" - } - } - config_enable { - desc { - en: """Enable or disable this bridge""" - zh: """启用/禁用桥接""" - } - label { - en: "Enable Or Disable Bridge" - zh: "启用/禁用桥接" - } - } - config_direction { - desc { - en: """The direction of this bridge, MUST be 'egress'""" - zh: """桥接的方向, 必须是 egress""" - } - label { - en: "Bridge Direction" - zh: "桥接方向" - } - } - desc_config { - desc { - en: """Configuration for an HStreamDB bridge.""" - zh: """HStreamDB 桥接配置""" - } - label: { - en: "HStreamDB Bridge Configuration" - zh: "HStreamDB 桥接配置" - } - } +local_topic.label: +"""Local Topic""" - desc_type { - desc { - en: """The Bridge Type""" - zh: """Bridge 类型""" - } - label { - en: "Bridge Type" - zh: "桥接类型" - } - } +payload.desc: +"""The payload to be forwarded to the HStreamDB. Placeholders supported.""" + +payload.label: +"""Payload""" - desc_name { - desc { - en: """Bridge name, used as a human-readable description of the bridge.""" - zh: """桥接名字,可读描述""" - } - label { - en: "Bridge Name" - zh: "桥接名字" - } - } - desc_connector { - desc { - en: """Generic configuration for the connector.""" - zh: """连接器的通用配置。""" - } - label: { - en: "Connector Generic Configuration" - zh: "连接器通用配置。" - } - } } diff --git a/rel/i18n/emqx_ee_bridge_influxdb.hocon b/rel/i18n/emqx_ee_bridge_influxdb.hocon index d73d62b14..c5cee2b66 100644 --- a/rel/i18n/emqx_ee_bridge_influxdb.hocon +++ b/rel/i18n/emqx_ee_bridge_influxdb.hocon @@ -1,22 +1,41 @@ emqx_ee_bridge_influxdb { - local_topic { - desc { - en: """The MQTT topic filter to be forwarded to the InfluxDB. All MQTT 'PUBLISH' messages with the topic + +config_enable.desc: +"""Enable or disable this bridge.""" + +config_enable.label: +"""Enable Or Disable Bridge""" + +desc_config.desc: +"""Configuration for an InfluxDB bridge.""" + +desc_config.label: +"""InfluxDB Bridge Configuration""" + +desc_name.desc: +"""Bridge name.""" + +desc_name.label: +"""Bridge Name""" + +desc_type.desc: +"""The Bridge Type.""" + +desc_type.label: +"""Bridge Type""" + +local_topic.desc: +"""The MQTT topic filter to be forwarded to the InfluxDB. All MQTT 'PUBLISH' messages with the topic matching the local_topic will be forwarded.
NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that match local_topic will be forwarded.""" - zh: """发送到 'local_topic' 的消息都会转发到 InfluxDB。
-注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 InfluxDB。""" - } - label { - en: "Local Topic" - zh: "本地 Topic" - } - } - write_syntax { - desc { - en: """Conf of InfluxDB line protocol to write data points. It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point, and placeholder supported. + +local_topic.label: +"""Local Topic""" + +write_syntax.desc: +"""Conf of InfluxDB line protocol to write data points. It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point, and placeholder supported. See also [InfluxDB 2.3 Line Protocol](https://docs.influxdata.com/influxdb/v2.3/reference/syntax/line-protocol/) and [InfluxDB 1.8 Line Protocol](https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_tutorial/)
TLDR:
@@ -24,62 +43,8 @@ TLDR:
<measurement>[,<tag_key>=<tag_value>[,<tag_key>=<tag_value>]] <field_key>=<field_value>[,<field_key>=<field_value>] [<timestamp>] ``` Please note that a placeholder for an integer value must be annotated with a suffix `i`. For example `${payload.int_value}i`.""" - zh: """使用 InfluxDB API Line Protocol 写入 InfluxDB 的数据,支持占位符
-参考 [InfluxDB 2.3 Line Protocol](https://docs.influxdata.com/influxdb/v2.3/reference/syntax/line-protocol/) 及 -[InfluxDB 1.8 Line Protocol](https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_tutorial/)
-TLDR:
-``` -[,=[,=]] =[,=] [] -``` -注意,整形数值占位符后需要添加一个字符 `i` 类型标识。例如 `${payload.int_value}i`""" - } - label { - en: "Write Syntax" - zh: "写语句" - } - } - config_enable { - desc { - en: """Enable or disable this bridge.""" - zh: """启用/禁用桥接。""" - } - label { - en: "Enable Or Disable Bridge" - zh: "启用/禁用桥接" - } - } - desc_config { - desc { - en: """Configuration for an InfluxDB bridge.""" - zh: """InfluxDB 桥接配置。""" - } - label: { - en: "InfluxDB Bridge Configuration" - zh: "InfluxDB 桥接配置" - } - } - - desc_type { - desc { - en: """The Bridge Type.""" - zh: """桥接类型。""" - } - label { - en: "Bridge Type" - zh: "桥接类型" - } - } - - desc_name { - desc { - en: """Bridge name.""" - zh: """桥接名称。""" - } - label { - en: "Bridge Name" - zh: "桥接名称" - } - } +write_syntax.label: +"""Write Syntax""" } diff --git a/rel/i18n/emqx_ee_bridge_mongodb.hocon b/rel/i18n/emqx_ee_bridge_mongodb.hocon index 053c932f7..fab371824 100644 --- a/rel/i18n/emqx_ee_bridge_mongodb.hocon +++ b/rel/i18n/emqx_ee_bridge_mongodb.hocon @@ -1,100 +1,57 @@ emqx_ee_bridge_mongodb { - desc_config { - desc { - en: "Configuration for MongoDB Bridge" - zh: "为MongoDB桥配置" - } - label { - en: "MongoDB Bridge Configuration" - zh: "MongoDB桥配置" - } - } - enable { - desc { - en: "Enable or disable this MongoDB Bridge" - zh: "启用或停用该MongoDB桥" - } - label { - en: "Enable or disable" - zh: "启用或禁用" - } - } +collection.desc: +"""The collection where data will be stored into""" - collection { - desc { - en: "The collection where data will be stored into" - zh: "数据将被存储到的集合" - } - label { - en: "Collection to be used" - zh: "将要使用的集合(Collection)" - } - } +collection.label: +"""Collection to be used""" - mongodb_rs_conf { - desc { - en: "MongoDB (Replica Set) configuration" - zh: "MongoDB(Replica Set)配置" - } - label { - en: "MongoDB (Replica Set) Configuration" - zh: "MongoDB(Replica Set)配置" - } - } +desc_config.desc: +"""Configuration for MongoDB Bridge""" - mongodb_sharded_conf { - desc { - en: "MongoDB (Sharded) configuration" - zh: "MongoDB (Sharded)配置" - } - label { - en: "MongoDB (Sharded) Configuration" - zh: "MongoDB (Sharded)配置" - } - } +desc_config.label: +"""MongoDB Bridge Configuration""" - mongodb_single_conf { - desc { - en: "MongoDB (Standalone) configuration" - zh: "MongoDB(独立)配置" - } - label { - en: "MongoDB (Standalone) Configuration" - zh: "MongoDB(独立)配置" - } - } +desc_name.desc: +"""Bridge name.""" - desc_type { - desc { - en: """The Bridge Type.""" - zh: """桥接类型。""" - } - label { - en: "Bridge Type" - zh: "桥接类型" - } - } +desc_name.label: +"""Bridge Name""" - desc_name { - desc { - en: """Bridge name.""" - zh: """桥接名称。""" - } - label { - en: "Bridge Name" - zh: "桥接名称" - } - } +desc_type.desc: +"""The Bridge Type.""" + +desc_type.label: +"""Bridge Type""" + +enable.desc: +"""Enable or disable this MongoDB Bridge""" + +enable.label: +"""Enable or disable""" + +mongodb_rs_conf.desc: +"""MongoDB (Replica Set) configuration""" + +mongodb_rs_conf.label: +"""MongoDB (Replica Set) Configuration""" + +mongodb_sharded_conf.desc: +"""MongoDB (Sharded) configuration""" + +mongodb_sharded_conf.label: +"""MongoDB (Sharded) Configuration""" + +mongodb_single_conf.desc: +"""MongoDB (Standalone) configuration""" + +mongodb_single_conf.label: +"""MongoDB (Standalone) Configuration""" + +payload_template.desc: +"""The template for formatting the outgoing messages. 
If undefined, rule engine will use JSON format to serialize all visible inputs, such as clientid, topic, payload etc.""" + +payload_template.label: +"""Payload template""" - payload_template { - desc { - en: "The template for formatting the outgoing messages. If undefined, rule engine will use JSON format to serialize all visible inputs, such as clientid, topic, payload etc." - zh: "用于格式化写入 MongoDB 的消息模板。 如果未定义,规则引擎会使用 JSON 格式序列化所有的可见输入,例如 clientid, topic, payload 等。" - } - label: { - en: "Payload template" - zh: "有效载荷模板" - } - } } diff --git a/rel/i18n/emqx_ee_bridge_mysql.hocon b/rel/i18n/emqx_ee_bridge_mysql.hocon index 345fd9cba..bd627f726 100644 --- a/rel/i18n/emqx_ee_bridge_mysql.hocon +++ b/rel/i18n/emqx_ee_bridge_mysql.hocon @@ -1,72 +1,43 @@ emqx_ee_bridge_mysql { - local_topic { - desc { - en: """The MQTT topic filter to be forwarded to MySQL. All MQTT 'PUBLISH' messages with the topic +config_enable.desc: +"""Enable or disable this bridge""" + +config_enable.label: +"""Enable Or Disable Bridge""" + +desc_config.desc: +"""Configuration for an HStreamDB bridge.""" + +desc_config.label: +"""HStreamDB Bridge Configuration""" + +desc_name.desc: +"""Bridge name, used as a human-readable description of the bridge.""" + +desc_name.label: +"""Bridge Name""" + +desc_type.desc: +"""The Bridge Type""" + +desc_type.label: +"""Bridge Type""" + +local_topic.desc: +"""The MQTT topic filter to be forwarded to MySQL. All MQTT 'PUBLISH' messages with the topic matching the local_topic will be forwarded.
NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that match local_topic will be forwarded.""" - zh: """发送到 'local_topic' 的消息都会转发到 MySQL。
-注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" - } - label { - en: "Local Topic" - zh: "本地 Topic" - } - } - sql_template { - desc { - en: """SQL Template""" - zh: """SQL 模板""" - } - label { - en: "SQL Template" - zh: "SQL 模板" - } - } - config_enable { - desc { - en: """Enable or disable this bridge""" - zh: """启用/禁用桥接""" - } - label { - en: "Enable Or Disable Bridge" - zh: "启用/禁用桥接" - } - } +local_topic.label: +"""Local Topic""" - desc_config { - desc { - en: """Configuration for an HStreamDB bridge.""" - zh: """HStreamDB 桥接配置""" - } - label: { - en: "HStreamDB Bridge Configuration" - zh: "HStreamDB 桥接配置" - } - } +sql_template.desc: +"""SQL Template""" - desc_type { - desc { - en: """The Bridge Type""" - zh: """Bridge 类型""" - } - label { - en: "Bridge Type" - zh: "桥接类型" - } - } +sql_template.label: +"""SQL Template""" - desc_name { - desc { - en: """Bridge name, used as a human-readable description of the bridge.""" - zh: """桥接名字,可读描述""" - } - label { - en: "Bridge Name" - zh: "桥接名字" - } - } } diff --git a/rel/i18n/emqx_ee_bridge_pgsql.hocon b/rel/i18n/emqx_ee_bridge_pgsql.hocon index 1ce2818ca..94c263a56 100644 --- a/rel/i18n/emqx_ee_bridge_pgsql.hocon +++ b/rel/i18n/emqx_ee_bridge_pgsql.hocon @@ -1,72 +1,43 @@ emqx_ee_bridge_pgsql { - local_topic { - desc { - en: """The MQTT topic filter to be forwarded to PostgreSQL. All MQTT 'PUBLISH' messages with the topic +config_enable.desc: +"""Enable or disable this bridge""" + +config_enable.label: +"""Enable Or Disable Bridge""" + +desc_config.desc: +"""Configuration for a PostgreSQL bridge.""" + +desc_config.label: +"""PostgreSQL Bridge Configuration""" + +desc_name.desc: +"""Bridge name.""" + +desc_name.label: +"""Bridge Name""" + +desc_type.desc: +"""The Bridge Type""" + +desc_type.label: +"""Bridge Type""" + +local_topic.desc: +"""The MQTT topic filter to be forwarded to PostgreSQL. All MQTT 'PUBLISH' messages with the topic matching the local_topic will be forwarded.
NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that match local_topic will be forwarded.""" - zh: """发送到 'local_topic' 的消息都会转发到 PostgreSQL。
-注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" - } - label { - en: "Local Topic" - zh: "本地 Topic" - } - } - sql_template { - desc { - en: """SQL Template""" - zh: """SQL 模板""" - } - label { - en: "SQL Template" - zh: "SQL 模板" - } - } - config_enable { - desc { - en: """Enable or disable this bridge""" - zh: """启用/禁用桥接""" - } - label { - en: "Enable Or Disable Bridge" - zh: "启用/禁用桥接" - } - } +local_topic.label: +"""Local Topic""" - desc_config { - desc { - en: """Configuration for a PostgreSQL bridge.""" - zh: """PostgreSQL 桥接配置""" - } - label: { - en: "PostgreSQL Bridge Configuration" - zh: "PostgreSQL 桥接配置" - } - } +sql_template.desc: +"""SQL Template""" - desc_type { - desc { - en: """The Bridge Type""" - zh: """Bridge 类型""" - } - label { - en: "Bridge Type" - zh: "桥接类型" - } - } +sql_template.label: +"""SQL Template""" - desc_name { - desc { - en: """Bridge name.""" - zh: """桥接名字""" - } - label { - en: "Bridge Name" - zh: "桥接名字" - } - } } diff --git a/rel/i18n/emqx_ee_bridge_redis.hocon b/rel/i18n/emqx_ee_bridge_redis.hocon index 4de42b4e3..78db30196 100644 --- a/rel/i18n/emqx_ee_bridge_redis.hocon +++ b/rel/i18n/emqx_ee_bridge_redis.hocon @@ -1,74 +1,45 @@ emqx_ee_bridge_redis { - local_topic { - desc { - en: """The MQTT topic filter to be forwarded to Redis. All MQTT 'PUBLISH' messages with the topic + +command_template.desc: +"""Redis command template used to export messages. Each list element stands for a command name or its argument. +For example, to push payloads in a Redis list by key `msgs`, the elements should be the following: +`rpush`, `msgs`, `${payload}`.""" + +command_template.label: +"""Redis Command Template""" + +config_enable.desc: +"""Enable or disable this bridge""" + +config_enable.label: +"""Enable Or Disable Bridge""" + +desc_config.desc: +"""Configuration for a Redis bridge.""" + +desc_config.label: +"""Redis Bridge Configuration""" + +desc_name.desc: +"""Bridge name, used as a human-readable description of the bridge.""" + +desc_name.label: +"""Bridge Name""" + +desc_type.desc: +"""The Bridge Type""" + +desc_type.label: +"""Bridge Type""" + +local_topic.desc: +"""The MQTT topic filter to be forwarded to Redis. All MQTT 'PUBLISH' messages with the topic matching the local_topic will be forwarded.
NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that match local_topic will be forwarded.""" - zh: """发送到 'local_topic' 的消息都会转发到 Redis。
-注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 Redis。""" - } - label { - en: "Local Topic" - zh: "本地 Topic" - } - } - command_template { - desc { - en: """Redis command template used to export messages. Each list element stands for a command name or its argument. -For example, to push payloads in a Redis list by key `msgs`, the elements should be the following: -`rpush`, `msgs`, `${payload}`.""" - zh: """用于推送数据的 Redis 命令模板。 每个列表元素代表一个命令名称或其参数。 -例如,要通过键值 `msgs` 将消息体推送到 Redis 列表中,数组元素应该是: `rpush`, `msgs`, `${payload}`。""" - } - label { - en: "Redis Command Template" - zh: "Redis Command 模板" - } - } - config_enable { - desc { - en: """Enable or disable this bridge""" - zh: """启用/禁用桥接""" - } - label { - en: "Enable Or Disable Bridge" - zh: "启用/禁用桥接" - } - } +local_topic.label: +"""Local Topic""" - desc_config { - desc { - en: """Configuration for a Redis bridge.""" - zh: """Resis 桥接配置""" - } - label: { - en: "Redis Bridge Configuration" - zh: "Redis 桥接配置" - } - } - - desc_type { - desc { - en: """The Bridge Type""" - zh: """Bridge 类型""" - } - label { - en: "Bridge Type" - zh: "桥接类型" - } - } - - desc_name { - desc { - en: """Bridge name, used as a human-readable description of the bridge.""" - zh: """桥接名字,可读描述""" - } - label { - en: "Bridge Name" - zh: "桥接名字" - } - } } diff --git a/rel/i18n/emqx_ee_bridge_rocketmq.hocon b/rel/i18n/emqx_ee_bridge_rocketmq.hocon index 2e33e6c07..a545a7fca 100644 --- a/rel/i18n/emqx_ee_bridge_rocketmq.hocon +++ b/rel/i18n/emqx_ee_bridge_rocketmq.hocon @@ -1,70 +1,41 @@ emqx_ee_bridge_rocketmq { - local_topic { - desc { - en: """The MQTT topic filter to be forwarded to RocketMQ. All MQTT `PUBLISH` messages with the topic +config_enable.desc: +"""Enable or disable this bridge""" + +config_enable.label: +"""Enable Or Disable Bridge""" + +desc_config.desc: +"""Configuration for a RocketMQ bridge.""" + +desc_config.label: +"""RocketMQ Bridge Configuration""" + +desc_name.desc: +"""Bridge name.""" + +desc_name.label: +"""Bridge Name""" + +desc_type.desc: +"""The Bridge Type""" + +desc_type.label: +"""Bridge Type""" + +local_topic.desc: +"""The MQTT topic filter to be forwarded to RocketMQ. All MQTT `PUBLISH` messages with the topic matching the `local_topic` will be forwarded.
NOTE: if the bridge is used as a rule action, `local_topic` should be left empty otherwise the messages will be duplicated.""" - zh: """发送到 'local_topic' 的消息都会转发到 RocketMQ。
-注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" - } - label { - en: "Local Topic" - zh: "本地 Topic" - } - } - template { - desc { - en: """Template, the default value is empty. When this value is empty the whole message will be stored in the RocketMQ""" - zh: """模板, 默认为空,为空时将会将整个消息转发给 RocketMQ""" - } - label { - en: "Template" - zh: "模板" - } - } - config_enable { - desc { - en: """Enable or disable this bridge""" - zh: """启用/禁用桥接""" - } - label { - en: "Enable Or Disable Bridge" - zh: "启用/禁用桥接" - } - } +local_topic.label: +"""Local Topic""" - desc_config { - desc { - en: """Configuration for a RocketMQ bridge.""" - zh: """RocketMQ 桥接配置""" - } - label: { - en: "RocketMQ Bridge Configuration" - zh: "RocketMQ 桥接配置" - } - } +template.desc: +"""Template, the default value is empty. When this value is empty the whole message will be stored in the RocketMQ""" - desc_type { - desc { - en: """The Bridge Type""" - zh: """Bridge 类型""" - } - label { - en: "Bridge Type" - zh: "桥接类型" - } - } +template.label: +"""Template""" - desc_name { - desc { - en: """Bridge name.""" - zh: """桥接名字""" - } - label { - en: "Bridge Name" - zh: "桥接名字" - } - } } diff --git a/rel/i18n/emqx_ee_bridge_sqlserver.hocon b/rel/i18n/emqx_ee_bridge_sqlserver.hocon index a6f96f86d..396126622 100644 --- a/rel/i18n/emqx_ee_bridge_sqlserver.hocon +++ b/rel/i18n/emqx_ee_bridge_sqlserver.hocon @@ -1,85 +1,49 @@ emqx_ee_bridge_sqlserver { - local_topic { - desc { - en: """The MQTT topic filter to be forwarded to Microsoft SQL Server. All MQTT 'PUBLISH' messages with the topic +config_enable.desc: +"""Enable or disable this bridge""" + +config_enable.label: +"""Enable Or Disable Bridge""" + +desc_config.desc: +"""Configuration for a Microsoft SQL Server bridge.""" + +desc_config.label: +"""Microsoft SQL Server Bridge Configuration""" + +desc_name.desc: +"""Bridge name.""" + +desc_name.label: +"""Bridge Name""" + +desc_type.desc: +"""The Bridge Type""" + +desc_type.label: +"""Bridge Type""" + +driver.desc: +"""SQL Server Driver Name""" + +driver.label: +"""SQL Server Driver Name""" + +local_topic.desc: +"""The MQTT topic filter to be forwarded to Microsoft SQL Server. All MQTT 'PUBLISH' messages with the topic matching the local_topic will be forwarded.
NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that match local_topic will be forwarded.""" - zh: """发送到 'local_topic' 的消息都会转发到 Microsoft SQL Server。
-注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" - } - label { - en: """Local Topic""" - zh: """本地 Topic""" - } - } - sql_template { - desc { - en: """SQL Template""" - zh: """SQL 模板""" - } - label { - en: """SQL Template""" - zh: """SQL 模板""" - } - } +local_topic.label: +"""Local Topic""" - driver { - desc { - en: """SQL Server Driver Name""" - zh: """SQL Server Driver 名称""" - } - label { - en: """SQL Server Driver Name""" - zh: """SQL Server Driver 名称""" - } - } +sql_template.desc: +"""SQL Template""" - config_enable { - desc { - en: """Enable or disable this bridge""" - zh: """启用/禁用桥接""" - } - label { - en: """Enable Or Disable Bridge""" - zh: """启用/禁用桥接""" - } - } - - desc_config { - desc { - en: """Configuration for a Microsoft SQL Server bridge.""" - zh: """Microsoft SQL Server 桥接配置""" - } - label: { - en: """Microsoft SQL Server Bridge Configuration""" - zh: """Microsoft SQL Server 桥接配置""" - } - } - - desc_type { - desc { - en: """The Bridge Type""" - zh: """Bridge 类型""" - } - label { - en: """Bridge Type""" - zh: """桥接类型""" - } - } - - desc_name { - desc { - en: """Bridge name.""" - zh: """桥接名字""" - } - label { - en: """Bridge Name""" - zh: """桥接名字""" - } - } +sql_template.label: +"""SQL Template""" } diff --git a/rel/i18n/emqx_ee_bridge_tdengine.hocon b/rel/i18n/emqx_ee_bridge_tdengine.hocon index 12eb6f062..e6ece89c8 100644 --- a/rel/i18n/emqx_ee_bridge_tdengine.hocon +++ b/rel/i18n/emqx_ee_bridge_tdengine.hocon @@ -1,72 +1,43 @@ emqx_ee_bridge_tdengine { - local_topic { - desc { - en: """The MQTT topic filter to be forwarded to TDengine. All MQTT 'PUBLISH' messages with the topic +config_enable.desc: +"""Enable or disable this bridge""" + +config_enable.label: +"""Enable Or Disable Bridge""" + +desc_config.desc: +"""Configuration for a TDengine bridge.""" + +desc_config.label: +"""TDengine Bridge Configuration""" + +desc_name.desc: +"""Bridge name.""" + +desc_name.label: +"""Bridge Name""" + +desc_type.desc: +"""The Bridge Type""" + +desc_type.label: +"""Bridge Type""" + +local_topic.desc: +"""The MQTT topic filter to be forwarded to TDengine. All MQTT 'PUBLISH' messages with the topic matching the local_topic will be forwarded.
NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that match local_topic will be forwarded.""" - zh: """发送到 'local_topic' 的消息都会转发到 TDengine。
-注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" - } - label { - en: "Local Topic" - zh: "本地 Topic" - } - } - sql_template { - desc { - en: """SQL Template""" - zh: """SQL 模板""" - } - label { - en: "SQL Template" - zh: "SQL 模板" - } - } - config_enable { - desc { - en: """Enable or disable this bridge""" - zh: """启用/禁用桥接""" - } - label { - en: "Enable Or Disable Bridge" - zh: "启用/禁用桥接" - } - } +local_topic.label: +"""Local Topic""" - desc_config { - desc { - en: """Configuration for a TDengine bridge.""" - zh: """TDengine 桥接配置""" - } - label: { - en: "TDengine Bridge Configuration" - zh: "TDengine 桥接配置" - } - } +sql_template.desc: +"""SQL Template""" - desc_type { - desc { - en: """The Bridge Type""" - zh: """Bridge 类型""" - } - label { - en: "Bridge Type" - zh: "桥接类型" - } - } +sql_template.label: +"""SQL Template""" - desc_name { - desc { - en: """Bridge name.""" - zh: """桥接名字""" - } - label { - en: "Bridge Name" - zh: "桥接名字" - } - } } diff --git a/rel/i18n/emqx_ee_connector_cassa.hocon b/rel/i18n/emqx_ee_connector_cassa.hocon index ecf004722..bd5fb544c 100644 --- a/rel/i18n/emqx_ee_connector_cassa.hocon +++ b/rel/i18n/emqx_ee_connector_cassa.hocon @@ -1,28 +1,17 @@ emqx_ee_connector_cassa { - servers { - desc { - en: """The IPv4 or IPv6 address or the hostname to connect to.
+keyspace.desc: +"""Keyspace name to connect to.""" + +keyspace.label: +"""Keyspace""" + +servers.desc: +"""The IPv4 or IPv6 address or the hostname to connect to.
A host entry has the following form: `Host[:Port][,Host2:Port]`.
The Cassandra default port 9042 is used if `[:Port]` is not specified.""" - zh: """将要连接的 IPv4 或 IPv6 地址,或者主机名。
-主机名具有以下形式:`Host[:Port][,Host2:Port]`。
-如果未指定 `[:Port]`,则使用 Cassandra 默认端口 9042。""" - } - label: { - en: "Servers" - zh: "Servers" - } - } - keyspace { - desc { - en: """Keyspace name to connect to.""" - zh: """要连接到的 Keyspace 名称。""" - } - label: { - en: "Keyspace" - zh: "Keyspace" - } - } +servers.label: +"""Servers""" + } diff --git a/rel/i18n/emqx_ee_connector_clickhouse.hocon b/rel/i18n/emqx_ee_connector_clickhouse.hocon index 4d30e1715..cebba5aef 100644 --- a/rel/i18n/emqx_ee_connector_clickhouse.hocon +++ b/rel/i18n/emqx_ee_connector_clickhouse.hocon @@ -1,25 +1,15 @@ emqx_ee_connector_clickhouse { - base_url { - desc { - en: """The HTTP URL to the Clickhouse server that you want to connect to (for example http://myhostname:8123)""" - zh: """你想连接到的Clickhouse服务器的HTTP URL(例如http://myhostname:8123)。""" - } - label: { - en: "Server URL" - zh: "服务器 URL" - } - } +base_url.desc: +"""The HTTP URL to the Clickhouse server that you want to connect to (for example http://myhostname:8123)""" - connect_timeout { - desc { - en: "The timeout when connecting to the Clickhouse server." - zh: "连接HTTP服务器的超时时间。" - } - label: { - en: "Clickhouse Timeout" - zh: "连接超时" - } - } +base_url.label: +"""Server URL""" + +connect_timeout.desc: +"""The timeout when connecting to the Clickhouse server.""" + +connect_timeout.label: +"""Clickhouse Timeout""" } diff --git a/rel/i18n/emqx_ee_connector_dynamo.hocon b/rel/i18n/emqx_ee_connector_dynamo.hocon index 295929a72..939efaeec 100644 --- a/rel/i18n/emqx_ee_connector_dynamo.hocon +++ b/rel/i18n/emqx_ee_connector_dynamo.hocon @@ -1,14 +1,9 @@ emqx_ee_connector_dynamo { - url { - desc { - en: """The url of DynamoDB endpoint.""" - zh: """DynamoDB 的地址。""" - } - label: { - en: "DynamoDB Endpoint" - zh: "DynamoDB 地址" - } - } +url.desc: +"""The url of DynamoDB endpoint.""" + +url.label: +"""DynamoDB Endpoint""" } diff --git a/rel/i18n/emqx_ee_connector_hstreamdb.hocon b/rel/i18n/emqx_ee_connector_hstreamdb.hocon index 0826c8f0c..f6838297f 100644 --- a/rel/i18n/emqx_ee_connector_hstreamdb.hocon +++ b/rel/i18n/emqx_ee_connector_hstreamdb.hocon @@ -1,74 +1,45 @@ emqx_ee_connector_hstreamdb { - config { - desc { - en: "HStreamDB connection config" - zh: "HStreamDB 连接配置。" - } - label: { - en: "Connection config" - zh: "连接配置" - } - } - type { - desc { - en: "The Connector Type." - zh: "连接器类型。" - } - label: { - en: "Connector Type" - zh: "连接器类型" - } - } +config.desc: +"""HStreamDB connection config""" + +config.label: +"""Connection config""" + +name.desc: +"""Connector name, used as a human-readable description of the connector.""" + +name.label: +"""Connector Name""" + +ordering_key.desc: +"""HStreamDB Ordering Key""" + +ordering_key.label: +"""HStreamDB Ordering Key""" + +pool_size.desc: +"""HStreamDB Pool Size""" + +pool_size.label: +"""HStreamDB Pool Size""" + +stream_name.desc: +"""HStreamDB Stream Name""" + +stream_name.label: +"""HStreamDB Stream Name""" + +type.desc: +"""The Connector Type.""" + +type.label: +"""Connector Type""" + +url.desc: +"""HStreamDB Server URL""" + +url.label: +"""HStreamDB Server URL""" - name { - desc { - en: "Connector name, used as a human-readable description of the connector." 
- zh: "连接器名称,人类可读的连接器描述。" - } - label: { - en: "Connector Name" - zh: "连接器名称" - } - } - url { - desc { - en: """HStreamDB Server URL""" - zh: """HStreamDB 服务器 URL""" - } - label { - en: """HStreamDB Server URL""" - zh: """HStreamDB 服务器 URL""" - } - } - stream_name { - desc { - en: """HStreamDB Stream Name""" - zh: """HStreamDB 流名称""" - } - label { - en: """HStreamDB Stream Name""" - zh: """HStreamDB 流名称""" - } - } - ordering_key { - desc { - en: """HStreamDB Ordering Key""" - zh: """HStreamDB 分区键""" - } - label { - en: """HStreamDB Ordering Key""" - zh: """HStreamDB 分区键""" - } - } - pool_size { - desc { - en: """HStreamDB Pool Size""" - zh: """HStreamDB 连接池大小""" - } - label { - en: """HStreamDB Pool Size""" - zh: """HStreamDB 连接池大小""" - } - } } diff --git a/rel/i18n/emqx_ee_connector_influxdb.hocon b/rel/i18n/emqx_ee_connector_influxdb.hocon index 18ff48109..9c3b143a2 100644 --- a/rel/i18n/emqx_ee_connector_influxdb.hocon +++ b/rel/i18n/emqx_ee_connector_influxdb.hocon @@ -1,118 +1,71 @@ emqx_ee_connector_influxdb { - server { - desc { - en: """The IPv4 or IPv6 address or the hostname to connect to.
+bucket.desc: +"""InfluxDB bucket name.""" + +bucket.label: +"""Bucket""" + +database.desc: +"""InfluxDB database.""" + +database.label: +"""Database""" + +influxdb_api_v1.desc: +"""InfluxDB's protocol. Support InfluxDB v1.8 and before.""" + +influxdb_api_v1.label: +"""HTTP API Protocol""" + +influxdb_api_v2.desc: +"""InfluxDB's protocol. Support InfluxDB v2.0 and after.""" + +influxdb_api_v2.label: +"""HTTP API V2 Protocol""" + +org.desc: +"""Organization name of InfluxDB.""" + +org.label: +"""Organization""" + +password.desc: +"""InfluxDB password.""" + +password.label: +"""Password""" + +precision.desc: +"""InfluxDB time precision.""" + +precision.label: +"""Time Precision""" + +protocol.desc: +"""InfluxDB's protocol. HTTP API or HTTP API V2.""" + +protocol.label: +"""Protocol""" + +server.desc: +"""The IPv4 or IPv6 address or the hostname to connect to.
A host entry has the following form: `Host[:Port]`.
The InfluxDB default port 8086 is used if `[:Port]` is not specified.""" - zh: """将要连接的 IPv4 或 IPv6 地址,或者主机名。
-主机名具有以下形式:`Host[:Port]`。
-如果未指定 `[:Port]`,则使用 InfluxDB 默认端口 8086。""" - } - label { - en: "Server Host" - zh: "服务器地址" - } - } - precision { - desc { - en: """InfluxDB time precision.""" - zh: """InfluxDB 时间精度。""" - } - label { - en: """Time Precision""" - zh: """时间精度""" - } - } - protocol { - desc { - en: """InfluxDB's protocol. HTTP API or HTTP API V2.""" - zh: """InfluxDB 协议。HTTP API 或 HTTP API V2。""" - } - label { - en: """Protocol""" - zh: """协议""" - } - } - influxdb_api_v1 { - desc { - en: """InfluxDB's protocol. Support InfluxDB v1.8 and before.""" - zh: """InfluxDB HTTP API 协议。支持 Influxdb v1.8 以及之前的版本。""" - } - label { - en: """HTTP API Protocol""" - zh: """HTTP API 协议""" - } - } - influxdb_api_v2 { - desc { - en: """InfluxDB's protocol. Support InfluxDB v2.0 and after.""" - zh: """InfluxDB HTTP API V2 协议。支持 Influxdb v2.0 以及之后的版本。""" - } - label { - en: """HTTP API V2 Protocol""" - zh: """HTTP API V2 协议""" - } - } - database { - desc { - en: """InfluxDB database.""" - zh: """InfluxDB 数据库。""" - } - label { - en: "Database" - zh: "数据库" - } - } - username { - desc { - en: "InfluxDB username." - zh: "InfluxDB 用户名。" - } - label { - en: "Username" - zh: "用户名" - } - } - password { - desc { - en: "InfluxDB password." - zh: "InfluxDB 密码。" - } - label { - en: "Password" - zh: "密码" - } - } - bucket { - desc { - en: "InfluxDB bucket name." - zh: "InfluxDB bucket 名称。" - } - label { - en: "Bucket" - zh: "Bucket" - } - } - org { - desc { - en: """Organization name of InfluxDB.""" - zh: """InfluxDB 组织名称。""" - } - label { - en: """Organization""" - zh: """组织""" - } - } - token { - desc { - en: """InfluxDB token.""" - zh: """InfluxDB token。""" - } - label { - en: """Token""" - zh: """Token""" - } - } + +server.label: +"""Server Host""" + +token.desc: +"""InfluxDB token.""" + +token.label: +"""Token""" + +username.desc: +"""InfluxDB username.""" + +username.label: +"""Username""" } diff --git a/rel/i18n/emqx_ee_connector_rocketmq.hocon b/rel/i18n/emqx_ee_connector_rocketmq.hocon index 44dda7931..672dcafce 100644 --- a/rel/i18n/emqx_ee_connector_rocketmq.hocon +++ b/rel/i18n/emqx_ee_connector_rocketmq.hocon @@ -1,62 +1,35 @@ emqx_ee_connector_rocketmq { - server { - desc { - en: """The IPv4 or IPv6 address or the hostname to connect to.
+refresh_interval.desc: +"""RocketMQ Topic Route Refresh Interval.""" + +refresh_interval.label: +"""Topic Route Refresh Interval""" + +security_token.desc: +"""RocketMQ Server Security Token""" + +security_token.label: +"""Security Token""" + +send_buffer.desc: +"""The socket send buffer size of the RocketMQ driver client.""" + +send_buffer.label: +"""Send Buffer Size""" + +server.desc: +"""The IPv4 or IPv6 address or the hostname to connect to.
A host entry has the following form: `Host[:Port]`.
The RocketMQ default port 9876 is used if `[:Port]` is not specified.""" - zh: """将要连接的 IPv4 或 IPv6 地址,或者主机名。
-主机名具有以下形式:`Host[:Port]`。
-如果未指定 `[:Port]`,则使用 RocketMQ 默认端口 9876。""" - } - label: { - en: "Server Host" - zh: "服务器地址" - } - } - topic { - desc { - en: """RocketMQ Topic""" - zh: """RocketMQ 主题""" - } - label: { - en: "RocketMQ Topic" - zh: "RocketMQ 主题" - } - } +server.label: +"""Server Host""" - refresh_interval { - desc { - en: """RocketMQ Topic Route Refresh Interval.""" - zh: """RocketMQ 主题路由更新间隔。""" - } - label: { - en: "Topic Route Refresh Interval" - zh: "主题路由更新间隔" - } - } +topic.desc: +"""RocketMQ Topic""" - send_buffer { - desc { - en: """The socket send buffer size of the RocketMQ driver client.""" - zh: """RocketMQ 驱动的套字节发送消息的缓冲区大小""" - } - label: { - en: "Send Buffer Size" - zh: "发送消息的缓冲区大小" - } - } - - security_token { - desc { - en: """RocketMQ Server Security Token""" - zh: """RocketMQ 服务器安全令牌""" - } - label: { - en: "Security Token" - zh: "安全令牌" - } - } +topic.label: +"""RocketMQ Topic""" } diff --git a/rel/i18n/emqx_ee_connector_sqlserver.hocon b/rel/i18n/emqx_ee_connector_sqlserver.hocon index 85280d833..ef68865fe 100644 --- a/rel/i18n/emqx_ee_connector_sqlserver.hocon +++ b/rel/i18n/emqx_ee_connector_sqlserver.hocon @@ -1,18 +1,11 @@ emqx_ee_connector_sqlserver { - server { - desc { - en: """The IPv4 or IPv6 address or the hostname to connect to.
+server.desc: +"""The IPv4 or IPv6 address or the hostname to connect to.
A host entry has the following form: `Host[:Port]`.
The SQL Server default port 1433 is used if `[:Port]` is not specified.""" - zh: """将要连接的 IPv4 或 IPv6 地址,或者主机名。
-主机名具有以下形式:`Host[:Port]`。
-如果未指定 `[:Port]`,则使用 SQL Server 默认端口 1433。""" - } - label: { - en: "Server Host" - zh: "服务器地址" - } - } + +server.label: +"""Server Host""" } diff --git a/rel/i18n/emqx_ee_connector_tdengine.hocon b/rel/i18n/emqx_ee_connector_tdengine.hocon index 02254124c..9a34b32ce 100644 --- a/rel/i18n/emqx_ee_connector_tdengine.hocon +++ b/rel/i18n/emqx_ee_connector_tdengine.hocon @@ -1,18 +1,11 @@ emqx_ee_connector_tdengine { - server { - desc { - en: """The IPv4 or IPv6 address or the hostname to connect to.
+server.desc: +"""The IPv4 or IPv6 address or the hostname to connect to.
A host entry has the following form: `Host[:Port]`.
The TDengine default port 6041 is used if `[:Port]` is not specified.""" - zh: """将要连接的 IPv4 或 IPv6 地址,或者主机名。
-主机名具有以下形式:`Host[:Port]`。
-如果未指定 `[:Port]`,则使用 TDengine 默认端口 6041。""" - } - label: { - en: "Server Host" - zh: "服务器地址" - } - } + +server.label: +"""Server Host""" } diff --git a/rel/i18n/emqx_ee_schema_registry_http_api.hocon b/rel/i18n/emqx_ee_schema_registry_http_api.hocon index 058796a66..09f268459 100644 --- a/rel/i18n/emqx_ee_schema_registry_http_api.hocon +++ b/rel/i18n/emqx_ee_schema_registry_http_api.hocon @@ -1,69 +1,39 @@ emqx_ee_schema_registry_http_api { - # apis - desc_schema_registry_api_list { - desc { - en: "List all registered schemas" - zh: "列出所有注册的模式" - } - label { - en: "List schemas" - zh: "列表模式" - } - } - desc_schema_registry_api_get { - desc { - en: "Get a schema by its name" - zh: "通过名称获取模式" - } - label { - en: "Get schema" - zh: "获取模式" - } - } +desc_param_path_schema_name.desc: +"""The schema name""" - desc_schema_registry_api_post { - desc { - en: "Register a new schema" - zh: "注册一个新的模式" - } - label { - en: "Register schema" - zh: "注册模式" - } - } +desc_param_path_schema_name.label: +"""Schema name""" - desc_schema_registry_api_put { - desc { - en: "Update an existing schema" - zh: "更新一个现有的模式" - } - label { - en: "Update schema" - zh: "更新模式" - } - } +desc_schema_registry_api_delete.desc: +"""Delete a schema""" - desc_schema_registry_api_delete { - desc { - en: "Delete a schema" - zh: "删除一个模式" - } - label { - en: "Delete schema" - zh: "删除模式" - } - } +desc_schema_registry_api_delete.label: +"""Delete schema""" + +desc_schema_registry_api_get.desc: +"""Get a schema by its name""" + +desc_schema_registry_api_get.label: +"""Get schema""" + +desc_schema_registry_api_list.desc: +"""List all registered schemas""" + +desc_schema_registry_api_list.label: +"""List schemas""" + +desc_schema_registry_api_post.desc: +"""Register a new schema""" + +desc_schema_registry_api_post.label: +"""Register schema""" + +desc_schema_registry_api_put.desc: +"""Update an existing schema""" + +desc_schema_registry_api_put.label: +"""Update schema""" - # params - desc_param_path_schema_name { - desc { - en: "The schema name" - zh: "模式名称" - } - label { - en: "Schema name" - zh: "模式名称" - } - } } diff --git a/rel/i18n/emqx_ee_schema_registry_schema.hocon b/rel/i18n/emqx_ee_schema_registry_schema.hocon index 1538fe5f9..667c4c0a4 100644 --- a/rel/i18n/emqx_ee_schema_registry_schema.hocon +++ b/rel/i18n/emqx_ee_schema_registry_schema.hocon @@ -1,78 +1,45 @@ emqx_ee_schema_registry_schema { - schema_registry_root { - desc { - en: "Schema registry configurations." - zh: "模式注册表的配置。" - } - label { - en: "Schema registry" - zh: "模式注册表" - } - } - schema_registry_schemas { - desc { - en: "Registered schemas." - zh: "注册的模式。" - } - label { - en: "Registered schemas" - zh: "注册的模式" - } - } +avro_type.desc: +"""[Apache Avro](https://avro.apache.org/) serialization format.""" - schema_name { - desc { - en: "A name for the schema that will serve as its identifier." - zh: "模式的一个名称,将作为其标识符。" - } - label { - en: "Schema name" - zh: "模式名称" - } - } +avro_type.label: +"""Apache Avro""" - schema_type { - desc { - en: "Schema type." - zh: "模式类型。" - } - label { - en: "Schema type" - zh: "模式类型" - } - } +schema_description.desc: +"""A description for this schema.""" - schema_source { - desc { - en: "Source text for the schema." - zh: "模式的源文本。" - } - label { - en: "Schema source" - zh: "模式来源" - } - } +schema_description.label: +"""Schema description""" - schema_description { - desc { - en: "A description for this schema." 
- zh: "对该模式的描述。" - } - label { - en: "Schema description" - zh: "模式描述" - } - } +schema_name.desc: +"""A name for the schema that will serve as its identifier.""" + +schema_name.label: +"""Schema name""" + +schema_registry_root.desc: +"""Schema registry configurations.""" + +schema_registry_root.label: +"""Schema registry""" + +schema_registry_schemas.desc: +"""Registered schemas.""" + +schema_registry_schemas.label: +"""Registered schemas""" + +schema_source.desc: +"""Source text for the schema.""" + +schema_source.label: +"""Schema source""" + +schema_type.desc: +"""Schema type.""" + +schema_type.label: +"""Schema type""" - avro_type { - desc { - en: "[Apache Avro](https://avro.apache.org/) serialization format." - zh: "[阿帕奇-阿夫罗](https://avro.apache.org/) 序列化格式。" - } - label { - en: "Apache Avro" - zh: "阿帕奇-阿夫罗" - } - } } diff --git a/rel/i18n/emqx_exhook_api.hocon b/rel/i18n/emqx_exhook_api.hocon index 3ec5367ed..9cb7177c1 100644 --- a/rel/i18n/emqx_exhook_api.hocon +++ b/rel/i18n/emqx_exhook_api.hocon @@ -1,180 +1,81 @@ emqx_exhook_api { - list_all_servers { - desc { - en: "List all servers" - zh: "查看ExHook 服务器列表" - } - } +add_server.desc: +"""Add a server""" - add_server { - desc { - en: "Add a server" - zh: "添加 ExHook 服务器" - } - } +delete_server.desc: +"""Delete the server""" - get_detail { - desc { - en: "Get the detail information of Exhook server" - zh: "查看 Exhook 服务器详细信息" - } - } +get_detail.desc: +"""Get the detail information of Exhook server""" - update_server { - desc { - en: "Update the server" - zh: "更新 Exhook 服务器" - } - } +get_hooks.desc: +"""Get the hooks information of server""" - delete_server { - desc { - en: "Delete the server" - zh: "删除 Exhook 服务器" - } - } +hook_metrics.desc: +"""Metrics information of this hook in the current node""" - get_hooks { - desc { - en: "Get the hooks information of server" - zh: "获取 Exhook 服务器的钩子信息" - } - } +hook_name.desc: +"""The hook's name""" - move_api { - desc { - en: """Move the server. -NOTE: The position should be \"front | rear | before:{name} | after:{name}""" - zh: """移动 Exhook 服务器顺序。 -注意: 移动的参数只能是:front | rear | before:{name} | after:{name}""" - } - label { - en: "Change order of execution for registered Exhook server" - zh: "改变已注册的Exhook服务器的执行顺序" - } - } +hook_params.desc: +"""The parameters used when the hook is registered""" - move_position { - desc { - en: "The target position to be moved" - zh: "移动的方向" - } - } +list_all_servers.desc: +"""List all servers""" - hook_name { - desc { - en: "The hook's name" - zh: "钩子的名称" - } - } +metric_failed.desc: +"""The number of times the hook execution failed""" - server_name { - desc { - en: "The Exhook server name" - zh: "Exhook 服务器的名称" - } - } +metric_max_rate.desc: +"""Maximum call rate of hooks""" - hook_params { - desc { - en: "The parameters used when the hook is registered" - zh: "钩子注册时使用的参数" - } - } +metric_rate.desc: +"""The call rate of hooks""" - server_metrics { - desc { - en: "Metrics information of this server in the current node" - zh: "当前节点中该服务器的指标信息" - } - } +metric_succeed.desc: +"""The number of times the hooks execution successful""" - node_metrics { - desc { - en: "Metrics information of this server in all nodes" - zh: "所有节点中该服务器的指标信息" - } - } +metrics.desc: +"""Metrics information""" - node_status { - desc { - en: "status of this server in all nodes" - zh: "所有节点中该服务器的状态信息" - } - } +move_api.desc: +"""Move the server. 
+NOTE: The position should be "front | rear | before:{name} | after:{name}""" - hook_metrics { - desc { - en: "Metrics information of this hook in the current node" - zh: "当前节点中该钩子的指标信息" - } - } +move_api.label: +"""Change order of execution for registered Exhook server""" - node_hook_metrics { - desc { - en: "Metrics information of this hook in all nodes" - zh: "所有节点中该钩子的指标信息" - } - } +move_position.desc: +"""The target position to be moved""" - node { - desc { - en: "Node name" - zh: "节点名称" - } - } +node.desc: +"""Node name""" - metrics { - desc { - en: "Metrics information" - zh: "指标信息" - } - } +node_hook_metrics.desc: +"""Metrics information of this hook in all nodes""" - status { - desc { - en: """The status of Exhook server.
+node_metrics.desc: +"""Metrics information of this server in all nodes""" + +node_status.desc: +"""status of this server in all nodes""" + +server_metrics.desc: +"""Metrics information of this server in the current node""" + +server_name.desc: +"""The Exhook server name""" + +status.desc: +"""The status of Exhook server.
connected: connection succeeded
connecting: connection failed, reconnecting
disconnected: failed to connect and didn't reconnect
disabled: this server is disabled
error: failed to view the status of this server""" - zh: """Exhook 服务器的状态。
-connected: 连接成功
-connecting: 连接失败,重连中
-disconnected: 连接失败,且未设置自动重连
-disabled: 该服务器未开启
-error: 查看该服务器状态时发生错误""" - } - } - metric_succeed { - desc { - en: "The number of times the hooks execution successful" - zh: "钩子执行成功的次数" - } - } - - metric_failed { - desc { - en: "The number of times the hook execution failed" - zh: "钩子执行失败的次数" - } - } - - metric_rate { - desc { - en: "The call rate of hooks" - zh: "钩子的调用速率" - } - } - - metric_max_rate { - desc { - en: "Maximum call rate of hooks" - zh: "钩子的最大调用速率" - } - } +update_server.desc: +"""Update the server""" } diff --git a/rel/i18n/emqx_exhook_schema.hocon b/rel/i18n/emqx_exhook_schema.hocon index 5b34a245a..6d262fae7 100644 --- a/rel/i18n/emqx_exhook_schema.hocon +++ b/rel/i18n/emqx_exhook_schema.hocon @@ -1,97 +1,45 @@ emqx_exhook_schema { - servers { - desc { - en: "List of exhook servers" - zh: "ExHook 服务器列表" - } - } - - name { - desc { - en: "Name of the exhook server" - zh: "ExHook 服务器名称" - } - } - - enable { - desc { - en: "Enable this Exhook server" - zh: "开启这个 Exhook 服务器" - } - } - - url { - desc { - en: "URL of the gRPC server" - zh: "gRPC 服务器地址" - } - } - - request_timeout { - desc { - en: "The timeout of request gRPC server" - zh: "gRPC 服务器请求超时时间" - } - } - - failed_action { - desc { - en: "The value that is returned when the request to the gRPC server fails for any reason" - zh: "当 gRPC 请求失败后的操作" - } - } - - auto_reconnect { - desc { - en: """Whether to automatically reconnect (initialize) the gRPC server. +auto_reconnect.desc: +"""Whether to automatically reconnect (initialize) the gRPC server. When gRPC is not available, Exhook tries to request the gRPC service at that interval and reinitialize the list of mounted hooks.""" - zh: """自动重连到 gRPC 服务器的设置。 -当 gRPC 服务器不可用时,Exhook 将会按照这里设置的间隔时间进行重连,并重新初始化注册的钩子""" - } - } - pool_size { - desc { - en: "The process pool size for gRPC client" - zh: "gRPC 客户端进程池大小" - } - } +enable.desc: +"""Enable this Exhook server""" - socket_options { - desc { - en: "Connection socket options" - zh: "连接套接字设置" - } - } +failed_action.desc: +"""The value that is returned when the request to the gRPC server fails for any reason""" - keepalive { - desc { - en: """Enables/disables periodic transmission on a connected socket when no other data is exchanged. +keepalive.desc: +"""Enables/disables periodic transmission on a connected socket when no other data is exchanged. 
If the other end does not respond, the connection is considered broken and an error message is sent to the controlling process.""" - zh: """当没有其他数据交换时,是否向连接的对端套接字定期的发送探测包。如果另一端没有响应,则认为连接断开,并向控制进程发送错误消息""" - } - } - nodelay { - desc { - en: """If true, option TCP_NODELAY is turned on for the socket, +name.desc: +"""Name of the exhook server""" + +nodelay.desc: +"""If true, option TCP_NODELAY is turned on for the socket, which means that also small amounts of data are sent immediately""" - zh: "如果为 true,则为套接字设置 TCP_NODELAY 选项,这意味着会立即发送数据包" - } - } - recbuf { - desc { - en: "The minimum size of receive buffer to use for the socket" - zh: "套接字的最小接收缓冲区大小" - } - } +pool_size.desc: +"""The process pool size for gRPC client""" + +recbuf.desc: +"""The minimum size of receive buffer to use for the socket""" + +request_timeout.desc: +"""The timeout of request gRPC server""" + +servers.desc: +"""List of exhook servers""" + +sndbuf.desc: +"""The minimum size of send buffer to use for the socket""" + +socket_options.desc: +"""Connection socket options""" + +url.desc: +"""URL of the gRPC server""" - sndbuf { - desc { - en: "The minimum size of send buffer to use for the socket" - zh: "套接字的最小发送缓冲区大小" - } - } } diff --git a/rel/i18n/emqx_exproto_schema.hocon b/rel/i18n/emqx_exproto_schema.hocon index 0c6fd2286..eed450208 100644 --- a/rel/i18n/emqx_exproto_schema.hocon +++ b/rel/i18n/emqx_exproto_schema.hocon @@ -1,52 +1,24 @@ emqx_exproto_schema { - exproto { - desc { - en: """The Extension Protocol configuration""" - zh: """ExProto 网关""" - } - } - exproto_server { - desc { - en: """Configurations for starting the ConnectionAdapter service""" - zh: """配置 ExProto 网关需要启动的 ConnectionAdapter 服务。 -该服务用于提供客户端的认证、发布、订阅和数据下行等功能。""" - } - } +exproto.desc: +"""The Extension Protocol configuration""" - exproto_grpc_server_bind { - desc { - en: """Listening address and port for the gRPC server.""" - zh: """服务监听地址和端口。""" - } - } +exproto_grpc_handler_address.desc: +"""gRPC server address.""" - exproto_grpc_server_ssl { - desc { - en: """SSL configuration for the gRPC server.""" - zh: """服务 SSL 配置。""" - } - } +exproto_grpc_handler_ssl.desc: +"""SSL configuration for the gRPC client.""" - exproto_handler { - desc { - en: """Configurations for request to ConnectionHandler service""" - zh: """配置 ExProto 网关需要请求的 ConnectionHandler 服务地址。 -该服务用于给 ExProto 提供客户端的 Socket 事件处理、字节解码、订阅消息接收等功能。""" - } - } +exproto_grpc_server_bind.desc: +"""Listening address and port for the gRPC server.""" - exproto_grpc_handler_address { - desc { - en: """gRPC server address.""" - zh: """对端 gRPC 服务器地址。""" - } - } +exproto_grpc_server_ssl.desc: +"""SSL configuration for the gRPC server.""" + +exproto_handler.desc: +"""Configurations for request to ConnectionHandler service""" + +exproto_server.desc: +"""Configurations for starting the ConnectionAdapter service""" - exproto_grpc_handler_ssl { - desc { - en: """SSL configuration for the gRPC client.""" - zh: """gRPC 客户端的 SSL 配置。""" - } - } } diff --git a/rel/i18n/emqx_gateway_api.hocon b/rel/i18n/emqx_gateway_api.hocon index 18ab1f242..1e0e22456 100644 --- a/rel/i18n/emqx_gateway_api.hocon +++ b/rel/i18n/emqx_gateway_api.hocon @@ -1,166 +1,73 @@ emqx_gateway_api { - list_gateway { - desc { - en: """This API returns an overview info for the specified or all gateways. 
-including current running status, number of connections, listener status, etc.""" - zh: """该接口会返回指定或所有网关的概览状态, -包括当前状态、连接数、监听器状态等。""" - } - } +delete_gateway.desc: +"""Unload the specified gateway""" - enable_gateway { - desc { - en: """Enable a gateway by confs.""" - zh: """使用配置启动某一网关。""" - } - } +enable_gateway.desc: +"""Enable a gateway by confs.""" - get_gateway { - desc { - en: """Get the gateway configurations""" - zh: """获取网关配置详情""" - } - } +gateway_created_at.desc: +"""The Gateway created datetime""" - delete_gateway { - desc { - en: """Unload the specified gateway""" - zh: """停用指定网关""" - } - } +gateway_current_connections.desc: +"""The Gateway current connected connections/clients""" - update_gateway { - desc { - en: """Update the gateway basic configurations and running status.
-Note: The Authentication and Listener configurations should be updated by other special APIs.""" - zh: """更新指定网关的基础配置、和启用的状态。
-注:认证、和监听器的配置更新需参考对应的 API 接口。""" - } - } +gateway_enable_in_path.desc: +"""Whether to enable this gateway""" - gateway_name { - desc { - en: """Gateway Name""" - zh: """网关名称""" - } - } +gateway_listener_id.desc: +"""Listener ID""" - gateway_name_in_qs { - desc { - en: """Gateway Name.
+gateway_listener_name.desc: +"""Listener Name""" + +gateway_listener_running.desc: +"""Listener Running status""" + +gateway_listener_type.desc: +"""Listener Type""" + +gateway_listeners.desc: +"""The Gateway listeners overview""" + +gateway_max_connections.desc: +"""The Gateway allowed maximum connections/clients""" + +gateway_name.desc: +"""Gateway Name""" + +gateway_name_in_qs.desc: +"""Gateway Name.
It's enum with `stomp`, `mqttsn`, `coap`, `lwm2m`, `exproto`""" - zh: """网关名称.
-可取值为 `stomp`、`mqttsn`、`coap`、`lwm2m`、`exproto`""" - } - } - gateway_enable_in_path { - desc { - en: """Whether to enable this gateway""" +gateway_node_status.desc: +"""The status of the gateway on each node in the cluster""" - zh: """是否开启此网关""" - } - } +gateway_started_at.desc: +"""The Gateway started datetime""" - gateway_status { - desc { - en: """Gateway status""" - zh: """网关启用状态""" - } - } +gateway_status.desc: +"""Gateway status""" - gateway_status_in_qs { - desc { - en: """Filter gateways by status.
+gateway_status_in_qs.desc: +"""Filter gateways by status.
It is enum with `running`, `stopped`, `unloaded`""" - zh: """通过网关状态筛选
-可选值为 `running`、`stopped`、`unloaded`""" - } - } - gateway_created_at { - desc { - en: """The Gateway created datetime""" - zh: """网关创建时间""" - } - } +gateway_stopped_at.desc: +"""The Gateway stopped datetime""" - gateway_started_at { - desc { - en: """The Gateway started datetime""" - zh: """网关启用时间""" - } - } +get_gateway.desc: +"""Get the gateway configurations""" - gateway_stopped_at { - desc { - en: """The Gateway stopped datetime""" - zh: """网关停用时间""" - } - } +list_gateway.desc: +"""This API returns an overview info for the specified or all gateways. +including current running status, number of connections, listener status, etc.""" - gateway_max_connections { - desc { - en: """The Gateway allowed maximum connections/clients""" - zh: """最大连接数""" - } - } +node.desc: +"""Node Name""" - gateway_current_connections { - desc { - en: """The Gateway current connected connections/clients""" - zh: """当前连接数""" - } - } - - gateway_listeners { - desc { - en: """The Gateway listeners overview""" - zh: """网关监听器列表""" - } - } - - gateway_listener_id { - desc { - en: """Listener ID""" - zh: """监听器 ID""" - } - } - - gateway_listener_name { - desc { - en: """Listener Name""" - zh: """监听器名称""" - } - } - - gateway_listener_running { - desc { - en: """Listener Running status""" - zh: """监听器运行状态""" - } - } - - gateway_listener_type { - desc { - en: """Listener Type""" - zh: """监听器类型""" - } - } - - gateway_node_status { - desc { - en: """The status of the gateway on each node in the cluster""" - zh: """网关在集群中每个节点上的状态""" - } - } - - node { - desc { - en: """Node Name""" - zh: """节点名称""" - } - } +update_gateway.desc: +"""Update the gateway basic configurations and running status.
+Note: The Authentication and Listener configurations should be updated by other special APIs.""" } diff --git a/rel/i18n/emqx_gateway_api_authn.hocon b/rel/i18n/emqx_gateway_api_authn.hocon index a9ae33f0c..2d84eef54 100644 --- a/rel/i18n/emqx_gateway_api_authn.hocon +++ b/rel/i18n/emqx_gateway_api_authn.hocon @@ -1,99 +1,45 @@ emqx_gateway_api_authn { - get_authn { - desc { - en: """Gets the configuration of the specified gateway authenticator.
-Returns 404 when gateway or authentication is not enabled.""" - zh: """获取指定网关认证器的配置 -当网关或认证未启用时,返回 404。""" - } - } - - update_authn { - desc { - en: """Update the configuration of the specified gateway authenticator, or disable the authenticator.""" - zh: """更新指定网关认证器的配置,或停用认证器。""" - } - } - - add_authn { - desc { - en: """Enables the authenticator for client authentication for the specified gateway.
+add_authn.desc: +"""Enables the authenticator for client authentication for the specified gateway.
When the authenticator is not configured or turned off, all client connections are assumed to be allowed.
Note: Only one authenticator is allowed to be enabled at a time in the gateway, rather than allowing multiple authenticators to be configured to form an authentication chain as in MQTT.""" - zh: """为指定网关开启认证器实现客户端认证的功能。
-当未配置认证器或关闭认证器时,则认为允许所有客户端的连接。
-注:在网关中仅支持添加一个认证器,而不是像 MQTT 一样允许配置多个认证器构成认证链。""" - } - } - delete_authn { - desc { - en: """Delete the authenticator of the specified gateway.""" - zh: """删除指定网关的认证器。""" - } - } +add_user.desc: +"""Add user for the authenticator (only supports built_in_database).""" - list_users { - desc { - en: """Get the users for the authenticator (only supported by built_in_database).""" - zh: """获取用户列表(仅支持 built_in_database 类型的认证器)""" - } - } +delete_authn.desc: +"""Delete the authenticator of the specified gateway.""" - add_user { - desc { - en: """Add user for the authenticator (only supports built_in_database).""" - zh: """添加用户(仅支持 built_in_database 类型的认证器)""" - } - } +delete_user.desc: +"""Delete the user for the gateway authenticator (only supports built_in_database)""" - get_user { - desc { - en: """Get user info from the gateway authenticator (only supports built_in_database)""" - zh: """获取用户信息(仅支持 built_in_database 类型的认证器)""" - } - } +get_authn.desc: +"""Gets the configuration of the specified gateway authenticator.
+Returns 404 when gateway or authentication is not enabled.""" - update_user { - desc { - en: """Update the user info for the gateway authenticator (only supports built_in_database)""" - zh: """更新用户信息(仅支持 built_in_database 类型的认证器)""" - } - } +get_user.desc: +"""Get user info from the gateway authenticator (only supports built_in_database)""" - delete_user { - desc { - en: """Delete the user for the gateway authenticator (only supports built_in_database)""" - zh: """删除用户(仅支持 built_in_database 类型的认证器)""" - } - } +import_users.desc: +"""Import users into the gateway authenticator (only supports built_in_database)""" - import_users { - desc { - en: """Import users into the gateway authenticator (only supports built_in_database)""" - zh: """导入用户(仅支持 built_in_database 类型的认证器)""" - } - } +is_superuser.desc: +"""Is superuser""" - user_id { - desc { - en: """User ID""" - zh: """用户 ID""" - } - } +like_user_id.desc: +"""Fuzzy search using user ID (username or clientid), only supports search by substring.""" - like_user_id { - desc { - en: """Fuzzy search using user ID (username or clientid), only supports search by substring.""" - zh: """使用用户 ID (username 或 clientid)模糊搜索,仅支持按子串的方式进行搜索。""" - } - } +list_users.desc: +"""Get the users for the authenticator (only supported by built_in_database).""" + +update_authn.desc: +"""Update the configuration of the specified gateway authenticator, or disable the authenticator.""" + +update_user.desc: +"""Update the user info for the gateway authenticator (only supports built_in_database)""" + +user_id.desc: +"""User ID""" - is_superuser { - desc { - en: """Is superuser""" - zh: """是否是超级用户""" - } - } } diff --git a/rel/i18n/emqx_gateway_api_clients.hocon b/rel/i18n/emqx_gateway_api_clients.hocon index 1e6f575c3..4c95318ab 100644 --- a/rel/i18n/emqx_gateway_api_clients.hocon +++ b/rel/i18n/emqx_gateway_api_clients.hocon @@ -1,478 +1,207 @@ emqx_gateway_api_clients { - list_clients { - desc { - en: """Get the gateway client list""" - zh: """获取指定网关的客户端列表""" - } - } - - get_client { - desc { - en: """Get the gateway client information""" - zh: """获取客户端信息""" - } - } - - kick_client { - desc { - en: """Kick out the gateway client""" - zh: """踢出指定客户端""" - } - } - - list_subscriptions { - desc { - en: """Get the gateway client subscriptions""" - zh: """获取某客户端的主题订阅列表""" - } - } - - add_subscription { - desc { - en: """Create a subscription membership""" - zh: """为某客户端新增订阅关系""" - } - } - - delete_subscription { - desc { - en: """Delete a subscriptions membership""" - zh: """为某客户端删除某订阅关系""" - } - } - - param_node { - desc { - en: """Match the client's node name""" - zh: """匹配客户端的节点名称""" - } - } - - param_clientid { - desc { - en: """Match the client's ID""" - zh: """匹配客户端 ID""" - } - } - - param_username { - desc { - en: """Match the client's Username""" - zh: """匹配客户端 Username""" - } - } - - param_ip_address { - desc { - en: """Match the client's ip address""" - zh: """匹配客户端 IP 地址""" - } - } - - param_conn_state { - desc { - en: """Match the client's connection state""" - zh: """匹配客户端连接状态""" - } - } - - param_proto_ver { - desc { - en: """Match the client's protocol version""" - zh: """匹配客户端协议版本""" - } - } - - param_clean_start { - desc { - en: """Match the client's clean start flag""" - zh: """匹配客户端 `clean_start` 标记""" - } - } - - param_like_clientid { - desc { - en: """Use sub-string to match client's ID""" - zh: """子串匹配客户端 ID""" - } - } - - param_like_username { - desc { - en: """Use sub-string to match client's username""" - zh: """子串匹配 客户端 Username""" - } - } - - 
param_gte_created_at { - desc { - en: """Match the session created datetime greater than a certain value""" - zh: """匹配会话创建时间大于等于指定值的客户端""" - } - } - - param_lte_created_at { - desc { - en: """Match the session created datetime less than a certain value""" - zh: """匹配会话创建时间小于等于指定值的客户端""" - } - } - - param_gte_connected_at{ - desc { - en: """Match the client socket connected datetime greater than a certain value""" - zh: """匹配连接创建时间大于等于指定值的客户端""" - } - } - - param_lte_connected_at { - desc { - en: """Match the client socket connected datatime less than a certain value""" - zh: """匹配连接创建时间小于等于指定值的客户端""" - } - } - - param_endpoint_name { - desc { - en: """Match the lwm2m client's endpoint name""" - zh: """匹配 LwM2M 客户端 Endpoint Name""" - } - } - - param_like_endpoint_name { - desc { - en: """Use sub-string to match lwm2m client's endpoint name""" - zh: """子串匹配 LwM2M 客户端 Endpoint Name""" - } - } - - param_gte_lifetime { - desc { - en: """Match the lwm2m client registered lifetime greater than a certain value""" - zh: """匹配心跳时间大于等于指定值的 LwM2M 客户端""" - } - } - - param_lte_lifetime { - desc { - en: """Match the lwm2m client registered lifetime less than a certain value""" - zh: """匹配心跳时间小于等于指定值的 LwM2M 客户端""" - } - } - - clientid { - desc { - en: """Client ID""" - zh: """客户端 ID""" - } - } - - topic { - desc { - en: """Topic Filter/Name""" - zh: """主题过滤器或主题名称""" - } - } - - endpoint_name { - desc { - en: """The LwM2M client endpoint name""" - zh: """LwM2M 客户端 Endpoint Name""" - } - } - - lifetime { - desc { - en: """LwM2M Life time""" - zh: """LwM2M 客户端心跳周期""" - } - } - - qos { - desc { - en: """QoS level, enum: 0, 1, 2""" - zh: """QoS 等级,枚举:0,1,2""" - } - } - - nl { - desc { - en: """No Local option, enum: 0, 1""" - zh: """No Local 选项,枚举:0,1""" - } - } - - rap { - desc { - en: """Retain as Published option, enum: 0, 1""" - zh: """Retain as Published 选项,枚举:0,1""" - } - } - - rh { - desc { - en: """Retain Handling option, enum: 0, 1, 2""" - zh: """Retain Handling 选项,枚举:0,1,2""" - } - } - - sub_props { - desc { - en: """Subscription properties""" - zh: """订阅属性""" - } - } - - subid { - desc { - en: """Only stomp protocol, a unique identity for the subscription. 
range: 1-65535.""" - zh: """订阅ID,仅用于 Stomp 网关。用于创建订阅关系时指定订阅 ID。取值范围 1-65535。""" - } - } - - node { - desc { - en: """Name of the node to which the client is connected""" - zh: """客户端连接到的节点名称""" - } - } - - username { - desc { - en: """Username of client when connecting""" - zh: """客户端连接的用户名""" - } - } - - mountpoint { - desc { - en: """Topic mountpoint""" - zh: """主题固定前缀""" - } - } - - proto_name { - desc { - en: """Client protocol name""" - zh: """客户端使用的协议名称""" - } - } - - proto_ver { - desc { - en: """Protocol version used by the client""" - zh: """客户端使用的协议版本""" - } - } - - ip_address { - desc { - en: """Client's IP address""" - zh: """客户端 IP 地址""" - } - } - - port { - desc { - en: """Client's port""" - zh: """客户端端口""" - } - } - - is_bridge { - desc { - en: """Indicates whether the client is connected via bridge""" - zh: """标识客户端是否通过 is_bridge 标志连接""" - } - } - - connected_at { - desc { - en: """Client connection time""" - zh: """客户端连接时间""" - } - } - - disconnected_at { - desc { - en: """Client offline time, This field is only valid and returned when connected is false""" - zh: """客户端连接断开时间""" - } - } - - connected { - desc { - en: """Whether the client is connected""" - zh: """标识客户端是否已连接到网关""" - } - } - - keepalive { - desc { - en: """Keepalive time, with the unit of second""" - zh: """Keepalive 时间,单位:秒""" - } - } - - clean_start { - desc { - en: """Indicate whether the client is using a brand new session""" - zh: """标识客户端是否以 clean_start 的标志连接到网关""" - } - } - - expiry_interval { - desc { - en: """Session expiration interval, with the unit of second""" - zh: """会话超期时间,单位:秒""" - } - } - - created_at { - desc { - en: """Session creation time""" - zh: """会话创建时间""" - } - } - - subscriptions_cnt { - desc { - en: """Number of subscriptions established by this client""" - zh: """客户端已订阅主题数""" - } - } - - subscriptions_max { - desc { - en: """Maximum number of subscriptions allowed by this client""" - zh: """客户端允许订阅的最大主题数""" - } - } - - inflight_cnt { - desc { - en: """Current length of inflight""" - zh: """客户端当前飞行窗口大小""" - } - } - - inflight_max { - desc { - en: """Maximum length of inflight""" - zh: """客户端允许的飞行窗口最大值""" - } - } - - mqueue_len { - desc { - en: """Current length of message queue""" - zh: """客户端当前消息队列长度""" - } - } - - mqueue_max { - desc { - en: """Maximum length of message queue""" - zh: """客户端允许的最大消息队列长度""" - } - } - - mqueue_dropped { - desc { - en: """Number of messages dropped by the message queue due to exceeding the length""" - zh: """由于消息队列过程,客户端消息队列丢弃消息条数""" - } - } - - awaiting_rel_cnt { - desc { - en: """Number of awaiting acknowledge packet""" - zh: """客户端当前等待 PUBREL 确认的 PUBREC 消息的条数""" - } - } - - awaiting_rel_max { - desc { - en: """Maximum allowed number of awaiting PUBREC packet""" - zh: """客户端允许的最大 PUBREC 等待队列长度""" - } - } - - recv_oct { - desc { - en: """Number of bytes received""" - zh: """已接收的字节数""" - } - } - - recv_cnt { - desc { - en: """Number of socket packets received""" - zh: """已接收 Socket 报文次数""" - } - } - - recv_pkt { - desc { - en: """Number of protocol packets received""" - zh: """已接收应用层协议控制报文数""" - } - } - - recv_msg { - desc { - en: """Number of message packets received""" - zh: """已接收上行的消息条数""" - } - } - - send_oct { - desc { - en: """Number of bytes sent""" - zh: """已发送字节数""" - } - } - - send_cnt { - desc { - en: """Number of socket packets sent""" - zh: """已发送 Socket 报文次数""" - } - } - - send_pkt { - desc { - en: """Number of protocol packets sent""" - zh: """已发送应用层协议控制报文数""" - } - } - - send_msg { - desc { - en: """Number of message packets 
sent""" - zh: """已发送下行消息数条数""" - } - } - - mailbox_len { - desc { - en: """Process mailbox size""" - zh: """进程邮箱大小""" - } - } - - heap_size { - desc { - en: """Process heap size with the unit of byte""" - zh: """进程堆内存大小,单位:字节""" - } - } - - reductions { - desc { - en: """Erlang reduction""" - zh: """进程已消耗 Reduction 数""" - } - } +disconnected_at.desc: +"""Client offline time, This field is only valid and returned when connected is false""" + +heap_size.desc: +"""Process heap size with the unit of byte""" + +send_oct.desc: +"""Number of bytes sent""" + +get_client.desc: +"""Get the gateway client information""" + +param_gte_created_at.desc: +"""Match the session created datetime greater than a certain value""" + +param_conn_state.desc: +"""Match the client's connection state""" + +send_pkt.desc: +"""Number of protocol packets sent""" + +clean_start.desc: +"""Indicate whether the client is using a brand new session""" + +inflight_cnt.desc: +"""Current length of inflight""" + +delete_subscription.desc: +"""Delete a subscriptions membership""" + +param_lte_connected_at.desc: +"""Match the client socket connected datatime less than a certain value""" + +node.desc: +"""Name of the node to which the client is connected""" + +awaiting_rel_cnt.desc: +"""Number of awaiting acknowledge packet""" + +rap.desc: +"""Retain as Published option, enum: 0, 1""" + +inflight_max.desc: +"""Maximum length of inflight""" + +param_username.desc: +"""Match the client's Username""" + +param_like_endpoint_name.desc: +"""Use sub-string to match lwm2m client's endpoint name""" + +created_at.desc: +"""Session creation time""" + +sub_props.desc: +"""Subscription properties""" + +list_clients.desc: +"""Get the gateway client list""" + +subscriptions_cnt.desc: +"""Number of subscriptions established by this client""" + +mailbox_len.desc: +"""Process mailbox size""" + +send_cnt.desc: +"""Number of socket packets sent""" + +rh.desc: +"""Retain Handling option, enum: 0, 1, 2""" + +connected.desc: +"""Whether the client is connected""" + +qos.desc: +"""QoS level, enum: 0, 1, 2""" + +ip_address.desc: +"""Client's IP address""" + +param_gte_connected_at.desc: +"""Match the client socket connected datetime greater than a certain value""" + +awaiting_rel_max.desc: +"""Maximum allowed number of awaiting PUBREC packet""" + +param_like_username.desc: +"""Use sub-string to match client's username""" + +nl.desc: +"""No Local option, enum: 0, 1""" + +param_like_clientid.desc: +"""Use sub-string to match client's ID""" + +param_lte_created_at.desc: +"""Match the session created datetime less than a certain value""" + +topic.desc: +"""Topic Filter/Name""" + +proto_ver.desc: +"""Protocol version used by the client""" + +mountpoint.desc: +"""Topic mountpoint""" + +proto_name.desc: +"""Client protocol name""" + +param_lte_lifetime.desc: +"""Match the lwm2m client registered lifetime less than a certain value""" + +port.desc: +"""Client's port""" + +connected_at.desc: +"""Client connection time""" + +expiry_interval.desc: +"""Session expiration interval, with the unit of second""" + +username.desc: +"""Username of client when connecting""" + +param_clean_start.desc: +"""Match the client's clean start flag""" + +recv_msg.desc: +"""Number of message packets received""" + +list_subscriptions.desc: +"""Get the gateway client subscriptions""" + +recv_oct.desc: +"""Number of bytes received""" + +keepalive.desc: +"""Keepalive time, with the unit of second""" + +param_clientid.desc: +"""Match the client's ID""" + +subscriptions_max.desc: +"""Maximum 
number of subscriptions allowed by this client""" + +param_ip_address.desc: +"""Match the client's ip address""" + +mqueue_max.desc: +"""Maximum length of message queue""" + +mqueue_dropped.desc: +"""Number of messages dropped by the message queue due to exceeding the length""" + +subid.desc: +"""Only stomp protocol, a unique identity for the subscription. range: 1-65535.""" + +clientid.desc: +"""Client ID""" + +kick_client.desc: +"""Kick out the gateway client""" + +is_bridge.desc: +"""Indicates whether the client is connected via bridge""" + +lifetime.desc: +"""LwM2M Life time""" + +send_msg.desc: +"""Number of message packets sent""" + +add_subscription.desc: +"""Create a subscription membership""" + +param_endpoint_name.desc: +"""Match the lwm2m client's endpoint name""" + +param_node.desc: +"""Match the client's node name""" + +recv_cnt.desc: +"""Number of socket packets received""" + +recv_pkt.desc: +"""Number of protocol packets received""" + +endpoint_name.desc: +"""The LwM2M client endpoint name""" + +param_proto_ver.desc: +"""Match the client's protocol version""" + +reductions.desc: +"""Erlang reduction""" + +param_gte_lifetime.desc: +"""Match the lwm2m client registered lifetime greater than a certain value""" + +mqueue_len.desc: +"""Current length of message queue""" + } diff --git a/rel/i18n/emqx_gateway_api_listeners.hocon b/rel/i18n/emqx_gateway_api_listeners.hocon index 9319bb3e5..3b5bb65a1 100644 --- a/rel/i18n/emqx_gateway_api_listeners.hocon +++ b/rel/i18n/emqx_gateway_api_listeners.hocon @@ -1,146 +1,65 @@ emqx_gateway_api_listeners { - list_listeners { - desc { - en: """Gets a list of gateway listeners. This interface returns all the configs of the listener (including the authenticator on that listener), as well as the status of that listener running in the cluster.""" - zh: """获取网关监听器列表。该接口会返回监听器所有的配置(包括该监听器上的认证器),同时也会返回该监听器在集群中运行的状态。""" - } - } - - add_listener { - desc { - en: """Create the gateway listener.
+add_listener.desc: +"""Create the gateway listener.
Note: For listener types not supported by a gateway, this API returns `400: BAD_REQUEST`.""" - zh: """为指定网关添加监听器。
-注:对于某网关不支持的监听器类型,该接口会返回 `400: BAD_REQUEST`。""" - } - } - get_listener { - desc { - en: """Get the gateway listener configs""" - zh: """获取指定网关监听器的配置。""" - } - } - - delete_listener { - desc { - en: """Delete the gateway listener. All connected clients under the deleted listener will be disconnected.""" - zh: """删除指定监听器。被删除的监听器下所有已连接的客户端都会离线。""" - } - } - - update_listener { - desc { - en: """Update the gateway listener. The listener being updated performs a restart and all clients connected to that listener will be disconnected.""" - zh: """更新某网关监听器的配置。被更新的监听器会执行重启,所有已连接到该监听器上的客户端都会被断开。""" - } - } - - get_listener_authn { - desc { - en: """Get the listener's authenticator configs.""" - zh: """获取监听器的认证器配置。""" - } - } - - add_listener_authn { - desc { - en: """Enable authenticator for specified listener for client authentication.
+add_listener_authn.desc: +"""Enable authenticator for specified listener for client authentication.
When authenticator is enabled for a listener, all clients connecting to that listener will use that authenticator for authentication.""" - zh: """为指定监听器开启认证器以实现客户端认证的能力。
-当某一监听器开启认证后,所有连接到该监听器的客户端会使用该认证器进行认证。""" - } - } - update_listener_authn { - desc { - en: """Update authenticator configs for the listener, or disable/enable it.""" - zh: """更新指定监听器的认证器配置,或停用/启用该认证器。""" - } - } +add_user.desc: +"""Add user for the authenticator (only supports built_in_database)""" - delete_listener_authn { - desc { - en: """Remove authenticator for the listener.""" - zh: """移除指定监听器的认证器。""" - } - } +current_connections.desc: +"""Current Connections""" - list_users { - desc { - en: """Get the users for the authenticator (only supported by built_in_database)""" - zh: """获取用户列表(仅支持 built_in_database 类型的认证器)""" - } - } +delete_listener.desc: +"""Delete the gateway listener. All connected clients under the deleted listener will be disconnected.""" - add_user { - desc { - en: """Add user for the authenticator (only supports built_in_database)""" - zh: """添加用户(仅支持 built_in_database 类型的认证器)""" - } - } +delete_listener_authn.desc: +"""Remove authenticator for the listener.""" - get_user { - desc { - en: """Get user info from the gateway authenticator (only supports built_in_database)""" - zh: """获取用户信息(仅支持 built_in_database 类型的认证器)""" - } - } +delete_user.desc: +"""Delete the user for the gateway authenticator (only supports built_in_database)""" - update_user { - desc { - en: """Update the user info for the gateway authenticator (only supports built_in_database)""" - zh: """更新用户信息(仅支持 built_in_database 类型的认证器)""" - } - } +get_listener.desc: +"""Get the gateway listener configs""" - delete_user { - desc { - en: """Delete the user for the gateway authenticator (only supports built_in_database)""" - zh: """删除用户(仅支持 built_in_database 类型的认证器)""" - } - } +get_listener_authn.desc: +"""Get the listener's authenticator configs.""" - import_users { - desc { - en: """Import users into the gateway authenticator (only supports built_in_database)""" - zh: """导入用户(仅支持 built_in_database 类型的认证器)""" - } - } +get_user.desc: +"""Get user info from the gateway authenticator (only supports built_in_database)""" - listener_id { - desc { - en: """Listener ID""" - zh: """监听器 ID""" - } - } +import_users.desc: +"""Import users into the gateway authenticator (only supports built_in_database)""" - listener_status { - desc { - en: """listener status""" - zh: """监听器状态""" - } - } +list_listeners.desc: +"""Gets a list of gateway listeners. This interface returns all the configs of the listener (including the authenticator on that listener), as well as the status of that listener running in the cluster.""" - listener_node_status { - desc { - en: """listener status of each node in the cluster""" - zh: """监听器在集群中每个节点上的状态""" - } - } +list_users.desc: +"""Get the users for the authenticator (only supported by built_in_database)""" - node { - desc { - en: """Node Name""" - zh: """节点名称""" - } - } +listener_id.desc: +"""Listener ID""" + +listener_node_status.desc: +"""listener status of each node in the cluster""" + +listener_status.desc: +"""listener status""" + +node.desc: +"""Node Name""" + +update_listener.desc: +"""Update the gateway listener. 
The listener being updated performs a restart and all clients connected to that listener will be disconnected.""" + +update_listener_authn.desc: +"""Update authenticator configs for the listener, or disable/enable it.""" + +update_user.desc: +"""Update the user info for the gateway authenticator (only supports built_in_database)""" - current_connections { - desc { - en: """Current Connections""" - zh: """当前连接数""" - } - } } diff --git a/rel/i18n/emqx_gateway_schema.hocon b/rel/i18n/emqx_gateway_schema.hocon index fc34ef0a8..5f7d71913 100644 --- a/rel/i18n/emqx_gateway_schema.hocon +++ b/rel/i18n/emqx_gateway_schema.hocon @@ -1,255 +1,117 @@ emqx_gateway_schema { - gateway_common_enable { - desc { - en: """Whether to enable this gateway""" - zh: """是否启用该网关""" - } - } +dtls_listener_acceptors.desc: +"""Size of the acceptor pool.""" - gateway_common_enable_stats { - desc { - en: """Whether to enable client process statistic""" - zh: """是否开启客户端统计""" - } - } +dtls_listener_dtls_opts.desc: +"""DTLS socket options""" - gateway_common_idle_timeout { - desc { - en: """The idle time of the client connection process. It has two purposes: +gateway_common_authentication.desc: +"""Default authentication configs for all the gateway listeners. For per-listener overrides see authentication + in listener configs""" + +gateway_common_clientinfo_override.desc: +"""ClientInfo override.""" + +gateway_common_clientinfo_override_clientid.desc: +"""Template for overriding clientid.""" + +gateway_common_clientinfo_override_password.desc: +"""Template for overriding password.""" + +gateway_common_clientinfo_override_username.desc: +"""Template for overriding username.""" + +gateway_common_enable.desc: +"""Whether to enable this gateway""" + +gateway_common_enable_stats.desc: +"""Whether to enable client process statistic""" + +gateway_common_idle_timeout.desc: +"""The idle time of the client connection process. It has two purposes: 1. A newly created client process that does not receive any client requests after that time will be closed directly. 2. A running client process that does not receive any client requests after this time will go into hibernation to save resources.""" - zh: """客户端连接过程的空闲时间。该配置用于: - 1. 一个新创建的客户端进程如果在该时间间隔内没有收到任何客户端请求,将被直接关闭。 - 2. 一个正在运行的客户进程如果在这段时间后没有收到任何客户请求,将进入休眠状态以节省资源。""" - } - } - gateway_common_clientinfo_override { - desc { - en: """ClientInfo override.""" - zh: """ClientInfo 重写。""" - } - } +gateway_common_listener_access_rules.desc: +"""The access control rules for this listener. +See: https://github.com/emqtt/esockd#allowdeny""" - gateway_common_clientinfo_override_username { - desc { - en: """Template for overriding username.""" - zh: """username 重写模板""" - } - } - gateway_common_clientinfo_override_password { - desc { - en: """Template for overriding password.""" - zh: """password 重写模板""" - } - } - gateway_common_clientinfo_override_clientid { - desc { - en: """Template for overriding clientid.""" - zh: """clientid 重写模板""" - } - } +gateway_common_listener_bind.desc: +"""The IP address and port that the listener will bind.""" - gateway_common_authentication { - desc { - en: """Default authentication configs for all the gateway listeners. 
For per-listener overrides see authentication\n in listener configs""" - zh: """网关的认证器配置,对该网关下所以的监听器生效。如果每个监听器需要配置不同的认证器,需要配置监听器下的 authentication 字段。""" - } - } +gateway_common_listener_enable.desc: +"""Enable the listener.""" - tcp_udp_listeners { - desc { - en: """Settings for the listeners.""" - zh: """监听器配置。""" - } - } - - tcp_listeners { - desc { - en: """Settings for the TCP listeners.""" - zh: """配置 TCP 类型的监听器。""" - } - } - - udp_listeners { - desc { - en: """Settings for the UDP listeners.""" - zh: """配置 UDP 类型的监听器。""" - } - } - - listener_name_to_settings_map{ - desc { - en: """A map from listener names to listener settings.""" - zh: """从监听器名称到配置参数的映射。""" - } - } - - tcp_listener_acceptors { - desc { - en: """Size of the acceptor pool.""" - zh: """Acceptor 进程池大小。""" - } - } - - tcp_listener_tcp_opts{ - desc { - en: """Setting the TCP socket options.""" - zh: """TCP Socket 配置。""" - } - } - - tcp_listener_proxy_protocol { - desc { - en: """Enable the Proxy Protocol V1/2 if the EMQX cluster is deployed behind HAProxy or Nginx. -See: https://www.haproxy.com/blog/haproxy/proxy-protocol/""" - zh: """是否开启 Proxy Protocol V1/2。当 EMQX 集群部署在 HAProxy 或 Nginx 后需要获取客户端真实 IP 时常用到该选项。参考:https://www.haproxy.com/blog/haproxy/proxy-protocol/""" - } - } - - tcp_listener_proxy_protocol_timeout { - desc { - en: """Timeout for proxy protocol. -EMQX will close the TCP connection if proxy protocol packet is not received within the timeout.""" - zh: """接收 Proxy Protocol 报文头的超时时间。如果在超时内没有收到 Proxy Protocol 包,EMQX 将关闭 TCP 连接。""" - } - } - - ssl_listener_options { - desc { - en: """SSL Socket options.""" - zh: """SSL Socket 配置。""" - } - } - - udp_listener_udp_opts { - desc { - en: """Settings for the UDP sockets.""" - zh: """UDP Socket 配置。""" - } - } - - udp_listener_active_n { - desc { - en: """Specify the {active, N} option for the socket. -See: https://erlang.org/doc/man/inet.html#setopts-2""" - zh: """为 Socket 指定 {active, N} 选项。 -参见:https://erlang.org/doc/man/inet.html#setopts-2""" - } - } - - udp_listener_recbuf { - desc { - en: """Size of the kernel-space receive buffer for the socket.""" - zh: """Socket 在内核空间接收缓冲区的大小。""" - } - } - - udp_listener_sndbuf { - desc { - en: """Size of the kernel-space send buffer for the socket.""" - zh: """Socket 在内核空间发送缓冲区的大小。""" - } - } - - udp_listener_buffer { - desc { - en: """Size of the user-space buffer for the socket.""" - zh: """Socket 在用户空间的缓冲区大小。""" - } - } - - udp_listener_reuseaddr { - desc { - en: """Allow local reuse of port numbers.""" - zh: """允许重用本地处于 TIME_WAIT 的端口号。""" - } - } - - dtls_listener_acceptors { - desc { - en: """Size of the acceptor pool.""" - zh: """Acceptor 进程池大小。""" - } - } - - dtls_listener_dtls_opts { - desc { - en: """DTLS socket options""" - zh: """DTLS Socket 配置""" - } - - } - - gateway_common_listener_enable { - desc { - en: """Enable the listener.""" - zh: """是否启用该监听器。""" - } - } - - gateway_common_listener_bind { - desc { - en: """The IP address and port that the listener will bind.""" - zh: """监听器绑定的 IP 地址或端口。""" - } - } - - gateway_common_listener_max_connections { - desc { - en: """Maximum number of concurrent connections.""" - zh: """监听器支持的最大连接数。""" - } - } - - gateway_common_listener_max_conn_rate { - desc { - en: """Maximum connections per second.""" - zh: """监听器支持的最大连接速率。""" - } - } - - gateway_common_listener_enable_authn { - desc { - en: """Set true (default) to enable client authentication on this listener. +gateway_common_listener_enable_authn.desc: +"""Set true (default) to enable client authentication on this listener. 
When set to false clients will be allowed to connect without authentication.""" - zh: """配置 true (默认值)启用客户端进行身份认证。 -配置 false 时,将不对客户端做任何认证。""" - } - } - gateway_mountpoint { - desc { - en: """When publishing or subscribing, prefix all topics with a mountpoint string. +gateway_common_listener_max_conn_rate.desc: +"""Maximum connections per second.""" + +gateway_common_listener_max_connections.desc: +"""Maximum number of concurrent connections.""" + +gateway_mountpoint.desc: +"""When publishing or subscribing, prefix all topics with a mountpoint string. The prefixed string will be removed from the topic name when the message is delivered to the subscriber. The mountpoint is a way that users can use to implement isolation of message routing between different listeners. For example if a client A subscribes to `t` with `listeners.tcp.\.mountpoint` set to `some_tenant`, then the client actually subscribes to the topic `some_tenant/t`. Similarly, if another client B (connected to the same listener as the client A) sends a message to topic `t`, the message is routed to all the clients subscribed `some_tenant/t`, -so client A will receive the message, with topic name `t`. Set to `\"\"` to disable the feature. +so client A will receive the message, with topic name `t`. Set to `""` to disable the feature. Variables in mountpoint string:
- ${clientid}: clientid
- ${username}: username""" - zh: """发布或订阅时,在所有主题前增加前缀字符串。 -当消息投递给订阅者时,前缀字符串将从主题名称中删除。挂载点是用户可以用来实现不同监听器之间的消息路由隔离的一种方式。 -例如,如果客户端 A 在 `listeners.tcp.\.mountpoint` 设置为 `some_tenant` 的情况下订阅 `t`, -则客户端实际上订阅了 `some_tenant/t` 主题。 -类似地,如果另一个客户端 B(连接到与客户端 A 相同的侦听器)向主题 `t` 发送消息, -则该消息被路由到所有订阅了 `some_tenant/t` 的客户端,因此客户端 A 将收到该消息,带有 主题名称`t`。 设置为 `\"\"` 以禁用该功能。 -挂载点字符串中可用的变量:
- - ${clientid}:clientid
- - ${username}:用户名""" - } - } - gateway_common_listener_access_rules { - desc { - en: """The access control rules for this listener. -See: https://github.com/emqtt/esockd#allowdeny""" - zh: """配置监听器的访问控制规则。 -见:https://github.com/emqtt/esockd#allowdeny""" - } - } +listener_name_to_settings_map.desc: +"""A map from listener names to listener settings.""" + +ssl_listener_options.desc: +"""SSL Socket options.""" + +tcp_listener_acceptors.desc: +"""Size of the acceptor pool.""" + +tcp_listener_proxy_protocol.desc: +"""Enable the Proxy Protocol V1/2 if the EMQX cluster is deployed behind HAProxy or Nginx. +See: https://www.haproxy.com/blog/haproxy/proxy-protocol/""" + +tcp_listener_proxy_protocol_timeout.desc: +"""Timeout for proxy protocol. +EMQX will close the TCP connection if proxy protocol packet is not received within the timeout.""" + +tcp_listener_tcp_opts.desc: +"""Setting the TCP socket options.""" + +tcp_listeners.desc: +"""Settings for the TCP listeners.""" + +tcp_udp_listeners.desc: +"""Settings for the listeners.""" + +udp_listener_active_n.desc: +"""Specify the {active, N} option for the socket. +See: https://erlang.org/doc/man/inet.html#setopts-2""" + +udp_listener_buffer.desc: +"""Size of the user-space buffer for the socket.""" + +udp_listener_recbuf.desc: +"""Size of the kernel-space receive buffer for the socket.""" + +udp_listener_reuseaddr.desc: +"""Allow local reuse of port numbers.""" + +udp_listener_sndbuf.desc: +"""Size of the kernel-space send buffer for the socket.""" + +udp_listener_udp_opts.desc: +"""Settings for the UDP sockets.""" + +udp_listeners.desc: +"""Settings for the UDP listeners.""" + } diff --git a/rel/i18n/emqx_license_http_api.hocon b/rel/i18n/emqx_license_http_api.hocon index 40a18bbf3..895041c18 100644 --- a/rel/i18n/emqx_license_http_api.hocon +++ b/rel/i18n/emqx_license_http_api.hocon @@ -1,23 +1,15 @@ emqx_license_http_api { - desc_license_info_api { - desc { - en: "Get license info" - zh: "获取许可证信息" - } - label: { - en: "License info" - zh: "许可证信息" - } - } - desc_license_key_api { - desc { - en: "Update a license key" - zh: "更新一个许可证密钥" - } - label: { - en: "Update license" - zh: "更新许可证" - } - } +desc_license_info_api.desc: +"""Get license info""" + +desc_license_info_api.label: +"""License info""" + +desc_license_key_api.desc: +"""Update a license key""" + +desc_license_key_api.label: +"""Update license""" + } diff --git a/rel/i18n/emqx_license_schema.hocon b/rel/i18n/emqx_license_schema.hocon index c330f1cb2..3e4e37bff 100644 --- a/rel/i18n/emqx_license_schema.hocon +++ b/rel/i18n/emqx_license_schema.hocon @@ -1,55 +1,33 @@ emqx_license_schema { - license_root { - desc { - en: "Defines the EMQX Enterprise license. \n\n" - "\n" - "The default license has 100 connections limit, it is " - "issued on 2023-01-09 and valid for 5 years (1825 days).\n" - "\n" - "EMQX comes with a default trial license. For production use, please \n" - "visit https://www.emqx.com/apply-licenses/emqx to apply." 
- zh: "EMQX企业许可证。\n" - "EMQX 自带一个默认的试用许可证," - "默认试用许可允许最多接入 100 个连接,签发时间是 2023年1月9日,有效期是 5 年(1825 天)。" - "若需要在生产环境部署,\n" - "请访问 https://www.emqx.com/apply-licenses/emqx 来申请。" - } - label { - en: "License" - zh: "许可证" - } - } - key_field { - desc { - en: "License string" - zh: "许可证字符串" - } - label { - en: "License string" - zh: "许可证字符串" - } - } +connection_high_watermark_field.desc: +"""High watermark limit above which license connection quota usage alarms are activated""" - connection_low_watermark_field { - desc { - en: "Low watermark limit below which license connection quota usage alarms are deactivated" - zh: "低水位限制,低于此水位线时系统会清除连接配额使用告警" - } - label { - en: "Connection low watermark" - zh: "连接低水位线" - } - } +connection_high_watermark_field.label: +"""Connection high watermark""" + +connection_low_watermark_field.desc: +"""Low watermark limit below which license connection quota usage alarms are deactivated""" + +connection_low_watermark_field.label: +"""Connection low watermark""" + +key_field.desc: +"""License string""" + +key_field.label: +"""License string""" + +license_root.desc: +"""Defines the EMQX Enterprise license. + + +The default license has 100 connections limit, it is issued on 2023-01-09 and valid for 5 years (1825 days). + +EMQX comes with a default trial license. For production use, please +visit https://www.emqx.com/apply-licenses/emqx to apply.""" + +license_root.label: +"""License""" - connection_high_watermark_field { - desc { - en: "High watermark limit above which license connection quota usage alarms are activated" - zh: "高水位线,连接数超过这个水位线时,系统会触发许可证连接配额使用告警" - } - label { - en: "Connection high watermark" - zh: "连接高水位" - } - } } diff --git a/rel/i18n/emqx_limiter_schema.hocon b/rel/i18n/emqx_limiter_schema.hocon index 37eb4ee1e..c99840375 100644 --- a/rel/i18n/emqx_limiter_schema.hocon +++ b/rel/i18n/emqx_limiter_schema.hocon @@ -1,168 +1,94 @@ emqx_limiter_schema { - failure_strategy { - desc { - en: """The strategy when all the retries failed.""" - zh: """当所有的重试都失败后的处理策略""" - } - label: { - en: """Failure Strategy""" - zh: """失败策略""" - } - } +bucket_cfg.desc: +"""Bucket Configs""" - max_retry_time { - desc { - en: """The maximum retry time when acquire failed.""" - zh: """申请失败后,尝试重新申请的时长最大值""" - } - label: { - en: """Max Retry Time""" - zh: """最大重试时间""" - } - } +bucket_cfg.label: +"""Buckets""" - divisible { - desc { - en: """Is it possible to split the number of requested tokens?""" - zh: """申请的令牌数是否可以被分割""" - } - label: { - en: """Divisible""" - zh: """是否可分割""" - } - } - - low_watermark { - desc { - en: """If the remaining tokens are lower than this value, -the check/consume will succeed, but it will be forced to wait for a short period of time.""" - zh: """当桶中剩余的令牌数低于这个值,即使令牌申请成功了,也会被强制暂停一会儿""" - } - label: { - en: """Low Watermark""" - zh: """低水位线""" - } - } - - initial { - desc { - en: """The initial number of tokens for this bucket.""" - zh: """桶中的初始令牌数""" - } - label: { - en: """Initial""" - zh: """初始令牌数""" - } - } - - rate { - desc { - en: """Rate for this bucket.""" - zh: """桶的令牌生成速率""" - } - label: { - en: """Rate""" - zh: """速率""" - } - } - - client { - desc { - en: """The rate limit for each user of the bucket""" - zh: """对桶的每个使用者的速率控制设置""" - } - label: { - en: """Per Client""" - zh: """每个使用者的限制""" - } - } - - bucket_cfg { - desc { - en: """Bucket Configs""" - zh: """桶的配置""" - } - label: { - en: """Buckets""" - zh: """桶的配置""" - } - } - - burst { - desc { - en: """The burst, This value is based on rate.
+burst.desc: +"""The burst, This value is based on rate.
This value + rate = the maximum limit that can be achieved when limiter burst.""" - zh: """突发速率。 -突发速率允许短时间内速率超过设置的速率值,突发速率 + 速率 = 当前桶能达到的最大速率值""" - } - label: { - en: """Burst""" - zh: """突发速率""" - } - } - message_routing { - desc { - en: """The message routing limiter. -This is used to limit the forwarding rate for this EMQX node. -Once the limit is reached, new publish will be refused""" - zh: """消息派发速率控制器。 -这个用来控制当前节点内的消息派发速率,当达到最大速率后,新的推送将会被拒绝""" - } - label: { - en: """Message Routing""" - zh: """消息派发""" - } - } +burst.label: +"""Burst""" - connection { - desc { - en: """The connection limiter. -This is used to limit the connection rate for this EMQX node. -Once the limit is reached, new connections will be refused""" - zh: """连接速率控制器。 -这个用来控制当前节点上的连接速率,当达到最大速率后,新的连接将会被拒绝""" - } - label: { - en: """Connection""" - zh: """连接速率""" - } - } - - messages { - desc { - en: """The `messages` limiter. -This is used to limit the inbound message numbers for this EMQX node -Once the limit is reached, the restricted client will be slow down even be hung for a while.""" - zh: """流入速率控制器。 -这个用来控制当前节点上的消息流入速率,当达到最大速率后,会话将会被限速甚至被强制挂起一小段时间""" - } - label: { - en: """Messages""" - zh: """消息流入速率""" - } - } - - bytes { - desc { - en: """The `bytes` limiter. +bytes.desc: +"""The `bytes` limiter. This is used to limit the inbound bytes rate for this EMQX node. Once the limit is reached, the restricted client will be slow down even be hung for a while.""" - zh: """流入字节率控制器。 -这个是用来控制当前节点上的数据流入的字节率,每条消息将会消耗和其二进制大小等量的令牌,当达到最大速率后,会话将会被限速甚至被强制挂起一小段时间""" - } - label: { - en: """Bytes""" - zh: """流入字节率""" - } - } - internal { - desc { - en: """Limiter for EMQX internal app.""" - zh: """EMQX 内部功能所用限制器。""" +bytes.label: +"""Bytes""" + +client.desc: +"""The rate limit for each user of the bucket""" + +client.label: +"""Per Client""" + +connection.desc: +"""The connection limiter. +This is used to limit the connection rate for this EMQX node. +Once the limit is reached, new connections will be refused""" + +connection.label: +"""Connection""" + +divisible.desc: +"""Is it possible to split the number of requested tokens?""" + +divisible.label: +"""Divisible""" + +failure_strategy.desc: +"""The strategy when all the retries failed.""" + +failure_strategy.label: +"""Failure Strategy""" + +initial.desc: +"""The initial number of tokens for this bucket.""" + +initial.label: +"""Initial""" + +internal.desc: +"""Limiter for EMQX internal app.""" + +low_watermark.desc: +"""If the remaining tokens are lower than this value, +the check/consume will succeed, but it will be forced to wait for a short period of time.""" + +low_watermark.label: +"""Low Watermark""" + +max_retry_time.desc: +"""The maximum retry time when acquire failed.""" + +max_retry_time.label: +"""Max Retry Time""" + +message_routing.desc: +"""The message routing limiter. +This is used to limit the forwarding rate for this EMQX node. +Once the limit is reached, new publish will be refused""" + +message_routing.label: +"""Message Routing""" + +messages.desc: +"""The `messages` limiter. 
+This is used to limit the inbound message numbers for this EMQX node +Once the limit is reached, the restricted client will be slow down even be hung for a while.""" + +messages.label: +"""Messages""" + +rate.desc: +"""Rate for this bucket.""" + +rate.label: +"""Rate""" - } - } } diff --git a/rel/i18n/emqx_lwm2m_api.hocon b/rel/i18n/emqx_lwm2m_api.hocon index 9cd7e27c0..7ff1fdcce 100644 --- a/rel/i18n/emqx_lwm2m_api.hocon +++ b/rel/i18n/emqx_lwm2m_api.hocon @@ -1,58 +1,27 @@ emqx_lwm2m_api { - lookup_resource { - desc { - en: """Look up a resource""" - zh: """查看指定资源状态""" - } - } +dataType.desc: +"""Data Type""" - observe_resource { - desc { - en: """Observe or Cancel observe a resource""" - zh: """Observe/Un-Observe 指定资源""" - } - } +lookup_resource.desc: +"""Look up a resource""" - read_resource { - desc { - en: """Send a read command to a resource""" - zh: """发送读指令到某资源""" - } - } +name.desc: +"""Resource Name""" - write_resource { - desc { - en: """Send a write command to a resource""" - zh: """发送写指令到某资源""" - } - } +observe_resource.desc: +"""Observe or Cancel observe a resource""" - operations { - desc { - en: """Resource Operations""" - zh: """资源可用操作列表""" - } - } +operations.desc: +"""Resource Operations""" - dataType { - desc { - en: """Data Type""" - zh: """数据类型""" - } - } +path.desc: +"""Resource Path""" - path { - desc { - en: """Resource Path""" - zh: """资源路径""" - } - } +read_resource.desc: +"""Send a read command to a resource""" + +write_resource.desc: +"""Send a write command to a resource""" - name { - desc { - en: """Resource Name""" - zh: """资源名称""" - } - } } diff --git a/rel/i18n/emqx_lwm2m_schema.hocon b/rel/i18n/emqx_lwm2m_schema.hocon index 822570f1d..0193ce88f 100644 --- a/rel/i18n/emqx_lwm2m_schema.hocon +++ b/rel/i18n/emqx_lwm2m_schema.hocon @@ -1,127 +1,56 @@ emqx_lwm2m_schema { - lwm2m { - desc { - en: """The LwM2M Gateway configuration. This gateway only supports the v1.0.1 protocol.""" - zh: """LwM2M 网关配置。仅支持 v1.0.1 协议。""" - } - } +lwm2m.desc: +"""The LwM2M Gateway configuration. This gateway only supports the v1.0.1 protocol.""" - lwm2m_xml_dir { - desc { - en: """The Directory for LwM2M Resource definition.""" - zh: """LwM2M Resource 定义的 XML 文件目录路径。""" - } - } +lwm2m_auto_observe.desc: +"""Automatically observe the object list of REGISTER packet.""" - lwm2m_lifetime_min { - desc { - en: """Minimum value of lifetime allowed to be set by the LwM2M client.""" - zh: """允许 LwM2M 客户端允许设置的心跳最小值。""" - } - } +lwm2m_lifetime_max.desc: +"""Maximum value of lifetime allowed to be set by the LwM2M client.""" - lwm2m_lifetime_max { - desc { - en: """Maximum value of lifetime allowed to be set by the LwM2M client.""" - zh: """允许 LwM2M 客户端允许设置的心跳最大值。""" - } - } +lwm2m_lifetime_min.desc: +"""Minimum value of lifetime allowed to be set by the LwM2M client.""" - lwm2m_qmode_time_window { - desc { - en: """The value of the time window during which the network link is considered valid by the LwM2M Gateway in QMode mode. +lwm2m_qmode_time_window.desc: +"""The value of the time window during which the network link is considered valid by the LwM2M Gateway in QMode mode. 
For example, after receiving an update message from a client, any messages within this time window are sent directly to the LwM2M client, and all messages beyond this time window are temporarily stored in memory.""" - zh: """在QMode模式下,LwM2M网关认为网络链接有效的时间窗口的值。 -例如,在收到客户端的更新信息后,在这个时间窗口内的任何信息都会直接发送到LwM2M客户端,而超过这个时间窗口的所有信息都会暂时储存在内存中。""" - } - } +lwm2m_translators.desc: +"""Topic configuration for LwM2M's gateway publishing and subscription.""" - lwm2m_auto_observe { - desc { - en: """Automatically observe the object list of REGISTER packet.""" - zh: """自动 Observe REGISTER 数据包的 Object 列表。""" - } - } - - lwm2m_update_msg_publish_condition { - desc { - en: """Policy for publishing UPDATE event message.
- - always: send update events as long as the UPDATE request is received.
- - contains_object_list: send update events only if the UPDATE request carries any Object List""" - zh: """发布UPDATE事件消息的策略。
- - always: 只要收到 UPDATE 请求,就发送更新事件。
- - contains_object_list: 仅当 UPDATE 请求携带 Object 列表时才发送更新事件。""" - } - } - - lwm2m_translators { - desc { - en: """Topic configuration for LwM2M's gateway publishing and subscription.""" - zh: """LwM2M 网关订阅/发布消息的主题映射配置。""" - } - } - - lwm2m_translators_command { - desc { - en: """The topic for receiving downstream commands. +lwm2m_translators_command.desc: +"""The topic for receiving downstream commands. For each new LwM2M client that succeeds in going online, the gateway creates a subscription relationship to receive downstream commands and send it to the LwM2M client""" - zh: """下行命令主题。 -对于每个成功上线的新 LwM2M 客户端,网关会创建一个订阅关系来接收下行消息并将其发送给客户端。""" - } - } - - lwm2m_translators_response { - desc { - en: """The topic for gateway to publish the acknowledge events from LwM2M client""" - zh: """用于网关发布来自 LwM2M 客户端的确认事件的主题。""" - } - } - - lwm2m_translators_notify { - desc { - en: """The topic for gateway to publish the notify events from LwM2M client. +lwm2m_translators_notify.desc: +"""The topic for gateway to publish the notify events from LwM2M client. After succeed observe a resource of LwM2M client, Gateway will send the notify events via this topic, if the client reports any resource changes""" - zh: """用于发布来自 LwM2M 客户端的通知事件的主题。 -在成功 Observe 到 LwM2M 客户端的资源后,如果客户端报告任何资源状态的变化,网关将通过该主题发送通知事件。""" - } - } +lwm2m_translators_register.desc: +"""The topic for gateway to publish the register events from LwM2M client.""" - lwm2m_translators_register { - desc { - en: """The topic for gateway to publish the register events from LwM2M client.""" - zh: """用于发布来自 LwM2M 客户端的注册事件的主题。""" - } - } +lwm2m_translators_response.desc: +"""The topic for gateway to publish the acknowledge events from LwM2M client""" - lwm2m_translators_update { - desc { - en: """The topic for gateway to publish the update events from LwM2M client""" - zh: """用于发布来自LwM2M客户端的更新事件的主题。""" - } - } +lwm2m_translators_update.desc: +"""The topic for gateway to publish the update events from LwM2M client""" - translator { - desc { - en: """MQTT topic that corresponds to a particular type of event.""" - zh: """配置某网关客户端对于发布消息或订阅的主题和 QoS 等级。""" - } - } +lwm2m_update_msg_publish_condition.desc: +"""Policy for publishing UPDATE event message.
+ - always: send update events as long as the UPDATE request is received.
+ - contains_object_list: send update events only if the UPDATE request carries any Object List""" - translator_topic { - desc { - en: """Topic Name""" - zh: """主题名称""" - } - } +lwm2m_xml_dir.desc: +"""The Directory for LwM2M Resource definition.""" + +translator.desc: +"""MQTT topic that corresponds to a particular type of event.""" + +translator_qos.desc: +"""QoS Level""" + +translator_topic.desc: +"""Topic Name""" - translator_qos { - desc { - en: """QoS Level""" - zh: """QoS 等级""" - } - } } diff --git a/rel/i18n/emqx_mgmt_api_alarms.hocon b/rel/i18n/emqx_mgmt_api_alarms.hocon index ca8bf0769..0327fffcd 100644 --- a/rel/i18n/emqx_mgmt_api_alarms.hocon +++ b/rel/i18n/emqx_mgmt_api_alarms.hocon @@ -1,84 +1,39 @@ emqx_mgmt_api_alarms { - list_alarms_api { - desc { - en: """List currently activated alarms or historical alarms, determined by query parameters.""" - zh: """列出当前激活的告警或历史告警,由查询参数决定。""" - } - } +activate_at.desc: +"""Alarm start time, using rfc3339 standard time format.""" - delete_alarms_api { - desc { - en: """Remove all historical alarms.""" - zh: """删除所有历史告警。""" - } - } +deactivate_at.desc: +"""Alarm end time, using rfc3339 standard time format.""" - delete_alarms_api_response204 { - desc { - en: """Historical alarms have been cleared successfully.""" - zh: """历史告警已成功清除。""" - } - } +delete_alarms_api.desc: +"""Remove all historical alarms.""" - get_alarms_qs_activated { - desc { - en: """It is used to specify the alarm type of the query. +delete_alarms_api_response204.desc: +"""Historical alarms have been cleared successfully.""" + +details.desc: +"""Alarm details, provides more alarm information, mainly for program processing.""" + +duration.desc: +"""Indicates how long the alarm has been active in milliseconds.""" + +get_alarms_qs_activated.desc: +"""It is used to specify the alarm type of the query. When true, it returns the currently activated alarm, and when it is false, it returns the historical alarm. 
The default is false.""" - zh: """用于指定查询的告警类型, -为 true 时返回当前激活的告警,为 false 时返回历史告警,默认为 false。""" - } - } - node { - desc { - en: """The name of the node that triggered this alarm.""" - zh: """触发此告警的节点名称。""" - } - } +list_alarms_api.desc: +"""List currently activated alarms or historical alarms, determined by query parameters.""" - name { - desc { - en: """Alarm name, used to distinguish different alarms.""" - zh: """告警名称,用于区分不同的告警。""" - } - } +message.desc: +"""Alarm message, which describes the alarm content in a human-readable format.""" - message { - desc { - en: """Alarm message, which describes the alarm content in a human-readable format.""" - zh: """告警消息,以人类可读的方式描述告警内容。""" - } - } +name.desc: +"""Alarm name, used to distinguish different alarms.""" - details { - desc { - en: """Alarm details, provides more alarm information, mainly for program processing.""" - zh: """告警详情,提供了更多的告警信息,主要提供给程序处理。""" - } - } - - duration { - desc { - en: """Indicates how long the alarm has been active in milliseconds.""" - zh: """表明告警已经持续了多久,单位:毫秒。""" - } - } - - activate_at { - desc { - en: """Alarm start time, using rfc3339 standard time format.""" - zh: """告警开始时间,使用 rfc3339 标准时间格式。""" - } - } - - deactivate_at { - desc { - en: """Alarm end time, using rfc3339 standard time format.""" - zh: """告警结束时间,使用 rfc3339 标准时间格式。""" - } - } +node.desc: +"""The name of the node that triggered this alarm.""" } diff --git a/rel/i18n/emqx_mgmt_api_banned.hocon b/rel/i18n/emqx_mgmt_api_banned.hocon index b45a40ba6..1a9700641 100644 --- a/rel/i18n/emqx_mgmt_api_banned.hocon +++ b/rel/i18n/emqx_mgmt_api_banned.hocon @@ -1,98 +1,54 @@ emqx_mgmt_api_banned { - list_banned_api { - desc { - en: """List all currently banned client IDs, usernames and IP addresses.""" - zh: """列出目前所有被封禁的客户端 ID、用户名和 IP 地址。""" - } - } +as.desc: +"""Ban method, which can be client ID, username or IP address.""" - create_banned_api { - desc { - en: """Add a client ID, username or IP address to the blacklist.""" - zh: """添加一个客户端 ID、用户名或者 IP 地址到黑名单。""" - } - } +as.label: +"""Ban Method""" - create_banned_api_response400 { - desc { - en: """Bad request, possibly due to wrong parameters or the existence of a banned object.""" - zh: """错误的请求,可能是参数错误或封禁对象已存在等原因。""" - } - } +at.desc: +"""The start time of the ban, the format is rfc3339, the default is the time when the operation was initiated.""" - delete_banned_api { - desc { - en: """Remove a client ID, username or IP address from the blacklist.""" - zh: """将一个客户端 ID、用户名或者 IP 地址从黑名单中删除。""" - } - } +at.label: +"""Ban Time""" - delete_banned_api_response404 { - desc { - en: """The banned object was not found in the blacklist.""" - zh: """未在黑名单中找到该封禁对象。""" - } - } +by.desc: +"""Initiator of the ban.""" + +by.label: +"""Ban Initiator""" + +create_banned_api.desc: +"""Add a client ID, username or IP address to the blacklist.""" + +create_banned_api_response400.desc: +"""Bad request, possibly due to wrong parameters or the existence of a banned object.""" + +delete_banned_api.desc: +"""Remove a client ID, username or IP address from the blacklist.""" + +delete_banned_api_response404.desc: +"""The banned object was not found in the blacklist.""" + +list_banned_api.desc: +"""List all currently banned client IDs, usernames and IP addresses.""" + +reason.desc: +"""Ban reason, record the reason why the current object was banned.""" + +reason.label: +"""Ban Reason""" + +until.desc: +"""The end time of the ban, the format is rfc3339, the default is the time when the operation was initiated + 1 year.""" + 
+until.label: +"""Ban End Time""" + +who.desc: +"""Ban object, specific client ID, username or IP address.""" + +who.label: +"""Ban Object""" - as { - desc { - en: """Ban method, which can be client ID, username or IP address.""" - zh: """封禁方式,可以通过客户端 ID、用户名或者 IP 地址等方式进行封禁。""" - } - label { - en: """Ban Method""" - zh: """封禁方式""" - } - } - who { - desc { - en: """Ban object, specific client ID, username or IP address.""" - zh: """封禁对象,具体的客户端 ID、用户名或者 IP 地址。""" - } - label { - en: """Ban Object""" - zh: """封禁对象""" - } - } - by { - desc { - en: """Initiator of the ban.""" - zh: """封禁的发起者。""" - } - label { - en: """Ban Initiator""" - zh: """封禁发起者""" - } - } - reason { - desc { - en: """Ban reason, record the reason why the current object was banned.""" - zh: """封禁原因,记录当前对象被封禁的原因。""" - } - label { - en: """Ban Reason""" - zh: """封禁原因""" - } - } - at { - desc { - en: """The start time of the ban, the format is rfc3339, the default is the time when the operation was initiated.""" - zh: """封禁的起始时间,格式为 rfc3339,默认为发起操作的时间。""" - } - label { - en: """Ban Time""" - zh: """封禁时间""" - } - } - until { - desc { - en: """The end time of the ban, the format is rfc3339, the default is the time when the operation was initiated + 1 year.""" - zh: """封禁的结束时间,格式为 rfc3339,默认值为发起操作的时间 + 1 年。""" - } - label { - en: """Ban End Time""" - zh: """封禁结束时间""" - } - } } diff --git a/rel/i18n/emqx_mgmt_api_key_schema.hocon b/rel/i18n/emqx_mgmt_api_key_schema.hocon index f96c5c11b..0dc11c7ac 100644 --- a/rel/i18n/emqx_mgmt_api_key_schema.hocon +++ b/rel/i18n/emqx_mgmt_api_key_schema.hocon @@ -1,32 +1,20 @@ emqx_mgmt_api_key_schema { - api_key { - desc { - en: """API Key, can be used to request API other than the management API key and the Dashboard user management API""" - zh: """API 密钥, 可用于请求除管理 API 密钥及 Dashboard 用户管理 API 的其它接口""" - } - label { - en: "API Key" - zh: "API 密钥" - } - } - bootstrap_file { - desc { - en: """Bootstrap file is used to add an api_key when emqx is launched, + +api_key.desc: +"""API Key, can be used to request API other than the management API key and the Dashboard user management API""" + +api_key.label: +"""API Key""" + +bootstrap_file.desc: +"""Bootstrap file is used to add an api_key when emqx is launched, the format is: ``` 7e729ae70d23144b:2QILI9AcQ9BYlVqLDHQNWN2saIjBV4egr1CZneTNKr9CpK ec3907f865805db0:Ee3taYltUKtoBVD9C3XjQl9C6NXheip8Z9B69BpUv5JxVHL ```""" - zh: """用于在启动 emqx 时,添加 API 密钥,其格式为: - ``` - 7e729ae70d23144b:2QILI9AcQ9BYlVqLDHQNWN2saIjBV4egr1CZneTNKr9CpK - ec3907f865805db0:Ee3taYltUKtoBVD9C3XjQl9C6NXheip8Z9B69BpUv5JxVHL - ```""" - } - label { - en: "Initialize api_key file." - zh: "API 密钥初始化文件" - } - } +bootstrap_file.label: +"""Initialize api_key file.""" + } diff --git a/rel/i18n/emqx_mgmt_api_publish.hocon b/rel/i18n/emqx_mgmt_api_publish.hocon index a09732cfc..50589f8d7 100644 --- a/rel/i18n/emqx_mgmt_api_publish.hocon +++ b/rel/i18n/emqx_mgmt_api_publish.hocon @@ -1,27 +1,51 @@ emqx_mgmt_api_publish { - publish_api { - desc { - en: """Possible HTTP status response codes are:
+ +error_message.desc: +"""Describes the failure reason in detail.""" + +message_id.desc: +"""A globally unique message ID for correlation/tracing.""" + +message_properties.desc: +"""The Properties of the PUBLISH message.""" + +msg_content_type.desc: +"""The Content Type MUST be a UTF-8 Encoded String.""" + +msg_correlation_data.desc: +"""Identifier of the Correlation Data. The Server MUST send the Correlation Data unaltered to all subscribers receiving the Application Message.""" + +msg_message_expiry_interval.desc: +"""Identifier of the Message Expiry Interval. If the Message Expiry Interval has passed and the Server has not managed to start onward delivery to a matching subscriber, then it MUST delete the copy of the message for that subscriber.""" + +msg_payload_format_indicator.desc: +"""0 (0x00) Byte Indicates that the Payload is unspecified bytes, which is equivalent to not sending a Payload Format Indicator. +1 (0x01) Byte Indicates that the Payload is UTF-8 Encoded Character Data. The UTF-8 data in the Payload MUST be well-formed UTF-8 as defined by the Unicode specification and restated in RFC 3629.""" + +msg_response_topic.desc: +"""Identifier of the Response Topic.The Response Topic MUST be a UTF-8 Encoded, It MUST NOT contain wildcard characters.""" + +msg_user_properties.desc: +"""The User-Property key-value pairs. Note: in case there are duplicated keys, only the last one will be used.""" + +payload.desc: +"""The MQTT message payload.""" + +payload_encoding.desc: +"""MQTT Payload Encoding, base64 or plain. When set to base64, the message is decoded before it is published.""" + +publish_api.desc: +"""Possible HTTP status response codes are:
200: The message is delivered to at least one subscriber;
202: No matched subscribers;
400: Message is invalid. For example, bad topic name, or QoS is out of range;<br/>
503: Failed to deliver the message to subscriber(s)"""
-            zh: """发布一个消息。<br/>
-可能的 HTTP 状态码如下:
-200: 消息被成功发送到至少一个订阅。
-202: 没有匹配到任何订阅。
-400: 消息编码错误,如非法主题,或 QoS 超出范围等。
-503: 服务重启等过程中导致转发失败。"""
-        }
-        label {
-            en: "Publish a message"
-            zh: "发布一条信息"
-        }
-    }
-    publish_bulk_api {
-        desc {
-            en: """Possible HTTP response status code are:<br/>
+publish_api.label:
+"""Publish a message"""
+
+publish_bulk_api.desc:
+"""Possible HTTP response status codes are:<br/>
200: All messages are delivered to at least one subscriber;
202: At least one message was not delivered to any subscriber;
400: At least one message is invalid. For example, bad topic name, or QoS is out of range;<br/>
@@ -31,62 +55,15 @@ In case there is at lest one invalid message in the batch, the HTTP response bod is the same as for /publish API.
Otherwise the HTTP response body is an array of JSON objects indicating the publish
result of each individual message in the batch."""
-            zh: """批量发送一组消息。<br/>
-可能的 HTTP 状态码如下:
-200: 所有的消息都被成功发送到至少一个订阅。
-202: 至少有一个消息没有匹配到任何订阅。
-400: 至少有一个消息编码错误,如非法主题,或 QoS 超出范围等。
-503: 至少有一个小因为服务重启的原因导致转发失败。
-请求的 Body 或者 Body 中包含的某个消息无法通过 API 规范的类型检查时,HTTP 响应的消息与发布单个消息的 API - /publish 是一样的。 -如果所有的消息都是合法的,那么 HTTP 返回的内容是一个 JSON 数组,每个元素代表了该消息转发的状态。""" - } - label { - en: "Publish a batch of messages" - zh: "发布一批信息" - } - } +publish_bulk_api.label: +"""Publish a batch of messages""" - topic_name { - desc { - en: "Topic Name" - zh: "主题名称" - } - } - qos { - desc { - en: "MQTT message QoS" - zh: "MQTT 消息的 QoS" - } - } - payload { - desc { - en: "The MQTT message payload." - zh: "MQTT 消息体。" - } - } - retain { - desc { - en: "A boolean field to indicate if this message should be retained." - zh: "布尔型字段,用于表示该消息是否保留消息。" - } - } - payload_encoding { - desc { - en: "MQTT Payload Encoding, base64 or plain. When set to base64, the message is decoded before it is published." - zh: "MQTT 消息体的编码方式,可以是 base64plain。当设置为 base64 时,消息在发布前会先被解码。" - } - } - message_id { - desc { - en: "A globally unique message ID for correlation/tracing." - zh: "全局唯一的一个消息 ID,方便用于关联和追踪。" - } - } - reason_code { - desc { - en: """The MQTT reason code, as the same ones used in PUBACK packet.
+qos.desc: +"""MQTT message QoS""" + +reason_code.desc: +"""The MQTT reason code, as the same ones used in PUBACK packet.
Currently supported codes are:
16(0x10): No matching subscribers;
@@ -95,63 +72,11 @@ Currently supported codes are:
151(0x97): Publish rate limited, or message size exceeded limit. The global size limit can be configured with mqtt.max_packet_size
NOTE: The message size is estimated with the received topic and payload size, meaning the actual size of serialized bytes (when sent to MQTT subscriber) might be slightly over the limit.""" - zh: """MQTT 消息发布的错误码,这些错误码也是 MQTT 规范中 PUBACK 消息可能携带的错误码。
-当前支持如下错误码:
-16(0x10):没能匹配到任何订阅;
-131(0x81):消息转发时发生错误,例如 EMQX 服务重启;
-144(0x90):主题名称非法;
-151(0x97):受到了速率限制,或者消息尺寸过大。全局消息大小限制可以通过配置项 mqtt.max_packet_size 来进行修改。
-注意:消息尺寸的是通过主题和消息体的字节数进行估算的。具体发布时所占用的字节数可能会稍大于这个估算的值。""" - } - } - error_message { - desc { - en: "Describes the failure reason in detail." - zh: "失败的详细原因。" - } - } - message_properties { - desc { - en: "The Properties of the PUBLISH message." - zh: "PUBLISH 消息里的 Property 字段。" - } - } - msg_payload_format_indicator { - desc { - en: """0 (0x00) Byte Indicates that the Payload is unspecified bytes, which is equivalent to not sending a Payload Format Indicator. -1 (0x01) Byte Indicates that the Payload is UTF-8 Encoded Character Data. The UTF-8 data in the Payload MUST be well-formed UTF-8 as defined by the Unicode specification and restated in RFC 3629.""" - zh: "载荷格式指示标识符,0 表示载荷是未指定格式的数据,相当于没有发送载荷格式指示;1 表示载荷是 UTF-8 编码的字符数据,载荷中的 UTF-8 数据必须是按照 Unicode 的规范和 RFC 3629 的标准要求进行编码的。" - } - } - msg_message_expiry_interval { - desc { - en: "Identifier of the Message Expiry Interval. If the Message Expiry Interval has passed and the Server has not managed to start onward delivery to a matching subscriber, then it MUST delete the copy of the message for that subscriber." - zh: "消息过期间隔标识符,以秒为单位。当消失已经过期时,如果服务端还没有开始向匹配的订阅者投递该消息,则服务端会删除该订阅者的消息副本。如果不设置,则消息永远不会过期" - } - } - msg_response_topic { - desc { - en: "Identifier of the Response Topic.The Response Topic MUST be a UTF-8 Encoded, It MUST NOT contain wildcard characters." - zh: "响应主题标识符, UTF-8 编码的字符串,用作响应消息的主题名。响应主题不能包含通配符,也不能包含多个主题,否则将造成协议错误。当存在响应主题时,消息将被视作请求报文。服务端在收到应用消息时必须将响应主题原封不动的发送给所有的订阅者。" - } - } - msg_correlation_data { - desc { - en: "Identifier of the Correlation Data. The Server MUST send the Correlation Data unaltered to all subscribers receiving the Application Message." - zh: "对比数据标识符,服务端在收到应用消息时必须原封不动的把对比数据发送给所有的订阅者。对比数据只对请求消息(Request Message)的发送端和响应消息(Response Message)的接收端有意义。" - } - } - msg_user_properties { - desc { - en: "The User-Property key-value pairs. Note: in case there are duplicated keys, only the last one will be used." - zh: "指定 MQTT 消息的 User Property 键值对。注意,如果出现重复的键,只有最后一个会保留。" - } - } - msg_content_type { - desc { - en: "The Content Type MUST be a UTF-8 Encoded String." - zh: "内容类型标识符,以 UTF-8 格式编码的字符串,用来描述应用消息的内容,服务端必须把收到的应用消息中的内容类型原封不动的发送给所有的订阅者。" - } - } +retain.desc: +"""A boolean field to indicate if this message should be retained.""" + +topic_name.desc: +"""Topic Name""" + } diff --git a/rel/i18n/emqx_mgmt_api_status.hocon b/rel/i18n/emqx_mgmt_api_status.hocon index d72fd0998..28278b747 100644 --- a/rel/i18n/emqx_mgmt_api_status.hocon +++ b/rel/i18n/emqx_mgmt_api_status.hocon @@ -1,48 +1,21 @@ emqx_mgmt_api_status { - get_status_api { - desc { - en: "Serves as a health check for the node. Returns a plain text response" - " describing the status of the node. This endpoint requires no" - " authentication.\n" - "\n" - "Returns status code 200 if the EMQX application is up and running, " - "503 otherwise." - "\n" - "This API was introduced in v5.0.10." - "\n" - "The GET `/status` endpoint (without the `/api/...` prefix) is also an alias" - " to this endpoint and works in the same way. This alias has been available since" - " v5.0.0." 
- zh: "作为节点的健康检查。 返回一个纯文本的响应,描述节点的状态。\n" - "\n" - "如果 EMQX 应用程序已经启动并运行,返回状态代码 200,否则返回 503。\n" - "\n" - "这个API是在v5.0.10中引入的。" - "\n" - "GET `/status`端点(没有`/api/...`前缀)也是这个端点的一个别名,工作方式相同。" - " 这个别名从v5.0.0开始就有了。" - } - label { - en: "Service health check" - zh: "服务健康检查" - } - } - get_status_response200 { - desc { - en: "Node emqx@127.0.0.1 is started\n" - "emqx is running" - zh: "Node emqx@127.0.0.1 is started\n" - "emqx is running" - } - } +get_status_api.desc: +"""Serves as a health check for the node. Returns a plain text response describing the status of the node. This endpoint requires no authentication. + +Returns status code 200 if the EMQX application is up and running, 503 otherwise. +This API was introduced in v5.0.10. +The GET `/status` endpoint (without the `/api/...` prefix) is also an alias to this endpoint and works in the same way. This alias has been available since v5.0.0.""" + +get_status_api.label: +"""Service health check""" + +get_status_response200.desc: +"""Node emqx@127.0.0.1 is started +emqx is running""" + +get_status_response503.desc: +"""Node emqx@127.0.0.1 is stopped +emqx is not_running""" - get_status_response503 { - desc { - en: "Node emqx@127.0.0.1 is stopped\n" - "emqx is not_running" - zh: "Node emqx@127.0.0.1 is stopped\n" - "emqx is not_running" - } - } } diff --git a/rel/i18n/emqx_modules_schema.hocon b/rel/i18n/emqx_modules_schema.hocon index 248f85341..25588b4e2 100644 --- a/rel/i18n/emqx_modules_schema.hocon +++ b/rel/i18n/emqx_modules_schema.hocon @@ -1,8 +1,13 @@ emqx_modules_schema { - rewrite { - desc { - en: """The topic rewriting function of EMQX supports rewriting topic A to topic B when the client subscribes to topics, publishes messages, and cancels subscriptions according to user-configured rules. +enable.desc: +"""Enable this feature""" + +max_delayed_messages.desc: +"""Maximum number of delayed messages (0 is no limit).""" + +rewrite.desc: +"""The topic rewriting function of EMQX supports rewriting topic A to topic B when the client subscribes to topics, publishes messages, and cancels subscriptions according to user-configured rules. Each rewrite rule consists of three parts: subject filter, regular expression, and target expression. Under the premise that the subject rewriting function is enabled, when EMQX receives a subject-based MQTT message such as a `PUBLISH` message, it will use the subject of the message to sequentially match the subject filter part of the rule in the configuration file. If the match is successful, @@ -13,78 +18,32 @@ It should be noted that EMQX uses reverse order to read the rewrite rules in the When a topic can match the topic filter of multiple topic rewrite rules at the same time, EMQX will only use the first rule it matches. Rewrite. If the regular expression in this rule does not match the subject of the MQTT message, the rewriting will fail, and no other rules will be attempted for rewriting. 
Therefore, users need to carefully design MQTT message topics and topic rewriting rules when using them.""" - zh: """EMQX 的主题重写功能支持根据用户配置的规则在客户端订阅主题、发布消息、取消订阅的时候将 A 主题重写为 B 主题。 -重写规则分为 Pub 规则和 Sub 规则,Pub 规则匹配 PUSHLISH 报文携带的主题,Sub 规则匹配 SUBSCRIBE、UNSUBSCRIBE 报文携带的主题。 -每条重写规则都由主题过滤器、正则表达式、目标表达式三部分组成。 -在主题重写功能开启的前提下,EMQX 在收到诸如 PUBLISH 报文等带有主题的 MQTT 报文时,将使用报文中的主题去依次匹配配置文件中规则的主题过滤器部分,一旦成功匹配,则使用正则表达式提取主题中的信息,然后替换至目标表达式以构成新的主题。 -目标表达式中可以使用 `$N` 这种格式的变量匹配正则表达中提取出来的元素,`$N` 的值为正则表达式中提取出来的第 N 个元素,比如 `$1` 即为正则表达式提取的第一个元素。 -需要注意的是,EMQX 使用倒序读取配置文件中的重写规则,当一条主题可以同时匹配多条主题重写规则的主题过滤器时,EMQX 仅会使用它匹配到的第一条规则进行重写,如果该条规则中的正则表达式与 MQTT 报文主题不匹配,则重写失败,不会再尝试使用其他的规则进行重写。 -因此用户在使用时需要谨慎的设计 MQTT 报文主题以及主题重写规则。""" - } - label { - en: """Topic Rewrite""" - zh: """主题重写""" - } - } - tr_source_topic { - desc { - en: """Source topic, specified by the client.""" - zh: """源主题,客户端业务指定的主题""" - } - label { - en: """Source Topic""" - zh: """源主题""" - } - } +rewrite.label: +"""Topic Rewrite""" - tr_action { - desc { - en: """Topic rewriting takes effect on the type of operation: +tr_action.desc: +"""Topic rewriting takes effect on the type of operation: - `subscribe`: Rewrite topic when client do subscribe. - `publish`: Rewrite topic when client do publish. - `all`: Both""" - zh: """主题重写在哪种操作上生效: - - `subscribe`:订阅时重写主题; - - `publish`:发布时重写主题; - -`all`:全部重写主题""" - } - label { - en: """Action""" - zh: """Action""" - } - } +tr_action.label: +"""Action""" - tr_re { - desc { - en: """Regular expressions""" - zh: """正则表达式""" - } - } +tr_dest_topic.desc: +"""Destination topic.""" - tr_dest_topic { - desc { - en: """Destination topic.""" - zh: """目标主题。""" - } - label { - en: """Destination Topic""" - zh: """目标主题""" - } - } +tr_dest_topic.label: +"""Destination Topic""" - enable { - desc { - en: "Enable this feature" - zh: "是否开启该功能" - } - } +tr_re.desc: +"""Regular expressions""" + +tr_source_topic.desc: +"""Source topic, specified by the client.""" + +tr_source_topic.label: +"""Source Topic""" - max_delayed_messages { - desc { - en: "Maximum number of delayed messages (0 is no limit)." - zh: "延迟消息的数量上限(0 代表无限)" - } - } } diff --git a/rel/i18n/emqx_mqttsn_schema.hocon b/rel/i18n/emqx_mqttsn_schema.hocon index 20c160b11..1541a8c78 100644 --- a/rel/i18n/emqx_mqttsn_schema.hocon +++ b/rel/i18n/emqx_mqttsn_schema.hocon @@ -1,64 +1,31 @@ emqx_mqttsn_schema { - mqttsn { - desc { - en: """The MQTT-SN Gateway configuration. + +mqttsn.desc: +"""The MQTT-SN Gateway configuration. This gateway only supports the v1.2 protocol""" - zh: """MQTT-SN 网关配置。当前实现仅支持 v1.2 版本""" - } - } - mqttsn_gateway_id { - desc { - en: """MQTT-SN Gateway ID. -When the broadcast option is enabled, the gateway will broadcast ADVERTISE message with this value""" - zh: """MQTT-SN 网关 ID。 -当 broadcast 打开时,MQTT-SN 网关会使用该 ID 来广播 ADVERTISE 消息""" - } - } +mqttsn_broadcast.desc: +"""Whether to periodically broadcast ADVERTISE messages""" - mqttsn_broadcast { - desc { - en: """Whether to periodically broadcast ADVERTISE messages""" - zh: """是否周期性广播 ADVERTISE 消息""" - } - } - - mqttsn_enable_qos3 { - desc { - en: """Allows connectionless clients to publish messages with a Qos of -1. +mqttsn_enable_qos3.desc: +"""Allows connectionless clients to publish messages with a Qos of -1. This feature is defined for very simple client implementations which do not support any other features except this one. There is no connection setup nor tear down, no registration nor subscription. 
The client just sends its 'PUBLISH' messages to a GW""" - zh: """是否允许无连接的客户端发送 QoS 等于 -1 的消息。 -该功能主要用于支持轻量的 MQTT-SN 客户端实现,它不会向网关建立连接,注册主题,也不会发起订阅;它只使用 QoS 为 -1 来发布消息""" - } - } - mqttsn_subs_resume { - desc { - en: """Whether to initiate all subscribed topic name registration messages to the client after the Session has been taken over by a new channel""" - zh: """在会话被重用后,网关是否主动向客户端注册对已订阅主题名称""" - } - } +mqttsn_gateway_id.desc: +"""MQTT-SN Gateway ID. +When the broadcast option is enabled, the gateway will broadcast ADVERTISE message with this value""" - mqttsn_predefined { - desc { - en: """The pre-defined topic IDs and topic names. +mqttsn_predefined.desc: +"""The pre-defined topic IDs and topic names. A 'pre-defined' topic ID is a topic ID whose mapping to a topic name is known in advance by both the client's application and the gateway""" - zh: """预定义主题列表。 -预定义的主题列表,是一组 主题 ID 和 主题名称 的映射关系。使用预先定义的主题列表,可以减少 MQTT-SN 客户端和网关对于固定主题的注册请求""" - } - } - mqttsn_predefined_id { - desc { - en: """Topic ID. Range: 1-65535""" - zh: """主题 ID。范围:1-65535""" - } - } +mqttsn_predefined_id.desc: +"""Topic ID. Range: 1-65535""" + +mqttsn_predefined_topic.desc: +"""Topic Name""" + +mqttsn_subs_resume.desc: +"""Whether to initiate all subscribed topic name registration messages to the client after the Session has been taken over by a new channel""" - mqttsn_predefined_topic { - desc { - en: """Topic Name""" - zh: """主题名称。注:不支持通配符""" - } - } } diff --git a/rel/i18n/emqx_plugins_schema.hocon b/rel/i18n/emqx_plugins_schema.hocon index c3fed1884..b72c87054 100644 --- a/rel/i18n/emqx_plugins_schema.hocon +++ b/rel/i18n/emqx_plugins_schema.hocon @@ -1,85 +1,55 @@ emqx_plugins_schema { - plugins { - desc { - en: """Manage EMQX plugins.
-Plugins can be pre-built as a part of EMQX package, -or installed as a standalone package in a location specified by -install_dir config key
-The standalone-installed plugins are referred to as 'external' plugins.""" - zh: """管理EMQX插件。
-插件可以是EMQX安装包中的一部分,也可以是一个独立的安装包。
-独立安装的插件称为“外部插件”。""" - } - label { - en: "Plugins" - zh: "插件" - } - } - state { - desc { - en: "A per-plugin config to describe the desired state of the plugin." - zh: "描述插件的状态" - } - label { - en: "State" - zh: "插件状态" - } - } - name_vsn { - desc { - en: """The {name}-{version} of the plugin.
-It should match the plugin application name-version as the for the plugin release package name
-For example: my_plugin-0.1.0.""" - zh: """插件的名称{name}-{version}。
-它应该与插件的发布包名称一致,如my_plugin-0.1.0。""" - } - label { - en: "Name-Version" - zh: "名称-版本" - } - } - enable { - desc { - en: "Set to 'true' to enable this plugin" - zh: "设置为“true”以启用此插件" - } - label { - en: "Enable" - zh: "启用" - } - } - states { - desc { - en: """An array of plugins in the desired states.
-The plugins are started in the defined order""" - zh: """一组插件的状态。插件将按照定义的顺序启动""" - } - label { - en: "States" - zh: "插件启动顺序及状态" - } - } - install_dir { - desc { - en: """The installation directory for the external plugins. + +check_interval.desc: +"""Check interval: check if the status of the plugins in the cluster is consistent,
+if the results of 3 consecutive checks are not consistent, then alarm.""" + +enable.desc: +"""Set to 'true' to enable this plugin""" + +enable.label: +"""Enable""" + +install_dir.desc: +"""The installation directory for the external plugins. The plugin beam files and configuration files should reside in the subdirectory named as emqx_foo_bar-0.1.0.
NOTE: For security reasons, this directory should **NOT** be writable by anyone except emqx (or any user which runs EMQX).""" - zh: "插件安装包的目录,出于安全考虑,该目录应该值允许 emqx,或用于运行 EMQX 服务的用户拥有写入权限。" - } - label { - en: "Install Directory" - zh: "安装目录" - } - } - check_interval { - desc { - en: """Check interval: check if the status of the plugins in the cluster is consistent,
-if the results of 3 consecutive checks are not consistent, then alarm.""" - zh: """检查间隔:检查集群中插件的状态是否一致,
-如果连续3次检查结果不一致,则报警。""" - } - } + +install_dir.label: +"""Install Directory""" + +name_vsn.desc: +"""The {name}-{version} of the plugin.
+It should match the plugin application name-version as the for the plugin release package name
+For example: my_plugin-0.1.0.""" + +name_vsn.label: +"""Name-Version""" + +plugins.desc: +"""Manage EMQX plugins.
+Plugins can be pre-built as a part of EMQX package, +or installed as a standalone package in a location specified by +install_dir config key
+The standalone-installed plugins are referred to as 'external' plugins.""" + +plugins.label: +"""Plugins""" + +state.desc: +"""A per-plugin config to describe the desired state of the plugin.""" + +state.label: +"""State""" + +states.desc: +"""An array of plugins in the desired states.
+The plugins are started in the defined order""" + +states.label: +"""States""" + } diff --git a/rel/i18n/emqx_prometheus_schema.hocon b/rel/i18n/emqx_prometheus_schema.hocon index e9a0fc11e..d79685a4d 100644 --- a/rel/i18n/emqx_prometheus_schema.hocon +++ b/rel/i18n/emqx_prometheus_schema.hocon @@ -1,95 +1,47 @@ emqx_prometheus_schema { - prometheus { - desc { - en: """Settings for reporting metrics to Prometheus""" - zh: """Prometheus 监控数据推送""" - } - label { - en: """Prometheus""" - zh: """Prometheus""" - } - } +enable.desc: +"""Turn Prometheus data pushing on or off""" - push_gateway_server { - desc { - en: """URL of Prometheus server""" - zh: """Prometheus 服务器地址""" - } - } - - interval { - desc { - en: """Data reporting interval""" - zh: """数据推送间隔""" - } - } - - headers { - desc { - en: """A list of HTTP Headers when pushing to Push Gateway.
+headers.desc: +"""A list of HTTP Headers when pushing to Push Gateway.
For example, { Authorization = "some-authz-tokens"}""" - zh: """推送到 Push Gateway 的 HTTP Headers 列表。
-例如, { Authorization = "some-authz-tokens"}""" - } - } - job_name { - desc { - en: """Job Name that is pushed to the Push Gateway. Available variables:
+interval.desc: +"""Data reporting interval""" + +job_name.desc: +"""Job Name that is pushed to the Push Gateway. Available variables:
- ${name}: Name of EMQX node.
- ${host}: Host name of EMQX node.
For example, when the EMQX node name is emqx@127.0.0.1 then the name variable takes value emqx and the host variable takes value 127.0.0.1.
Default value is: ${name}/instance/${name}~${host}""" - zh: """推送到 Push Gateway 的 Job 名称。可用变量为:
-- ${name}: EMQX 节点的名称。 -- ${host}: EMQX 节点主机名。 -例如,当 EMQX 节点名为 emqx@127.0.0.1 则 name 变量的值为 emqx,host 变量的值为 127.0.0.1
-默认值为: ${name}/instance/${name}~${host}""" - } - } - enable { - desc { - en: """Turn Prometheus data pushing on or off""" - zh: """开启或关闭 Prometheus 数据推送""" - } - } - vm_dist_collector { - desc { - en: """Enable or disable VM distribution collector, collects information about the sockets and processes involved in the Erlang distribution mechanism.""" - zh: """开启或关闭 VM 分布采集器,收集 Erlang 分布机制中涉及的套接字和进程的信息。""" - } - } - mnesia_collector { - desc { - en: """Enable or disable Mnesia collector, collects Mnesia metrics mainly using mnesia:system_info/1 .""" - zh: """开启或关闭 Mnesia 采集器, 使用 mnesia:system_info/1 收集 Mnesia 相关指标""" - } - } - vm_statistics_collector { - desc { - en: """Enable or disable VM statistics collector, collects Erlang VM metrics using erlang:statistics/1 .""" - zh: """开启或关闭 VM 统计采集器, 使用 erlang:statistics/1 收集 Erlang VM 相关指标""" - } - } +mnesia_collector.desc: +"""Enable or disable Mnesia collector, collects Mnesia metrics mainly using mnesia:system_info/1 .""" + +prometheus.desc: +"""Settings for reporting metrics to Prometheus""" + +prometheus.label: +"""Prometheus""" + +push_gateway_server.desc: +"""URL of Prometheus server""" + +vm_dist_collector.desc: +"""Enable or disable VM distribution collector, collects information about the sockets and processes involved in the Erlang distribution mechanism.""" + +vm_memory_collector.desc: +"""Enable or disable VM memory collector, collects information about memory dynamically allocated by the Erlang emulator using erlang:memory/0 , also provides basic (D)ETS statistics .""" + +vm_msacc_collector.desc: +"""Enable or disable VM msacc collector, collects microstate accounting metrics using erlang:statistics(microstate_accounting) .""" + +vm_statistics_collector.desc: +"""Enable or disable VM statistics collector, collects Erlang VM metrics using erlang:statistics/1 .""" + +vm_system_info_collector.desc: +"""Enable or disable VM system info collector, collects Erlang VM metrics using erlang:system_info/1 .""" - vm_system_info_collector { - desc { - en: """Enable or disable VM system info collector, collects Erlang VM metrics using erlang:system_info/1 .""" - zh: """开启或关闭 VM 系统信息采集器, 使用 erlang:system_info/1 收集 Erlang VM 相关指标""" - } - } - vm_memory_collector { - desc { - en: """Enable or disable VM memory collector, collects information about memory dynamically allocated by the Erlang emulator using erlang:memory/0 , also provides basic (D)ETS statistics .""" - zh: """开启或关闭 VM 内存采集器, 使用 erlang:memory/0 收集 Erlang 虚拟机动态分配的内存信息,同时提供基本的 (D)ETS 统计信息""" - } - } - vm_msacc_collector { - desc { - en: """Enable or disable VM msacc collector, collects microstate accounting metrics using erlang:statistics(microstate_accounting) .""" - zh: """开启或关闭 VM msacc 采集器, 使用 erlang:statistics(microstate_accounting) 收集微状态计数指标""" - } - } } diff --git a/rel/i18n/emqx_psk_schema.hocon b/rel/i18n/emqx_psk_schema.hocon index 60d45977a..1a99e1b19 100644 --- a/rel/i18n/emqx_psk_schema.hocon +++ b/rel/i18n/emqx_psk_schema.hocon @@ -1,8 +1,18 @@ emqx_psk_schema { - psk_authentication { - desc { - en: """PSK stands for 'Pre-Shared Keys'. +chunk_size.desc: +"""The size of each chunk used to import to the built-in database from PSK file""" + +enable.desc: +"""Whether to enable TLS PSK support""" + +init_file.desc: +"""If init_file is specified, EMQX will import PSKs from the file into the built-in database at startup for use by the runtime. +The file has to be structured line-by-line, each line must be in the format of PSKIdentity:SharedSecret. 
+For example: mydevice1:c2VjcmV0""" + +psk_authentication.desc: +"""PSK stands for 'Pre-Shared Keys'. This config to enable TLS-PSK authentication. Important! Make sure the SSL listener with only tlsv1.2 enabled, and also PSK cipher suites @@ -11,49 +21,8 @@ configured, such as RSA-PSK-AES256-GCM-SHA384. See listener SSL options config for more details. The IDs and secrets can be provided from a file which is configurable by the init_file field.""" - zh: """此配置用于启用 TLS-PSK 身份验证。 -PSK 是 “Pre-Shared-Keys” 的缩写。 - -注意: 确保 SSL 监听器仅启用了 'tlsv1.2',并且配置了PSK 密码套件,例如 'RSA-PSK-AES256-GCM-SHA384'。 - -可以通过查看监听器中的 SSL 选项,了解更多详细信息。 - -可以通过配置 'init_file' 来设置初始化的 ID 和 密钥""" - } - } - - enable { - desc { - en: "Whether to enable TLS PSK support" - zh: "是否开启 TLS PSK 支持" - } - } - - init_file { - desc { - en: """If init_file is specified, EMQX will import PSKs from the file into the built-in database at startup for use by the runtime. -The file has to be structured line-by-line, each line must be in the format of PSKIdentity:SharedSecret. -For example: mydevice1:c2VjcmV0""" - zh: """如果设置了初始化文件,EMQX 将在启动时从初始化文件中导入 PSK 信息到内建数据库中。 -这个文件需要按行进行组织,每一行必须遵守如下格式: PSKIdentity:SharedSecret -例如: mydevice1:c2VjcmV0""" - } - } - - separator { - desc { - en: "The separator between PSKIdentity and SharedSecret in the PSK file" - - zh: "PSK 文件中 PSKIdentitySharedSecret 之间的分隔符" - } - } - - chunk_size { - desc { - en: "The size of each chunk used to import to the built-in database from PSK file" - zh: "将 PSK 文件导入到内建数据时每个块的大小" - } - } +separator.desc: +"""The separator between PSKIdentity and SharedSecret in the PSK file""" } diff --git a/rel/i18n/emqx_resource_schema.hocon b/rel/i18n/emqx_resource_schema.hocon index f4a9982bc..8fc781794 100644 --- a/rel/i18n/emqx_resource_schema.hocon +++ b/rel/i18n/emqx_resource_schema.hocon @@ -1,214 +1,116 @@ emqx_resource_schema { - resource_opts { - desc { - en: """Resource options.""" - zh: """资源相关的选项。""" - } - label { - en: """Resource Options""" - zh: """资源选项""" - } - } +auto_restart_interval.desc: +"""The auto restart interval after the resource is disconnected.""" - creation_opts { - desc { - en: """Creation options.""" - zh: """资源启动相关的选项。""" - } - label { - en: """Creation Options""" - zh: """资源启动选项""" - } - } +auto_restart_interval.label: +"""Auto Restart Interval""" - worker_pool_size { - desc { - en: """The number of buffer workers. Only applicable for egress type bridges. -For bridges only have ingress direction data flow, it can be set to 0 otherwise must be greater than 0.""" - zh: """缓存队列 worker 数量。仅对 egress 类型的桥接有意义。当桥接仅有 ingress 方向时,可设置为 0,否则必须大于 0。""" - } - label { - en: """Buffer Pool Size""" - zh: """缓存池大小""" - } - } +batch_size.desc: +"""Maximum batch count. 
If equal to 1, there's effectively no batching.""" - health_check_interval { - desc { - en: """Health check interval.""" - zh: """健康检查间隔。""" - } - label { - en: """Health Check Interval""" - zh: """健康检查间隔""" - } - } +batch_size.label: +"""Max batch size""" - resume_interval { - desc { - en: """The interval at which the buffer worker attempts to resend failed requests in the inflight window.""" - zh: """在发送失败后尝试重传飞行窗口中的请求的时间间隔。""" - } - label { - en: """Resume Interval""" - zh: """重试时间间隔""" - } - } +batch_time.desc: +"""Maximum waiting interval when accumulating a batch at a low message rates for more efficient resource usage.""" - start_after_created { - desc { - en: """Whether start the resource right after created.""" - zh: """是否在创建资源后立即启动资源。""" - } - label { - en: """Start After Created""" - zh: """创建后立即启动""" - } - } +batch_time.label: +"""Max batch wait time""" - start_timeout { - desc { - en: """Time interval to wait for an auto-started resource to become healthy before responding resource creation requests.""" - zh: """在回复资源创建请求前等待资源进入健康状态的时间。""" - } - label { - en: """Start Timeout""" - zh: """启动超时时间""" - } - } +buffer_mode.desc: +"""Buffer operation mode. +memory_only: Buffer all messages in memory.volatile_offload: Buffer message in memory first, when up to certain limit (see buffer_seg_bytes config for more information), then start offloading messages to disk""" - auto_restart_interval { - desc { - en: """The auto restart interval after the resource is disconnected.""" - zh: """资源断开以后,自动重连的时间间隔。""" - } - label { - en: """Auto Restart Interval""" - zh: """自动重连间隔""" - } - } +buffer_mode.label: +"""Buffer Mode""" - query_mode { - desc { - en: """Query mode. Optional 'sync/async', default 'async'.""" - zh: """请求模式。可选 '同步/异步',默认为'异步'模式。""" - } - label { - en: """Query mode""" - zh: """请求模式""" - } - } +buffer_seg_bytes.desc: +"""Applicable when buffer mode is set to volatile_offload. +This value is to specify the size of each on-disk buffer file.""" - request_timeout { - desc { - en: """Starting from the moment when the request enters the buffer, if the request remains in the buffer for the specified time or is sent but does not receive a response or acknowledgement in time, the request is considered expired.""" - zh: """从请求进入缓冲区开始计时,如果请求在规定的时间内仍停留在缓冲区内或者已发送但未能及时收到响应或确认,该请求将被视为过期。""" - } - label { - en: """Request Expiry""" - zh: """请求超期""" - } - } +buffer_seg_bytes.label: +"""Segment File Bytes""" - enable_batch { - desc { - en: """Batch mode enabled.""" - zh: """启用批量模式。""" - } - label { - en: """Enable batch""" - zh: """启用批量模式""" - } - } +creation_opts.desc: +"""Creation options.""" - enable_queue { - desc { - en: """Enable disk buffer queue (only applicable for egress bridges). +creation_opts.label: +"""Creation Options""" + +enable_batch.desc: +"""Batch mode enabled.""" + +enable_batch.label: +"""Enable batch""" + +enable_queue.desc: +"""Enable disk buffer queue (only applicable for egress bridges). When Enabled, messages will be buffered on disk when the bridge connection is down. When disabled the messages are buffered in RAM only.""" - zh: """启用磁盘缓存队列(仅对 egress 方向桥接有用)。""" - } - label { - en: """Enable disk buffer queue""" - zh: """启用磁盘缓存队列""" - } - } - inflight_window { - desc { - en: """Query inflight window. 
When query_mode is set to async, this config has to be set to 1 if messages from the same MQTT client have to be strictly ordered.""" - zh: """请求飞行队列窗口大小。当请求模式为异步时,如果需要严格保证来自同一 MQTT 客户端的消息有序,则必须将此值设为 1。""""" - } - label { - en: """Inflight window""" - zh: """请求飞行队列窗口""" - } - } +enable_queue.label: +"""Enable disk buffer queue""" - batch_size { - desc { - en: """Maximum batch count. If equal to 1, there's effectively no batching.""" - zh: """最大批量请求大小。如果设为1,则无批处理。""" - } - label { - en: """Max batch size""" - zh: """最大批量请求大小""" - } - } +health_check_interval.desc: +"""Health check interval.""" - batch_time { - desc { - en: """Maximum waiting interval when accumulating a batch at a low message rates for more efficient resource usage.""" - zh: """在较低消息率情况下尝试累积批量输出时的最大等待间隔,以提高资源的利用率。""" - } - label { - en: """Max batch wait time""" - zh: """批量等待最大间隔""" - } - } +health_check_interval.label: +"""Health Check Interval""" - max_buffer_bytes { - desc { - en: """Maximum number of bytes to buffer for each buffer worker.""" - zh: """每个缓存 worker 允许使用的最大字节数。""" - } - label { - en: """Max buffer queue size""" - zh: """缓存队列最大长度""" - } - } +inflight_window.desc: +"""Query inflight window. When query_mode is set to async, this config has to be set to 1 if messages from the same MQTT client have to be strictly ordered.""" - buffer_seg_bytes { - desc { - en: "Applicable when buffer mode is set to volatile_offload.\n" - "This value is to specify the size of each on-disk buffer file." - zh: "当缓存模式是 volatile_offload 时适用。" - "该配置用于指定缓存到磁盘上的文件的大小。" - } - label { - en: "Segment File Bytes" - zh: "缓存文件大小" - } - } +inflight_window.label: +"""Inflight window""" - buffer_mode { - desc { - en: "Buffer operation mode.\n" - "memory_only: Buffer all messages in memory." - "volatile_offload: Buffer message in memory first, when up to certain limit" - " (see buffer_seg_bytes config for more information), then start offloading messages to disk" - zh: "队列操作模式。\n" - "memory_only: 所有的消息都缓存在内存里。" - "volatile_offload: 先将消息缓存在内存中,当内存中的消息堆积超过一定限制" - "(配置项 buffer_seg_bytes 指定该限制)后," - " 消息会开始缓存到磁盘上。" - } - label { - en: "Buffer Mode" - zh: "缓存模式" - } - } +max_buffer_bytes.desc: +"""Maximum number of bytes to buffer for each buffer worker.""" +max_buffer_bytes.label: +"""Max buffer queue size""" + +query_mode.desc: +"""Query mode. Optional 'sync/async', default 'async'.""" + +query_mode.label: +"""Query mode""" + +request_timeout.desc: +"""Starting from the moment when the request enters the buffer, if the request remains in the buffer for the specified time or is sent but does not receive a response or acknowledgement in time, the request is considered expired.""" + +request_timeout.label: +"""Request Expiry""" + +resource_opts.desc: +"""Resource options.""" + +resource_opts.label: +"""Resource Options""" + +resume_interval.desc: +"""The interval at which the buffer worker attempts to resend failed requests in the inflight window.""" + +resume_interval.label: +"""Resume Interval""" + +start_after_created.desc: +"""Whether start the resource right after created.""" + +start_after_created.label: +"""Start After Created""" + +start_timeout.desc: +"""Time interval to wait for an auto-started resource to become healthy before responding resource creation requests.""" + +start_timeout.label: +"""Start Timeout""" + +worker_pool_size.desc: +"""The number of buffer workers. Only applicable for egress type bridges. 
+For bridges only have ingress direction data flow, it can be set to 0 otherwise must be greater than 0.""" + +worker_pool_size.label: +"""Buffer Pool Size""" } diff --git a/rel/i18n/emqx_retainer_api.hocon b/rel/i18n/emqx_retainer_api.hocon index aced87076..5c7084778 100644 --- a/rel/i18n/emqx_retainer_api.hocon +++ b/rel/i18n/emqx_retainer_api.hocon @@ -1,143 +1,63 @@ emqx_retainer_api { - get_config_api { - desc { - en: "View config" - zh: "查看配置内容" - } - } +config_content.desc: +"""The config content""" - config_content { - desc { - en: "The config content" - zh: "配置内容" - } - } +config_not_found.desc: +"""Config not found.""" - config_not_found { - desc { - en: "Config not found." - zh: "配置不存在" - } - } +delete_matching_api.desc: +"""Delete matching messages.""" - update_retainer_api { - desc { - en: "Update retainer config." - zh: "更新配置" - } - } +from_clientid.desc: +"""The clientid of publisher.""" - update_config_success { - desc { - en: "Update configs successfully." - zh: "配置更新成功" - } - } +from_username.desc: +"""The username of publisher.""" - update_config_failed { - desc { - en: "Update config failed" - zh: "配置更新失败" - } - } +get_config_api.desc: +"""View config""" - list_retained_api { - desc { - en: "List retained messages." - zh: "查看保留消息列表" - } - } +list_retained_api.desc: +"""List retained messages.""" - retained_list { - desc { - en: "Retained messages list." - zh: "保留消息列表" - } - } +lookup_api.desc: +"""Lookup a message by a topic without wildcards.""" - unsupported_backend { - desc { - en: "Unsupported backend." - zh: "不支持的后端" - } - } +message_detail.desc: +"""Details of the message.""" - lookup_api { - desc { - en: "Lookup a message by a topic without wildcards." - zh: "通过不带通配符的主题查看对应的保留消息" - } - } +message_not_exist.desc: +"""Viewed message doesn't exist.""" - message_detail { - desc { - en: "Details of the message." - zh: "消息详情" - } - } +msgid.desc: +"""Message ID.""" - message_not_exist { - desc { - en: "Viewed message doesn't exist." - zh: "消息不存在" - } - } +payload.desc: +"""Payload.""" - delete_matching_api { - desc { - en: "Delete matching messages." - zh: "删除对应的消息" - } - } +publish_at.desc: +"""Message publish time, RFC 3339 format.""" - topic { - desc { - en: "Topic." - zh: "主题" - } - } +qos.desc: +"""QoS.""" - msgid { - desc { - en: "Message ID." - zh: "消息 ID" - } - } +retained_list.desc: +"""Retained messages list.""" - qos { - desc { - en: "QoS." - zh: "QoS" - } - } +topic.desc: +"""Topic.""" - publish_at { - desc { - en: "Message publish time, RFC 3339 format." - zh: "消息发送时间, RFC 3339 格式" - } - } +unsupported_backend.desc: +"""Unsupported backend.""" - from_clientid { - desc { - en: "The clientid of publisher." - zh: "发布者的 ClientID" - } - } +update_config_failed.desc: +"""Update config failed""" - from_username { - desc { - en: "The username of publisher." - zh: "发布者的用户名" - } - } +update_config_success.desc: +"""Update configs successfully.""" - payload { - desc { - en: "Payload." - zh: "消息内容" - } - } +update_retainer_api.desc: +"""Update retainer config.""" } diff --git a/rel/i18n/emqx_retainer_schema.hocon b/rel/i18n/emqx_retainer_schema.hocon index 274c260d4..9b2905da1 100644 --- a/rel/i18n/emqx_retainer_schema.hocon +++ b/rel/i18n/emqx_retainer_schema.hocon @@ -1,105 +1,49 @@ emqx_retainer_schema { - enable { - desc { - en: "Enable retainer feature" - zh: "是否开启消息保留功能" - } - } +backend.desc: +"""Settings for the database storing the retained messages.""" - msg_expiry_interval { - desc { - en: "Message retention time. 0 means message will never be expired." 
- zh: "消息保留时间。0 代表永久保留" - } - } - - flow_control { - desc { - en: "Flow control." - zh: "流控设置" - } - } - - msg_clear_interval { - desc { - en: """Periodic interval for cleaning up expired messages. -Never clear if the value is 0.""" - zh: "消息清理间隔。0 代表不进行清理" - } - } - - max_payload_size { - desc { - en: "Maximum retained message size." - zh: "消息大小最大值" - } - } - - stop_publish_clear_msg { - desc { - en: """When the retained flag of the `PUBLISH` message is set and Payload is empty, -whether to continue to publish the message. -See: -http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718038""" - zh: """是否不发送保留消息的清理消息,在 MQTT 5.0 中如果一条保留消息的消息体为空,则会清除掉之前存储 -的对应的保留消息,通过这个值控制是否停止发送清理消息""" - } - } - - backend { - desc { - en: "Settings for the database storing the retained messages." - zh: "保留消息的存储后端" - } - } - - mnesia_config_type { - desc { - en: "Backend type." - zh: "后端类型" - } - } - - mnesia_config_storage_type { - desc { - en: "Specifies whether the messages are stored in RAM or persisted on disc." - zh: "选择消息是存放在磁盘还是内存中" - } - } - - max_retained_messages { - desc { - en: "Maximum number of retained messages. 0 means no limit." - zh: "消息保留的数量上限。0 表示无限" - } - } - - batch_read_number { - desc { - en: "Size of the batch when reading messages from storage. 0 means no limit." - zh: "从存储后端批量加载时的每批数量上限,0 代表一次性读取" - } - } - - batch_deliver_number { - desc { - en: "The number of retained messages can be delivered per batch." - zh: "批量派发时每批的数量。0 代表一次性全部派发" - } - } - - batch_deliver_limiter { - desc { - en: """The rate limiter name for retained messages' delivery. +batch_deliver_limiter.desc: +"""The rate limiter name for retained messages' delivery. Limiter helps to avoid delivering too many messages to the client at once, which may cause the client to block or crash, or drop messages due to exceeding the size of the message queue. The names of the available rate limiters are taken from the existing rate limiters under `limiter.batch`. If this field is empty, limiter is not used.""" - zh: """批量发送的限流器的名称。 -限流器可以用来防止短时间内向客户端发送太多的消息,从而避免过多的消息导致客户端队列堵塞甚至崩溃。 -这个名称需要是指向 `limiter.batch` 下的一个真实存在的限流器。 -如果这个字段为空,则不使用限流器。""" - } - } + +batch_deliver_number.desc: +"""The number of retained messages can be delivered per batch.""" + +batch_read_number.desc: +"""Size of the batch when reading messages from storage. 0 means no limit.""" + +enable.desc: +"""Enable retainer feature""" + +flow_control.desc: +"""Flow control.""" + +max_payload_size.desc: +"""Maximum retained message size.""" + +max_retained_messages.desc: +"""Maximum number of retained messages. 0 means no limit.""" + +mnesia_config_storage_type.desc: +"""Specifies whether the messages are stored in RAM or persisted on disc.""" + +mnesia_config_type.desc: +"""Backend type.""" + +msg_clear_interval.desc: +"""Periodic interval for cleaning up expired messages. +Never clear if the value is 0.""" + +msg_expiry_interval.desc: +"""Message retention time. 0 means message will never be expired.""" + +stop_publish_clear_msg.desc: +"""When the retained flag of the `PUBLISH` message is set and Payload is empty, +whether to continue to publish the message. 
+See: +http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718038""" } diff --git a/rel/i18n/emqx_rewrite_api.hocon b/rel/i18n/emqx_rewrite_api.hocon index 91f15cb8c..7e090e590 100644 --- a/rel/i18n/emqx_rewrite_api.hocon +++ b/rel/i18n/emqx_rewrite_api.hocon @@ -1,25 +1,12 @@ emqx_rewrite_api { - list_topic_rewrite_api { - desc { - en: """List all rewrite rules""" - zh: """列出全部主题重写规则""" - } - } +list_topic_rewrite_api.desc: +"""List all rewrite rules""" - update_topic_rewrite_api { - desc { - en: """Update all rewrite rules""" - zh: """更新全部主题重写规则""" - } - } +update_topic_rewrite_api.desc: +"""Update all rewrite rules""" - - update_topic_rewrite_api_response413 { - desc { - en: """Rules count exceed max limit""" - zh: """超出主题重写规则数量上限""" - } - } +update_topic_rewrite_api_response413.desc: +"""Rules count exceed max limit""" } diff --git a/rel/i18n/emqx_rule_api_schema.hocon b/rel/i18n/emqx_rule_api_schema.hocon index 0d8253223..29ecaa18e 100644 --- a/rel/i18n/emqx_rule_api_schema.hocon +++ b/rel/i18n/emqx_rule_api_schema.hocon @@ -1,696 +1,381 @@ emqx_rule_api_schema { - event_event_type { - desc { - en: "Event Type" - zh: "事件类型" - } - label: { - en: "Event Type" - zh: "事件类型" - } - } +event_action.desc: +"""Publish or Subscribe""" - event_id { - desc { - en: "Message ID" - zh: "消息 ID" - } - label: { - en: "Message ID" - zh: "消息 ID" - } - } +event_action.label: +"""Publish or Subscribe""" - event_clientid { - desc { - en: "The Client ID" - zh: "客户端 ID" - } - label: { - en: "Client ID" - zh: "客户端 ID" - } - } +event_payload.desc: +"""The Message Payload""" - event_username { - desc { - en: "Username" - zh: "用户名" - } - label: { - en: "Username" - zh: "用户名" - } - } +event_payload.label: +"""Message Payload""" - event_payload { - desc { - en: "The Message Payload" - zh: "消息负载" - } - label: { - en: "Message Payload" - zh: "消息负载" - } - } +metrics_actions_failed_out_of_service.desc: +"""How much times the rule failed to call actions due to the action is out of service. For example, a bridge is disabled or stopped.""" - event_peerhost { - desc { - en: "The IP Address of the Peer Client" - zh: "对等客户端的 IP 地址" - } - label: { - en: "Peer IP Address" - zh: "对等客户端的 IP" - } - } +metrics_actions_failed_out_of_service.label: +"""Fail Action""" - event_topic { - desc { - en: "Message Topic" - zh: "消息主题" - } - label: { - en: "Message Topic" - zh: "消息主题" - } - } +metrics_actions_failed_unknown.desc: +"""How much times the rule failed to call actions due to to an unknown error.""" - event_publish_received_at { - desc { - en: "The Time that this Message is Received" - zh: "消息被接受的时间" - } - label: { - en: "Message Received Time" - zh: "消息被接受的时间" - } - } +metrics_actions_failed_unknown.label: +"""Fail Action""" - event_qos { - desc { - en: "The Message QoS" - zh: "消息的 QoS" - } - label: { - en: "Message QoS" - zh: "消息 QoS" - } - } +event_server.desc: +"""The IP address (or hostname) and port of the MQTT broker, in IP:Port format""" - event_from_clientid { - desc { - en: "The Client ID" - zh: "事件来源客户端的 ID" - } - label: { - en: "Client ID" - zh: "客户端 ID" - } - } +event_server.label: +"""Server IP And Port""" - event_from_username { - desc { - en: "The User Name" - zh: "事件来源客户端的用户名" - } - label: { - en: "Username" - zh: "用户名" - } - } +metrics_actions_total.desc: +"""How much times the actions are called by the rule. 
This value may several times of 'matched', depending on the number of the actions of the rule.""" - event_mountpoint { - desc { - en: "The Mountpoint" - zh: "挂载点" - } - label: { - en: "Mountpoint" - zh: "挂载点" - } - } +metrics_actions_total.label: +"""Action Total""" - event_peername { - desc { - en: "The IP Address and Port of the Peer Client" - zh: "对等客户端的 IP 地址和端口" - } - label: { - en: "IP Address And Port" - zh: "IP 地址和端口" - } - } +event_ctx_disconnected_da.desc: +"""The Time that this Client is Disconnected""" - event_sockname { - desc { - en: "The IP Address and Port of the Local Listener" - zh: "本地监听的 IP 地址和端口" - } - label: { - en: "IP Address And Port" - zh: "IP 地址和端口" - } - } +event_ctx_disconnected_da.label: +"""Disconnected Time""" - event_proto_name { - desc { - en: "Protocol Name" - zh: "协议名称" - } - label: { - en: "Protocol Name" - zh: "协议名称" - } - } +event_topic.desc: +"""Message Topic""" - event_proto_ver { - desc { - en: "Protocol Version" - zh: "协议版本" - } - label: { - en: "Protocol Version" - zh: "协议版本" - } - } +event_topic.label: +"""Message Topic""" - event_keepalive { - desc { - en: "KeepAlive" - zh: "保持连接" - } - label: { - en: "KeepAlive" - zh: "保持连接" - } - } +event_peername.desc: +"""The IP Address and Port of the Peer Client""" - event_clean_start { - desc { - en: "Clean Start" - zh: "清除会话" - } - label: { - en: "Clean Start" - zh: "清除会话" - } - } +event_peername.label: +"""IP Address And Port""" - event_expiry_interval { - desc { - en: "Expiry Interval" - zh: "到期间隔" - } - label: { - en: "Expiry Interval" - zh: "到期间隔" - } - } +metrics_sql_passed.desc: +"""How much times the SQL is passed""" - event_is_bridge { - desc { - en: "Is Bridge" - zh: "是否桥接" - } - label: { - en: "Is Bridge" - zh: "是否桥接" - } - } +metrics_sql_passed.label: +"""SQL Passed""" - event_connected_at { - desc { - en: "The Time that this Client is Connected" - zh: "客户端连接完成时的时刻" - } - label: { - en: "Connected Time" - zh: "连接完成时的时刻" - } - } +test_context.desc: +"""The context of the event for testing""" - event_action { - desc { - en: "Publish or Subscribe" - zh: "订阅或发布" - } - label: { - en: "Publish or Subscribe" - zh: "订阅或发布" - } - } +test_context.label: +"""Event Conetxt""" - event_authz_source { - desc { - en: "Cache, Plugs or Default" - zh: "缓存,插件或者默认值" - } - label: { - en: "Auth Source" - zh: "认证源" - } - } +node_node.desc: +"""The node name""" - event_result { - desc { - en: "Allow or Deny" - zh: "允许或禁止" - } - label: { - en: "Auth Result" - zh: "认证结果" - } - } +node_node.label: +"""Node Name""" - event_server { - desc { - en: "The IP address (or hostname) and port of the MQTT broker, in IP:Port format" - zh: "MQTT broker的 IP 地址(或主机名)和端口,采用 IP:Port 格式" - } - label: { - en: "Server IP And Port" - zh: "服务器 IP 地址和端口" - } - } +event_from_clientid.desc: +"""The Client ID""" - event_dup { - desc { - en: "The DUP flag of the MQTT message" - zh: "MQTT 消息的 DUP 标志" - } - label: { - en: "DUP Flag" - zh: "DUP 标志" - } - } +event_from_clientid.label: +"""Client ID""" - event_retain { - desc { - en: "If is a retain message" - zh: "是否是保留消息" - } - label: { - en: "Retain Message" - zh: "保留消息" - } - } +event_keepalive.desc: +"""KeepAlive""" - event_ctx_dropped { - desc { - en: "The Reason for Dropping" - zh: "消息被丢弃的原因" - } - label: { - en: "Dropped Reason" - zh: "丢弃原因" - } - } +event_keepalive.label: +"""KeepAlive""" - event_ctx_disconnected_reason { - desc { - en: "The Reason for Disconnect" - zh: "断开连接的原因" - } - label: { - en: "Disconnect Reason" - zh: "断开连接原因" - } - } +event_connected_at.desc: +"""The Time that this 
Client is Connected""" - event_ctx_disconnected_da { - desc { - en: "The Time that this Client is Disconnected" - zh: "客户端断开连接的时刻" - } - label: { - en: "Disconnected Time" - zh: "客户端断开连接时刻" - } - } +event_connected_at.label: +"""Connected Time""" - event_ctx_connack_reason_code { - desc { - en: "The reason code" - zh: "错误码" - } - label: { - en: "Reason Code" - zh: "错误码" - } - } +metrics_sql_failed_exception.desc: +"""How much times the SQL is failed due to exceptions. This may because of a crash when calling a SQL function, or trying to do arithmetic operation on undefined variables""" - rule_id { - desc { - en: "The ID of the rule" - zh: "规则的 ID" - } - label: { - en: "Rule ID" - zh: "规则 ID" - } - } +metrics_sql_failed_exception.label: +"""SQL Exception""" - node_node { - desc { - en: "The node name" - zh: "节点名字" - } - label: { - en: "Node Name" - zh: "节点名字" - } - } +event_from_username.desc: +"""The User Name""" - metrics_sql_matched { - desc { - en: "How much times the FROM clause of the SQL is matched." - zh: "SQL 的 FROM 子句匹配的次数。" - } - label: { - en: "Matched" - zh: "命中数" - } - } +event_from_username.label: +"""Username""" - metrics_sql_matched_rate { - desc { - en: "The rate of matched, times/second" - zh: "命中速率,次/秒" - } - label: { - en: "命中速率" - zh: "Matched Rate" - } - } +event_ctx_connack_reason_code.desc: +"""The reason code""" - metrics_sql_matched_rate_max { - desc { - en: "The max rate of matched, times/second" - zh: "最大命中速率,次/秒" - } - label: { - en: "Max Matched Rate" - zh: "最大命中速率" - } - } +event_ctx_connack_reason_code.label: +"""Reason Code""" - metrics_sql_matched_rate_last5m { - desc { - en: "The average rate of matched in last 5 minutes, times/second" - zh: "5分钟平均命中速率,次/秒" - } - label: { - en: "Average Matched Rate" - zh: "平均命中速率" - } - } +rs_description.desc: +"""The description""" - metrics_sql_passed { - desc { - en: "How much times the SQL is passed" - zh: "SQL 通过的次数" - } - label: { - en: "SQL Passed" - zh: "SQL 通过" - } - } +rs_description.label: +"""Description""" - metrics_sql_failed { - desc { - en: "How much times the SQL is failed" - zh: "SQL 失败的次数" - } - label: { - en: "SQL Failed" - zh: "SQL 失败" - } - } +rule_id.desc: +"""The ID of the rule""" - metrics_sql_failed_exception { - desc { - en: "How much times the SQL is failed due to exceptions. This may because of a crash when calling a SQL function, or trying to do arithmetic operation on undefined variables" - zh: "SQL 由于执行异常而失败的次数。 这可能是因为调用 SQL 函数时崩溃,或者试图对未定义的变量进行算术运算" - } - label: { - en: "SQL Exception" - zh: "SQL 执行异常" - } - } +rule_id.label: +"""Rule ID""" - metrics_sql_failed_unknown { - desc { - en: "How much times the SQL is failed due to an unknown error." - zh: "由于未知错误导致 SQL 失败的次数。" - } - label: { - en: "SQL Unknown Error" - zh: "SQL 未知错误" - } - } +rs_event.desc: +"""The event topics""" - metrics_actions_total { - desc { - en: "How much times the actions are called by the rule. This value may several times of 'matched', depending on the number of the actions of the rule." - zh: "规则调用输出的次数。 该值可能是“sql.matched”的几倍,具体取决于规则输出的数量。" - } - label: { - en: "Action Total" - zh: "调用输出次数" - } - } +rs_event.label: +"""Event Topics""" - metrics_actions_success { - desc { - en: "How much times the rule success to call the actions." - zh: "规则成功调用输出的次数。" - } - label: { - en: "Success Action" - zh: "成功调用输出次数" - } - } +root_rule_engine.desc: +"""Rule engine configurations. This API can be used to change EMQX rule engine settings. But not for the rules. 
To list, create, or update rules, call the '/rules' API instead.""" - metrics_actions_failed { - desc { - en: "How much times the rule failed to call the actions." - zh: "规则调用输出失败的次数。" - } - label: { - en: "Failed Action" - zh: "调用输出失败次数" - } - } +root_rule_engine.label: +"""Rule engine configuration""" - metrics_actions_failed_out_of_service { - desc { - en: "How much times the rule failed to call actions due to the action is out of service. For example, a bridge is disabled or stopped." - zh: "由于输出停止服务而导致规则调用输出失败的次数。 例如,桥接被禁用或停止。" - } - label: { - en: "Fail Action" - zh: "调用输出失败次数" - } - } +event_sockname.desc: +"""The IP Address and Port of the Local Listener""" - metrics_actions_failed_unknown { - desc { - en: "How much times the rule failed to call actions due to to an unknown error." - zh: "由于未知错误,规则调用输出失败的次数。" - } - label: { - en: "Fail Action" - zh: "调用输出失败次数" - } - } +event_sockname.label: +"""IP Address And Port""" - test_context { - desc { - en: "The context of the event for testing" - zh: "测试事件的上下文" - } - label: { - en: "Event Conetxt" - zh: "事件上下文" - } - } +event_qos.desc: +"""The Message QoS""" - test_sql { - desc { - en: "The SQL of the rule for testing" - zh: "测试的 SQL" - } - label: { - en: "Test SQL" - zh: "测试 SQL" - } - } +event_qos.label: +"""Message QoS""" - rs_event { - desc { - en: "The event topics" - zh: "事件主题" - } - label: { - en: "Event Topics" - zh: "事件主题" - } - } +event_mountpoint.desc: +"""The Mountpoint""" - rs_title { - desc { - en: "The title" - zh: "标题" - } - label: { - en: "Title" - zh: "标题" - } - } +event_mountpoint.label: +"""Mountpoint""" - rs_description { - desc { - en: "The description" - zh: "描述" - } - label: { - en: "Description" - zh: "描述" - } - } +rs_title.desc: +"""The title""" - rs_columns { - desc { - en: "The columns" - zh: "列" - } - label: { - en: "Column" - zh: "列" - } - } +rs_title.label: +"""Title""" - rs_test_columns { - desc { - en: "The test columns" - zh: "测试列" - } - label: { - en: "Test Columns" - zh: "测试列" - } - } +ri_metrics.desc: +"""The metrics of the rule""" - rs_sql_example { - desc { - en: "The sql_example" - zh: "SQL 例子" - } - label: { - en: "SQL Example" - zh: "SQL 例子" - } - } +ri_metrics.label: +"""Rule Metrics""" - ri_metrics { - desc { - en: "The metrics of the rule" - zh: "规则的计数器" - } - label: { - en: "Rule Metrics" - zh: "规则计数器" - } - } +event_retain.desc: +"""If is a retain message""" - ri_node_metrics { - desc { - en: "The metrics of the rule for each node" - zh: "每个节点的规则计数器" - } - label: { - en: "Each Node Rule Metrics" - zh: "每个节点规则计数器" - } - } +event_retain.label: +"""Retain Message""" - ri_from { - desc { - en: "The topics of the rule" - zh: "规则指定的主题" - } - label: { - en: "Topics of Rule" - zh: "规则指定的主题" - } - } +event_event_type.desc: +"""Event Type""" - ri_created_at { - desc { - en: "The created time of the rule" - zh: "规则创建时间" - } - label: { - en: "Rule Create Time" - zh: "规则创建时间" - } - } +event_event_type.label: +"""Event Type""" - root_rule_engine { - desc { - en: "Rule engine configurations. This API can be used to change EMQX rule engine settings. But not for the rules. To list, create, or update rules, call the '/rules' API instead." 
- zh: "规则引擎配置。该 API 可用于查看和修改规则引擎相关的一些设置。但不可用于规则,如需查看或修改规则,请调用 '/rules' API 进行操作。" - } - label: { - en: "Rule engine configuration" - zh: "规则引擎配置" - } - } +event_expiry_interval.desc: +"""Expiry Interval""" - root_rule_creation { - desc { - en: "Schema for creating rules" - zh: "用于创建规则的 Schema" - } - label: { - en: "Create Schema" - zh: "用于创建规则的 Schema" - } - } +event_expiry_interval.label: +"""Expiry Interval""" - root_rule_info { - desc { - en: "Schema for rule info" - zh: "用于规则信息的 Schema" - } - label: { - en: "Info Schema" - zh: "用于规则信息的 Schema" - } - } +metrics_sql_matched.desc: +"""How much times the FROM clause of the SQL is matched.""" - root_rule_events { - desc { - en: "Schema for rule events" - zh: "用于事件的 Schema" - } - label: { - en: "Rule Events Schema" - zh: "用于规则事件的 Schema" - } - } +metrics_sql_matched.label: +"""Matched""" - root_rule_test { - desc { - en: "Schema for testing rules" - zh: "用于规则测试的 Schema" - } - label: { - en: "Rule Test Schema" - zh: "用于规则测试的 Schema" - } - } +event_clientid.desc: +"""The Client ID""" + +event_clientid.label: +"""Client ID""" + +metrics_actions_success.desc: +"""How much times the rule success to call the actions.""" + +metrics_actions_success.label: +"""Success Action""" + +metrics_actions_failed.desc: +"""How much times the rule failed to call the actions.""" + +metrics_actions_failed.label: +"""Failed Action""" + +metrics_sql_matched_rate.desc: +"""The rate of matched, times/second""" + +metrics_sql_matched_rate.label: +"""命中速率""" + +event_proto_ver.desc: +"""Protocol Version""" + +event_proto_ver.label: +"""Protocol Version""" + +event_publish_received_at.desc: +"""The Time that this Message is Received""" + +event_publish_received_at.label: +"""Message Received Time""" + +metrics_sql_matched_rate_last5m.desc: +"""The average rate of matched in last 5 minutes, times/second""" + +metrics_sql_matched_rate_last5m.label: +"""Average Matched Rate""" + +event_is_bridge.desc: +"""Is Bridge""" + +event_is_bridge.label: +"""Is Bridge""" + +event_authz_source.desc: +"""Cache, Plugs or Default""" + +event_authz_source.label: +"""Auth Source""" + +metrics_sql_failed_unknown.desc: +"""How much times the SQL is failed due to an unknown error.""" + +metrics_sql_failed_unknown.label: +"""SQL Unknown Error""" + +metrics_sql_failed.desc: +"""How much times the SQL is failed""" + +metrics_sql_failed.label: +"""SQL Failed""" + +event_ctx_dropped.desc: +"""The Reason for Dropping""" + +event_ctx_dropped.label: +"""Dropped Reason""" + +root_rule_test.desc: +"""Schema for testing rules""" + +root_rule_test.label: +"""Rule Test Schema""" + +rs_test_columns.desc: +"""The test columns""" + +rs_test_columns.label: +"""Test Columns""" + +event_peerhost.desc: +"""The IP Address of the Peer Client""" + +event_peerhost.label: +"""Peer IP Address""" + +event_proto_name.desc: +"""Protocol Name""" + +event_proto_name.label: +"""Protocol Name""" + +root_rule_events.desc: +"""Schema for rule events""" + +root_rule_events.label: +"""Rule Events Schema""" + +rs_sql_example.desc: +"""The sql_example""" + +rs_sql_example.label: +"""SQL Example""" + +metrics_sql_matched_rate_max.desc: +"""The max rate of matched, times/second""" + +metrics_sql_matched_rate_max.label: +"""Max Matched Rate""" + +event_clean_start.desc: +"""Clean Start""" + +event_clean_start.label: +"""Clean Start""" + +ri_created_at.desc: +"""The created time of the rule""" + +ri_created_at.label: +"""Rule Create Time""" + +event_dup.desc: +"""The DUP flag of the MQTT message""" + +event_dup.label: +"""DUP Flag""" 
+ +ri_from.desc: +"""The topics of the rule""" + +ri_from.label: +"""Topics of Rule""" + +ri_node_metrics.desc: +"""The metrics of the rule for each node""" + +ri_node_metrics.label: +"""Each Node Rule Metrics""" + +root_rule_creation.desc: +"""Schema for creating rules""" + +root_rule_creation.label: +"""Create Schema""" + +event_result.desc: +"""Allow or Deny""" + +event_result.label: +"""Auth Result""" + +event_id.desc: +"""Message ID""" + +event_id.label: +"""Message ID""" + +event_username.desc: +"""Username""" + +event_username.label: +"""Username""" + +root_rule_info.desc: +"""Schema for rule info""" + +root_rule_info.label: +"""Info Schema""" + +rs_columns.desc: +"""The columns""" + +rs_columns.label: +"""Column""" + +test_sql.desc: +"""The SQL of the rule for testing""" + +test_sql.label: +"""Test SQL""" + +event_ctx_disconnected_reason.desc: +"""The Reason for Disconnect""" + +event_ctx_disconnected_reason.label: +"""Disconnect Reason""" } diff --git a/rel/i18n/emqx_rule_engine_api.hocon b/rel/i18n/emqx_rule_engine_api.hocon index 8a57f8e31..385b71ddc 100644 --- a/rel/i18n/emqx_rule_engine_api.hocon +++ b/rel/i18n/emqx_rule_engine_api.hocon @@ -1,168 +1,93 @@ emqx_rule_engine_api { - api1 { - desc { - en: "List all rules" - zh: "列出所有规则" - } - label: { - en: "List All Rules" - zh: "列出所有规则" - } - } - api1_enable { - desc { - en: "Filter enable/disable rules" - zh: "根据规则是否开启条件过滤" - } - } +api1.desc: +"""List all rules""" - api1_from { - desc { - en: "Filter rules by from(topic), exact match" - zh: "根据规则来源 Topic 过滤, 需要完全匹配" - } - } +api1.label: +"""List All Rules""" - api1_like_id { - desc { - en: "Filter rules by id, Substring matching" - zh: "根据规则 id 过滤, 使用子串模糊匹配" - } - } +api10.desc: +"""Update rule engine configuration.""" - api1_like_from { - desc { - en: "Filter rules by from(topic), Substring matching" - zh: "根据规则来源 Topic 过滤, 使用子串模糊匹配" - } - } +api10.label: +"""Update configuration""" - api1_like_description { - desc { - en: "Filter rules by description, Substring matching" - zh: "根据规则描述过滤, 使用子串模糊匹配" - } - } - api1_match_from { - desc { - en: "Filter rules by from(topic), Mqtt topic matching" - zh: "根据规则来源 Topic 过滤, 使用 MQTT Topic 匹配" - } - } - api1_resp { - desc { - en: "List of rules" - zh: "规则列表" - } - label: { - en: "List Rules" - zh: "列出所有规则" - } - } - api2 { - desc { - en: "Create a new rule using given Id" - zh: "通过指定 ID 创建规则" - } - label: { - en: "Create Rule By ID" - zh: "通过指定 ID 创建规则" - } - } +api1_enable.desc: +"""Filter enable/disable rules""" - api3 { - desc { - en: "List all events can be used in rules" - zh: "列出所有能被规则使用的事件" - } - label: { - en: "List All Events Can Be Used In Rule" - zh: "列出所有能被规则使用的事件" - } - } +api1_from.desc: +"""Filter rules by from(topic), exact match""" - api4 { - desc { - en: "Get a rule by given Id" - zh: "通过 ID 查询规则" - } - label: { - en: "Get Rule" - zh: "查询规则" - } - } +api1_like_description.desc: +"""Filter rules by description, Substring matching""" - api4_1 { - desc { - en: "Get a rule's metrics by given Id" - zh: "通过给定的 Id 获得规则的指标数据" - } - label: { - en: "Get Metric" - zh: "获得指标数据" - } - } +api1_like_from.desc: +"""Filter rules by from(topic), Substring matching""" - api5 { - desc { - en: "Update a rule by given Id to all nodes in the cluster" - zh: "通过 ID 更新集群里所有节点上的规则" - } - label: { - en: "Update Cluster Rule" - zh: "更新集群规则" - } - } +api1_like_id.desc: +"""Filter rules by id, Substring matching""" + +api1_match_from.desc: +"""Filter rules by from(topic), Mqtt topic matching""" + +api1_resp.desc: +"""List of rules""" + 
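As a rough illustration of the list-rules filters described above (the rule and the filter values are hypothetical, not taken from this change), suppose a rule is created with:

    SELECT * FROM "sensors/#"

The exact-match filter from=sensors/# returns this rule, whereas a value such as from=sensors/room1 does not, because the stored topic string differs; the match_from filter instead applies MQTT topic matching between the queried value and the rule's topics.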
+api1_resp.label: +"""List Rules""" + +api2.desc: +"""Create a new rule using given Id""" + +api2.label: +"""Create Rule By ID""" + +api3.desc: +"""List all events can be used in rules""" + +api3.label: +"""List All Events Can Be Used In Rule""" + +api4.desc: +"""Get a rule by given Id""" + +api4.label: +"""Get Rule""" + +api4_1.desc: +"""Get a rule's metrics by given Id""" + +api4_1.label: +"""Get Metric""" + +api5.desc: +"""Update a rule by given Id to all nodes in the cluster""" + +api5.label: +"""Update Cluster Rule""" + +api6.desc: +"""Delete a rule by given Id from all nodes in the cluster""" + +api6.label: +"""Delete Cluster Rule""" + +api7.desc: +"""Reset a rule metrics""" + +api7.label: +"""Reset Rule Metrics""" + +api8.desc: +"""Test a rule""" + +api8.label: +"""Test Rule""" + +api9.desc: +"""Get rule engine configuration.""" + +api9.label: +"""Get configuration""" - api6 { - desc { - en: "Delete a rule by given Id from all nodes in the cluster" - zh: "通过 ID 删除集群里所有节点上的规则" - } - label: { - en: "Delete Cluster Rule" - zh: "基于给定 ID 新建一条规则" - } - } - api7 { - desc { - en: "Reset a rule metrics" - zh: "重置规则计数" - } - label: { - en: "Reset Rule Metrics" - zh: "重置规则计数" - } - } - api8 { - desc { - en: "Test a rule" - zh: "测试一个规则" - } - label: { - en: "Test Rule" - zh: "测试规则" - } - } - api9 { - desc { - en: "Get rule engine configuration." - zh: "获取规则引擎配置。" - } - label { - en: "Get configuration" - zh: "获取配置" - } - } - api10 { - desc { - en: "Update rule engine configuration." - zh: "更新规则引擎配置。" - } - label { - en: "Update configuration" - zh: "更新配置" - } - } } diff --git a/rel/i18n/emqx_rule_engine_schema.hocon b/rel/i18n/emqx_rule_engine_schema.hocon index ca0a73f0f..9b1f1f802 100644 --- a/rel/i18n/emqx_rule_engine_schema.hocon +++ b/rel/i18n/emqx_rule_engine_schema.hocon @@ -1,326 +1,25 @@ emqx_rule_engine_schema { - rules_name { - desc { - en: "The name of the rule" - zh: "规则名字" - } - label: { - en: "Rule Name" - zh: "规则名字" - } - } +console_function.desc: +"""Print the actions to the console""" - rules_sql { - desc { - en: """SQL query to transform the messages. -Example: SELECT * FROM "test/topic" WHERE payload.x = 1""" - zh: """用于处理消息的 SQL 。 -示例:SELECT * FROM "test/topic" WHERE payload.x = 1""" - } - label: { - en: "Rule SQL" - zh: "规则 SQL" - } - } +console_function.label: +"""Console Function""" - rules_actions { - desc { - en: """A list of actions of the rule. -An action can be a string that refers to the channel ID of an EMQX bridge, or an object -that refers to a function. -There a some built-in functions like "republish" and "console", and we also support user -provided functions in the format: "{module}:{function}". -The actions in the list are executed sequentially. -This means that if one of the action is executing slowly, all the following actions will not -be executed until it returns. -If one of the action crashed, all other actions come after it will still be executed, in the -original order. 
-If there's any error when running an action, there will be an error message, and the 'failure' -counter of the function action or the bridge channel will increase.""" +desc_builtin_action_console.desc: +"""Configuration for a built-in action.""" - zh: """规则的动作列表。 -动作可以是指向 EMQX bridge 的引用,也可以是一个指向函数的对象。 -我们支持一些内置函数,如“republish”和“console”,我们还支持用户提供的函数,它的格式为:“{module}:{function}”。 -列表中的动作按顺序执行。这意味着如果其中一个动作执行缓慢,则以下所有动作都不会被执行直到它返回。 -如果其中一个动作崩溃,在它之后的所有动作仍然会被按照原始顺序执行。 -如果运行动作时出现任何错误,则会出现错误消息,并且相应的计数器会增加。""" - } - label: { - en: "Rule Action List" - zh: "动作列表" - } - } +desc_builtin_action_console.label: +"""Action Console Configuration""" - rules_enable { - desc { - en: "Enable or disable the rule" - zh: "启用或禁用规则引擎" - } - label: { - en: "Enable Or Disable Rule" - zh: "启用或禁用规则引擎" - } - } +desc_builtin_action_republish.desc: +"""Configuration for a built-in action.""" - rules_metadata { - desc { - en: "Rule metadata, do not change manually" - zh: "规则的元数据,不要手动修改" - } - label: { - en: "Rule metadata" - zh: "规则的元数据" - } - } +desc_builtin_action_republish.label: +"""Republish Configuration""" - rules_description { - desc { - en: "The description of the rule" - zh: "规则的描述" - } - label: { - en: "Rule Description" - zh: "规则描述" - } - } - - republish_function { - desc { - en: """Republish the message as a new MQTT message""" - zh: """将消息重新发布为新的 MQTT 消息""" - } - label: { - en: "Republish Function" - zh: "重新发布函数" - } - } - - console_function { - desc { - en: """Print the actions to the console""" - zh: "将输出打印到控制台" - } - label: { - en: "Console Function" - zh: "控制台函数" - } - } - - user_provided_function_function { - desc { - en: """The user provided function. Should be in the format: '{module}:{function}'. -Where {module} is the Erlang callback module and {function} is the Erlang function. - -To write your own function, checkout the function console and -republish in the source file: -apps/emqx_rule_engine/src/emqx_rule_actions.erl as an example.""" - - zh: """用户提供的函数。 格式应为:'{module}:{function}'。 -其中 {module} 是 Erlang 回调模块, {function} 是 Erlang 函数。 -要编写自己的函数,请检查源文件:apps/emqx_rule_engine/src/emqx_rule_actions.erl 中的示例函数 consolerepublish 。""" - } - label: { - en: "User Provided Function" - zh: "用户提供的函数" - } - } - - user_provided_function_args { - desc { - en: """The args will be passed as the 3rd argument to module:function/3, -checkout the function console and republish in the source file: -apps/emqx_rule_engine/src/emqx_rule_actions.erl as an example.""" - zh: """用户提供的参数将作为函数 module:function/3 的第三个参数, -请检查源文件:apps/emqx_rule_engine/src/emqx_rule_actions.erl 中的示例函数 consolerepublish 。""" - } - label: { - en: "User Provided Function Args" - zh: "用户提供函数的参数" - } - } - - republish_args_topic { - desc { - en: """The target topic of message to be re-published. -Template with variables is allowed, see description of the 'republish_args'.""" - zh: """重新发布消息的目标主题。 -允许使用带有变量的模板,请参阅“republish_args”的描述。""" - } - label: { - en: "Target Topic" - zh: "目标主题" - } - } - - republish_args_qos { - desc { - en: """The qos of the message to be re-published. -Template with variables is allowed, see description of the 'republish_args'. -Defaults to ${qos}. If variable ${qos} is not found from the selected result of the rule, -0 is used.""" - zh: """要重新发布的消息的 qos。允许使用带有变量的模板,请参阅“republish_args”的描述。 -默认为 ${qos}。 如果从规则的选择结果中没有找到变量 ${qos},则使用 0。""" - } - label: { - en: "Message QoS" - zh: "消息 QoS 等级" - } - } - - republish_args_retain { - desc { - en: """The 'retain' flag of the message to be re-published. 
-Template with variables is allowed, see description of the 'republish_args'. -Defaults to ${retain}. If variable ${retain} is not found from the selected result -of the rule, false is used.""" - zh: """要重新发布的消息的“保留”标志。允许使用带有变量的模板,请参阅“republish_args”的描述。 -默认为 ${retain}。 如果从所选结果中未找到变量 ${retain},则使用 false。""" - } - label: { - en: "Retain Flag" - zh: "保留消息标志" - } - } - - republish_args_payload { - desc { - en: """The payload of the message to be re-published. -Template with variables is allowed, see description of the 'republish_args'. -Defaults to ${payload}. If variable ${payload} is not found from the selected result -of the rule, then the string "undefined" is used.""" - zh: """要重新发布的消息的有效负载。允许使用带有变量的模板,请参阅“republish_args”的描述。 -默认为 ${payload}。 如果从所选结果中未找到变量 ${payload},则使用字符串 "undefined"。""" - } - label: { - en: "Message Payload" - zh: "消息负载" - } - } - republish_args_user_properties { - desc { - en: """From which variable should the MQTT message's User-Property pairs be taken from. -The value must be a map. -You may configure it to ${pub_props.'User-Property'} or -use SELECT *,pub_props.'User-Property' as user_properties -to forward the original user properties to the republished message. -You may also call map_put function like -map_put('my-prop-name', 'my-prop-value', user_properties) as user_properties -to inject user properties. -NOTE: MQTT spec allows duplicated user property names, but EMQX Rule-Engine does not.""" - - zh: """指定使用哪个变量来填充 MQTT 消息的 User-Property 列表。这个变量的值必须是一个 map 类型。 -可以设置成 ${pub_props.'User-Property'} 或者 -使用 SELECT *,pub_props.'User-Property' as user_properties 来把源 MQTT 消息 -的 User-Property 列表用于填充。 -也可以使用 map_put 函数来添加新的 User-Property, -map_put('my-prop-name', 'my-prop-value', user_properties) as user_properties -注意:MQTT 协议允许一个消息中出现多次同一个 property 名,但是 EMQX 的规则引擎不允许。""" - } - } - - rule_engine_ignore_sys_message { - desc { - en: "When set to 'true' (default), rule-engine will ignore messages published to $SYS topics." - zh: "当设置为“true”(默认)时,规则引擎将忽略发布到 $SYS 主题的消息。" - } - label: { - en: "Ignore Sys Message" - zh: "忽略系统消息" - } - } - - rule_engine_rules { - desc { - en: """The rules""" - zh: "规则" - } - label: { - en: "Rules" - zh: "规则" - } - } - - rule_engine_jq_function_default_timeout { - desc { - en: "Default timeout for the `jq` rule engine function" - zh: "规则引擎内建函数 `jq` 默认时间限制" - } - label: { - en: "Rule engine jq function default timeout" - zh: "规则引擎 jq 函数时间限制" - } - } - - rule_engine_jq_implementation_module { - desc { - en: "The implementation module for the jq rule engine function. The two options are jq_nif and jq_port. With the jq_nif option an Erlang NIF library is used while with the jq_port option an implementation based on Erlang port programs is used. The jq_nif option (the default option) is the fastest implementation of the two but jq_port is safer as the jq programs will not execute in the same process as the Erlang VM." 
- zh: "jq 规则引擎功能的实现模块。可用的两个选项是 jq_nif 和 jq_port。jq_nif 使用 Erlang NIF 库访问 jq 库,而 jq_port 使用基于 Erlang Port 的实现。jq_nif 方式(默认选项)是这两个选项中最快的实现,但 jq_port 方式更安全,因为这种情况下 jq 程序不会在 Erlang VM 进程中执行。" - } - label: { - en: "JQ Implementation Module" - zh: "JQ 实现模块" - } - } - - desc_rule_engine { - desc { - en: """Configuration for the EMQX Rule Engine.""" - zh: """配置 EMQX 规则引擎。""" - } - label: { - en: "Rule Engine Configuration" - zh: "配置规则引擎" - } - } - - desc_rules { - desc { - en: """Configuration for a rule.""" - zh: """配置规则""" - } - label: { - en: "Rule Configuration" - zh: "配置规则" - } - } - - desc_builtin_action_republish { - desc { - en: """Configuration for a built-in action.""" - zh: """配置重新发布。""" - } - label: { - en: "Republish Configuration" - zh: "配置重新发布" - } - } - - desc_builtin_action_console { - desc { - en: """Configuration for a built-in action.""" - zh: """配置打印到控制台""" - } - label: { - en: "Action Console Configuration" - zh: "配置打印到控制台" - } - } - - desc_user_provided_function { - desc { - en: """Configuration for a built-in action.""" - zh: """配置用户函数""" - } - label: { - en: "User Provid Function Configuration" - zh: "配置用户函数" - } - } - - desc_republish_args { - desc { - en: """The arguments of the built-in 'republish' action.One can use variables in the args. +desc_republish_args.desc: +"""The arguments of the built-in 'republish' action.One can use variables in the args. The variables are selected by the rule. For example, if the rule SQL is defined as following: SELECT clientid, qos, payload FROM "t/1" @@ -337,27 +36,168 @@ Then there are 3 variables available: clientid, qos an When the rule is triggered by an MQTT message with payload = `hello`, qos = 1, clientid = `Steve`, the rule will republish a new MQTT message to topic `t/Steve`, payload = `msg: hello`, and `qos = 1`.""" - zh: """内置 'republish' 动作的参数。 -可以在参数中使用变量。 -变量是规则中选择的字段。 例如规则 SQL 定义如下: - - SELECT clientid, qos, payload FROM "t/1" - -然后有 3 个变量可用:clientidqospayload。 如果我们将参数设置为: - - { - topic = "t/${clientid}" - qos = "${qos}" - payload = "msg: ${payload}" - } - -当收到一条消息 payload = `hello`, qos = 1, clientid = `Steve` 时,将重新发布一条新的 MQTT 消息到主题 `t/Steve` -消息内容为 payload = `msg: hello`, and `qos = 1""" - } - label: { - en: "Republish Args" - zh: "重新发布参数" - } - } + +desc_republish_args.label: +"""Republish Args""" + +desc_rule_engine.desc: +"""Configuration for the EMQX Rule Engine.""" + +desc_rule_engine.label: +"""Rule Engine Configuration""" + +desc_rules.desc: +"""Configuration for a rule.""" + +desc_rules.label: +"""Rule Configuration""" + +desc_user_provided_function.desc: +"""Configuration for a built-in action.""" + +desc_user_provided_function.label: +"""User Provid Function Configuration""" + +republish_args_payload.desc: +"""The payload of the message to be re-published. +Template with variables is allowed, see description of the 'republish_args'. +Defaults to ${payload}. If variable ${payload} is not found from the selected result +of the rule, then the string "undefined" is used.""" + +republish_args_payload.label: +"""Message Payload""" + +republish_args_qos.desc: +"""The qos of the message to be re-published. +Template with variables is allowed, see description of the 'republish_args'. +Defaults to ${qos}. If variable ${qos} is not found from the selected result of the rule, +0 is used.""" + +republish_args_qos.label: +"""Message QoS""" + +republish_args_retain.desc: +"""The 'retain' flag of the message to be re-published. +Template with variables is allowed, see description of the 'republish_args'. 
+Defaults to ${retain}. If variable ${retain} is not found from the selected result
+of the rule, false is used."""
+
+republish_args_retain.label:
+"""Retain Flag"""
+
+republish_args_topic.desc:
+"""The target topic of the message to be re-published.
+Template with variables is allowed, see description of the 'republish_args'."""
+
+republish_args_topic.label:
+"""Target Topic"""
+
+republish_args_user_properties.desc:
+"""Which variable the MQTT message's User-Property pairs should be taken from.
+The value must be a map.
+You may configure it to ${pub_props.'User-Property'} or
+use SELECT *,pub_props.'User-Property' as user_properties
+to forward the original user properties to the republished message.
+You may also call the map_put function like
+map_put('my-prop-name', 'my-prop-value', user_properties) as user_properties
+to inject user properties.
+NOTE: MQTT spec allows duplicated user property names, but EMQX Rule-Engine does not."""
+
+republish_function.desc:
+"""Republish the message as a new MQTT message"""
+
+republish_function.label:
+"""Republish Function"""
+
+rule_engine_ignore_sys_message.desc:
+"""When set to 'true' (default), rule-engine will ignore messages published to $SYS topics."""
+
+rule_engine_ignore_sys_message.label:
+"""Ignore Sys Message"""
+
+rule_engine_jq_function_default_timeout.desc:
+"""Default timeout for the `jq` rule engine function"""
+
+rule_engine_jq_function_default_timeout.label:
+"""Rule engine jq function default timeout"""
+
+rule_engine_jq_implementation_module.desc:
+"""The implementation module for the jq rule engine function. The two options are jq_nif and jq_port. With the jq_nif option an Erlang NIF library is used, while with the jq_port option an implementation based on Erlang port programs is used. The jq_nif option (the default option) is the faster implementation of the two, but jq_port is safer because the jq programs do not execute in the same process as the Erlang VM."""
+
+rule_engine_jq_implementation_module.label:
+"""JQ Implementation Module"""
+
+rule_engine_rules.desc:
+"""The rules"""
+
+rule_engine_rules.label:
+"""Rules"""
+
+rules_actions.desc:
+"""A list of actions of the rule.
+An action can be a string that refers to the channel ID of an EMQX bridge, or an object
+that refers to a function.
+There are some built-in functions like "republish" and "console", and we also support
+user-provided functions in the format: "{module}:{function}".
+The actions in the list are executed sequentially.
+This means that if one of the actions is executing slowly, all the following actions will not
+be executed until it returns.
+If one of the actions crashes, all other actions that come after it will still be executed, in the
+original order.
+If there's any error when running an action, there will be an error message, and the 'failure'
+counter of the function action or the bridge channel will increase."""
+
+rules_actions.label:
+"""Rule Action List"""
+
+rules_description.desc:
+"""The description of the rule"""
+
+rules_description.label:
+"""Rule Description"""
+
+rules_enable.desc:
+"""Enable or disable the rule"""
+
+rules_enable.label:
+"""Enable Or Disable Rule"""
+
+rules_metadata.desc:
+"""Rule metadata, do not change manually"""
+
+rules_metadata.label:
+"""Rule metadata"""
+
+rules_name.desc:
+"""The name of the rule"""
+
+rules_name.label:
+"""Rule Name"""
+
+rules_sql.desc:
+"""SQL query to transform the messages.
+Example: SELECT * FROM "test/topic" WHERE payload.x = 1""" + +rules_sql.label: +"""Rule SQL""" + +user_provided_function_args.desc: +"""The args will be passed as the 3rd argument to module:function/3, +checkout the function console and republish in the source file: +apps/emqx_rule_engine/src/emqx_rule_actions.erl as an example.""" + +user_provided_function_args.label: +"""User Provided Function Args""" + +user_provided_function_function.desc: +"""The user provided function. Should be in the format: '{module}:{function}'. +Where {module} is the Erlang callback module and {function} is the Erlang function. + +To write your own function, checkout the function console and +republish in the source file: +apps/emqx_rule_engine/src/emqx_rule_actions.erl as an example.""" + +user_provided_function_function.label: +"""User Provided Function""" } diff --git a/rel/i18n/emqx_schema.hocon b/rel/i18n/emqx_schema.hocon index d36809c3b..9c2a1530d 100644 --- a/rel/i18n/emqx_schema.hocon +++ b/rel/i18n/emqx_schema.hocon @@ -1,1582 +1,441 @@ emqx_schema { - force_shutdown_enable { - desc { - en: "Enable `force_shutdown` feature." - zh: "启用 `force_shutdown` 功能。" - } - label { - en: "Enable `force_shutdown` feature" - zh: "启用 `force_shutdown` 功能" - } - } +fields_mqtt_quic_listener_peer_unidi_stream_count.desc: +"""Number of unidirectional streams to allow the peer to open.""" - force_shutdown_max_message_queue_len { - desc { - en: "Maximum message queue length." - zh: "消息队列的最大长度。" - } - label { - en: "Maximum mailbox queue length of process." - zh: "进程邮箱消息队列的最大长度" - } - } +fields_mqtt_quic_listener_peer_unidi_stream_count.label: +"""Peer unidi stream count""" - force_shutdown_max_heap_size { - desc { - en: "Total heap size" - zh: "Heap 的总大小。" - } - label { - en: "Total heap size" - zh: "Heap 的总大小" - } - } +fields_authorization_no_match.desc: +"""Default access control action if the user or client matches no ACL rules, +or if no such user or client is found by the configurable authorization +sources such as built_in_database, an HTTP API, or a query against PostgreSQL. +Find more details in 'authorization.sources' config.""" - overload_protection_enable { - desc { - en: "React on system overload or not." - zh: "是否对系统过载做出反应。" - } - label { - en: "React on system overload or not" - zh: "是否对系统过载做出反应" - } - } +fields_authorization_no_match.label: +"""Authorization no match""" - overload_protection_backoff_delay { - desc { - en: "The maximum duration of delay for background task execution during high load conditions." - zh: "高负载时,一些不重要的任务可能会延迟执行,在这里设置允许延迟的时间。" - } - label { - en: "Delay Time" - zh: "延迟时间" - } - } +sysmon_top_db_hostname.desc: +"""Hostname of the PostgreSQL database that collects the data points""" - overload_protection_backoff_gc { - desc { - en: "When at high load, skip forceful GC." - zh: "高负载时,跳过强制 GC。" - } - label { - en: "Skip GC" - zh: "跳过GC" - } - } +sysmon_top_db_hostname.label: +"""DB Hostname""" - overload_protection_backoff_hibernation { - desc { - en: "When at high load, skip process hibernation." - zh: "高负载时,跳过进程休眠。" - } - label { - en: "Skip hibernation" - zh: "跳过休眠" - } - } - - overload_protection_backoff_new_conn { - desc { - en: "When at high load, close new incoming connections." - zh: "高负载时,拒绝新进来的客户端连接。" - } - label { - en: "Close new connections" - zh: "关闭新连接" - } - } - - conn_congestion_enable_alarm { - desc { - en: "Enable or disable connection congestion alarm." 
- zh: "启用或者禁用连接阻塞告警功能。" - } - label { - en: "Enable/disable congestion alarm" - zh: "启用/禁用阻塞告警" - } - } - - conn_congestion_min_alarm_sustain_duration { - desc { - en: "Minimal time before clearing the alarm.
" - "The alarm is cleared only when there's no pending data in
" - "the queue, and at least min_alarm_sustain_duration" - "milliseconds passed since the last time we considered the connection 'congested'.
" - "This is to avoid clearing and raising the alarm again too often." - zh: "清除警报前的最短时间。
" - "只有当队列中没有挂起的数据,并且连接至少被堵塞了 min_alarm_sustain_duration 毫秒时,
" - "报警才会被清除。这是为了避免太频繁地清除和再次发出警报。" - } - label { - en: "Sustain duration" - zh: "告警维持时间" - } - } - - force_gc_enable { - desc { - en: "Enable forced garbage collection." - zh: "启用强制垃圾回收。" - } - label { - en: "Enable forced garbage collection" - zh: "启用强制垃圾回收" - } - } - - force_gc_count { - desc { - en: "GC the process after this many received messages." - zh: "在进程收到多少消息之后,对此进程执行垃圾回收。" - } - label { - en: "Process GC messages num" - zh: "垃圾回收消息数" - } - } - - force_gc_bytes { - desc { - en: "GC the process after specified number of bytes have passed through." - zh: "在进程处理过多少个字节之后,对此进程执行垃圾回收。" - } - label { - en: "Process GC bytes" - zh: "垃圾回收字节数" - } - } - - sysmon_vm_process_check_interval { - desc { - en: "The time interval for the periodic process limit check." - zh: "定期进程限制检查的时间间隔。" - } - label { - en: "Process limit check interval" - zh: "进程限制检查时间" - } - } - - sysmon_vm_process_high_watermark { - desc { - en: "The threshold, as percentage of processes, for how many\n" - " processes can simultaneously exist at the local node before the corresponding\n" - " alarm is raised." - zh: "在发出相应警报之前,本地节点上可以同时存在多少进程的阈值(以进程百分比表示)。" - } - label { - en: "Process high watermark" - zh: "进程数高水位线" - } - } - - sysmon_vm_process_low_watermark { - desc { - en: "The threshold, as percentage of processes, for how many\n" - " processes can simultaneously exist at the local node before the corresponding\n" - " alarm is cleared." - zh: "在清除相应警报之前,本地节点上可以同时存在多少进程的阈值(以进程百分比表示)。" - } - label { - en: "Process low watermark" - zh: "进程数低水位线" - } - } - - sysmon_vm_long_gc { - desc { - en: """When an Erlang process spends long time to perform garbage collection, a warning level long_gc log is emitted, -and an MQTT message is published to the system topic $SYS/sysmon/long_gc.""" - zh: """当系统检测到某个 Erlang 进程垃圾回收占用过长时间,会触发一条带有 long_gc 关键字的日志。 -同时还会发布一条主题为 $SYS/sysmon/long_gc 的 MQTT 系统消息。""" - } - label { - en: "Enable Long GC monitoring." - zh: "启用长垃圾回收监控" - } - } - - sysmon_vm_long_schedule { - desc { - en: """When the Erlang VM detect a task scheduled for too long, a warning level 'long_schedule' log is emitted, -and an MQTT message is published to the system topic $SYS/sysmon/long_schedule.""" - zh: """启用后,如果 Erlang VM 调度器出现某个任务占用时间过长时,会触发一条带有 'long_schedule' 关键字的日志。 -同时还会发布一条主题为 $SYS/sysmon/long_schedule 的 MQTT 系统消息。""" - } - label { - en: "Enable Long Schedule monitoring." - zh: "启用长调度监控" - } - } - - sysmon_vm_large_heap { - desc { - en: """When an Erlang process consumed a large amount of memory for its heap space, -the system will write a warning level large_heap log, and an MQTT message is published to -the system topic $SYS/sysmon/large_heap.""" - zh: """启用后,当一个 Erlang 进程申请了大量内存,系统会触发一条带有 large_heap 关键字的 -warning 级别日志。同时还会发布一条主题为 $SYS/sysmon/busy_dist_port 的 MQTT 系统消息。""" - } - label { - en: "Enable Large Heap monitoring." - zh: "启用大 heap 监控" - } - } - - sysmon_vm_busy_dist_port { - desc { - en: """When the RPC connection used to communicate with other nodes in the cluster is overloaded, -there will be a busy_dist_port warning log, -and an MQTT message is published to system topic $SYS/sysmon/busy_dist_port.""" - zh: """启用后,当用于集群接点之间 RPC 的连接过忙时,会触发一条带有 busy_dist_port 关键字的 warning 级别日志。 -同时还会发布一条主题为 $SYS/sysmon/busy_dist_port 的 MQTT 系统消息。""" - } - label { - en: "Enable Busy Distribution Port monitoring." - zh: "启用分布式端口过忙监控" - } - } - - sysmon_vm_busy_port { - desc { - en: """When a port (e.g. 
TCP socket) is overloaded, there will be a busy_port warning log, -and an MQTT message is published to the system topic $SYS/sysmon/busy_port.""" - zh: """当一个系统接口(例如 TCP socket)过忙,会触发一条带有 busy_port 关键字的 warning 级别的日志。 -同时还会发布一条主题为 $SYS/sysmon/busy_port 的 MQTT 系统消息。""" - } - label { - en: "Enable Busy Port monitoring." - zh: "启用端口过忙监控" - } - } - - sysmon_os_cpu_check_interval { - desc { - en: "The time interval for the periodic CPU check." - zh: "定期 CPU 检查的时间间隔。" - } - label { - en: "The time interval for the periodic CPU check." - zh: "定期 CPU 检查的时间间隔" - } - } - - sysmon_os_cpu_high_watermark { - desc { - en: "The threshold, as percentage of system CPU load,\n" - " for how much system cpu can be used before the corresponding alarm is raised." - zh: "在发出相应警报之前可以使用多少系统 CPU 的阈值,以系统CPU负载的百分比表示。" - } - label { - en: "CPU high watermark" - zh: "CPU 高水位线" - } - } - - sysmon_os_cpu_low_watermark { - desc { - en: "The threshold, as percentage of system CPU load,\n" - " for how much system cpu can be used before the corresponding alarm is cleared." - zh: "在解除相应警报之前可以使用多少系统 CPU 的阈值,以系统CPU负载的百分比表示。" - } - label { - en: "CPU low watermark" - zh: "CPU 低水位线" - } - } - - sysmon_os_mem_check_interval { - desc { - en: "The time interval for the periodic memory check." - zh: "定期内存检查的时间间隔。" - } - label { - en: "Mem check interval" - zh: "内存检查间隔" - } - } - - sysmon_os_sysmem_high_watermark { - desc { - en: "The threshold, as percentage of system memory,\n" - " for how much system memory can be allocated before the corresponding alarm is raised." - zh: "在发出相应报警之前可以分配多少系统内存的阈值,以系统内存的百分比表示。" - } - label { - en: "SysMem high wartermark" - zh: "系统内存高水位线" - } - } - - sysmon_os_procmem_high_watermark { - desc { - en: "The threshold, as percentage of system memory,\n" - " for how much system memory can be allocated by one Erlang process before\n" - " the corresponding alarm is raised." - zh: "在发出相应警报之前,一个Erlang进程可以分配多少系统内存的阈值,以系统内存的百分比表示。" - } - label { - en: "ProcMem high wartermark" - zh: "进程内存高水位线" - } - } - - sysmon_top_num_items { - desc { - en: "The number of top processes per monitoring group" - zh: "每个监视组的顶级进程数。" - } - label { - en: "Top num items" - zh: "顶级进程数" - } - } - - sysmon_top_sample_interval { - desc { - en: "Specifies how often process top should be collected" - zh: "指定应收集进程顶部的频率。" - } - label { - en: "Top sample interval" - zh: "取样时间" - } - } - - sysmon_top_max_procs { - desc { - en: "Stop collecting data when the number of processes\n" - "in the VM exceeds this value" - zh: "当 VM 中的进程数超过此值时,停止收集数据。" - } - label { - en: "Max procs" - zh: "最大进程数" - } - } - - sysmon_top_db_hostname { - desc { - en: "Hostname of the PostgreSQL database that collects the data points" - zh: "收集数据点的 PostgreSQL 数据库的主机名。" - } - label { - en: "DB Hostname" - zh: "数据库主机名" - } - } - - sysmon_top_db_port { - desc { - en: "Port of the PostgreSQL database that collects the data points." 
- zh: "收集数据点的 PostgreSQL 数据库的端口。" - } - label { - en: "DB Port" - zh: "数据库端口" - } - } - - sysmon_top_db_username { - desc { - en: "Username of the PostgreSQL database" - zh: "PostgreSQL 数据库的用户名" - } - label { - en: "DB Username" - zh: "数据库用户名" - } - } - - sysmon_top_db_password { - desc { - en: "EMQX user password in the PostgreSQL database" - zh: "PostgreSQL 数据库的密码" - } - label { - en: "DB Password" - zh: "数据库密码" - } - } - - sysmon_top_db_name { - desc { - en: "PostgreSQL database name" - zh: "PostgreSQL 数据库的数据库名" - } - label { - en: "DB Name" - zh: "数据库名" - } - } - - alarm_actions { - desc { - en: "The actions triggered when the alarm is activated.
" - "Currently, the following actions are supported: log and " - "publish.\n" - "log is to write the alarm to log (console or file).\n" - "publish is to publish the alarm as an MQTT message to " - "the system topics:\n" - "$SYS/brokers/emqx@xx.xx.xx.x/alarms/activate and\n" - "$SYS/brokers/emqx@xx.xx.xx.x/alarms/deactivate" - zh: "警报激活时触发的动作。
" - "目前,支持以下操作:log 和 " - "publish.\n" - "log 将告警写入日志 (控制台或者文件).\n" - "publish 将告警作为 MQTT 消息发布到系统主题:\n" - "$SYS/brokers/emqx@xx.xx.xx.x/alarms/activate and\n" - "$SYS/brokers/emqx@xx.xx.xx.x/alarms/deactivate" - } - label: { - en: "Alarm Actions" - zh: "告警动作" - } - } - - alarm_size_limit { - desc { - en: "The maximum total number of deactivated alarms to keep as history.
" - "When this limit is exceeded, the oldest deactivated alarms are " - "deleted to cap the total number." - zh: "要保留为历史记录的已停用报警的最大总数。当超过此限制时,将删除最旧的停用报警,以限制总数。" - } - label: { - en: "Alarm size limit" - zh: "告警总数限制" - } - } - - alarm_validity_period { - desc { - en: "Retention time of deactivated alarms. Alarms are not deleted immediately\n" - "when deactivated, but after the retention time." - zh: "停用报警的保留时间。报警在停用时不会立即删除,而是在保留时间之后删除。" - } - label: { - en: "Alarm validity period" - zh: "告警保留时间" - } - } - - flapping_detect_enable { - desc { - en: "Enable flapping connection detection feature." - zh: "启用抖动检测功能。" - } - label: { - en: "Enable flapping detection" - zh: "启用抖动检测" - } - } - - flapping_detect_max_count { - desc { - en: "The maximum number of disconnects allowed for a MQTT Client in `window_time`" - zh: "MQTT 客户端在“窗口”时间内允许的最大断开次数。" - } - label: { - en: "Max count" - zh: "最大断开次数" - } - } - - flapping_detect_window_time { - desc { - en: "The time window for flapping detection." - zh: "抖动检测的时间窗口。" - } - label: { - en: "Window time" - zh: "时间窗口" - } - } - - flapping_detect_ban_time { - desc { - en: "How long the flapping clientid will be banned." - zh: "抖动的客户端将会被禁止登录多长时间。" - } - label: { - en: "Ban time" - zh: "禁止登录时长" - } - } - - persistent_session_store_enabled { - desc { - en: "Use the database to store information about persistent sessions.\n" - "This makes it possible to migrate a client connection to another\n" - "cluster node if a node is stopped." - zh: "使用数据库存储有关持久会话的信息。\n" - "这使得在节点停止时,可以将客户端连接迁移到另一个群集节点。" - } - label: { - en: "Enable persistent session store" - zh: "启用持久会话保存" - } - } - - persistent_session_store_backend { - desc { - en: "Database management system used to store information about persistent sessions and messages.\n" - "- `builtin`: Use the embedded database (mria)" - zh: "用于存储持久性会话和信息的数据库管理后端\n" - "- `builtin`: 使用内置的数据库(mria)" - } - label: { - en: "Backend" - zh: "后端类型" - } - } - - persistent_store_on_disc { - desc { - en: "Save information about the persistent sessions on disc.\n" - "If this option is enabled, persistent sessions will survive full restart of the cluster.\n" - "Otherwise, all the data will be stored in RAM, and it will be lost when all the nodes in the cluster are stopped." - zh: "将持久会话数据保存在磁盘上。如果为 false 则存储在内存中。\n" - "如开启, 持久会话数据可在集群重启后恢复。\n" - "如关闭, 数据仅存储在内存中, 则在整个集群停止后丢失。" - } - label: { - en: "Persist on disc" - zh: "持久化在磁盘上" - } - } - - persistent_store_ram_cache { - desc { - en: "Maintain a copy of the data in RAM for faster access." - zh: "在内存中保持一份数据的副本,以便更快地访问。" - } - label: { - en: "RAM cache" - zh: "内存缓存" - } - } - - persistent_session_store_max_retain_undelivered { - desc { - en: "The time messages that was not delivered to a persistent session\n" - "is stored before being garbage collected if the node the previous\n" - "session was handled on restarts of is stopped." - zh: "如果重新启动时处理上一个会话的节点已停止,则未传递到持久会话的消息在垃圾收集之前会被存储。" - } - label: { - en: "Max retain undelivered" - zh: "未投递的消息保留条数" - } - } - - persistent_session_store_message_gc_interval { - desc { - en: "The starting interval for garbage collection of undelivered messages to\n" - "a persistent session. This affects how often the \"max_retain_undelivered\"\n" - "is checked for removal." 
- zh: "将未送达的消息垃圾收集到持久会话的开始间隔。\n" - "这会影响检查 \"max_retain_undelivered\"(最大保留未送达)的删除频率。" - } - label: { - en: "Message GC interval" - zh: "消息清理间隔" - } - } - - persistent_session_store_session_message_gc_interval { - desc { - en: "The starting interval for garbage collection of transient data for\n" - "persistent session messages. This does not affect the lifetime length\n" - "of persistent session messages." - zh: "持久会话消息的临时数据垃圾收集的开始间隔。\n" - "这不会影响持久会话消息的生命周期长度。" - } - label: { - en: "Session message GC interval" - zh: "会话消息清理间隔" - } - } - - persistent_session_builtin_session_table { - desc { - en: "Performance tuning options for built-in session table." - zh: "用于内建会话表的性能调优参数。" - } - label: { - en: "Persistent session" - zh: "持久会话" - } - } - - persistent_session_builtin_sess_msg_table { - desc { - en: "Performance tuning options for built-in session messages table." - zh: "优化内置的会话消息表的配置。" - } - label: { - en: "Persistent session messages" - zh: "用于内建会话管理表的性能调优参数" - } - } - - persistent_session_builtin_messages_table { - desc { - en: "Performance tuning options for built-in messages table." - zh: "用于内建消息表的性能调优参数。" - } - label: { - en: "Persistent messages" - zh: "持久化消息" - } - } - - stats_enable { - desc { - en: "Enable/disable statistic data collection." - zh: "启用/禁用统计数据收集功能。" - } - label: { - en: "Enable/disable statistic data collection." - zh: "启用/禁用统计数据收集功能" - } - } - - zones { - desc { - en: """A zone is a set of configs grouped by the zone name. +zones.desc: +"""A zone is a set of configs grouped by the zone name. For flexible configuration mapping, the name can be set to a listener's zone config. NOTE: A built-in zone named default is auto created and can not be deleted.""" - zh: """zone 是按name 分组的一组配置。 -对于灵活的配置映射,可以将 name 设置为侦听器的 zone 配置。 -注:名为 default 的内置区域是自动创建的,无法删除。""" - } - } - mqtt { - desc { - en: """Global MQTT configuration. -The configs here work as default values which can be overridden in zone configs""" - zh: """全局的 MQTT 配置项。 -mqtt 下所有的配置作为全局的默认值存在,它可以被 zone 中的配置覆盖。""" - } - } +fields_mqtt_quic_listener_certfile.desc: +"""Path to the certificate file. Will be deprecated in 5.1, use .ssl_options.certfile instead.""" - mqtt_idle_timeout { - desc { - en: """Configure the duration of time that a connection can remain idle (i.e., without any data transfer) before being: - - Automatically disconnected if no CONNECT package is received from the client yet. - - Put into hibernation mode to save resources if some CONNECT packages are already received. -Note: Please set the parameter with caution as long idle time will lead to resource waste.""" - zh: """设置连接被断开或进入休眠状态前的等待时间,空闲超时后, - - 如暂未收到客户端的 CONNECT 报文,连接将断开; - - 如已收到客户端的 CONNECT 报文,连接将进入休眠模式以节省系统资源。 +fields_mqtt_quic_listener_certfile.label: +"""Certificate file""" -注意:请合理设置该参数值,如等待时间设置过长,可能造成系统资源的浪费。""" - } - label: { - en: """Idle Timeout""" - zh: """空闲超时""" - } - } +fields_rate_limit_conn_bytes_in.desc: +"""Limit the rate of receiving packets for a MQTT connection. 
+The rate is counted by bytes of packets per second.""" - mqtt_max_packet_size { - desc { - en: """Maximum MQTT packet size allowed.""" - zh: """允许的最大 MQTT 报文大小。""" - } - label: { - en: """Max Packet Size""" - zh: """最大报文大小""" - } - } +fields_rate_limit_conn_bytes_in.label: +"""Connection bytes in""" - mqtt_max_clientid_len { - desc { - en: """Maximum allowed length of MQTT Client ID.""" - zh: """允许的最大 MQTT Client ID 长度。""" - } - label: { - en: """Max Client ID Length""" - zh: """最大 Client ID 长度""" - } - } +crl_cache_capacity.desc: +"""The maximum number of CRL URLs that can be held in cache. If the cache is at full capacity and a new URL must be fetched, then it'll evict the oldest inserted URL in the cache.""" - mqtt_max_topic_levels { - desc { - en: """Maximum topic levels allowed.""" - zh: """允许的最大主题层级。""" - } - label: { - en: """Max Topic Levels""" - zh: """最大主题层级""" - } - } +crl_cache_capacity.label: +"""CRL Cache Capacity""" - mqtt_max_qos_allowed { - desc { - en: """Maximum QoS allowed.""" - zh: """允许的最大 QoS 等级。""" - } - label: { - en: """Max QoS""" - zh: """最大 QoS""" - } - } +alarm_actions.desc: +"""The actions triggered when the alarm is activated.
Currently, the following actions are supported: log and publish. +log is to write the alarm to log (console or file). +publish is to publish the alarm as an MQTT message to the system topics: +$SYS/brokers/emqx@xx.xx.xx.x/alarms/activate and +$SYS/brokers/emqx@xx.xx.xx.x/alarms/deactivate""" - mqtt_max_topic_alias { - desc { - en: """Maximum topic alias, 0 means no topic alias supported.""" - zh: """允许的最大主题别名数,0 表示不支持主题别名。""" - } - label: { - en: """Max Topic Alias""" - zh: """最大主题别名""" - } - } +alarm_actions.label: +"""Alarm Actions""" - mqtt_retain_available { - desc { - en: """Whether to enable support for MQTT retained message.""" - zh: """是否启用对 MQTT 保留消息的支持。""" - } - label: { - en: """Retain Available""" - zh: """保留消息可用""" - } - } +base_listener_max_connections.desc: +"""The maximum number of concurrent connections allowed by the listener.""" - mqtt_wildcard_subscription { - desc { - en: """Whether to enable support for MQTT wildcard subscription.""" - zh: """是否启用对 MQTT 通配符订阅的支持。""" - } - label: { - en: """Wildcard Subscription Available""" - zh: """通配符订阅可用""" - } - } +base_listener_max_connections.label: +"""Max connections""" - mqtt_shared_subscription { - desc { - en: """Whether to enable support for MQTT shared subscription.""" - zh: """是否启用对 MQTT 共享订阅的支持。""" - } - label: { - en: """Shared Subscription Available""" - zh: """共享订阅可用""" - } - } - - mqtt_exclusive_subscription { - desc { - en: """Whether to enable support for MQTT exclusive subscription.""" - zh: """是否启用对 MQTT 排它订阅的支持。""" - } - label: { - en: """Exclusive Subscription""" - zh: """排它订阅""" - } - } - - mqtt_ignore_loop_deliver { - desc { - en: """Whether the messages sent by the MQTT v3.1.1/v3.1.0 client will be looped back to the publisher itself, similar to No Local in MQTT 5.0.""" - zh: """设置由 MQTT v3.1.1/v3.1.0 客户端发布的消息是否将转发给其本身;类似 MQTT 5.0 协议中的 No Local 选项。""" - } - label: { - en: """Ignore Loop Deliver""" - zh: """忽略循环投递""" - } - } - - mqtt_strict_mode { - desc { - en: """Whether to parse MQTT messages in strict mode. -In strict mode, invalid utf8 strings in for example client ID, topic name, etc. will cause the client to be disconnected.""" - zh: """是否以严格模式解析 MQTT 消息。 -严格模式下,如客户端 ID、主题名称等中包含无效 utf8 字符串,连接将被断开。""" - } - label: { - en: """Strict Mode""" - zh: """严格模式""" - } - } - - mqtt_response_information { - desc { - en: """UTF-8 string, for creating the response topic, for example, if set to reqrsp/, the publisher/subscriber will communicate using the topic prefix reqrsp/. -To disable this feature, input \"\" in the text box below. Only applicable to MQTT 5.0 clients.""" - zh: """UTF-8 字符串,用于指定返回给客户端的响应主题,如 reqrsp/,此时请求和应答客户端都需要使用 reqrsp/ 前缀的主题来完成通讯。 -如希望禁用此功能,请在下方的文字框中输入\"\";仅适用于 MQTT 5.0 客户端。""" - } - label: { - en: """Response Information""" - zh: """响应信息""" - } - } - - mqtt_server_keepalive { - desc { - en: """The keep alive duration required by EMQX. To use the setting from the client side, choose disabled from the drop-down list. Only applicable to MQTT 5.0 clients.""" - zh: """EMQX 要求的保活时间,如设为 disabled,则将使用客户端指定的保持连接时间;仅适用于 MQTT 5.0 客户端。""" - } - label: { - en: """Server Keep Alive""" - zh: """服务端保活时间""" - } - } - - mqtt_keepalive_backoff { - desc { - en: """The coefficient EMQX uses to confirm whether the keep alive duration of the client expires. 
Formula: Keep Alive * Backoff * 2""" - zh: """EMQX 判定客户端保活超时使用的阈值系数。计算公式为:Keep Alive * Backoff * 2""" - } - label: { - en: """Keep Alive Backoff""" - zh: """保活超时阈值系数""" - } - } - - mqtt_max_subscriptions { - desc { - en: """Maximum number of subscriptions allowed per client.""" - zh: """允许每个客户端建立的最大订阅数量。""" - } - label: { - en: """Max Subscriptions""" - zh: """最大订阅数量""" - } - } - - mqtt_upgrade_qos { - desc { - en: """Force upgrade of QoS level according to subscription.""" - zh: """投递消息时,是否根据订阅主题时的 QoS 等级来强制提升派发的消息的 QoS 等级。""" - } - label: { - en: """Upgrade QoS""" - zh: """升级 QoS""" - } - } - - mqtt_max_inflight { - desc { - en: """Maximum number of QoS 1 and QoS 2 messages that are allowed to be delivered simultaneously before completing the acknowledgment.""" - zh: """允许在完成应答前同时投递的 QoS 1 和 QoS 2 消息的最大数量。""" - } - label: { - en: """Max Inflight""" - zh: """最大飞行窗口""" - } - - } - - mqtt_retry_interval { - desc { - en: """Retry interval for QoS 1/2 message delivering.""" - zh: """QoS 1/2 消息的重新投递间隔。""" - } - label: { - en: """Retry Interval""" - zh: """重试间隔""" - } - } - - mqtt_max_awaiting_rel { - desc { - en: """For each publisher session, the maximum number of outstanding QoS 2 messages pending on the client to send PUBREL. After reaching this limit, new QoS 2 PUBLISH requests will be rejected with `147(0x93)` until either PUBREL is received or timed out.""" - zh: """每个发布者的会话中,都存在一个队列来处理客户端发送的 QoS 2 消息。该队列会存储 QoS 2 消息的报文 ID 直到收到客户端的 PUBREL 或超时,达到队列长度的限制后,新的 QoS 2 消息发布会被拒绝,并返回 `147(0x93)` 错误。""" - } - label: { - en: """Max Awaiting PUBREL""" - zh: """PUBREL 等待队列长度""" - } - } - - mqtt_await_rel_timeout { - desc { - en: """For client to broker QoS 2 message, the time limit for the broker to wait before the `PUBREL` message is received. The wait is aborted after timed out, meaning the packet ID is freed for new `PUBLISH` requests. Receiving a stale `PUBREL` causes a warning level log. Note, the message is delivered to subscribers before entering the wait for PUBREL.""" - zh: """客户端发布 QoS 2 消息时,服务器等待 `PUBREL` 的最长时延。超过该时长后服务器会放弃等待,该PACKET ID 会被释放,从而允许后续新的 PUBLISH 消息使用。如果超时后收到 PUBREL,服务器将会产生一条告警日志。注意,向订阅客户端转发消息的动作发生在进入等待之前。""" - } - label: { - en: """Max Awaiting PUBREL TIMEOUT""" - zh: """PUBREL 最大等待时间""" - } - } - - mqtt_session_expiry_interval { - desc { - en: """Specifies how long the session will expire after the connection is disconnected, only for non-MQTT 5.0 connections.""" - zh: """指定会话将在连接断开后多久过期,仅适用于非 MQTT 5.0 的连接。""" - } - label: { - en: """Session Expiry Interval""" - zh: """会话过期间隔""" - } - } - - mqtt_max_mqueue_len { - desc { - en: """Maximum queue length. Enqueued messages when persistent client disconnected, or inflight window is full.""" - zh: """消息队列最大长度。持久客户端断开连接或飞行窗口已满时排队的消息长度。""" - } - label: { - en: """Max Message Queue Length""" - zh: """最大消息队列长度""" - } - } - - mqtt_mqueue_priorities { - desc { - en: """Topic priorities. Priority number [1-255] -There's no priority table by default, hence all messages are treated equal. - -**NOTE**: Comma and equal signs are not allowed for priority topic names. -**NOTE**: Messages for topics not in the priority table are treated as either highest or lowest priority depending on the configured value for mqtt.mqueue_default_priority. 
- -**Examples**: -To configure \"topic/1\" > \"topic/2\": -mqueue_priorities: {\"topic/1\": 10, \"topic/2\": 8}""" - zh: """主题优先级。取值范围 [1-255] -默认优先级表为空,即所有的主题优先级相同。 - -注:优先主题名称中不支持使用逗号和等号。 -注:不在此列表中的主题,被视为最高/最低优先级,这取决于mqtt.mqueue_default_priority 的配置 - -示例: -配置 \"topic/1\" > \"topic/2\": -mqueue_priorities: {\"topic/1\": 10, \"topic/2\": 8}""" - } - label: { - en: """Topic Priorities""" - zh: """主题优先级""" - } - } - - mqtt_mqueue_default_priority { - desc { - en: """Default topic priority, which will be used by topics not in Topic Priorities (mqueue_priorities).""" - zh: """默认的主题优先级,不在 主题优先级mqueue_priorities) 中的主题将会使用该优先级。""" - } - label: { - en: """Default Topic Priorities""" - zh: """默认主题优先级""" - } - } - - mqtt_mqueue_store_qos0 { - desc { - en: """Specifies whether to store QoS 0 messages in the message queue while the connection is down but the session remains.""" - zh: """指定在连接断开但会话保持期间,是否需要在消息队列中存储 QoS 0 消息。""" - } - label: { - en: """Store QoS 0 Message""" - zh: """存储 QoS 0 消息""" - } - } - - mqtt_use_username_as_clientid { - desc { - en: """Whether to use Username as Client ID. -This setting takes effect later than Use Peer Certificate as Username and Use peer certificate as Client ID.""" - zh: """是否使用用户名作为客户端 ID。 -此设置的作用时间晚于 对端证书作为用户名对端证书作为客户端 ID。""" - } - label: { - en: """Use Username as Client ID""" - zh: """用户名作为客户端 ID""" - } - } - - mqtt_peer_cert_as_username { - desc { - en: """Use the CN, DN field in the peer certificate or the entire certificate content as Username. Only works for the TLS connection. +mqtt_peer_cert_as_username.desc: +"""Use the CN, DN field in the peer certificate or the entire certificate content as Username. Only works for the TLS connection. Supported configurations are the following: - cn: CN field of the certificate - dn: DN field of the certificate - crt: Content of the DER or PEM certificate - pem: Convert DER certificate content to PEM format and use as Username - md5: MD5 value of the DER or PEM certificate""" - zh: """使用对端证书中的 CN、DN 字段或整个证书内容来作为用户名;仅适用于 TLS 连接。 -目前支持: -- cn: 取证书的 CN 字段 -- dn: 取证书的 DN 字段 -- crt: 取 DERPEM 的证书内容 -- pem: 将 DER 证书转换为 PEM 格式作为用户名 -- md5: 取 DERPEM 证书内容的 MD5 值""" - } - label: { - en: """Use Peer Certificate as Username""" - zh: """对端证书作为用户名""" - } - } - mqtt_peer_cert_as_clientid { - desc { - en: """Use the CN, DN field in the peer certificate or the entire certificate content as Client ID. Only works for the TLS connection. -Supported configurations are the following: -- cn: CN field of the certificate -- dn: DN field of the certificate -- crt: DER or PEM certificate -- pem: Convert DER certificate content to PEM format and use as Client ID -- md5: MD5 value of the DER or PEM certificate""" - zh: """使用对端证书中的 CN、DN 字段或整个证书内容来作为客户端 ID。仅适用于 TLS 连接; -目前支持: -- cn: 取证书的 CN 字段 -- dn: 取证书的 DN 字段 -- crt: 取 DERPEM 证书的内容 -- pem: 将 DER 证书内容转换为 PEM 格式作为客户端 ID -- md5: 取 DERPEM 证书内容的 MD5 值""" - } - label: { - en: """Use Peer Certificate as Client ID""" - zh: """对端证书作为客户端 ID""" - } - } +mqtt_peer_cert_as_username.label: +"""Use Peer Certificate as Username""" - broker { - desc { - en: """Message broker options.""" - zh: """Broker 相关配置项。""" - } - } +fields_cache_enable.desc: +"""Enable or disable the authorization cache.""" - broker_enable_session_registry { - desc { - en: """Enable session registry""" - zh: """是否启用 Session Registry""" - } - } +fields_cache_enable.label: +"""Enable or disable the authorization cache.""" - broker_session_locking_strategy { - desc { - en: """Session locking strategy in a cluster. 
- - `local`: only lock the session on the current node - - `one`: select only one remote node to lock the session - - `quorum`: select some nodes to lock the session - - `all`: lock the session on all the nodes in the cluster""" +fields_mqtt_quic_listener_disconnect_timeout_ms.desc: +"""How long to wait for an ACK before declaring a path dead and disconnecting. Default: 16000""" - zh: """Session 在集群中的锁策略。 - - `loca`:仅锁本节点的 Session; - - `one`:任选一个其它节点加锁; - - `quorum`:选择集群中半数以上的节点加锁; - - `all`:选择所有节点加锁。""" - } - } +fields_mqtt_quic_listener_disconnect_timeout_ms.label: +"""Disconnect timeout ms""" - broker_shared_subscription_strategy { - desc { - en: """Dispatch strategy for shared subscription. - - `random`: dispatch the message to a random selected subscriber - - `round_robin`: select the subscribers in a round-robin manner - - `round_robin_per_group`: select the subscribers in round-robin fashion within each shared subscriber group - - `local`: select random local subscriber otherwise select random cluster-wide - - `sticky`: always use the last selected subscriber to dispatch, until the subscriber disconnects. - - `hash_clientid`: select the subscribers by hashing the `clientIds` - - `hash_topic`: select the subscribers by hashing the source topic""" +mqtt_max_topic_alias.desc: +"""Maximum topic alias, 0 means no topic alias supported.""" - zh: """共享订阅消息派发策略。 - - `random`:随机挑选一个共享订阅者派发; - - `round_robin`:使用 round-robin 策略派发; - - `round_robin_per_group`:在共享组内循环选择下一个成员; - - `local`:选择随机的本地成员,否则选择随机的集群范围内成员; - - `sticky`:总是使用上次选中的订阅者派发,直到它断开连接; - - `hash_clientid`:通过对发送者的客户端 ID 进行 Hash 处理来选择订阅者; - - `hash_topic`:通过对源主题进行 Hash 处理来选择订阅者。""" - } - } +mqtt_max_topic_alias.label: +"""Max Topic Alias""" - broker_shared_dispatch_ack_enabled { - desc { - en: """Deprecated, will be removed in 5.1. -Enable/disable shared dispatch acknowledgement for QoS 1 and QoS 2 messages. -This should allow messages to be dispatched to a different subscriber in the group in case the picked (based on `shared_subscription_strategy`) subscriber is offline.""" +common_ssl_opts_schema_user_lookup_fun.desc: +"""EMQX-internal callback that is used to lookup pre-shared key (PSK) identity.""" - zh: """该配置项已废弃,会在 5.1 中移除。 -启用/禁用 QoS 1 和 QoS 2 消息的共享派发确认。 -开启后,允许将消息从未及时回复 ACK 的订阅者 (例如,客户端离线) 重新派发给另外一个订阅者。""" - } - } +common_ssl_opts_schema_user_lookup_fun.label: +"""SSL PSK user lookup fun""" - broker_route_batch_clean { - desc { - en: """Enable batch clean for deleted routes.""" - zh: """是否开启批量清除路由。""" - } - } +fields_listeners_wss.desc: +"""HTTPS websocket listeners.""" - shared_subscription_group_strategy { - desc { - en: """Per group dispatch strategy for shared subscription. -This config is a map from shared subscription group name to the strategy -name. The group name should be of format `[A-Za-z0-9]`. i.e. no -special characters are allowed.""" - zh: """设置共享订阅组为单位的分发策略。该配置是一个从组名到 -策略名的一个map,组名不得包含 `[A-Za-z0-9]` 之外的特殊字符。""" - } +fields_listeners_wss.label: +"""HTTPS websocket listeners""" - } +sysmon_top_max_procs.desc: +"""Stop collecting data when the number of processes +in the VM exceeds this value""" - shared_subscription_strategy_enum { - desc { - en: """Dispatch strategy for shared subscription. 
-- `random`: dispatch the message to a random selected subscriber -- `round_robin`: select the subscribers in a round-robin manner -- `round_robin_per_group`: select the subscribers in round-robin fashion within each shared subscriber group -- `sticky`: always use the last selected subscriber to dispatch, -until the subscriber disconnects. -- `hash`: select the subscribers by the hash of `clientIds` -- `local`: send to a random local subscriber. If local -subscriber was not found, send to a random subscriber cluster-wide""" - zh: """共享订阅的分发策略名称。 -- `random`:随机选择一个组内成员; -- `round_robin`:循环选择下一个成员; -- `round_robin_per_group`:在共享组内循环选择下一个成员; -- `sticky`:使用上一次选中的成员; -- `hash`:根据 ClientID 哈希映射到一个成员; -- `local`:随机分发到节点本地成成员,如果本地成员不存在,则随机分发到任意一个成员。""" +sysmon_top_max_procs.label: +"""Max procs""" - } - } +mqtt_use_username_as_clientid.desc: +"""Whether to use Username as Client ID. +This setting takes effect later than Use Peer Certificate as Username and Use peer certificate as Client ID.""" - broker_perf_route_lock_type { - desc { - en: """Performance tuning for subscribing/unsubscribing a wildcard topic. -Change this parameter only when there are many wildcard topics. +mqtt_use_username_as_clientid.label: +"""Use Username as Client ID""" -NOTE: when changing from/to `global` lock, it requires all nodes in the cluster to be stopped before the change. - - `key`: mnesia transactional updates with per-key locks. Recommended for a single-node setup. - - `tab`: mnesia transactional updates with table lock. Recommended for a cluster setup. - - `global`: updates are protected with a global lock. Recommended for large clusters.""" - zh: """通配主题订阅/取消订阅性能调优。 -建议仅当通配符主题较多时才更改此参数。 +mqtt_max_qos_allowed.desc: +"""Maximum QoS allowed.""" -注:当从/更改为 `global` 锁时,它要求集群中的所有节点在更改之前停止。 - - `key`:为 Mnesia 事务涉及到的每个 key 上锁,建议单节点时使用。 - - `tab`:为 Mnesia 事务涉及到的表上锁,建议在集群中使用。 - - `global`:所以更新操作都被全局的锁保护,仅建议在超大规模集群中使用。""" - } - } +mqtt_max_qos_allowed.label: +"""Max QoS""" - broker_perf_trie_compaction { - desc { - en: """Enable trie path compaction. -Enabling it significantly improves wildcard topic subscribe rate, if wildcard topics have unique prefixes like: 'sensor/{{id}}/+/', where ID is unique per subscriber. -Topic match performance (when publishing) may degrade if messages are mostly published to topics with large number of levels. +fields_mqtt_quic_listener_max_binding_stateless_operations.desc: +"""The maximum number of stateless operations that may be queued on a binding at any one time. Default: 100""" -NOTE: This is a cluster-wide configuration. It requires all nodes to be stopped before changing it.""" - zh: """是否开启主题表压缩存储。 -启用它会显着提高通配符主题订阅率,如果通配符主题具有唯一前缀,例如:'sensor/{{id}}/+/',其中每个订阅者的 ID 是唯一的。 -如果消息主要发布到具有大量级别的主题,则主题匹配性能(发布时)可能会降低。 +fields_mqtt_quic_listener_max_binding_stateless_operations.label: +"""Max binding stateless operations""" -注意:这是一个集群范围的配置。 它要求在更改之前停止所有节点。""" - } - } +fields_mqtt_quic_listener_stream_recv_buffer_default.desc: +"""Stream initial buffer size. Default: 4096""" - sys_topics { - desc { - en: """System topics configuration.""" - zh: """系统主题配置。""" - } - } +fields_mqtt_quic_listener_stream_recv_buffer_default.label: +"""Stream recv buffer default""" - sys_msg_interval { - desc { - en: """Time interval of publishing `$SYS` messages.""" - zh: """发送 `$SYS` 主题的间隔时间。""" - } - } +fields_mqtt_quic_listener_pacing_enabled.desc: +"""Pace sending to avoid overfilling buffers on the path. 
Default: 1 (Enabled)""" - sys_heartbeat_interval { - desc { - en: """Time interval for publishing following heartbeat messages: - - `$SYS/brokers//uptime` - - `$SYS/brokers//datetime`""" - zh: """发送心跳系统消息的间隔时间,它包括: - - `$SYS/brokers//uptime` - - `$SYS/brokers//datetime`""" - } - } +fields_mqtt_quic_listener_pacing_enabled.label: +"""Pacing enabled""" - sys_event_messages { - desc { - en: """Client events messages.""" - zh: """客户端事件消息。""" - } - } +mqtt_max_subscriptions.desc: +"""Maximum number of subscriptions allowed per client.""" - sys_event_client_connected { - desc { - en: """Enable to publish client connected event messages""" - zh: """是否开启客户端已连接事件消息。""" - } - } +mqtt_max_subscriptions.label: +"""Max Subscriptions""" - sys_event_client_disconnected { - desc { - en: """Enable to publish client disconnected event messages.""" - zh: """是否开启客户端已断开连接事件消息。""" - } - } +persistent_session_builtin_messages_table.desc: +"""Performance tuning options for built-in messages table.""" - sys_event_client_subscribed { - desc { - en: """Enable to publish event message that client subscribed a topic successfully.""" - zh: """是否开启客户端已成功订阅主题事件消息。""" - } - } +persistent_session_builtin_messages_table.label: +"""Persistent messages""" - sys_event_client_unsubscribed { - desc { - en: """Enable to publish event message that client unsubscribed a topic successfully.""" - zh: """是否开启客户端已成功取消订阅主题事件消息。""" - } - } +sysmon_os_cpu_low_watermark.desc: +"""The threshold, as percentage of system CPU load, + for how much system cpu can be used before the corresponding alarm is cleared.""" +sysmon_os_cpu_low_watermark.label: +"""CPU low watermark""" -fields_authorization_no_match { - desc { - en: """Default access control action if the user or client matches no ACL rules, -or if no such user or client is found by the configurable authorization -sources such as built_in_database, an HTTP API, or a query against PostgreSQL. -Find more details in 'authorization.sources' config.""" - zh: """如果用户或客户端不匹配ACL规则,或者从可配置授权源(比如内置数据库、HTTP API 或 PostgreSQL 等。)内未找 -到此类用户或客户端时,模式的认访问控制操作。 -在“授权”中查找更多详细信息。""" - } - label: { - en: "Authorization no match" - zh: "未匹时的默认授权动作" - } -} +fields_mqtt_quic_listener_tls_server_max_send_buffer.desc: +"""How much Server TLS data to buffer. Default: 8192""" -fields_authorization_deny_action { - desc { - en: """The action when the authorization check rejects an operation.""" - zh: """授权检查拒绝操作时的操作。""" - } - label: { - en: "Authorization deny action" - zh: "授权检查拒绝操作时的操作" - } -} +fields_mqtt_quic_listener_tls_server_max_send_buffer.label: +"""TLS server max send buffer""" -fields_cache_enable { - desc { - en: """Enable or disable the authorization cache.""" - zh: """启用或禁用授权缓存。""" - } - label: { - en: "Enable or disable the authorization cache." - zh: "启用或禁用授权缓存" - } -} +base_listener_bind.desc: +"""IP address and port for the listening socket.""" -fields_cache_max_size { - desc { - en: """Maximum number of cached items.""" - zh: """缓存项的最大数量。""" - } - label: { - en: "Maximum number of cached items." - zh: "缓存项的最大数量" - } -} +base_listener_bind.label: +"""IP address and port""" -fields_cache_ttl { - desc { - en: """Time to live for the cached data.""" - zh: """缓存数据的生存时间。""" - } - label: { - en: "Time to live for the cached data." 
- zh: "缓存数据的生存时间。" - } -} +server_ssl_opts_schema_handshake_timeout.desc: +"""Maximum time duration allowed for the handshake to complete""" -fields_deflate_opts_level { - desc { - en: """Compression level.""" - zh: """压缩级别""" - } - label: { - en: "Compression level" - zh: "压缩级别" - } -} +server_ssl_opts_schema_handshake_timeout.label: +"""Handshake timeout""" -fields_deflate_opts_mem_level { - desc { - en: """Specifies the size of the compression state.
-Lower values decrease memory usage per connection.""" - zh: """指定压缩状态的大小
-较低的值会减少每个连接的内存使用。""" - } - label: { - en: "Size of the compression state" - zh: "压缩状态大小" - } -} +fields_deflate_opts_server_context_takeover.desc: +"""Takeover means the compression state is retained between server messages.""" -fields_deflate_opts_strategy { - desc { - en: """Specifies the compression strategy.""" - zh: """指定压缩策略。""" - } - label: { - en: "compression strategy" - zh: "指定压缩策略" - } -} +fields_deflate_opts_server_context_takeover.label: +"""Server context takeover""" -fields_deflate_opts_server_context_takeover { - desc { - en: """Takeover means the compression state is retained between server messages.""" - zh: """接管意味着在服务器消息之间保留压缩状态。""" - } - label: { - en: "Server context takeover" - zh: "服务上下文接管" - } -} +mqtt_session_expiry_interval.desc: +"""Specifies how long the session will expire after the connection is disconnected, only for non-MQTT 5.0 connections.""" -fields_deflate_opts_client_context_takeover { - desc { - en: """Takeover means the compression state is retained between client messages.""" - zh: """接管意味着在客户端消息之间保留压缩状态。""" - } - label: { - en: "Client context takeover" - zh: "客户端上下文接管" - } -} +mqtt_session_expiry_interval.label: +"""Session Expiry Interval""" -fields_deflate_opts_server_max_window_bits { - desc { - en: """Specifies the size of the compression context for the server.""" - zh: """指定服务器压缩上下文的大小。""" - } - label: { - en: "Server compression max window size" - zh: "服务器压缩窗口大小" - } -} +fields_listener_enabled.desc: +"""Enable listener.""" -fields_deflate_opts_client_max_window_bits { - desc { - en: """Specifies the size of the compression context for the client.""" - zh: """指定客户端压缩上下文的大小。""" - } - label: { - en: "Client compression max window size" - zh: "压缩窗口大小" - } -} +fields_listener_enabled.label: +"""Enable listener""" -client_ssl_opts_schema_enable { - desc { - en: """Enable TLS.""" - zh: """启用 TLS。""" - } - label: { - en: "Enable TLS." - zh: "启用 TLS" - } -} +mqtt.desc: +"""Global MQTT configuration. +The configs here work as default values which can be overridden in zone configs""" -common_ssl_opts_schema_cacertfile { - desc { - en: """Trusted PEM format CA certificates bundle file.
+crl_cache_refresh_http_timeout.desc: +"""The timeout for the HTTP request when fetching CRLs. This is a global setting for all listeners.""" + +crl_cache_refresh_http_timeout.label: +"""CRL Cache Refresh HTTP Timeout""" + +fields_tcp_opts_backlog.desc: +"""TCP backlog defines the maximum length that the queue of +pending connections can grow to.""" + +fields_tcp_opts_backlog.label: +"""TCP backlog length""" + +broker_route_batch_clean.desc: +"""Enable batch clean for deleted routes.""" + +fields_mqtt_quic_listener_initial_window_packets.desc: +"""The size (in packets) of the initial congestion window for a connection. Default: 10""" + +fields_mqtt_quic_listener_initial_window_packets.label: +"""Initial window packets""" + +flapping_detect_enable.desc: +"""Enable flapping connection detection feature.""" + +flapping_detect_enable.label: +"""Enable flapping detection""" + +sysmon_top_db_password.desc: +"""EMQX user password in the PostgreSQL database""" + +sysmon_top_db_password.label: +"""DB Password""" + +fields_ws_opts_check_origins.desc: +"""List of allowed origins.
See check_origin_enable.""" + +fields_ws_opts_check_origins.label: +"""Allowed origins""" + +fields_deflate_opts_client_context_takeover.desc: +"""Takeover means the compression state is retained between client messages.""" + +fields_deflate_opts_client_context_takeover.label: +"""Client context takeover""" + +base_listener_acceptors.desc: +"""The size of the listener's receiving pool.""" + +base_listener_acceptors.label: +"""Acceptors Num""" + +common_ssl_opts_schema_cacertfile.desc: +"""Trusted PEM format CA certificates bundle file.
The certificates in this file are used to verify the TLS peer's certificates.
Append new certificates to the file if new CAs are to be trusted.
There is no need to restart EMQX to have the updated file loaded, because
the system regularly checks whether the file has been updated (and reloads it).<br/>
NOTE: invalidating (deleting) a certificate from the file will not affect already established connections.""" - zh: """受信任的PEM格式 CA 证书捆绑文件
-此文件中的证书用于验证TLS对等方的证书。 -如果要信任新 CA,请将新证书附加到文件中。 -无需重启EMQX即可加载更新的文件,因为系统会定期检查文件是否已更新(并重新加载)
-注意:从文件中失效(删除)证书不会影响已建立的连接。""" - } - label: { - en: "CACertfile" - zh: "CA 证书文件" - } -} -common_ssl_opts_schema_certfile { - desc { - en: """PEM format certificates chain file.
-The certificates in this file should be in reversed order of the certificate -issue chain. That is, the host's certificate should be placed in the beginning -of the file, followed by the immediate issuer certificate and so on. -Although the root CA certificate is optional, it should be placed at the end of -the file if it is to be added.""" - zh: """PEM格式证书链文件
-此文件中的证书应与证书颁发链的顺序相反。也就是说,主机的证书应该放在文件的开头, -然后是直接颁发者 CA 证书,依此类推,一直到根 CA 证书。 -根 CA 证书是可选的,如果想要添加,应加到文件到最末端。""" - } - label: { - en: "Certfile" - zh: "证书文件" - } -} +common_ssl_opts_schema_cacertfile.label: +"""CACertfile""" -common_ssl_opts_schema_keyfile { - desc { - en: """PEM format private key file.""" - zh: """PEM格式的私钥文件。""" - } - label: { - en: "Keyfile" - zh: "私钥文件" - } -} +fields_ws_opts_mqtt_path.desc: +"""WebSocket's MQTT protocol path. So the address of EMQX Broker's WebSocket is: +ws://{ip}:{port}/mqtt""" -common_ssl_opts_schema_verify { - desc { - en: """Enable or disable peer verification.""" - zh: """启用或禁用对等验证。""" - } - label: { - en: "Verify peer" - zh: "对等验证" - } -} +fields_ws_opts_mqtt_path.label: +"""WS MQTT Path""" -common_ssl_opts_schema_reuse_sessions { - desc { - en: """Enable TLS session reuse.""" - zh: """启用 TLS 会话重用。""" - } - label: { - en: "TLS session reuse" - zh: "TLS 会话重用" - } -} +sysmon_os_procmem_high_watermark.desc: +"""The threshold, as percentage of system memory, + for how much system memory can be allocated by one Erlang process before + the corresponding alarm is raised.""" -common_ssl_opts_schema_depth { - desc { - en: """Maximum number of non-self-issued intermediate certificates that can follow the peer certificate in a valid certification path. -So, if depth is 0 the PEER must be signed by the trusted ROOT-CA directly;
-if 1 the path can be PEER, Intermediate-CA, ROOT-CA;
-if 2 the path can be PEER, Intermediate-CA1, Intermediate-CA2, ROOT-CA.""" - zh: """在有效的证书路径中,可以跟随对等证书的非自颁发中间证书的最大数量。 -因此,如果深度为0,则对等方必须由受信任的根 CA 直接签名;
-如果是1,路径可以是 PEER、中间 CA、ROOT-CA;
-如果是2,则路径可以是PEER、中间 CA1、中间 CA2、ROOT-CA。""" - } - label: { - en: "CACert Depth" - zh: "CA 证书深度" - } -} +sysmon_os_procmem_high_watermark.label: +"""ProcMem high wartermark""" -common_ssl_opts_schema_password { - desc { - en: """String containing the user's password. Only used if the private key file is password-protected.""" - zh: """包含用户密码的字符串。仅在私钥文件受密码保护时使用。""" - } - label: { - en: "Keyfile passphrase" - zh: "秘钥文件密码" - } -} +fields_listeners_quic.desc: +"""QUIC listeners.""" -common_ssl_opts_schema_versions { - desc { - en: """All TLS/DTLS versions to be supported.
-NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config.
-In case PSK cipher suites are intended, make sure to configure -['tlsv1.2', 'tlsv1.1'] here.""" - zh: """支持所有TLS/DTLS版本
-注:PSK 的 Ciphers 无法在 tlsv1.3 中使用,如果打算使用 PSK 密码套件,请确保这里配置为 ["tlsv1.2","tlsv1.1"]。""" - } - label: { - en: "SSL versions" - zh: "SSL 版本" - } -} +fields_listeners_quic.label: +"""QUIC listeners""" -common_ssl_opts_schema_hibernate_after { - desc { - en: """Hibernate the SSL process after idling for amount of time reducing its memory footprint.""" - zh: """在闲置一定时间后休眠 SSL 进程,减少其内存占用。""" - } - label: { - en: "hibernate after" - zh: "闲置多久后休眠" - } -} +fields_listeners_ws.desc: +"""HTTP websocket listeners.""" -ciphers_schema_common { - desc { - en: """This config holds TLS cipher suite names separated by comma, -or as an array of strings. e.g. -"TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256" or -["TLS_AES_256_GCM_SHA384","TLS_AES_128_GCM_SHA256"]. -
-Ciphers (and their ordering) define the way in which the -client and server encrypts information over the network connection. -Selecting a good cipher suite is critical for the -application's data security, confidentiality and performance. +fields_listeners_ws.label: +"""HTTP websocket listeners""" -The names should be in OpenSSL string format (not RFC format). -All default values and examples provided by EMQX config -documentation are all in OpenSSL format.
+mqtt_retry_interval.desc: +"""Retry interval for QoS 1/2 message delivering.""" -NOTE: Certain cipher suites are only compatible with -specific TLS versions ('tlsv1.1', 'tlsv1.2' or 'tlsv1.3') -incompatible cipher suites will be silently dropped. -For instance, if only 'tlsv1.3' is given in the versions, -configuring cipher suites for other versions will have no effect. -
+mqtt_retry_interval.label: +"""Retry Interval""" -NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config
-If PSK cipher suites are intended, 'tlsv1.3' should be disabled from versions.
-PSK cipher suites: "RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384, -RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256, -RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA, -RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"""" - zh: """此配置保存由逗号分隔的 TLS 密码套件名称,或作为字符串数组。例如 -"TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256"或 -["TLS_AES_256_GCM_SHA384","TLS_AES_128_GCM_SHA256"]。 -
-密码(及其顺序)定义了客户端和服务器通过网络连接加密信息的方式。 -选择一个好的密码套件对于应用程序的数据安全性、机密性和性能至关重要。 +stats_enable.desc: +"""Enable/disable statistic data collection.""" -名称应为 OpenSSL 字符串格式(而不是 RFC 格式)。 -EMQX 配置文档提供的所有默认值和示例都是 OpenSSL 格式
-注意:某些密码套件仅与特定的 TLS 版本兼容('tlsv1.1'、'tlsv1.2'或'tlsv1.3')。 -不兼容的密码套件将被自动删除。 +stats_enable.label: +"""Enable/disable statistic data collection.""" -例如,如果只有 versions 仅配置为 tlsv1.3。为其他版本配置密码套件将无效。 +fields_authorization_deny_action.desc: +"""The action when the authorization check rejects an operation.""" -
-注:PSK 的 Ciphers 不支持 tlsv1.3
-如果打算使用PSK密码套件 tlsv1.3。应在ssl.versions中禁用。 +fields_authorization_deny_action.label: +"""Authorization deny action""" -
-PSK 密码套件: -"RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384, -RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256, -RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA, -RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"""" - } - label: { - en: "" - zh: "" - } -} +fields_deflate_opts_server_max_window_bits.desc: +"""Specifies the size of the compression context for the server.""" -ciphers_schema_quic { - desc { - en: """This config holds TLS cipher suite names separated by comma, +fields_deflate_opts_server_max_window_bits.label: +"""Server compression max window size""" + +client_ssl_opts_schema_server_name_indication.desc: +"""Specify the host name to be used in TLS Server Name Indication extension.
+For instance, when connecting to "server.example.net", the genuine server +which accepts the connection and performs TLS handshake may differ from the +host the TLS client initially connects to, e.g. when connecting to an IP address +or when the host has multiple resolvable DNS records
+If not specified, it will default to the host name string which is used +to establish the connection, unless it is IP addressed used.
+The host name is then also used in the host name verification of the peer +certificate.
The special value 'disable' prevents the Server Name +Indication extension from being sent and disables the hostname +verification check.""" + +client_ssl_opts_schema_server_name_indication.label: +"""Server Name Indication""" + +fields_mqtt_quic_listener_retry_memory_limit.desc: +"""The percentage of available memory usable for handshake connections before stateless retry is used. Calculated as `N/65535`. Default: 65""" + +fields_mqtt_quic_listener_retry_memory_limit.label: +"""Retry memory limit""" + +force_shutdown_max_message_queue_len.desc: +"""Maximum message queue length.""" + +force_shutdown_max_message_queue_len.label: +"""Maximum mailbox queue length of process.""" + +sys_heartbeat_interval.desc: +"""Time interval for publishing following heartbeat messages: + - `$SYS/brokers//uptime` + - `$SYS/brokers//datetime`""" + +flapping_detect_ban_time.desc: +"""How long the flapping clientid will be banned.""" + +flapping_detect_ban_time.label: +"""Ban time""" + +sysmon_top_num_items.desc: +"""The number of top processes per monitoring group""" + +sysmon_top_num_items.label: +"""Top num items""" + +persistent_session_builtin_session_table.desc: +"""Performance tuning options for built-in session table.""" + +persistent_session_builtin_session_table.label: +"""Persistent session""" + +mqtt_upgrade_qos.desc: +"""Force upgrade of QoS level according to subscription.""" + +mqtt_upgrade_qos.label: +"""Upgrade QoS""" + +mqtt_shared_subscription.desc: +"""Whether to enable support for MQTT shared subscription.""" + +mqtt_shared_subscription.label: +"""Shared Subscription Available""" + +fields_tcp_opts_sndbuf.desc: +"""The TCP send buffer (OS kernel) for the connections.""" + +fields_tcp_opts_sndbuf.label: +"""TCP send buffer""" + +sysmon_os_mem_check_interval.desc: +"""The time interval for the periodic memory check.""" + +sysmon_os_mem_check_interval.label: +"""Mem check interval""" + +server_ssl_opts_schema_gc_after_handshake.desc: +"""Memory usage tuning. If enabled, will immediately perform a garbage collection after the TLS/SSL handshake.""" + +server_ssl_opts_schema_gc_after_handshake.label: +"""Perform GC after handshake""" + +fields_mqtt_quic_listener_ssl_options.desc: +"""TLS options for QUIC transport""" + +fields_mqtt_quic_listener_ssl_options.label: +"""TLS Options""" + +fields_ws_opts_mqtt_piggyback.desc: +"""Whether a WebSocket message is allowed to contain multiple MQTT packets.""" + +fields_ws_opts_mqtt_piggyback.label: +"""MQTT Piggyback""" + +base_listener_mountpoint.desc: +"""When publishing or subscribing, prefix all topics with a mountpoint string. +The prefixed string will be removed from the topic name when the message +is delivered to the subscriber. The mountpoint is a way that users can use +to implement isolation of message routing between different listeners. +For example if a client A subscribes to `t` with `listeners.tcp.\.mountpoint` +set to `some_tenant`, then the client actually subscribes to the topic +`some_tenant/t`. Similarly, if another client B (connected to the same listener +as the client A) sends a message to topic `t`, the message is routed +to all the clients subscribed `some_tenant/t`, so client A will receive the +message, with topic name `t`.
+Set to `""` to disable the feature.
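For example, a minimal emqx.conf sketch (assuming a TCP listener named `default`; the listener name is illustrative):<br/>
`listeners.tcp.default.mountpoint = "${clientid}/"`<br/>
With this setting, a message published to `t` by client `c1` is routed internally as `c1/t`.<br/>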
+ +Variables in mountpoint string: + - ${clientid}: clientid + - ${username}: username""" + +base_listener_mountpoint.label: +"""mountpoint""" + +mqtt_max_awaiting_rel.desc: +"""For each publisher session, the maximum number of outstanding QoS 2 messages pending on the client to send PUBREL. After reaching this limit, new QoS 2 PUBLISH requests will be rejected with `147(0x93)` until either PUBREL is received or timed out.""" + +mqtt_max_awaiting_rel.label: +"""Max Awaiting PUBREL""" + +ciphers_schema_quic.desc: +"""This config holds TLS cipher suite names separated by comma, or as an array of strings. e.g. "TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256" or ["TLS_AES_256_GCM_SHA384","TLS_AES_128_GCM_SHA256"]. @@ -1606,115 +465,744 @@ RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"

NOTE: QUIC listener supports only 'tlsv1.3' ciphers""" - zh: """此配置保存由逗号分隔的 TLS 密码套件名称,或作为字符串数组。例如 -"TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256"或 -["TLS_AES_256_GCM_SHA384","TLS_AES_128_GCM_SHA256"]。 -
-密码(及其顺序)定义了客户端和服务器通过网络连接加密信息的方式。 -选择一个好的密码套件对于应用程序的数据安全性、机密性和性能至关重要。 +ciphers_schema_quic.label: +"""""" -名称应为 OpenSSL 字符串格式(而不是 RFC 格式)。 -EMQX 配置文档提供的所有默认值和示例都是 OpenSSL 格式
-注意:某些密码套件仅与特定的 TLS 版本兼容('tlsv1.1'、'tlsv1.2'或'tlsv1.3')。 -不兼容的密码套件将被自动删除。 +fields_mqtt_quic_listener_max_bytes_per_key.desc: +"""Maximum number of bytes to encrypt with a single 1-RTT encryption key before initiating key update. Default: 274877906944""" -例如,如果只有 versions 仅配置为 tlsv1.3。为其他版本配置密码套件将无效。 +fields_mqtt_quic_listener_max_bytes_per_key.label: +"""Max bytes per key""" -
-注:PSK 的 Ciphers 不支持 tlsv1.3
-如果打算使用PSK密码套件,tlsv1.3。应在ssl.versions中禁用。 +fields_mqtt_quic_listener_mtu_discovery_search_complete_timeout_us.desc: +"""The time in microseconds to wait before reattempting MTU probing if max was not reached. Default: 600000000""" -
-PSK 密码套件: -"RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384, -RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256, -RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA, -RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"
+fields_mqtt_quic_listener_mtu_discovery_search_complete_timeout_us.label: +"""MTU discovery search complete timeout us""" -注:QUIC 监听器不支持 tlsv1.3 的 ciphers""" - } - label: { - en: "" - zh: "" - } -} +fields_ws_opts_check_origin_enable.desc: +"""If true, origin HTTP header will be + validated against the list of allowed origins configured in check_origins + parameter.""" -common_ssl_opts_schema_user_lookup_fun { - desc { - en: """EMQX-internal callback that is used to lookup pre-shared key (PSK) identity.""" - zh: """用于查找预共享密钥(PSK)标识的 EMQX 内部回调。""" - } - label: { - en: "SSL PSK user lookup fun" - zh: "SSL PSK 用户回调" - } -} +fields_ws_opts_check_origin_enable.label: +"""Check origin""" -common_ssl_opts_schema_secure_renegotiate { - desc { - en: """SSL parameter renegotiation is a feature that allows a client and a server -to renegotiate the parameters of the SSL connection on the fly. -RFC 5746 defines a more secure way of doing this. By enabling secure renegotiation, -you drop support for the insecure renegotiation, prone to MitM attacks.""" - zh: """SSL 参数重新协商是一种允许客户端和服务器动态重新协商 SSL 连接参数的功能。 -RFC 5746 定义了一种更安全的方法。通过启用安全的重新协商,您就失去了对不安全的重新协商的支持,从而容易受到 MitM 攻击。""" - } - label: { - en: "SSL renegotiate" - zh: "SSL 重新协商" - } -} +sysmon_vm_busy_dist_port.desc: +"""When the RPC connection used to communicate with other nodes in the cluster is overloaded, +there will be a busy_dist_port warning log, +and an MQTT message is published to system topic $SYS/sysmon/busy_dist_port.""" -server_ssl_opts_schema_dhfile { - desc { - en: """Path to a file containing PEM-encoded Diffie-Hellman parameters +sysmon_vm_busy_dist_port.label: +"""Enable Busy Distribution Port monitoring.""" + +mqtt_max_mqueue_len.desc: +"""Maximum queue length. Enqueued messages when persistent client disconnected, or inflight window is full.""" + +mqtt_max_mqueue_len.label: +"""Max Message Queue Length""" + +mqtt_max_inflight.desc: +"""Maximum number of QoS 1 and QoS 2 messages that are allowed to be delivered simultaneously before completing the acknowledgment.""" + +mqtt_max_inflight.label: +"""Max Inflight""" + +persistent_session_store_enabled.desc: +"""Use the database to store information about persistent sessions. +This makes it possible to migrate a client connection to another +cluster node if a node is stopped.""" + +persistent_session_store_enabled.label: +"""Enable persistent session store""" + +fields_deflate_opts_level.desc: +"""Compression level.""" + +fields_deflate_opts_level.label: +"""Compression level""" + +mqtt_server_keepalive.desc: +"""The keep alive duration required by EMQX. To use the setting from the client side, choose disabled from the drop-down list. Only applicable to MQTT 5.0 clients.""" + +mqtt_server_keepalive.label: +"""Server Keep Alive""" + +global_authentication.desc: +"""Default authentication configs for all MQTT listeners. + +For per-listener overrides see authentication in listener configs + +This option can be configured with: +
    +
  - `[]`: The default value, it allows *ALL* logins
  - one: For example `{enable:true,backend:"built_in_database",mechanism="password_based"}`
  - chain: An array of structs.
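For example, a minimal sketch of a single-element chain in emqx.conf, mirroring the struct shown above (adjust the backend to your deployment):
  authentication = [
    {mechanism = password_based, backend = built_in_database, enable = true}
  ]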
+ +When a chain is configured, the login credentials are checked against the backends per the configured order, until an 'allow' or 'deny' decision can be made. + +If there is no decision after a full chain exhaustion, the login is rejected.""" + +fields_mqtt_quic_listener_load_balancing_mode.desc: +"""0: Disabled, 1: SERVER_ID_IP, 2: SERVER_ID_FIXED. default: 0""" + +fields_mqtt_quic_listener_load_balancing_mode.label: +"""Load balancing mode""" + +persistent_session_store_session_message_gc_interval.desc: +"""The starting interval for garbage collection of transient data for +persistent session messages. This does not affect the lifetime length +of persistent session messages.""" + +persistent_session_store_session_message_gc_interval.label: +"""Session message GC interval""" + +server_ssl_opts_schema_ocsp_refresh_http_timeout.desc: +"""The timeout for the HTTP request when checking OCSP responses.""" + +server_ssl_opts_schema_ocsp_refresh_http_timeout.label: +"""OCSP Refresh HTTP Timeout""" + +fields_tcp_opts_send_timeout.desc: +"""The TCP send timeout for the connections.""" + +fields_tcp_opts_send_timeout.label: +"""TCP send timeout""" + +sysmon_vm_process_high_watermark.desc: +"""The threshold, as percentage of processes, for how many + processes can simultaneously exist at the local node before the corresponding + alarm is raised.""" + +sysmon_vm_process_high_watermark.label: +"""Process high watermark""" + +fields_tcp_opts_buffer.desc: +"""The size of the user-space buffer used by the driver.""" + +fields_tcp_opts_buffer.label: +"""TCP user-space buffer""" + +server_ssl_opts_schema_honor_cipher_order.desc: +"""An important security setting, it forces the cipher to be set based + on the server-specified order instead of the client-specified order, + hence enforcing the (usually more properly configured) security + ordering of the server administrator.""" + +server_ssl_opts_schema_honor_cipher_order.label: +"""SSL honor cipher order""" + +conn_congestion_min_alarm_sustain_duration.desc: +"""Minimal time before clearing the alarm.
The alarm is cleared only when there's no pending data in
the queue, and at least min_alarm_sustain_duration milliseconds have passed since the last time we considered the connection 'congested'.<br/>
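For example, a sketch in emqx.conf (the exact path and value are illustrative): `conn_congestion.min_alarm_sustain_duration = 1m` keeps the alarm raised for at least one minute after the last congestion observation.<br/>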
This is to avoid clearing and raising the alarm again too often.""" + +conn_congestion_min_alarm_sustain_duration.label: +"""Sustain duration""" + +fields_mqtt_quic_listener_keep_alive_interval_ms.desc: +"""How often to send PING frames to keep a connection alive.""" + +fields_mqtt_quic_listener_keep_alive_interval_ms.label: +"""Keep alive interval ms""" + +fields_mqtt_quic_listener_handshake_idle_timeout_ms.desc: +"""How long a handshake can idle before it is discarded""" + +fields_mqtt_quic_listener_handshake_idle_timeout_ms.label: +"""Handshake idle timeout ms""" + +broker_session_locking_strategy.desc: +"""Session locking strategy in a cluster. + - `local`: only lock the session on the current node + - `one`: select only one remote node to lock the session + - `quorum`: select some nodes to lock the session + - `all`: lock the session on all the nodes in the cluster""" + +persistent_store_ram_cache.desc: +"""Maintain a copy of the data in RAM for faster access.""" + +persistent_store_ram_cache.label: +"""RAM cache""" + +fields_mqtt_quic_listener_stream_recv_window_default.desc: +"""Initial stream receive window size. Default: 32678""" + +fields_mqtt_quic_listener_stream_recv_window_default.label: +"""Stream recv window default""" + +mqtt_mqueue_priorities.desc: +"""Topic priorities. Priority number [1-255] +There's no priority table by default, hence all messages are treated equal. + +**NOTE**: Comma and equal signs are not allowed for priority topic names. +**NOTE**: Messages for topics not in the priority table are treated as either highest or lowest priority depending on the configured value for mqtt.mqueue_default_priority. + +**Examples**: +To configure "topic/1" > "topic/2": +mqueue_priorities: {"topic/1": 10, "topic/2": 8}""" + +mqtt_mqueue_priorities.label: +"""Topic Priorities""" + +fields_rate_limit_conn_messages_in.desc: +"""Message limit for the external MQTT connections.""" + +fields_rate_limit_conn_messages_in.label: +"""connecting messages in""" + +fields_rate_limit_max_conn_rate.desc: +"""Maximum connections per second.""" + +fields_rate_limit_max_conn_rate.label: +"""Max connection rate""" + +alarm_size_limit.desc: +"""The maximum total number of deactivated alarms to keep as history.
When this limit is exceeded, the oldest deactivated alarms are deleted to cap the total number.""" + +alarm_size_limit.label: +"""Alarm size limit""" + +fields_cache_max_size.desc: +"""Maximum number of cached items.""" + +fields_cache_max_size.label: +"""Maximum number of cached items.""" + +fields_listeners_tcp.desc: +"""TCP listeners.""" + +fields_listeners_tcp.label: +"""TCP listeners""" + +conn_congestion_enable_alarm.desc: +"""Enable or disable connection congestion alarm.""" + +conn_congestion_enable_alarm.label: +"""Enable/disable congestion alarm""" + +fields_ws_opts_proxy_port_header.desc: +"""HTTP header used to pass information about the client port. Relevant when the EMQX cluster is deployed behind a load-balancer.""" + +fields_ws_opts_proxy_port_header.label: +"""Proxy port header""" + +overload_protection_enable.desc: +"""React on system overload or not.""" + +overload_protection_enable.label: +"""React on system overload or not""" + +fields_mqtt_quic_listener_minimum_mtu.desc: +"""The minimum MTU supported by a connection. This will be used as the starting MTU. Default: 1248""" + +fields_mqtt_quic_listener_minimum_mtu.label: +"""Minimum MTU""" + +sys_msg_interval.desc: +"""Time interval of publishing `$SYS` messages.""" + +mqtt_await_rel_timeout.desc: +"""For client to broker QoS 2 message, the time limit for the broker to wait before the `PUBREL` message is received. The wait is aborted after timed out, meaning the packet ID is freed for new `PUBLISH` requests. Receiving a stale `PUBREL` causes a warning level log. Note, the message is delivered to subscribers before entering the wait for PUBREL.""" + +mqtt_await_rel_timeout.label: +"""Max Awaiting PUBREL TIMEOUT""" + +common_ssl_opts_schema_verify.desc: +"""Enable or disable peer verification.""" + +common_ssl_opts_schema_verify.label: +"""Verify peer""" + +fields_listeners_ssl.desc: +"""SSL listeners.""" + +fields_listeners_ssl.label: +"""SSL listeners""" + +fields_deflate_opts_client_max_window_bits.desc: +"""Specifies the size of the compression context for the client.""" + +fields_deflate_opts_client_max_window_bits.label: +"""Client compression max window size""" + +common_ssl_opts_schema_keyfile.desc: +"""PEM format private key file.""" + +common_ssl_opts_schema_keyfile.label: +"""Keyfile""" + +sysmon_os_cpu_high_watermark.desc: +"""The threshold, as percentage of system CPU load, + for how much system cpu can be used before the corresponding alarm is raised.""" + +sysmon_os_cpu_high_watermark.label: +"""CPU high watermark""" + +flapping_detect_window_time.desc: +"""The time window for flapping detection.""" + +flapping_detect_window_time.label: +"""Window time""" + +mqtt_mqueue_default_priority.desc: +"""Default topic priority, which will be used by topics not in Topic Priorities (mqueue_priorities).""" + +mqtt_mqueue_default_priority.label: +"""Default Topic Priorities""" + +client_ssl_opts_schema_enable.desc: +"""Enable TLS.""" + +client_ssl_opts_schema_enable.label: +"""Enable TLS.""" + +fields_mqtt_quic_listener_mtu_discovery_missing_probe_count.desc: +"""The maximum number of stateless operations that may be queued on a binding at any one time. 
Default: 3""" + +fields_mqtt_quic_listener_mtu_discovery_missing_probe_count.label: +"""MTU discovery missing probe count""" + +fields_tcp_opts_recbuf.desc: +"""The TCP receive buffer (OS kernel) for the connections.""" + +fields_tcp_opts_recbuf.label: +"""TCP receive buffer""" + +sysmon_vm_process_check_interval.desc: +"""The time interval for the periodic process limit check.""" + +sysmon_vm_process_check_interval.label: +"""Process limit check interval""" + +fields_mqtt_quic_listener_server_resumption_level.desc: +"""Controls resumption tickets and/or 0-RTT server support. Default: 0 (No resumption)""" + +fields_mqtt_quic_listener_server_resumption_level.label: +"""Server resumption level""" + +fields_ws_opts_proxy_address_header.desc: +"""HTTP header used to pass information about the client IP address. +Relevant when the EMQX cluster is deployed behind a load-balancer.""" + +fields_ws_opts_proxy_address_header.label: +"""Proxy address header""" + +sysmon_os_sysmem_high_watermark.desc: +"""The threshold, as percentage of system memory, + for how much system memory can be allocated before the corresponding alarm is raised.""" + +sysmon_os_sysmem_high_watermark.label: +"""SysMem high wartermark""" + +fields_tcp_opts_high_watermark.desc: +"""The socket is set to a busy state when the amount of data queued internally +by the VM socket implementation reaches this limit.""" + +fields_tcp_opts_high_watermark.label: +"""TCP 高水位线""" + +fields_mqtt_quic_listener_stateless_operation_expiration_ms.desc: +"""The time limit between operations for the same endpoint, in milliseconds. Default: 100""" + +fields_mqtt_quic_listener_stateless_operation_expiration_ms.label: +"""Stateless operation expiration ms""" + +server_ssl_opts_schema_dhfile.desc: +"""Path to a file containing PEM-encoded Diffie-Hellman parameters to be used by the server if a cipher suite using Diffie-Hellman key exchange is negotiated. If not specified, default parameters are used.
NOTE: The dhfile option is not supported by TLS 1.3.""" - zh: """如果协商使用Diffie-Hellman密钥交换的密码套件,则服务器将使用包含PEM编码的Diffie-Hellman参数的文件的路径。如果未指定,则使用默认参数。
-注意:TLS 1.3不支持dhfile选项。""" - } - label: { - en: "SSL dhfile" - zh: "SSL dhfile" - } -} -server_ssl_opts_schema_fail_if_no_peer_cert { - desc { - en: """Used together with {verify, verify_peer} by an TLS/DTLS server. +server_ssl_opts_schema_dhfile.label: +"""SSL dhfile""" + +flapping_detect_max_count.desc: +"""The maximum number of disconnects allowed for a MQTT Client in `window_time`""" + +flapping_detect_max_count.label: +"""Max count""" + +mqtt_max_topic_levels.desc: +"""Maximum topic levels allowed.""" + +mqtt_max_topic_levels.label: +"""Max Topic Levels""" + +force_shutdown_max_heap_size.desc: +"""Total heap size""" + +force_shutdown_max_heap_size.label: +"""Total heap size""" + +persistent_store_on_disc.desc: +"""Save information about the persistent sessions on disc. +If this option is enabled, persistent sessions will survive full restart of the cluster. +Otherwise, all the data will be stored in RAM, and it will be lost when all the nodes in the cluster are stopped.""" + +persistent_store_on_disc.label: +"""Persist on disc""" + +mqtt_ignore_loop_deliver.desc: +"""Whether the messages sent by the MQTT v3.1.1/v3.1.0 client will be looped back to the publisher itself, similar to No Local in MQTT 5.0.""" + +mqtt_ignore_loop_deliver.label: +"""Ignore Loop Deliver""" + +common_ssl_opts_schema_certfile.desc: +"""PEM format certificates chain file.
+The certificates in this file should be in reversed order of the certificate +issue chain. That is, the host's certificate should be placed in the beginning +of the file, followed by the immediate issuer certificate and so on. +Although the root CA certificate is optional, it should be placed at the end of +the file if it is to be added.""" + +common_ssl_opts_schema_certfile.label: +"""Certfile""" + +mqtt_exclusive_subscription.desc: +"""Whether to enable support for MQTT exclusive subscription.""" + +mqtt_exclusive_subscription.label: +"""Exclusive Subscription""" + +mqtt_retain_available.desc: +"""Whether to enable support for MQTT retained message.""" + +mqtt_retain_available.label: +"""Retain Available""" + +fields_tcp_opts_reuseaddr.desc: +"""The SO_REUSEADDR flag for the connections.""" + +fields_tcp_opts_reuseaddr.label: +"""SO_REUSEADDR""" + +sysmon_vm_long_schedule.desc: +"""When the Erlang VM detect a task scheduled for too long, a warning level 'long_schedule' log is emitted, +and an MQTT message is published to the system topic $SYS/sysmon/long_schedule.""" + +sysmon_vm_long_schedule.label: +"""Enable Long Schedule monitoring.""" + +mqtt_keepalive_backoff.desc: +"""The coefficient EMQX uses to confirm whether the keep alive duration of the client expires. Formula: Keep Alive * Backoff * 2""" + +mqtt_keepalive_backoff.label: +"""Keep Alive Backoff""" + +force_gc_bytes.desc: +"""GC the process after specified number of bytes have passed through.""" + +force_gc_bytes.label: +"""Process GC bytes""" + +server_ssl_opts_schema_fail_if_no_peer_cert.desc: +"""Used together with {verify, verify_peer} by an TLS/DTLS server. If set to true, the server fails if the client does not have a certificate to send, that is, sends an empty certificate. If set to false, it fails only if the client sends an invalid certificate (an empty certificate is considered valid).""" - zh: """TLS/DTLS 服务器与 {verify,verify_peer} 一起使用。 -如果设置为true,则如果客户端没有要发送的证书,即发送空证书,服务器将失败。 -如果设置为false,则仅当客户端发送无效证书(空证书被视为有效证书)时才会失败。""" - } - label: { - en: "SSL fail if no peer cert" - zh: "没有证书则 SSL 失败" - } -} -server_ssl_opts_schema_honor_cipher_order { - desc { - en: """An important security setting, it forces the cipher to be set based - on the server-specified order instead of the client-specified order, - hence enforcing the (usually more properly configured) security - ordering of the server administrator.""" - zh: """一个重要的安全设置,它强制根据服务器指定的顺序而不是客户机指定的顺序设置密码,从而强制服务器管理员执行(通常配置得更正确)安全顺序。""" - } - label: { - en: "SSL honor cipher order" - zh: "SSL honor cipher order" - } -} +server_ssl_opts_schema_fail_if_no_peer_cert.label: +"""SSL fail if no peer cert""" -server_ssl_opts_schema_client_renegotiation { - desc { - en: """In protocols that support client-initiated renegotiation, +fields_ws_opts_compress.desc: +"""If true, compress WebSocket messages using zlib.
+The configuration items under deflate_opts belong to the compression-related parameter configuration.""" + +fields_ws_opts_compress.label: +"""Ws compress""" + +fields_mqtt_quic_listener_keep_alive_interval.desc: +"""How often to send PING frames to keep a connection alive. 0 means disabled.""" + +fields_mqtt_quic_listener_keep_alive_interval.label: +"""Keep Alive Interval""" + +fields_cache_ttl.desc: +"""Time to live for the cached data.""" + +fields_cache_ttl.label: +"""Time to live for the cached data.""" + +sys_topics.desc: +"""System topics configuration.""" + +sys_event_client_subscribed.desc: +"""Enable to publish event message that client subscribed a topic successfully.""" + +sysmon_top_db_port.desc: +"""Port of the PostgreSQL database that collects the data points.""" + +sysmon_top_db_port.label: +"""DB Port""" + +fields_mqtt_quic_listener_max_operations_per_drain.desc: +"""The maximum number of operations to drain per connection quantum. Default: 16""" + +fields_mqtt_quic_listener_max_operations_per_drain.label: +"""Max operations per drain""" + +fields_mqtt_quic_listener_datagram_receive_enabled.desc: +"""Advertise support for QUIC datagram extension. Reserve for the future. Default 0 (FALSE)""" + +fields_mqtt_quic_listener_datagram_receive_enabled.label: +"""Datagram receive enabled""" + +fields_mqtt_quic_listener_initial_rtt_ms.desc: +"""Initial RTT estimate.""" + +fields_mqtt_quic_listener_initial_rtt_ms.label: +"""Initial RTT ms""" + +overload_protection_backoff_gc.desc: +"""When at high load, skip forceful GC.""" + +overload_protection_backoff_gc.label: +"""Skip GC""" + +broker_perf_route_lock_type.desc: +"""Performance tuning for subscribing/unsubscribing a wildcard topic. +Change this parameter only when there are many wildcard topics. + +NOTE: when changing from/to `global` lock, it requires all nodes in the cluster to be stopped before the change. + - `key`: mnesia transactional updates with per-key locks. Recommended for a single-node setup. + - `tab`: mnesia transactional updates with table lock. Recommended for a cluster setup. + - `global`: updates are protected with a global lock. Recommended for large clusters.""" + +fields_tcp_opts_nodelay.desc: +"""The TCP_NODELAY flag for the connections.""" + +fields_tcp_opts_nodelay.label: +"""TCP_NODELAY""" + +sysmon_top_db_username.desc: +"""Username of the PostgreSQL database""" + +sysmon_top_db_username.label: +"""DB Username""" + +broker.desc: +"""Message broker options.""" + +force_gc_count.desc: +"""GC the process after this many received messages.""" + +force_gc_count.label: +"""Process GC messages num""" + +mqtt_max_clientid_len.desc: +"""Maximum allowed length of MQTT Client ID.""" + +mqtt_max_clientid_len.label: +"""Max Client ID Length""" + +fields_ws_opts_supported_subprotocols.desc: +"""Comma-separated list of supported subprotocols.""" + +fields_ws_opts_supported_subprotocols.label: +"""Supported subprotocols""" + +broker_shared_subscription_strategy.desc: +"""Dispatch strategy for shared subscription. + - `random`: dispatch the message to a random selected subscriber + - `round_robin`: select the subscribers in a round-robin manner + - `round_robin_per_group`: select the subscribers in round-robin fashion within each shared subscriber group + - `local`: select random local subscriber otherwise select random cluster-wide + - `sticky`: always use the last selected subscriber to dispatch, until the subscriber disconnects. 
+ - `hash_clientid`: select the subscribers by hashing the `clientIds` + - `hash_topic`: select the subscribers by hashing the source topic""" + +fields_deflate_opts_mem_level.desc: +"""Specifies the size of the compression state.
+Lower values decrease memory usage per connection.""" + +fields_deflate_opts_mem_level.label: +"""Size of the compression state""" + +fields_mqtt_quic_listener_send_idle_timeout_ms.desc: +"""Reset congestion control after being idle for amount of time. Default: 1000""" + +fields_mqtt_quic_listener_send_idle_timeout_ms.label: +"""Send idle timeout ms""" + +base_listener_limiter.desc: +"""Type of the rate limit.""" + +base_listener_limiter.label: +"""Type of the rate limit.""" + +persistent_session_store_backend.desc: +"""Database management system used to store information about persistent sessions and messages. +- `builtin`: Use the embedded database (mria)""" + +persistent_session_store_backend.label: +"""Backend""" + +alarm_validity_period.desc: +"""Retention time of deactivated alarms. Alarms are not deleted immediately +when deactivated, but after the retention time.""" + +alarm_validity_period.label: +"""Alarm validity period""" + +server_ssl_opts_schema_ocsp_issuer_pem.desc: +"""PEM-encoded certificate of the OCSP issuer for the server certificate.""" + +server_ssl_opts_schema_ocsp_issuer_pem.label: +"""OCSP Issuer Certificate""" + +fields_tcp_opts_active_n.desc: +"""Specify the {active, N} option for this Socket.
See: https://erlang.org/doc/man/inet.html#setopts-2"""

fields_tcp_opts_active_n.label:
"""active_n"""

listener_authentication.desc:
"""Per-listener authentication override.
Authentication can be one single authenticator instance or a chain of authenticators as an array.
When authenticating a login (username, client ID, etc.), the authenticators are checked in the configured order."""

listener_authentication.label:
"""Per-listener authentication override"""

fields_trace_payload_encode.desc:
"""Determine the format of the payload in the trace file.<br/>
+`text`: Text-based protocol or plain text protocol. + It is recommended when payload is JSON encoded.
+`hex`: Binary hexadecimal encode. It is recommended when payload is a custom binary protocol.
+`hidden`: payload is obfuscated as `******`""" + +fields_trace_payload_encode.label: +"""Payload encode""" + +mqtt_response_information.desc: +"""UTF-8 string, for creating the response topic, for example, if set to reqrsp/, the publisher/subscriber will communicate using the topic prefix reqrsp/. +To disable this feature, input "" in the text box below. Only applicable to MQTT 5.0 clients.""" + +mqtt_response_information.label: +"""Response Information""" + +persistent_session_store_max_retain_undelivered.desc: +"""The time messages that was not delivered to a persistent session +is stored before being garbage collected if the node the previous +session was handled on restarts of is stopped.""" + +persistent_session_store_max_retain_undelivered.label: +"""Max retain undelivered""" + +fields_mqtt_quic_listener_migration_enabled.desc: +"""Enable clients to migrate IP addresses and tuples. Requires a cooperative load-balancer, or no load-balancer. Default: 1 (Enabled)""" + +fields_mqtt_quic_listener_migration_enabled.label: +"""Migration enabled""" + +common_ssl_opts_schema_password.desc: +"""String containing the user's password. Only used if the private key file is password-protected.""" + +common_ssl_opts_schema_password.label: +"""Keyfile passphrase""" + +common_ssl_opts_schema_hibernate_after.desc: +"""Hibernate the SSL process after idling for amount of time reducing its memory footprint.""" + +common_ssl_opts_schema_hibernate_after.label: +"""hibernate after""" + +fields_mqtt_quic_listener_send_buffering_enabled.desc: +"""Buffer send data instead of holding application buffers until sent data is acknowledged. Default: 1 (Enabled)""" + +fields_mqtt_quic_listener_send_buffering_enabled.label: +"""Send buffering enabled""" + +sys_event_client_unsubscribed.desc: +"""Enable to publish event message that client unsubscribed a topic successfully.""" + +overload_protection_backoff_new_conn.desc: +"""When at high load, close new incoming connections.""" + +overload_protection_backoff_new_conn.label: +"""Close new connections""" + +server_ssl_opts_schema_ocsp_responder_url.desc: +"""URL for the OCSP responder to check the server certificate against.""" + +server_ssl_opts_schema_ocsp_responder_url.label: +"""OCSP Responder URL""" + +mqtt_idle_timeout.desc: +"""Configure the duration of time that a connection can remain idle (i.e., without any data transfer) before being: + - Automatically disconnected if no CONNECT package is received from the client yet. + - Put into hibernation mode to save resources if some CONNECT packages are already received. +Note: Please set the parameter with caution as long idle time will lead to resource waste.""" + +mqtt_idle_timeout.label: +"""Idle Timeout""" + +fields_mqtt_quic_listener_conn_flow_control_window.desc: +"""Connection-wide flow control window. Default: 16777216""" + +fields_mqtt_quic_listener_conn_flow_control_window.label: +"""Conn flow control window""" + +fields_mqtt_quic_listener_maximum_mtu.desc: +"""The maximum MTU supported by a connection. This will be the maximum probed value. Default: 1500""" + +fields_mqtt_quic_listener_maximum_mtu.label: +"""Maximum MTU""" + +sysmon_top_db_name.desc: +"""PostgreSQL database name""" + +sysmon_top_db_name.label: +"""DB Name""" + +mqtt_strict_mode.desc: +"""Whether to parse MQTT messages in strict mode. +In strict mode, invalid utf8 strings in for example client ID, topic name, etc. 
will cause the client to be disconnected.""" + +mqtt_strict_mode.label: +"""Strict Mode""" + +shared_subscription_group_strategy.desc: +"""Per group dispatch strategy for shared subscription. +This config is a map from shared subscription group name to the strategy +name. The group name should be of format `[A-Za-z0-9]`. i.e. no +special characters are allowed.""" + +fields_deflate_opts_strategy.desc: +"""Specifies the compression strategy.""" + +fields_deflate_opts_strategy.label: +"""compression strategy""" + +shared_subscription_strategy_enum.desc: +"""Dispatch strategy for shared subscription. +- `random`: dispatch the message to a random selected subscriber +- `round_robin`: select the subscribers in a round-robin manner +- `round_robin_per_group`: select the subscribers in round-robin fashion within each shared subscriber group +- `sticky`: always use the last selected subscriber to dispatch, +until the subscriber disconnects. +- `hash`: select the subscribers by the hash of `clientIds` +- `local`: send to a random local subscriber. If local +subscriber was not found, send to a random subscriber cluster-wide""" + +persistent_session_builtin_sess_msg_table.desc: +"""Performance tuning options for built-in session messages table.""" + +persistent_session_builtin_sess_msg_table.label: +"""Persistent session messages""" + +mqtt_mqueue_store_qos0.desc: +"""Specifies whether to store QoS 0 messages in the message queue while the connection is down but the session remains.""" + +mqtt_mqueue_store_qos0.label: +"""Store QoS 0 Message""" + +server_ssl_opts_schema_client_renegotiation.desc: +"""In protocols that support client-initiated renegotiation, the cost of resources of such an operation is higher for the server than the client. This can act as a vector for denial of service attacks. The SSL application already takes measures to counter-act such attempts, @@ -1722,1147 +1210,331 @@ but client-initiated renegotiation can be strictly disabled by setting this opti The default value is true. Note that disabling renegotiation can result in long-lived connections becoming unusable due to limits on the number of messages the underlying cipher suite can encipher.""" - zh: """在支持客户机发起的重新协商的协议中,这种操作的资源成本对于服务器来说高于客户机。 -这可能会成为拒绝服务攻击的载体。 -SSL 应用程序已经采取措施来反击此类尝试,但通过将此选项设置为 false,可以严格禁用客户端发起的重新协商。 -默认值为 true。请注意,由于基础密码套件可以加密的消息数量有限,禁用重新协商可能会导致长期连接变得不可用。""" - } - label: { - en: "SSL client renegotiation" - zh: "SSL 客户端冲协商" - } -} - -server_ssl_opts_schema_handshake_timeout { - desc { - en: """Maximum time duration allowed for the handshake to complete""" - zh: """握手完成所允许的最长时间""" - } - label: { - en: "Handshake timeout" - zh: "握手超时时间" - } -} - -server_ssl_opts_schema_gc_after_handshake { - desc { - en: """Memory usage tuning. If enabled, will immediately perform a garbage collection after the TLS/SSL handshake.""" - zh: """内存使用调优。如果启用,将在TLS/SSL握手完成后立即执行垃圾回收。TLS/SSL握手建立后立即进行GC。""" - } - label: { - en: "Perform GC after handshake" - zh: "握手后执行GC" - } -} - -server_ssl_opts_schema_enable_ocsp_stapling { - desc { - en: "Whether to enable Online Certificate Status Protocol (OCSP) stapling for the listener." - " If set to true, requires defining the OCSP responder URL and issuer PEM path." - zh: "是否为监听器启用 OCSP Stapling 功能。 如果设置为 true," - "需要定义 OCSP Responder 的 URL 和证书签发者的 PEM 文件路径。" - } - label: { - en: "Enable OCSP Stapling" - zh: "启用 OCSP Stapling" - } -} - -server_ssl_opts_schema_ocsp_responder_url { - desc { - en: "URL for the OCSP responder to check the server certificate against." 
- zh: "用于检查服务器证书的 OCSP Responder 的 URL。" - } - label: { - en: "OCSP Responder URL" - zh: "OCSP Responder 的 URL" - } -} - -server_ssl_opts_schema_ocsp_issuer_pem { - desc { - en: "PEM-encoded certificate of the OCSP issuer for the server certificate." - zh: "服务器证书的 OCSP 签发者的 PEM 编码证书。" - } - label: { - en: "OCSP Issuer Certificate" - zh: "OCSP 签发者证书" - } -} - -server_ssl_opts_schema_ocsp_refresh_interval { - desc { - en: "The period to refresh the OCSP response for the server." - zh: "为服务器刷新OCSP响应的周期。" - } - label: { - en: "OCSP Refresh Interval" - zh: "OCSP 刷新间隔" - } -} - -server_ssl_opts_schema_ocsp_refresh_http_timeout { - desc { - en: "The timeout for the HTTP request when checking OCSP responses." - zh: "检查 OCSP 响应时,HTTP 请求的超时。" - } - label: { - en: "OCSP Refresh HTTP Timeout" - zh: "OCSP 刷新 HTTP 超时" - } -} - -server_ssl_opts_schema_enable_crl_check { - desc { - en: "Whether to enable CRL verification for this listener." - zh: "是否为该监听器启用 CRL 检查。" - } - label: { - en: "Enable CRL Check" - zh: "启用 CRL 检查" - } -} - -crl_cache_refresh_http_timeout { - desc { - en: "The timeout for the HTTP request when fetching CRLs. This is" - " a global setting for all listeners." - zh: "获取 CRLs 时 HTTP 请求的超时。 该配置对所有启用 CRL 检查的监听器监听器有效。" - } - label: { - en: "CRL Cache Refresh HTTP Timeout" - zh: "CRL 缓存刷新 HTTP 超时" - } -} - -crl_cache_refresh_interval { - desc { - en: "The period to refresh the CRLs from the servers. This is a global setting" - " for all URLs and listeners." - zh: "从服务器刷新CRL的周期。 该配置对所有 URL 和监听器有效。" - } - label: { - en: "CRL Cache Refresh Interval" - zh: "CRL 缓存刷新间隔" - } -} - -crl_cache_capacity { - desc { - en: "The maximum number of CRL URLs that can be held in cache. If the cache is at" - " full capacity and a new URL must be fetched, then it'll evict the oldest" - " inserted URL in the cache." - zh: "缓存中可容纳的 CRL URL 的最大数量。" - " 如果缓存的容量已满,并且必须获取一个新的 URL," - "那么它将驱逐缓存中插入的最老的 URL。" - } - label: { - en: "CRL Cache Capacity" - zh: "CRL 缓存容量" - } -} - -fields_listeners_tcp { - desc { - en: """TCP listeners.""" - zh: """TCP 监听器。""" - } - label: { - en: "TCP listeners" - zh: "TCP 监听器" - } -} - -fields_listeners_ssl { - desc { - en: """SSL listeners.""" - zh: """SSL 监听器。""" - } - label: { - en: "SSL listeners" - zh: "SSL 监听器" - } -} - -fields_listeners_ws { - desc { - en: """HTTP websocket listeners.""" - zh: """HTTP websocket 监听器。""" - } - label: { - en: "HTTP websocket listeners" - zh: "HTTP websocket 监听器" - } -} - -fields_listeners_wss { - desc { - en: """HTTPS websocket listeners.""" - zh: """HTTPS websocket 监听器。""" - } - label: { - en: "HTTPS websocket listeners" - zh: "HTTPS websocket 监听器" - } -} - -fields_listeners_quic { - desc { - en: """QUIC listeners.""" - zh: """QUIC 监听器。""" - } - label: { - en: "QUIC listeners" - zh: "QUIC 监听器" - } -} - -fields_listener_enabled { - desc { - en: """Enable listener.""" - zh: """启停监听器。""" - } - label: { - en: "Enable listener" - zh: "启停监听器" - } -} - -fields_mqtt_quic_listener_certfile { - desc { - en: """Path to the certificate file. Will be deprecated in 5.1, use .ssl_options.certfile instead.""" - zh: """证书文件。在 5.1 中会被废弃,使用 .ssl_options.certfile 代替。""" - } - label: { - en: "Certificate file" - zh: "证书文件" - } -} - -fields_mqtt_quic_listener_keyfile { - desc { - en: """Path to the secret key file. 
Will be deprecated in 5.1, use .ssl_options.keyfile instead.""" - zh: """私钥文件。在 5.1 中会被废弃,使用 .ssl_options.keyfile 代替。""" - } - label: { - en: "Key file" - zh: "私钥文件" - } -} - -fields_mqtt_quic_listener_idle_timeout { - desc { - en: """How long a connection can go idle before it is gracefully shut down. 0 to disable""" - zh: """一个连接在被关闭之前可以空闲多长时间。0表示禁用。""" - } - label: { - en: "Idle Timeout" - zh: "空闲超时时间" - } -} - -fields_mqtt_quic_listener_handshake_idle_timeout { - desc { - en: """How long a handshake can idle before it is discarded.""" - zh: """一个握手在被丢弃之前可以空闲多长时间。""" - } - label: { - en: "Handshake Idle Timeout" - zh: "握手空闲超时时间" - } -} - -fields_mqtt_quic_listener_keep_alive_interval { - desc { - en: """How often to send PING frames to keep a connection alive. 0 means disabled.""" - zh: """发送 PING 帧的频率,以保活连接. 设为 0 表示禁用。""" - } - label: { - en: "Keep Alive Interval" - zh: "PING 保活频率" - } -} - -fields_mqtt_quic_listener_ssl_options { - desc { - en: """TLS options for QUIC transport""" - zh: """QUIC 传输层的 TLS 选项""" - } - label: { - en: "TLS Options" - zh: "TLS 选项" - } -} - -base_listener_bind { - desc { - en: """IP address and port for the listening socket.""" - zh: """监听套接字的 IP 地址和端口。""" - } - label: { - en: "IP address and port" - zh: "IP 地址和端口" - } -} - -base_listener_acceptors { - desc { - en: """The size of the listener's receiving pool.""" - zh: """监听器接收池的大小。""" - } - label: { - en: "Acceptors Num" - zh: "接收器数量" - } -} - -fields_mqtt_quic_listener_max_bytes_per_key { - desc { - en: "Maximum number of bytes to encrypt with a single 1-RTT encryption key before initiating key update. Default: 274877906944" - zh: "在启动密钥更新之前,用单个 1-RTT 加密密钥加密的最大字节数。默认值:274877906944" - } - label { - en: "Max bytes per key" - zh: "每个密钥的最大字节数" - } -} - -fields_mqtt_quic_listener_handshake_idle_timeout_ms { - desc { - en: "How long a handshake can idle before it is discarded. Default: 10 000" - zh: "一个握手在被丢弃之前可以空闲多长时间。 默认值:10 000" - } - label { - en: "Handshake idle timeout ms" - zh: "握手空闲超时毫秒" - } -} - -fields_mqtt_quic_listener_tls_server_max_send_buffer { - desc { - en: "How much Server TLS data to buffer. Default: 8192" - zh: "缓冲多少TLS数据。 默认值:8192" - } - label { - en: "TLS server max send buffer" - zh: "TLS 服务器最大发送缓冲区" - } -} - -fields_mqtt_quic_listener_stream_recv_window_default { - desc { - en: "Initial stream receive window size. Default: 32678" - zh: "初始流接收窗口大小。 默认值:32678" - } - label { - en: "Stream recv window default" - zh: "流接收窗口默认" - } -} - -fields_mqtt_quic_listener_stream_recv_buffer_default { - desc { - en: "Stream initial buffer size. Default: 4096" - zh: "流的初始缓冲区大小。默认:4096" - } - label { - en: "Stream recv buffer default" - zh: "流媒体接收缓冲区默认值" - } -} - -fields_mqtt_quic_listener_conn_flow_control_window { - desc { - en: "Connection-wide flow control window. Default: 16777216" - zh: "连接的流控窗口。默认:16777216" - } - label { - en: "Conn flow control window" - zh: "流控窗口" - } -} - -fields_mqtt_quic_listener_max_stateless_operations { - desc { - en: "The maximum number of stateless operations that may be queued on a worker at any one time. Default: 16" - zh: "无状态操作的最大数量,在任何时候都可以在一个工作者上排队。默认值:16" - } - label { - en: "Max stateless operations" - zh: "最大无状态操作数" - } -} - -fields_mqtt_quic_listener_initial_window_packets { - desc { - en: "The size (in packets) of the initial congestion window for a connection. 
Default: 10" - zh: "一个连接的初始拥堵窗口的大小(以包为单位)。默认值:10" - } - label { - en: "Initial window packets" - zh: "初始窗口数据包" - } -} - -fields_mqtt_quic_listener_send_idle_timeout_ms { - desc { - en: "Reset congestion control after being idle for amount of time. Default: 1000" - zh: "在闲置一定时间后重置拥堵控制。默认值:1000" - } - label { - en: "Send idle timeout ms" - zh: "发送空闲超时毫秒" - } -} - -fields_mqtt_quic_listener_initial_rtt_ms { - desc { - en: "Initial RTT estimate." - zh: "初始RTT估计" - } - label { - en: "Initial RTT ms" - zh: "Initial RTT 毫秒" - } -} - -fields_mqtt_quic_listener_max_ack_delay_ms { - desc { - en: "How long to wait after receiving data before sending an ACK. Default: 25" - zh: "在收到数据后要等待多长时间才能发送一个ACK。默认值:25" - } - label { - en: "Max ack delay ms" - zh: "最大应答延迟 毫秒" - } -} - -fields_mqtt_quic_listener_disconnect_timeout_ms { - desc { - en: "How long to wait for an ACK before declaring a path dead and disconnecting. Default: 16000" - zh: "在判定路径无效和断开连接之前,要等待多长时间的ACK。默认:16000" - } - label { - en: "Disconnect timeout ms" - zh: "断开连接超时 毫秒" - } -} - -fields_mqtt_quic_listener_idle_timeout_ms { - desc { - en: "How long a connection can go idle before it is gracefully shut down. 0 to disable timeout" - zh: "一个连接在被优雅地关闭之前可以空闲多长时间。0 表示禁用超时" - } - label { - en: "Idle timeout ms" - zh: "空闲超时 毫秒" - } -} - -fields_mqtt_quic_listener_handshake_idle_timeout_ms { - desc { - en: "How long a handshake can idle before it is discarded" - zh: "一个握手在被丢弃之前可以空闲多长时间" - } - label { - en: "Handshake idle timeout ms" - zh: "握手空闲超时 毫秒" - } -} - -fields_mqtt_quic_listener_keep_alive_interval_ms { - desc { - en: "How often to send PING frames to keep a connection alive." - zh: "多长时间发送一次PING帧以保活连接。" - } - label { - en: "Keep alive interval ms" - zh: "保持活着的时间间隔 毫秒" - } -} - -fields_mqtt_quic_listener_peer_bidi_stream_count { - desc { - en: "Number of bidirectional streams to allow the peer to open." - zh: "允许对端打开的双向流的数量" - } - label { - en: "Peer bidi stream count" - zh: "对端双向流的数量" - } -} - -fields_mqtt_quic_listener_peer_unidi_stream_count { - desc { - en: "Number of unidirectional streams to allow the peer to open." - zh: "允许对端打开的单向流的数量" - } - label { - en: "Peer unidi stream count" - zh: "对端单向流的数量" - } -} - -fields_mqtt_quic_listener_retry_memory_limit { - desc { - en: "The percentage of available memory usable for handshake connections before stateless retry is used. Calculated as `N/65535`. Default: 65" - zh: "在使用无状态重试之前,可用于握手连接的可用内存的百分比。计算为`N/65535`。默认值:65" - } - label { - en: "Retry memory limit" - zh: "重试内存限制" - } -} - -fields_mqtt_quic_listener_load_balancing_mode { - desc { - en: "0: Disabled, 1: SERVER_ID_IP, 2: SERVER_ID_FIXED. default: 0" - zh: "0: 禁用, 1: SERVER_ID_IP, 2: SERVER_ID_FIXED. 默认: 0" - } - label { - en: "Load balancing mode" - zh: "负载平衡模式" - } -} - -fields_mqtt_quic_listener_max_operations_per_drain { - desc { - en: "The maximum number of operations to drain per connection quantum. Default: 16" - zh: "每个连接操作的最大耗费操作数。默认:16" - } - label { - en: "Max operations per drain" - zh: "每次操作最大操作数" - } -} - -fields_mqtt_quic_listener_send_buffering_enabled { - desc { - en: "Buffer send data instead of holding application buffers until sent data is acknowledged. Default: 1 (Enabled)" - zh: "缓冲发送数据,而不是保留应用缓冲区,直到发送数据被确认。默认值:1(启用)" - } - label { - en: "Send buffering enabled" - zh: "启用发送缓冲功能" - } -} - -fields_mqtt_quic_listener_pacing_enabled { - desc { - en: "Pace sending to avoid overfilling buffers on the path. 
Default: 1 (Enabled)" - zh: "有节奏的发送,以避免路径上的缓冲区过度填充。默认值:1(已启用)" - } - label { - en: "Pacing enabled" - zh: "启用节奏发送" - } -} - -fields_mqtt_quic_listener_migration_enabled { - desc { - en: "Enable clients to migrate IP addresses and tuples. Requires a cooperative load-balancer, or no load-balancer. Default: 1 (Enabled)" - zh: "开启客户端地址迁移功能。需要一个支持的负载平衡器,或者没有负载平衡器。默认值:1(已启用)" - } - label { - en: "Migration enabled" - zh: "启用地址迁移" - } -} - -fields_mqtt_quic_listener_datagram_receive_enabled { - desc { - en: "Advertise support for QUIC datagram extension. Reserve for the future. Default 0 (FALSE)" - zh: "宣传对QUIC Datagram 扩展的支持。为将来保留。默认为0(FALSE)" - } - label { - en: "Datagram receive enabled" - zh: "启用 Datagram 接收" - } -} - -fields_mqtt_quic_listener_server_resumption_level { - desc { - en: "Controls resumption tickets and/or 0-RTT server support. Default: 0 (No resumption)" - zh: "连接恢复 和/或 0-RTT 服务器支持。默认值:0(无恢复功能)" - } - label { - en: "Server resumption level" - zh: "服务端连接恢复支持" - } -} - -fields_mqtt_quic_listener_minimum_mtu { - desc { - en: "The minimum MTU supported by a connection. This will be used as the starting MTU. Default: 1248" - zh: "一个连接所支持的最小MTU。这将被作为起始MTU使用。默认值:1248" - } - label { - en: "Minimum MTU" - zh: "最小 MTU" - } -} - -fields_mqtt_quic_listener_maximum_mtu { - desc { - en: "The maximum MTU supported by a connection. This will be the maximum probed value. Default: 1500" - zh: "一个连接所支持的最大MTU。这将是最大的探测值。默认值:1500" - } - label { - en: "Maximum MTU" - zh: "最大 MTU" - } -} - -fields_mqtt_quic_listener_mtu_discovery_search_complete_timeout_us { - desc { - en: "The time in microseconds to wait before reattempting MTU probing if max was not reached. Default: 600000000" - zh: "如果没有达到 max ,在重新尝试 MTU 探测之前要等待的时间,单位是微秒。默认值:600000000" - } - label { - en: "MTU discovery search complete timeout us" - zh: "" - } -} - -fields_mqtt_quic_listener_mtu_discovery_missing_probe_count { - desc { - en: "The maximum number of stateless operations that may be queued on a binding at any one time. Default: 3" - zh: "在任何时候都可以在一个绑定上排队的无状态操作的最大数量。默认值:3" - } - label { - en: "MTU discovery missing probe count" - zh: "MTU发现丢失的探针数量" - } -} - -fields_mqtt_quic_listener_max_binding_stateless_operations { - desc { - en: "The maximum number of stateless operations that may be queued on a binding at any one time. Default: 100" - zh: "在任何时候可以在一个绑定上排队的无状态操作的最大数量。默认值:100" - } - label { - en: "Max binding stateless operations" - zh: "最大绑定无状态操作" - } -} - -fields_mqtt_quic_listener_stateless_operation_expiration_ms { - desc { - en: "The time limit between operations for the same endpoint, in milliseconds. Default: 100" - zh: "同一个对端的操作之间的时间限制,单位是毫秒。 默认:100" - } - label { - en: "Stateless operation expiration ms" - zh: "无状态操作过期 毫秒" - } -} - -base_listener_max_connections { - desc { - en: """The maximum number of concurrent connections allowed by the listener.""" - zh: """监听器允许的最大并发连接数。""" - } - label: { - en: "Max connections" - zh: "最大并发连接数" - } -} - -base_listener_mountpoint { - desc { - en: """When publishing or subscribing, prefix all topics with a mountpoint string. -The prefixed string will be removed from the topic name when the message -is delivered to the subscriber. The mountpoint is a way that users can use -to implement isolation of message routing between different listeners. -For example if a client A subscribes to `t` with `listeners.tcp.\.mountpoint` -set to `some_tenant`, then the client actually subscribes to the topic -`some_tenant/t`. 
Similarly, if another client B (connected to the same listener -as the client A) sends a message to topic `t`, the message is routed -to all the clients subscribed `some_tenant/t`, so client A will receive the -message, with topic name `t`.
-Set to `""` to disable the feature.
- -Variables in mountpoint string: - - ${clientid}: clientid - - ${username}: username""" - zh: """发布或订阅时,请在所有主题前面加上 mountpoint 字符串。 - -将消息传递给订阅者时,将从主题名称中删除带前缀的字符串。挂载点是一种用户可以用来实现不同侦听器之间消息路由隔离的方法。 - -例如,如果客户机 A 使用 listeners.tcp.\.mountpoint 设置为'some_tenant',那么客户端实际上订阅了主题'some_tenant/t'。
-类似地,如果另一个客户端B(与客户端A连接到同一个侦听器)向主题 't' 发送消息,该消息将路由到所有订阅了'some_租户/t'的客户端,因此客户端 A 将接收主题名为't'的消息
- -设置为"" 以禁用该功能
- -mountpoint 字符串中的变量: -- ${clientid}: clientid -- ${username}: username""" - } - label: { - en: "mountpoint" - zh: "mountpoint" - } -} - -base_listener_zone { - desc { - en: """The configuration zone to which the listener belongs.""" - zh: """监听器所属的配置组。""" - } - label: { - en: "Zone" - zh: "配置组" - } -} - -base_listener_limiter { - desc { - en: """Type of the rate limit.""" - zh: """速率限制类型""" - } - label: { - en: "Type of the rate limit." - zh: "速率限制类型" - } -} - -base_listener_enable_authn { - desc { - en: """Set true (default) to enable client authentication on this listener, the authentication + +server_ssl_opts_schema_client_renegotiation.label: +"""SSL client renegotiation""" + +server_ssl_opts_schema_enable_crl_check.desc: +"""Whether to enable CRL verification for this listener.""" + +server_ssl_opts_schema_enable_crl_check.label: +"""Enable CRL Check""" + +fields_mqtt_quic_listener_peer_bidi_stream_count.desc: +"""Number of bidirectional streams to allow the peer to open.""" + +fields_mqtt_quic_listener_peer_bidi_stream_count.label: +"""Peer bidi stream count""" + +fields_mqtt_quic_listener_max_stateless_operations.desc: +"""The maximum number of stateless operations that may be queued on a worker at any one time. Default: 16""" + +fields_mqtt_quic_listener_max_stateless_operations.label: +"""Max stateless operations""" + +fields_ws_opts_idle_timeout.desc: +"""Close transport-layer connections from the clients that have not sent MQTT CONNECT message within this interval.""" + +fields_ws_opts_idle_timeout.label: +"""WS idle timeout""" + +fields_mqtt_quic_listener_max_ack_delay_ms.desc: +"""How long to wait after receiving data before sending an ACK. Default: 25""" + +fields_mqtt_quic_listener_max_ack_delay_ms.label: +"""Max ack delay ms""" + +base_listener_zone.desc: +"""The configuration zone to which the listener belongs.""" + +base_listener_zone.label: +"""Zone""" + +fields_mqtt_quic_listener_handshake_idle_timeout.desc: +"""How long a handshake can idle before it is discarded.""" + +fields_mqtt_quic_listener_handshake_idle_timeout.label: +"""Handshake Idle Timeout""" + +force_gc_enable.desc: +"""Enable forced garbage collection.""" + +force_gc_enable.label: +"""Enable forced garbage collection""" + +fields_ws_opts_allow_origin_absence.desc: +"""If false and check_origin_enable is + true, the server will reject requests that don't have origin + HTTP header.""" + +fields_ws_opts_allow_origin_absence.label: +"""Allow origin absence""" + +common_ssl_opts_schema_versions.desc: +"""All TLS/DTLS versions to be supported.
+NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config.
+In case PSK cipher suites are intended, make sure to configure +['tlsv1.2', 'tlsv1.1'] here.""" + +common_ssl_opts_schema_versions.label: +"""SSL versions""" + +mqtt_listener_proxy_protocol_timeout.desc: +"""Timeout for proxy protocol. EMQX will close the TCP connection if proxy protocol packet is not received within the timeout.""" + +mqtt_listener_proxy_protocol_timeout.label: +"""Proxy protocol timeout""" + +fields_mqtt_quic_listener_idle_timeout.desc: +"""How long a connection can go idle before it is gracefully shut down. 0 to disable""" + +fields_mqtt_quic_listener_idle_timeout.label: +"""Idle Timeout""" + +common_ssl_opts_schema_secure_renegotiate.desc: +"""SSL parameter renegotiation is a feature that allows a client and a server +to renegotiate the parameters of the SSL connection on the fly. +RFC 5746 defines a more secure way of doing this. By enabling secure renegotiation, +you drop support for the insecure renegotiation, prone to MitM attacks.""" + +common_ssl_opts_schema_secure_renegotiate.label: +"""SSL renegotiate""" + +sysmon_vm_busy_port.desc: +"""When a port (e.g. TCP socket) is overloaded, there will be a busy_port warning log, +and an MQTT message is published to the system topic $SYS/sysmon/busy_port.""" + +sysmon_vm_busy_port.label: +"""Enable Busy Port monitoring.""" + +sys_event_client_connected.desc: +"""Enable to publish client connected event messages""" + +sysmon_vm_process_low_watermark.desc: +"""The threshold, as percentage of processes, for how many + processes can simultaneously exist at the local node before the corresponding + alarm is cleared.""" + +sysmon_vm_process_low_watermark.label: +"""Process low watermark""" + +mqtt_max_packet_size.desc: +"""Maximum MQTT packet size allowed.""" + +mqtt_max_packet_size.label: +"""Max Packet Size""" + +common_ssl_opts_schema_reuse_sessions.desc: +"""Enable TLS session reuse.""" + +common_ssl_opts_schema_reuse_sessions.label: +"""TLS session reuse""" + +common_ssl_opts_schema_depth.desc: +"""Maximum number of non-self-issued intermediate certificates that can follow the peer certificate in a valid certification path. +So, if depth is 0 the PEER must be signed by the trusted ROOT-CA directly;
+if 1 the path can be PEER, Intermediate-CA, ROOT-CA;
+if 2 the path can be PEER, Intermediate-CA1, Intermediate-CA2, ROOT-CA.""" + +common_ssl_opts_schema_depth.label: +"""CACert Depth""" + +sysmon_vm_long_gc.desc: +"""When an Erlang process spends long time to perform garbage collection, a warning level long_gc log is emitted, +and an MQTT message is published to the system topic $SYS/sysmon/long_gc.""" + +sysmon_vm_long_gc.label: +"""Enable Long GC monitoring.""" + +fields_mqtt_quic_listener_keyfile.desc: +"""Path to the secret key file. Will be deprecated in 5.1, use .ssl_options.keyfile instead.""" + +fields_mqtt_quic_listener_keyfile.label: +"""Key file""" + +mqtt_peer_cert_as_clientid.desc: +"""Use the CN, DN field in the peer certificate or the entire certificate content as Client ID. Only works for the TLS connection. +Supported configurations are the following: +- cn: CN field of the certificate +- dn: DN field of the certificate +- crt: DER or PEM certificate +- pem: Convert DER certificate content to PEM format and use as Client ID +- md5: MD5 value of the DER or PEM certificate""" + +mqtt_peer_cert_as_clientid.label: +"""Use Peer Certificate as Client ID""" + +persistent_session_store_message_gc_interval.desc: +"""The starting interval for garbage collection of undelivered messages to +a persistent session. This affects how often the "max_retain_undelivered" +is checked for removal.""" + +persistent_session_store_message_gc_interval.label: +"""Message GC interval""" + +broker_shared_dispatch_ack_enabled.desc: +"""Deprecated, will be removed in 5.1. +Enable/disable shared dispatch acknowledgement for QoS 1 and QoS 2 messages. +This should allow messages to be dispatched to a different subscriber in the group in case the picked (based on `shared_subscription_strategy`) subscriber is offline.""" + +base_listener_enable_authn.desc: +"""Set true (default) to enable client authentication on this listener, the authentication process goes through the configured authentication chain. When set to false to allow any clients with or without authentication information such as username or password to log in. When set to quick_deny_anonymous, it behaves like when set to true, but clients will be denied immediately without going through any authenticators if username is not provided. This is useful to fence off anonymous clients early.""" - zh: """配置 true (默认值)启用客户端进行身份认证,通过检查认配置的认认证器链来决定是否允许接入。 -配置 false 时,将不对客户端做任何认证,任何客户端,不论是不是携带用户名等认证信息,都可以接入。 -配置 quick_deny_anonymous 时,行为跟 true 类似,但是会对匿名 -客户直接拒绝,不做使用任何认证器对客户端进行身份检查。""" - } - label: { - en: "Enable authentication" - zh: "启用身份认证" - } -} -mqtt_listener_access_rules { - desc { - en: """The access control rules for this listener.
See: https://github.com/emqtt/esockd#allowdeny""" - zh: """此监听器的访问控制规则。""" - } - label: { - en: "Access rules" - zh: "访问控制规则" - } -} +base_listener_enable_authn.label: +"""Enable authentication""" -mqtt_listener_proxy_protocol { - desc { - en: """Enable the Proxy Protocol V1/2 if the EMQX cluster is deployed behind HAProxy or Nginx.
+force_shutdown_enable.desc: +"""Enable `force_shutdown` feature.""" + +force_shutdown_enable.label: +"""Enable `force_shutdown` feature""" + +broker_enable_session_registry.desc: +"""Enable session registry""" + +overload_protection_backoff_delay.desc: +"""The maximum duration of delay for background task execution during high load conditions.""" + +overload_protection_backoff_delay.label: +"""Delay Time""" + +ciphers_schema_common.desc: +"""This config holds TLS cipher suite names separated by comma, +or as an array of strings. e.g. +"TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256" or +["TLS_AES_256_GCM_SHA384","TLS_AES_128_GCM_SHA256"]. +
+Ciphers (and their ordering) define the way in which the +client and server encrypts information over the network connection. +Selecting a good cipher suite is critical for the +application's data security, confidentiality and performance. + +The names should be in OpenSSL string format (not RFC format). +All default values and examples provided by EMQX config +documentation are all in OpenSSL format.
+
+NOTE: Certain cipher suites are only compatible with
+specific TLS versions ('tlsv1.1', 'tlsv1.2' or 'tlsv1.3');
+incompatible cipher suites will be silently dropped.
+For instance, if only 'tlsv1.3' is given in the versions,
+configuring cipher suites for other versions will have no effect.
+
+
+NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config.
+If PSK cipher suites are intended, 'tlsv1.3' should be disabled from versions.
+PSK cipher suites: "RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384, +RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256, +RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA, +RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"""" + +ciphers_schema_common.label: +"""""" + +sys_event_client_disconnected.desc: +"""Enable to publish client disconnected event messages.""" + +crl_cache_refresh_interval.desc: +"""The period to refresh the CRLs from the servers. This is a global setting for all URLs and listeners.""" + +crl_cache_refresh_interval.label: +"""CRL Cache Refresh Interval""" + +mqtt_listener_proxy_protocol.desc: +"""Enable the Proxy Protocol V1/2 if the EMQX cluster is deployed behind HAProxy or Nginx.
See: https://www.haproxy.com/blog/haproxy/proxy-protocol/""" - zh: """如果EMQX集群部署在 HAProxy 或 Nginx 之后,请启用代理协议 V1/2
-详情见: https://www.haproxy.com/blog/haproxy/proxy-protocol/""" - } - label: { - en: "Proxy protocol" - zh: "Proxy protocol" - } -} -mqtt_listener_proxy_protocol_timeout { - desc { - en: """Timeout for proxy protocol. EMQX will close the TCP connection if proxy protocol packet is not received within the timeout.""" - zh: """代理协议超时。如果在超时时间内未收到代理协议数据包,EMQX将关闭TCP连接。""" - } - label: { - en: "Proxy protocol timeout" - zh: "Proxy protocol 超时时间" - } -} +mqtt_listener_proxy_protocol.label: +"""Proxy protocol""" -global_authentication { - desc { - en: """Default authentication configs for all MQTT listeners. +mqtt_listener_access_rules.desc: +"""The access control rules for this listener.
See: https://github.com/emqtt/esockd#allowdeny""" -For per-listener overrides see authentication in listener configs +mqtt_listener_access_rules.label: +"""Access rules""" -This option can be configured with: -
    -
  • []: The default value, it allows *ALL* logins
  • one: For example {enable:true,backend:\"built_in_database\",mechanism=\"password_based\"}
  • chain: An array of structs.
+server_ssl_opts_schema_enable_ocsp_stapling.desc: +"""Whether to enable Online Certificate Status Protocol (OCSP) stapling for the listener. If set to true, requires defining the OCSP responder URL and issuer PEM path.""" -When a chain is configured, the login credentials are checked against the backends per the configured order, until an 'allow' or 'deny' decision can be made. +server_ssl_opts_schema_enable_ocsp_stapling.label: +"""Enable OCSP Stapling""" -If there is no decision after a full chain exhaustion, the login is rejected.""" - zh: """全局 MQTT 监听器的默认认证配置。 为每个监听器配置认证参考监听器器配置中的authentication 配置。 +fields_tcp_opts_send_timeout_close.desc: +"""Close the connection if send timeout.""" -该配置可以被配置为: -
    -
  • []: 默认值,允许所有的登录请求
  • 配置为单认证器,例如 {enable:true,backend:\"built_in_database\",mechanism=\"password_based\"}
  • 配置为认证器数组
+fields_tcp_opts_send_timeout_close.label: +"""TCP send timeout close""" -当配置为认证链后,登录凭证会按照配置的顺序进行检查,直到做出allowdeny的结果。 +sysmon_os_cpu_check_interval.desc: +"""The time interval for the periodic CPU check.""" -如果在所有的认证器都执行完后,还是没有结果,登录将被拒绝。""" - } -} +sysmon_os_cpu_check_interval.label: +"""The time interval for the periodic CPU check.""" -listener_authentication { - desc { - en: """Per-listener authentication override. -Authentication can be one single authenticator instance or a chain of authenticators as an array. -When authenticating a login (username, client ID, etc.) the authenticators are checked in the configured order.""" - zh: """监听器认证重载。 -认证配置可以是单个认证器实例,也可以是一个认证器数组组成的认证链。 -执行登录验证时(用户名、客户端 ID 等),将按配置的顺序执行。""" - } - label: { - en: "Per-listener authentication override" - zh: "每个监听器的认证覆盖" - } -} +sysmon_top_sample_interval.desc: +"""Specifies how often process top should be collected""" -fields_rate_limit_max_conn_rate { - desc { - en: """Maximum connections per second.""" - zh: """每秒最大连接数。""" - } - label: { - en: "Max connection rate" - zh: "每秒最大连接数" - } -} +sysmon_top_sample_interval.label: +"""Top sample interval""" -fields_rate_limit_conn_messages_in { - desc { - en: """Message limit for the external MQTT connections.""" - zh: """外部 MQTT 连接的消息限制。""" - } - label: { - en: "connecting messages in" - zh: "外部 MQTT 连接的消息限制" - } -} +fields_mqtt_quic_listener_idle_timeout_ms.desc: +"""How long a connection can go idle before it is gracefully shut down. 0 to disable timeout""" -fields_rate_limit_conn_bytes_in { - desc { - en: """Limit the rate of receiving packets for a MQTT connection. -The rate is counted by bytes of packets per second.""" - zh: """限制 MQTT 连接接收数据包的速率。 速率以每秒的数据包字节数计算。""" - } - label: { - en: "Connection bytes in" - zh: "数据包速率" - } -} +fields_mqtt_quic_listener_idle_timeout_ms.label: +"""Idle timeout ms""" -client_ssl_opts_schema_server_name_indication { - desc { - en: """Specify the host name to be used in TLS Server Name Indication extension.
-For instance, when connecting to "server.example.net", the genuine server -which accepts the connection and performs TLS handshake may differ from the -host the TLS client initially connects to, e.g. when connecting to an IP address -or when the host has multiple resolvable DNS records
-If not specified, it will default to the host name string which is used -to establish the connection, unless it is IP addressed used.
-The host name is then also used in the host name verification of the peer -certificate.
The special value 'disable' prevents the Server Name -Indication extension from being sent and disables the hostname -verification check.""" - zh: """指定要在 TLS 服务器名称指示扩展中使用的主机名。
-例如,当连接到 "server.example.net" 时,接受连接并执行 TLS 握手的真正服务器可能与 TLS 客户端最初连接到的主机不同, -例如,当连接到 IP 地址时,或者当主机具有多个可解析的 DNS 记录时
-如果未指定,它将默认为使用的主机名字符串 -建立连接,除非使用 IP 地址
-然后,主机名也用于对等机的主机名验证证书
-特殊值 disable 阻止发送服务器名称指示扩展,并禁用主机名验证检查。""" - } - label: { - en: "Server Name Indication" - zh: "服务器名称指示" - } -} - -fields_tcp_opts_active_n { - desc { - en: """Specify the {active, N} option for this Socket.
-See: https://erlang.org/doc/man/inet.html#setopts-2""" - zh: """为此套接字指定{active,N}选项
-See: https://erlang.org/doc/man/inet.html#setopts-2""" - } - label: { - en: "active_n" - zh: "active_n" - } -} - -fields_tcp_opts_backlog { - desc { - en: """TCP backlog defines the maximum length that the queue of -pending connections can grow to.""" - zh: """TCP backlog 定义了挂起连接队列可以增长到的最大长度。""" - } - label: { - en: "TCP backlog length" - zh: "TCP 连接队列长度" - } -} - -fields_tcp_opts_send_timeout { - desc { - en: """The TCP send timeout for the connections.""" - zh: """连接的 TCP 发送超时。""" - } - label: { - en: "TCP send timeout" - zh: "TCP 发送超时" - } -} - -fields_tcp_opts_send_timeout_close { - desc { - en: """Close the connection if send timeout.""" - zh: """如果发送超时,则关闭连接。""" - } - label: { - en: "TCP send timeout close" - zh: "TCP 发送超时关闭连接" - } -} - -fields_tcp_opts_recbuf { - desc { - en: """The TCP receive buffer (OS kernel) for the connections.""" - zh: """连接的 TCP 接收缓冲区(OS 内核)。""" - } - label: { - en: "TCP receive buffer" - zh: "TCP 接收缓冲区" - } -} - -fields_tcp_opts_sndbuf { - desc { - en: """The TCP send buffer (OS kernel) for the connections.""" - zh: """连接的 TCP 发送缓冲区(OS 内核)。""" - } - label: { - en: "TCP send buffer" - zh: "TCP 发送缓冲区" - } -} - -fields_tcp_opts_buffer { - desc { - en: """The size of the user-space buffer used by the driver.""" - zh: """驱动程序使用的用户空间缓冲区的大小。""" - } - label: { - en: "TCP user-space buffer" - zh: "TCP 用户态缓冲区" - } -} - -fields_tcp_opts_high_watermark { - desc { - en: """The socket is set to a busy state when the amount of data queued internally -by the VM socket implementation reaches this limit.""" - zh: """当 VM 套接字实现内部排队的数据量达到此限制时,套接字将设置为忙碌状态。""" - } - label: { - en: "TCP 高水位线" - zh: "" - } -} - -fields_tcp_opts_nodelay { - desc { - en: """The TCP_NODELAY flag for the connections.""" - zh: """连接的 TCP_NODELAY 标识""" - } - label: { - en: "TCP_NODELAY" - zh: "TCP_NODELAY" - } -} - -fields_tcp_opts_reuseaddr { - desc { - en: """The SO_REUSEADDR flag for the connections.""" - zh: """连接的 SO_REUSEADDR 标识。""" - } - label: { - en: "SO_REUSEADDR" - zh: "SO_REUSEADDR" - } -} - -fields_trace_payload_encode { - desc { - en: """Determine the format of the payload format in the trace file.
-`text`: Text-based protocol or plain text protocol. - It is recommended when payload is JSON encoded.
-`hex`: Binary hexadecimal encode. It is recommended when payload is a custom binary protocol.
-`hidden`: payload is obfuscated as `******`""" - zh: """确定跟踪文件中有效负载格式的格式。
-`text`:基于文本的协议或纯文本协议。 -建议在有效负载为JSON编码时使用
-`hex`:二进制十六进制编码。当有效负载是自定义二进制协议时,建议使用此选项
-`hidden`:有效负载被模糊化为 `******`""" - } - label: { - en: "Payload encode" - zh: "有效负载编码" - } -} - -fields_ws_opts_mqtt_path { - desc { - en: """WebSocket's MQTT protocol path. So the address of EMQX Broker's WebSocket is: -ws://{ip}:{port}/mqtt""" - zh: """WebSocket 的 MQTT 协议路径。因此,EMQX Broker的WebSocket地址为: -ws://{ip}:{port}/mqtt""" - } - label: { - en: "WS MQTT Path" - zh: "WS MQTT 路径" - } -} - -fields_ws_opts_mqtt_piggyback { - desc { - en: """Whether a WebSocket message is allowed to contain multiple MQTT packets.""" - zh: """WebSocket消息是否允许包含多个 MQTT 数据包。""" - } - label: { - en: "MQTT Piggyback" - zh: "MQTT Piggyback" - } -} - -fields_ws_opts_compress { - desc { - en: """If true, compress WebSocket messages using zlib.
-The configuration items under deflate_opts belong to the compression-related parameter configuration.""" - zh: """如果 true,则使用zlib 压缩 WebSocket 消息
-deflate_opts 下的配置项属于压缩相关参数配置。""" - } - label: { - en: "Ws compress" - zh: "Ws 压缩" - } -} - -fields_ws_opts_idle_timeout { - desc { - en: """Close transport-layer connections from the clients that have not sent MQTT CONNECT message within this interval.""" - zh: """关闭在此间隔内未发送 MQTT CONNECT 消息的客户端的传输层连接。""" - } - label: { - en: "WS idle timeout" - zh: "WS 空闲时间" - } -} - -fields_ws_opts_max_frame_size { - desc { - en: """The maximum length of a single MQTT packet.""" - zh: """单个 MQTT 数据包的最大长度。""" - } - label: { - en: "Max frame size" - zh: "最大数据包长度" - } -} - -fields_ws_opts_fail_if_no_subprotocol { - desc { - en: """If true, the server will return an error when +fields_ws_opts_fail_if_no_subprotocol.desc: +"""If true, the server will return an error when the client does not carry the Sec-WebSocket-Protocol field.
Note: WeChat applet needs to disable this verification.""" - zh: """如果true,当客户端未携带Sec WebSocket Protocol字段时,服务器将返回一个错误。 -
注意:微信小程序需要禁用此验证。""" - } - label: { - en: "Fail if no subprotocol" - zh: "无 subprotocol 则失败" - } -} -fields_ws_opts_supported_subprotocols { - desc { - en: """Comma-separated list of supported subprotocols.""" - zh: """逗号分隔的 subprotocols 支持列表。""" - } - label: { - en: "Supported subprotocols" - zh: "Subprotocols 支持列表" - } -} +fields_ws_opts_fail_if_no_subprotocol.label: +"""Fail if no subprotocol""" -fields_ws_opts_check_origin_enable { - desc { - en: """If true, origin HTTP header will be - validated against the list of allowed origins configured in check_origins - parameter.""" - zh: """如果trueoriginHTTP 头将根据check_origins参数中配置的允许来源列表进行验证。""" - } - label: { - en: "Check origin" - zh: "检查 origin" - } -} +mqtt_wildcard_subscription.desc: +"""Whether to enable support for MQTT wildcard subscription.""" -fields_ws_opts_allow_origin_absence { - desc { - en: """If false and check_origin_enable is - true, the server will reject requests that don't have origin - HTTP header.""" - zh: """If false and check_origin_enable is true, the server will reject requests that don't have origin HTTP header.""" - } - label: { - en: "Allow origin absence" - zh: "允许 origin 缺失" - } -} +mqtt_wildcard_subscription.label: +"""Wildcard Subscription Available""" -fields_ws_opts_check_origins { - desc { - en: """List of allowed origins.
See check_origin_enable.""" - zh: """允许的 origins 列表""" - } - label: { - en: "Allowed origins" - zh: "允许的 origins" - } -} +server_ssl_opts_schema_ocsp_refresh_interval.desc: +"""The period to refresh the OCSP response for the server.""" -fields_ws_opts_proxy_address_header { - desc { - en: """HTTP header used to pass information about the client IP address. -Relevant when the EMQX cluster is deployed behind a load-balancer.""" - zh: """HTTP 头,用于传递有关客户端 IP 地址的信息。 -当 EMQX 集群部署在负载平衡器后面时,这一点非常重要。""" - } - label: { - en: "Proxy address header" - zh: "客户端地址头" - } -} +server_ssl_opts_schema_ocsp_refresh_interval.label: +"""OCSP Refresh Interval""" -fields_ws_opts_proxy_port_header { - desc { - en: """HTTP header used to pass information about the client port. Relevant when the EMQX cluster is deployed behind a load-balancer.""" - zh: """HTTP 头,用于传递有关客户端端口的信息。当 EMQX 集群部署在负载平衡器后面时,这一点非常重要。""" - } - label: { - en: "Proxy port header" - zh: "客户端端口头" - } -} +overload_protection_backoff_hibernation.desc: +"""When at high load, skip process hibernation.""" + +overload_protection_backoff_hibernation.label: +"""Skip hibernation""" + +fields_ws_opts_max_frame_size.desc: +"""The maximum length of a single MQTT packet.""" + +fields_ws_opts_max_frame_size.label: +"""Max frame size""" + +sys_event_messages.desc: +"""Client events messages.""" + +broker_perf_trie_compaction.desc: +"""Enable trie path compaction. +Enabling it significantly improves wildcard topic subscribe rate, if wildcard topics have unique prefixes like: 'sensor/{{id}}/+/', where ID is unique per subscriber. +Topic match performance (when publishing) may degrade if messages are mostly published to topics with large number of levels. + +NOTE: This is a cluster-wide configuration. It requires all nodes to be stopped before changing it.""" + +sysmon_vm_large_heap.desc: +"""When an Erlang process consumed a large amount of memory for its heap space, +the system will write a warning level large_heap log, and an MQTT message is published to +the system topic $SYS/sysmon/large_heap.""" + +sysmon_vm_large_heap.label: +"""Enable Large Heap monitoring.""" } diff --git a/rel/i18n/emqx_slow_subs_api.hocon b/rel/i18n/emqx_slow_subs_api.hocon index 92862bc98..edf473487 100644 --- a/rel/i18n/emqx_slow_subs_api.hocon +++ b/rel/i18n/emqx_slow_subs_api.hocon @@ -1,66 +1,30 @@ emqx_slow_subs_api { - clear_records_api { - desc { - en: "Clear current data and re count slow topic" - zh: "清除当前记录,然后重新开始统计" - } - } +clear_records_api.desc: +"""Clear current data and re count slow topic""" - get_records_api { - desc { - en: "View slow topics statistics record data" - zh: "查看慢订阅的统计数据" - } - } +clientid.desc: +"""Message clientid""" - get_setting_api { - desc { - en: "View slow subs settings" - zh: "查看配置" - } - } +get_records_api.desc: +"""View slow topics statistics record data""" - update_setting_api { - desc { - en: "Update slow subs settings" - zh: "更新配置" - } - } +get_setting_api.desc: +"""View slow subs settings""" - clientid { - desc { - en: "Message clientid" - zh: "消息的客户端 ID" - } - } +last_update_time.desc: +"""The timestamp of last update""" - node { - desc { - en: "Message node name" - zh: "消息的节点名称" - } - } +node.desc: +"""Message node name""" - topic { - desc { - en: "Message topic" - zh: "消息的主题" - } - } +timespan.desc: +"""Timespan for message transmission""" - timespan { - desc { - en: "Timespan for message transmission" - zh: "消息的传输耗时" - } - } +topic.desc: +"""Message topic""" - last_update_time { - desc { - en: "The timestamp of last update" - zh: 
"记录的更新时间戳" - } - } +update_setting_api.desc: +"""Update slow subs settings""" } diff --git a/rel/i18n/emqx_slow_subs_schema.hocon b/rel/i18n/emqx_slow_subs_schema.hocon index e65e802c2..4164db75a 100644 --- a/rel/i18n/emqx_slow_subs_schema.hocon +++ b/rel/i18n/emqx_slow_subs_schema.hocon @@ -1,38 +1,18 @@ emqx_slow_subs_schema { - enable { - desc { - en: "Enable this feature" - zh: "开启慢订阅" - } - } +enable.desc: +"""Enable this feature""" - threshold { - desc { - en: "The latency threshold for statistics" - zh: "慢订阅统计的阈值" - } - } +expire_interval.desc: +"""The eviction time of the record, which in the statistics record table""" - expire_interval { - desc { - en: "The eviction time of the record, which in the statistics record table" - zh: "慢订阅记录的有效时间" - } - } +stats_type.desc: +"""The method to calculate the latency""" - top_k_num { - desc { - en: "The maximum number of records in the slow subscription statistics record table" - zh: "慢订阅统计表的记录数量上限" - } - } +threshold.desc: +"""The latency threshold for statistics""" - stats_type { - desc { - en: "The method to calculate the latency" - zh: "慢订阅的统计类型" - } - } +top_k_num.desc: +"""The maximum number of records in the slow subscription statistics record table""" } diff --git a/rel/i18n/emqx_statsd_api.hocon b/rel/i18n/emqx_statsd_api.hocon index 2721188bd..d8bab13a7 100644 --- a/rel/i18n/emqx_statsd_api.hocon +++ b/rel/i18n/emqx_statsd_api.hocon @@ -1,16 +1,9 @@ emqx_statsd_api { - get_statsd_config_api { - desc { - en: """List the configuration of StatsD metrics collection and push service.""" - zh: """列出 StatsD 指标采集和推送服务的的配置。""" - } - } +get_statsd_config_api.desc: +"""List the configuration of StatsD metrics collection and push service.""" + +update_statsd_config_api.desc: +"""Update the configuration of StatsD metrics collection and push service.""" - update_statsd_config_api { - desc { - en: """Update the configuration of StatsD metrics collection and push service.""" - zh: """更新 StatsD 指标采集和推送服务的配置。""" - } - } } diff --git a/rel/i18n/emqx_statsd_schema.hocon b/rel/i18n/emqx_statsd_schema.hocon index 46d654a46..fc21710c4 100644 --- a/rel/i18n/emqx_statsd_schema.hocon +++ b/rel/i18n/emqx_statsd_schema.hocon @@ -1,61 +1,30 @@ emqx_statsd_schema { - get_statsd_config_api { - desc { - en: """List the configuration of StatsD metrics collection and push service.""" - zh: """列出 StatsD 指标采集和推送服务的的配置。""" - } - } +enable.desc: +"""Enable or disable StatsD metrics collection and push service.""" - update_statsd_config_api { - desc { - en: """Update the configuration of StatsD metrics collection and push service.""" - zh: """更新 StatsD 指标采集和推送服务的配置。""" - } - } +flush_interval.desc: +"""The push interval for metrics.""" - statsd { - desc { - en: """StatsD metrics collection and push configuration.""" - zh: """StatsD 指标采集与推送配置。""" - } - label { - en: """StatsD""" - zh: """StatsD""" - } - } +get_statsd_config_api.desc: +"""List the configuration of StatsD metrics collection and push service.""" - server { - desc { - en: """StatsD server address.""" - zh: """StatsD 服务器地址。""" - } - } +sample_interval.desc: +"""The sampling interval for metrics.""" - sample_interval { - desc { - en: """The sampling interval for metrics.""" - zh: """指标的采样间隔。""" - } - } +server.desc: +"""StatsD server address.""" - flush_interval { - desc { - en: """The push interval for metrics.""" - zh: """指标的推送间隔。""" - } - } - tags { - desc { - en: """The tags for metrics.""" - zh: """指标的标签。""" - } - } +statsd.desc: +"""StatsD metrics collection and push configuration.""" + +statsd.label: 
+"""StatsD""" + +tags.desc: +"""The tags for metrics.""" + +update_statsd_config_api.desc: +"""Update the configuration of StatsD metrics collection and push service.""" - enable { - desc { - en: """Enable or disable StatsD metrics collection and push service.""" - zh: """启用或禁用 StatsD 指标采集和推送服务。""" - } - } } diff --git a/rel/i18n/emqx_stomp_schema.hocon b/rel/i18n/emqx_stomp_schema.hocon index 3d166abb5..05d5b9d18 100644 --- a/rel/i18n/emqx_stomp_schema.hocon +++ b/rel/i18n/emqx_stomp_schema.hocon @@ -1,32 +1,16 @@ emqx_stomp_schema { - stomp { - desc { - en: """The Stomp Gateway configuration. + +stom_frame_max_body_length.desc: +"""Maximum number of bytes of Body allowed per Stomp packet""" + +stom_frame_max_headers.desc: +"""The maximum number of Header""" + +stomp.desc: +"""The Stomp Gateway configuration. This gateway supports v1.2/1.1/1.0""" - zh: """Stomp 网关配置。当前实现支持 v1.2/1.1/1.0 协议版本""" - } - } - - stom_frame_max_headers { - desc { - en: """The maximum number of Header""" - zh: """允许的 Header 最大数量""" - } - } - - stomp_frame_max_headers_length { - desc { - en: """The maximum string length of the Header Value""" - zh: """允许的 Header 字符串的最大长度""" - } - } - - stom_frame_max_body_length { - desc { - en: """Maximum number of bytes of Body allowed per Stomp packet""" - zh: """允许的 Stomp 报文 Body 的最大字节数""" - } - } +stomp_frame_max_headers_length.desc: +"""The maximum string length of the Header Value""" } diff --git a/rel/i18n/emqx_telemetry_api.hocon b/rel/i18n/emqx_telemetry_api.hocon index a8f562065..5c61b8d3c 100644 --- a/rel/i18n/emqx_telemetry_api.hocon +++ b/rel/i18n/emqx_telemetry_api.hocon @@ -1,121 +1,54 @@ emqx_telemetry_api { - get_telemetry_status_api { - desc { - en: """Get telemetry status""" - zh: """获取遥测状态""" - } - } +active_modules.desc: +"""Get active modules""" - update_telemetry_status_api { - desc { - en: """Enable or disable telemetry""" - zh: """更新遥测状态""" - } - } +active_plugins.desc: +"""Get active plugins""" - get_telemetry_data_api { - desc { - en: """Get telemetry data""" - zh: """获取遥测数据""" - } - } +emqx_version.desc: +"""Get emqx version""" - enable { - desc { - en: """Enable telemetry""" - zh: """启用遥测""" - } - } +enable.desc: +"""Enable telemetry""" - emqx_version { - desc { - en: """Get emqx version""" - zh: """获取 emqx 版本""" - } - } +get_telemetry_data_api.desc: +"""Get telemetry data""" - license { - desc { - en: """Get license information""" - zh: """获取 license 信息""" - } - } +get_telemetry_status_api.desc: +"""Get telemetry status""" - os_name { - desc { - en: """Get OS name""" - zh: """获取操作系统名称""" - } - } +license.desc: +"""Get license information""" - os_version { - desc { - en: """Get OS version""" - zh: """获取操作系统版本""" - } - } +messages_received.desc: +"""Get number of messages received""" - otp_version { - desc { - en: """Get Erlang OTP version""" - zh: """获取 OTP 版本""" - } - } +messages_sent.desc: +"""Get number of messages sent""" - up_time { - desc { - en: """Get uptime""" - zh: """获取运行时间""" - } - } +nodes_uuid.desc: +"""Get nodes UUID""" - uuid { - desc { - en: """Get UUID""" - zh: """获取 UUID""" - } - } +num_clients.desc: +"""Get number of clients""" - nodes_uuid { - desc { - en: """Get nodes UUID""" - zh: """获取节点 UUID""" - } - } +os_name.desc: +"""Get OS name""" - active_plugins { - desc { - en: """Get active plugins""" - zh: """获取活跃插件""" - } - } +os_version.desc: +"""Get OS version""" - active_modules { - desc { - en: """Get active modules""" - zh: """获取活跃模块""" - } - } +otp_version.desc: +"""Get Erlang OTP version""" - num_clients { - desc { - en: 
"""Get number of clients""" - zh: """获取客户端数量""" - } - } +up_time.desc: +"""Get uptime""" - messages_received { - desc { - en: """Get number of messages received""" - zh: """获取接收到的消息数量""" - } - } +update_telemetry_status_api.desc: +"""Enable or disable telemetry""" + +uuid.desc: +"""Get UUID""" - messages_sent { - desc { - en: """Get number of messages sent""" - zh: """获取发送的消息数量""" - } - } } diff --git a/rel/i18n/emqx_topic_metrics_api.hocon b/rel/i18n/emqx_topic_metrics_api.hocon index 22f038d4e..94c58f0cd 100644 --- a/rel/i18n/emqx_topic_metrics_api.hocon +++ b/rel/i18n/emqx_topic_metrics_api.hocon @@ -1,240 +1,105 @@ emqx_topic_metrics_api { - get_topic_metrics_api { - desc { - en: """List topic metrics""" - zh: """获取主题监控数据""" - } - } - reset_topic_metrics_api{ - desc { - en: """Reset telemetry status""" - zh: """重置主题监控状态""" - } - } +message_qos1_in_rate.desc: +"""QoS1 in messages rate""" - post_topic_metrics_api { - desc { - en: """Create topic metrics""" - zh: """创建主题监控数据""" - } - } +message_out_count.desc: +"""Out messages count""" - gat_topic_metrics_data_api { - desc { - en: """Get topic metrics""" - zh: """获取主题监控数据""" - } - } +message_qos2_out_rate.desc: +"""QoS2 out messages rate""" - delete_topic_metrics_data_api { - desc { - en: """Delete topic metrics""" - zh: """删除主题监控数据""" - } - } +message_qos0_in_rate.desc: +"""QoS0 in messages rate""" - topic_metrics_api_response409 { - desc { - en: """Conflict. Topic metrics exceeded max limit 512""" - zh: """冲突。主题监控数据超过最大限制512""" - } - } +get_topic_metrics_api.desc: +"""List topic metrics""" - topic_metrics_api_response400 { - desc { - en: """Bad request. Already exists or bad topic name""" - zh: """错误请求。已存在或错误的主题名称""" - } - } +reset_time.desc: +"""Reset time. In rfc3339. Nullable if never reset""" - topic_metrics_api_response404 { - desc { - en: """Not Found. Topic metrics not found""" - zh: """未找到。主题监控数据未找到""" - } - } +topic_metrics_api_response400.desc: +"""Bad request. Already exists or bad topic name""" - reset_topic_desc { - desc { - en: """Topic Name. If this parameter is not present,all created topic metrics will be reset.""" - zh: """主题名称。如果此参数不存在,则所有创建的主题监控数据都将重置。""" - } - } +reset_topic_desc.desc: +"""Topic Name. If this parameter is not present,all created topic metrics will be reset.""" - topic { - desc { - en: """Topic""" - zh: """主题""" - } - } +topic_metrics_api_response409.desc: +"""Conflict. Topic metrics exceeded max limit 512""" - topic_in_body { - desc { - en: """Raw topic string""" - zh: """主题字符串""" - } - } +post_topic_metrics_api.desc: +"""Create topic metrics""" - topic_in_path { - desc { - en: """Topic string. Notice: Topic string in url path must be encoded""" - zh: """主题字符串。注意:主题字符串在url路径中必须编码""" - } - } +message_dropped_rate.desc: +"""Dropped messages rate""" - action { - desc { - en: """Action. Only support reset""" - zh: """操作,仅支持 reset""" - } - } +message_qos2_in_rate.desc: +"""QoS2 in messages rate""" - create_time { - desc { - en: """Create time""" - zh: """创建时间。标准 rfc3339 时间格式,例如:2018-01-01T12:00:00Z""" - } - } +message_in_rate.desc: +"""In messages rate""" - reset_time { - desc { - en: """Reset time. In rfc3339. 
Nullable if never reset""" - zh: """重置时间。标准 rfc3339 时间格式,例如:2018-01-01T12:00:00Z。如果从未重置则为空""" - } - } +message_qos0_out_rate.desc: +"""QoS0 out messages rate""" - metrics { - desc { - en: """Metrics""" - zh: """监控数据""" - } - } +message_qos2_in_count.desc: +"""QoS2 in messages count""" - message_dropped_count { - desc { - en: """Dropped messages count""" - zh: """丢弃消息数量""" - } - } +message_dropped_count.desc: +"""Dropped messages count""" - message_dropped_rate { - desc { - en: """Dropped messages rate""" - zh: """丢弃消息速率""" - } - } +topic_metrics_api_response404.desc: +"""Not Found. Topic metrics not found""" - message_in_count { - desc { - en: """In messages count""" - zh: """接收消息数量""" - } - } +topic_in_path.desc: +"""Topic string. Notice: Topic string in url path must be encoded""" - message_in_rate { - desc { - en: """In messages rate""" - zh: """接收消息速率""" - } - } +action.desc: +"""Action. Only support reset""" - message_out_count { - desc { - en: """Out messages count""" - zh: """发送消息数量""" - } - } +message_qos0_in_count.desc: +"""QoS0 in messages count""" - message_out_rate { - desc { - en: """Out messages rate""" - zh: """发送消息速率""" - } - } +message_qos1_out_rate.desc: +"""QoS1 out messages rate""" - message_qos0_in_count { - desc { - en: """QoS0 in messages count""" - zh: """QoS0 接收消息数量""" - } - } +topic.desc: +"""Topic""" - message_qos0_in_rate { - desc { - en: """QoS0 in messages rate""" - zh: """QoS0 接收消息速率""" - } - } +reset_topic_metrics_api.desc: +"""Reset telemetry status""" - message_qos0_out_count { - desc { - en: """QoS0 out messages count""" - zh: """QoS0 发送消息数量""" - } - } +create_time.desc: +"""Create time""" - message_qos0_out_rate { - desc { - en: """QoS0 out messages rate""" - zh: """QoS0 发送消息速率""" - } - } +metrics.desc: +"""Metrics""" - message_qos1_in_count { - desc { - en: """QoS1 in messages count""" - zh: """QoS1 接收消息数量""" - } - } +message_qos1_out_count.desc: +"""QoS1 out messages count""" - message_qos1_in_rate { - desc { - en: """QoS1 in messages rate""" - zh: """QoS1 接收消息速率""" - } - } +gat_topic_metrics_data_api.desc: +"""Get topic metrics""" - message_qos1_out_count { - desc { - en: """QoS1 out messages count""" - zh: """QoS1 发送消息数量""" - } - } +message_qos1_in_count.desc: +"""QoS1 in messages count""" - message_qos1_out_rate { - desc { - en: """QoS1 out messages rate""" - zh: """QoS1 发送消息速率""" - } - } +delete_topic_metrics_data_api.desc: +"""Delete topic metrics""" - message_qos2_in_count { - desc { - en: """QoS2 in messages count""" - zh: """QoS2 接收消息数量""" - } - } +message_qos0_out_count.desc: +"""QoS0 out messages count""" - message_qos2_in_rate { - desc { - en: """QoS2 in messages rate""" - zh: """QoS2 接收消息速率""" - } - } +topic_in_body.desc: +"""Raw topic string""" - message_qos2_out_count { - desc { - en: """QoS2 out messages count""" - zh: """QoS2 发送消息数量""" - } - } +message_in_count.desc: +"""In messages count""" - message_qos2_out_rate { - desc { - en: """QoS2 out messages rate""" - zh: """QoS2 发送消息速率""" - } - } +message_qos2_out_count.desc: +"""QoS2 out messages count""" + +message_out_rate.desc: +"""Out messages rate""" } diff --git a/rel/i18n/zh/emqx_authn_api.hocon b/rel/i18n/zh/emqx_authn_api.hocon new file mode 100644 index 000000000..bd8332ac7 --- /dev/null +++ b/rel/i18n/zh/emqx_authn_api.hocon @@ -0,0 +1,96 @@ +emqx_authn_api { + +authentication_get.desc: +"""列出全局认证链上的认证器。""" + +authentication_id_delete.desc: +"""删除全局认证链上的指定认证器。""" + +authentication_id_get.desc: +"""获取全局认证链上的指定认证器。""" + +authentication_id_position_put.desc: +"""更改全局认证链上指定认证器的顺序。""" 
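For context, the ordered global authentication chain that these API descriptions operate on is declared as an array in the broker configuration. A minimal hypothetical sketch — the backends, URL and placeholders below are illustrative assumptions, not values taken from this patch — could look like:

    # Hypothetical global authentication chain; authenticators are evaluated
    # in array order until one of them produces an allow or deny decision.
    authentication = [
      {
        mechanism = password_based
        backend = built_in_database
        enable = true
      },
      {
        mechanism = password_based
        backend = http
        method = post
        url = "http://127.0.0.1:9000/auth"          # assumed endpoint
        body = {username = "${username}", password = "${password}"}
        enable = true
      }
    ]

Reordering an authenticator through the position API ('front', 'rear', 'before:...', 'after:...') effectively changes its index in this array.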
+ +authentication_id_put.desc: +"""更新全局认证链上的指定认证器。""" + +authentication_id_status_get.desc: +"""获取全局认证链上指定认证器的状态。""" + +authentication_id_users_get.desc: +"""获取全局认证链上指定认证器中的用户数据。""" + +authentication_id_users_post.desc: +"""为全局认证链上的指定认证器创建用户数据。""" + +authentication_id_users_user_id_delete.desc: +"""删除全局认证链上指定认证器中的指定用户数据。""" + +authentication_id_users_user_id_get.desc: +"""获取全局认证链上指定认证器中的指定用户数据。""" + +authentication_id_users_user_id_put.desc: +"""更新全局认证链上指定认证器中的指定用户数据。""" + +authentication_post.desc: +"""为全局认证链创建认证器。""" + +is_superuser.desc: +"""是否是超级用户""" + +like_user_id.desc: +"""使用用户 ID (username 或 clientid)模糊查询。""" + +like_user_id.label: +"""like_user_id""" + +listeners_listener_id_authentication_get.desc: +"""列出监听器认证链上的认证器。""" + +listeners_listener_id_authentication_id_delete.desc: +"""删除监听器认证链上的指定认证器。""" + +listeners_listener_id_authentication_id_get.desc: +"""获取监听器认证链上的指定认证器。""" + +listeners_listener_id_authentication_id_position_put.desc: +"""更改监听器认证链上指定认证器的顺序。""" + +listeners_listener_id_authentication_id_put.desc: +"""更新监听器认证链上的指定认证器。""" + +listeners_listener_id_authentication_id_status_get.desc: +"""获取监听器认证链上指定认证器的状态。""" + +listeners_listener_id_authentication_id_users_get.desc: +"""列出监听器认证链上指定认证器中的用户数据。""" + +listeners_listener_id_authentication_id_users_post.desc: +"""为监听器认证链上的指定认证器创建用户数据。""" + +listeners_listener_id_authentication_id_users_user_id_delete.desc: +"""删除监听器认证链上指定认证器中的指定用户数据。""" + +listeners_listener_id_authentication_id_users_user_id_get.desc: +"""获取监听器认证链上指定认证器中的指定用户数据。""" + +listeners_listener_id_authentication_id_users_user_id_put.desc: +"""更新监听器认证链上指定认证器中的指定用户数据。""" + +listeners_listener_id_authentication_post.desc: +"""在监听器认证链上创建认证器。""" + +param_auth_id.desc: +"""认证器 ID。""" + +param_listener_id.desc: +"""监听器 ID。""" + +param_position.desc: +"""认证者在链中的位置。可能的值是 'front', 'rear', 'before:{other_authenticator}', 'after:{other_authenticator}'""" + +param_user_id.desc: +"""用户 ID。""" + +} diff --git a/rel/i18n/zh/emqx_authn_http.hocon b/rel/i18n/zh/emqx_authn_http.hocon new file mode 100644 index 000000000..17c922b33 --- /dev/null +++ b/rel/i18n/zh/emqx_authn_http.hocon @@ -0,0 +1,45 @@ +emqx_authn_http { + +body.desc: +"""HTTP request body。""" + +body.label: +"""Request Body""" + +get.desc: +"""使用 HTTP Server 作为认证服务的认证器的配置项 (使用 GET 请求)。""" + +headers.desc: +"""HTTP Headers 列表""" + +headers.label: +"""请求头""" + +headers_no_content_type.desc: +"""HTTP Headers 列表 (无 content-type) 。""" + +headers_no_content_type.label: +"""请求头(无 content-type)""" + +method.desc: +"""HTTP 请求方法。""" + +method.label: +"""请求方法""" + +post.desc: +"""使用 HTTP Server 作为认证服务的认证器的配置项 (使用 POST 请求)。""" + +request_timeout.desc: +"""HTTP 请求超时时长。""" + +request_timeout.label: +"""请求超时时间""" + +url.desc: +"""认证 HTTP 服务器地址。""" + +url.label: +"""URL""" + +} diff --git a/rel/i18n/zh/emqx_authn_jwt.hocon b/rel/i18n/zh/emqx_authn_jwt.hocon new file mode 100644 index 000000000..2aa27c1de --- /dev/null +++ b/rel/i18n/zh/emqx_authn_jwt.hocon @@ -0,0 +1,118 @@ +emqx_authn_jwt { + +acl_claim_name.desc: +"""JWT claim name to use for getting ACL rules.""" + +acl_claim_name.label: +"""ACL claim name""" + +algorithm.desc: +"""JWT 签名算法,支持 HMAC (配置为 hmac-based)和 RSA、ECDSA (配置为 public-key)。""" + +algorithm.label: +"""JWT 签名算法""" + +cacertfile.desc: +"""包含 PEM 编码的 CA 证书的文件的路径。""" + +cacertfile.label: +"""CA 证书文件""" + +certfile.desc: +"""包含用户证书的文件的路径。""" + +certfile.label: +"""证书文件""" + +enable.desc: +"""启用/禁用 SSL。""" + +enable.label: +"""启用/禁用 SSL""" + +endpoint.desc: +"""JWKS 端点, 它是一个以 JWKS 格式返回服务端的公钥集的只读端点。""" + 
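The JWKS fields documented above belong to a single authenticator entry. A small hypothetical sketch — the endpoint URL, refresh interval and claim check are invented for illustration — might be:

    # Hypothetical JWT authenticator that fetches verification keys from a
    # JWKS endpoint and checks that the 'clientid' claim matches the client.
    {
      mechanism = jwt
      use_jwks = true
      endpoint = "https://auth.example.com/jwks.json"   # assumed URL
      refresh_interval = 300                            # seconds, assumed
      from = password
      verify_claims = {clientid = "${clientid}"}
      enable = true
    }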
+endpoint.label: +"""JWKS Endpoint""" + +from.desc: +"""要从中获取 JWT 的字段。""" + +from.label: +"""源字段""" + +hmac-based.desc: +"""用于认证的 JWT 使用 HMAC 算法签发时的配置。""" + +jwks.desc: +"""用于认证的 JWTs 需要从 JWKS 端点获取时的配置。""" + +keyfile.desc: +"""包含 PEM 编码的用户私钥的文件的路径。""" + +keyfile.label: +"""私钥文件""" + +public-key.desc: +"""用于认证的 JWT 使用 RSA 或 ECDSA 算法签发时的配置。""" + +public_key.desc: +"""用于验证 JWT 的公钥。""" + +public_key.label: +"""公钥""" + +refresh_interval.desc: +"""JWKS 刷新间隔。""" + +refresh_interval.label: +"""JWKS 刷新间隔""" + +secret.desc: +"""使用 HMAC 算法时用于验证 JWT 的密钥""" + +secret.label: +"""Secret""" + +secret_base64_encoded.desc: +"""密钥是否为 Base64 编码。""" + +secret_base64_encoded.label: +"""密钥是否为 Base64 编码""" + +server_name_indication.desc: +"""服务器名称指示(SNI)。""" + +server_name_indication.label: +"""服务器名称指示""" + +ssl.desc: +"""SSL 选项。""" + +ssl.label: +"""SSL 选项""" + +use_jwks.desc: +"""是否使用 JWKS。""" + +use_jwks.label: +"""是否使用 JWKS""" + +verify.desc: +"""指定握手过程中是否校验对端证书。""" + +verify.label: +"""Verify""" + +verify_claims.desc: +"""需要验证的自定义声明列表,它是一个名称/值对列表。 +值可以使用以下占位符: +- ${username}: 将在运行时被替换为客户端连接时使用的用户名 +- ${clientid}: 将在运行时被替换为客户端连接时使用的客户端标识符 +认证时将验证 JWT(取自 Password 字段)中 claims 的值是否与 verify_claims 中要求的相匹配。""" + +verify_claims.label: +"""Verify Claims""" + +} diff --git a/rel/i18n/zh/emqx_authn_mnesia.hocon b/rel/i18n/zh/emqx_authn_mnesia.hocon new file mode 100644 index 000000000..1ba394627 --- /dev/null +++ b/rel/i18n/zh/emqx_authn_mnesia.hocon @@ -0,0 +1,12 @@ +emqx_authn_mnesia { + +authentication.desc: +"""使用内置数据库作为认证数据源的认证器的配置项。""" + +user_id_type.desc: +"""指定使用客户端ID `clientid` 还是用户名 `username` 进行认证。""" + +user_id_type.label: +"""认证 ID 类型""" + +} diff --git a/rel/i18n/zh/emqx_authn_mongodb.hocon b/rel/i18n/zh/emqx_authn_mongodb.hocon new file mode 100644 index 000000000..01419e2b9 --- /dev/null +++ b/rel/i18n/zh/emqx_authn_mongodb.hocon @@ -0,0 +1,45 @@ +emqx_authn_mongodb { + +collection.desc: +"""存储认证数据的集合。""" + +collection.label: +"""集合""" + +filter.desc: +"""在查询中定义过滤条件的条件表达式。 +过滤器支持如下占位符: +- ${username}: 将在运行时被替换为客户端连接时使用的用户名 +- ${clientid}: 将在运行时被替换为客户端连接时使用的客户端标识符""" + +filter.label: +"""过滤器""" + +is_superuser_field.desc: +"""文档中用于定义用户是否具有超级用户权限的字段。""" + +is_superuser_field.label: +"""超级用户字段""" + +password_hash_field.desc: +"""文档中用于存放密码散列的字段。""" + +password_hash_field.label: +"""密码散列字段""" + +replica-set.desc: +"""使用 MongoDB (Replica Set) 作为认证数据源的认证器的配置项。""" + +salt_field.desc: +"""文档中用于存放盐值的字段。""" + +salt_field.label: +"""盐值字段""" + +sharded-cluster.desc: +"""使用 MongoDB (Sharded Cluster) 作为认证数据源的认证器的配置项。""" + +standalone.desc: +"""使用 MongoDB (Standalone) 作为认证数据源的认证器的配置项。""" + +} diff --git a/rel/i18n/zh/emqx_authn_mysql.hocon b/rel/i18n/zh/emqx_authn_mysql.hocon new file mode 100644 index 000000000..e718ad723 --- /dev/null +++ b/rel/i18n/zh/emqx_authn_mysql.hocon @@ -0,0 +1,18 @@ +emqx_authn_mysql { + +authentication.desc: +"""使用 MySQL 作为认证数据源的认证器的配置项。""" + +query.desc: +"""用于查询密码散列等用于认证的数据的 SQL 语句。""" + +query.label: +"""查询语句""" + +query_timeout.desc: +"""SQL 查询的超时时间。""" + +query_timeout.label: +"""查询超时""" + +} diff --git a/rel/i18n/zh/emqx_authn_pgsql.hocon b/rel/i18n/zh/emqx_authn_pgsql.hocon new file mode 100644 index 000000000..97bf608d2 --- /dev/null +++ b/rel/i18n/zh/emqx_authn_pgsql.hocon @@ -0,0 +1,12 @@ +emqx_authn_pgsql { + +authentication.desc: +"""使用 PostgreSQL 作为认证数据源的认证器的配置项。""" + +query.desc: +"""用于查询密码散列等用于认证的数据的 SQL 语句。""" + +query.label: +"""查询语句""" + +} diff --git a/rel/i18n/zh/emqx_authn_redis.hocon b/rel/i18n/zh/emqx_authn_redis.hocon new file mode 100644 index 000000000..e3d6b9d96 
--- /dev/null +++ b/rel/i18n/zh/emqx_authn_redis.hocon @@ -0,0 +1,18 @@ +emqx_authn_redis { + +cluster.desc: +"""使用 Redis (Cluster) 作为认证数据源的认证器的配置项。""" + +cmd.desc: +"""用于查询密码散列等用于认证的数据的 Redis Command,目前仅支持 HGETHMGET。""" + +cmd.label: +"""Command""" + +sentinel.desc: +"""使用 Redis (Sentinel) 作为认证数据源的认证器的配置项。""" + +standalone.desc: +"""使用 Redis (Standalone) 作为认证数据源的认证器的配置项。""" + +} diff --git a/rel/i18n/zh/emqx_authn_schema.hocon b/rel/i18n/zh/emqx_authn_schema.hocon new file mode 100644 index 000000000..e6e76e8ab --- /dev/null +++ b/rel/i18n/zh/emqx_authn_schema.hocon @@ -0,0 +1,135 @@ +emqx_authn_schema { + +backend.desc: +"""后端类型。""" + +backend.label: +"""后端类型""" + +enable.desc: +"""设为 truefalse 以启用或禁用此认证数据源。""" + +enable.label: +"""启用""" + +failed.desc: +"""请求失败次数。""" + +failed.label: +"""失败""" + +matched.desc: +"""请求命中次数。""" + +matched.label: +"""已命中""" + +mechanism.desc: +"""认证机制。""" + +mechanism.label: +"""认证机制""" + +metrics.desc: +"""资源统计指标。""" + +metrics.label: +"""指标""" + +metrics_failed.desc: +"""在当前实例中找到需要的认证信息,并且实例返回认证失败的次数。""" + +metrics_failed.label: +"""实例认证失败的次数""" + +metrics_nomatch.desc: +"""在当前实例中没有找到需要的认证信息,实例被忽略的次数。""" + +metrics_nomatch.label: +"""实例被忽略的次数""" + +metrics_rate.desc: +"""实例被触发的速率。触发速率等于匹配速率 + 忽略速率,单位:次/秒。""" + +metrics_rate.label: +"""实例被触发的速率""" + +metrics_rate_last5m.desc: +"""实例5分钟内平均触发速率,单位:次/秒。""" + +metrics_rate_last5m.label: +"""实例5分钟内平均触发速率""" + +metrics_rate_max.desc: +"""实例曾经达到的最高触发速率,单位:次/秒。""" + +metrics_rate_max.label: +"""实例曾经达到的最高触发速率""" + +metrics_success.desc: +"""在当前实例中找到需要的认证信息,并且实例返回认证成功的次数。""" + +metrics_success.label: +"""实例认证成功的次数""" + +metrics_total.desc: +"""当前实例被触发的总次数。""" + +metrics_total.label: +"""当前实例被触发的总次数""" + +node.desc: +"""节点名称。""" + +node.label: +"""节点名称。""" + +node_error.desc: +"""节点上产生的错误。""" + +node_error.label: +"""节点产生的错误""" + +node_metrics.desc: +"""每个节点上资源的统计指标。""" + +node_metrics.label: +"""节点资源指标""" + +node_status.desc: +"""每个节点上资源的状态。""" + +node_status.label: +"""节点资源状态""" + +rate.desc: +"""命中速率,单位:次/秒。""" + +rate.label: +"""速率""" + +rate_last5m.desc: +"""5分钟内平均命中速率,单位:次/秒。""" + +rate_last5m.label: +"""5分钟内速率""" + +rate_max.desc: +"""最大命中速率,单位:次/秒。""" + +rate_max.label: +"""最大速率""" + +status.desc: +"""资源状态。""" + +status.label: +"""状态""" + +success.desc: +"""请求成功次数。""" + +success.label: +"""成功""" + +} diff --git a/rel/i18n/zh/emqx_authn_user_import_api.hocon b/rel/i18n/zh/emqx_authn_user_import_api.hocon new file mode 100644 index 000000000..a546066bb --- /dev/null +++ b/rel/i18n/zh/emqx_authn_user_import_api.hocon @@ -0,0 +1,9 @@ +emqx_authn_user_import_api { + +authentication_id_import_users_post.desc: +"""为全局认证链上的指定认证器导入用户数据。""" + +listeners_listener_id_authentication_id_import_users_post.desc: +"""为监听器认证链上的指定认证器导入用户数据。""" + +} diff --git a/rel/i18n/zh/emqx_authz_api_cache.hocon b/rel/i18n/zh/emqx_authz_api_cache.hocon new file mode 100644 index 000000000..94486e3e0 --- /dev/null +++ b/rel/i18n/zh/emqx_authz_api_cache.hocon @@ -0,0 +1,6 @@ +emqx_authz_api_cache { + +authorization_cache_delete.desc: +"""清除集群中所有授权数据缓存。""" + +} diff --git a/rel/i18n/zh/emqx_authz_api_mnesia.hocon b/rel/i18n/zh/emqx_authz_api_mnesia.hocon new file mode 100644 index 000000000..bc5121cc3 --- /dev/null +++ b/rel/i18n/zh/emqx_authz_api_mnesia.hocon @@ -0,0 +1,87 @@ +emqx_authz_api_mnesia { + +action.desc: +"""被授权的行为 (发布/订阅/所有)""" + +action.label: +"""行为""" + +clientid.desc: +"""客户端标识符""" + +clientid.label: +"""客户端标识符""" + +fuzzy_clientid.desc: +"""使用字串匹配模糊搜索客户端标识符""" + +fuzzy_clientid.label: +"""客户端标识符子串""" + +fuzzy_username.desc: 
+"""使用字串匹配模糊搜索用户名""" + +fuzzy_username.label: +"""用户名子串""" + +permission.desc: +"""权限""" + +permission.label: +"""权限""" + +rules_all_delete.desc: +"""删除 `all` 规则""" + +rules_all_get.desc: +"""列出为所有客户端启用的规则列表""" + +rules_all_post.desc: +"""创建/更新 为所有客户端启用的规则列表。""" + +rules_delete.desc: +"""清除内置数据库中的所有类型('users' 、'clients' 、'all')的所有规则""" + +topic.desc: +"""在指定主题上的规则""" + +topic.label: +"""主题""" + +user_clientid_delete.desc: +"""删除内置数据库中指定客户端标识符类型的规则记录""" + +user_clientid_get.desc: +"""获取内置数据库中指定客户端标识符类型的规则记录""" + +user_clientid_put.desc: +"""更新内置数据库中指定客户端标识符类型的规则记录""" + +user_username_delete.desc: +"""删除内置数据库中指定用户名类型的规则记录""" + +user_username_get.desc: +"""获取内置数据库中指定用户名类型的规则记录""" + +user_username_put.desc: +"""更新内置数据库中指定用户名类型的规则记录""" + +username.desc: +"""用户名""" + +username.label: +"""用户名""" + +users_clientid_get.desc: +"""获取内置数据库中所有客户端标识符类型的规则记录""" + +users_clientid_post.desc: +"""添加内置数据库中客户端标识符类型的规则记录""" + +users_username_get.desc: +"""获取内置数据库中所有用户名类型的规则记录""" + +users_username_post.desc: +"""添加内置数据库中用户名类型的规则记录""" + +} diff --git a/rel/i18n/zh/emqx_authz_api_schema.hocon b/rel/i18n/zh/emqx_authz_api_schema.hocon new file mode 100644 index 000000000..41e3819cd --- /dev/null +++ b/rel/i18n/zh/emqx_authz_api_schema.hocon @@ -0,0 +1,90 @@ +emqx_authz_api_schema { + +body.desc: +"""HTTP 请求体。""" + +body.label: +"""请求体""" + +cmd.desc: +"""访问控制数据查询命令。""" + +cmd.label: +"""查询命令""" + +collection.desc: +"""`MongoDB` 授权数据集。""" + +collection.label: +"""数据集""" + +enable.desc: +"""设为 truefalse 以启用或禁用此访问控制数据源。""" + +enable.label: +"""enable""" + +filter.desc: +"""在查询中定义过滤条件的条件表达式。 +过滤器支持如下占位符: +- ${username}: 将在运行时被替换为客户端连接时使用的用户名 +- ${clientid}: 将在运行时被替换为客户端连接时使用的客户端标识符""" + +filter.label: +"""过滤器""" + +headers.desc: +"""HTTP Headers 列表""" + +headers.label: +"""请求头""" + +headers_no_content_type.desc: +"""HTTP Headers 列表(无 content-type)。""" + +headers_no_content_type.label: +"""请求头(无 content-type)""" + +method.desc: +"""HTTP 请求方法。""" + +method.label: +"""method""" + +position.desc: +"""认证数据源位置。""" + +position.label: +"""位置""" + +query.desc: +"""访问控制数据查询语句。""" + +query.label: +"""查询语句""" + +request_timeout.desc: +"""请求超时时间。""" + +request_timeout.label: +"""请求超时""" + +rules.desc: +"""静态授权文件规则。""" + +rules.label: +"""规则""" + +type.desc: +"""数据后端类型。""" + +type.label: +"""type""" + +url.desc: +"""认证服务器 URL。""" + +url.label: +"""url""" + +} diff --git a/rel/i18n/zh/emqx_authz_api_settings.hocon b/rel/i18n/zh/emqx_authz_api_settings.hocon new file mode 100644 index 000000000..c78fb7a0b --- /dev/null +++ b/rel/i18n/zh/emqx_authz_api_settings.hocon @@ -0,0 +1,9 @@ +emqx_authz_api_settings { + +authorization_settings_get.desc: +"""获取授权配置""" + +authorization_settings_put.desc: +"""更新授权配置""" + +} diff --git a/rel/i18n/zh/emqx_authz_api_sources.hocon b/rel/i18n/zh/emqx_authz_api_sources.hocon new file mode 100644 index 000000000..8e9bfef9c --- /dev/null +++ b/rel/i18n/zh/emqx_authz_api_sources.hocon @@ -0,0 +1,48 @@ +emqx_authz_api_sources { + +authorization_sources_get.desc: +"""列出所有授权数据源""" + +authorization_sources_post.desc: +"""添加授权数据源""" + +authorization_sources_type_delete.desc: +"""删除指定类型的授权数据源""" + +authorization_sources_type_get.desc: +"""获取指定类型的授权数据源""" + +authorization_sources_type_move_post.desc: +"""更新授权数据源的优先执行顺序""" + +authorization_sources_type_put.desc: +"""更新指定类型的授权数据源""" + +authorization_sources_type_status_get.desc: +"""获取指定授权数据源的状态""" + +source.desc: +"""授权数据源""" + +source.label: +"""数据源""" + +source_config.desc: +"""数据源配置""" + +source_config.label: +"""数据源配置""" + +source_type.desc: +"""数据源类型""" + 
+source_type.label: +"""数据源类型""" + +sources.desc: +"""授权数据源列表""" + +sources.label: +"""数据源列表""" + +} diff --git a/rel/i18n/zh/emqx_authz_schema.hocon b/rel/i18n/zh/emqx_authz_schema.hocon new file mode 100644 index 000000000..3dd6d1c01 --- /dev/null +++ b/rel/i18n/zh/emqx_authz_schema.hocon @@ -0,0 +1,285 @@ +emqx_authz_schema { + +deny.desc: +"""授权失败的次数。""" + +deny.label: +"""授权失败次数""" + +redis_sentinel.desc: +"""使用 Redis 授权(哨兵模式)。""" + +redis_sentinel.label: +"""redis_sentinel""" + +rate.desc: +"""命中速率,单位:次/秒。""" + +rate.label: +"""速率""" + +status.desc: +"""资源状态。""" + +status.label: +"""状态""" + +method.desc: +"""HTTP 请求方法""" + +method.label: +"""method""" + +query.desc: +"""访问控制数据查询语句/查询命令。""" + +query.label: +"""查询语句""" + +metrics_total.desc: +"""授权实例被触发的总次数。""" + +metrics_total.label: +"""授权实例被触发的总次数""" + +redis_cluster.desc: +"""使用 Redis 授权(集群模式)。""" + +redis_cluster.label: +"""redis_cluster""" + +mysql.desc: +"""使用 MySOL 数据库授权""" + +mysql.label: +"""mysql""" + +postgresql.desc: +"""使用 PostgreSQL 数据库授权""" + +postgresql.label: +"""postgresql""" + +mongo_rs.desc: +"""使用 MongoDB 授权(副本集模式)""" + +mongo_rs.label: +"""mongo_rs""" + +type.desc: +"""数据后端类型""" + +type.label: +"""type""" + +mongo_sharded.desc: +"""使用 MongoDB 授权(分片集群模式)。""" + +mongo_sharded.label: +"""mongo_sharded""" + +body.desc: +"""HTTP request body。""" + +body.label: +"""Request Body""" + +url.desc: +"""授权 HTTP 服务器地址。""" + +url.label: +"""URL""" + +node.desc: +"""节点名称。""" + +node.label: +"""节点名称。""" + +headers.desc: +"""HTTP Headers 列表""" + +headers.label: +"""请求头""" + +rate_last5m.desc: +"""5分钟内平均命中速率,单位:次/秒。""" + +rate_last5m.label: +"""5分钟内速率""" + +headers_no_content_type.desc: +"""HTTP Headers 列表 (无 content-type) 。""" + +headers_no_content_type.label: +"""请求头(无 content-type)""" + +node_error.desc: +"""节点上产生的错误。""" + +node_error.label: +"""节点产生的错误""" + +mnesia.desc: +"""使用内部数据库授权(mnesia)。""" + +mnesia.label: +"""mnesia""" + +enable.desc: +"""设为 truefalse 以启用或禁用此访问控制数据源""" + +enable.label: +"""enable""" + +matched.desc: +"""请求命中次数。""" + +matched.label: +"""已命中""" + +node_status.desc: +"""每个节点上资源的状态。""" + +node_status.label: +"""节点资源状态""" + +rate_max.desc: +"""最大命中速率,单位:次/秒。""" + +rate_max.label: +"""最大速率""" + +filter.desc: +"""在查询中定义过滤条件的条件表达式。 +过滤器支持如下占位符:
+- ${username}:将在运行时被替换为客户端连接时使用的用户名
+- ${clientid}:将在运行时被替换为客户端连接时使用的客户端标识符""" + +filter.label: +"""过滤器""" + +path.desc: +"""包含 ACL 规则的文件路径。 +如果在启动 EMQX 节点前预先配置该路径, +那么可以将该文件置于任何 EMQX 可以访问到的位置。 + +如果从 EMQX Dashboard 或 HTTP API 创建或修改了规则集, +那么EMQX将会生成一个新的文件并将它存放在 `data_dir` 下的 `authz` 子目录中, +并从此弃用旧的文件。""" + +path.label: +"""path""" + +redis_single.desc: +"""使用 Redis 授权(单实例)。""" + +redis_single.label: +"""redis_single""" + +failed.desc: +"""请求失败次数。""" + +failed.label: +"""失败""" + +metrics.desc: +"""资源统计指标。""" + +metrics.label: +"""指标""" + +authorization.desc: +"""客户端授权相关配置""" + +authorization.label: +"""授权""" + +collection.desc: +"""`MongoDB` 授权数据集。""" + +collection.label: +"""数据集""" + +mongo_single.desc: +"""使用 MongoDB 授权(单实例)。""" + +mongo_single.label: +"""mongo_single""" + +file.desc: +"""使用静态文件授权""" + +file.label: +"""文件""" + +http_post.desc: +"""使用外部 HTTP 服务器授权(POST 请求)。""" + +http_post.label: +"""http_post""" + +request_timeout.desc: +"""HTTP 请求超时时长。""" + +request_timeout.label: +"""请求超时时间""" + +allow.desc: +"""授权成功的次数。""" + +allow.label: +"""授权成功次数""" + +cmd.desc: +"""访问控制数据查查询命令""" + +cmd.label: +"""查询命令""" + +nomatch.desc: +"""没有匹配到任何授权规则的次数。""" + +nomatch.label: +"""没有匹配到任何授权规则的次数""" + +sources.desc: +"""授权数据源。
+授权(ACL)数据源的列表。 +它被设计为一个数组,而不是一个散列映射, +所以可以作为链式访问控制。
+ +当授权一个 'publish' 或 'subscribe' 行为时, +该配置列表中的所有数据源将按顺序进行检查。 +如果在某个客户端未找到时(使用 ClientID 或 Username)。 +将会移动到下一个数据源。直至得到 'allow' 或 'deny' 的结果。
+ +如果在任何数据源中都未找到对应的客户端信息。 +配置的默认行为 ('authorization.no_match') 将生效。
+ +注意: +数据源使用 'type' 进行标识。 +使用同一类型的数据源多于一次不被允许。""" + +sources.label: +"""数据源""" + +node_metrics.desc: +"""每个节点上资源的统计指标。""" + +node_metrics.label: +"""节点资源指标""" + +success.desc: +"""请求成功次数。""" + +success.label: +"""成功""" + +http_get.desc: +"""使用外部 HTTP 服务器授权(GET 请求)。""" + +http_get.label: +"""http_get""" + +} diff --git a/rel/i18n/zh/emqx_auto_subscribe_api.hocon b/rel/i18n/zh/emqx_auto_subscribe_api.hocon new file mode 100644 index 000000000..7a42ece2c --- /dev/null +++ b/rel/i18n/zh/emqx_auto_subscribe_api.hocon @@ -0,0 +1,12 @@ +emqx_auto_subscribe_api { + +list_auto_subscribe_api.desc: +"""获取自动订阅主题列表""" + +update_auto_subscribe_api.desc: +"""更新自动订阅主题列表""" + +update_auto_subscribe_api_response409.desc: +"""超出自定订阅主题列表长度限制""" + +} diff --git a/rel/i18n/zh/emqx_auto_subscribe_schema.hocon b/rel/i18n/zh/emqx_auto_subscribe_schema.hocon new file mode 100644 index 000000000..f4156fe34 --- /dev/null +++ b/rel/i18n/zh/emqx_auto_subscribe_schema.hocon @@ -0,0 +1,48 @@ +emqx_auto_subscribe_schema { + +auto_subscribe.desc: +"""设备登录成功之后,通过预设的订阅表示符,为设备自动完成订阅。支持使用占位符。""" + +auto_subscribe.label: +"""自动订阅""" + +nl.desc: +"""缺省值为0, +MQTT v3.1.1:如果设备订阅了自己发布消息的主题,那么将收到自己发布的所有消息。 +MQTT v5:如果设备在订阅时将此选项设置为 1,那么服务端将不会向设备转发自己发布的消息""" + +nl.label: +"""No Local""" + +qos.desc: +"""缺省值为 0,服务质量, +QoS 0:消息最多传递一次,如果当时客户端不可用,则会丢失该消息。 +QoS 1:消息传递至少 1 次。 +QoS 2:消息仅传送一次。""" + +qos.label: +"""服务质量""" + +rap.desc: +"""缺省值为 0,这一选项用来指定服务端向客户端转发消息时是否要保留其中的 RETAIN 标识,注意这一选项不会影响保留消息中的 RETAIN 标识。因此当 Retain As Publish 选项被设置为 0 时,客户端直接依靠消息中的 RETAIN 标识来区分这是一个正常的转发消息还是一个保留消息,而不是去判断消息是否是自己订阅后收到的第一个消息(转发消息甚至可能会先于保留消息被发送,视不同 Broker 的具体实现而定)。""" + +rap.label: +"""Retain As Publish""" + +rh.desc: +"""指定订阅建立时服务端是否向客户端发送保留消息, +可选值 0:只要客户端订阅成功,服务端就发送保留消息。 +可选值 1:客户端订阅成功且该订阅此前不存在,服务端才发送保留消息。毕竟有些时候客户端重新发起订阅可能只是为了改变一下 QoS,并不意味着它想再次接收保留消息。 +可选值 2:即便客户订阅成功,服务端也不会发送保留消息。""" + +rh.label: +"""Retain Handling""" + +topic.desc: +"""订阅标识符,支持使用占位符,例如 client/${clientid}/username/${username}/host/${host}/port/${port} +必填,且不可为空字符串""" + +topic.label: +"""订阅标识符""" + +} diff --git a/rel/i18n/zh/emqx_bridge_api.hocon b/rel/i18n/zh/emqx_bridge_api.hocon new file mode 100644 index 000000000..06887a711 --- /dev/null +++ b/rel/i18n/zh/emqx_bridge_api.hocon @@ -0,0 +1,100 @@ +emqx_bridge_api { + +desc_api1.desc: +"""列出所有 Bridge""" + +desc_api1.label: +"""列出所有 Bridge""" + +desc_api2.desc: +"""通过类型和名字创建 Bridge""" + +desc_api2.label: +"""创建 Bridge""" + +desc_api3.desc: +"""通过 ID 获取 Bridge""" + +desc_api3.label: +"""获取 Bridge""" + +desc_api4.desc: +"""通过 ID 更新 Bridge""" + +desc_api4.label: +"""更新 Bridge""" + +desc_api5.desc: +"""通过 ID 删除 Bridge""" + +desc_api5.label: +"""删除 Bridge""" + +desc_api6.desc: +"""通过 ID 重置 Bridge 的计数""" + +desc_api6.label: +"""重置 Bridge 计数""" + +desc_api7.desc: +"""停止或启用所有节点上的桥接""" + +desc_api7.label: +"""集群 Bridge 操作""" + +desc_api8.desc: +"""在某个节点上停止/重新启动 Bridge。""" + +desc_api8.label: +"""单节点 Bridge 操作""" + +desc_api9.desc: +"""通过给定的 ID 测试创建一个新的桥接。
+ID 的格式必须为 ’{type}:{name}”""" + +desc_api9.label: +"""测试桥接创建""" + +desc_bridge_metrics.desc: +"""通过 Id 来获取桥接的指标信息""" + +desc_bridge_metrics.label: +"""获取桥接的指标""" + +desc_enable_bridge.desc: +"""启用或禁用所有节点上的桥接""" + +desc_enable_bridge.label: +"""是否启用集群内的桥接""" + +desc_param_path_enable.desc: +"""是否启用桥接""" + +desc_param_path_enable.label: +"""启用桥接""" + +desc_param_path_id.desc: +"""Bridge ID , 格式为 {type}:{name}""" + +desc_param_path_id.label: +"""Bridge ID""" + +desc_param_path_node.desc: +"""节点名,比如 emqx@127.0.0.1""" + +desc_param_path_node.label: +"""节点名""" + +desc_param_path_operation_cluster.desc: +"""集群可用操作:停止、重新启动""" + +desc_param_path_operation_cluster.label: +"""集群可用操作""" + +desc_param_path_operation_on_node.desc: +"""节点可用操作:停止、重新启动""" + +desc_param_path_operation_on_node.label: +"""节点可用操作""" + +} diff --git a/rel/i18n/zh/emqx_bridge_kafka.hocon b/rel/i18n/zh/emqx_bridge_kafka.hocon new file mode 100644 index 000000000..31bae51d3 --- /dev/null +++ b/rel/i18n/zh/emqx_bridge_kafka.hocon @@ -0,0 +1,354 @@ +emqx_bridge_kafka { + +connect_timeout.desc: +"""建立 TCP 连接时的最大等待时长(若启用认证,这个等待时长也包含完成认证所需时间)。""" + +connect_timeout.label: +"""连接超时""" + +producer_opts.desc: +"""本地 MQTT 数据源和 Kafka 桥接的配置。""" + +producer_opts.label: +"""MQTT 到 Kafka""" + +min_metadata_refresh_interval.desc: +"""刷新 Kafka broker 和 Kafka 主题元数据段最短时间间隔。设置太小可能会增加 Kafka 压力。""" + +min_metadata_refresh_interval.label: +"""元数据刷新最小间隔""" + +kafka_producer.desc: +"""Kafka Producer 配置。""" + +kafka_producer.label: +"""Kafka Producer""" + +producer_buffer.desc: +"""配置消息缓存的相关参数。 + +当 EMQX 需要发送的消息超过 Kafka 处理能力,或者当 Kafka 临时下线时,EMQX 内部会将消息缓存起来。""" + +producer_buffer.label: +"""消息缓存""" + +socket_send_buffer.desc: +"""TCP socket 的发送缓存调优。默认值是针对高吞吐量的一个推荐值。""" + +socket_send_buffer.label: +"""Socket 发送缓存大小""" + +desc_name.desc: +"""桥接名字,可读描述""" + +desc_name.label: +"""桥接名字""" + +consumer_offset_commit_interval_seconds.desc: +"""指定 Kafka 消费组偏移量提交的时间间隔。""" + +consumer_offset_commit_interval_seconds.label: +"""偏移提交间隔""" + +consumer_max_batch_bytes.desc: +"""设置每次从 Kafka 拉取数据的字节数。如该配置小于 Kafka 消息的大小,可能会影响消费性能。""" + +consumer_max_batch_bytes.label: +"""拉取字节数""" + +socket_receive_buffer.desc: +"""TCP socket 的收包缓存调优。默认值是针对高吞吐量的一个推荐值。""" + +socket_receive_buffer.label: +"""Socket 收包缓存大小""" + +consumer_topic_mapping.desc: +"""指定 Kafka 主题和 MQTT 主题之间的映射关系。 应至少包含一项。""" + +consumer_topic_mapping.label: +"""主题映射关系""" + +producer_kafka_opts.desc: +"""Kafka 生产者参数。""" + +producer_kafka_opts.label: +"""生产者参数""" + +kafka_topic.desc: +"""Kafka 主题名称""" + +kafka_topic.label: +"""Kafka 主题名称""" + +consumer_kafka_topic.desc: +"""指定从哪个 Kafka 主题消费消息。""" + +consumer_kafka_topic.label: +"""Kafka 主题""" + +auth_username_password.desc: +"""基于用户名密码的认证。""" + +auth_username_password.label: +"""用户名密码认证""" + +auth_sasl_password.desc: +"""SASL 认证的密码。""" + +auth_sasl_password.label: +"""密码""" + +kafka_message_timestamp.desc: +"""生成 Kafka 消息时间戳的模版。该时间必需是一个整型数值(可以是字符串格式)例如 1661326462115'1661326462115'。当所需的输入字段不存在,或不是一个整型时,则会使用当前系统时间。""" + +kafka_message_timestamp.label: +"""消息的时间戳""" + +buffer_mode.desc: +"""消息缓存模式。 +memory: 所有的消息都缓存在内存里。如果 EMQX 服务重启,缓存的消息会丢失。 +disk: 缓存到磁盘上。EMQX 重启后会继续发送重启前未发送完成的消息。 +hybrid: 先将消息缓存在内存中,当内存中的消息堆积超过一定限制(配置项 segment_bytes 描述了该限制)后,后续的消息会缓存到磁盘上。与 memory 模式一样,如果 EMQX 服务重启,缓存的消息会丢失。""" + +buffer_mode.label: +"""缓存模式""" + +consumer_mqtt_qos.desc: +"""转发 MQTT 消息时使用的 QoS。""" + +consumer_mqtt_qos.label: +"""QoS""" + +consumer_key_encoding_mode.desc: +"""通过 MQTT 转发之前,如何处理 Kafka 消息的 Key。none 使用 Kafka 消息中的 Key 原始值,不进行编码。 注意:在这种情况下,Key 必须是一个有效的 UTF-8 字符串。 +base64 
对收到的密钥或值使用 base-64 编码。""" + +consumer_key_encoding_mode.label: +"""Key 编码模式""" + +auth_gssapi_kerberos.desc: +"""使用 GSSAPI/Kerberos 认证。""" + +auth_gssapi_kerberos.label: +"""GSSAPI/Kerberos""" + +consumer_mqtt_opts.desc: +"""本地 MQTT 消息转发。""" + +consumer_mqtt_opts.label: +"""MQTT 转发""" + +auth_kerberos_principal.desc: +"""SASL GSSAPI 认证方法的 Kerberos principal,例如 client_name@MY.KERBEROS.REALM.MYDOMAIN.COM注意:这里使用的 realm 需要配置在 EMQX 服务器的 /etc/krb5.conf 中""" + +auth_kerberos_principal.label: +"""Kerberos Principal""" + +socket_opts.desc: +"""更多 Socket 参数设置。""" + +socket_opts.label: +"""Socket 参数""" + +consumer_mqtt_topic.desc: +"""设置 Kafka 消息向哪个本地 MQTT 主题转发消息。""" + +consumer_mqtt_topic.label: +"""MQTT主题""" + +consumer_offset_reset_policy.desc: +"""如不存在偏移量历史记录或历史记录失效,消费者应使用哪个偏移量开始消费。""" + +consumer_offset_reset_policy.label: +"""偏移重置策略""" + +partition_count_refresh_interval.desc: +"""配置 Kafka 刷新分区数量的时间间隔。 +EMQX 发现 Kafka 分区数量增加后,会开始按 partition_strategy 配置,把消息发送到新的分区中。""" + +partition_count_refresh_interval.label: +"""分区数量刷新间隔""" + +max_batch_bytes.desc: +"""最大消息批量字节数。大多数 Kafka 环境的默认最低值是 1 MB,EMQX 的默认值比 1 MB 更小是因为需要补偿 Kafka 消息编码所需要的额外字节(尤其是当每条消息都很小的情况下)。当单个消息的大小超过该限制时,它仍然会被发送,(相当于该批量中只有单个消息)。""" + +max_batch_bytes.label: +"""最大批量字节数""" + +required_acks.desc: +"""设置 Kafka leader 在返回给 EMQX 确认之前需要等待多少个 follower 的确认。 + +all_isr: 需要所有的在线复制者都确认。 +leader_only: 仅需要分区 leader 确认。 +none: 无需 Kafka 回复任何确认。""" + +required_acks.label: +"""Kafka 确认数量""" + +metadata_request_timeout.desc: +"""刷新元数据时最大等待时长。""" + +metadata_request_timeout.label: +"""元数据请求超时""" + +desc_type.desc: +"""桥接类型""" + +desc_type.label: +"""桥接类型""" + +socket_nodelay.desc: +"""设置‘true’让系统内核立即发送。否则当需要发送的内容很少时,可能会有一定延迟(默认 40 毫秒)。""" + +socket_nodelay.label: +"""是否关闭延迟发送""" + +authentication.desc: +"""认证参数。""" + +authentication.label: +"""认证""" + +buffer_memory_overload_protection.desc: +"""缓存模式是 memoryhybrid 时适用。当系统处于高内存压力时,从队列中丢弃旧的消息以减缓内存增长。内存压力值由配置项 sysmon.os.sysmem_high_watermark 决定。注意,该配置仅在 Linux 系统中有效。""" + +buffer_memory_overload_protection.label: +"""内存过载保护""" + +auth_sasl_mechanism.desc: +"""SASL 认证方法名称。""" + +auth_sasl_mechanism.label: +"""认证方法""" + +config_enable.desc: +"""启用(true)或停用该(false)Kafka 数据桥接。""" + +config_enable.label: +"""启用或停用""" + +consumer_mqtt_payload.desc: +"""用于转换收到的 Kafka 消息的模板。 默认情况下,它将使用 JSON 格式来序列化来自 Kafka 的所有字段。 这些字段包括:headers:一个包含字符串键值对的 JSON 对象。 +key:Kafka 消息的键(使用选择的编码方式编码)。 +offset:消息的偏移量。 +topic:Kafka 主题。 +ts: 消息的时间戳。 +ts_type:消息的时间戳类型,值可能是: createappendundefined。 +value: Kafka 消息值(使用选择的编码方式编码)。""" + +consumer_mqtt_payload.label: +"""MQTT Payload Template""" + +consumer_opts.desc: +"""本地 MQTT 转发 和 Kafka 消费者配置。""" + +consumer_opts.label: +"""MQTT 到 Kafka""" + +kafka_consumer.desc: +"""Kafka 消费者配置。""" + +kafka_consumer.label: +"""Kafka 消费者""" + +desc_config.desc: +"""Kafka 桥接配置""" + +desc_config.label: +"""Kafka 桥接配置""" + +consumer_value_encoding_mode.desc: +"""通过 MQTT 转发之前,如何处理 Kafka 消息的 Value。none 使用 Kafka 消息中的 Value 原始值,不进行编码。 注意:在这种情况下,Value 必须是一个有效的 UTF-8 字符串。 +base64 对收到的 Value 使用 base-64 编码。""" + +consumer_value_encoding_mode.label: +"""Value 编码模式""" + +buffer_per_partition_limit.desc: +"""为每个 Kafka 分区设置的最大缓存字节数。当超过这个上限之后,老的消息会被丢弃,为新的消息腾出空间。""" + +buffer_per_partition_limit.label: +"""Kafka 分区缓存上限""" + +bootstrap_hosts.desc: +"""用逗号分隔的 host[:port] 主机列表。默认端口号为 9092。""" + +bootstrap_hosts.label: +"""主机列表""" + +consumer_max_rejoin_attempts.desc: +"""消费组成员允许重新加入小组的最大次数。如超过该配置次数后仍未能成功加入消费组,则会在等待一段时间后重试。""" + +consumer_max_rejoin_attempts.label: +"""最大的重新加入尝试""" + +kafka_message_key.desc: +"""生成 Kafka 消息 Key 
的模版。如果模版生成后为空值,则会使用 Kafka 的 NULL ,而非空字符串。""" + +kafka_message_key.label: +"""消息的 Key""" + +kafka_message.desc: +"""用于生成 Kafka 消息的模版。""" + +kafka_message.label: +"""Kafka 消息模版""" + +mqtt_topic.desc: +"""MQTT 主题数据源由桥接指定,或留空由规则动作指定。""" + +mqtt_topic.label: +"""源 MQTT 主题""" + +kafka_message_value.desc: +"""生成 Kafka 消息 Value 的模版。如果模版生成后为空值,则会使用 Kafka 的 NULL,而非空字符串。""" + +kafka_message_value.label: +"""消息的 Value""" + +partition_strategy.desc: +"""设置消息发布时应该如何选择 Kafka 分区。 + +random: 为每个消息随机选择一个分区。 +key_dispatch: Hash Kafka message key to a partition number""" + +partition_strategy.label: +"""分区选择策略""" + +buffer_segment_bytes.desc: +"""当缓存模式是 diskhybrid 时适用。该配置用于指定缓存到磁盘上的文件的大小。""" + +buffer_segment_bytes.label: +"""缓存文件大小""" + +consumer_kafka_opts.desc: +"""Kafka消费者配置。""" + +consumer_kafka_opts.label: +"""Kafka 消费者""" + +max_inflight.desc: +"""设置 Kafka 生产者(每个分区一个)在收到 Kafka 的确认前最多发送多少个请求(批量)。调大这个值通常可以增加吞吐量,但是,当该值设置大于 1 时存在消息乱序的风险。""" + +max_inflight.label: +"""飞行窗口""" + +auth_sasl_username.desc: +"""SASL 认证的用户名。""" + +auth_sasl_username.label: +"""用户名""" + +auth_kerberos_keytab_file.desc: +"""SASL GSSAPI 认证方法的 Kerberos keytab 文件。注意:该文件需要上传到 EMQX 服务器中,且运行 EMQX 服务的系统账户需要有读取权限。""" + +auth_kerberos_keytab_file.label: +"""Kerberos keytab 文件""" + +compression.desc: +"""压缩方法。""" + +compression.label: +"""压缩""" + +} diff --git a/rel/i18n/zh/emqx_bridge_mqtt_schema.hocon b/rel/i18n/zh/emqx_bridge_mqtt_schema.hocon new file mode 100644 index 000000000..669d50398 --- /dev/null +++ b/rel/i18n/zh/emqx_bridge_mqtt_schema.hocon @@ -0,0 +1,21 @@ +emqx_bridge_mqtt_schema { + +config.desc: +"""MQTT Bridge 的配置。""" + +config.label: +"""配置""" + +desc_name.desc: +"""Bridge 名字,Bridge 的可读描述""" + +desc_name.label: +"""Bridge 名字""" + +desc_type.desc: +"""Bridge 的类型""" + +desc_type.label: +"""Bridge 类型""" + +} diff --git a/rel/i18n/zh/emqx_bridge_schema.hocon b/rel/i18n/zh/emqx_bridge_schema.hocon new file mode 100644 index 000000000..7512efa67 --- /dev/null +++ b/rel/i18n/zh/emqx_bridge_schema.hocon @@ -0,0 +1,158 @@ +emqx_bridge_schema { + +bridges_mqtt.desc: +"""桥接到另一个 MQTT Broker 的 MQTT Bridge""" + +bridges_mqtt.label: +"""MQTT Bridge""" + +bridges_webhook.desc: +"""转发消息到 HTTP 服务器的 WebHook""" + +bridges_webhook.label: +"""WebHook""" + +desc_bridges.desc: +"""MQTT Bridge 配置""" + +desc_bridges.label: +"""MQTT Bridge 配置""" + +desc_enable.desc: +"""启用/禁用 Bridge""" + +desc_enable.label: +"""启用/禁用 Bridge""" + +desc_metrics.desc: +"""Bridge 计数""" + +desc_metrics.label: +"""Bridge 计数""" + +desc_node_metrics.desc: +"""节点的计数器""" + +desc_node_metrics.label: +"""节点的计数器""" + +desc_node_name.desc: +"""节点的名字""" + +desc_node_name.label: +"""节点名字""" + +desc_node_status.desc: +"""节点的状态""" + +desc_node_status.label: +"""节点的状态""" + +desc_status.desc: +"""Bridge 的连接状态
+- connecting: 启动时的初始状态。
+- connected: 桥接驱动健康检查正常。
+- disconnected: 桥接未能通过健康检查。<br/>
+- stopped: 桥接处于停用状态。
+- inconsistent: 集群中有各节点汇报的状态不一致。""" + +desc_status.label: +"""Bridge 状态""" + +desc_status_reason.desc: +"""桥接连接失败的原因。""" + +desc_status_reason.label: +"""失败原因""" + +metric_dropped.desc: +"""被丢弃的消息个数。""" + +metric_dropped.label: +"""丢弃""" + +metric_dropped_other.desc: +"""因为其他原因被丢弃的消息个数。""" + +metric_dropped_other.label: +"""其他丢弃""" + +metric_dropped_queue_full.desc: +"""因为队列已满被丢弃的消息个数。""" + +metric_dropped_queue_full.label: +"""队列已满被丢弃""" + +metric_dropped_resource_not_found.desc: +"""因为资源不存在被丢弃的消息个数。""" + +metric_dropped_resource_not_found.label: +"""资源不存在被丢弃""" + +metric_dropped_resource_stopped.desc: +"""因为资源已停用被丢弃的消息个数。""" + +metric_dropped_resource_stopped.label: +"""资源停用被丢弃""" + +metric_inflight.desc: +"""已异步地发送但没有收到 ACK 的消息个数。""" + +metric_inflight.label: +"""已发送未确认""" + +metric_matched.desc: +"""Bridge 被匹配到(被请求)的次数。""" + +metric_matched.label: +"""匹配次数""" + +metric_queuing.desc: +"""当前被缓存到磁盘队列的消息个数。""" + +metric_queuing.label: +"""被缓存""" + +metric_rate.desc: +"""执行操作的速率,次/秒""" + +metric_rate.label: +"""速率""" + +metric_rate_last5m.desc: +"""5 分钟平均速率,次/秒""" + +metric_rate_last5m.label: +"""5 分钟平均速率""" + +metric_rate_max.desc: +"""执行操作的最大速率,次/秒""" + +metric_rate_max.label: +"""执行操作的最大速率""" + +metric_received.desc: +"""从远程系统收到的消息个数。""" + +metric_received.label: +"""已接收""" + +metric_retried.desc: +"""重试的次数。""" + +metric_retried.label: +"""已重试""" + +metric_sent_failed.desc: +"""发送失败的消息个数。""" + +metric_sent_failed.label: +"""发送失败""" + +metric_sent_success.desc: +"""已经发送成功的消息个数。""" + +metric_sent_success.label: +"""发送成功""" + +} diff --git a/rel/i18n/zh/emqx_bridge_webhook_schema.hocon b/rel/i18n/zh/emqx_bridge_webhook_schema.hocon new file mode 100644 index 000000000..d7dd9dae0 --- /dev/null +++ b/rel/i18n/zh/emqx_bridge_webhook_schema.hocon @@ -0,0 +1,87 @@ +emqx_bridge_webhook_schema { + +config_body.desc: +"""HTTP 请求的正文。
+如果没有设置该字段,请求正文将是包含所有可用字段的 JSON object。
+如果该 webhook 是由于收到 MQTT 消息触发的,'所有可用字段' 将是 MQTT 消息的 +上下文信息;如果该 webhook 是由于规则触发的,'所有可用字段' 则为触发事件的上下文信息。
+允许使用带有变量的模板。""" + +config_body.label: +"""HTTP 请求正文""" + +config_direction.desc: +"""已废弃,Bridge 的方向,必须是 egress""" + +config_direction.label: +"""Bridge 方向""" + +config_enable.desc: +"""启用/禁用 Bridge""" + +config_enable.label: +"""启用/禁用 Bridge""" + +config_headers.desc: +"""HTTP 请求的标头。
+允许使用带有变量的模板。""" + +config_headers.label: +"""HTTP 请求标头""" + +config_local_topic.desc: +"""发送到 'local_topic' 的消息都会转发到 HTTP 服务器。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 HTTP 服务器。""" + +config_local_topic.label: +"""本地 Topic""" + +config_max_retries.desc: +"""HTTP 请求失败最大重试次数""" + +config_max_retries.label: +"""HTTP 请求重试次数""" + +config_method.desc: +"""HTTP 请求的方法。 所有可用的方法包括:post、put、get、delete。
+允许使用带有变量的模板。""" + +config_method.label: +"""HTTP 请求方法""" + +config_request_timeout.desc: +"""HTTP 请求超时""" + +config_request_timeout.label: +"""HTTP 请求超时""" + +config_url.desc: +"""HTTP Bridge 的 URL。
+路径中允许使用带变量的模板,但 host 和 port 不允许使用变量模板。<br/>
+例如, http://localhost:9901/${topic} 是允许的, +但是 http://${host}:9901/message +或 http://localhost:${port}/message +不允许。""" + +config_url.label: +"""HTTP Bridge""" + +desc_config.desc: +"""HTTP Bridge 配置""" + +desc_config.label: +"""HTTP Bridge 配置""" + +desc_name.desc: +"""Bridge 名字,Bridge 的可读描述""" + +desc_name.label: +"""Bridge 名字""" + +desc_type.desc: +"""Bridge 类型""" + +desc_type.label: +"""Bridge 类型""" + +} diff --git a/rel/i18n/zh/emqx_coap_api.hocon b/rel/i18n/zh/emqx_coap_api.hocon new file mode 100644 index 000000000..f3bee543e --- /dev/null +++ b/rel/i18n/zh/emqx_coap_api.hocon @@ -0,0 +1,27 @@ +emqx_coap_api { + +content_type.desc: +"""Payload 类型""" + +message_id.desc: +"""消息 ID""" + +method.desc: +"""请求 Method 类型""" + +payload.desc: +"""Payload 内容""" + +response_code.desc: +"""应答码""" + +send_coap_request.desc: +"""发送 CoAP 消息到指定客户端""" + +timeout.desc: +"""请求超时时间""" + +token.desc: +"""消息 Token, 可以为空""" + +} diff --git a/rel/i18n/zh/emqx_coap_schema.hocon b/rel/i18n/zh/emqx_coap_schema.hocon new file mode 100644 index 000000000..132c0f03a --- /dev/null +++ b/rel/i18n/zh/emqx_coap_schema.hocon @@ -0,0 +1,37 @@ +emqx_coap_schema { + +coap.desc: +"""CoAP 网关配置。 +该网关的实现基于 RFC-7252 和 https://core-wg.github.io/coap-pubsub/draft-ietf-core-pubsub.html""" + +coap_connection_required.desc: +"""是否开启连接模式。 +连接模式是非标准协议的功能。它维护 CoAP 客户端上线、认证、和连接状态的保持""" + +coap_heartbeat.desc: +"""CoAP 网关要求客户端的最小心跳间隔时间。 +当 connection_required 开启后,该参数用于检查客户端连接是否存活""" + +coap_notify_type.desc: +"""投递给 CoAP 客户端的通知消息类型。当客户端 Observe 一个资源(或订阅某个主题)时,网关会向客户端推送新产生的消息。其消息类型可设置为:
+ - non: 不需要客户端返回确认消息;
+ - con: 需要客户端返回一个确认消息;
+ - qos: 取决于消息的 QoS 等级; QoS 0 会以 `non` 类型下发,QoS 1/2 会以 `con` 类型下发""" + +coap_publish_qos.desc: +"""客户端发布请求的默认 QoS 等级。 +当 CoAP 客户端发起发布请求时,如果未携带 `qos` 参数则会使用该默认值。默认值可设置为:
+ - qos0、qos1、qos2: 设置为固定的 QoS 等级
+ - coap: 依据发布操作的 CoAP 报文类型来动态决定
+ * 当发布请求为 `non-confirmable` 类型时,取值为 qos0
+ * 当发布请求为 `confirmable` 类型时,取值为 qos1""" + +coap_subscribe_qos.desc: +"""客户端订阅请求的默认 QoS 等级。 +当 CoAP 客户端发起订阅请求时,如果未携带 `qos` 参数则会使用该默认值。默认值可设置为:
+ - qos0、qos1、qos2: 设置为固定的 QoS 等级<br/>
+ - coap: 依据订阅操作的 CoAP 报文类型来动态决定
+ * 当订阅请求为 `non-confirmable` 类型时,取值为 qos0
+ * 当订阅请求为 `confirmable` 类型时,取值为 qos1""" + +} diff --git a/rel/i18n/zh/emqx_conf_schema.hocon b/rel/i18n/zh/emqx_conf_schema.hocon new file mode 100644 index 000000000..4c1edbdee --- /dev/null +++ b/rel/i18n/zh/emqx_conf_schema.hocon @@ -0,0 +1,774 @@ +emqx_conf_schema { + +common_handler_drop_mode_qlen.desc: +"""当缓冲的日志事件数大于此值时,新的日志事件将被丢弃。起到过载保护的功能。 +为了使过载保护算法正常工作必须要: sync_mode_qlen =< drop_mode_qlen =< flush_qlen 且 drop_mode_qlen > 1 +要禁用某些模式,请执行以下操作。 +- 如果sync_mode_qlen被设置为0,所有的日志事件都被同步处理。也就是说,异步日志被禁用。 +- 如果sync_mode_qlen被设置为与drop_mode_qlen相同的值,同步模式被禁用。也就是说,处理程序总是以异步模式运行,除非调用drop或flushing。 +- 如果drop_mode_qlen被设置为与flush_qlen相同的值,则drop模式被禁用,永远不会发生。""" + +common_handler_drop_mode_qlen.label: +"""进入丢弃模式的队列长度""" + +cluster_mcast_addr.desc: +"""指定多播 IPv4 地址。 +当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。""" + +cluster_mcast_addr.label: +"""多播地址""" + +desc_cluster_dns.desc: +"""DNS SRV 记录服务发现。""" + +desc_cluster_dns.label: +"""DNS SRV 记录服务发现""" + +cluster_dns_name.desc: +"""指定 DNS A 记录的名字。emqx 会通过访问这个 DNS A 记录来获取 IP 地址列表。 +当cluster.discovery_strategydns 时有效。""" + +cluster_dns_name.label: +"""DNS名称""" + +rpc_keyfile.desc: +"""rpc.certfile 的私钥文件的路径。
+注意:此文件内容是私钥,所以需要设置权限为 600。""" + +rpc_keyfile.label: +"""RPC 私钥文件""" + +cluster_mcast_recbuf.desc: +"""接收数据报的内核级缓冲区的大小。 +当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。""" + +cluster_mcast_recbuf.label: +"""多播接收数据缓冲区""" + +cluster_autoheal.desc: +"""集群脑裂自动恢复机制开关。""" + +cluster_autoheal.label: +"""节点脑裂自动修复机制""" + +log_overload_kill_enable.desc: +"""日志处理进程过载时为保护自己节点其它的业务能正常,强制杀死日志处理进程。""" + +log_overload_kill_enable.label: +"""日志处理进程过载保护""" + +node_etc_dir.desc: +"""etc 存放目录""" + +node_etc_dir.label: +"""Etc 目录""" + +cluster_proto_dist.desc: +"""分布式 Erlang 集群协议类型。可选值为:
+- inet_tcp: 使用 IPv4
+- inet_tls: 使用 TLS,需要配合 etc/ssl_dist.conf 一起使用。""" + +cluster_proto_dist.label: +"""集群内部通信协议""" + +log_burst_limit_enable.desc: +"""启用日志限流保护机制。""" + +log_burst_limit_enable.label: +"""日志限流保护""" + +dist_buffer_size.desc: +"""Erlang分布式缓冲区的繁忙阈值,单位是KB。""" + +dist_buffer_size.label: +"""Erlang分布式缓冲区的繁忙阈值(KB)""" + +common_handler_max_depth.desc: +"""Erlang 内部格式日志格式化和 Erlang 进程消息队列检查的最大深度。""" + +common_handler_max_depth.label: +"""最大深度""" + +desc_log.desc: +"""EMQX 日志记录支持日志事件的多个接收器。 每个接收器由一个_log handler_表示,可以独立配置。""" + +desc_log.label: +"""日志""" + +common_handler_flush_qlen.desc: +"""如果缓冲日志事件的数量增长大于此阈值,则会发生冲刷(删除)操作。 日志处理进程会丢弃缓冲的日志消息。 +来缓解自身不会由于内存瀑涨而影响其它业务进程。日志内容会提醒有多少事件被删除。""" + +common_handler_flush_qlen.label: +"""冲刷阈值""" + +common_handler_chars_limit.desc: +"""设置单个日志消息的最大长度。 如果超过此长度,则日志消息将被截断。最小可设置的长度为100。 +注意:如果日志格式为 JSON,限制字符长度可能会导致截断不完整的 JSON 数据。""" + +common_handler_chars_limit.label: +"""单条日志长度限制""" + +cluster_k8s_namespace.desc: +"""当使用 k8s 方式并且 cluster.k8s.address_type 指定为 dns 类型时, +可设置 emqx 节点名的命名空间。与 cluster.k8s.suffix 一起使用用以拼接得到节点名列表。""" + +cluster_k8s_namespace.label: +"""K8s 命名空间""" + +node_name.desc: +"""节点名。格式为 \@\。其中 可以是 IP 地址,也可以是 FQDN。 +详见 http://erlang.org/doc/reference_manual/distributed.html。""" + +node_name.label: +"""节点名""" + +rpc_port_discovery.desc: +"""manual: 通过 tcp_server_port 来发现端口。 +
stateless: 使用无状态的方式来发现端口,使用如下算法。如果节点名称是 +emqxN@127.0.0.1, N 是一个数字,那么监听端口就是 5370 + N。""" + +rpc_port_discovery.label: +"""RPC 端口发现策略""" + +log_overload_kill_restart_after.desc: +"""处理进程停止后,会在该延迟时间后自动重新启动。除非该值设置为 infinity,这会阻止任何后续的重启。""" + +log_overload_kill_restart_after.label: +"""处理进程重启延迟""" + +log_file_handler_max_size.desc: +"""此参数控制日志文件轮换。 `infinity` 意味着日志文件将无限增长,否则日志文件将在达到 `max_size`(以字节为单位)时进行轮换。 +与 rotation count配合使用。如果 counter 为 10,则是10个文件轮换。""" + +log_file_handler_max_size.label: +"""日志文件轮换大小""" + +desc_log_file_handler.desc: +"""日志处理进程将日志事件打印到文件。""" + +desc_log_file_handler.label: +"""文件日志处理进程""" + +rpc_socket_keepalive_count.desc: +"""keepalive 探测消息发送失败的次数,直到 RPC 连接被认为已经断开。""" + +rpc_socket_keepalive_count.label: +"""RPC Socket Keepalive 次数""" + +cluster_etcd_server.desc: +"""指定 etcd 服务的地址。如有多个服务使用逗号 , 分隔。 +当 cluster.discovery_strategy 为 etcd 时,此配置项才有效。""" + +cluster_etcd_server.label: +"""Etcd 服务器地址""" + +db_backend.desc: +"""配置后端数据库驱动,默认值为 rlog 它适用于大规模的集群。 +mnesia 是备选数据库,适合中小集群。""" + +db_backend.label: +"""内置数据库""" + +desc_authorization.desc: +"""授权相关""" + +desc_authorization.label: +"""授权""" + +cluster_etcd_ssl.desc: +"""当使用 TLS 连接 etcd 时的配置选项。 +当 cluster.discovery_strategy 为 etcd 时,此配置项才有效。""" + +cluster_etcd_ssl.label: +"""Etcd SSL 选项""" + +rpc_insecure_fallback.desc: +"""兼容旧的无鉴权模式""" + +rpc_insecure_fallback.label: +"""向后兼容旧的无鉴权模式""" + +cluster_mcast_buffer.desc: +"""用户级缓冲区的大小。 +当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。""" + +cluster_mcast_buffer.label: +"""多播用户级缓冲区""" + +rpc_authentication_timeout.desc: +"""远程节点认证的超时时间。""" + +rpc_authentication_timeout.label: +"""RPC 认证超时时间""" + +cluster_call_retry_interval.desc: +"""当集群间调用出错时,多长时间重试一次。""" + +cluster_call_retry_interval.label: +"""重试时间间隔""" + +cluster_mcast_sndbuf.desc: +"""外发数据报的内核级缓冲区的大小。 +当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。""" + +cluster_mcast_sndbuf.label: +"""多播发送缓存区""" + +rpc_driver.desc: +"""集群间通信使用的传输协议。""" + +rpc_driver.label: +"""RPC 驱动""" + +max_ets_tables.desc: +"""Erlang ETS 表的最大数量""" + +max_ets_tables.label: +"""Erlang 表的最大数量""" + +desc_db.desc: +"""内置数据库的配置。""" + +desc_db.label: +"""数据库""" + +desc_cluster_etcd.desc: +"""使用 'etcd' 服务的服务发现。""" + +desc_cluster_etcd.label: +"""'etcd' 服务的服务发现""" + +cluster_name.desc: +"""EMQX集群名称。每个集群都有一个唯一的名称。服务发现时会用于做路径的一部分。""" + +cluster_name.label: +"""集群名称""" + +log_rotation_enable.desc: +"""启用日志轮换功能。启动后生成日志文件后缀会加上对应的索引数字,比如:log/emqx.log.1。 +系统会默认生成*.siz/*.idx用于记录日志位置,请不要手动修改这两个文件。""" + +log_rotation_enable.label: +"""日志轮换""" + +cluster_call_cleanup_interval.desc: +"""清理过期事务的时间间隔""" + +cluster_call_cleanup_interval.label: +"""清理间隔""" + +desc_cluster_static.desc: +"""静态节点服务发现。新节点通过连接一个节点来加入集群。""" + +desc_cluster_static.label: +"""静态节点服务发现""" + +db_default_shard_transport.desc: +"""定义用于推送事务日志的默认传输。
+这可以在 db.shard_transports 中基于每个分片被覆盖。 +gen_rpc 使用 gen_rpc 库, +distr 使用 Erlang 发行版。""" + +db_default_shard_transport.label: +"""事务日志传输默认协议""" + +cluster_static_seeds.desc: +"""集群中的EMQX节点名称列表, +指定固定的节点列表,多个节点间使用逗号 , 分隔。 +当 cluster.discovery_strategy 为 static 时,此配置项才有效。 +适合于节点数量较少且固定的集群。""" + +cluster_static_seeds.label: +"""集群静态节点""" + +log_overload_kill_qlen.desc: +"""允许的最大队列长度。""" + +log_overload_kill_qlen.label: +"""最大队列长度""" + +node_backtrace_depth.desc: +"""错误信息中打印的最大堆栈层数""" + +node_backtrace_depth.label: +"""最大堆栈导数""" + +desc_log_burst_limit.desc: +"""短时间内产生的大量日志事件可能会导致问题,例如: + - 日志文件变得非常大 + - 日志文件轮换过快,有用信息被覆盖 + - 对系统的整体性能影响 + +日志突发限制功能可以暂时禁用日志记录以避免这些问题。""" + +desc_log_burst_limit.label: +"""日志突发限制""" + +common_handler_enable.desc: +"""启用此日志处理进程。""" + +common_handler_enable.label: +"""启用日志处理进程""" + +cluster_k8s_service_name.desc: +"""指定 Kubernetes 中 EMQX 的服务名。 +当 cluster.discovery_strategy 为 k8s 时,此配置项才有效。""" + +cluster_k8s_service_name.label: +"""K8s 服务别名""" + +log_rotation_count.desc: +"""轮换的最大日志文件数。""" + +log_rotation_count.label: +"""最大日志文件数""" + +node_cookie.desc: +"""分布式 Erlang 集群使用的 cookie 值。集群间保持一致""" + +node_cookie.label: +"""节点 Cookie""" + +db_role.desc: +"""选择节点的角色。
+core 节点提供数据的持久性,并负责写入。建议将核心节点放置在不同的机架或不同的可用区。
+replicant 节点是临时工作节点。从集群中删除它们,不影响数据库冗余。<br/>
+建议复制节点多于核心节点。
+注意:该参数仅在设置backend时生效到 rlog。""" + +db_role.label: +"""数据库角色""" + +rpc_tcp_server_port.desc: +"""RPC 本地服务使用的 TCP 端口。
+只有当 rpc.port_discovery 设置为 manual 时,此配置才会生效。""" + +rpc_tcp_server_port.label: +"""RPC TCP 服务监听端口""" + +desc_console_handler.desc: +"""日志处理进程将日志事件打印到 EMQX 控制台。""" + +desc_console_handler.label: +"""Console Handler""" + +node_applications.desc: +"""当新EMQX 加入集群时,应重启的Erlang应用程序的列表。""" + +node_applications.label: +"""应用""" + +log_burst_limit_max_count.desc: +"""在 `window_time` 间隔内处理的最大日志事件数。 达到限制后,将丢弃连续事件,直到 `window_time` 结束。""" + +log_burst_limit_max_count.label: +"""日志事件数""" + +rpc_tcp_client_num.desc: +"""设置本节点与远程节点之间的 RPC 通信通道的最大数量。""" + +rpc_tcp_client_num.label: +"""RPC TCP 客户端数量""" + +cluster_k8s_address_type.desc: +"""当使用 k8s 方式集群时,address_type 用来从 Kubernetes 接口的应答里获取什么形式的 Host 列表。 +指定 cluster.k8s.address_typeip,则将从 Kubernetes 接口中获取集群中其他节点 +的IP地址。""" + +cluster_k8s_address_type.label: +"""K8s 地址类型""" + +rpc_socket_sndbuf.desc: +"""TCP 调节参数。TCP 发送缓冲区大小。""" + +rpc_socket_sndbuf.label: +"""RPC 套接字发送缓冲区大小""" + +cluster_mcast_ttl.desc: +"""指定多播的 Time-To-Live 值。 +当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。""" + +cluster_mcast_ttl.label: +"""多播TTL""" + +db_core_nodes.desc: +"""当前节点连接的核心节点列表。
+注意:该参数仅在设置backend时生效到 rlog +并且设置rolereplicant时生效。
+该值需要在手动或静态集群发现机制下设置。
+如果使用了自动集群发现机制(如etcd),则不需要设置该值。""" + +db_core_nodes.label: +"""数据库核心节点""" + +log_file_handler_file.desc: +"""日志文件路径及名字。""" + +log_file_handler_file.label: +"""日志文件名字""" + +node_dist_net_ticktime.desc: +"""系统调优参数,此配置将覆盖 vm.args 文件里的 -kernel net_ticktime 参数。当一个节点持续无响应多久之后,认为其已经宕机并断开连接。""" + +node_dist_net_ticktime.label: +"""节点间心跳间隔""" + +desc_cluster_k8s.desc: +"""Kubernetes 服务发现。""" + +desc_cluster_k8s.label: +"""Kubernetes 服务发现""" + +desc_cluster_mcast.desc: +"""UDP 组播服务发现。""" + +desc_cluster_mcast.label: +"""UDP 组播服务发现""" + +rpc_cacertfile.desc: +"""验证 rpc.certfile 的 CA 证书文件的路径。
+注意:集群中所有节点的证书必须使用同一个 CA 签发。""" + +rpc_cacertfile.label: +"""RPC CA 证书文件""" + +desc_node.desc: +"""节点名称、Cookie、配置文件、数据目录和 Erlang 虚拟机(BEAM)启动参数。""" + +desc_node.label: +"""节点""" + +cluster_k8s_apiserver.desc: +"""指定 Kubernetes API Server。如有多个 Server 使用逗号 , 分隔。 +当 cluster.discovery_strategy 为 k8s 时,此配置项才有效。""" + +cluster_k8s_apiserver.label: +"""K8s 服务地址""" + +common_handler_supervisor_reports.desc: +"""Supervisor 报告的类型。默认为 error 类型。
+ - error:仅记录 Erlang 进程中的错误。 + - progress:除了 error 信息外,还需要记录进程启动的详细信息。""" + +common_handler_supervisor_reports.label: +"""报告类型""" + +node_data_dir.desc: +"""节点数据存放目录,可能会自动创建的子目录如下:
+- `mnesia/`。EMQX的内置数据库目录。例如,`mnesia/emqx@127.0.0.1`。
+如果节点要被重新命名(例如改为 `emqx@10.0.1.1`),旧目录应该首先被删除。<br/>
+- `configs`。在启动时生成的配置,以及集群/本地覆盖的配置。
+- `patches`: 热补丁文件将被放在这里。
+- `trace`: 日志跟踪文件。
+ +**注意**: 一个数据dir不能被两个或更多的EMQX节点同时使用。""" + +node_data_dir.label: +"""节点数据目录""" + +cluster_k8s_suffix.desc: +"""当使用 k8s 方式并且 cluster.k8s.address_type 指定为 dns 类型时,可设置 emqx 节点名的后缀。 +与 cluster.k8s.namespace 一起使用用以拼接得到节点名列表。""" + +cluster_k8s_suffix.label: +"""K8s 前缀""" + +db_rpc_module.desc: +"""集群间推送事务日志到复制节点使用的协议。""" + +db_rpc_module.label: +"""RPC协议""" + +cluster_etcd_prefix.desc: +"""指定 etcd 路径的前缀。每个节点在 etcd 中都会创建一个路径: +v2/keys///
+当 cluster.discovery_strategy 为 etcd 时,此配置项才有效。""" + +cluster_etcd_prefix.label: +"""Etcd 路径前缀""" + +cluster_mcast_iface.desc: +"""指定节点发现服务需要绑定到本地 IP 地址。 +当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。""" + +cluster_mcast_iface.label: +"""多播绑定地址""" + +log_burst_limit_window_time.desc: +"""参考 max_count。""" + +log_burst_limit_window_time.label: +"""Window Time""" + +cluster_dns_record_type.desc: +"""DNS 记录类型。""" + +cluster_dns_record_type.label: +"""DNS记录类型""" + +cluster_autoclean.desc: +"""指定多久之后从集群中删除离线节点。""" + +cluster_autoclean.label: +"""自动删除离线节点时间""" + +process_limit.desc: +"""Erlang系统同时存在的最大进程数。 +实际选择的最大值可能比设置的数字大得多。 +参考: https://www.erlang.org/doc/man/erl.html""" + +process_limit.label: +"""Erlang 最大进程数""" + +max_ports.desc: +"""Erlang系统同时存在的最大端口数。 +实际选择的最大值可能比设置的数字大得多。 +参考: https://www.erlang.org/doc/man/erl.html""" + +max_ports.label: +"""Erlang 最大端口数""" + +desc_log_rotation.desc: +"""默认情况下,日志存储在 `./log` 目录(用于从 zip 文件安装)或 `/var/log/emqx`(用于二进制安装)。
+这部分配置,控制每个日志处理进程保留的文件数量。""" + +desc_log_rotation.label: +"""日志轮换""" + +desc_log_overload_kill.desc: +"""日志过载终止,具有过载保护功能。当日志处理进程使用过多内存,或者缓存的日志消息过多时该功能被激活。
+检测到过载时,日志处理进程将终止,并在冷却期后重新启动。""" + +desc_log_overload_kill.label: +"""日志过载保护""" + +authorization.desc: +"""授权(ACL)。EMQX 支持完整的客户端访问控制(ACL)。""" + +authorization.label: +"""授权""" + +rpc_socket_keepalive_idle.desc: +"""broker 之间的连接在最后一条消息发送后保持打开的时间。""" + +rpc_socket_keepalive_idle.label: +"""RPC Socket Keepalive Idle""" + +desc_cluster_call.desc: +"""集群调用功能的选项。""" + +desc_cluster_call.label: +"""集群调用""" + +cluster_mcast_ports.desc: +"""指定多播端口。如有多个端口使用逗号 , 分隔。 +当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。""" + +cluster_mcast_ports.label: +"""多播端口""" + +log_overload_kill_mem_size.desc: +"""日志处理进程允许使用的最大内存。""" + +log_overload_kill_mem_size.label: +"""日志处理进程允许使用的最大内存""" + +rpc_connect_timeout.desc: +"""建立 RPC 连接的超时时间。""" + +rpc_connect_timeout.label: +"""RPC 连接超时时间""" + +cluster_etcd_node_ttl.desc: +"""指定 etcd 中节点信息的过期时间。 +当 cluster.discovery_strategy 为 etcd 时,此配置项才有效。""" + +cluster_etcd_node_ttl.label: +"""Etcd 节点过期时间""" + +rpc_call_receive_timeout.desc: +"""同步 RPC 的回复超时时间。""" + +rpc_call_receive_timeout.label: +"""RPC 调用接收超时时间""" + +rpc_socket_recbuf.desc: +"""TCP 调节参数。TCP 接收缓冲区大小。""" + +rpc_socket_recbuf.label: +"""RPC 套接字接收缓冲区大小""" + +db_tlog_push_mode.desc: +"""同步模式下,核心节点等待复制节点的确认信息,然后再发送下一条事务日志。""" + +db_tlog_push_mode.label: +"""Tlog推送模式""" + +node_crash_dump_bytes.desc: +"""限制崩溃文件的大小,当崩溃时节点内存太大, +如果为了保存现场,需要全部存到崩溃文件中,此处限制最多能保存多大的文件。 +如果超过此限制,崩溃转储将被截断。如果设置为0,系统不会尝试写入崩溃转储文件。""" + +node_crash_dump_bytes.label: +"""崩溃文件最大容量""" + +rpc_certfile.desc: +"""TLS 证书文件的路径,用于验证集群节点的身份。 +只有当 rpc.driver 设置为 ssl 时,此配置才会生效。""" + +rpc_certfile.label: +"""RPC 证书文件""" + +node_crash_dump_seconds.desc: +"""该配置给出了运行时系统允许花费的写入崩溃转储的秒数。当给定的秒数已经过去,运行时系统将被终止。
+- 如果设置为0秒,运行时会立即终止,不会尝试写入崩溃转储文件。
+- 如果设置为一个正数 S,节点会等待 S 秒以完成崩溃转储文件的写入,然后用 SIGALRM 信号终止运行时系统。<br/>
+- 如果设置为一个负值导致运行时系统的终止等待无限期地直到崩溃转储文件已经完全写入。""" + +node_crash_dump_seconds.label: +"""保存崩溃文件最长时间""" + +log_file_handlers.desc: +"""输出到文件的日志处理进程列表""" + +log_file_handlers.label: +"""File Handler""" + +node_global_gc_interval.desc: +"""系统调优参数,设置节点运行多久强制进行一次全局垃圾回收。禁用设置为 disabled。""" + +node_global_gc_interval.label: +"""全局垃圾回收""" + +common_handler_time_offset.desc: +"""日志中的时间戳使用的时间偏移量。 +可选值为: + - system: 本地系统使用的时区偏移量 + - utc: 0 时区的偏移量 + - +-[hh]:[mm]: 自定义偏移量,比如 "-02:00" 或者 "+00:00" +默认值为本地系统的时区偏移量:system。""" + +common_handler_time_offset.label: +"""时间偏移量""" + +rpc_mode.desc: +"""在 sync 模式下,发送端等待接收端的 ack信号。""" + +rpc_mode.label: +"""RPC 模式""" + +node_crash_dump_file.desc: +"""设置 Erlang crash_dump 文件的存储路径和文件名。""" + +node_crash_dump_file.label: +"""节点崩溃时的Dump文件""" + +cluster_mcast_loop.desc: +"""设置多播的报文是否投递到本地回环地址。 +当 cluster.discovery_strategy 为 mcast 时,此配置项才有效。""" + +cluster_mcast_loop.label: +"""多播回环开关""" + +rpc_socket_keepalive_interval.desc: +"""keepalive 消息的间隔。""" + +rpc_socket_keepalive_interval.label: +"""RPC Socket Keepalive 间隔""" + +common_handler_level.desc: +"""当前日志处理进程的日志级别。 +默认为 warning 级别。""" + +common_handler_level.label: +"""日志级别""" + +desc_rpc.desc: +"""EMQX 使用 gen_rpc 库来实现跨节点通信。
+大多数情况下,默认的配置应该可以工作,但如果你需要做一些性能优化或者实验,可以尝试调整这些参数。""" + +desc_rpc.label: +"""RPC""" + +rpc_ssl_server_port.desc: +"""RPC 本地服务使用的监听SSL端口。
+只有当 rpc.port_discovery 设置为 manual 且 dirver 设置为 ssl, +此配置才会生效。""" + +rpc_ssl_server_port.label: +"""RPC SSL 服务监听端口""" + +desc_cluster.desc: +"""EMQX 节点可以组成一个集群,以提高总容量。
这里指定了节点之间如何连接。""" + +desc_cluster.label: +"""集群""" + +common_handler_sync_mode_qlen.desc: +"""只要缓冲的日志事件的数量低于这个值,所有的日志事件都会被异步处理。 +这意味着,日志落地速度不会影响正常的业务进程,因为它们不需要等待日志处理进程的响应。 +如果消息队列的增长超过了这个值,处理程序开始同步处理日志事件。也就是说,发送事件的客户进程必须等待响应。 +当处理程序将消息队列减少到低于sync_mode_qlen阈值的水平时,异步操作就会恢复。 +默认为100条信息,当等待的日志事件大于100条时,就开始同步处理日志。""" + +common_handler_sync_mode_qlen.label: +"""进入异步模式的队列长度""" + +common_handler_formatter.desc: +"""选择日志格式类型。 text 用于纯文本,json 用于结构化日志记录。""" + +common_handler_formatter.label: +"""日志格式类型""" + +rpc_async_batch_size.desc: +"""异步模式下,发送的批量消息的最大数量。""" + +rpc_async_batch_size.label: +"""异步模式下的批量消息数量""" + +cluster_call_max_history.desc: +"""集群间调用最多保留的历史记录数。只用于排错时查看。""" + +cluster_call_max_history.label: +"""最大历史记录""" + +cluster_discovery_strategy.desc: +"""集群节点发现方式。可选值为: +- manual: 使用 emqx ctl cluster 命令管理集群。
+- static: 配置静态节点。配置几个固定的节点,新节点通过连接固定节点中的某一个来加入集群。
+- dns: 使用 DNS A 记录的方式发现节点。
+- etcd: 使用 etcd 发现节点。
+- k8s: 使用 Kubernetes API 发现节点。""" + +cluster_discovery_strategy.label: +"""集群服务发现策略""" + +rpc_send_timeout.desc: +"""发送 RPC 请求的超时时间。""" + +rpc_send_timeout.label: +"""RPC 发送超时时间""" + +common_handler_single_line.desc: +"""如果设置为 true,则单行打印日志。 否则,日志消息可能跨越多行。""" + +common_handler_single_line.label: +"""单行模式""" + +rpc_socket_buffer.desc: +"""TCP 调节参数。用户模式套接字缓冲区大小。""" + +rpc_socket_buffer.label: +"""RPC 套接字缓冲区大小""" + +db_shard_transports.desc: +"""允许为每个 shard 下的事务日志复制操作的传输方法进行调优。
+gen_rpc 使用 gen_rpc 库, +distr 使用 Erlang 自带的 rpc 库。
如果未指定, +默认是使用 db.default_shard_transport 中设置的值。""" + +db_shard_transports.label: +"""事务日志传输协议""" + +} diff --git a/rel/i18n/zh/emqx_connector_api.hocon b/rel/i18n/zh/emqx_connector_api.hocon new file mode 100644 index 000000000..ab651c102 --- /dev/null +++ b/rel/i18n/zh/emqx_connector_api.hocon @@ -0,0 +1,46 @@ +emqx_connector_api { + +conn_get.desc: +"""列出所有连接器""" + +conn_get.label: +"""列出所有连接器""" + +conn_id_delete.desc: +"""通过 ID 删除一个连接器""" + +conn_id_delete.label: +"""删除连接器""" + +conn_id_get.desc: +"""通过 ID 获取连接器""" + +conn_id_get.label: +"""获取连接器""" + +conn_id_put.desc: +"""通过 ID 更新一个连接器""" + +conn_id_put.label: +"""更新连接器""" + +conn_post.desc: +"""创建一个新的连接器""" + +conn_post.label: +"""创建连接器""" + +conn_test_post.desc: +"""通过给定的 ID 测试创建一个新的连接器
+ID 的格式必须为“{type}:{name}”""" + +conn_test_post.label: +"""创建测试连接器""" + +id.desc: +"""连接器 ID, 格式必须为 {type}:{name}""" + +id.label: +"""连接器 ID""" + +} diff --git a/rel/i18n/zh/emqx_connector_http.hocon b/rel/i18n/zh/emqx_connector_http.hocon new file mode 100644 index 000000000..5d6398b2e --- /dev/null +++ b/rel/i18n/zh/emqx_connector_http.hocon @@ -0,0 +1,77 @@ +emqx_connector_http { + +base_url.desc: +"""base URL 只包含host和port。
+发送 HTTP 请求时,真实的 URL 由 base URL 和 path 参数拼接而成。<br/>
+示例:`http://localhost:9901/`""" + +base_url.label: +"""Base Url""" + +body.desc: +"""HTTP请求报文主体。""" + +body.label: +"""HTTP请求报文主体""" + +connect_timeout.desc: +"""连接HTTP服务器的超时时间。""" + +connect_timeout.label: +"""连接超时""" + +enable_pipelining.desc: +"""正整数,设置最大可发送的异步 HTTP 请求数量。当设置为 1 时,表示每次发送完成 HTTP 请求后都需要等待服务器返回,再继续发送下一个请求。""" + +enable_pipelining.label: +"""HTTP 管道""" + +headers.desc: +"""HTTP 头字段列表。""" + +headers.label: +"""HTTP 头字段列表""" + +max_retries.desc: +"""请求出错时的最大重试次数。""" + +max_retries.label: +"""最大重试次数""" + +method.desc: +"""HTTP 请求方法。""" + +method.label: +"""HTTP 请求方法""" + +path.desc: +"""HTTP请求路径。""" + +path.label: +"""HTTP请求路径""" + +pool_size.desc: +"""连接池大小。""" + +pool_size.label: +"""连接池大小""" + +pool_type.desc: +"""连接池的类型,可用类型有`random`, `hash`。""" + +pool_type.label: +"""连接池类型""" + +request.desc: +"""设置 HTTP 请求的参数。""" + +request.label: +"""HTTP 请求""" + +request_timeout.desc: +"""HTTP 请求超时。""" + +request_timeout.label: +"""HTTP 请求超时""" + +} diff --git a/rel/i18n/zh/emqx_connector_ldap.hocon b/rel/i18n/zh/emqx_connector_ldap.hocon new file mode 100644 index 000000000..6826af6e6 --- /dev/null +++ b/rel/i18n/zh/emqx_connector_ldap.hocon @@ -0,0 +1,21 @@ +emqx_connector_ldap { + +bind_dn.desc: +"""LDAP 绑定的 DN 的值""" + +bind_dn.label: +"""Bind DN""" + +port.desc: +"""LDAP 端口""" + +port.label: +"""端口""" + +timeout.desc: +"""LDAP 查询超时时间""" + +timeout.label: +"""超时时间""" + +} diff --git a/rel/i18n/zh/emqx_connector_mongo.hocon b/rel/i18n/zh/emqx_connector_mongo.hocon new file mode 100644 index 000000000..cd84a242e --- /dev/null +++ b/rel/i18n/zh/emqx_connector_mongo.hocon @@ -0,0 +1,152 @@ +emqx_connector_mongo { + +auth_source.desc: +"""与用户证书关联的数据库名称。""" + +auth_source.label: +"""认证源""" + +connect_timeout.desc: +"""超时重连的等待时间。""" + +connect_timeout.label: +"""连接超时""" + +desc_rs.desc: +"""配置 Replica Set""" + +desc_rs.label: +"""配置 Replica Set""" + +desc_sharded.desc: +"""配置 Sharded Cluster""" + +desc_sharded.label: +"""配置 Sharded Cluster""" + +desc_single.desc: +"""配置 Single 模式""" + +desc_single.label: +"""配置 Single 模式""" + +desc_topology.desc: +"""配置 Topology""" + +desc_topology.label: +"""配置 Topology""" + +heartbeat_period.desc: +"""控制驱动程序何时检查MongoDB部署的状态。指定检查的间隔时间,从上一次检查结束到下一次检查开始计算。如果连接数增加(例如,如果你增加池子的大小,就会发生这种情况),你可能也需要增加这个周期,以避免在MongoDB日志文件中创建太多的日志条目。""" + +heartbeat_period.label: +"""心跳期""" + +local_threshold.desc: +"""在多个合适的MongoDB实例中进行选择的延迟窗口的大小。""" + +local_threshold.label: +"""本地阈值""" + +max_overflow.desc: +"""最大溢出。""" + +max_overflow.label: +"""最大溢出""" + +min_heartbeat_period.desc: +"""心跳间的最小间隙""" + +min_heartbeat_period.label: +"""最小心跳周期""" + +overflow_check_period.desc: +"""检查是否有超过配置的工人的周期("溢出")。""" + +overflow_check_period.label: +"""溢出检查周期""" + +overflow_ttl.desc: +"""当池内工人太多时,等待多久清除多余工人。""" + +overflow_ttl.label: +"""溢出TTL""" + +r_mode.desc: +"""读模式。""" + +r_mode.label: +"""读模式""" + +replica_set_name.desc: +"""副本集的名称。""" + +replica_set_name.label: +"""副本集名称""" + +rs_mongo_type.desc: +"""Replica set模式。当 MongoDB 服务运行在 replica-set 模式下,该配置必须设置为 'rs'。""" + +rs_mongo_type.label: +"""Replica set 模式""" + +server.desc: +"""将要连接的 IPv4 或 IPv6 地址,或者主机名。
+主机名具有以下形式:`Host[:Port]`。
+如果未指定 `[:Port]`,则使用 MongoDB 默认端口 27017。""" + +server.label: +"""服务器地址""" + +server_selection_timeout.desc: +"""指定在抛出异常之前为服务器选择阻断多长时间。""" + +server_selection_timeout.label: +"""服务器选择超时""" + +servers.desc: +"""集群将要连接的节点列表。 节点之间用逗号分隔,如:`Node[,Node].` +每个节点的配置为:将要连接的 IPv4 或 IPv6 地址或主机名。 +主机名具有以下形式:`Host[:Port]`。 +如果未指定 `[:Port]`,则使用 MongoDB 默认端口 27017。""" + +servers.label: +"""服务器列表""" + +sharded_mongo_type.desc: +"""Sharded cluster模式。当 MongoDB 服务运行在 sharded 模式下,该配置必须设置为 'sharded'。""" + +sharded_mongo_type.label: +"""Sharded cluster 模式""" + +single_mongo_type.desc: +"""Standalone 模式。当 MongoDB 服务运行在 standalone 模式下,该配置必须设置为 'single'。""" + +single_mongo_type.label: +"""Standalone 模式""" + +socket_timeout.desc: +"""在尝试超时之前,在套接字上尝试发送或接收的持续时间。""" + +socket_timeout.label: +"""套接字操作超时""" + +srv_record.desc: +"""使用 DNS SRV 记录。""" + +srv_record.label: +"""SRV 记录""" + +w_mode.desc: +"""写模式。""" + +w_mode.label: +"""写模式""" + +wait_queue_timeout.desc: +"""工作者等待连接可用的最长时间。""" + +wait_queue_timeout.label: +"""等待队列超时""" + +} diff --git a/rel/i18n/zh/emqx_connector_mqtt.hocon b/rel/i18n/zh/emqx_connector_mqtt.hocon new file mode 100644 index 000000000..1df6a89b6 --- /dev/null +++ b/rel/i18n/zh/emqx_connector_mqtt.hocon @@ -0,0 +1,21 @@ +emqx_connector_mqtt { + +name.desc: +"""连接器名称,人类可读的连接器描述。""" + +name.label: +"""连接器名称""" + +num_of_bridges.desc: +"""当前使用此连接器的网桥数量。""" + +num_of_bridges.label: +"""网桥数量""" + +type.desc: +"""连接器类型。""" + +type.label: +"""连接器类型""" + +} diff --git a/rel/i18n/zh/emqx_connector_mqtt_schema.hocon b/rel/i18n/zh/emqx_connector_mqtt_schema.hocon new file mode 100644 index 000000000..66ba2accf --- /dev/null +++ b/rel/i18n/zh/emqx_connector_mqtt_schema.hocon @@ -0,0 +1,170 @@ +emqx_connector_mqtt_schema { + +bridge_mode.desc: +"""是否启用 Bridge Mode。 +注意:此设置只针对 MQTT 协议版本 < 5.0 有效,并且需要远程 MQTT Broker 支持 Bridge Mode。 +如果设置为 true ,桥接会告诉远端服务器当前连接是一个桥接而不是一个普通的客户端。 +这意味着消息回环检测会更加高效,并且远端服务器收到的保留消息的标志位会透传给本地。""" + +bridge_mode.label: +"""Bridge 模式""" + +clean_start.desc: +"""与 ingress MQTT 桥的远程服务器重连时是否清除老的 MQTT 会话。""" + +clean_start.label: +"""清除会话""" + +clientid_prefix.desc: +"""可选的前缀,用于在出口网桥使用的clientid前加上前缀。""" + +clientid_prefix.label: +"""客户ID前缀""" + +egress_desc.desc: +"""出口配置定义了该桥接如何将消息从本地 Broker 转发到远程 Broker。 +以下字段中允许使用带有变量的模板:'remote.topic', 'local.qos', 'local.retain', 'local.payload'。
+注意:如果此桥接被用作规则的动作,并且配置了 'local.topic',则从规则输出的数据以及匹配到 'local.topic' 的 MQTT 消息都会被转发。""" + +egress_desc.label: +"""出方向配置""" + +egress_local.desc: +"""如何从本地 Broker 接收消息相关的配置。""" + +egress_local.label: +"""本地配置""" + +egress_local_topic.desc: +"""要转发到远程broker的本地主题""" + +egress_local_topic.label: +"""本地主题""" + +egress_remote.desc: +"""发送消息到远程 Broker 相关的配置。""" + +egress_remote.label: +"""远程配置""" + +egress_remote_qos.desc: +"""待发送 MQTT 消息的 QoS。
+允许使用带有变量的模板。""" + +egress_remote_qos.label: +"""远程 QoS""" + +egress_remote_topic.desc: +"""转发到远程broker的哪个topic。
+允许使用带有变量的模板。""" + +egress_remote_topic.label: +"""远程主题""" + +ingress_desc.desc: +"""入口配置定义了该桥接如何从远程 MQTT Broker 接收消息,然后将消息发送到本地 Broker。
+ 以下字段中允许使用带有变量的模板:'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'。
+ 注意:如果此桥接被用作规则的输入,并且配置了 'local.topic',则从远程代理获取的消息将同时被发送到 'local.topic' 和规则。""" + +ingress_desc.label: +"""入方向配置""" + +ingress_local.desc: +"""发送消息到本地 Broker 相关的配置。""" + +ingress_local.label: +"""本地配置""" + +ingress_local_qos.desc: +"""待发送 MQTT 消息的 QoS。
+允许使用带有变量的模板。""" + +ingress_local_qos.label: +"""本地 QoS""" + +ingress_local_topic.desc: +"""向本地broker的哪个topic发送消息。
+允许使用带有变量的模板。""" + +ingress_local_topic.label: +"""本地主题""" + +ingress_remote.desc: +"""订阅远程 Broker 相关的配置。""" + +ingress_remote.label: +"""远程配置""" + +ingress_remote_qos.desc: +"""订阅远程borker时要使用的 QoS 级别""" + +ingress_remote_qos.label: +"""远程 QoS""" + +ingress_remote_topic.desc: +"""从远程broker的哪个topic接收消息""" + +ingress_remote_topic.label: +"""远程主题""" + +max_inflight.desc: +"""MQTT 协议的最大飞行(已发送但未确认)消息""" + +max_inflight.label: +"""最大飞行消息""" + +mode.desc: +"""MQTT 桥的模式。
+- cluster_shareload:在 emqx 集群的每个节点上创建一个 MQTT 连接。
+在“cluster_shareload”模式下,来自远程代理的传入负载通过共享订阅的方式接收。
+请注意,clientid 以节点名称为后缀,这是为了避免不同节点之间的 clientid 冲突。 +而且对于入口连接的 remote.topic,我们只能使用共享订阅主题过滤器。""" + +mode.label: +"""MQTT 桥接模式""" + +password.desc: +"""MQTT 协议的密码""" + +password.label: +"""密码""" + +payload.desc: +"""要发送的 MQTT 消息的负载。
+允许使用带有变量的模板。""" + +payload.label: +"""消息负载""" + +proto_ver.desc: +"""MQTT 协议版本""" + +proto_ver.label: +"""协议版本""" + +retain.desc: +"""要发送的 MQTT 消息的“保留”标志。
+允许使用带有变量的模板。""" + +retain.label: +"""保留消息标志""" + +server.desc: +"""远程 MQTT Broker的主机和端口。""" + +server.label: +"""Broker主机和端口""" + +server_configs.desc: +"""服务器相关的配置。""" + +server_configs.label: +"""服务配置。""" + +username.desc: +"""MQTT 协议的用户名""" + +username.label: +"""用户名""" + +} diff --git a/rel/i18n/zh/emqx_connector_mysql.hocon b/rel/i18n/zh/emqx_connector_mysql.hocon new file mode 100644 index 000000000..a6900056f --- /dev/null +++ b/rel/i18n/zh/emqx_connector_mysql.hocon @@ -0,0 +1,11 @@ +emqx_connector_mysql { + +server.desc: +"""将要连接的 IPv4 或 IPv6 地址,或者主机名。
+主机名具有以下形式:`Host[:Port]`。
+如果未指定 `[:Port]`,则使用 MySQL 默认端口 3306。""" + +server.label: +"""服务器地址""" + +} diff --git a/rel/i18n/zh/emqx_connector_pgsql.hocon b/rel/i18n/zh/emqx_connector_pgsql.hocon new file mode 100644 index 000000000..c391034fd --- /dev/null +++ b/rel/i18n/zh/emqx_connector_pgsql.hocon @@ -0,0 +1,11 @@ +emqx_connector_pgsql { + +server.desc: +"""将要连接的 IPv4 或 IPv6 地址,或者主机名。
+主机名具有以下形式:`Host[:Port]`。
+如果未指定 `[:Port]`,则使用 PostgreSQL 默认端口 5432。""" + +server.label: +"""服务器地址""" + +} diff --git a/rel/i18n/zh/emqx_connector_redis.hocon b/rel/i18n/zh/emqx_connector_redis.hocon new file mode 100644 index 000000000..a0819876e --- /dev/null +++ b/rel/i18n/zh/emqx_connector_redis.hocon @@ -0,0 +1,50 @@ +emqx_connector_redis { + +cluster.desc: +"""集群模式。当 Redis 服务运行在集群模式下,该配置必须设置为 'cluster'。""" + +cluster.label: +"""集群模式""" + +database.desc: +"""Redis 数据库 ID。""" + +database.label: +"""数据库 ID""" + +sentinel.desc: +"""哨兵模式。当 Redis 服务运行在哨兵模式下,该配置必须设置为 'sentinel'。""" + +sentinel.label: +"""哨兵模式""" + +sentinel_desc.desc: +"""Redis 哨兵模式下的集群名称。""" + +sentinel_desc.label: +"""集群名称""" + +server.desc: +"""将要连接的 IPv4 或 IPv6 地址,或者主机名。
+主机名具有以下形式:`Host[:Port]`。
+如果未指定 `[:Port]`,则使用 Redis 默认端口 6379。""" + +server.label: +"""服务器地址""" + +servers.desc: +"""集群将要连接的节点列表。 节点之间用逗号分隔,如:`Node[,Node].` +每个节点的配置为:将要连接的 IPv4 或 IPv6 地址或主机名。 +主机名具有以下形式:`Host[:Port]`。 +如果未指定 `[:Port]`,则使用 Redis 默认端口 6379。""" + +servers.label: +"""服务器列表""" + +single.desc: +"""单机模式。当 Redis 服务运行在单机模式下,该配置必须设置为 'single'。""" + +single.label: +"""单机模式""" + +} diff --git a/rel/i18n/zh/emqx_connector_schema_lib.hocon b/rel/i18n/zh/emqx_connector_schema_lib.hocon new file mode 100644 index 000000000..318b5c31c --- /dev/null +++ b/rel/i18n/zh/emqx_connector_schema_lib.hocon @@ -0,0 +1,45 @@ +emqx_connector_schema_lib { + +auto_reconnect.desc: +"""已弃用。自动重连数据库。""" + +auto_reconnect.label: +"""已弃用。自动重连数据库""" + +database_desc.desc: +"""数据库名字。""" + +database_desc.label: +"""数据库名字""" + +password.desc: +"""内部数据库密码。""" + +password.label: +"""密码""" + +pool_size.desc: +"""桥接远端服务时使用的连接池大小。""" + +pool_size.label: +"""连接池大小""" + +prepare_statement.desc: +"""SQL 预处理语句列表。""" + +prepare_statement.label: +"""SQL 预处理语句列表""" + +ssl.desc: +"""启用 SSL 连接。""" + +ssl.label: +"""启用SSL""" + +username.desc: +"""内部数据库的用户名。""" + +username.label: +"""用户名""" + +} diff --git a/rel/i18n/zh/emqx_dashboard_api.hocon b/rel/i18n/zh/emqx_dashboard_api.hocon new file mode 100644 index 000000000..8292d8bba --- /dev/null +++ b/rel/i18n/zh/emqx_dashboard_api.hocon @@ -0,0 +1,66 @@ +emqx_dashboard_api { + +change_pwd_api.desc: +"""更改 Dashboard 用户密码""" + +create_user_api.desc: +"""创建 Dashboard 用户""" + +create_user_api_success.desc: +"""创建 Dashboard 用户成功""" + +delete_user_api.desc: +"""删除 Dashboard 用户""" + +license.desc: +"""EMQX 许可类型。可为 opensource 或 enterprise""" + +list_users_api.desc: +"""Dashboard 用户列表""" + +login_api.desc: +"""获取 Dashboard 认证 Token。""" + +login_failed401.desc: +"""登录失败。用户名或密码错误""" + +login_failed_response400.desc: +"""登录失败。用户名或密码错误""" + +login_success.desc: +"""Dashboard 认证成功""" + +logout_api.desc: +"""Dashboard 用户登出""" + +new_pwd.desc: +"""新密码""" + +old_pwd.desc: +"""旧密码""" + +password.desc: +"""Dashboard 密码""" + +token.desc: +"""Dashboard 认证 Token""" + +update_user_api.desc: +"""更新 Dashboard 用户描述""" + +update_user_api200.desc: +"""更新 Dashboard 用户成功""" + +user_description.desc: +"""Dashboard 用户描述""" + +username.desc: +"""Dashboard 用户名""" + +users_api404.desc: +"""Dashboard 用户不存在""" + +version.desc: +"""EMQX 版本""" + +} diff --git a/rel/i18n/zh/emqx_dashboard_schema.hocon b/rel/i18n/zh/emqx_dashboard_schema.hocon new file mode 100644 index 000000000..a07f3e6d8 --- /dev/null +++ b/rel/i18n/zh/emqx_dashboard_schema.hocon @@ -0,0 +1,130 @@ +emqx_dashboard_schema { + +backlog.desc: +"""排队等待连接的队列的最大长度。""" + +backlog.label: +"""排队长度""" + +bind.desc: +"""监听地址和端口,热更新此配置时,会重启 Dashboard 服务。""" + +bind.label: +"""绑定端口""" + +bootstrap_users_file.desc: +"""已废弃,请使用 api_key.bootstrap_file。""" + +bootstrap_users_file.label: +"""已废弃""" + +cors.desc: +"""支持跨域资源共享(CORS), +允许服务器指示任何来源(域名、协议或端口),除了本服务器之外的任何浏览器应允许加载资源。""" + +cors.label: +"""跨域资源共享""" + +default_password.desc: +"""Dashboard 的默认密码,为了安全,应该尽快修改密码。 +当通过网页首次登录 Dashboard 并按提示修改成复杂密码后,此值就会失效。""" + +default_password.label: +"""默认密码""" + +default_username.desc: +"""Dashboard 的默认用户名。""" + +default_username.label: +"""默认用户名""" + +desc_dashboard.desc: +"""EMQX Dashboard 配置。""" + +desc_dashboard.label: +"""Dashboard""" + +desc_http.desc: +"""Dashboard 监听器(HTTP)配置。""" + +desc_http.label: +"""HTTP""" + +desc_https.desc: +"""Dashboard 监听器(HTTPS)配置。""" + +desc_https.label: +"""HTTPS""" + +desc_listeners.desc: +"""Dashboard 监听器配置。""" + +desc_listeners.label: +"""监听器""" + 
+i18n_lang.desc: +"""设置 Swagger 多语言的版本,可为 en 或 zh。""" + +i18n_lang.label: +"""多语言支持""" + +inet6.desc: +"""启用IPv6, 如果机器不支持IPv6,请关闭此选项,否则会导致 Dashboard 无法使用。""" + +inet6.label: +"""IPv6""" + +ipv6_v6only.desc: +"""当开启 inet6 功能的同时禁用 IPv4-to-IPv6 映射。该配置仅在 inet6 功能开启时有效。""" + +ipv6_v6only.label: +"""IPv6 only""" + +listener_enable.desc: +"""忽略或启用该监听器。""" + +listener_enable.label: +"""启用""" + +listeners.desc: +"""Dashboard 监听器设置。监听器必须有唯一的端口号和IP地址的组合。 +例如,可以通过指定IP地址 0.0.0.0 来监听机器上给定端口上的所有配置的IP地址。 +或者,可以为每个监听器指定唯一的IP地址,但使用相同的端口。""" + +listeners.label: +"""监听器""" + +max_connections.desc: +"""同时处理的最大连接数。""" + +max_connections.label: +"""最大连接数""" + +num_acceptors.desc: +"""TCP协议的Socket acceptor池大小, 默认设置在线的调度器数量(通常为 CPU 核数)""" + +num_acceptors.label: +"""Acceptor 数量""" + +proxy_header.desc: +"""开启对 `HAProxy` 的支持,注意:一旦开启了这个功能,就无法再处理普通的 HTTP 请求了。""" + +proxy_header.label: +"""开启对 `HAProxy` 的支持""" + +sample_interval.desc: +"""Dashboard 中图表指标的时间间隔。必须小于60,且被60的整除,默认设置 10s。""" + +send_timeout.desc: +"""Socket发送超时时间。""" + +send_timeout.label: +"""发送超时时间""" + +token_expired_time.desc: +"""JWT token 过期时间。默认设置为 60 分钟。""" + +token_expired_time.label: +"""JWT 过期时间""" + +} diff --git a/rel/i18n/zh/emqx_delayed_api.hocon b/rel/i18n/zh/emqx_delayed_api.hocon new file mode 100644 index 000000000..d783a17c3 --- /dev/null +++ b/rel/i18n/zh/emqx_delayed_api.hocon @@ -0,0 +1,72 @@ +emqx_delayed_api { + +bad_msgid_format.desc: +"""消息 ID 格式错误""" + +count.desc: +"""延迟消息总数""" + +delayed_interval.desc: +"""延迟时间(秒)""" + +delayed_remaining.desc: +"""剩余时间(秒)""" + +delete_api.desc: +"""删除延迟消息""" + +expected_at.desc: +"""期望的发送时间, RFC 3339 格式""" + +from_clientid.desc: +"""消息的 ClientID""" + +from_username.desc: +"""消息的 Username""" + +get_message_api.desc: +"""查看延迟消息""" + +illegality_limit.desc: +"""数量上限不合法""" + +list_api.desc: +"""查看延迟消息列表""" + +msgid.desc: +"""延迟消息 ID""" + +msgid_not_found.desc: +"""未找到对应消息""" + +node.desc: +"""消息的来源节点""" + +payload.desc: +"""消息内容, base64 格式。如果消息的大小超过 2048 字节,则消息内容会被设置为 'PAYLOAD_TO_LARGE'""" + +publish_at.desc: +"""客户端发送时间, RFC 3339 格式""" + +qos.desc: +"""QoS""" + +topic.desc: +"""主题""" + +update_api.desc: +"""开启或者关闭功能,或者设置延迟消息数量上限""" + +update_success.desc: +"""开启或者关闭功能操作成功""" + +view_limit.desc: +"""每页数量""" + +view_page.desc: +"""查看的页数""" + +view_status_api.desc: +"""查看慢订阅状态""" + +} diff --git a/rel/i18n/zh/emqx_ee_bridge_cassa.hocon b/rel/i18n/zh/emqx_ee_bridge_cassa.hocon new file mode 100644 index 000000000..2d1125a6b --- /dev/null +++ b/rel/i18n/zh/emqx_ee_bridge_cassa.hocon @@ -0,0 +1,40 @@ +emqx_ee_bridge_cassa { + +config_enable.desc: +"""启用/禁用桥接""" + +config_enable.label: +"""启用/禁用桥接""" + +cql_template.desc: +"""CQL 模板""" + +cql_template.label: +"""CQL 模板""" + +desc_config.desc: +"""Cassandra 桥接配置""" + +desc_config.label: +"""Cassandra 桥接配置""" + +desc_name.desc: +"""桥接名字""" + +desc_name.label: +"""桥接名字""" + +desc_type.desc: +"""Bridge 类型""" + +desc_type.label: +"""桥接类型""" + +local_topic.desc: +"""发送到 'local_topic' 的消息都会转发到 Cassandra。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" + +local_topic.label: +"""本地 Topic""" + +} diff --git a/rel/i18n/zh/emqx_ee_bridge_clickhouse.hocon b/rel/i18n/zh/emqx_ee_bridge_clickhouse.hocon new file mode 100644 index 000000000..a3ede08ba --- /dev/null +++ b/rel/i18n/zh/emqx_ee_bridge_clickhouse.hocon @@ -0,0 +1,46 @@ +emqx_ee_bridge_clickhouse { + +batch_value_separator.desc: +"""默认为逗号 ',',适用于 VALUE 格式。您也可以使用其他分隔符, 请参考 [INSERT INTO 语句](https://clickhouse.com/docs/en/sql-reference/statements/insert-into)。""" + +batch_value_separator.label: +"""分隔符""" + +config_enable.desc: +"""启用/禁用桥接""" + +config_enable.label: +"""启用/禁用桥接""" + +desc_config.desc: +"""Clickhouse 桥接配置""" + +desc_config.label: +"""Clickhouse 桥接配置""" + +desc_name.desc: +"""桥接名字""" + +desc_name.label: +"""桥接名字""" + +desc_type.desc: +"""Bridge 类型""" + +desc_type.label: +"""桥接类型""" + +local_topic.desc: +"""发送到 'local_topic' 的消息都会转发到 Clickhouse。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" + +local_topic.label: +"""本地 Topic""" + +sql_template.desc: +"""可以使用 ${field} 占位符来引用消息与客户端上下文中的变量,请确保对应字段存在且数据格式符合预期。此处不支持 [SQL 预处理](https://docs.emqx.com/zh/enterprise/v5.0/data-integration/data-bridges.html#sql-预处理)。""" + +sql_template.label: +"""SQL 模板""" + +} diff --git a/rel/i18n/zh/emqx_ee_bridge_dynamo.hocon b/rel/i18n/zh/emqx_ee_bridge_dynamo.hocon new file mode 100644 index 000000000..adf33b9e8 --- /dev/null +++ b/rel/i18n/zh/emqx_ee_bridge_dynamo.hocon @@ -0,0 +1,40 @@ +emqx_ee_bridge_dynamo { + +config_enable.desc: +"""启用/禁用桥接""" + +config_enable.label: +"""启用/禁用桥接""" + +desc_config.desc: +"""DynamoDB 桥接配置""" + +desc_config.label: +"""DynamoDB 桥接配置""" + +desc_name.desc: +"""桥接名字""" + +desc_name.label: +"""桥接名字""" + +desc_type.desc: +"""Bridge 类型""" + +desc_type.label: +"""桥接类型""" + +local_topic.desc: +"""发送到 'local_topic' 的消息都会转发到 DynamoDB。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" + +local_topic.label: +"""本地 Topic""" + +template.desc: +"""模板, 默认为空,为空时将会将整个消息存入数据库""" + +template.label: +"""模板""" + +} diff --git a/rel/i18n/zh/emqx_ee_bridge_gcp_pubsub.hocon b/rel/i18n/zh/emqx_ee_bridge_gcp_pubsub.hocon new file mode 100644 index 000000000..4318211c9 --- /dev/null +++ b/rel/i18n/zh/emqx_ee_bridge_gcp_pubsub.hocon @@ -0,0 +1,77 @@ +emqx_ee_bridge_gcp_pubsub { + +connect_timeout.desc: +"""连接 HTTP 服务器的超时时间。""" + +connect_timeout.label: +"""连接超时""" + +desc_config.desc: +"""GCP PubSub 桥接配置""" + +desc_config.label: +"""GCP PubSub 桥接配置""" + +desc_name.desc: +"""桥接名字,可读描述""" + +desc_name.label: +"""桥接名字""" + +desc_type.desc: +"""桥接类型""" + +desc_type.label: +"""桥接类型""" + +local_topic.desc: +"""发送到 'local_topic' 的消息都会转发到 GCP PubSub。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 GCP PubSub。""" + +local_topic.label: +"""本地 Topic""" + +max_retries.desc: +"""请求出错时的最大重试次数。""" + +max_retries.label: +"""最大重试次数""" + +payload_template.desc: +"""用于格式化外发信息的模板。 如果未定义,将以JSON格式发送所有可用的上下文。""" + +payload_template.label: +"""HTTP 请求消息体模板""" + +pipelining.desc: +"""正整数,设置最大可发送的异步 HTTP 请求数量。当设置为 1 时,表示每次发送完成 HTTP 请求后都需要等待服务器返回,再继续发送下一个请求。""" + +pipelining.label: +"""HTTP 流水线""" + +pool_size.desc: +"""连接池大小。""" + +pool_size.label: +"""连接池大小""" + +pubsub_topic.desc: +"""要发布消息的GCP PubSub主题。""" + +pubsub_topic.label: +"""GCP PubSub 主题""" + +request_timeout.desc: +"""废弃的。在缓冲区设置中配置请求超时。""" + +request_timeout.label: +"""HTTP 请求超时""" + +service_account_json.desc: +"""包含将与 PubSub 一起使用的 GCP 服务账户凭证的 JSON。 +当创建GCP服务账户时(如https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount),可以选择下载 JSON 形式的凭证,然后在该配置项中使用。""" + +service_account_json.label: +"""GCP 服务账户凭证""" + +} diff --git a/rel/i18n/zh/emqx_ee_bridge_hstreamdb.hocon b/rel/i18n/zh/emqx_ee_bridge_hstreamdb.hocon new file mode 100644 index 000000000..55cdebe3c --- /dev/null +++ b/rel/i18n/zh/emqx_ee_bridge_hstreamdb.hocon @@ -0,0 +1,52 @@ +emqx_ee_bridge_hstreamdb { + +config_direction.desc: +"""桥接的方向, 必须是 egress""" + +config_direction.label: +"""桥接方向""" + +config_enable.desc: +"""启用/禁用桥接""" + +config_enable.label: +"""启用/禁用桥接""" + +desc_config.desc: +"""HStreamDB 桥接配置""" + +desc_config.label: +"""HStreamDB 桥接配置""" + +desc_connector.desc: +"""连接器的通用配置。""" + +desc_connector.label: +"""连接器通用配置。""" + +desc_name.desc: +"""桥接名字,可读描述""" + +desc_name.label: +"""桥接名字""" + +desc_type.desc: +"""Bridge 类型""" + +desc_type.label: +"""桥接类型""" + +local_topic.desc: +"""发送到 'local_topic' 的消息都会转发到 HStreamDB。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 HStreamDB。""" + +local_topic.label: +"""本地 Topic""" + +payload.desc: +"""要转发到 HStreamDB 的数据内容,支持占位符""" + +payload.label: +"""消息内容""" + +} diff --git a/rel/i18n/zh/emqx_ee_bridge_influxdb.hocon b/rel/i18n/zh/emqx_ee_bridge_influxdb.hocon new file mode 100644 index 000000000..c9c7c6a54 --- /dev/null +++ b/rel/i18n/zh/emqx_ee_bridge_influxdb.hocon @@ -0,0 +1,47 @@ +emqx_ee_bridge_influxdb { + +config_enable.desc: +"""启用/禁用桥接。""" + +config_enable.label: +"""启用/禁用桥接""" + +desc_config.desc: +"""InfluxDB 桥接配置。""" + +desc_config.label: +"""InfluxDB 桥接配置""" + +desc_name.desc: +"""桥接名称。""" + +desc_name.label: +"""桥接名称""" + +desc_type.desc: +"""桥接类型。""" + +desc_type.label: +"""桥接类型""" + +local_topic.desc: +"""发送到 'local_topic' 的消息都会转发到 InfluxDB。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 InfluxDB。""" + +local_topic.label: +"""本地 Topic""" + +write_syntax.desc: +"""使用 InfluxDB API Line Protocol 写入 InfluxDB 的数据,支持占位符
+参考 [InfluxDB 2.3 Line Protocol](https://docs.influxdata.com/influxdb/v2.3/reference/syntax/line-protocol/) 及 +[InfluxDB 1.8 Line Protocol](https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_tutorial/)
+TLDR:
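A hedged sketch of the Line Protocol layout being summarized here; the measurement, tag, and field names are assumed examples, while the `${...}` placeholder style and the trailing `i` for integer values come from this description:

```
# layout: <measurement>[,<tag>=<value>...] <field>=<value>[,<field>=<value>...] [<timestamp>]
# integer fields carry the `i` suffix, e.g. ${payload.count}i
write_syntax = "mqtt,clientid=${clientid} temperature=${payload.temperature},count=${payload.count}i ${timestamp}"
```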
+``` +[,=[,=]] =[,=] [] +``` +注意,整形数值占位符后需要添加一个字符 `i` 类型标识。例如 `${payload.int_value}i`""" + +write_syntax.label: +"""写语句""" + +} diff --git a/rel/i18n/zh/emqx_ee_bridge_mongodb.hocon b/rel/i18n/zh/emqx_ee_bridge_mongodb.hocon new file mode 100644 index 000000000..71703c662 --- /dev/null +++ b/rel/i18n/zh/emqx_ee_bridge_mongodb.hocon @@ -0,0 +1,57 @@ +emqx_ee_bridge_mongodb { + +collection.desc: +"""数据将被存储到的集合""" + +collection.label: +"""将要使用的集合(Collection)""" + +desc_config.desc: +"""为MongoDB桥配置""" + +desc_config.label: +"""MongoDB桥配置""" + +desc_name.desc: +"""桥接名称。""" + +desc_name.label: +"""桥接名称""" + +desc_type.desc: +"""桥接类型。""" + +desc_type.label: +"""桥接类型""" + +enable.desc: +"""启用或停用该MongoDB桥""" + +enable.label: +"""启用或禁用""" + +mongodb_rs_conf.desc: +"""MongoDB(Replica Set)配置""" + +mongodb_rs_conf.label: +"""MongoDB(Replica Set)配置""" + +mongodb_sharded_conf.desc: +"""MongoDB (Sharded)配置""" + +mongodb_sharded_conf.label: +"""MongoDB (Sharded)配置""" + +mongodb_single_conf.desc: +"""MongoDB(独立)配置""" + +mongodb_single_conf.label: +"""MongoDB(独立)配置""" + +payload_template.desc: +"""用于格式化写入 MongoDB 的消息模板。 如果未定义,规则引擎会使用 JSON 格式序列化所有的可见输入,例如 clientid, topic, payload 等。""" + +payload_template.label: +"""有效载荷模板""" + +} diff --git a/rel/i18n/zh/emqx_ee_bridge_mysql.hocon b/rel/i18n/zh/emqx_ee_bridge_mysql.hocon new file mode 100644 index 000000000..e03b147b0 --- /dev/null +++ b/rel/i18n/zh/emqx_ee_bridge_mysql.hocon @@ -0,0 +1,40 @@ +emqx_ee_bridge_mysql { + +config_enable.desc: +"""启用/禁用桥接""" + +config_enable.label: +"""启用/禁用桥接""" + +desc_config.desc: +"""HStreamDB 桥接配置""" + +desc_config.label: +"""HStreamDB 桥接配置""" + +desc_name.desc: +"""桥接名字,可读描述""" + +desc_name.label: +"""桥接名字""" + +desc_type.desc: +"""Bridge 类型""" + +desc_type.label: +"""桥接类型""" + +local_topic.desc: +"""发送到 'local_topic' 的消息都会转发到 MySQL。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" + +local_topic.label: +"""本地 Topic""" + +sql_template.desc: +"""SQL 模板""" + +sql_template.label: +"""SQL 模板""" + +} diff --git a/rel/i18n/zh/emqx_ee_bridge_pgsql.hocon b/rel/i18n/zh/emqx_ee_bridge_pgsql.hocon new file mode 100644 index 000000000..ebf7f331a --- /dev/null +++ b/rel/i18n/zh/emqx_ee_bridge_pgsql.hocon @@ -0,0 +1,40 @@ +emqx_ee_bridge_pgsql { + +config_enable.desc: +"""启用/禁用桥接""" + +config_enable.label: +"""启用/禁用桥接""" + +desc_config.desc: +"""PostgreSQL 桥接配置""" + +desc_config.label: +"""PostgreSQL 桥接配置""" + +desc_name.desc: +"""桥接名字""" + +desc_name.label: +"""桥接名字""" + +desc_type.desc: +"""Bridge 类型""" + +desc_type.label: +"""桥接类型""" + +local_topic.desc: +"""发送到 'local_topic' 的消息都会转发到 PostgreSQL。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" + +local_topic.label: +"""本地 Topic""" + +sql_template.desc: +"""SQL 模板""" + +sql_template.label: +"""SQL 模板""" + +} diff --git a/rel/i18n/zh/emqx_ee_bridge_redis.hocon b/rel/i18n/zh/emqx_ee_bridge_redis.hocon new file mode 100644 index 000000000..378df508f --- /dev/null +++ b/rel/i18n/zh/emqx_ee_bridge_redis.hocon @@ -0,0 +1,41 @@ +emqx_ee_bridge_redis { + +command_template.desc: +"""用于推送数据的 Redis 命令模板。 每个列表元素代表一个命令名称或其参数。 +例如,要通过键值 `msgs` 将消息体推送到 Redis 列表中,数组元素应该是: `rpush`, `msgs`, `${payload}`。""" + +command_template.label: +"""Redis Command 模板""" + +config_enable.desc: +"""启用/禁用桥接""" + +config_enable.label: +"""启用/禁用桥接""" + +desc_config.desc: +"""Resis 桥接配置""" + +desc_config.label: +"""Redis 桥接配置""" + +desc_name.desc: +"""桥接名字,可读描述""" + +desc_name.label: +"""桥接名字""" + +desc_type.desc: +"""Bridge 类型""" + +desc_type.label: +"""桥接类型""" + +local_topic.desc: +"""发送到 'local_topic' 的消息都会转发到 Redis。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 Redis。""" + +local_topic.label: +"""本地 Topic""" + +} diff --git a/rel/i18n/zh/emqx_ee_bridge_rocketmq.hocon b/rel/i18n/zh/emqx_ee_bridge_rocketmq.hocon new file mode 100644 index 000000000..924004361 --- /dev/null +++ b/rel/i18n/zh/emqx_ee_bridge_rocketmq.hocon @@ -0,0 +1,40 @@ +emqx_ee_bridge_rocketmq { + +config_enable.desc: +"""启用/禁用桥接""" + +config_enable.label: +"""启用/禁用桥接""" + +desc_config.desc: +"""RocketMQ 桥接配置""" + +desc_config.label: +"""RocketMQ 桥接配置""" + +desc_name.desc: +"""桥接名字""" + +desc_name.label: +"""桥接名字""" + +desc_type.desc: +"""Bridge 类型""" + +desc_type.label: +"""桥接类型""" + +local_topic.desc: +"""发送到 'local_topic' 的消息都会转发到 RocketMQ。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" + +local_topic.label: +"""本地 Topic""" + +template.desc: +"""模板, 默认为空,为空时将会将整个消息转发给 RocketMQ""" + +template.label: +"""模板""" + +} diff --git a/rel/i18n/zh/emqx_ee_bridge_sqlserver.hocon b/rel/i18n/zh/emqx_ee_bridge_sqlserver.hocon new file mode 100644 index 000000000..0958d4b7a --- /dev/null +++ b/rel/i18n/zh/emqx_ee_bridge_sqlserver.hocon @@ -0,0 +1,46 @@ +emqx_ee_bridge_sqlserver { + +config_enable.desc: +"""启用/禁用桥接""" + +config_enable.label: +"""启用/禁用桥接""" + +desc_config.desc: +"""Microsoft SQL Server 桥接配置""" + +desc_config.label: +"""Microsoft SQL Server 桥接配置""" + +desc_name.desc: +"""桥接名字""" + +desc_name.label: +"""桥接名字""" + +desc_type.desc: +"""Bridge 类型""" + +desc_type.label: +"""桥接类型""" + +driver.desc: +"""SQL Server Driver 名称""" + +driver.label: +"""SQL Server Driver 名称""" + +local_topic.desc: +"""发送到 'local_topic' 的消息都会转发到 Microsoft SQL Server。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" + +local_topic.label: +"""本地 Topic""" + +sql_template.desc: +"""SQL 模板""" + +sql_template.label: +"""SQL 模板""" + +} diff --git a/rel/i18n/zh/emqx_ee_bridge_tdengine.hocon b/rel/i18n/zh/emqx_ee_bridge_tdengine.hocon new file mode 100644 index 000000000..5e417a1c7 --- /dev/null +++ b/rel/i18n/zh/emqx_ee_bridge_tdengine.hocon @@ -0,0 +1,40 @@ +emqx_ee_bridge_tdengine { + +config_enable.desc: +"""启用/禁用桥接""" + +config_enable.label: +"""启用/禁用桥接""" + +desc_config.desc: +"""TDengine 桥接配置""" + +desc_config.label: +"""TDengine 桥接配置""" + +desc_name.desc: +"""桥接名字""" + +desc_name.label: +"""桥接名字""" + +desc_type.desc: +"""Bridge 类型""" + +desc_type.label: +"""桥接类型""" + +local_topic.desc: +"""发送到 'local_topic' 的消息都会转发到 TDengine。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" + +local_topic.label: +"""本地 Topic""" + +sql_template.desc: +"""SQL 模板""" + +sql_template.label: +"""SQL 模板""" + +} diff --git a/rel/i18n/zh/emqx_ee_connector_cassa.hocon b/rel/i18n/zh/emqx_ee_connector_cassa.hocon new file mode 100644 index 000000000..ffbadc7de --- /dev/null +++ b/rel/i18n/zh/emqx_ee_connector_cassa.hocon @@ -0,0 +1,17 @@ +emqx_ee_connector_cassa { + +keyspace.desc: +"""要连接到的 Keyspace 名称。""" + +keyspace.label: +"""Keyspace""" + +servers.desc: +"""将要连接的 IPv4 或 IPv6 地址,或者主机名。
+主机名具有以下形式:`Host[:Port][,Host2:Port]`。
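For example, a value in this form might look like the following sketch (the host addresses are illustrative):

```
# two contact points; the second omits the port and falls back to the default
servers = "192.168.0.10:9042,192.168.0.11"
```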
+如果未指定 `[:Port]`,则使用 Cassandra 默认端口 9042。""" + +servers.label: +"""Servers""" + +} diff --git a/rel/i18n/zh/emqx_ee_connector_clickhouse.hocon b/rel/i18n/zh/emqx_ee_connector_clickhouse.hocon new file mode 100644 index 000000000..f1457a1f6 --- /dev/null +++ b/rel/i18n/zh/emqx_ee_connector_clickhouse.hocon @@ -0,0 +1,15 @@ +emqx_ee_connector_clickhouse { + +base_url.desc: +"""你想连接到的Clickhouse服务器的HTTP URL(例如http://myhostname:8123)。""" + +base_url.label: +"""服务器 URL""" + +connect_timeout.desc: +"""连接HTTP服务器的超时时间。""" + +connect_timeout.label: +"""连接超时""" + +} diff --git a/rel/i18n/zh/emqx_ee_connector_dynamo.hocon b/rel/i18n/zh/emqx_ee_connector_dynamo.hocon new file mode 100644 index 000000000..540d79dd0 --- /dev/null +++ b/rel/i18n/zh/emqx_ee_connector_dynamo.hocon @@ -0,0 +1,9 @@ +emqx_ee_connector_dynamo { + +url.desc: +"""DynamoDB 的地址。""" + +url.label: +"""DynamoDB 地址""" + +} diff --git a/rel/i18n/zh/emqx_ee_connector_hstreamdb.hocon b/rel/i18n/zh/emqx_ee_connector_hstreamdb.hocon new file mode 100644 index 000000000..d9d8fb3ed --- /dev/null +++ b/rel/i18n/zh/emqx_ee_connector_hstreamdb.hocon @@ -0,0 +1,45 @@ +emqx_ee_connector_hstreamdb { + +config.desc: +"""HStreamDB 连接配置。""" + +config.label: +"""连接配置""" + +name.desc: +"""连接器名称,人类可读的连接器描述。""" + +name.label: +"""连接器名称""" + +ordering_key.desc: +"""HStreamDB 分区键""" + +ordering_key.label: +"""HStreamDB 分区键""" + +pool_size.desc: +"""HStreamDB 连接池大小""" + +pool_size.label: +"""HStreamDB 连接池大小""" + +stream_name.desc: +"""HStreamDB 流名称""" + +stream_name.label: +"""HStreamDB 流名称""" + +type.desc: +"""连接器类型。""" + +type.label: +"""连接器类型""" + +url.desc: +"""HStreamDB 服务器 URL""" + +url.label: +"""HStreamDB 服务器 URL""" + +} diff --git a/rel/i18n/zh/emqx_ee_connector_influxdb.hocon b/rel/i18n/zh/emqx_ee_connector_influxdb.hocon new file mode 100644 index 000000000..6148b400a --- /dev/null +++ b/rel/i18n/zh/emqx_ee_connector_influxdb.hocon @@ -0,0 +1,71 @@ +emqx_ee_connector_influxdb { + +bucket.desc: +"""InfluxDB bucket 名称。""" + +bucket.label: +"""Bucket""" + +database.desc: +"""InfluxDB 数据库。""" + +database.label: +"""数据库""" + +influxdb_api_v1.desc: +"""InfluxDB HTTP API 协议。支持 Influxdb v1.8 以及之前的版本。""" + +influxdb_api_v1.label: +"""HTTP API 协议""" + +influxdb_api_v2.desc: +"""InfluxDB HTTP API V2 协议。支持 Influxdb v2.0 以及之后的版本。""" + +influxdb_api_v2.label: +"""HTTP API V2 协议""" + +org.desc: +"""InfluxDB 组织名称。""" + +org.label: +"""组织""" + +password.desc: +"""InfluxDB 密码。""" + +password.label: +"""密码""" + +precision.desc: +"""InfluxDB 时间精度。""" + +precision.label: +"""时间精度""" + +protocol.desc: +"""InfluxDB 协议。HTTP API 或 HTTP API V2。""" + +protocol.label: +"""协议""" + +server.desc: +"""将要连接的 IPv4 或 IPv6 地址,或者主机名。
+主机名具有以下形式:`Host[:Port]`。
+如果未指定 `[:Port]`,则使用 InfluxDB 默认端口 8086。""" + +server.label: +"""服务器地址""" + +token.desc: +"""InfluxDB token。""" + +token.label: +"""Token""" + +username.desc: +"""InfluxDB 用户名。""" + +username.label: +"""用户名""" + +} diff --git a/rel/i18n/zh/emqx_ee_connector_rocketmq.hocon b/rel/i18n/zh/emqx_ee_connector_rocketmq.hocon new file mode 100644 index 000000000..d32e6ea01 --- /dev/null +++ b/rel/i18n/zh/emqx_ee_connector_rocketmq.hocon @@ -0,0 +1,35 @@ +emqx_ee_connector_rocketmq { + +refresh_interval.desc: +"""RocketMQ 主题路由更新间隔。""" + +refresh_interval.label: +"""主题路由更新间隔""" + +security_token.desc: +"""RocketMQ 服务器安全令牌""" + +security_token.label: +"""安全令牌""" + +send_buffer.desc: +"""RocketMQ 驱动的套字节发送消息的缓冲区大小""" + +send_buffer.label: +"""发送消息的缓冲区大小""" + +server.desc: +"""将要连接的 IPv4 或 IPv6 地址,或者主机名。
+主机名具有以下形式:`Host[:Port]`。
+如果未指定 `[:Port]`,则使用 RocketMQ 默认端口 9876。""" + +server.label: +"""服务器地址""" + +topic.desc: +"""RocketMQ 主题""" + +topic.label: +"""RocketMQ 主题""" + +} diff --git a/rel/i18n/zh/emqx_ee_connector_sqlserver.hocon b/rel/i18n/zh/emqx_ee_connector_sqlserver.hocon new file mode 100644 index 000000000..44377c86d --- /dev/null +++ b/rel/i18n/zh/emqx_ee_connector_sqlserver.hocon @@ -0,0 +1,11 @@ +emqx_ee_connector_sqlserver { + +server.desc: +"""将要连接的 IPv4 或 IPv6 地址,或者主机名。
+主机名具有以下形式:`Host[:Port]`。
+如果未指定 `[:Port]`,则使用 SQL Server 默认端口 1433。""" + +server.label: +"""服务器地址""" + +} diff --git a/rel/i18n/zh/emqx_ee_connector_tdengine.hocon b/rel/i18n/zh/emqx_ee_connector_tdengine.hocon new file mode 100644 index 000000000..f3064aeb5 --- /dev/null +++ b/rel/i18n/zh/emqx_ee_connector_tdengine.hocon @@ -0,0 +1,11 @@ +emqx_ee_connector_tdengine { + +server.desc: +"""将要连接的 IPv4 或 IPv6 地址,或者主机名。
+主机名具有以下形式:`Host[:Port]`。
+如果未指定 `[:Port]`,则使用 TDengine 默认端口 6041。""" + +server.label: +"""服务器地址""" + +} diff --git a/rel/i18n/zh/emqx_ee_schema_registry_http_api.hocon b/rel/i18n/zh/emqx_ee_schema_registry_http_api.hocon new file mode 100644 index 000000000..1dab50a3f --- /dev/null +++ b/rel/i18n/zh/emqx_ee_schema_registry_http_api.hocon @@ -0,0 +1,39 @@ +emqx_ee_schema_registry_http_api { + +desc_param_path_schema_name.desc: +"""模式名称""" + +desc_param_path_schema_name.label: +"""模式名称""" + +desc_schema_registry_api_delete.desc: +"""删除一个模式""" + +desc_schema_registry_api_delete.label: +"""删除模式""" + +desc_schema_registry_api_get.desc: +"""通过名称获取模式""" + +desc_schema_registry_api_get.label: +"""获取模式""" + +desc_schema_registry_api_list.desc: +"""列出所有注册的模式""" + +desc_schema_registry_api_list.label: +"""列表模式""" + +desc_schema_registry_api_post.desc: +"""注册一个新的模式""" + +desc_schema_registry_api_post.label: +"""注册模式""" + +desc_schema_registry_api_put.desc: +"""更新一个现有的模式""" + +desc_schema_registry_api_put.label: +"""更新模式""" + +} diff --git a/rel/i18n/zh/emqx_ee_schema_registry_schema.hocon b/rel/i18n/zh/emqx_ee_schema_registry_schema.hocon new file mode 100644 index 000000000..3bf0a7dc8 --- /dev/null +++ b/rel/i18n/zh/emqx_ee_schema_registry_schema.hocon @@ -0,0 +1,45 @@ +emqx_ee_schema_registry_schema { + +avro_type.desc: +"""[阿帕奇-阿夫罗](https://avro.apache.org/) 序列化格式。""" + +avro_type.label: +"""阿帕奇-阿夫罗""" + +schema_description.desc: +"""对该模式的描述。""" + +schema_description.label: +"""模式描述""" + +schema_name.desc: +"""模式的一个名称,将作为其标识符。""" + +schema_name.label: +"""模式名称""" + +schema_registry_root.desc: +"""模式注册表的配置。""" + +schema_registry_root.label: +"""模式注册表""" + +schema_registry_schemas.desc: +"""注册的模式。""" + +schema_registry_schemas.label: +"""注册的模式""" + +schema_source.desc: +"""模式的源文本。""" + +schema_source.label: +"""模式来源""" + +schema_type.desc: +"""模式类型。""" + +schema_type.label: +"""模式类型""" + +} diff --git a/rel/i18n/zh/emqx_exhook_api.hocon b/rel/i18n/zh/emqx_exhook_api.hocon new file mode 100644 index 000000000..aba0e3e58 --- /dev/null +++ b/rel/i18n/zh/emqx_exhook_api.hocon @@ -0,0 +1,81 @@ +emqx_exhook_api { + +add_server.desc: +"""添加 ExHook 服务器""" + +delete_server.desc: +"""删除 Exhook 服务器""" + +get_detail.desc: +"""查看 Exhook 服务器详细信息""" + +get_hooks.desc: +"""获取 Exhook 服务器的钩子信息""" + +hook_metrics.desc: +"""当前节点中该钩子的指标信息""" + +hook_name.desc: +"""钩子的名称""" + +hook_params.desc: +"""钩子注册时使用的参数""" + +list_all_servers.desc: +"""查看ExHook 服务器列表""" + +metric_failed.desc: +"""钩子执行失败的次数""" + +metric_max_rate.desc: +"""钩子的最大调用速率""" + +metric_rate.desc: +"""钩子的调用速率""" + +metric_succeed.desc: +"""钩子执行成功的次数""" + +metrics.desc: +"""指标信息""" + +move_api.desc: +"""移动 Exhook 服务器顺序。 +注意: 移动的参数只能是:front | rear | before:{name} | after:{name}""" + +move_api.label: +"""改变已注册的Exhook服务器的执行顺序""" + +move_position.desc: +"""移动的方向""" + +node.desc: +"""节点名称""" + +node_hook_metrics.desc: +"""所有节点中该钩子的指标信息""" + +node_metrics.desc: +"""所有节点中该服务器的指标信息""" + +node_status.desc: +"""所有节点中该服务器的状态信息""" + +server_metrics.desc: +"""当前节点中该服务器的指标信息""" + +server_name.desc: +"""Exhook 服务器的名称""" + +status.desc: +"""Exhook 服务器的状态。
+connected: 连接成功
+connecting: 连接失败,重连中
+disconnected: 连接失败,且未设置自动重连
+disabled: 该服务器未开启
+error: 查看该服务器状态时发生错误""" + +update_server.desc: +"""更新 Exhook 服务器""" + +} diff --git a/rel/i18n/zh/emqx_exhook_schema.hocon b/rel/i18n/zh/emqx_exhook_schema.hocon new file mode 100644 index 000000000..54d86169b --- /dev/null +++ b/rel/i18n/zh/emqx_exhook_schema.hocon @@ -0,0 +1,43 @@ +emqx_exhook_schema { + +auto_reconnect.desc: +"""自动重连到 gRPC 服务器的设置。 +当 gRPC 服务器不可用时,Exhook 将会按照这里设置的间隔时间进行重连,并重新初始化注册的钩子""" + +enable.desc: +"""开启这个 Exhook 服务器""" + +failed_action.desc: +"""当 gRPC 请求失败后的操作""" + +keepalive.desc: +"""当没有其他数据交换时,是否向连接的对端套接字定期的发送探测包。如果另一端没有响应,则认为连接断开,并向控制进程发送错误消息""" + +name.desc: +"""ExHook 服务器名称""" + +nodelay.desc: +"""如果为 true,则为套接字设置 TCP_NODELAY 选项,这意味着会立即发送数据包""" + +pool_size.desc: +"""gRPC 客户端进程池大小""" + +recbuf.desc: +"""套接字的最小接收缓冲区大小""" + +request_timeout.desc: +"""gRPC 服务器请求超时时间""" + +servers.desc: +"""ExHook 服务器列表""" + +sndbuf.desc: +"""套接字的最小发送缓冲区大小""" + +socket_options.desc: +"""连接套接字设置""" + +url.desc: +"""gRPC 服务器地址""" + +} diff --git a/rel/i18n/zh/emqx_exproto_schema.hocon b/rel/i18n/zh/emqx_exproto_schema.hocon new file mode 100644 index 000000000..794a563f3 --- /dev/null +++ b/rel/i18n/zh/emqx_exproto_schema.hocon @@ -0,0 +1,26 @@ +emqx_exproto_schema { + +exproto.desc: +"""ExProto 网关""" + +exproto_grpc_handler_address.desc: +"""对端 gRPC 服务器地址。""" + +exproto_grpc_handler_ssl.desc: +"""gRPC 客户端的 SSL 配置。""" + +exproto_grpc_server_bind.desc: +"""服务监听地址和端口。""" + +exproto_grpc_server_ssl.desc: +"""服务 SSL 配置。""" + +exproto_handler.desc: +"""配置 ExProto 网关需要请求的 ConnectionHandler 服务地址。 +该服务用于给 ExProto 提供客户端的 Socket 事件处理、字节解码、订阅消息接收等功能。""" + +exproto_server.desc: +"""配置 ExProto 网关需要启动的 ConnectionAdapter 服务。 +该服务用于提供客户端的认证、发布、订阅和数据下行等功能。""" + +} diff --git a/rel/i18n/zh/emqx_gateway_api.hocon b/rel/i18n/zh/emqx_gateway_api.hocon new file mode 100644 index 000000000..c9ea582a3 --- /dev/null +++ b/rel/i18n/zh/emqx_gateway_api.hocon @@ -0,0 +1,73 @@ +emqx_gateway_api { + +delete_gateway.desc: +"""停用指定网关""" + +enable_gateway.desc: +"""使用配置启动某一网关。""" + +gateway_created_at.desc: +"""网关创建时间""" + +gateway_current_connections.desc: +"""当前连接数""" + +gateway_enable_in_path.desc: +"""是否开启此网关""" + +gateway_listener_id.desc: +"""监听器 ID""" + +gateway_listener_name.desc: +"""监听器名称""" + +gateway_listener_running.desc: +"""监听器运行状态""" + +gateway_listener_type.desc: +"""监听器类型""" + +gateway_listeners.desc: +"""网关监听器列表""" + +gateway_max_connections.desc: +"""最大连接数""" + +gateway_name.desc: +"""网关名称""" + +gateway_name_in_qs.desc: +"""网关名称.
+可取值为 `stomp`、`mqttsn`、`coap`、`lwm2m`、`exproto`""" + +gateway_node_status.desc: +"""网关在集群中每个节点上的状态""" + +gateway_started_at.desc: +"""网关启用时间""" + +gateway_status.desc: +"""网关启用状态""" + +gateway_status_in_qs.desc: +"""通过网关状态筛选
+可选值为 `running`、`stopped`、`unloaded`""" + +gateway_stopped_at.desc: +"""网关停用时间""" + +get_gateway.desc: +"""获取网关配置详情""" + +list_gateway.desc: +"""该接口会返回指定或所有网关的概览状态, +包括当前状态、连接数、监听器状态等。""" + +node.desc: +"""节点名称""" + +update_gateway.desc: +"""更新指定网关的基础配置、和启用的状态。
+注:认证、和监听器的配置更新需参考对应的 API 接口。""" + +} diff --git a/rel/i18n/zh/emqx_gateway_api_authn.hocon b/rel/i18n/zh/emqx_gateway_api_authn.hocon new file mode 100644 index 000000000..662789551 --- /dev/null +++ b/rel/i18n/zh/emqx_gateway_api_authn.hocon @@ -0,0 +1,45 @@ +emqx_gateway_api_authn { + +add_authn.desc: +"""为指定网关开启认证器实现客户端认证的功能。
+当未配置认证器或关闭认证器时,则认为允许所有客户端的连接。
+注:在网关中仅支持添加一个认证器,而不是像 MQTT 一样允许配置多个认证器构成认证链。""" + +add_user.desc: +"""添加用户(仅支持 built_in_database 类型的认证器)""" + +delete_authn.desc: +"""删除指定网关的认证器。""" + +delete_user.desc: +"""删除用户(仅支持 built_in_database 类型的认证器)""" + +get_authn.desc: +"""获取指定网关认证器的配置 +当网关或认证未启用时,返回 404。""" + +get_user.desc: +"""获取用户信息(仅支持 built_in_database 类型的认证器)""" + +import_users.desc: +"""导入用户(仅支持 built_in_database 类型的认证器)""" + +is_superuser.desc: +"""是否是超级用户""" + +like_user_id.desc: +"""使用用户 ID (username 或 clientid)模糊搜索,仅支持按子串的方式进行搜索。""" + +list_users.desc: +"""获取用户列表(仅支持 built_in_database 类型的认证器)""" + +update_authn.desc: +"""更新指定网关认证器的配置,或停用认证器。""" + +update_user.desc: +"""更新用户信息(仅支持 built_in_database 类型的认证器)""" + +user_id.desc: +"""用户 ID""" + +} diff --git a/rel/i18n/zh/emqx_gateway_api_clients.hocon b/rel/i18n/zh/emqx_gateway_api_clients.hocon new file mode 100644 index 000000000..5dc7ff22a --- /dev/null +++ b/rel/i18n/zh/emqx_gateway_api_clients.hocon @@ -0,0 +1,207 @@ +emqx_gateway_api_clients { + +disconnected_at.desc: +"""客户端连接断开时间""" + +heap_size.desc: +"""进程堆内存大小,单位:字节""" + +send_oct.desc: +"""已发送字节数""" + +get_client.desc: +"""获取客户端信息""" + +param_gte_created_at.desc: +"""匹配会话创建时间大于等于指定值的客户端""" + +param_conn_state.desc: +"""匹配客户端连接状态""" + +send_pkt.desc: +"""已发送应用层协议控制报文数""" + +clean_start.desc: +"""标识客户端是否以 clean_start 的标志连接到网关""" + +inflight_cnt.desc: +"""客户端当前飞行窗口大小""" + +delete_subscription.desc: +"""为某客户端删除某订阅关系""" + +param_lte_connected_at.desc: +"""匹配连接创建时间小于等于指定值的客户端""" + +node.desc: +"""客户端连接到的节点名称""" + +awaiting_rel_cnt.desc: +"""客户端当前等待 PUBREL 确认的 PUBREC 消息的条数""" + +rap.desc: +"""Retain as Published 选项,枚举:0,1""" + +inflight_max.desc: +"""客户端允许的飞行窗口最大值""" + +param_username.desc: +"""匹配客户端 Username""" + +param_like_endpoint_name.desc: +"""子串匹配 LwM2M 客户端 Endpoint Name""" + +created_at.desc: +"""会话创建时间""" + +sub_props.desc: +"""订阅属性""" + +list_clients.desc: +"""获取指定网关的客户端列表""" + +subscriptions_cnt.desc: +"""客户端已订阅主题数""" + +mailbox_len.desc: +"""进程邮箱大小""" + +send_cnt.desc: +"""已发送 Socket 报文次数""" + +rh.desc: +"""Retain Handling 选项,枚举:0,1,2""" + +connected.desc: +"""标识客户端是否已连接到网关""" + +qos.desc: +"""QoS 等级,枚举:0,1,2""" + +ip_address.desc: +"""客户端 IP 地址""" + +param_gte_connected_at.desc: +"""匹配连接创建时间大于等于指定值的客户端""" + +awaiting_rel_max.desc: +"""客户端允许的最大 PUBREC 等待队列长度""" + +param_like_username.desc: +"""子串匹配 客户端 Username""" + +nl.desc: +"""No Local 选项,枚举:0,1""" + +param_like_clientid.desc: +"""子串匹配客户端 ID""" + +param_lte_created_at.desc: +"""匹配会话创建时间小于等于指定值的客户端""" + +topic.desc: +"""主题过滤器或主题名称""" + +proto_ver.desc: +"""客户端使用的协议版本""" + +mountpoint.desc: +"""主题固定前缀""" + +proto_name.desc: +"""客户端使用的协议名称""" + +param_lte_lifetime.desc: +"""匹配心跳时间小于等于指定值的 LwM2M 客户端""" + +port.desc: +"""客户端端口""" + +connected_at.desc: +"""客户端连接时间""" + +expiry_interval.desc: +"""会话超期时间,单位:秒""" + +username.desc: +"""客户端连接的用户名""" + +param_clean_start.desc: +"""匹配客户端 `clean_start` 标记""" + +recv_msg.desc: +"""已接收上行的消息条数""" + +list_subscriptions.desc: +"""获取某客户端的主题订阅列表""" + +recv_oct.desc: +"""已接收的字节数""" + +keepalive.desc: +"""Keepalive 时间,单位:秒""" + +param_clientid.desc: +"""匹配客户端 ID""" + +subscriptions_max.desc: +"""客户端允许订阅的最大主题数""" + +param_ip_address.desc: +"""匹配客户端 IP 地址""" + +mqueue_max.desc: +"""客户端允许的最大消息队列长度""" + +mqueue_dropped.desc: +"""由于消息队列过程,客户端消息队列丢弃消息条数""" + +subid.desc: +"""订阅ID,仅用于 Stomp 网关。用于创建订阅关系时指定订阅 ID。取值范围 1-65535。""" + +clientid.desc: +"""客户端 ID""" + +kick_client.desc: +"""踢出指定客户端""" + +is_bridge.desc: +"""标识客户端是否通过 is_bridge 标志连接""" + +lifetime.desc: +"""LwM2M 客户端心跳周期""" + +send_msg.desc: +"""已发送下行消息数条数""" + 
+add_subscription.desc: +"""为某客户端新增订阅关系""" + +param_endpoint_name.desc: +"""匹配 LwM2M 客户端 Endpoint Name""" + +param_node.desc: +"""匹配客户端的节点名称""" + +recv_cnt.desc: +"""已接收 Socket 报文次数""" + +recv_pkt.desc: +"""已接收应用层协议控制报文数""" + +endpoint_name.desc: +"""LwM2M 客户端 Endpoint Name""" + +param_proto_ver.desc: +"""匹配客户端协议版本""" + +reductions.desc: +"""进程已消耗 Reduction 数""" + +param_gte_lifetime.desc: +"""匹配心跳时间大于等于指定值的 LwM2M 客户端""" + +mqueue_len.desc: +"""客户端当前消息队列长度""" + +} diff --git a/rel/i18n/zh/emqx_gateway_api_listeners.hocon b/rel/i18n/zh/emqx_gateway_api_listeners.hocon new file mode 100644 index 000000000..731b14b74 --- /dev/null +++ b/rel/i18n/zh/emqx_gateway_api_listeners.hocon @@ -0,0 +1,65 @@ +emqx_gateway_api_listeners { + +add_listener.desc: +"""为指定网关添加监听器。
+注:对于某网关不支持的监听器类型,该接口会返回 `400: BAD_REQUEST`。""" + +add_listener_authn.desc: +"""为指定监听器开启认证器以实现客户端认证的能力。
+当某一监听器开启认证后,所有连接到该监听器的客户端会使用该认证器进行认证。""" + +add_user.desc: +"""添加用户(仅支持 built_in_database 类型的认证器)""" + +current_connections.desc: +"""当前连接数""" + +delete_listener.desc: +"""删除指定监听器。被删除的监听器下所有已连接的客户端都会离线。""" + +delete_listener_authn.desc: +"""移除指定监听器的认证器。""" + +delete_user.desc: +"""删除用户(仅支持 built_in_database 类型的认证器)""" + +get_listener.desc: +"""获取指定网关监听器的配置。""" + +get_listener_authn.desc: +"""获取监听器的认证器配置。""" + +get_user.desc: +"""获取用户信息(仅支持 built_in_database 类型的认证器)""" + +import_users.desc: +"""导入用户(仅支持 built_in_database 类型的认证器)""" + +list_listeners.desc: +"""获取网关监听器列表。该接口会返回监听器所有的配置(包括该监听器上的认证器),同时也会返回该监听器在集群中运行的状态。""" + +list_users.desc: +"""获取用户列表(仅支持 built_in_database 类型的认证器)""" + +listener_id.desc: +"""监听器 ID""" + +listener_node_status.desc: +"""监听器在集群中每个节点上的状态""" + +listener_status.desc: +"""监听器状态""" + +node.desc: +"""节点名称""" + +update_listener.desc: +"""更新某网关监听器的配置。被更新的监听器会执行重启,所有已连接到该监听器上的客户端都会被断开。""" + +update_listener_authn.desc: +"""更新指定监听器的认证器配置,或停用/启用该认证器。""" + +update_user.desc: +"""更新用户信息(仅支持 built_in_database 类型的认证器)""" + +} diff --git a/rel/i18n/zh/emqx_gateway_schema.hocon b/rel/i18n/zh/emqx_gateway_schema.hocon new file mode 100644 index 000000000..40cee4efb --- /dev/null +++ b/rel/i18n/zh/emqx_gateway_schema.hocon @@ -0,0 +1,112 @@ +emqx_gateway_schema { + +dtls_listener_acceptors.desc: +"""Acceptor 进程池大小。""" + +dtls_listener_dtls_opts.desc: +"""DTLS Socket 配置""" + +gateway_common_authentication.desc: +"""网关的认证器配置,对该网关下所以的监听器生效。如果每个监听器需要配置不同的认证器,需要配置监听器下的 authentication 字段。""" + +gateway_common_clientinfo_override.desc: +"""ClientInfo 重写。""" + +gateway_common_clientinfo_override_clientid.desc: +"""clientid 重写模板""" + +gateway_common_clientinfo_override_password.desc: +"""password 重写模板""" + +gateway_common_clientinfo_override_username.desc: +"""username 重写模板""" + +gateway_common_enable.desc: +"""是否启用该网关""" + +gateway_common_enable_stats.desc: +"""是否开启客户端统计""" + +gateway_common_idle_timeout.desc: +"""客户端连接过程的空闲时间。该配置用于: + 1. 一个新创建的客户端进程如果在该时间间隔内没有收到任何客户端请求,将被直接关闭。 + 2. 一个正在运行的客户进程如果在这段时间后没有收到任何客户请求,将进入休眠状态以节省资源。""" + +gateway_common_listener_access_rules.desc: +"""配置监听器的访问控制规则。 +见:https://github.com/emqtt/esockd#allowdeny""" + +gateway_common_listener_bind.desc: +"""监听器绑定的 IP 地址或端口。""" + +gateway_common_listener_enable.desc: +"""是否启用该监听器。""" + +gateway_common_listener_enable_authn.desc: +"""配置 true (默认值)启用客户端进行身份认证。 +配置 false 时,将不对客户端做任何认证。""" + +gateway_common_listener_max_conn_rate.desc: +"""监听器支持的最大连接速率。""" + +gateway_common_listener_max_connections.desc: +"""监听器支持的最大连接数。""" + +gateway_mountpoint.desc: +"""发布或订阅时,在所有主题前增加前缀字符串。 +当消息投递给订阅者时,前缀字符串将从主题名称中删除。挂载点是用户可以用来实现不同监听器之间的消息路由隔离的一种方式。 +例如,如果客户端 A 在 `listeners.tcp.\.mountpoint` 设置为 `some_tenant` 的情况下订阅 `t`, +则客户端实际上订阅了 `some_tenant/t` 主题。 +类似地,如果另一个客户端 B(连接到与客户端 A 相同的侦听器)向主题 `t` 发送消息, +则该消息被路由到所有订阅了 `some_tenant/t` 的客户端,因此客户端 A 将收到该消息,带有 主题名称`t`。 设置为 `""` 以禁用该功能。 +挂载点字符串中可用的变量:
+ - ${clientid}:客户端 ID
+ - ${username}:用户名""" + +listener_name_to_settings_map.desc: +"""从监听器名称到配置参数的映射。""" + +ssl_listener_options.desc: +"""SSL Socket 配置。""" + +tcp_listener_acceptors.desc: +"""Acceptor 进程池大小。""" + +tcp_listener_proxy_protocol.desc: +"""是否开启 Proxy Protocol V1/2。当 EMQX 集群部署在 HAProxy 或 Nginx 后需要获取客户端真实 IP 时常用到该选项。参考:https://www.haproxy.com/blog/haproxy/proxy-protocol/""" + +tcp_listener_proxy_protocol_timeout.desc: +"""接收 Proxy Protocol 报文头的超时时间。如果在超时内没有收到 Proxy Protocol 包,EMQX 将关闭 TCP 连接。""" + +tcp_listener_tcp_opts.desc: +"""TCP Socket 配置。""" + +tcp_listeners.desc: +"""配置 TCP 类型的监听器。""" + +tcp_udp_listeners.desc: +"""监听器配置。""" + +udp_listener_active_n.desc: +"""为 Socket 指定 {active, N} 选项。 +参见:https://erlang.org/doc/man/inet.html#setopts-2""" + +udp_listener_buffer.desc: +"""Socket 在用户空间的缓冲区大小。""" + +udp_listener_recbuf.desc: +"""Socket 在内核空间接收缓冲区的大小。""" + +udp_listener_reuseaddr.desc: +"""允许重用本地处于 TIME_WAIT 的端口号。""" + +udp_listener_sndbuf.desc: +"""Socket 在内核空间发送缓冲区的大小。""" + +udp_listener_udp_opts.desc: +"""UDP Socket 配置。""" + +udp_listeners.desc: +"""配置 UDP 类型的监听器。""" + +} diff --git a/rel/i18n/zh/emqx_license_http_api.hocon b/rel/i18n/zh/emqx_license_http_api.hocon new file mode 100644 index 000000000..4ad471684 --- /dev/null +++ b/rel/i18n/zh/emqx_license_http_api.hocon @@ -0,0 +1,15 @@ +emqx_license_http_api { + +desc_license_info_api.desc: +"""获取许可证信息""" + +desc_license_info_api.label: +"""许可证信息""" + +desc_license_key_api.desc: +"""更新一个许可证密钥""" + +desc_license_key_api.label: +"""更新许可证""" + +} diff --git a/rel/i18n/zh/emqx_license_schema.hocon b/rel/i18n/zh/emqx_license_schema.hocon new file mode 100644 index 000000000..0bf5256e8 --- /dev/null +++ b/rel/i18n/zh/emqx_license_schema.hocon @@ -0,0 +1,29 @@ +emqx_license_schema { + +connection_high_watermark_field.desc: +"""高水位线,连接数超过这个水位线时,系统会触发许可证连接配额使用告警""" + +connection_high_watermark_field.label: +"""连接高水位""" + +connection_low_watermark_field.desc: +"""低水位限制,低于此水位线时系统会清除连接配额使用告警""" + +connection_low_watermark_field.label: +"""连接低水位线""" + +key_field.desc: +"""许可证字符串""" + +key_field.label: +"""许可证字符串""" + +license_root.desc: +"""EMQX企业许可证。 +EMQX 自带一个默认的试用许可证,默认试用许可允许最多接入 100 个连接,签发时间是 2023年1月9日,有效期是 5 年(1825 天)。若需要在生产环境部署, +请访问 https://www.emqx.com/apply-licenses/emqx 来申请。""" + +license_root.label: +"""许可证""" + +} diff --git a/rel/i18n/zh/emqx_limiter_schema.hocon b/rel/i18n/zh/emqx_limiter_schema.hocon new file mode 100644 index 000000000..4f5a0ce2f --- /dev/null +++ b/rel/i18n/zh/emqx_limiter_schema.hocon @@ -0,0 +1,89 @@ +emqx_limiter_schema { + +bucket_cfg.desc: +"""桶的配置""" + +bucket_cfg.label: +"""桶的配置""" + +burst.desc: +"""突发速率。 +突发速率允许短时间内速率超过设置的速率值,突发速率 + 速率 = 当前桶能达到的最大速率值""" + +burst.label: +"""突发速率""" + +bytes.desc: +"""流入字节率控制器。 +这个是用来控制当前节点上的数据流入的字节率,每条消息将会消耗和其二进制大小等量的令牌,当达到最大速率后,会话将会被限速甚至被强制挂起一小段时间""" + +bytes.label: +"""流入字节率""" + +client.desc: +"""对桶的每个使用者的速率控制设置""" + +client.label: +"""每个使用者的限制""" + +connection.desc: +"""连接速率控制器。 +这个用来控制当前节点上的连接速率,当达到最大速率后,新的连接将会被拒绝""" + +connection.label: +"""连接速率""" + +divisible.desc: +"""申请的令牌数是否可以被分割""" + +divisible.label: +"""是否可分割""" + +failure_strategy.desc: +"""当所有的重试都失败后的处理策略""" + +failure_strategy.label: +"""失败策略""" + +initial.desc: +"""桶中的初始令牌数""" + +initial.label: +"""初始令牌数""" + +internal.desc: +"""EMQX 内部功能所用限制器。""" + +low_watermark.desc: +"""当桶中剩余的令牌数低于这个值,即使令牌申请成功了,也会被强制暂停一会儿""" + +low_watermark.label: +"""低水位线""" + +max_retry_time.desc: +"""申请失败后,尝试重新申请的时长最大值""" + +max_retry_time.label: +"""最大重试时间""" + +message_routing.desc: +"""消息派发速率控制器。 
+这个用来控制当前节点内的消息派发速率,当达到最大速率后,新的推送将会被拒绝""" + +message_routing.label: +"""消息派发""" + +messages.desc: +"""流入速率控制器。 +这个用来控制当前节点上的消息流入速率,当达到最大速率后,会话将会被限速甚至被强制挂起一小段时间""" + +messages.label: +"""消息流入速率""" + +rate.desc: +"""桶的令牌生成速率""" + +rate.label: +"""速率""" + +} diff --git a/rel/i18n/zh/emqx_lwm2m_api.hocon b/rel/i18n/zh/emqx_lwm2m_api.hocon new file mode 100644 index 000000000..7cf53060a --- /dev/null +++ b/rel/i18n/zh/emqx_lwm2m_api.hocon @@ -0,0 +1,27 @@ +emqx_lwm2m_api { + +dataType.desc: +"""数据类型""" + +lookup_resource.desc: +"""查看指定资源状态""" + +name.desc: +"""资源名称""" + +observe_resource.desc: +"""Observe/Un-Observe 指定资源""" + +operations.desc: +"""资源可用操作列表""" + +path.desc: +"""资源路径""" + +read_resource.desc: +"""发送读指令到某资源""" + +write_resource.desc: +"""发送写指令到某资源""" + +} diff --git a/rel/i18n/zh/emqx_lwm2m_schema.hocon b/rel/i18n/zh/emqx_lwm2m_schema.hocon new file mode 100644 index 000000000..3dea6a0c6 --- /dev/null +++ b/rel/i18n/zh/emqx_lwm2m_schema.hocon @@ -0,0 +1,56 @@ +emqx_lwm2m_schema { + +lwm2m.desc: +"""LwM2M 网关配置。仅支持 v1.0.1 协议。""" + +lwm2m_auto_observe.desc: +"""自动 Observe REGISTER 数据包的 Object 列表。""" + +lwm2m_lifetime_max.desc: +"""允许 LwM2M 客户端允许设置的心跳最大值。""" + +lwm2m_lifetime_min.desc: +"""允许 LwM2M 客户端允许设置的心跳最小值。""" + +lwm2m_qmode_time_window.desc: +"""在QMode模式下,LwM2M网关认为网络链接有效的时间窗口的值。 +例如,在收到客户端的更新信息后,在这个时间窗口内的任何信息都会直接发送到LwM2M客户端,而超过这个时间窗口的所有信息都会暂时储存在内存中。""" + +lwm2m_translators.desc: +"""LwM2M 网关订阅/发布消息的主题映射配置。""" + +lwm2m_translators_command.desc: +"""下行命令主题。 +对于每个成功上线的新 LwM2M 客户端,网关会创建一个订阅关系来接收下行消息并将其发送给客户端。""" + +lwm2m_translators_notify.desc: +"""用于发布来自 LwM2M 客户端的通知事件的主题。 +在成功 Observe 到 LwM2M 客户端的资源后,如果客户端报告任何资源状态的变化,网关将通过该主题发送通知事件。""" + +lwm2m_translators_register.desc: +"""用于发布来自 LwM2M 客户端的注册事件的主题。""" + +lwm2m_translators_response.desc: +"""用于网关发布来自 LwM2M 客户端的确认事件的主题。""" + +lwm2m_translators_update.desc: +"""用于发布来自LwM2M客户端的更新事件的主题。""" + +lwm2m_update_msg_publish_condition.desc: +"""发布UPDATE事件消息的策略。
+ - always: 只要收到 UPDATE 请求,就发送更新事件。
+ - contains_object_list: 仅当 UPDATE 请求携带 Object 列表时才发送更新事件。""" + +lwm2m_xml_dir.desc: +"""LwM2M Resource 定义的 XML 文件目录路径。""" + +translator.desc: +"""配置某网关客户端对于发布消息或订阅的主题和 QoS 等级。""" + +translator_qos.desc: +"""QoS 等级""" + +translator_topic.desc: +"""主题名称""" + +} diff --git a/rel/i18n/zh/emqx_mgmt_api_alarms.hocon b/rel/i18n/zh/emqx_mgmt_api_alarms.hocon new file mode 100644 index 000000000..d9dafd867 --- /dev/null +++ b/rel/i18n/zh/emqx_mgmt_api_alarms.hocon @@ -0,0 +1,37 @@ +emqx_mgmt_api_alarms { + +activate_at.desc: +"""告警开始时间,使用 rfc3339 标准时间格式。""" + +deactivate_at.desc: +"""告警结束时间,使用 rfc3339 标准时间格式。""" + +delete_alarms_api.desc: +"""删除所有历史告警。""" + +delete_alarms_api_response204.desc: +"""历史告警已成功清除。""" + +details.desc: +"""告警详情,提供了更多的告警信息,主要提供给程序处理。""" + +duration.desc: +"""表明告警已经持续了多久,单位:毫秒。""" + +get_alarms_qs_activated.desc: +"""用于指定查询的告警类型, +为 true 时返回当前激活的告警,为 false 时返回历史告警,默认为 false。""" + +list_alarms_api.desc: +"""列出当前激活的告警或历史告警,由查询参数决定。""" + +message.desc: +"""告警消息,以人类可读的方式描述告警内容。""" + +name.desc: +"""告警名称,用于区分不同的告警。""" + +node.desc: +"""触发此告警的节点名称。""" + +} diff --git a/rel/i18n/zh/emqx_mgmt_api_banned.hocon b/rel/i18n/zh/emqx_mgmt_api_banned.hocon new file mode 100644 index 000000000..cee3ba288 --- /dev/null +++ b/rel/i18n/zh/emqx_mgmt_api_banned.hocon @@ -0,0 +1,54 @@ +emqx_mgmt_api_banned { + +as.desc: +"""封禁方式,可以通过客户端 ID、用户名或者 IP 地址等方式进行封禁。""" + +as.label: +"""封禁方式""" + +at.desc: +"""封禁的起始时间,格式为 rfc3339,默认为发起操作的时间。""" + +at.label: +"""封禁时间""" + +by.desc: +"""封禁的发起者。""" + +by.label: +"""封禁发起者""" + +create_banned_api.desc: +"""添加一个客户端 ID、用户名或者 IP 地址到黑名单。""" + +create_banned_api_response400.desc: +"""错误的请求,可能是参数错误或封禁对象已存在等原因。""" + +delete_banned_api.desc: +"""将一个客户端 ID、用户名或者 IP 地址从黑名单中删除。""" + +delete_banned_api_response404.desc: +"""未在黑名单中找到该封禁对象。""" + +list_banned_api.desc: +"""列出目前所有被封禁的客户端 ID、用户名和 IP 地址。""" + +reason.desc: +"""封禁原因,记录当前对象被封禁的原因。""" + +reason.label: +"""封禁原因""" + +until.desc: +"""封禁的结束时间,格式为 rfc3339,默认值为发起操作的时间 + 1 年。""" + +until.label: +"""封禁结束时间""" + +who.desc: +"""封禁对象,具体的客户端 ID、用户名或者 IP 地址。""" + +who.label: +"""封禁对象""" + +} diff --git a/rel/i18n/zh/emqx_mgmt_api_key_schema.hocon b/rel/i18n/zh/emqx_mgmt_api_key_schema.hocon new file mode 100644 index 000000000..9a0536fe6 --- /dev/null +++ b/rel/i18n/zh/emqx_mgmt_api_key_schema.hocon @@ -0,0 +1,19 @@ +emqx_mgmt_api_key_schema { + +api_key.desc: +"""API 密钥, 可用于请求除管理 API 密钥及 Dashboard 用户管理 API 的其它接口""" + +api_key.label: +"""API 密钥""" + +bootstrap_file.desc: +"""用于在启动 emqx 时,添加 API 密钥,其格式为: + ``` + 7e729ae70d23144b:2QILI9AcQ9BYlVqLDHQNWN2saIjBV4egr1CZneTNKr9CpK + ec3907f865805db0:Ee3taYltUKtoBVD9C3XjQl9C6NXheip8Z9B69BpUv5JxVHL + ```""" + +bootstrap_file.label: +"""API 密钥初始化文件""" + +} diff --git a/rel/i18n/zh/emqx_mgmt_api_publish.hocon b/rel/i18n/zh/emqx_mgmt_api_publish.hocon new file mode 100644 index 000000000..2a532fbdd --- /dev/null +++ b/rel/i18n/zh/emqx_mgmt_api_publish.hocon @@ -0,0 +1,81 @@ +emqx_mgmt_api_publish { + +error_message.desc: +"""失败的详细原因。""" + +message_id.desc: +"""全局唯一的一个消息 ID,方便用于关联和追踪。""" + +message_properties.desc: +"""PUBLISH 消息里的 Property 字段。""" + +msg_content_type.desc: +"""内容类型标识符,以 UTF-8 格式编码的字符串,用来描述应用消息的内容,服务端必须把收到的应用消息中的内容类型原封不动的发送给所有的订阅者。""" + +msg_correlation_data.desc: +"""对比数据标识符,服务端在收到应用消息时必须原封不动的把对比数据发送给所有的订阅者。对比数据只对请求消息(Request Message)的发送端和响应消息(Response Message)的接收端有意义。""" + +msg_message_expiry_interval.desc: +"""消息过期间隔标识符,以秒为单位。当消失已经过期时,如果服务端还没有开始向匹配的订阅者投递该消息,则服务端会删除该订阅者的消息副本。如果不设置,则消息永远不会过期""" + +msg_payload_format_indicator.desc: +"""载荷格式指示标识符,0 
表示载荷是未指定格式的数据,相当于没有发送载荷格式指示;1 表示载荷是 UTF-8 编码的字符数据,载荷中的 UTF-8 数据必须是按照 Unicode 的规范和 RFC 3629 的标准要求进行编码的。""" + +msg_response_topic.desc: +"""响应主题标识符, UTF-8 编码的字符串,用作响应消息的主题名。响应主题不能包含通配符,也不能包含多个主题,否则将造成协议错误。当存在响应主题时,消息将被视作请求报文。服务端在收到应用消息时必须将响应主题原封不动的发送给所有的订阅者。""" + +msg_user_properties.desc: +"""指定 MQTT 消息的 User Property 键值对。注意,如果出现重复的键,只有最后一个会保留。""" + +payload.desc: +"""MQTT 消息体。""" + +payload_encoding.desc: +"""MQTT 消息体的编码方式,可以是 base64plain。当设置为 base64 时,消息在发布前会先被解码。""" + +publish_api.desc: +"""发布一个消息。
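For orientation, a request body for this publish endpoint might look like the sketch below; it only uses fields documented in this file, and all values are illustrative:

```
{
  "topic": "t/1",
  "qos": 1,
  "payload": "hello",
  "retain": false
}
```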
+可能的 HTTP 状态码如下:
+200: 消息被成功发送到至少一个订阅。
+202: 没有匹配到任何订阅。
+400: 消息编码错误,如非法主题,或 QoS 超出范围等。
+503: 服务重启等过程中导致转发失败。""" + +publish_api.label: +"""发布一条信息""" + +publish_bulk_api.desc: +"""批量发布一组消息。
+可能的 HTTP 状态码如下:
+200: 所有的消息都被成功发送到至少一个订阅。
+202: 至少有一个消息没有匹配到任何订阅。
+400: 至少有一个消息编码错误,如非法主题,或 QoS 超出范围等。
+503: 至少有一个消息因为服务重启的原因导致转发失败。
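Assuming the bulk endpoint accepts an array of message objects shaped like the single-publish body sketched earlier (an assumption, not stated here), a request might look like:

```
[
  { "topic": "t/1", "qos": 1, "payload": "hello" },
  { "topic": "t/2", "qos": 0, "payload": "aGVsbG8=", "payload_encoding": "base64" }
]
```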
+ +请求的 Body 或者 Body 中包含的某个消息无法通过 API 规范的类型检查时,HTTP 响应的消息与发布单个消息的 API + /publish 是一样的。 +如果所有的消息都是合法的,那么 HTTP 返回的内容是一个 JSON 数组,每个元素代表了该消息转发的状态。""" + +publish_bulk_api.label: +"""发布一批信息""" + +qos.desc: +"""MQTT 消息的 QoS""" + +reason_code.desc: +"""MQTT 消息发布的错误码,这些错误码也是 MQTT 规范中 PUBACK 消息可能携带的错误码。
+当前支持如下错误码:
+ +16(0x10):没能匹配到任何订阅;
+131(0x81):消息转发时发生错误,例如 EMQX 服务重启;
+144(0x90):主题名称非法;
+151(0x97):受到了速率限制,或者消息尺寸过大。全局消息大小限制可以通过配置项 mqtt.max_packet_size 来进行修改。
+注意:消息尺寸的是通过主题和消息体的字节数进行估算的。具体发布时所占用的字节数可能会稍大于这个估算的值。""" + +retain.desc: +"""布尔型字段,用于表示该消息是否保留消息。""" + +topic_name.desc: +"""主题名称""" + +} diff --git a/rel/i18n/zh/emqx_mgmt_api_status.hocon b/rel/i18n/zh/emqx_mgmt_api_status.hocon new file mode 100644 index 000000000..3625db967 --- /dev/null +++ b/rel/i18n/zh/emqx_mgmt_api_status.hocon @@ -0,0 +1,22 @@ +emqx_mgmt_api_status { + +get_status_api.desc: +"""作为节点的健康检查。 返回一个纯文本的响应,描述节点的状态。 + +如果 EMQX 应用程序已经启动并运行,返回状态代码 200,否则返回 503。 + +这个API是在v5.0.10中引入的。 +GET `/status`端点(没有`/api/...`前缀)也是这个端点的一个别名,工作方式相同。 这个别名从v5.0.0开始就有了。""" + +get_status_api.label: +"""服务健康检查""" + +get_status_response200.desc: +"""Node emqx@127.0.0.1 is started +emqx is running""" + +get_status_response503.desc: +"""Node emqx@127.0.0.1 is stopped +emqx is not_running""" + +} diff --git a/rel/i18n/zh/emqx_modules_schema.hocon b/rel/i18n/zh/emqx_modules_schema.hocon new file mode 100644 index 000000000..e1c2ca913 --- /dev/null +++ b/rel/i18n/zh/emqx_modules_schema.hocon @@ -0,0 +1,45 @@ +emqx_modules_schema { + +enable.desc: +"""是否开启该功能""" + +max_delayed_messages.desc: +"""延迟消息的数量上限(0 代表无限)""" + +rewrite.desc: +"""EMQX 的主题重写功能支持根据用户配置的规则在客户端订阅主题、发布消息、取消订阅的时候将 A 主题重写为 B 主题。 +重写规则分为 Pub 规则和 Sub 规则,Pub 规则匹配 PUSHLISH 报文携带的主题,Sub 规则匹配 SUBSCRIBE、UNSUBSCRIBE 报文携带的主题。 +每条重写规则都由主题过滤器、正则表达式、目标表达式三部分组成。 +在主题重写功能开启的前提下,EMQX 在收到诸如 PUBLISH 报文等带有主题的 MQTT 报文时,将使用报文中的主题去依次匹配配置文件中规则的主题过滤器部分,一旦成功匹配,则使用正则表达式提取主题中的信息,然后替换至目标表达式以构成新的主题。 +目标表达式中可以使用 `$N` 这种格式的变量匹配正则表达中提取出来的元素,`$N` 的值为正则表达式中提取出来的第 N 个元素,比如 `$1` 即为正则表达式提取的第一个元素。 +需要注意的是,EMQX 使用倒序读取配置文件中的重写规则,当一条主题可以同时匹配多条主题重写规则的主题过滤器时,EMQX 仅会使用它匹配到的第一条规则进行重写,如果该条规则中的正则表达式与 MQTT 报文主题不匹配,则重写失败,不会再尝试使用其他的规则进行重写。 +因此用户在使用时需要谨慎的设计 MQTT 报文主题以及主题重写规则。""" + +rewrite.label: +"""主题重写""" + +tr_action.desc: +"""主题重写在哪种操作上生效: + - `subscribe`:订阅时重写主题; + - `publish`:发布时重写主题; + -`all`:全部重写主题""" + +tr_action.label: +"""Action""" + +tr_dest_topic.desc: +"""目标主题。""" + +tr_dest_topic.label: +"""目标主题""" + +tr_re.desc: +"""正则表达式""" + +tr_source_topic.desc: +"""源主题,客户端业务指定的主题""" + +tr_source_topic.label: +"""源主题""" + +} diff --git a/rel/i18n/zh/emqx_mqttsn_schema.hocon b/rel/i18n/zh/emqx_mqttsn_schema.hocon new file mode 100644 index 000000000..c6d3a98a6 --- /dev/null +++ b/rel/i18n/zh/emqx_mqttsn_schema.hocon @@ -0,0 +1,30 @@ +emqx_mqttsn_schema { + +mqttsn.desc: +"""MQTT-SN 网关配置。当前实现仅支持 v1.2 版本""" + +mqttsn_broadcast.desc: +"""是否周期性广播 ADVERTISE 消息""" + +mqttsn_enable_qos3.desc: +"""是否允许无连接的客户端发送 QoS 等于 -1 的消息。 +该功能主要用于支持轻量的 MQTT-SN 客户端实现,它不会向网关建立连接,注册主题,也不会发起订阅;它只使用 QoS 为 -1 来发布消息""" + +mqttsn_gateway_id.desc: +"""MQTT-SN 网关 ID。 +当 broadcast 打开时,MQTT-SN 网关会使用该 ID 来广播 ADVERTISE 消息""" + +mqttsn_predefined.desc: +"""预定义主题列表。 +预定义的主题列表,是一组 主题 ID 和 主题名称 的映射关系。使用预先定义的主题列表,可以减少 MQTT-SN 客户端和网关对于固定主题的注册请求""" + +mqttsn_predefined_id.desc: +"""主题 ID。范围:1-65535""" + +mqttsn_predefined_topic.desc: +"""主题名称。注:不支持通配符""" + +mqttsn_subs_resume.desc: +"""在会话被重用后,网关是否主动向客户端注册对已订阅主题名称""" + +} diff --git a/rel/i18n/zh/emqx_plugins_schema.hocon b/rel/i18n/zh/emqx_plugins_schema.hocon new file mode 100644 index 000000000..4da5c5a7b --- /dev/null +++ b/rel/i18n/zh/emqx_plugins_schema.hocon @@ -0,0 +1,46 @@ +emqx_plugins_schema { + +check_interval.desc: +"""检查间隔:检查集群中插件的状态是否一致,
+如果连续3次检查结果不一致,则报警。""" + +enable.desc: +"""设置为“true”以启用此插件""" + +enable.label: +"""启用""" + +install_dir.desc: +"""插件安装包的目录,出于安全考虑,该目录应该值允许 emqx,或用于运行 EMQX 服务的用户拥有写入权限。""" + +install_dir.label: +"""安装目录""" + +name_vsn.desc: +"""插件的名称{name}-{version}。
+它应该与插件的发布包名称一致,如my_plugin-0.1.0。""" + +name_vsn.label: +"""名称-版本""" + +plugins.desc: +"""管理EMQX插件。
+插件可以是EMQX安装包中的一部分,也可以是一个独立的安装包。
+独立安装的插件称为“外部插件”。""" + +plugins.label: +"""插件""" + +state.desc: +"""描述插件的状态""" + +state.label: +"""插件状态""" + +states.desc: +"""一组插件的状态。插件将按照定义的顺序启动""" + +states.label: +"""插件启动顺序及状态""" + +} diff --git a/rel/i18n/zh/emqx_prometheus_schema.hocon b/rel/i18n/zh/emqx_prometheus_schema.hocon new file mode 100644 index 000000000..a1e59e517 --- /dev/null +++ b/rel/i18n/zh/emqx_prometheus_schema.hocon @@ -0,0 +1,47 @@ +emqx_prometheus_schema { + +enable.desc: +"""开启或关闭 Prometheus 数据推送""" + +headers.desc: +"""推送到 Push Gateway 的 HTTP Headers 列表。
+例如, { Authorization = "some-authz-tokens"}""" + +interval.desc: +"""数据推送间隔""" + +job_name.desc: +"""推送到 Push Gateway 的 Job 名称。可用变量为:
+- ${name}: EMQX 节点的名称。 +- ${host}: EMQX 节点主机名。 +例如,当 EMQX 节点名为 emqx@127.0.0.1 时,name 变量的值为 emqx,host 变量的值为 127.0.0.1
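Pulling the fields of this file together, a minimal push configuration might look like the sketch below; the gateway address, interval, token, and job-name template are assumed placeholders:

```
prometheus {
  push_gateway_server = "http://127.0.0.1:9091"
  interval = "15s"
  headers { Authorization = "some-authz-tokens" }
  # a custom template using the variables described above (illustrative)
  job_name = "${name}/${host}"
}
```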
+默认值为: ${name}/instance/${name}~${host}""" + +mnesia_collector.desc: +"""开启或关闭 Mnesia 采集器, 使用 mnesia:system_info/1 收集 Mnesia 相关指标""" + +prometheus.desc: +"""Prometheus 监控数据推送""" + +prometheus.label: +"""Prometheus""" + +push_gateway_server.desc: +"""Prometheus 服务器地址""" + +vm_dist_collector.desc: +"""开启或关闭 VM 分布采集器,收集 Erlang 分布机制中涉及的套接字和进程的信息。""" + +vm_memory_collector.desc: +"""开启或关闭 VM 内存采集器, 使用 erlang:memory/0 收集 Erlang 虚拟机动态分配的内存信息,同时提供基本的 (D)ETS 统计信息""" + +vm_msacc_collector.desc: +"""开启或关闭 VM msacc 采集器, 使用 erlang:statistics(microstate_accounting) 收集微状态计数指标""" + +vm_statistics_collector.desc: +"""开启或关闭 VM 统计采集器, 使用 erlang:statistics/1 收集 Erlang VM 相关指标""" + +vm_system_info_collector.desc: +"""开启或关闭 VM 系统信息采集器, 使用 erlang:system_info/1 收集 Erlang VM 相关指标""" + +} diff --git a/rel/i18n/zh/emqx_psk_schema.hocon b/rel/i18n/zh/emqx_psk_schema.hocon new file mode 100644 index 000000000..0fc14e730 --- /dev/null +++ b/rel/i18n/zh/emqx_psk_schema.hocon @@ -0,0 +1,28 @@ +emqx_psk_schema { + +chunk_size.desc: +"""将 PSK 文件导入到内建数据时每个块的大小""" + +enable.desc: +"""是否开启 TLS PSK 支持""" + +init_file.desc: +"""如果设置了初始化文件,EMQX 将在启动时从初始化文件中导入 PSK 信息到内建数据库中。 +这个文件需要按行进行组织,每一行必须遵守如下格式: PSKIdentity:SharedSecret +例如: mydevice1:c2VjcmV0""" + +psk_authentication.desc: +"""此配置用于启用 TLS-PSK 身份验证。 + +PSK 是 “Pre-Shared-Keys” 的缩写。 + +注意: 确保 SSL 监听器仅启用了 'tlsv1.2',并且配置了PSK 密码套件,例如 'RSA-PSK-AES256-GCM-SHA384'。 + +可以通过查看监听器中的 SSL 选项,了解更多详细信息。 + +可以通过配置 'init_file' 来设置初始化的 ID 和 密钥""" + +separator.desc: +"""PSK 文件中 PSKIdentitySharedSecret 之间的分隔符""" + +} diff --git a/rel/i18n/zh/emqx_resource_schema.hocon b/rel/i18n/zh/emqx_resource_schema.hocon new file mode 100644 index 000000000..9365b1026 --- /dev/null +++ b/rel/i18n/zh/emqx_resource_schema.hocon @@ -0,0 +1,112 @@ +emqx_resource_schema { + +auto_restart_interval.desc: +"""资源断开以后,自动重连的时间间隔。""" + +auto_restart_interval.label: +"""自动重连间隔""" + +batch_size.desc: +"""最大批量请求大小。如果设为1,则无批处理。""" + +batch_size.label: +"""最大批量请求大小""" + +batch_time.desc: +"""在较低消息率情况下尝试累积批量输出时的最大等待间隔,以提高资源的利用率。""" + +batch_time.label: +"""批量等待最大间隔""" + +buffer_mode.desc: +"""队列操作模式。 +memory_only: 所有的消息都缓存在内存里。volatile_offload: 先将消息缓存在内存中,当内存中的消息堆积超过一定限制(配置项 buffer_seg_bytes 指定该限制)后, 消息会开始缓存到磁盘上。""" + +buffer_mode.label: +"""缓存模式""" + +buffer_seg_bytes.desc: +"""当缓存模式是 volatile_offload 时适用。该配置用于指定缓存到磁盘上的文件的大小。""" + +buffer_seg_bytes.label: +"""缓存文件大小""" + +creation_opts.desc: +"""资源启动相关的选项。""" + +creation_opts.label: +"""资源启动选项""" + +enable_batch.desc: +"""启用批量模式。""" + +enable_batch.label: +"""启用批量模式""" + +enable_queue.desc: +"""启用磁盘缓存队列(仅对 egress 方向桥接有用)。""" + +enable_queue.label: +"""启用磁盘缓存队列""" + +health_check_interval.desc: +"""健康检查间隔。""" + +health_check_interval.label: +"""健康检查间隔""" + +inflight_window.desc: +"""请求飞行队列窗口大小。当请求模式为异步时,如果需要严格保证来自同一 MQTT 客户端的消息有序,则必须将此值设为 1。""" + +inflight_window.label: +"""请求飞行队列窗口""" + +max_buffer_bytes.desc: +"""每个缓存 worker 允许使用的最大字节数。""" + +max_buffer_bytes.label: +"""缓存队列最大长度""" + +query_mode.desc: +"""请求模式。可选 '同步/异步',默认为'异步'模式。""" + +query_mode.label: +"""请求模式""" + +request_timeout.desc: +"""从请求进入缓冲区开始计时,如果请求在规定的时间内仍停留在缓冲区内或者已发送但未能及时收到响应或确认,该请求将被视为过期。""" + +request_timeout.label: +"""请求超期""" + +resource_opts.desc: +"""资源相关的选项。""" + +resource_opts.label: +"""资源选项""" + +resume_interval.desc: +"""在发送失败后尝试重传飞行窗口中的请求的时间间隔。""" + +resume_interval.label: +"""重试时间间隔""" + +start_after_created.desc: +"""是否在创建资源后立即启动资源。""" + +start_after_created.label: +"""创建后立即启动""" + +start_timeout.desc: +"""在回复资源创建请求前等待资源进入健康状态的时间。""" + +start_timeout.label: +"""启动超时时间""" + 
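As a rough illustration of how the options documented in this file combine under a bridge's resource_opts block (all values are placeholders, and which fields apply depends on the bridge type):

```
resource_opts {
  query_mode = async
  worker_pool_size = 8
  batch_size = 100
  inflight_window = 100
  health_check_interval = "15s"
  request_timeout = "45s"
  max_buffer_bytes = "256MB"
}
```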
+worker_pool_size.desc: +"""缓存队列 worker 数量。仅对 egress 类型的桥接有意义。当桥接仅有 ingress 方向时,可设置为 0,否则必须大于 0。""" + +worker_pool_size.label: +"""缓存池大小""" + +} diff --git a/rel/i18n/zh/emqx_retainer_api.hocon b/rel/i18n/zh/emqx_retainer_api.hocon new file mode 100644 index 000000000..f8107f8ce --- /dev/null +++ b/rel/i18n/zh/emqx_retainer_api.hocon @@ -0,0 +1,63 @@ +emqx_retainer_api { + +config_content.desc: +"""配置内容""" + +config_not_found.desc: +"""配置不存在""" + +delete_matching_api.desc: +"""删除对应的消息""" + +from_clientid.desc: +"""发布者的 ClientID""" + +from_username.desc: +"""发布者的用户名""" + +get_config_api.desc: +"""查看配置内容""" + +list_retained_api.desc: +"""查看保留消息列表""" + +lookup_api.desc: +"""通过不带通配符的主题查看对应的保留消息""" + +message_detail.desc: +"""消息详情""" + +message_not_exist.desc: +"""消息不存在""" + +msgid.desc: +"""消息 ID""" + +payload.desc: +"""消息内容""" + +publish_at.desc: +"""消息发送时间, RFC 3339 格式""" + +qos.desc: +"""QoS""" + +retained_list.desc: +"""保留消息列表""" + +topic.desc: +"""主题""" + +unsupported_backend.desc: +"""不支持的后端""" + +update_config_failed.desc: +"""配置更新失败""" + +update_config_success.desc: +"""配置更新成功""" + +update_retainer_api.desc: +"""更新配置""" + +} diff --git a/rel/i18n/zh/emqx_retainer_schema.hocon b/rel/i18n/zh/emqx_retainer_schema.hocon new file mode 100644 index 000000000..1e8630007 --- /dev/null +++ b/rel/i18n/zh/emqx_retainer_schema.hocon @@ -0,0 +1,46 @@ +emqx_retainer_schema { + +backend.desc: +"""保留消息的存储后端""" + +batch_deliver_limiter.desc: +"""批量发送的限流器的名称。 +限流器可以用来防止短时间内向客户端发送太多的消息,从而避免过多的消息导致客户端队列堵塞甚至崩溃。 +这个名称需要是指向 `limiter.batch` 下的一个真实存在的限流器。 +如果这个字段为空,则不使用限流器。""" + +batch_deliver_number.desc: +"""批量派发时每批的数量。0 代表一次性全部派发""" + +batch_read_number.desc: +"""从存储后端批量加载时的每批数量上限,0 代表一次性读取""" + +enable.desc: +"""是否开启消息保留功能""" + +flow_control.desc: +"""流控设置""" + +max_payload_size.desc: +"""消息大小最大值""" + +max_retained_messages.desc: +"""消息保留的数量上限。0 表示无限""" + +mnesia_config_storage_type.desc: +"""选择消息是存放在磁盘还是内存中""" + +mnesia_config_type.desc: +"""后端类型""" + +msg_clear_interval.desc: +"""消息清理间隔。0 代表不进行清理""" + +msg_expiry_interval.desc: +"""消息保留时间。0 代表永久保留""" + +stop_publish_clear_msg.desc: +"""是否不发送保留消息的清理消息,在 MQTT 5.0 中如果一条保留消息的消息体为空,则会清除掉之前存储 +的对应的保留消息,通过这个值控制是否停止发送清理消息""" + +} diff --git a/rel/i18n/zh/emqx_rewrite_api.hocon b/rel/i18n/zh/emqx_rewrite_api.hocon new file mode 100644 index 000000000..2be95d38b --- /dev/null +++ b/rel/i18n/zh/emqx_rewrite_api.hocon @@ -0,0 +1,12 @@ +emqx_rewrite_api { + +list_topic_rewrite_api.desc: +"""列出全部主题重写规则""" + +update_topic_rewrite_api.desc: +"""更新全部主题重写规则""" + +update_topic_rewrite_api_response413.desc: +"""超出主题重写规则数量上限""" + +} diff --git a/rel/i18n/zh/emqx_rule_api_schema.hocon b/rel/i18n/zh/emqx_rule_api_schema.hocon new file mode 100644 index 000000000..854f7707f --- /dev/null +++ b/rel/i18n/zh/emqx_rule_api_schema.hocon @@ -0,0 +1,381 @@ +emqx_rule_api_schema { + +event_action.desc: +"""订阅或发布""" + +event_action.label: +"""订阅或发布""" + +event_payload.desc: +"""消息负载""" + +event_payload.label: +"""消息负载""" + +metrics_actions_failed_out_of_service.desc: +"""由于输出停止服务而导致规则调用输出失败的次数。 例如,桥接被禁用或停止。""" + +metrics_actions_failed_out_of_service.label: +"""调用输出失败次数""" + +metrics_actions_failed_unknown.desc: +"""由于未知错误,规则调用输出失败的次数。""" + +metrics_actions_failed_unknown.label: +"""调用输出失败次数""" + +event_server.desc: +"""MQTT broker的 IP 地址(或主机名)和端口,采用 IP:Port 格式""" + +event_server.label: +"""服务器 IP 地址和端口""" + +metrics_actions_total.desc: +"""规则调用输出的次数。 该值可能是“sql.matched”的几倍,具体取决于规则输出的数量。""" + +metrics_actions_total.label: +"""调用输出次数""" + +event_ctx_disconnected_da.desc: +"""客户端断开连接的时刻""" 
+ +event_ctx_disconnected_da.label: +"""客户端断开连接时刻""" + +event_topic.desc: +"""消息主题""" + +event_topic.label: +"""消息主题""" + +event_peername.desc: +"""对等客户端的 IP 地址和端口""" + +event_peername.label: +"""IP 地址和端口""" + +metrics_sql_passed.desc: +"""SQL 通过的次数""" + +metrics_sql_passed.label: +"""SQL 通过""" + +test_context.desc: +"""测试事件的上下文""" + +test_context.label: +"""事件上下文""" + +node_node.desc: +"""节点名字""" + +node_node.label: +"""节点名字""" + +event_from_clientid.desc: +"""事件来源客户端的 ID""" + +event_from_clientid.label: +"""客户端 ID""" + +event_keepalive.desc: +"""保持连接""" + +event_keepalive.label: +"""保持连接""" + +event_connected_at.desc: +"""客户端连接完成时的时刻""" + +event_connected_at.label: +"""连接完成时的时刻""" + +metrics_sql_failed_exception.desc: +"""SQL 由于执行异常而失败的次数。 这可能是因为调用 SQL 函数时崩溃,或者试图对未定义的变量进行算术运算""" + +metrics_sql_failed_exception.label: +"""SQL 执行异常""" + +event_from_username.desc: +"""事件来源客户端的用户名""" + +event_from_username.label: +"""用户名""" + +event_ctx_connack_reason_code.desc: +"""错误码""" + +event_ctx_connack_reason_code.label: +"""错误码""" + +rs_description.desc: +"""描述""" + +rs_description.label: +"""描述""" + +rule_id.desc: +"""规则的 ID""" + +rule_id.label: +"""规则 ID""" + +rs_event.desc: +"""事件主题""" + +rs_event.label: +"""事件主题""" + +root_rule_engine.desc: +"""规则引擎配置。该 API 可用于查看和修改规则引擎相关的一些设置。但不可用于规则,如需查看或修改规则,请调用 '/rules' API 进行操作。""" + +root_rule_engine.label: +"""规则引擎配置""" + +event_sockname.desc: +"""本地监听的 IP 地址和端口""" + +event_sockname.label: +"""IP 地址和端口""" + +event_qos.desc: +"""消息的 QoS""" + +event_qos.label: +"""消息 QoS""" + +event_mountpoint.desc: +"""挂载点""" + +event_mountpoint.label: +"""挂载点""" + +rs_title.desc: +"""标题""" + +rs_title.label: +"""标题""" + +ri_metrics.desc: +"""规则的计数器""" + +ri_metrics.label: +"""规则计数器""" + +event_retain.desc: +"""是否是保留消息""" + +event_retain.label: +"""保留消息""" + +event_event_type.desc: +"""事件类型""" + +event_event_type.label: +"""事件类型""" + +event_expiry_interval.desc: +"""到期间隔""" + +event_expiry_interval.label: +"""到期间隔""" + +metrics_sql_matched.desc: +"""SQL 的 FROM 子句匹配的次数。""" + +metrics_sql_matched.label: +"""命中数""" + +event_clientid.desc: +"""客户端 ID""" + +event_clientid.label: +"""客户端 ID""" + +metrics_actions_success.desc: +"""规则成功调用输出的次数。""" + +metrics_actions_success.label: +"""成功调用输出次数""" + +metrics_actions_failed.desc: +"""规则调用输出失败的次数。""" + +metrics_actions_failed.label: +"""调用输出失败次数""" + +metrics_sql_matched_rate.desc: +"""命中速率,次/秒""" + +metrics_sql_matched_rate.label: +"""Matched Rate""" + +event_proto_ver.desc: +"""协议版本""" + +event_proto_ver.label: +"""协议版本""" + +event_publish_received_at.desc: +"""消息被接受的时间""" + +event_publish_received_at.label: +"""消息被接受的时间""" + +metrics_sql_matched_rate_last5m.desc: +"""5分钟平均命中速率,次/秒""" + +metrics_sql_matched_rate_last5m.label: +"""平均命中速率""" + +event_is_bridge.desc: +"""是否桥接""" + +event_is_bridge.label: +"""是否桥接""" + +event_authz_source.desc: +"""缓存,插件或者默认值""" + +event_authz_source.label: +"""认证源""" + +metrics_sql_failed_unknown.desc: +"""由于未知错误导致 SQL 失败的次数。""" + +metrics_sql_failed_unknown.label: +"""SQL 未知错误""" + +metrics_sql_failed.desc: +"""SQL 失败的次数""" + +metrics_sql_failed.label: +"""SQL 失败""" + +event_ctx_dropped.desc: +"""消息被丢弃的原因""" + +event_ctx_dropped.label: +"""丢弃原因""" + +root_rule_test.desc: +"""用于规则测试的 Schema""" + +root_rule_test.label: +"""用于规则测试的 Schema""" + +rs_test_columns.desc: +"""测试列""" + +rs_test_columns.label: +"""测试列""" + +event_peerhost.desc: +"""对等客户端的 IP 地址""" + +event_peerhost.label: +"""对等客户端的 IP""" + +event_proto_name.desc: +"""协议名称""" + +event_proto_name.label: +"""协议名称""" + +root_rule_events.desc: 
+"""用于事件的 Schema""" + +root_rule_events.label: +"""用于规则事件的 Schema""" + +rs_sql_example.desc: +"""SQL 例子""" + +rs_sql_example.label: +"""SQL 例子""" + +metrics_sql_matched_rate_max.desc: +"""最大命中速率,次/秒""" + +metrics_sql_matched_rate_max.label: +"""最大命中速率""" + +event_clean_start.desc: +"""清除会话""" + +event_clean_start.label: +"""清除会话""" + +ri_created_at.desc: +"""规则创建时间""" + +ri_created_at.label: +"""规则创建时间""" + +event_dup.desc: +"""MQTT 消息的 DUP 标志""" + +event_dup.label: +"""DUP 标志""" + +ri_from.desc: +"""规则指定的主题""" + +ri_from.label: +"""规则指定的主题""" + +ri_node_metrics.desc: +"""每个节点的规则计数器""" + +ri_node_metrics.label: +"""每个节点规则计数器""" + +root_rule_creation.desc: +"""用于创建规则的 Schema""" + +root_rule_creation.label: +"""用于创建规则的 Schema""" + +event_result.desc: +"""允许或禁止""" + +event_result.label: +"""认证结果""" + +event_id.desc: +"""消息 ID""" + +event_id.label: +"""消息 ID""" + +event_username.desc: +"""用户名""" + +event_username.label: +"""用户名""" + +root_rule_info.desc: +"""用于规则信息的 Schema""" + +root_rule_info.label: +"""用于规则信息的 Schema""" + +rs_columns.desc: +"""列""" + +rs_columns.label: +"""列""" + +test_sql.desc: +"""测试的 SQL""" + +test_sql.label: +"""测试 SQL""" + +event_ctx_disconnected_reason.desc: +"""断开连接的原因""" + +event_ctx_disconnected_reason.label: +"""断开连接原因""" + +} diff --git a/rel/i18n/zh/emqx_rule_engine_api.hocon b/rel/i18n/zh/emqx_rule_engine_api.hocon new file mode 100644 index 000000000..eb1be4e73 --- /dev/null +++ b/rel/i18n/zh/emqx_rule_engine_api.hocon @@ -0,0 +1,93 @@ +emqx_rule_engine_api { + +api1.desc: +"""列出所有规则""" + +api1.label: +"""列出所有规则""" + +api10.desc: +"""更新规则引擎配置。""" + +api10.label: +"""更新配置""" + +api1_enable.desc: +"""根据规则是否开启条件过滤""" + +api1_from.desc: +"""根据规则来源 Topic 过滤, 需要完全匹配""" + +api1_like_description.desc: +"""根据规则描述过滤, 使用子串模糊匹配""" + +api1_like_from.desc: +"""根据规则来源 Topic 过滤, 使用子串模糊匹配""" + +api1_like_id.desc: +"""根据规则 id 过滤, 使用子串模糊匹配""" + +api1_match_from.desc: +"""根据规则来源 Topic 过滤, 使用 MQTT Topic 匹配""" + +api1_resp.desc: +"""规则列表""" + +api1_resp.label: +"""列出所有规则""" + +api2.desc: +"""通过指定 ID 创建规则""" + +api2.label: +"""通过指定 ID 创建规则""" + +api3.desc: +"""列出所有能被规则使用的事件""" + +api3.label: +"""列出所有能被规则使用的事件""" + +api4.desc: +"""通过 ID 查询规则""" + +api4.label: +"""查询规则""" + +api4_1.desc: +"""通过给定的 Id 获得规则的指标数据""" + +api4_1.label: +"""获得指标数据""" + +api5.desc: +"""通过 ID 更新集群里所有节点上的规则""" + +api5.label: +"""更新集群规则""" + +api6.desc: +"""通过 ID 删除集群里所有节点上的规则""" + +api6.label: +"""基于给定 ID 新建一条规则""" + +api7.desc: +"""重置规则计数""" + +api7.label: +"""重置规则计数""" + +api8.desc: +"""测试一个规则""" + +api8.label: +"""测试规则""" + +api9.desc: +"""获取规则引擎配置。""" + +api9.label: +"""获取配置""" + +} diff --git a/rel/i18n/zh/emqx_rule_engine_schema.hocon b/rel/i18n/zh/emqx_rule_engine_schema.hocon new file mode 100644 index 000000000..26858e10f --- /dev/null +++ b/rel/i18n/zh/emqx_rule_engine_schema.hocon @@ -0,0 +1,184 @@ +emqx_rule_engine_schema { + +console_function.desc: +"""将输出打印到控制台""" + +console_function.label: +"""控制台函数""" + +desc_builtin_action_console.desc: +"""配置打印到控制台""" + +desc_builtin_action_console.label: +"""配置打印到控制台""" + +desc_builtin_action_republish.desc: +"""配置重新发布。""" + +desc_builtin_action_republish.label: +"""配置重新发布""" + +desc_republish_args.desc: +"""内置 'republish' 动作的参数。 +可以在参数中使用变量。 +变量是规则中选择的字段。 例如规则 SQL 定义如下: + + SELECT clientid, qos, payload FROM "t/1" + +然后有 3 个变量可用:clientidqospayload。 如果我们将参数设置为: + + { + topic = "t/${clientid}" + qos = "${qos}" + payload = "msg: ${payload}" + } + +当收到一条消息 payload = `hello`, qos = 1, clientid = `Steve` 时,将重新发布一条新的 MQTT 消息到主题 `t/Steve` +消息内容为 payload = `msg: 
hello`, and `qos = 1""" + +desc_republish_args.label: +"""重新发布参数""" + +desc_rule_engine.desc: +"""配置 EMQX 规则引擎。""" + +desc_rule_engine.label: +"""配置规则引擎""" + +desc_rules.desc: +"""配置规则""" + +desc_rules.label: +"""配置规则""" + +desc_user_provided_function.desc: +"""配置用户函数""" + +desc_user_provided_function.label: +"""配置用户函数""" + +republish_args_payload.desc: +"""要重新发布的消息的有效负载。允许使用带有变量的模板,请参阅“republish_args”的描述。 +默认为 ${payload}。 如果从所选结果中未找到变量 ${payload},则使用字符串 "undefined"。""" + +republish_args_payload.label: +"""消息负载""" + +republish_args_qos.desc: +"""要重新发布的消息的 qos。允许使用带有变量的模板,请参阅“republish_args”的描述。 +默认为 ${qos}。 如果从规则的选择结果中没有找到变量 ${qos},则使用 0。""" + +republish_args_qos.label: +"""消息 QoS 等级""" + +republish_args_retain.desc: +"""要重新发布的消息的“保留”标志。允许使用带有变量的模板,请参阅“republish_args”的描述。 +默认为 ${retain}。 如果从所选结果中未找到变量 ${retain},则使用 false。""" + +republish_args_retain.label: +"""保留消息标志""" + +republish_args_topic.desc: +"""重新发布消息的目标主题。 +允许使用带有变量的模板,请参阅“republish_args”的描述。""" + +republish_args_topic.label: +"""目标主题""" + +republish_args_user_properties.desc: +"""指定使用哪个变量来填充 MQTT 消息的 User-Property 列表。这个变量的值必须是一个 map 类型。 +可以设置成 ${pub_props.'User-Property'} 或者 +使用 SELECT *,pub_props.'User-Property' as user_properties 来把源 MQTT 消息 +的 User-Property 列表用于填充。 +也可以使用 map_put 函数来添加新的 User-Property, +map_put('my-prop-name', 'my-prop-value', user_properties) as user_properties +注意:MQTT 协议允许一个消息中出现多次同一个 property 名,但是 EMQX 的规则引擎不允许。""" + +republish_function.desc: +"""将消息重新发布为新的 MQTT 消息""" + +republish_function.label: +"""重新发布函数""" + +rule_engine_ignore_sys_message.desc: +"""当设置为“true”(默认)时,规则引擎将忽略发布到 $SYS 主题的消息。""" + +rule_engine_ignore_sys_message.label: +"""忽略系统消息""" + +rule_engine_jq_function_default_timeout.desc: +"""规则引擎内建函数 `jq` 默认时间限制""" + +rule_engine_jq_function_default_timeout.label: +"""规则引擎 jq 函数时间限制""" + +rule_engine_jq_implementation_module.desc: +"""jq 规则引擎功能的实现模块。可用的两个选项是 jq_nif 和 jq_port。jq_nif 使用 Erlang NIF 库访问 jq 库,而 jq_port 使用基于 Erlang Port 的实现。jq_nif 方式(默认选项)是这两个选项中最快的实现,但 jq_port 方式更安全,因为这种情况下 jq 程序不会在 Erlang VM 进程中执行。""" + +rule_engine_jq_implementation_module.label: +"""JQ 实现模块""" + +rule_engine_rules.desc: +"""规则""" + +rule_engine_rules.label: +"""规则""" + +rules_actions.desc: +"""规则的动作列表。 +动作可以是指向 EMQX bridge 的引用,也可以是一个指向函数的对象。 +我们支持一些内置函数,如“republish”和“console”,我们还支持用户提供的函数,它的格式为:“{module}:{function}”。 +列表中的动作按顺序执行。这意味着如果其中一个动作执行缓慢,则以下所有动作都不会被执行直到它返回。 +如果其中一个动作崩溃,在它之后的所有动作仍然会被按照原始顺序执行。 +如果运行动作时出现任何错误,则会出现错误消息,并且相应的计数器会增加。""" + +rules_actions.label: +"""动作列表""" + +rules_description.desc: +"""规则的描述""" + +rules_description.label: +"""规则描述""" + +rules_enable.desc: +"""启用或禁用规则引擎""" + +rules_enable.label: +"""启用或禁用规则引擎""" + +rules_metadata.desc: +"""规则的元数据,不要手动修改""" + +rules_metadata.label: +"""规则的元数据""" + +rules_name.desc: +"""规则名字""" + +rules_name.label: +"""规则名字""" + +rules_sql.desc: +"""用于处理消息的 SQL 。 +示例:SELECT * FROM "test/topic" WHERE payload.x = 1""" + +rules_sql.label: +"""规则 SQL""" + +user_provided_function_args.desc: +"""用户提供的参数将作为函数 module:function/3 的第三个参数, +请检查源文件:apps/emqx_rule_engine/src/emqx_rule_actions.erl 中的示例函数 consolerepublish 。""" + +user_provided_function_args.label: +"""用户提供函数的参数""" + +user_provided_function_function.desc: +"""用户提供的函数。 格式应为:'{module}:{function}'。 +其中 {module} 是 Erlang 回调模块, {function} 是 Erlang 函数。 +要编写自己的函数,请检查源文件:apps/emqx_rule_engine/src/emqx_rule_actions.erl 中的示例函数 consolerepublish 。""" + +user_provided_function_function.label: +"""用户提供的函数""" + +} diff --git a/rel/i18n/zh/emqx_schema.hocon b/rel/i18n/zh/emqx_schema.hocon new file mode 100644 index 
000000000..3616abe91 --- /dev/null +++ b/rel/i18n/zh/emqx_schema.hocon @@ -0,0 +1,1473 @@ +emqx_schema { + +fields_mqtt_quic_listener_peer_unidi_stream_count.desc: +"""允许对端打开的单向流的数量""" + +fields_mqtt_quic_listener_peer_unidi_stream_count.label: +"""对端单向流的数量""" + +fields_authorization_no_match.desc: +"""如果用户或客户端不匹配ACL规则,或者从可配置授权源(比如内置数据库、HTTP API 或 PostgreSQL 等。)内未找 +到此类用户或客户端时,模式的认访问控制操作。 +在“授权”中查找更多详细信息。""" + +fields_authorization_no_match.label: +"""未匹时的默认授权动作""" + +sysmon_top_db_hostname.desc: +"""收集数据点的 PostgreSQL 数据库的主机名。""" + +sysmon_top_db_hostname.label: +"""数据库主机名""" + +zones.desc: +"""zone 是按name 分组的一组配置。 +对于灵活的配置映射,可以将 name 设置为侦听器的 zone 配置。 +注:名为 default 的内置区域是自动创建的,无法删除。""" + +fields_mqtt_quic_listener_certfile.desc: +"""证书文件。在 5.1 中会被废弃,使用 .ssl_options.certfile 代替。""" + +fields_mqtt_quic_listener_certfile.label: +"""证书文件""" + +fields_rate_limit_conn_bytes_in.desc: +"""限制 MQTT 连接接收数据包的速率。 速率以每秒的数据包字节数计算。""" + +fields_rate_limit_conn_bytes_in.label: +"""数据包速率""" + +crl_cache_capacity.desc: +"""缓存中可容纳的 CRL URL 的最大数量。 如果缓存的容量已满,并且必须获取一个新的 URL,那么它将驱逐缓存中插入的最老的 URL。""" + +crl_cache_capacity.label: +"""CRL 缓存容量""" + +alarm_actions.desc: +"""警报激活时触发的动作。
目前,支持以下操作:logpublish. +log 将告警写入日志 (控制台或者文件). +publish 将告警作为 MQTT 消息发布到系统主题: +$SYS/brokers/emqx@xx.xx.xx.x/alarms/activate and +$SYS/brokers/emqx@xx.xx.xx.x/alarms/deactivate""" + +alarm_actions.label: +"""告警动作""" + +base_listener_max_connections.desc: +"""监听器允许的最大并发连接数。""" + +base_listener_max_connections.label: +"""最大并发连接数""" + +mqtt_peer_cert_as_username.desc: +"""使用对端证书中的 CN、DN 字段或整个证书内容来作为用户名;仅适用于 TLS 连接。 +目前支持: +- cn: 取证书的 CN 字段 +- dn: 取证书的 DN 字段 +- crt: 取 DERPEM 的证书内容 +- pem: 将 DER 证书转换为 PEM 格式作为用户名 +- md5: 取 DERPEM 证书内容的 MD5 值""" + +mqtt_peer_cert_as_username.label: +"""对端证书作为用户名""" + +fields_cache_enable.desc: +"""启用或禁用授权缓存。""" + +fields_cache_enable.label: +"""启用或禁用授权缓存""" + +fields_mqtt_quic_listener_disconnect_timeout_ms.desc: +"""在判定路径无效和断开连接之前,要等待多长时间的ACK。默认:16000""" + +fields_mqtt_quic_listener_disconnect_timeout_ms.label: +"""断开连接超时 毫秒""" + +mqtt_max_topic_alias.desc: +"""允许的最大主题别名数,0 表示不支持主题别名。""" + +mqtt_max_topic_alias.label: +"""最大主题别名""" + +common_ssl_opts_schema_user_lookup_fun.desc: +"""用于查找预共享密钥(PSK)标识的 EMQX 内部回调。""" + +common_ssl_opts_schema_user_lookup_fun.label: +"""SSL PSK 用户回调""" + +fields_listeners_wss.desc: +"""HTTPS websocket 监听器。""" + +fields_listeners_wss.label: +"""HTTPS websocket 监听器""" + +sysmon_top_max_procs.desc: +"""当 VM 中的进程数超过此值时,停止收集数据。""" + +sysmon_top_max_procs.label: +"""最大进程数""" + +mqtt_use_username_as_clientid.desc: +"""是否使用用户名作为客户端 ID。 +此设置的作用时间晚于 对端证书作为用户名对端证书作为客户端 ID。""" + +mqtt_use_username_as_clientid.label: +"""用户名作为客户端 ID""" + +mqtt_max_qos_allowed.desc: +"""允许的最大 QoS 等级。""" + +mqtt_max_qos_allowed.label: +"""最大 QoS""" + +fields_mqtt_quic_listener_max_binding_stateless_operations.desc: +"""在任何时候可以在一个绑定上排队的无状态操作的最大数量。默认值:100""" + +fields_mqtt_quic_listener_max_binding_stateless_operations.label: +"""最大绑定无状态操作""" + +fields_mqtt_quic_listener_stream_recv_buffer_default.desc: +"""流的初始缓冲区大小。默认:4096""" + +fields_mqtt_quic_listener_stream_recv_buffer_default.label: +"""流媒体接收缓冲区默认值""" + +fields_mqtt_quic_listener_pacing_enabled.desc: +"""有节奏的发送,以避免路径上的缓冲区过度填充。默认值:1(已启用)""" + +fields_mqtt_quic_listener_pacing_enabled.label: +"""启用节奏发送""" + +mqtt_max_subscriptions.desc: +"""允许每个客户端建立的最大订阅数量。""" + +mqtt_max_subscriptions.label: +"""最大订阅数量""" + +persistent_session_builtin_messages_table.desc: +"""用于内建消息表的性能调优参数。""" + +persistent_session_builtin_messages_table.label: +"""持久化消息""" + +sysmon_os_cpu_low_watermark.desc: +"""在解除相应警报之前可以使用多少系统 CPU 的阈值,以系统CPU负载的百分比表示。""" + +sysmon_os_cpu_low_watermark.label: +"""CPU 低水位线""" + +fields_mqtt_quic_listener_tls_server_max_send_buffer.desc: +"""缓冲多少TLS数据。 默认值:8192""" + +fields_mqtt_quic_listener_tls_server_max_send_buffer.label: +"""TLS 服务器最大发送缓冲区""" + +base_listener_bind.desc: +"""监听套接字的 IP 地址和端口。""" + +base_listener_bind.label: +"""IP 地址和端口""" + +server_ssl_opts_schema_handshake_timeout.desc: +"""握手完成所允许的最长时间""" + +server_ssl_opts_schema_handshake_timeout.label: +"""握手超时时间""" + +fields_deflate_opts_server_context_takeover.desc: +"""接管意味着在服务器消息之间保留压缩状态。""" + +fields_deflate_opts_server_context_takeover.label: +"""服务上下文接管""" + +mqtt_session_expiry_interval.desc: +"""指定会话将在连接断开后多久过期,仅适用于非 MQTT 5.0 的连接。""" + +mqtt_session_expiry_interval.label: +"""会话过期间隔""" + +fields_listener_enabled.desc: +"""启停监听器。""" + +fields_listener_enabled.label: +"""启停监听器""" + +mqtt.desc: +"""全局的 MQTT 配置项。 +mqtt 下所有的配置作为全局的默认值存在,它可以被 zone 中的配置覆盖。""" + +crl_cache_refresh_http_timeout.desc: +"""获取 CRLs 时 HTTP 请求的超时。 该配置对所有启用 CRL 检查的监听器监听器有效。""" + +crl_cache_refresh_http_timeout.label: +"""CRL 缓存刷新 HTTP 超时""" + 
+fields_tcp_opts_backlog.desc: +"""TCP backlog 定义了挂起连接队列可以增长到的最大长度。""" + +fields_tcp_opts_backlog.label: +"""TCP 连接队列长度""" + +broker_route_batch_clean.desc: +"""是否开启批量清除路由。""" + +fields_mqtt_quic_listener_initial_window_packets.desc: +"""一个连接的初始拥堵窗口的大小(以包为单位)。默认值:10""" + +fields_mqtt_quic_listener_initial_window_packets.label: +"""初始窗口数据包""" + +flapping_detect_enable.desc: +"""启用抖动检测功能。""" + +flapping_detect_enable.label: +"""启用抖动检测""" + +sysmon_top_db_password.desc: +"""PostgreSQL 数据库的密码""" + +sysmon_top_db_password.label: +"""数据库密码""" + +fields_ws_opts_check_origins.desc: +"""允许的 origins 列表""" + +fields_ws_opts_check_origins.label: +"""允许的 origins""" + +fields_deflate_opts_client_context_takeover.desc: +"""接管意味着在客户端消息之间保留压缩状态。""" + +fields_deflate_opts_client_context_takeover.label: +"""客户端上下文接管""" + +base_listener_acceptors.desc: +"""监听器接收池的大小。""" + +base_listener_acceptors.label: +"""接收器数量""" + +common_ssl_opts_schema_cacertfile.desc: +"""受信任的PEM格式 CA 证书捆绑文件
+此文件中的证书用于验证TLS对等方的证书。 +如果要信任新 CA,请将新证书附加到文件中。 +无需重启EMQX即可加载更新的文件,因为系统会定期检查文件是否已更新(并重新加载)
+注意:从文件中失效(删除)证书不会影响已建立的连接。""" + +common_ssl_opts_schema_cacertfile.label: +"""CA 证书文件""" + +fields_ws_opts_mqtt_path.desc: +"""WebSocket 的 MQTT 协议路径。因此,EMQX Broker的WebSocket地址为: +ws://{ip}:{port}/mqtt""" + +fields_ws_opts_mqtt_path.label: +"""WS MQTT 路径""" + +sysmon_os_procmem_high_watermark.desc: +"""在发出相应警报之前,一个Erlang进程可以分配多少系统内存的阈值,以系统内存的百分比表示。""" + +sysmon_os_procmem_high_watermark.label: +"""进程内存高水位线""" + +fields_listeners_quic.desc: +"""QUIC 监听器。""" + +fields_listeners_quic.label: +"""QUIC 监听器""" + +fields_listeners_ws.desc: +"""HTTP websocket 监听器。""" + +fields_listeners_ws.label: +"""HTTP websocket 监听器""" + +mqtt_retry_interval.desc: +"""QoS 1/2 消息的重新投递间隔。""" + +mqtt_retry_interval.label: +"""重试间隔""" + +stats_enable.desc: +"""启用/禁用统计数据收集功能。""" + +stats_enable.label: +"""启用/禁用统计数据收集功能""" + +fields_authorization_deny_action.desc: +"""授权检查拒绝操作时的操作。""" + +fields_authorization_deny_action.label: +"""授权检查拒绝操作时的操作""" + +fields_deflate_opts_server_max_window_bits.desc: +"""指定服务器压缩上下文的大小。""" + +fields_deflate_opts_server_max_window_bits.label: +"""服务器压缩窗口大小""" + +client_ssl_opts_schema_server_name_indication.desc: +"""指定要在 TLS 服务器名称指示扩展中使用的主机名。
+例如,当连接到 "server.example.net" 时,接受连接并执行 TLS 握手的真正服务器可能与 TLS 客户端最初连接到的主机不同, +例如,当连接到 IP 地址时,或者当主机具有多个可解析的 DNS 记录时
+如果未指定,它将默认为使用的主机名字符串 +建立连接,除非使用 IP 地址
+然后,主机名也用于对等机的主机名验证证书
+特殊值 disable 阻止发送服务器名称指示扩展,并禁用主机名验证检查。""" + +client_ssl_opts_schema_server_name_indication.label: +"""服务器名称指示""" + +fields_mqtt_quic_listener_retry_memory_limit.desc: +"""在使用无状态重试之前,可用于握手连接的可用内存的百分比。计算为`N/65535`。默认值:65""" + +fields_mqtt_quic_listener_retry_memory_limit.label: +"""重试内存限制""" + +force_shutdown_max_message_queue_len.desc: +"""消息队列的最大长度。""" + +force_shutdown_max_message_queue_len.label: +"""进程邮箱消息队列的最大长度""" + +sys_heartbeat_interval.desc: +"""发送心跳系统消息的间隔时间,它包括: + - `$SYS/brokers//uptime` + - `$SYS/brokers//datetime`""" + +flapping_detect_ban_time.desc: +"""抖动的客户端将会被禁止登录多长时间。""" + +flapping_detect_ban_time.label: +"""禁止登录时长""" + +sysmon_top_num_items.desc: +"""每个监视组的顶级进程数。""" + +sysmon_top_num_items.label: +"""顶级进程数""" + +persistent_session_builtin_session_table.desc: +"""用于内建会话表的性能调优参数。""" + +persistent_session_builtin_session_table.label: +"""持久会话""" + +mqtt_upgrade_qos.desc: +"""投递消息时,是否根据订阅主题时的 QoS 等级来强制提升派发的消息的 QoS 等级。""" + +mqtt_upgrade_qos.label: +"""升级 QoS""" + +mqtt_shared_subscription.desc: +"""是否启用对 MQTT 共享订阅的支持。""" + +mqtt_shared_subscription.label: +"""共享订阅可用""" + +fields_tcp_opts_sndbuf.desc: +"""连接的 TCP 发送缓冲区(OS 内核)。""" + +fields_tcp_opts_sndbuf.label: +"""TCP 发送缓冲区""" + +sysmon_os_mem_check_interval.desc: +"""定期内存检查的时间间隔。""" + +sysmon_os_mem_check_interval.label: +"""内存检查间隔""" + +server_ssl_opts_schema_gc_after_handshake.desc: +"""内存使用调优。如果启用,将在TLS/SSL握手完成后立即执行垃圾回收。TLS/SSL握手建立后立即进行GC。""" + +server_ssl_opts_schema_gc_after_handshake.label: +"""握手后执行GC""" + +fields_mqtt_quic_listener_ssl_options.desc: +"""QUIC 传输层的 TLS 选项""" + +fields_mqtt_quic_listener_ssl_options.label: +"""TLS 选项""" + +fields_ws_opts_mqtt_piggyback.desc: +"""WebSocket消息是否允许包含多个 MQTT 数据包。""" + +fields_ws_opts_mqtt_piggyback.label: +"""MQTT Piggyback""" + +base_listener_mountpoint.desc: +"""发布或订阅时,请在所有主题前面加上 mountpoint 字符串。 + +将消息传递给订阅者时,将从主题名称中删除带前缀的字符串。挂载点是一种用户可以用来实现不同侦听器之间消息路由隔离的方法。 + +例如,如果客户机 A 使用 listeners.tcp.\.mountpoint 设置为'some_tenant',那么客户端实际上订阅了主题'some_tenant/t'。
+类似地,如果另一个客户端B(与客户端A连接到同一个侦听器)向主题 't' 发送消息,该消息将路由到所有订阅了'some_租户/t'的客户端,因此客户端 A 将接收主题名为't'的消息
+ +设置为"" 以禁用该功能
+ +mountpoint 字符串中的变量: +- ${clientid}: clientid +- ${username}: username""" + +base_listener_mountpoint.label: +"""mountpoint""" + +mqtt_max_awaiting_rel.desc: +"""每个发布者的会话中,都存在一个队列来处理客户端发送的 QoS 2 消息。该队列会存储 QoS 2 消息的报文 ID 直到收到客户端的 PUBREL 或超时,达到队列长度的限制后,新的 QoS 2 消息发布会被拒绝,并返回 `147(0x93)` 错误。""" + +mqtt_max_awaiting_rel.label: +"""PUBREL 等待队列长度""" + +ciphers_schema_quic.desc: +"""此配置保存由逗号分隔的 TLS 密码套件名称,或作为字符串数组。例如 +"TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256"或 +["TLS_AES_256_GCM_SHA384","TLS_AES_128_GCM_SHA256"]。 +
+密码(及其顺序)定义了客户端和服务器通过网络连接加密信息的方式。 +选择一个好的密码套件对于应用程序的数据安全性、机密性和性能至关重要。 + +名称应为 OpenSSL 字符串格式(而不是 RFC 格式)。 +EMQX 配置文档提供的所有默认值和示例都是 OpenSSL 格式
+注意:某些密码套件仅与特定的 TLS 版本兼容('tlsv1.1'、'tlsv1.2'或'tlsv1.3')。 +不兼容的密码套件将被自动删除。 + +例如,如果只有 versions 仅配置为 tlsv1.3。为其他版本配置密码套件将无效。 + +
+注:PSK 的 Ciphers 不支持 tlsv1.3
+如果打算使用PSK密码套件,tlsv1.3。应在ssl.versions中禁用。 + +
+PSK 密码套件: +"RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384, +RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256, +RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA, +RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"
+ +注:QUIC 监听器不支持 tlsv1.3 的 ciphers""" + +ciphers_schema_quic.label: +"""""" + +fields_mqtt_quic_listener_max_bytes_per_key.desc: +"""在启动密钥更新之前,用单个 1-RTT 加密密钥加密的最大字节数。默认值:274877906944""" + +fields_mqtt_quic_listener_max_bytes_per_key.label: +"""每个密钥的最大字节数""" + +fields_mqtt_quic_listener_mtu_discovery_search_complete_timeout_us.desc: +"""如果没有达到 max ,在重新尝试 MTU 探测之前要等待的时间,单位是微秒。默认值:600000000""" + +fields_mqtt_quic_listener_mtu_discovery_search_complete_timeout_us.label: +"""""" + +fields_ws_opts_check_origin_enable.desc: +"""如果trueoriginHTTP 头将根据check_origins参数中配置的允许来源列表进行验证。""" + +fields_ws_opts_check_origin_enable.label: +"""检查 origin""" + +sysmon_vm_busy_dist_port.desc: +"""启用后,当用于集群接点之间 RPC 的连接过忙时,会触发一条带有 busy_dist_port 关键字的 warning 级别日志。 +同时还会发布一条主题为 $SYS/sysmon/busy_dist_port 的 MQTT 系统消息。""" + +sysmon_vm_busy_dist_port.label: +"""启用分布式端口过忙监控""" + +mqtt_max_mqueue_len.desc: +"""消息队列最大长度。持久客户端断开连接或飞行窗口已满时排队的消息长度。""" + +mqtt_max_mqueue_len.label: +"""最大消息队列长度""" + +mqtt_max_inflight.desc: +"""允许在完成应答前同时投递的 QoS 1 和 QoS 2 消息的最大数量。""" + +mqtt_max_inflight.label: +"""最大飞行窗口""" + +persistent_session_store_enabled.desc: +"""使用数据库存储有关持久会话的信息。 +这使得在节点停止时,可以将客户端连接迁移到另一个群集节点。""" + +persistent_session_store_enabled.label: +"""启用持久会话保存""" + +fields_deflate_opts_level.desc: +"""压缩级别""" + +fields_deflate_opts_level.label: +"""压缩级别""" + +mqtt_server_keepalive.desc: +"""EMQX 要求的保活时间,如设为 disabled,则将使用客户端指定的保持连接时间;仅适用于 MQTT 5.0 客户端。""" + +mqtt_server_keepalive.label: +"""服务端保活时间""" + +global_authentication.desc: +"""全局 MQTT 监听器的默认认证配置。 为每个监听器配置认证参考监听器器配置中的authentication 配置。 + +该配置可以被配置为: +
+  - []: 默认值,允许所有的登录请求
+  - 配置为单认证器,例如 {enable:true,backend:"built_in_database",mechanism="password_based"}
+  - 配置为认证器数组
+ +当配置为认证链后,登录凭证会按照配置的顺序进行检查,直到做出allowdeny的结果。 + +如果在所有的认证器都执行完后,还是没有结果,登录将被拒绝。""" + +fields_mqtt_quic_listener_load_balancing_mode.desc: +"""0: 禁用, 1: SERVER_ID_IP, 2: SERVER_ID_FIXED. 默认: 0""" + +fields_mqtt_quic_listener_load_balancing_mode.label: +"""负载平衡模式""" + +persistent_session_store_session_message_gc_interval.desc: +"""持久会话消息的临时数据垃圾收集的开始间隔。 +这不会影响持久会话消息的生命周期长度。""" + +persistent_session_store_session_message_gc_interval.label: +"""会话消息清理间隔""" + +server_ssl_opts_schema_ocsp_refresh_http_timeout.desc: +"""检查 OCSP 响应时,HTTP 请求的超时。""" + +server_ssl_opts_schema_ocsp_refresh_http_timeout.label: +"""OCSP 刷新 HTTP 超时""" + +fields_tcp_opts_send_timeout.desc: +"""连接的 TCP 发送超时。""" + +fields_tcp_opts_send_timeout.label: +"""TCP 发送超时""" + +sysmon_vm_process_high_watermark.desc: +"""在发出相应警报之前,本地节点上可以同时存在多少进程的阈值(以进程百分比表示)。""" + +sysmon_vm_process_high_watermark.label: +"""进程数高水位线""" + +fields_tcp_opts_buffer.desc: +"""驱动程序使用的用户空间缓冲区的大小。""" + +fields_tcp_opts_buffer.label: +"""TCP 用户态缓冲区""" + +server_ssl_opts_schema_honor_cipher_order.desc: +"""一个重要的安全设置,它强制根据服务器指定的顺序而不是客户机指定的顺序设置密码,从而强制服务器管理员执行(通常配置得更正确)安全顺序。""" + +server_ssl_opts_schema_honor_cipher_order.label: +"""SSL honor cipher order""" + +conn_congestion_min_alarm_sustain_duration.desc: +"""清除警报前的最短时间。
只有当队列中没有挂起的数据,并且连接至少被堵塞了 min_alarm_sustain_duration 毫秒时,
报警才会被清除。这是为了避免太频繁地清除和再次发出警报。""" + +conn_congestion_min_alarm_sustain_duration.label: +"""告警维持时间""" + +fields_mqtt_quic_listener_keep_alive_interval_ms.desc: +"""多长时间发送一次PING帧以保活连接。""" + +fields_mqtt_quic_listener_keep_alive_interval_ms.label: +"""保持活着的时间间隔 毫秒""" + +fields_mqtt_quic_listener_handshake_idle_timeout_ms.desc: +"""一个握手在被丢弃之前可以空闲多长时间""" + +fields_mqtt_quic_listener_handshake_idle_timeout_ms.label: +"""握手空闲超时 毫秒""" + +broker_session_locking_strategy.desc: +"""Session 在集群中的锁策略。 + - `loca`:仅锁本节点的 Session; + - `one`:任选一个其它节点加锁; + - `quorum`:选择集群中半数以上的节点加锁; + - `all`:选择所有节点加锁。""" + +persistent_store_ram_cache.desc: +"""在内存中保持一份数据的副本,以便更快地访问。""" + +persistent_store_ram_cache.label: +"""内存缓存""" + +fields_mqtt_quic_listener_stream_recv_window_default.desc: +"""初始流接收窗口大小。 默认值:32678""" + +fields_mqtt_quic_listener_stream_recv_window_default.label: +"""流接收窗口默认""" + +mqtt_mqueue_priorities.desc: +"""主题优先级。取值范围 [1-255] +默认优先级表为空,即所有的主题优先级相同。 + +注:优先主题名称中不支持使用逗号和等号。 +注:不在此列表中的主题,被视为最高/最低优先级,这取决于mqtt.mqueue_default_priority 的配置 + +示例: +配置 "topic/1" > "topic/2": +mqueue_priorities: {"topic/1": 10, "topic/2": 8}""" + +mqtt_mqueue_priorities.label: +"""主题优先级""" + +fields_rate_limit_conn_messages_in.desc: +"""外部 MQTT 连接的消息限制。""" + +fields_rate_limit_conn_messages_in.label: +"""外部 MQTT 连接的消息限制""" + +fields_rate_limit_max_conn_rate.desc: +"""每秒最大连接数。""" + +fields_rate_limit_max_conn_rate.label: +"""每秒最大连接数""" + +alarm_size_limit.desc: +"""要保留为历史记录的已停用报警的最大总数。当超过此限制时,将删除最旧的停用报警,以限制总数。""" + +alarm_size_limit.label: +"""告警总数限制""" + +fields_cache_max_size.desc: +"""缓存项的最大数量。""" + +fields_cache_max_size.label: +"""缓存项的最大数量""" + +fields_listeners_tcp.desc: +"""TCP 监听器。""" + +fields_listeners_tcp.label: +"""TCP 监听器""" + +conn_congestion_enable_alarm.desc: +"""启用或者禁用连接阻塞告警功能。""" + +conn_congestion_enable_alarm.label: +"""启用/禁用阻塞告警""" + +fields_ws_opts_proxy_port_header.desc: +"""HTTP 头,用于传递有关客户端端口的信息。当 EMQX 集群部署在负载平衡器后面时,这一点非常重要。""" + +fields_ws_opts_proxy_port_header.label: +"""客户端端口头""" + +overload_protection_enable.desc: +"""是否对系统过载做出反应。""" + +overload_protection_enable.label: +"""是否对系统过载做出反应""" + +fields_mqtt_quic_listener_minimum_mtu.desc: +"""一个连接所支持的最小MTU。这将被作为起始MTU使用。默认值:1248""" + +fields_mqtt_quic_listener_minimum_mtu.label: +"""最小 MTU""" + +sys_msg_interval.desc: +"""发送 `$SYS` 主题的间隔时间。""" + +mqtt_await_rel_timeout.desc: +"""客户端发布 QoS 2 消息时,服务器等待 `PUBREL` 的最长时延。超过该时长后服务器会放弃等待,该PACKET ID 会被释放,从而允许后续新的 PUBLISH 消息使用。如果超时后收到 PUBREL,服务器将会产生一条告警日志。注意,向订阅客户端转发消息的动作发生在进入等待之前。""" + +mqtt_await_rel_timeout.label: +"""PUBREL 最大等待时间""" + +common_ssl_opts_schema_verify.desc: +"""启用或禁用对等验证。""" + +common_ssl_opts_schema_verify.label: +"""对等验证""" + +fields_listeners_ssl.desc: +"""SSL 监听器。""" + +fields_listeners_ssl.label: +"""SSL 监听器""" + +fields_deflate_opts_client_max_window_bits.desc: +"""指定客户端压缩上下文的大小。""" + +fields_deflate_opts_client_max_window_bits.label: +"""压缩窗口大小""" + +common_ssl_opts_schema_keyfile.desc: +"""PEM格式的私钥文件。""" + +common_ssl_opts_schema_keyfile.label: +"""私钥文件""" + +sysmon_os_cpu_high_watermark.desc: +"""在发出相应警报之前可以使用多少系统 CPU 的阈值,以系统CPU负载的百分比表示。""" + +sysmon_os_cpu_high_watermark.label: +"""CPU 高水位线""" + +flapping_detect_window_time.desc: +"""抖动检测的时间窗口。""" + +flapping_detect_window_time.label: +"""时间窗口""" + +mqtt_mqueue_default_priority.desc: +"""默认的主题优先级,不在 主题优先级mqueue_priorities) 中的主题将会使用该优先级。""" + +mqtt_mqueue_default_priority.label: +"""默认主题优先级""" + +client_ssl_opts_schema_enable.desc: +"""启用 TLS。""" + +client_ssl_opts_schema_enable.label: +"""启用 TLS""" + 
+fields_mqtt_quic_listener_mtu_discovery_missing_probe_count.desc: +"""在任何时候都可以在一个绑定上排队的无状态操作的最大数量。默认值:3""" + +fields_mqtt_quic_listener_mtu_discovery_missing_probe_count.label: +"""MTU发现丢失的探针数量""" + +fields_tcp_opts_recbuf.desc: +"""连接的 TCP 接收缓冲区(OS 内核)。""" + +fields_tcp_opts_recbuf.label: +"""TCP 接收缓冲区""" + +sysmon_vm_process_check_interval.desc: +"""定期进程限制检查的时间间隔。""" + +sysmon_vm_process_check_interval.label: +"""进程限制检查时间""" + +fields_mqtt_quic_listener_server_resumption_level.desc: +"""连接恢复 和/或 0-RTT 服务器支持。默认值:0(无恢复功能)""" + +fields_mqtt_quic_listener_server_resumption_level.label: +"""服务端连接恢复支持""" + +fields_ws_opts_proxy_address_header.desc: +"""HTTP 头,用于传递有关客户端 IP 地址的信息。 +当 EMQX 集群部署在负载平衡器后面时,这一点非常重要。""" + +fields_ws_opts_proxy_address_header.label: +"""客户端地址头""" + +sysmon_os_sysmem_high_watermark.desc: +"""在发出相应报警之前可以分配多少系统内存的阈值,以系统内存的百分比表示。""" + +sysmon_os_sysmem_high_watermark.label: +"""系统内存高水位线""" + +fields_tcp_opts_high_watermark.desc: +"""当 VM 套接字实现内部排队的数据量达到此限制时,套接字将设置为忙碌状态。""" + +fields_tcp_opts_high_watermark.label: +"""""" + +fields_mqtt_quic_listener_stateless_operation_expiration_ms.desc: +"""同一个对端的操作之间的时间限制,单位是毫秒。 默认:100""" + +fields_mqtt_quic_listener_stateless_operation_expiration_ms.label: +"""无状态操作过期 毫秒""" + +server_ssl_opts_schema_dhfile.desc: +"""如果协商使用Diffie-Hellman密钥交换的密码套件,则服务器将使用包含PEM编码的Diffie-Hellman参数的文件的路径。如果未指定,则使用默认参数。
+注意:TLS 1.3不支持dhfile选项。""" + +server_ssl_opts_schema_dhfile.label: +"""SSL dhfile""" + +flapping_detect_max_count.desc: +"""MQTT 客户端在“窗口”时间内允许的最大断开次数。""" + +flapping_detect_max_count.label: +"""最大断开次数""" + +mqtt_max_topic_levels.desc: +"""允许的最大主题层级。""" + +mqtt_max_topic_levels.label: +"""最大主题层级""" + +force_shutdown_max_heap_size.desc: +"""Heap 的总大小。""" + +force_shutdown_max_heap_size.label: +"""Heap 的总大小""" + +persistent_store_on_disc.desc: +"""将持久会话数据保存在磁盘上。如果为 false 则存储在内存中。 +如开启, 持久会话数据可在集群重启后恢复。 +如关闭, 数据仅存储在内存中, 则在整个集群停止后丢失。""" + +persistent_store_on_disc.label: +"""持久化在磁盘上""" + +mqtt_ignore_loop_deliver.desc: +"""设置由 MQTT v3.1.1/v3.1.0 客户端发布的消息是否将转发给其本身;类似 MQTT 5.0 协议中的 No Local 选项。""" + +mqtt_ignore_loop_deliver.label: +"""忽略循环投递""" + +common_ssl_opts_schema_certfile.desc: +"""PEM格式证书链文件
+此文件中的证书应与证书颁发链的顺序相反。也就是说,主机的证书应该放在文件的开头, +然后是直接颁发者 CA 证书,依此类推,一直到根 CA 证书。 +根 CA 证书是可选的,如果想要添加,应加到文件到最末端。""" + +common_ssl_opts_schema_certfile.label: +"""证书文件""" + +mqtt_exclusive_subscription.desc: +"""是否启用对 MQTT 排它订阅的支持。""" + +mqtt_exclusive_subscription.label: +"""排它订阅""" + +mqtt_retain_available.desc: +"""是否启用对 MQTT 保留消息的支持。""" + +mqtt_retain_available.label: +"""保留消息可用""" + +fields_tcp_opts_reuseaddr.desc: +"""连接的 SO_REUSEADDR 标识。""" + +fields_tcp_opts_reuseaddr.label: +"""SO_REUSEADDR""" + +sysmon_vm_long_schedule.desc: +"""启用后,如果 Erlang VM 调度器出现某个任务占用时间过长时,会触发一条带有 'long_schedule' 关键字的日志。 +同时还会发布一条主题为 $SYS/sysmon/long_schedule 的 MQTT 系统消息。""" + +sysmon_vm_long_schedule.label: +"""启用长调度监控""" + +mqtt_keepalive_backoff.desc: +"""EMQX 判定客户端保活超时使用的阈值系数。计算公式为:Keep Alive * Backoff * 2""" + +mqtt_keepalive_backoff.label: +"""保活超时阈值系数""" + +force_gc_bytes.desc: +"""在进程处理过多少个字节之后,对此进程执行垃圾回收。""" + +force_gc_bytes.label: +"""垃圾回收字节数""" + +server_ssl_opts_schema_fail_if_no_peer_cert.desc: +"""TLS/DTLS 服务器与 {verify,verify_peer} 一起使用。 +如果设置为true,则如果客户端没有要发送的证书,即发送空证书,服务器将失败。 +如果设置为false,则仅当客户端发送无效证书(空证书被视为有效证书)时才会失败。""" + +server_ssl_opts_schema_fail_if_no_peer_cert.label: +"""没有证书则 SSL 失败""" + +fields_ws_opts_compress.desc: +"""如果 true,则使用zlib 压缩 WebSocket 消息
+deflate_opts 下的配置项属于压缩相关参数配置。""" + +fields_ws_opts_compress.label: +"""Ws 压缩""" + +fields_mqtt_quic_listener_keep_alive_interval.desc: +"""发送 PING 帧的频率,以保活连接. 设为 0 表示禁用。""" + +fields_mqtt_quic_listener_keep_alive_interval.label: +"""PING 保活频率""" + +fields_cache_ttl.desc: +"""缓存数据的生存时间。""" + +fields_cache_ttl.label: +"""缓存数据的生存时间。""" + +sys_topics.desc: +"""系统主题配置。""" + +sys_event_client_subscribed.desc: +"""是否开启客户端已成功订阅主题事件消息。""" + +sysmon_top_db_port.desc: +"""收集数据点的 PostgreSQL 数据库的端口。""" + +sysmon_top_db_port.label: +"""数据库端口""" + +fields_mqtt_quic_listener_max_operations_per_drain.desc: +"""每个连接操作的最大耗费操作数。默认:16""" + +fields_mqtt_quic_listener_max_operations_per_drain.label: +"""每次操作最大操作数""" + +fields_mqtt_quic_listener_datagram_receive_enabled.desc: +"""宣传对QUIC Datagram 扩展的支持。为将来保留。默认为0(FALSE)""" + +fields_mqtt_quic_listener_datagram_receive_enabled.label: +"""启用 Datagram 接收""" + +fields_mqtt_quic_listener_initial_rtt_ms.desc: +"""初始RTT估计""" + +fields_mqtt_quic_listener_initial_rtt_ms.label: +"""Initial RTT 毫秒""" + +overload_protection_backoff_gc.desc: +"""高负载时,跳过强制 GC。""" + +overload_protection_backoff_gc.label: +"""跳过GC""" + +broker_perf_route_lock_type.desc: +"""通配主题订阅/取消订阅性能调优。 +建议仅当通配符主题较多时才更改此参数。 + +注:当从/更改为 `global` 锁时,它要求集群中的所有节点在更改之前停止。 + - `key`:为 Mnesia 事务涉及到的每个 key 上锁,建议单节点时使用。 + - `tab`:为 Mnesia 事务涉及到的表上锁,建议在集群中使用。 + - `global`:所以更新操作都被全局的锁保护,仅建议在超大规模集群中使用。""" + +fields_tcp_opts_nodelay.desc: +"""连接的 TCP_NODELAY 标识""" + +fields_tcp_opts_nodelay.label: +"""TCP_NODELAY""" + +sysmon_top_db_username.desc: +"""PostgreSQL 数据库的用户名""" + +sysmon_top_db_username.label: +"""数据库用户名""" + +broker.desc: +"""Broker 相关配置项。""" + +force_gc_count.desc: +"""在进程收到多少消息之后,对此进程执行垃圾回收。""" + +force_gc_count.label: +"""垃圾回收消息数""" + +mqtt_max_clientid_len.desc: +"""允许的最大 MQTT Client ID 长度。""" + +mqtt_max_clientid_len.label: +"""最大 Client ID 长度""" + +fields_ws_opts_supported_subprotocols.desc: +"""逗号分隔的 subprotocols 支持列表。""" + +fields_ws_opts_supported_subprotocols.label: +"""Subprotocols 支持列表""" + +broker_shared_subscription_strategy.desc: +"""共享订阅消息派发策略。 + - `random`:随机挑选一个共享订阅者派发; + - `round_robin`:使用 round-robin 策略派发; + - `round_robin_per_group`:在共享组内循环选择下一个成员; + - `local`:选择随机的本地成员,否则选择随机的集群范围内成员; + - `sticky`:总是使用上次选中的订阅者派发,直到它断开连接; + - `hash_clientid`:通过对发送者的客户端 ID 进行 Hash 处理来选择订阅者; + - `hash_topic`:通过对源主题进行 Hash 处理来选择订阅者。""" + +fields_deflate_opts_mem_level.desc: +"""指定压缩状态的大小
+较低的值会减少每个连接的内存使用。""" + +fields_deflate_opts_mem_level.label: +"""压缩状态大小""" + +fields_mqtt_quic_listener_send_idle_timeout_ms.desc: +"""在闲置一定时间后重置拥堵控制。默认值:1000""" + +fields_mqtt_quic_listener_send_idle_timeout_ms.label: +"""发送空闲超时毫秒""" + +base_listener_limiter.desc: +"""速率限制类型""" + +base_listener_limiter.label: +"""速率限制类型""" + +persistent_session_store_backend.desc: +"""用于存储持久性会话和信息的数据库管理后端 +- `builtin`: 使用内置的数据库(mria)""" + +persistent_session_store_backend.label: +"""后端类型""" + +alarm_validity_period.desc: +"""停用报警的保留时间。报警在停用时不会立即删除,而是在保留时间之后删除。""" + +alarm_validity_period.label: +"""告警保留时间""" + +server_ssl_opts_schema_ocsp_issuer_pem.desc: +"""服务器证书的 OCSP 签发者的 PEM 编码证书。""" + +server_ssl_opts_schema_ocsp_issuer_pem.label: +"""OCSP 签发者证书""" + +fields_tcp_opts_active_n.desc: +"""为此套接字指定{active,N}选项
+See: https://erlang.org/doc/man/inet.html#setopts-2""" + +fields_tcp_opts_active_n.label: +"""active_n""" + +listener_authentication.desc: +"""监听器认证重载。 +认证配置可以是单个认证器实例,也可以是一个认证器数组组成的认证链。 +执行登录验证时(用户名、客户端 ID 等),将按配置的顺序执行。""" + +listener_authentication.label: +"""每个监听器的认证覆盖""" + +fields_trace_payload_encode.desc: +"""确定跟踪文件中有效负载格式的格式。
+`text`:基于文本的协议或纯文本协议。 +建议在有效负载为JSON编码时使用
+`hex`:二进制十六进制编码。当有效负载是自定义二进制协议时,建议使用此选项
+`hidden`:有效负载被模糊化为 `******`""" + +fields_trace_payload_encode.label: +"""有效负载编码""" + +mqtt_response_information.desc: +"""UTF-8 字符串,用于指定返回给客户端的响应主题,如 reqrsp/,此时请求和应答客户端都需要使用 reqrsp/ 前缀的主题来完成通讯。 +如希望禁用此功能,请在下方的文字框中输入"";仅适用于 MQTT 5.0 客户端。""" + +mqtt_response_information.label: +"""响应信息""" + +persistent_session_store_max_retain_undelivered.desc: +"""如果重新启动时处理上一个会话的节点已停止,则未传递到持久会话的消息在垃圾收集之前会被存储。""" + +persistent_session_store_max_retain_undelivered.label: +"""未投递的消息保留条数""" + +fields_mqtt_quic_listener_migration_enabled.desc: +"""开启客户端地址迁移功能。需要一个支持的负载平衡器,或者没有负载平衡器。默认值:1(已启用)""" + +fields_mqtt_quic_listener_migration_enabled.label: +"""启用地址迁移""" + +common_ssl_opts_schema_password.desc: +"""包含用户密码的字符串。仅在私钥文件受密码保护时使用。""" + +common_ssl_opts_schema_password.label: +"""秘钥文件密码""" + +common_ssl_opts_schema_hibernate_after.desc: +"""在闲置一定时间后休眠 SSL 进程,减少其内存占用。""" + +common_ssl_opts_schema_hibernate_after.label: +"""闲置多久后休眠""" + +fields_mqtt_quic_listener_send_buffering_enabled.desc: +"""缓冲发送数据,而不是保留应用缓冲区,直到发送数据被确认。默认值:1(启用)""" + +fields_mqtt_quic_listener_send_buffering_enabled.label: +"""启用发送缓冲功能""" + +sys_event_client_unsubscribed.desc: +"""是否开启客户端已成功取消订阅主题事件消息。""" + +overload_protection_backoff_new_conn.desc: +"""高负载时,拒绝新进来的客户端连接。""" + +overload_protection_backoff_new_conn.label: +"""关闭新连接""" + +server_ssl_opts_schema_ocsp_responder_url.desc: +"""用于检查服务器证书的 OCSP Responder 的 URL。""" + +server_ssl_opts_schema_ocsp_responder_url.label: +"""OCSP Responder 的 URL""" + +mqtt_idle_timeout.desc: +"""设置连接被断开或进入休眠状态前的等待时间,空闲超时后, + - 如暂未收到客户端的 CONNECT 报文,连接将断开; + - 如已收到客户端的 CONNECT 报文,连接将进入休眠模式以节省系统资源。 + +注意:请合理设置该参数值,如等待时间设置过长,可能造成系统资源的浪费。""" + +mqtt_idle_timeout.label: +"""空闲超时""" + +fields_mqtt_quic_listener_conn_flow_control_window.desc: +"""连接的流控窗口。默认:16777216""" + +fields_mqtt_quic_listener_conn_flow_control_window.label: +"""流控窗口""" + +fields_mqtt_quic_listener_maximum_mtu.desc: +"""一个连接所支持的最大MTU。这将是最大的探测值。默认值:1500""" + +fields_mqtt_quic_listener_maximum_mtu.label: +"""最大 MTU""" + +sysmon_top_db_name.desc: +"""PostgreSQL 数据库的数据库名""" + +sysmon_top_db_name.label: +"""数据库名""" + +mqtt_strict_mode.desc: +"""是否以严格模式解析 MQTT 消息。 +严格模式下,如客户端 ID、主题名称等中包含无效 utf8 字符串,连接将被断开。""" + +mqtt_strict_mode.label: +"""严格模式""" + +shared_subscription_group_strategy.desc: +"""设置共享订阅组为单位的分发策略。该配置是一个从组名到 +策略名的一个map,组名不得包含 `[A-Za-z0-9]` 之外的特殊字符。""" + +fields_deflate_opts_strategy.desc: +"""指定压缩策略。""" + +fields_deflate_opts_strategy.label: +"""指定压缩策略""" + +shared_subscription_strategy_enum.desc: +"""共享订阅的分发策略名称。 +- `random`:随机选择一个组内成员; +- `round_robin`:循环选择下一个成员; +- `round_robin_per_group`:在共享组内循环选择下一个成员; +- `sticky`:使用上一次选中的成员; +- `hash`:根据 ClientID 哈希映射到一个成员; +- `local`:随机分发到节点本地成成员,如果本地成员不存在,则随机分发到任意一个成员。""" + +persistent_session_builtin_sess_msg_table.desc: +"""优化内置的会话消息表的配置。""" + +persistent_session_builtin_sess_msg_table.label: +"""用于内建会话管理表的性能调优参数""" + +mqtt_mqueue_store_qos0.desc: +"""指定在连接断开但会话保持期间,是否需要在消息队列中存储 QoS 0 消息。""" + +mqtt_mqueue_store_qos0.label: +"""存储 QoS 0 消息""" + +server_ssl_opts_schema_client_renegotiation.desc: +"""在支持客户机发起的重新协商的协议中,这种操作的资源成本对于服务器来说高于客户机。 +这可能会成为拒绝服务攻击的载体。 +SSL 应用程序已经采取措施来反击此类尝试,但通过将此选项设置为 false,可以严格禁用客户端发起的重新协商。 +默认值为 true。请注意,由于基础密码套件可以加密的消息数量有限,禁用重新协商可能会导致长期连接变得不可用。""" + +server_ssl_opts_schema_client_renegotiation.label: +"""SSL 客户端冲协商""" + +server_ssl_opts_schema_enable_crl_check.desc: +"""是否为该监听器启用 CRL 检查。""" + +server_ssl_opts_schema_enable_crl_check.label: +"""启用 CRL 检查""" + +fields_mqtt_quic_listener_peer_bidi_stream_count.desc: +"""允许对端打开的双向流的数量""" + 
+fields_mqtt_quic_listener_peer_bidi_stream_count.label: +"""对端双向流的数量""" + +fields_mqtt_quic_listener_max_stateless_operations.desc: +"""无状态操作的最大数量,在任何时候都可以在一个工作者上排队。默认值:16""" + +fields_mqtt_quic_listener_max_stateless_operations.label: +"""最大无状态操作数""" + +fields_ws_opts_idle_timeout.desc: +"""关闭在此间隔内未发送 MQTT CONNECT 消息的客户端的传输层连接。""" + +fields_ws_opts_idle_timeout.label: +"""WS 空闲时间""" + +fields_mqtt_quic_listener_max_ack_delay_ms.desc: +"""在收到数据后要等待多长时间才能发送一个ACK。默认值:25""" + +fields_mqtt_quic_listener_max_ack_delay_ms.label: +"""最大应答延迟 毫秒""" + +base_listener_zone.desc: +"""监听器所属的配置组。""" + +base_listener_zone.label: +"""配置组""" + +fields_mqtt_quic_listener_handshake_idle_timeout.desc: +"""一个握手在被丢弃之前可以空闲多长时间。""" + +fields_mqtt_quic_listener_handshake_idle_timeout.label: +"""握手空闲超时时间""" + +force_gc_enable.desc: +"""启用强制垃圾回收。""" + +force_gc_enable.label: +"""启用强制垃圾回收""" + +fields_ws_opts_allow_origin_absence.desc: +"""If false and check_origin_enable is true, the server will reject requests that don't have origin HTTP header.""" + +fields_ws_opts_allow_origin_absence.label: +"""允许 origin 缺失""" + +common_ssl_opts_schema_versions.desc: +"""支持所有TLS/DTLS版本
+注:PSK 的 Ciphers 无法在 tlsv1.3 中使用,如果打算使用 PSK 密码套件,请确保这里配置为 ["tlsv1.2","tlsv1.1"]。""" + +common_ssl_opts_schema_versions.label: +"""SSL 版本""" + +mqtt_listener_proxy_protocol_timeout.desc: +"""代理协议超时。如果在超时时间内未收到代理协议数据包,EMQX将关闭TCP连接。""" + +mqtt_listener_proxy_protocol_timeout.label: +"""Proxy protocol 超时时间""" + +fields_mqtt_quic_listener_idle_timeout.desc: +"""一个连接在被关闭之前可以空闲多长时间。0表示禁用。""" + +fields_mqtt_quic_listener_idle_timeout.label: +"""空闲超时时间""" + +common_ssl_opts_schema_secure_renegotiate.desc: +"""SSL 参数重新协商是一种允许客户端和服务器动态重新协商 SSL 连接参数的功能。 +RFC 5746 定义了一种更安全的方法。通过启用安全的重新协商,您就失去了对不安全的重新协商的支持,从而容易受到 MitM 攻击。""" + +common_ssl_opts_schema_secure_renegotiate.label: +"""SSL 重新协商""" + +sysmon_vm_busy_port.desc: +"""当一个系统接口(例如 TCP socket)过忙,会触发一条带有 busy_port 关键字的 warning 级别的日志。 +同时还会发布一条主题为 $SYS/sysmon/busy_port 的 MQTT 系统消息。""" + +sysmon_vm_busy_port.label: +"""启用端口过忙监控""" + +sys_event_client_connected.desc: +"""是否开启客户端已连接事件消息。""" + +sysmon_vm_process_low_watermark.desc: +"""在清除相应警报之前,本地节点上可以同时存在多少进程的阈值(以进程百分比表示)。""" + +sysmon_vm_process_low_watermark.label: +"""进程数低水位线""" + +mqtt_max_packet_size.desc: +"""允许的最大 MQTT 报文大小。""" + +mqtt_max_packet_size.label: +"""最大报文大小""" + +common_ssl_opts_schema_reuse_sessions.desc: +"""启用 TLS 会话重用。""" + +common_ssl_opts_schema_reuse_sessions.label: +"""TLS 会话重用""" + +common_ssl_opts_schema_depth.desc: +"""在有效的证书路径中,可以跟随对等证书的非自颁发中间证书的最大数量。 +因此,如果深度为0,则对等方必须由受信任的根 CA 直接签名;
+如果是1,路径可以是 PEER、中间 CA、ROOT-CA;
+如果是2,则路径可以是PEER、中间 CA1、中间 CA2、ROOT-CA。""" + +common_ssl_opts_schema_depth.label: +"""CA 证书深度""" + +sysmon_vm_long_gc.desc: +"""当系统检测到某个 Erlang 进程垃圾回收占用过长时间,会触发一条带有 long_gc 关键字的日志。 +同时还会发布一条主题为 $SYS/sysmon/long_gc 的 MQTT 系统消息。""" + +sysmon_vm_long_gc.label: +"""启用长垃圾回收监控""" + +fields_mqtt_quic_listener_keyfile.desc: +"""私钥文件。在 5.1 中会被废弃,使用 .ssl_options.keyfile 代替。""" + +fields_mqtt_quic_listener_keyfile.label: +"""私钥文件""" + +mqtt_peer_cert_as_clientid.desc: +"""使用对端证书中的 CN、DN 字段或整个证书内容来作为客户端 ID。仅适用于 TLS 连接; +目前支持: +- cn: 取证书的 CN 字段 +- dn: 取证书的 DN 字段 +- crt: 取 DERPEM 证书的内容 +- pem: 将 DER 证书内容转换为 PEM 格式作为客户端 ID +- md5: 取 DERPEM 证书内容的 MD5 值""" + +mqtt_peer_cert_as_clientid.label: +"""对端证书作为客户端 ID""" + +persistent_session_store_message_gc_interval.desc: +"""将未送达的消息垃圾收集到持久会话的开始间隔。 +这会影响检查 "max_retain_undelivered"(最大保留未送达)的删除频率。""" + +persistent_session_store_message_gc_interval.label: +"""消息清理间隔""" + +broker_shared_dispatch_ack_enabled.desc: +"""该配置项已废弃,会在 5.1 中移除。 +启用/禁用 QoS 1 和 QoS 2 消息的共享派发确认。 +开启后,允许将消息从未及时回复 ACK 的订阅者 (例如,客户端离线) 重新派发给另外一个订阅者。""" + +base_listener_enable_authn.desc: +"""配置 true (默认值)启用客户端进行身份认证,通过检查认配置的认认证器链来决定是否允许接入。 +配置 false 时,将不对客户端做任何认证,任何客户端,不论是不是携带用户名等认证信息,都可以接入。 +配置 quick_deny_anonymous 时,行为跟 true 类似,但是会对匿名 +客户直接拒绝,不做使用任何认证器对客户端进行身份检查。""" + +base_listener_enable_authn.label: +"""启用身份认证""" + +force_shutdown_enable.desc: +"""启用 `force_shutdown` 功能。""" + +force_shutdown_enable.label: +"""启用 `force_shutdown` 功能""" + +broker_enable_session_registry.desc: +"""是否启用 Session Registry""" + +overload_protection_backoff_delay.desc: +"""高负载时,一些不重要的任务可能会延迟执行,在这里设置允许延迟的时间。""" + +overload_protection_backoff_delay.label: +"""延迟时间""" + +ciphers_schema_common.desc: +"""此配置保存由逗号分隔的 TLS 密码套件名称,或作为字符串数组。例如 +"TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256"或 +["TLS_AES_256_GCM_SHA384","TLS_AES_128_GCM_SHA256"]。 +
+密码(及其顺序)定义了客户端和服务器通过网络连接加密信息的方式。 +选择一个好的密码套件对于应用程序的数据安全性、机密性和性能至关重要。 + +名称应为 OpenSSL 字符串格式(而不是 RFC 格式)。 +EMQX 配置文档提供的所有默认值和示例都是 OpenSSL 格式
+注意:某些密码套件仅与特定的 TLS 版本兼容('tlsv1.1'、'tlsv1.2'或'tlsv1.3')。 +不兼容的密码套件将被自动删除。 + +例如,如果只有 versions 仅配置为 tlsv1.3。为其他版本配置密码套件将无效。 + +
+注:PSK 的 Ciphers 不支持 tlsv1.3
+如果打算使用PSK密码套件 tlsv1.3。应在ssl.versions中禁用。 + +
+PSK 密码套件: +"RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384, +RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256, +RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA, +RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"""" + +ciphers_schema_common.label: +"""""" + +sys_event_client_disconnected.desc: +"""是否开启客户端已断开连接事件消息。""" + +crl_cache_refresh_interval.desc: +"""从服务器刷新CRL的周期。 该配置对所有 URL 和监听器有效。""" + +crl_cache_refresh_interval.label: +"""CRL 缓存刷新间隔""" + +mqtt_listener_proxy_protocol.desc: +"""如果EMQX集群部署在 HAProxy 或 Nginx 之后,请启用代理协议 V1/2
+详情见: https://www.haproxy.com/blog/haproxy/proxy-protocol/""" + +mqtt_listener_proxy_protocol.label: +"""Proxy protocol""" + +mqtt_listener_access_rules.desc: +"""此监听器的访问控制规则。""" + +mqtt_listener_access_rules.label: +"""访问控制规则""" + +server_ssl_opts_schema_enable_ocsp_stapling.desc: +"""是否为监听器启用 OCSP Stapling 功能。 如果设置为 true,需要定义 OCSP Responder 的 URL 和证书签发者的 PEM 文件路径。""" + +server_ssl_opts_schema_enable_ocsp_stapling.label: +"""启用 OCSP Stapling""" + +fields_tcp_opts_send_timeout_close.desc: +"""如果发送超时,则关闭连接。""" + +fields_tcp_opts_send_timeout_close.label: +"""TCP 发送超时关闭连接""" + +sysmon_os_cpu_check_interval.desc: +"""定期 CPU 检查的时间间隔。""" + +sysmon_os_cpu_check_interval.label: +"""定期 CPU 检查的时间间隔""" + +sysmon_top_sample_interval.desc: +"""指定应收集进程顶部的频率。""" + +sysmon_top_sample_interval.label: +"""取样时间""" + +fields_mqtt_quic_listener_idle_timeout_ms.desc: +"""一个连接在被优雅地关闭之前可以空闲多长时间。0 表示禁用超时""" + +fields_mqtt_quic_listener_idle_timeout_ms.label: +"""空闲超时 毫秒""" + +fields_ws_opts_fail_if_no_subprotocol.desc: +"""如果true,当客户端未携带Sec WebSocket Protocol字段时,服务器将返回一个错误。 +
注意:微信小程序需要禁用此验证。""" + +fields_ws_opts_fail_if_no_subprotocol.label: +"""无 subprotocol 则失败""" + +mqtt_wildcard_subscription.desc: +"""是否启用对 MQTT 通配符订阅的支持。""" + +mqtt_wildcard_subscription.label: +"""通配符订阅可用""" + +server_ssl_opts_schema_ocsp_refresh_interval.desc: +"""为服务器刷新OCSP响应的周期。""" + +server_ssl_opts_schema_ocsp_refresh_interval.label: +"""OCSP 刷新间隔""" + +overload_protection_backoff_hibernation.desc: +"""高负载时,跳过进程休眠。""" + +overload_protection_backoff_hibernation.label: +"""跳过休眠""" + +fields_ws_opts_max_frame_size.desc: +"""单个 MQTT 数据包的最大长度。""" + +fields_ws_opts_max_frame_size.label: +"""最大数据包长度""" + +sys_event_messages.desc: +"""客户端事件消息。""" + +broker_perf_trie_compaction.desc: +"""是否开启主题表压缩存储。 +启用它会显着提高通配符主题订阅率,如果通配符主题具有唯一前缀,例如:'sensor/{{id}}/+/',其中每个订阅者的 ID 是唯一的。 +如果消息主要发布到具有大量级别的主题,则主题匹配性能(发布时)可能会降低。 + +注意:这是一个集群范围的配置。 它要求在更改之前停止所有节点。""" + +sysmon_vm_large_heap.desc: +"""启用后,当一个 Erlang 进程申请了大量内存,系统会触发一条带有 large_heap 关键字的 +warning 级别日志。同时还会发布一条主题为 $SYS/sysmon/busy_dist_port 的 MQTT 系统消息。""" + +sysmon_vm_large_heap.label: +"""启用大 heap 监控""" + +} diff --git a/rel/i18n/zh/emqx_slow_subs_api.hocon b/rel/i18n/zh/emqx_slow_subs_api.hocon new file mode 100644 index 000000000..b0e801ca3 --- /dev/null +++ b/rel/i18n/zh/emqx_slow_subs_api.hocon @@ -0,0 +1,30 @@ +emqx_slow_subs_api { + +clear_records_api.desc: +"""清除当前记录,然后重新开始统计""" + +clientid.desc: +"""消息的客户端 ID""" + +get_records_api.desc: +"""查看慢订阅的统计数据""" + +get_setting_api.desc: +"""查看配置""" + +last_update_time.desc: +"""记录的更新时间戳""" + +node.desc: +"""消息的节点名称""" + +timespan.desc: +"""消息的传输耗时""" + +topic.desc: +"""消息的主题""" + +update_setting_api.desc: +"""更新配置""" + +} diff --git a/rel/i18n/zh/emqx_slow_subs_schema.hocon b/rel/i18n/zh/emqx_slow_subs_schema.hocon new file mode 100644 index 000000000..beadac1ea --- /dev/null +++ b/rel/i18n/zh/emqx_slow_subs_schema.hocon @@ -0,0 +1,18 @@ +emqx_slow_subs_schema { + +enable.desc: +"""开启慢订阅""" + +expire_interval.desc: +"""慢订阅记录的有效时间""" + +stats_type.desc: +"""慢订阅的统计类型""" + +threshold.desc: +"""慢订阅统计的阈值""" + +top_k_num.desc: +"""慢订阅统计表的记录数量上限""" + +} diff --git a/rel/i18n/zh/emqx_statsd_api.hocon b/rel/i18n/zh/emqx_statsd_api.hocon new file mode 100644 index 000000000..5761e2431 --- /dev/null +++ b/rel/i18n/zh/emqx_statsd_api.hocon @@ -0,0 +1,9 @@ +emqx_statsd_api { + +get_statsd_config_api.desc: +"""列出 StatsD 指标采集和推送服务的的配置。""" + +update_statsd_config_api.desc: +"""更新 StatsD 指标采集和推送服务的配置。""" + +} diff --git a/rel/i18n/zh/emqx_statsd_schema.hocon b/rel/i18n/zh/emqx_statsd_schema.hocon new file mode 100644 index 000000000..548ad33bc --- /dev/null +++ b/rel/i18n/zh/emqx_statsd_schema.hocon @@ -0,0 +1,30 @@ +emqx_statsd_schema { + +enable.desc: +"""启用或禁用 StatsD 指标采集和推送服务。""" + +flush_interval.desc: +"""指标的推送间隔。""" + +get_statsd_config_api.desc: +"""列出 StatsD 指标采集和推送服务的的配置。""" + +sample_interval.desc: +"""指标的采样间隔。""" + +server.desc: +"""StatsD 服务器地址。""" + +statsd.desc: +"""StatsD 指标采集与推送配置。""" + +statsd.label: +"""StatsD""" + +tags.desc: +"""指标的标签。""" + +update_statsd_config_api.desc: +"""更新 StatsD 指标采集和推送服务的配置。""" + +} diff --git a/rel/i18n/zh/emqx_stomp_schema.hocon b/rel/i18n/zh/emqx_stomp_schema.hocon new file mode 100644 index 000000000..13cfc0397 --- /dev/null +++ b/rel/i18n/zh/emqx_stomp_schema.hocon @@ -0,0 +1,15 @@ +emqx_stomp_schema { + +stom_frame_max_body_length.desc: +"""允许的 Stomp 报文 Body 的最大字节数""" + +stom_frame_max_headers.desc: +"""允许的 Header 最大数量""" + +stomp.desc: +"""Stomp 网关配置。当前实现支持 v1.2/1.1/1.0 协议版本""" + +stomp_frame_max_headers_length.desc: +"""允许的 Header 字符串的最大长度""" + +} diff --git 
a/rel/i18n/zh/emqx_telemetry_api.hocon b/rel/i18n/zh/emqx_telemetry_api.hocon new file mode 100644 index 000000000..4e445a56f --- /dev/null +++ b/rel/i18n/zh/emqx_telemetry_api.hocon @@ -0,0 +1,54 @@ +emqx_telemetry_api { + +active_modules.desc: +"""获取活跃模块""" + +active_plugins.desc: +"""获取活跃插件""" + +emqx_version.desc: +"""获取 emqx 版本""" + +enable.desc: +"""启用遥测""" + +get_telemetry_data_api.desc: +"""获取遥测数据""" + +get_telemetry_status_api.desc: +"""获取遥测状态""" + +license.desc: +"""获取 license 信息""" + +messages_received.desc: +"""获取接收到的消息数量""" + +messages_sent.desc: +"""获取发送的消息数量""" + +nodes_uuid.desc: +"""获取节点 UUID""" + +num_clients.desc: +"""获取客户端数量""" + +os_name.desc: +"""获取操作系统名称""" + +os_version.desc: +"""获取操作系统版本""" + +otp_version.desc: +"""获取 OTP 版本""" + +up_time.desc: +"""获取运行时间""" + +update_telemetry_status_api.desc: +"""更新遥测状态""" + +uuid.desc: +"""获取 UUID""" + +} diff --git a/rel/i18n/zh/emqx_topic_metrics_api.hocon b/rel/i18n/zh/emqx_topic_metrics_api.hocon new file mode 100644 index 000000000..23a5791b3 --- /dev/null +++ b/rel/i18n/zh/emqx_topic_metrics_api.hocon @@ -0,0 +1,105 @@ +emqx_topic_metrics_api { + +message_qos1_in_rate.desc: +"""QoS1 接收消息速率""" + +message_out_count.desc: +"""发送消息数量""" + +message_qos2_out_rate.desc: +"""QoS2 发送消息速率""" + +message_qos0_in_rate.desc: +"""QoS0 接收消息速率""" + +get_topic_metrics_api.desc: +"""获取主题监控数据""" + +reset_time.desc: +"""重置时间。标准 rfc3339 时间格式,例如:2018-01-01T12:00:00Z。如果从未重置则为空""" + +topic_metrics_api_response400.desc: +"""错误请求。已存在或错误的主题名称""" + +reset_topic_desc.desc: +"""主题名称。如果此参数不存在,则所有创建的主题监控数据都将重置。""" + +topic_metrics_api_response409.desc: +"""冲突。主题监控数据超过最大限制512""" + +post_topic_metrics_api.desc: +"""创建主题监控数据""" + +message_dropped_rate.desc: +"""丢弃消息速率""" + +message_qos2_in_rate.desc: +"""QoS2 接收消息速率""" + +message_in_rate.desc: +"""接收消息速率""" + +message_qos0_out_rate.desc: +"""QoS0 发送消息速率""" + +message_qos2_in_count.desc: +"""QoS2 接收消息数量""" + +message_dropped_count.desc: +"""丢弃消息数量""" + +topic_metrics_api_response404.desc: +"""未找到。主题监控数据未找到""" + +topic_in_path.desc: +"""主题字符串。注意:主题字符串在url路径中必须编码""" + +action.desc: +"""操作,仅支持 reset""" + +message_qos0_in_count.desc: +"""QoS0 接收消息数量""" + +message_qos1_out_rate.desc: +"""QoS1 发送消息速率""" + +topic.desc: +"""主题""" + +reset_topic_metrics_api.desc: +"""重置主题监控状态""" + +create_time.desc: +"""创建时间。标准 rfc3339 时间格式,例如:2018-01-01T12:00:00Z""" + +metrics.desc: +"""监控数据""" + +message_qos1_out_count.desc: +"""QoS1 发送消息数量""" + +gat_topic_metrics_data_api.desc: +"""获取主题监控数据""" + +message_qos1_in_count.desc: +"""QoS1 接收消息数量""" + +delete_topic_metrics_data_api.desc: +"""删除主题监控数据""" + +message_qos0_out_count.desc: +"""QoS0 发送消息数量""" + +topic_in_body.desc: +"""主题字符串""" + +message_in_count.desc: +"""接收消息数量""" + +message_qos2_out_count.desc: +"""QoS2 发送消息数量""" + +message_out_rate.desc: +"""发送消息速率""" + +} From b63b880116234b9022a41f0ef18811917a57d101 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Wed, 5 Apr 2023 16:20:51 +0200 Subject: [PATCH 013/263] refactor: update i18n style check script to work with new layout --- scripts/check-i18n-style.escript | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/scripts/check-i18n-style.escript b/scripts/check-i18n-style.escript index a76fce90e..b8b6bdac7 100755 --- a/scripts/check-i18n-style.escript +++ b/scripts/check-i18n-style.escript @@ -76,22 +76,16 @@ check_label(_Name, _) -> ok. 
check_desc(Name, #{<<"desc">> := Desc}) -> - do_check_desc(Name, Desc); + check_desc_string(Name, Desc); check_desc(Name, _) -> die("~s: no 'desc'~n", [Name]). -do_check_desc(Name, #{<<"zh">> := Zh, <<"en">> := En}) -> - ok = check_desc_string(Name, "zh", Zh), - ok = check_desc_string(Name, "en", En); -do_check_desc(Name, _) -> - die("~s: missing 'zh' or 'en'~n", [Name]). - -check_desc_string(Name, Tr, <<>>) -> - logerr("~s.~s: empty string~n", [Name, Tr]); -check_desc_string(Name, Tr, BinStr) -> +check_desc_string(Name, <<>>) -> + logerr("~s: empty string~n", [Name]); +check_desc_string(Name, BinStr) -> Str = unicode:characters_to_list(BinStr, utf8), Err = fun(Reason) -> - logerr("~s.~s: ~s~n", [Name, Tr, Reason]) + logerr("~s: ~s~n", [Name, Reason]) end, case Str of [$\s | _] -> From 9b7800aa8cd5e5ad60edad0fb01e0535ad6b6b63 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Wed, 5 Apr 2023 16:47:40 +0200 Subject: [PATCH 014/263] refactor: merge into per-language i18n files --- scripts/merge-i18n.escript | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/scripts/merge-i18n.escript b/scripts/merge-i18n.escript index b2501d10a..7ffd3aa8a 100755 --- a/scripts/merge-i18n.escript +++ b/scripts/merge-i18n.escript @@ -3,10 +3,14 @@ -mode(compile). main(_) -> + main_per_lang("en"), + main_per_lang("zh"). + +main_per_lang(Lang) -> BaseConf = <<"">>, - Cfgs0 = get_all_files(), + Cfgs0 = get_all_files(Lang), Conf = merge(BaseConf, Cfgs0), - OutputFile = "apps/emqx_dashboard/priv/i18n.conf", + OutputFile = "apps/emqx_dashboard/priv/i18n." ++ Lang ++ ".conf", ok = filelib:ensure_dir(OutputFile), ok = file:write_file(OutputFile, Conf). @@ -21,7 +25,11 @@ merge(BaseConf, Cfgs) -> end end, BaseConf, Cfgs). -get_all_files() -> - Dir = filename:join(["rel","i18n"]), +get_all_files(Lang) -> + Dir = + case Lang of + "en" -> filename:join(["rel", "i18n"]); + "zh" -> filename:join(["rel", "i18n", "zh"]) + end, Files = filelib:wildcard("*.hocon", Dir), lists:map(fun(Name) -> filename:join([Dir, Name]) end, Files). 
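For reference, a minimal sketch of the per-language layout these two scripts now assume (illustrative, not part of the patches). English descriptions live under rel/i18n/<module>.hocon and Chinese ones under rel/i18n/zh/<module>.hocon, and each entry is a plain desc/label string rather than the old nested desc.en / desc.zh map. Loading one of the files added in this series, e.g. with hocon:load/1 as the description cache in the next patch does, yields nested maps with binary keys:

    %% Illustrative only: the shape check_desc_string/2 now validates is a plain
    %% UTF-8 string per desc/label entry, keyed by namespace and id.
    {ok, Conf} = hocon:load("rel/i18n/zh/emqx_schema.hocon"),
    #{<<"emqx_schema">> :=
        #{<<"mqtt_max_packet_size">> :=
            #{<<"desc">> := <<"允许的最大 MQTT 报文大小。"/utf8>>,
              <<"label">> := <<"最大报文大小"/utf8>>}}} = Conf.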
From 18974a8e11132b4cc4ba2bb352f915dcfa519a5a Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Fri, 7 Apr 2023 11:18:29 +0200 Subject: [PATCH 015/263] refactor: make schema dump and swagger spec work with split desc files --- Makefile | 1 - apps/emqx/rebar.config | 2 +- apps/emqx_conf/src/emqx_conf.erl | 89 ++++++++------- apps/emqx_dashboard/src/emqx_dashboard.erl | 63 +++-------- .../src/emqx_dashboard_desc_cache.erl | 105 ++++++++++++++++++ .../src/emqx_dashboard_listener.erl | 71 +++++++----- .../src/emqx_dashboard_schema.erl | 2 + .../emqx_dashboard/src/emqx_dashboard_sup.erl | 2 + .../src/emqx_dashboard_swagger.erl | 94 +++++++++------- .../test/emqx_dashboard_listener_SUITE.erl | 51 +++++++++ .../test/emqx_swagger_parameter_SUITE.erl | 1 - .../test/emqx_swagger_requestBody_SUITE.erl | 1 - .../test/emqx_swagger_response_SUITE.erl | 1 - .../test/emqx_rule_funcs_SUITE.erl | 1 - build | 3 +- mix.exs | 2 +- rebar.config | 2 +- rebar.config.erl | 3 +- scripts/merge-config.escript | 50 ++++++++- scripts/merge-i18n.escript | 35 ------ scripts/pre-compile.sh | 1 - 21 files changed, 375 insertions(+), 205 deletions(-) create mode 100644 apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl create mode 100644 apps/emqx_dashboard/test/emqx_dashboard_listener_SUITE.erl delete mode 100755 scripts/merge-i18n.escript diff --git a/Makefile b/Makefile index 45218bf46..fe75b01bc 100644 --- a/Makefile +++ b/Makefile @@ -239,7 +239,6 @@ $(foreach zt,$(ALL_DOCKERS),$(eval $(call gen-docker-target,$(zt)))) .PHONY: merge-config: @$(SCRIPTS)/merge-config.escript - @$(SCRIPTS)/merge-i18n.escript ## elixir target is to create release packages using Elixir's Mix .PHONY: $(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir) diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index d954a6b1e..5945ccc7c 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -29,7 +29,7 @@ {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}}, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.6"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, - {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.38.1"}}}, + {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.1"}}}, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}}, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}, {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}}, diff --git a/apps/emqx_conf/src/emqx_conf.erl b/apps/emqx_conf/src/emqx_conf.erl index 1ecda913d..d77ffb680 100644 --- a/apps/emqx_conf/src/emqx_conf.erl +++ b/apps/emqx_conf/src/emqx_conf.erl @@ -25,9 +25,9 @@ -export([update/3, update/4]). -export([remove/2, remove/3]). -export([reset/2, reset/3]). --export([dump_schema/1, dump_schema/3]). +-export([dump_schema/2]). -export([schema_module/0]). --export([gen_example_conf/4]). +-export([gen_example_conf/2]). %% for rpc -export([get_node_and_config/1]). @@ -136,24 +136,21 @@ reset(Node, KeyPath, Opts) -> emqx_conf_proto_v2:reset(Node, KeyPath, Opts). %% @doc Called from build script. --spec dump_schema(file:name_all()) -> ok. -dump_schema(Dir) -> - I18nFile = emqx_dashboard:i18n_file(), - dump_schema(Dir, emqx_conf_schema, I18nFile). 
- -dump_schema(Dir, SchemaModule, I18nFile) -> +dump_schema(Dir, SchemaModule) -> + _ = application:load(emqx_dashboard), + ok = emqx_dashboard_desc_cache:init(), lists:foreach( fun(Lang) -> - gen_config_md(Dir, I18nFile, SchemaModule, Lang), - gen_api_schema_json(Dir, I18nFile, Lang), - gen_example_conf(Dir, I18nFile, SchemaModule, Lang), - gen_schema_json(Dir, I18nFile, SchemaModule, Lang) + ok = gen_config_md(Dir, SchemaModule, Lang), + ok = gen_api_schema_json(Dir, Lang), + ok = gen_schema_json(Dir, SchemaModule, Lang) end, ["en", "zh"] - ). + ), + ok = gen_example_conf(Dir, SchemaModule). %% for scripts/spellcheck. -gen_schema_json(Dir, I18nFile, SchemaModule, Lang) -> +gen_schema_json(Dir, SchemaModule, Lang) -> SchemaJsonFile = filename:join([Dir, "schema-" ++ Lang ++ ".json"]), io:format(user, "===< Generating: ~s~n", [SchemaJsonFile]), %% EMQX_SCHEMA_FULL_DUMP is quite a hidden API @@ -164,40 +161,44 @@ gen_schema_json(Dir, I18nFile, SchemaModule, Lang) -> false -> ?IMPORTANCE_LOW end, io:format(user, "===< Including fields from importance level: ~p~n", [IncludeImportance]), - Opts = #{desc_file => I18nFile, lang => Lang, include_importance_up_from => IncludeImportance}, + Opts = #{ + include_importance_up_from => IncludeImportance, + desc_resolver => make_desc_resolver(Lang) + }, JsonMap = hocon_schema_json:gen(SchemaModule, Opts), IoData = emqx_utils_json:encode(JsonMap, [pretty, force_utf8]), ok = file:write_file(SchemaJsonFile, IoData). -gen_api_schema_json(Dir, I18nFile, Lang) -> - emqx_dashboard:init_i18n(I18nFile, list_to_binary(Lang)), +gen_api_schema_json(Dir, Lang) -> gen_api_schema_json_hotconf(Dir, Lang), - gen_api_schema_json_bridge(Dir, Lang), - emqx_dashboard:clear_i18n(). + gen_api_schema_json_bridge(Dir, Lang). gen_api_schema_json_hotconf(Dir, Lang) -> SchemaInfo = #{title => <<"EMQX Hot Conf API Schema">>, version => <<"0.1.0">>}, File = schema_filename(Dir, "hot-config-schema-", Lang), - ok = do_gen_api_schema_json(File, emqx_mgmt_api_configs, SchemaInfo). + ok = do_gen_api_schema_json(File, emqx_mgmt_api_configs, SchemaInfo, Lang). gen_api_schema_json_bridge(Dir, Lang) -> SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => <<"0.1.0">>}, File = schema_filename(Dir, "bridge-api-", Lang), - ok = do_gen_api_schema_json(File, emqx_bridge_api, SchemaInfo). + ok = do_gen_api_schema_json(File, emqx_bridge_api, SchemaInfo, Lang). schema_filename(Dir, Prefix, Lang) -> Filename = Prefix ++ Lang ++ ".json", filename:join([Dir, Filename]). -gen_config_md(Dir, I18nFile, SchemaModule, Lang) -> +%% TODO: remove it and also remove hocon_md.erl and friends. +%% markdown generation from schema is a failure and we are moving to an interactive +%% viewer like swagger UI. +gen_config_md(Dir, SchemaModule, Lang) -> SchemaMdFile = filename:join([Dir, "config-" ++ Lang ++ ".md"]), io:format(user, "===< Generating: ~s~n", [SchemaMdFile]), - ok = gen_doc(SchemaMdFile, SchemaModule, I18nFile, Lang). + ok = gen_doc(SchemaMdFile, SchemaModule, Lang). -gen_example_conf(Dir, I18nFile, SchemaModule, Lang) -> - SchemaMdFile = filename:join([Dir, "emqx.conf." ++ Lang ++ ".example"]), +gen_example_conf(Dir, SchemaModule) -> + SchemaMdFile = filename:join([Dir, "emqx.conf.example"]), io:format(user, "===< Generating: ~s~n", [SchemaMdFile]), - ok = gen_example(SchemaMdFile, SchemaModule, I18nFile, Lang). + ok = gen_example(SchemaMdFile, SchemaModule). %% @doc return the root schema module. -spec schema_module() -> module(). 
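A hedged sketch of how the new two-argument entry point is driven from the build script (the target directory below is made up; only the call shape is taken from this hunk):

    %% Illustrative only. emqx_conf_schema is the root schema module (see
    %% schema_module/0), and descriptions are now resolved per language through
    %% the emqx_dashboard_desc_cache initialised at the top of dump_schema/2,
    %% instead of being read from a desc_file option.
    ok = emqx_conf:dump_schema("_build/docgen", emqx_conf_schema).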
@@ -211,35 +212,48 @@ schema_module() -> %% Internal functions %%-------------------------------------------------------------------- --spec gen_doc(file:name_all(), module(), file:name_all(), string()) -> ok. -gen_doc(File, SchemaModule, I18nFile, Lang) -> +%% @doc Make a resolver function that can be used to lookup the description by hocon_schema_json dump. +make_desc_resolver(Lang) -> + fun + ({desc, Namespace, Id}) -> + emqx_dashboard_desc_cache:lookup(Lang, Namespace, Id, desc); + (Desc) -> + unicode:characters_to_binary(Desc) + end. + +-spec gen_doc(file:name_all(), module(), string()) -> ok. +gen_doc(File, SchemaModule, Lang) -> Version = emqx_release:version(), Title = "# " ++ emqx_release:description() ++ " Configuration\n\n" ++ "", BodyFile = filename:join([rel, "emqx_conf.template." ++ Lang ++ ".md"]), {ok, Body} = file:read_file(BodyFile), - Opts = #{title => Title, body => Body, desc_file => I18nFile, lang => Lang}, + Resolver = make_desc_resolver(Lang), + Opts = #{title => Title, body => Body, desc_resolver => Resolver}, Doc = hocon_schema_md:gen(SchemaModule, Opts), file:write_file(File, Doc). -gen_example(File, SchemaModule, I18nFile, Lang) -> +gen_example(File, SchemaModule) -> + %% we do not generate description in example files + %% so there is no need for a desc_resolver Opts = #{ title => <<"EMQX Configuration Example">>, body => <<"">>, - desc_file => I18nFile, - lang => Lang, include_importance_up_from => ?IMPORTANCE_MEDIUM }, Example = hocon_schema_example:gen(SchemaModule, Opts), file:write_file(File, Example). %% Only gen hot_conf schema, not all configuration fields. -do_gen_api_schema_json(File, SchemaMod, SchemaInfo) -> +do_gen_api_schema_json(File, SchemaMod, SchemaInfo, Lang) -> io:format(user, "===< Generating: ~s~n", [File]), {ApiSpec0, Components0} = emqx_dashboard_swagger:spec( SchemaMod, - #{schema_converter => fun hocon_schema_to_spec/2} + #{ + schema_converter => fun hocon_schema_to_spec/2, + i18n_lang => Lang + } ), ApiSpec = lists:foldl( fun({Path, Spec, _, _}, Acc) -> @@ -278,13 +292,6 @@ do_gen_api_schema_json(File, SchemaMod, SchemaInfo) -> ), file:write_file(File, IoData). --define(INIT_SCHEMA, #{ - fields => #{}, - translations => #{}, - validations => [], - namespace => undefined -}). - -define(TO_REF(_N_, _F_), iolist_to_binary([to_bin(_N_), ".", to_bin(_F_)])). -define(TO_COMPONENTS_SCHEMA(_M_, _F_), iolist_to_binary([ diff --git a/apps/emqx_dashboard/src/emqx_dashboard.erl b/apps/emqx_dashboard/src/emqx_dashboard.erl index 6f0c8334a..08b7f0142 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard.erl @@ -16,22 +16,13 @@ -module(emqx_dashboard). --define(APP, ?MODULE). - -export([ start_listeners/0, start_listeners/1, stop_listeners/1, stop_listeners/0, - list_listeners/0 -]). - --export([ - init_i18n/2, - init_i18n/0, - get_i18n/0, - i18n_file/0, - clear_i18n/0 + list_listeners/0, + wait_for_listeners/0 ]). 
%% Authorization @@ -90,30 +81,34 @@ start_listeners(Listeners) -> dispatch => Dispatch, middlewares => [?EMQX_MIDDLE, cowboy_router, cowboy_handler] }, - Res = + {OkListeners, ErrListeners} = lists:foldl( - fun({Name, Protocol, Bind, RanchOptions, ProtoOpts}, Acc) -> + fun({Name, Protocol, Bind, RanchOptions, ProtoOpts}, {OkAcc, ErrAcc}) -> Minirest = BaseMinirest#{protocol => Protocol, protocol_options => ProtoOpts}, case minirest:start(Name, RanchOptions, Minirest) of {ok, _} -> ?ULOG("Listener ~ts on ~ts started.~n", [ Name, emqx_listeners:format_bind(Bind) ]), - Acc; + {[Name | OkAcc], ErrAcc}; {error, _Reason} -> %% Don't record the reason because minirest already does(too much logs noise). - [Name | Acc] + {OkAcc, [Name | ErrAcc]} end end, - [], + {[], []}, listeners(Listeners) ), - case Res of - [] -> ok; - _ -> {error, Res} + case ErrListeners of + [] -> + optvar:set(emqx_dashboard_listeners_ready, OkListeners), + ok; + _ -> + {error, ErrListeners} end. stop_listeners(Listeners) -> + optvar:unset(emqx_dashboard_listeners_ready), [ begin case minirest:stop(Name) of @@ -129,23 +124,8 @@ stop_listeners(Listeners) -> ], ok. -get_i18n() -> - application:get_env(emqx_dashboard, i18n). - -init_i18n(File, Lang) when is_atom(Lang) -> - init_i18n(File, atom_to_binary(Lang)); -init_i18n(File, Lang) when is_binary(Lang) -> - Cache = hocon_schema:new_desc_cache(File), - application:set_env(emqx_dashboard, i18n, #{lang => Lang, cache => Cache}). - -clear_i18n() -> - case application:get_env(emqx_dashboard, i18n) of - {ok, #{cache := Cache}} -> - hocon_schema:delete_desc_cache(Cache), - application:unset_env(emqx_dashboard, i18n); - undefined -> - ok - end. +wait_for_listeners() -> + optvar:read(emqx_dashboard_listeners_ready). %%-------------------------------------------------------------------- %% internal @@ -187,11 +167,6 @@ ip_port(error, Opts) -> {Opts#{port => 18083}, 18083}; ip_port({Port, Opts}, _) when is_integer(Port) -> {Opts#{port => Port}, Port}; ip_port({{IP, Port}, Opts}, _) -> {Opts#{port => Port, ip => IP}, {IP, Port}}. -init_i18n() -> - File = i18n_file(), - Lang = emqx_conf:get([dashboard, i18n_lang], en), - init_i18n(File, Lang). - ranch_opts(Options) -> Keys = [ handshake_timeout, @@ -255,12 +230,6 @@ return_unauthorized(Code, Message) -> }, #{code => Code, message => Message}}. -i18n_file() -> - case application:get_env(emqx_dashboard, i18n_file) of - undefined -> filename:join([code:priv_dir(emqx_dashboard), "i18n.conf"]); - {ok, File} -> File - end. - listeners() -> emqx_conf:get([dashboard, listeners], #{}). diff --git a/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl b/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl new file mode 100644 index 000000000..8fd6fe3d3 --- /dev/null +++ b/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl @@ -0,0 +1,105 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. 
+%%-------------------------------------------------------------------- + +%% @doc This module is used to cache the description of the configuration items. +-module(emqx_dashboard_desc_cache). + +-export([init/0]). + +%% internal exports +-export([load_desc/2, lookup/4, lookup/5]). + +-include_lib("emqx/include/logger.hrl"). + +%% @doc Global ETS table to cache the description of the configuration items. +%% The table is owned by the emqx_dashboard_sup the root supervisor of emqx_dashboard. +%% The cache is initialized with the default language (English) and +%% all the desc..hocon files in the www/static directory (extracted from dashboard package). +init() -> + ok = ensure_app_loaded(emqx_dashboard), + PrivDir = code:priv_dir(emqx_dashboard), + EngDesc = filename:join([PrivDir, "desc.en.hocon"]), + WwwStaticDir = filename:join([PrivDir, "www", "static"]), + OtherLangDesc0 = filelib:wildcard("desc.*.hocon", WwwStaticDir), + OtherLangDesc = lists:map(fun(F) -> filename:join([WwwStaticDir, F]) end, OtherLangDesc0), + Files = [EngDesc | OtherLangDesc], + ?MODULE = ets:new(?MODULE, [named_table, public, set, {read_concurrency, true}]), + ok = lists:foreach(fun(F) -> load_desc(?MODULE, F) end, Files). + +%% @doc Load the description of the configuration items from the file. +%% Load is incremental, so it can be called multiple times. +%% NOTE: no garbage collection is done, because stale entries are harmless. +load_desc(EtsTab, File) -> + ?SLOG(info, #{msg => "loading desc", file => File}), + {ok, Descs} = hocon:load(File), + ["desc", Lang, "hocon"] = string:tokens(filename:basename(File), "."), + Insert = fun(Namespace, Id, Tag, Text) -> + Key = {bin(Lang), bin(Namespace), bin(Id), bin(Tag)}, + true = ets:insert(EtsTab, {Key, bin(Text)}), + ok + end, + walk_ns(Insert, maps:to_list(Descs)). + +%% @doc Lookup the description of the configuration item from the global cache. +lookup(Lang, Namespace, Id, Tag) -> + lookup(?MODULE, Lang, Namespace, Id, Tag). + +%% @doc Lookup the description of the configuration item from the given cache. +lookup(EtsTab, Lang0, Namespace, Id, Tag) -> + Lang = bin(Lang0), + case ets:lookup(EtsTab, {Lang, bin(Namespace), bin(Id), bin(Tag)}) of + [{_, Desc}] -> + Desc; + [] when Lang =/= <<"en">> -> + %% fallback to English + lookup(EtsTab, <<"en">>, Namespace, Id, Tag); + _ -> + %% undefined but not <<>> + undefined + end. + +%% The desc files are of names like: +%% desc.en.hocon or desc.zh.hocon +%% And with content like: +%% namespace.id.desc = "description" +%% namespace.id.label = "label" +walk_ns(_Insert, []) -> + ok; +walk_ns(Insert, [{Namespace, Ids} | Rest]) -> + walk_id(Insert, Namespace, maps:to_list(Ids)), + walk_ns(Insert, Rest). + +walk_id(_Insert, _Namespace, []) -> + ok; +walk_id(Insert, Namespace, [{Id, Tags} | Rest]) -> + walk_tag(Insert, Namespace, Id, maps:to_list(Tags)), + walk_id(Insert, Namespace, Rest). + +walk_tag(_Insert, _Namespace, _Id, []) -> + ok; +walk_tag(Insert, Namespace, Id, [{Tag, Text} | Rest]) -> + ok = Insert(Namespace, Id, Tag, Text), + walk_tag(Insert, Namespace, Id, Rest). + +bin(A) when is_atom(A) -> atom_to_binary(A, utf8); +bin(B) when is_binary(B) -> B; +bin(L) when is_list(L) -> list_to_binary(L). + +ensure_app_loaded(App) -> + case application:load(App) of + ok -> ok; + {error, {already_loaded, _}} -> ok + end. 
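A brief usage sketch of the cache above (illustrative, not part of the patch). init/0 is normally invoked by emqx_dashboard_sup at start-up, as a later hunk in this patch shows; the emqx_schema key used below appears in the translation files added earlier in this series, while no_such_field is deliberately made up:

    %% Illustrative only: lookup/4 falls back to English when the requested
    %% language has no entry, and returns the atom 'undefined' (never <<>>)
    %% when the English entry is missing as well.
    ok = emqx_dashboard_desc_cache:init(),
    ZhDesc = emqx_dashboard_desc_cache:lookup(<<"zh">>, emqx_schema, mqtt_max_packet_size, desc),
    EnLabel = emqx_dashboard_desc_cache:lookup(en, emqx_schema, mqtt_max_packet_size, label),
    undefined = emqx_dashboard_desc_cache:lookup(<<"zh">>, emqx_schema, no_such_field, desc).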
diff --git a/apps/emqx_dashboard/src/emqx_dashboard_listener.erl b/apps/emqx_dashboard/src/emqx_dashboard_listener.erl index 01d96bdf0..6a306c288 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_listener.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_listener.erl @@ -15,9 +15,11 @@ %%-------------------------------------------------------------------- -module(emqx_dashboard_listener). --include_lib("emqx/include/logger.hrl"). -behaviour(emqx_config_handler). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + %% API -export([add_handler/0, remove_handler/0]). -export([pre_config_update/3, post_config_update/5]). @@ -54,12 +56,10 @@ init([]) -> {ok, undefined, {continue, regenerate_dispatch}}. handle_continue(regenerate_dispatch, _State) -> - NewState = regenerate_minirest_dispatch(), - {noreply, NewState, hibernate}. + %% initialize the swagger dispatches + ready = regenerate_minirest_dispatch(), + {noreply, ready, hibernate}. -handle_call(is_ready, _From, retry) -> - NewState = regenerate_minirest_dispatch(), - {reply, NewState, NewState, hibernate}; handle_call(is_ready, _From, State) -> {reply, State, State, hibernate}; handle_call(_Request, _From, State) -> @@ -68,6 +68,9 @@ handle_call(_Request, _From, State) -> handle_cast(_Request, State) -> {noreply, State, hibernate}. +handle_info(i18n_lang_changed, _State) -> + NewState = regenerate_minirest_dispatch(), + {noreply, NewState, hibernate}; handle_info({update_listeners, OldListeners, NewListeners}, _State) -> ok = emqx_dashboard:stop_listeners(OldListeners), ok = emqx_dashboard:start_listeners(NewListeners), @@ -83,29 +86,26 @@ terminate(_Reason, _State) -> code_change(_OldVsn, State, _Extra) -> {ok, State}. -%% generate dispatch is very slow. +%% generate dispatch is very slow, takes about 1s. regenerate_minirest_dispatch() -> - try - emqx_dashboard:init_i18n(), - lists:foreach( - fun(Listener) -> - minirest:update_dispatch(element(1, Listener)) - end, - emqx_dashboard:list_listeners() - ), - ready - catch - T:E:S -> - ?SLOG(error, #{ - msg => "regenerate_minirest_dispatch_failed", - reason => E, - type => T, - stacktrace => S - }), - retry - after - emqx_dashboard:clear_i18n() - end. + %% optvar:read waits for the var to be set + Names = emqx_dashboard:wait_for_listeners(), + {Time, ok} = timer:tc(fun() -> do_regenerate_minirest_dispatch(Names) end), + Lang = emqx:get_config([dashboard, i18n_lang]), + ?tp(info, regenerate_minirest_dispatch, #{ + elapsed => erlang:convert_time_unit(Time, microsecond, millisecond), + listeners => Names, + i18n_lang => Lang + }), + ready. + +do_regenerate_minirest_dispatch(Names) -> + lists:foreach( + fun(Name) -> + ok = minirest:update_dispatch(Name) + end, + Names + ). add_handler() -> Roots = emqx_dashboard_schema:roots(), @@ -117,6 +117,12 @@ remove_handler() -> ok = emqx_config_handler:remove_handler(Roots), ok. +pre_config_update(_Path, {change_i18n_lang, NewLang}, RawConf) -> + %% e.g. emqx_conf:update([dashboard], {change_i18n_lang, zh}, #{}). + %% TODO: check if there is such a language (all languages are cached in emqx_dashboard_desc_cache) + Update = #{<<"i18n_lang">> => NewLang}, + NewConf = emqx_utils_maps:deep_merge(RawConf, Update), + {ok, NewConf}; pre_config_update(_Path, UpdateConf0, RawConf) -> UpdateConf = remove_sensitive_data(UpdateConf0), NewConf = emqx_utils_maps:deep_merge(RawConf, UpdateConf), @@ -139,6 +145,8 @@ remove_sensitive_data(Conf0) -> Conf1 end. 
+post_config_update(_, {change_i18n_lang, _}, _NewConf, _OldConf, _AppEnvs) -> + delay_job(i18n_lang_changed); post_config_update(_, _Req, NewConf, OldConf, _AppEnvs) -> OldHttp = get_listener(http, OldConf), OldHttps = get_listener(https, OldConf), @@ -148,7 +156,12 @@ post_config_update(_, _Req, NewConf, OldConf, _AppEnvs) -> {StopHttps, StartHttps} = diff_listeners(https, OldHttps, NewHttps), Stop = maps:merge(StopHttp, StopHttps), Start = maps:merge(StartHttp, StartHttps), - _ = erlang:send_after(500, ?MODULE, {update_listeners, Stop, Start}), + delay_job({update_listeners, Stop, Start}). + +%% in post_config_update, the config is not yet persisted to persistent_term +%% so we need to delegate the listener update to the gen_server a bit later +delay_job(Msg) -> + _ = erlang:send_after(500, ?MODULE, Msg), ok. get_listener(Type, Conf) -> diff --git a/apps/emqx_dashboard/src/emqx_dashboard_schema.erl b/apps/emqx_dashboard/src/emqx_dashboard_schema.erl index d3e4233d3..319c9cee1 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_schema.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_schema.erl @@ -233,6 +233,8 @@ cors(required) -> false; cors(desc) -> ?DESC(cors); cors(_) -> undefined. +%% TODO: change it to string type +%% It will be up to the dashboard package which languagues to support i18n_lang(type) -> ?ENUM([en, zh]); i18n_lang(default) -> en; i18n_lang('readOnly') -> true; diff --git a/apps/emqx_dashboard/src/emqx_dashboard_sup.erl b/apps/emqx_dashboard/src/emqx_dashboard_sup.erl index 896b44859..04d8ed1d5 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_sup.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_sup.erl @@ -28,6 +28,8 @@ start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). init([]) -> + %% supervisor owns the cache table + ok = emqx_dashboard_desc_cache:init(), {ok, {{one_for_one, 5, 100}, [ ?CHILD(emqx_dashboard_listener, brutal_kill), diff --git a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl index b2ad69997..bdd5866f8 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl @@ -84,7 +84,8 @@ -type spec_opts() :: #{ check_schema => boolean() | filter(), translate_body => boolean(), - schema_converter => fun((hocon_schema:schema(), Module :: atom()) -> map()) + schema_converter => fun((hocon_schema:schema(), Module :: atom()) -> map()), + i18n_lang => atom() }. -type route_path() :: string() | binary(). @@ -333,11 +334,11 @@ check_request_body(#{body := Body}, Spec, _Module, _CheckFun, false) when is_map %% tags, description, summary, security, deprecated meta_to_spec(Meta, Module, Options) -> - {Params, Refs1} = parameters(maps:get(parameters, Meta, []), Module), + {Params, Refs1} = parameters(maps:get(parameters, Meta, []), Module, Options), {RequestBody, Refs2} = request_body(maps:get('requestBody', Meta, []), Module, Options), {Responses, Refs3} = responses(maps:get(responses, Meta, #{}), Module, Options), { - generate_method_desc(to_spec(Meta, Params, RequestBody, Responses)), + generate_method_desc(to_spec(Meta, Params, RequestBody, Responses), Options), lists:usort(Refs1 ++ Refs2 ++ Refs3) }. @@ -348,13 +349,13 @@ to_spec(Meta, Params, RequestBody, Responses) -> Spec = to_spec(Meta, Params, [], Responses), maps:put('requestBody', RequestBody, Spec). 
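%% A hedged sketch of the new i18n_lang spec option in use; the module name and
%% the other option values here are illustrative, and the exact option set used
%% by the build-time callers is not shown in this hunk.
%% e.g.
%%   {ApiSpec, Components} =
%%       emqx_dashboard_swagger:spec(
%%           emqx_dashboard_api,
%%           #{check_schema => false, i18n_lang => <<"zh">>}
%%       ),
%% makes the descriptions in the generated spec resolve against the zh desc
%% cache, instead of the runtime [dashboard, i18n_lang] setting that is used
%% when the option is absent.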
-generate_method_desc(Spec = #{desc := _Desc}) -> - Spec1 = trans_description(maps:remove(desc, Spec), Spec), +generate_method_desc(Spec = #{desc := _Desc}, Options) -> + Spec1 = trans_description(maps:remove(desc, Spec), Spec, Options), trans_tags(Spec1); -generate_method_desc(Spec = #{description := _Desc}) -> - Spec1 = trans_description(Spec, Spec), +generate_method_desc(Spec = #{description := _Desc}, Options) -> + Spec1 = trans_description(Spec, Spec, Options), trans_tags(Spec1); -generate_method_desc(Spec) -> +generate_method_desc(Spec, _Options) -> trans_tags(Spec). trans_tags(Spec = #{tags := Tags}) -> @@ -362,7 +363,7 @@ trans_tags(Spec = #{tags := Tags}) -> trans_tags(Spec) -> Spec. -parameters(Params, Module) -> +parameters(Params, Module, Options) -> {SpecList, AllRefs} = lists:foldl( fun(Param, {Acc, RefsAcc}) -> @@ -388,7 +389,7 @@ parameters(Params, Module) -> Type ), Spec1 = trans_required(Spec0, Required, In), - Spec2 = trans_description(Spec1, Type), + Spec2 = trans_description(Spec1, Type, Options), {[Spec2 | Acc], Refs ++ RefsAcc} end end, @@ -432,38 +433,38 @@ trans_required(Spec, true, _) -> Spec#{required => true}; trans_required(Spec, _, path) -> Spec#{required => true}; trans_required(Spec, _, _) -> Spec. -trans_desc(Init, Hocon, Func, Name) -> - Spec0 = trans_description(Init, Hocon), +trans_desc(Init, Hocon, Func, Name, Options) -> + Spec0 = trans_description(Init, Hocon, Options), case Func =:= fun hocon_schema_to_spec/2 of true -> Spec0; false -> - Spec1 = trans_label(Spec0, Hocon, Name), + Spec1 = trans_label(Spec0, Hocon, Name, Options), case Spec1 of #{description := _} -> Spec1; _ -> Spec1#{description => <>} end end. -trans_description(Spec, Hocon) -> +trans_description(Spec, Hocon, Options) -> Desc = case desc_struct(Hocon) of undefined -> undefined; - ?DESC(_, _) = Struct -> get_i18n(<<"desc">>, Struct, undefined); - Struct -> to_bin(Struct) + ?DESC(_, _) = Struct -> get_i18n(<<"desc">>, Struct, undefined, Options); + Text -> to_bin(Text) end, case Desc of undefined -> Spec; Desc -> Desc1 = binary:replace(Desc, [<<"\n">>], <<"
">>, [global]), - maybe_add_summary_from_label(Spec#{description => Desc1}, Hocon) + maybe_add_summary_from_label(Spec#{description => Desc1}, Hocon, Options) end. -maybe_add_summary_from_label(Spec, Hocon) -> +maybe_add_summary_from_label(Spec, Hocon, Options) -> Label = case desc_struct(Hocon) of - ?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, undefined); + ?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, undefined, Options); _ -> undefined end, case Label of @@ -471,29 +472,44 @@ maybe_add_summary_from_label(Spec, Hocon) -> _ -> Spec#{summary => Label} end. -get_i18n(Key, Struct, Default) -> - {ok, #{cache := Cache, lang := Lang}} = emqx_dashboard:get_i18n(), - Desc = hocon_schema:resolve_schema(Struct, Cache), - emqx_utils_maps:deep_get([Key, Lang], Desc, Default). +get_i18n(Tag, ?DESC(Namespace, Id), Default, Options) -> + Lang = get_lang(Options), + case emqx_dashboard_desc_cache:lookup(Lang, Namespace, Id, Tag) of + undefined -> + Default; + Text -> + Text + end. -trans_label(Spec, Hocon, Default) -> +%% So far i18n_lang in options is only used at build time. +%% At runtime, it's still the global config which controls the language. +get_lang(#{i18n_lang := Lang}) -> Lang; +get_lang(_) -> emqx:get_config([dashboard, i18n_lang]). + +trans_label(Spec, Hocon, Default, Options) -> Label = case desc_struct(Hocon) of - ?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, Default); + ?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, Default, Options); _ -> Default end, Spec#{label => Label}. desc_struct(Hocon) -> - case hocon_schema:field_schema(Hocon, desc) of - undefined -> - case hocon_schema:field_schema(Hocon, description) of - undefined -> get_ref_desc(Hocon); - Struct1 -> Struct1 - end; - Struct -> - Struct - end. + R = + case hocon_schema:field_schema(Hocon, desc) of + undefined -> + case hocon_schema:field_schema(Hocon, description) of + undefined -> get_ref_desc(Hocon); + Struct1 -> Struct1 + end; + Struct -> + Struct + end, + ensure_bin(R). + +ensure_bin(undefined) -> undefined; +ensure_bin(?DESC(_Namespace, _Id) = Desc) -> Desc; +ensure_bin(Text) -> to_bin(Text). get_ref_desc(?R_REF(Mod, Name)) -> case erlang:function_exported(Mod, desc, 1) of @@ -524,7 +540,7 @@ responses(Responses, Module, Options) -> {Spec, Refs}. response(Status, ?DESC(_Mod, _Id) = Schema, {Acc, RefsAcc, Module, Options}) -> - Desc = trans_description(#{}, #{desc => Schema}), + Desc = trans_description(#{}, #{desc => Schema}, Options), {Acc#{integer_to_binary(Status) => Desc}, RefsAcc, Module, Options}; response(Status, Bin, {Acc, RefsAcc, Module, Options}) when is_binary(Bin) -> {Acc#{integer_to_binary(Status) => #{description => Bin}}, RefsAcc, Module, Options}; @@ -553,7 +569,7 @@ response(Status, Schema, {Acc, RefsAcc, Module, Options}) -> Hocon = hocon_schema:field_schema(Schema, type), Examples = hocon_schema:field_schema(Schema, examples), {Spec, Refs} = hocon_schema_to_spec(Hocon, Module), - Init = trans_description(#{}, Schema), + Init = trans_description(#{}, Schema, Options), Content = content(Spec, Examples), { Acc#{integer_to_binary(Status) => Init#{<<"content">> => Content}}, @@ -563,7 +579,7 @@ response(Status, Schema, {Acc, RefsAcc, Module, Options}) -> }; false -> {Props, Refs} = parse_object(Schema, Module, Options), - Init = trans_description(#{}, Schema), + Init = trans_description(#{}, Schema, Options), Content = Init#{<<"content">> => content(Props)}, {Acc#{integer_to_binary(Status) => Content}, Refs ++ RefsAcc, Module, Options} end. 
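%% Tying the get_lang/1 clauses above together, a hedged illustration
%% (the values are examples only):
%%   get_lang(#{i18n_lang => <<"zh">>}) returns <<"zh">>, the build-time
%%   override passed in the spec options, while
%%   get_lang(#{}) falls back to emqx:get_config([dashboard, i18n_lang]),
%%   i.e. the language configured on the running node.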
@@ -590,7 +606,7 @@ components(Options, [{Module, Field} | Refs], SpecAcc, SubRefsAcc) -> %% parameters in ref only have one value, not array components(Options, [{Module, Field, parameter} | Refs], SpecAcc, SubRefsAcc) -> Props = hocon_schema_fields(Module, Field), - {[Param], SubRefs} = parameters(Props, Module), + {[Param], SubRefs} = parameters(Props, Module, Options), Namespace = namespace(Module), NewSpecAcc = SpecAcc#{?TO_REF(Namespace, Field) => Param}, components(Options, Refs, NewSpecAcc, SubRefs ++ SubRefsAcc). @@ -869,7 +885,7 @@ parse_object_loop([{Name, Hocon} | Rest], Module, Options, Props, Required, Refs HoconType = hocon_schema:field_schema(Hocon, type), Init0 = init_prop([default | ?DEFAULT_FIELDS], #{}, Hocon), SchemaToSpec = schema_converter(Options), - Init = trans_desc(Init0, Hocon, SchemaToSpec, NameBin), + Init = trans_desc(Init0, Hocon, SchemaToSpec, NameBin, Options), {Prop, Refs1} = SchemaToSpec(HoconType, Module), NewRequiredAcc = case is_required(Hocon) of diff --git a/apps/emqx_dashboard/test/emqx_dashboard_listener_SUITE.erl b/apps/emqx_dashboard/test/emqx_dashboard_listener_SUITE.erl new file mode 100644 index 000000000..7f28841fc --- /dev/null +++ b/apps/emqx_dashboard/test/emqx_dashboard_listener_SUITE.erl @@ -0,0 +1,51 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_dashboard_listener_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + emqx_mgmt_api_test_util:init_suite([emqx_conf]), + ok = change_i18n_lang(en), + Config. + +end_per_suite(_Config) -> + ok = change_i18n_lang(en), + emqx_mgmt_api_test_util:end_suite([emqx_conf]). + +t_change_i18n_lang(_Config) -> + ?check_trace( + begin + ok = change_i18n_lang(zh), + {ok, _} = ?block_until(#{?snk_kind := regenerate_minirest_dispatch}, 10_000), + ok + end, + fun(ok, Trace) -> + ?assertMatch([#{i18n_lang := zh}], ?of_kind(regenerate_minirest_dispatch, Trace)) + end + ), + ok. + +change_i18n_lang(Lang) -> + {ok, _} = emqx_conf:update([dashboard], {change_i18n_lang, Lang}, #{}), + ok. diff --git a/apps/emqx_dashboard/test/emqx_swagger_parameter_SUITE.erl b/apps/emqx_dashboard/test/emqx_swagger_parameter_SUITE.erl index 472e90405..81b3f4402 100644 --- a/apps/emqx_dashboard/test/emqx_swagger_parameter_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_swagger_parameter_SUITE.erl @@ -64,7 +64,6 @@ groups() -> init_per_suite(Config) -> emqx_mgmt_api_test_util:init_suite([emqx_conf]), - emqx_dashboard:init_i18n(), Config. 
end_per_suite(_Config) -> diff --git a/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl b/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl index 3150ed097..e6fa62f77 100644 --- a/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl @@ -33,7 +33,6 @@ init_per_suite(Config) -> mria:start(), application:load(emqx_dashboard), emqx_common_test_helpers:start_apps([emqx_conf, emqx_dashboard], fun set_special_configs/1), - emqx_dashboard:init_i18n(), Config. set_special_configs(emqx_dashboard) -> diff --git a/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl b/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl index 4d1501dae..753aaad7a 100644 --- a/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl @@ -33,7 +33,6 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> emqx_mgmt_api_test_util:init_suite([emqx_conf]), - emqx_dashboard:init_i18n(), Config. end_per_suite(Config) -> diff --git a/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl b/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl index ee798868e..f6150e607 100644 --- a/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl +++ b/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl @@ -686,7 +686,6 @@ t_jq(_) -> %% Got timeout as expected got_timeout end, - _ConfigRootKey = emqx_rule_engine_schema:namespace(), ?assertThrow( {jq_exception, {timeout, _}}, apply_func(jq, [TOProgram, <<"-2">>]) diff --git a/build b/build index 3c558c19a..021cc80a5 100755 --- a/build +++ b/build @@ -117,8 +117,7 @@ make_docs() { mkdir -p "$docdir" "$dashboard_www_static" # shellcheck disable=SC2086 erl -noshell -pa $libs_dir1 $libs_dir2 $libs_dir3 -eval \ - "I18nFile = filename:join([apps, emqx_dashboard, priv, 'i18n.conf']), \ - ok = emqx_conf:dump_schema('$docdir', $SCHEMA_MODULE, I18nFile), \ + "ok = emqx_conf:dump_schema('$docdir', $SCHEMA_MODULE), \ halt(0)." 
cp "$docdir"/bridge-api-*.json "$dashboard_www_static" cp "$docdir"/hot-config-schema-*.json "$dashboard_www_static" diff --git a/mix.exs b/mix.exs index 2b8de4c54..6c6c7750f 100644 --- a/mix.exs +++ b/mix.exs @@ -72,7 +72,7 @@ defmodule EMQXUmbrella.MixProject do # in conflict by emqtt and hocon {:getopt, "1.0.2", override: true}, {:snabbkaffe, github: "kafka4beam/snabbkaffe", tag: "1.0.7", override: true}, - {:hocon, github: "emqx/hocon", tag: "0.38.1", override: true}, + {:hocon, github: "emqx/hocon", tag: "0.39.1", override: true}, {:emqx_http_lib, github: "emqx/emqx_http_lib", tag: "0.5.2", override: true}, {:esasl, github: "emqx/esasl", tag: "0.2.0"}, {:jose, github: "potatosalad/erlang-jose", tag: "1.11.2"}, diff --git a/rebar.config b/rebar.config index 7e783b56d..040b1a7c0 100644 --- a/rebar.config +++ b/rebar.config @@ -75,7 +75,7 @@ , {system_monitor, {git, "https://github.com/ieQu1/system_monitor", {tag, "3.0.3"}}} , {getopt, "1.0.2"} , {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.7"}}} - , {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.38.1"}}} + , {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.1"}}} , {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}} , {esasl, {git, "https://github.com/emqx/esasl", {tag, "0.2.0"}}} , {jose, {git, "https://github.com/potatosalad/erlang-jose", {tag, "1.11.2"}}} diff --git a/rebar.config.erl b/rebar.config.erl index 7c00622c2..80f126096 100644 --- a/rebar.config.erl +++ b/rebar.config.erl @@ -479,8 +479,7 @@ etc_overlay(ReleaseType, Edition) -> [ {mkdir, "etc/"}, {copy, "{{base_dir}}/lib/emqx/etc/certs", "etc/"}, - {copy, "_build/docgen/" ++ name(Edition) ++ "/emqx.conf.en.example", - "etc/emqx.conf.example"} + {copy, "_build/docgen/" ++ name(Edition) ++ "/emqx.conf.example", "etc/emqx.conf.example"} ] ++ lists:map( fun diff --git a/scripts/merge-config.escript b/scripts/merge-config.escript index d30a0ca68..b3c214dd7 100755 --- a/scripts/merge-config.escript +++ b/scripts/merge-config.escript @@ -34,7 +34,10 @@ main(_) -> ok = file:write_file("apps/emqx_conf/etc/emqx-enterprise.conf.all", EnterpriseConf); false -> ok - end. + end, + merge_desc_files_per_lang("en"), + %% TODO: remove this when we have zh translation moved to dashboard package + merge_desc_files_per_lang("zh"). is_enterprise() -> Profile = os:getenv("PROFILE", "emqx"), @@ -96,3 +99,48 @@ try_enter_child(Dir, Files, Cfgs) -> true -> get_all_cfgs(filename:join([Dir, "src"]), Cfgs) end. + +%% Desc files merge is for now done locally in emqx.git repo for all languages. +%% When zh and other languages are moved to a separate repo, +%% we will only merge the en files. +%% The file for other languages will be merged in the other repo, +%% the built as a part of the dashboard package, +%% finally got pulled at build time as a part of the dashboard package. +merge_desc_files_per_lang(Lang) -> + BaseConf = <<"">>, + Cfgs0 = get_all_desc_files(Lang), + Conf = do_merge_desc_files_per_lang(BaseConf, Cfgs0), + OutputFile = case Lang of + "en" -> + %% en desc will always be in the priv dir of emqx_dashboard + "apps/emqx_dashboard/priv/desc.en.hocon"; + "zh" -> + %% so far we inject zh desc as if it's extracted from dashboard package + %% TODO: remove this when we have zh translation moved to dashboard package + "apps/emqx_dashboard/priv/www/static/desc.zh.hocon" + end, + ok = filelib:ensure_dir(OutputFile), + ok = file:write_file(OutputFile, Conf). 
+ +do_merge_desc_files_per_lang(BaseConf, Cfgs) -> + lists:foldl( + fun(CfgFile, Acc) -> + case filelib:is_regular(CfgFile) of + true -> + {ok, Bin1} = file:read_file(CfgFile), + [Acc, io_lib:nl(), Bin1]; + false -> Acc + end + end, BaseConf, Cfgs). + +get_all_desc_files(Lang) -> + Dir = + case Lang of + "en" -> + filename:join(["rel", "i18n"]); + "zh" -> + %% TODO: remove this when we have zh translation moved to dashboard package + filename:join(["rel", "i18n", "zh"]) + end, + Files = filelib:wildcard("*.hocon", Dir), + lists:map(fun(Name) -> filename:join([Dir, Name]) end, Files). diff --git a/scripts/merge-i18n.escript b/scripts/merge-i18n.escript deleted file mode 100755 index 7ffd3aa8a..000000000 --- a/scripts/merge-i18n.escript +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env escript - --mode(compile). - -main(_) -> - main_per_lang("en"), - main_per_lang("zh"). - -main_per_lang(Lang) -> - BaseConf = <<"">>, - Cfgs0 = get_all_files(Lang), - Conf = merge(BaseConf, Cfgs0), - OutputFile = "apps/emqx_dashboard/priv/i18n." ++ Lang ++ ".conf", - ok = filelib:ensure_dir(OutputFile), - ok = file:write_file(OutputFile, Conf). - -merge(BaseConf, Cfgs) -> - lists:foldl( - fun(CfgFile, Acc) -> - case filelib:is_regular(CfgFile) of - true -> - {ok, Bin1} = file:read_file(CfgFile), - [Acc, io_lib:nl(), Bin1]; - false -> Acc - end - end, BaseConf, Cfgs). - -get_all_files(Lang) -> - Dir = - case Lang of - "en" -> filename:join(["rel", "i18n"]); - "zh" -> filename:join(["rel", "i18n", "zh"]) - end, - Files = filelib:wildcard("*.hocon", Dir), - lists:map(fun(Name) -> filename:join([Dir, Name]) end, Files). diff --git a/scripts/pre-compile.sh b/scripts/pre-compile.sh index 56b7d47b4..71251a03e 100755 --- a/scripts/pre-compile.sh +++ b/scripts/pre-compile.sh @@ -20,5 +20,4 @@ cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")/.." ./scripts/get-dashboard.sh "$dashboard_version" ./scripts/merge-config.escript -./scripts/merge-i18n.escript ./scripts/update-bom.sh "$PROFILE_STR" ./rel From 8dc881f4b9f16c98a43d8800611e0a8ffbd0ad1a Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Sat, 8 Apr 2023 09:41:55 +0200 Subject: [PATCH 016/263] build: fix mix build --- mix.exs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mix.exs b/mix.exs index 6c6c7750f..e358edfcc 100644 --- a/mix.exs +++ b/mix.exs @@ -462,7 +462,7 @@ defmodule EMQXUmbrella.MixProject do profile = System.get_env("MIX_ENV") Mix.Generator.copy_file( - "_build/docgen/#{profile}/emqx.conf.en.example", + "_build/docgen/#{profile}/emqx.conf.example", Path.join(etc, "emqx.conf.example"), force: overwrite? ) From 56b966743638ffe3d52e5e42333265198797de46 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Sat, 8 Apr 2023 09:42:47 +0200 Subject: [PATCH 017/263] refactor(emqx_conf): prepare for dynamic api schema generation --- apps/emqx_conf/src/emqx_conf.erl | 44 ++++++++++++++++++++++++-------- 1 file changed, 34 insertions(+), 10 deletions(-) diff --git a/apps/emqx_conf/src/emqx_conf.erl b/apps/emqx_conf/src/emqx_conf.erl index d77ffb680..8632df139 100644 --- a/apps/emqx_conf/src/emqx_conf.erl +++ b/apps/emqx_conf/src/emqx_conf.erl @@ -29,6 +29,12 @@ -export([schema_module/0]). -export([gen_example_conf/2]). +%% TODO: move to emqx_dashboard when we stop building api schema at build time +-export([ + hotconf_schema_json/1, + bridge_schema_json/1 +]). + %% for rpc -export([get_node_and_config/1]). @@ -136,6 +142,7 @@ reset(Node, KeyPath, Opts) -> emqx_conf_proto_v2:reset(Node, KeyPath, Opts). %% @doc Called from build script. 
+%% TODO: move to a external escript after all refactoring is done dump_schema(Dir, SchemaModule) -> _ = application:load(emqx_dashboard), ok = emqx_dashboard_desc_cache:init(), @@ -169,19 +176,37 @@ gen_schema_json(Dir, SchemaModule, Lang) -> IoData = emqx_utils_json:encode(JsonMap, [pretty, force_utf8]), ok = file:write_file(SchemaJsonFile, IoData). +%% TODO: delete this function when we stop generating this JSON at build time. gen_api_schema_json(Dir, Lang) -> gen_api_schema_json_hotconf(Dir, Lang), gen_api_schema_json_bridge(Dir, Lang). +%% TODO: delete this function when we stop generating this JSON at build time. gen_api_schema_json_hotconf(Dir, Lang) -> - SchemaInfo = #{title => <<"EMQX Hot Conf API Schema">>, version => <<"0.1.0">>}, File = schema_filename(Dir, "hot-config-schema-", Lang), - ok = do_gen_api_schema_json(File, emqx_mgmt_api_configs, SchemaInfo, Lang). + IoData = hotconf_schema_json(Lang), + ok = write_api_schema_json_file(File, IoData). +%% TODO: delete this function when we stop generating this JSON at build time. gen_api_schema_json_bridge(Dir, Lang) -> - SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => <<"0.1.0">>}, File = schema_filename(Dir, "bridge-api-", Lang), - ok = do_gen_api_schema_json(File, emqx_bridge_api, SchemaInfo, Lang). + IoData = bridge_schema_json(Lang), + ok = write_api_schema_json_file(File, IoData). + +%% TODO: delete this function when we stop generating this JSON at build time. +write_api_schema_json_file(File, IoData) -> + io:format(user, "===< Generating: ~s~n", [File]), + file:write_file(File, IoData). + +%% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time. +hotconf_schema_json(Lang) -> + SchemaInfo = #{title => <<"EMQX Hot Conf API Schema">>, version => <<"0.1.0">>}, + gen_api_schema_json_iodata(emqx_mgmt_api_configs, SchemaInfo, Lang). + +%% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time. +bridge_schema_json(Lang) -> + SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => <<"0.1.0">>}, + gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo, Lang). schema_filename(Dir, Prefix, Lang) -> Filename = Prefix ++ Lang ++ ".json", @@ -245,9 +270,9 @@ gen_example(File, SchemaModule) -> Example = hocon_schema_example:gen(SchemaModule, Opts), file:write_file(File, Example). -%% Only gen hot_conf schema, not all configuration fields. -do_gen_api_schema_json(File, SchemaMod, SchemaInfo, Lang) -> - io:format(user, "===< Generating: ~s~n", [File]), +%% TODO: move this to emqx_dashboard when we stop generating +%% this JSON at build time. +gen_api_schema_json_iodata(SchemaMod, SchemaInfo, Lang) -> {ApiSpec0, Components0} = emqx_dashboard_swagger:spec( SchemaMod, #{ @@ -282,15 +307,14 @@ do_gen_api_schema_json(File, SchemaMod, SchemaInfo, Lang) -> ApiSpec0 ), Components = lists:foldl(fun(M, Acc) -> maps:merge(M, Acc) end, #{}, Components0), - IoData = emqx_utils_json:encode( + emqx_utils_json:encode( #{ info => SchemaInfo, paths => ApiSpec, components => #{schemas => Components} }, [pretty, force_utf8] - ), - file:write_file(File, IoData). + ). -define(TO_REF(_N_, _F_), iolist_to_binary([to_bin(_N_), ".", to_bin(_F_)])). 
-define(TO_COMPONENTS_SCHEMA(_M_, _F_), From 1aa5b528e9ba0a27f9977184c63cb277aa73dbc1 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Sat, 8 Apr 2023 11:05:16 +0200 Subject: [PATCH 018/263] feat: generate hotconf and bridge schema on the fly --- apps/emqx/include/http_api.hrl | 2 +- .../emqx_dashboard/src/emqx_dashboard_api.erl | 1 - .../src/emqx_dashboard_schema_api.erl | 84 +++++++++++++++++++ .../src/emqx_dashboard_swagger.erl | 10 ++- 4 files changed, 92 insertions(+), 5 deletions(-) create mode 100644 apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl diff --git a/apps/emqx/include/http_api.hrl b/apps/emqx/include/http_api.hrl index 08dd08362..6a5cefec1 100644 --- a/apps/emqx/include/http_api.hrl +++ b/apps/emqx/include/http_api.hrl @@ -57,7 +57,7 @@ -define(ERROR_CODES, [ {?BAD_USERNAME_OR_PWD, <<"Bad username or password">>}, {?BAD_API_KEY_OR_SECRET, <<"Bad API key or secret">>}, - {'BAD_REQUEST', <<"Request parameters are not legal">>}, + {'BAD_REQUEST', <<"Request parameters are not valid">>}, {'NOT_MATCH', <<"Conditions are not matched">>}, {'ALREADY_EXISTS', <<"Resource already existed">>}, {'BAD_CONFIG_SCHEMA', <<"Configuration data is not legal">>}, diff --git a/apps/emqx_dashboard/src/emqx_dashboard_api.erl b/apps/emqx_dashboard/src/emqx_dashboard_api.erl index d5655d99d..108cde379 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_api.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_api.erl @@ -18,7 +18,6 @@ -behaviour(minirest_api). --include("emqx_dashboard.hrl"). -include_lib("hocon/include/hoconsc.hrl"). -include_lib("emqx/include/logger.hrl"). -include_lib("typerefl/include/types.hrl"). diff --git a/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl b/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl new file mode 100644 index 000000000..898d95b3c --- /dev/null +++ b/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl @@ -0,0 +1,84 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% This module is for dashboard to retrieve the schema hot config and bridges. +-module(emqx_dashboard_schema_api). + +-behaviour(minirest_api). + +-include_lib("hocon/include/hoconsc.hrl"). + +%% minirest API +-export([api_spec/0, paths/0, schema/1]). + +-export([get_schema/2]). + +-define(TAGS, [<<"dashboard">>]). +-define(BAD_REQUEST, 'BAD_REQUEST'). + +%%-------------------------------------------------------------------- +%% minirest API and schema +%%-------------------------------------------------------------------- + +api_spec() -> + emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}). + +paths() -> + ["/schemas/:name"]. + +%% This is a rather hidden API, so we don't need to add translations for the description. 
+schema("/schemas/:name") -> + #{ + 'operationId' => get_schema, + get => #{ + parameters => [ + {name, hoconsc:mk(hoconsc:enum([hotconf, bridges]), #{in => path})}, + {lang, + hoconsc:mk(typerefl:string(), #{ + in => query, + default => <<"en">>, + desc => <<"The language of the schema.">> + })} + ], + desc => << + "Get the schema JSON of the specified name. " + "NOTE: you should never need to make use of this API " + "unless you are building a multi-lang dashboaard." + >>, + tags => ?TAGS, + security => [], + responses => #{ + 200 => hoconsc:mk(binary(), #{desc => <<"The JSON schema of the specified name.">>}) + } + } + }. + +%%-------------------------------------------------------------------- +%% API Handler funcs +%%-------------------------------------------------------------------- + +get_schema(get, #{ + bindings := #{name := Name}, + query_string := #{<<"lang">> := Lang} +}) -> + {200, gen_schema(Name, iolist_to_binary(Lang))}; +get_schema(get, _) -> + {400, ?BAD_REQUEST, <<"unknown">>}. + +gen_schema(hotconf, Lang) -> + emqx_conf:hotconf_schema_json(Lang); +gen_schema(bridges, Lang) -> + emqx_conf:bridge_schema_json(Lang). diff --git a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl index bdd5866f8..e0b50346d 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl @@ -85,7 +85,7 @@ check_schema => boolean() | filter(), translate_body => boolean(), schema_converter => fun((hocon_schema:schema(), Module :: atom()) -> map()), - i18n_lang => atom() + i18n_lang => atom() | string() | binary() }. -type route_path() :: string() | binary(). @@ -238,8 +238,12 @@ parse_spec_ref(Module, Path, Options) -> erlang:apply(Module, schema, [Path]) %% better error message catch - error:Reason -> - throw({error, #{mfa => {Module, schema, [Path]}, reason => Reason}}) + error:Reason:Stacktrace -> + erlang:raise( + error, + #{mfa => {Module, schema, [Path]}, reason => Reason}, + Stacktrace + ) end, {Specs, Refs} = maps:fold( fun(Method, Meta, {Acc, RefsAcc}) -> From 6969c2a670b0a6347c9a966668e2718a833546ce Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Sat, 8 Apr 2023 21:34:20 +0200 Subject: [PATCH 019/263] refactor: not leagal -> invalid --- apps/emqx/include/http_api.hrl | 6 +++--- .../emqx_dashboard/test/emqx_dashboard_error_code_SUITE.erl | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/apps/emqx/include/http_api.hrl b/apps/emqx/include/http_api.hrl index 6a5cefec1..ba1438374 100644 --- a/apps/emqx/include/http_api.hrl +++ b/apps/emqx/include/http_api.hrl @@ -57,16 +57,16 @@ -define(ERROR_CODES, [ {?BAD_USERNAME_OR_PWD, <<"Bad username or password">>}, {?BAD_API_KEY_OR_SECRET, <<"Bad API key or secret">>}, - {'BAD_REQUEST', <<"Request parameters are not valid">>}, + {'BAD_REQUEST', <<"Request parameters are invalid">>}, {'NOT_MATCH', <<"Conditions are not matched">>}, {'ALREADY_EXISTS', <<"Resource already existed">>}, - {'BAD_CONFIG_SCHEMA', <<"Configuration data is not legal">>}, + {'BAD_CONFIG_SCHEMA', <<"Configuration data is invalid">>}, {'BAD_LISTENER_ID', <<"Bad listener ID">>}, {'BAD_NODE_NAME', <<"Bad Node Name">>}, {'BAD_RPC', <<"RPC Failed. 
Check the cluster status and the requested node status">>}, {'BAD_TOPIC', <<"Topic syntax error, Topic needs to comply with the MQTT protocol standard">>}, {'EXCEED_LIMIT', <<"Create resources that exceed the maximum limit or minimum limit">>}, - {'INVALID_PARAMETER', <<"Request parameters is not legal and exceeds the boundary value">>}, + {'INVALID_PARAMETER', <<"Request parameters is invalid and exceeds the boundary value">>}, {'CONFLICT', <<"Conflicting request resources">>}, {'NO_DEFAULT_VALUE', <<"Request parameters do not use default values">>}, {'DEPENDENCY_EXISTS', <<"Resource is dependent by another resource">>}, diff --git a/apps/emqx_dashboard/test/emqx_dashboard_error_code_SUITE.erl b/apps/emqx_dashboard/test/emqx_dashboard_error_code_SUITE.erl index c0a772d2d..588a69065 100644 --- a/apps/emqx_dashboard/test/emqx_dashboard_error_code_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_dashboard_error_code_SUITE.erl @@ -57,7 +57,7 @@ t_look_up_code(_) -> t_description_code(_) -> {error, not_found} = emqx_dashboard_error_code:description('_____NOT_EXIST_NAME'), - {ok, <<"Request parameters are not legal">>} = + {ok, <<"Request parameters are invalid">>} = emqx_dashboard_error_code:description('BAD_REQUEST'), ok. @@ -79,7 +79,7 @@ t_api_code(_) -> Url = ?SERVER ++ "/error_codes/BAD_REQUEST", {ok, #{ <<"code">> := <<"BAD_REQUEST">>, - <<"description">> := <<"Request parameters are not legal">> + <<"description">> := <<"Request parameters are invalid">> }} = request(Url), ok. From 373e7b33f93b1ad0a4f4acb51f18e13838da54bc Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Sun, 9 Apr 2023 23:40:49 +0200 Subject: [PATCH 020/263] test: allow tests to run without desc cache --- apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl b/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl index 8fd6fe3d3..9d8d1905d 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl @@ -60,7 +60,7 @@ lookup(Lang, Namespace, Id, Tag) -> %% @doc Lookup the description of the configuration item from the given cache. lookup(EtsTab, Lang0, Namespace, Id, Tag) -> Lang = bin(Lang0), - case ets:lookup(EtsTab, {Lang, bin(Namespace), bin(Id), bin(Tag)}) of + try ets:lookup(EtsTab, {Lang, bin(Namespace), bin(Id), bin(Tag)}) of [{_, Desc}] -> Desc; [] when Lang =/= <<"en">> -> @@ -69,6 +69,11 @@ lookup(EtsTab, Lang0, Namespace, Id, Tag) -> _ -> %% undefined but not <<>> undefined + catch + error:badarg -> + %% schema is not initialized + %% most likely in test cases + undefined end. 
%% The desc files are of names like: From 466a28daf25ca9788af9a4501ceaad0b9f1c6a2f Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Sat, 8 Apr 2023 21:44:39 +0200 Subject: [PATCH 021/263] test: fix test cases to work with new exctption --- apps/emqx_dashboard/src/emqx_dashboard_swagger.erl | 4 ++++ .../emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl | 7 +++++-- apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl | 6 +++--- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl index e0b50346d..f700ec146 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl @@ -239,6 +239,10 @@ parse_spec_ref(Module, Path, Options) -> %% better error message catch error:Reason:Stacktrace -> + %% raise a new error with the same stacktrace. + %% it's a bug if this happens. + %% i.e. if a path is listed in the spec but the module doesn't + %% implement it or crashes when trying to build the schema. erlang:raise( error, #{mfa => {Module, schema, [Path]}, reason => Reason}, diff --git a/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl b/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl index e6fa62f77..f2ba56e08 100644 --- a/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl @@ -307,8 +307,11 @@ t_nest_ref(_Config) -> t_none_ref(_Config) -> Path = "/ref/none", - ?assertThrow( - {error, #{mfa := {?MODULE, schema, [Path]}}}, + ?assertError( + #{ + mfa := {?MODULE, schema, [Path]}, + reason := function_clause + }, emqx_dashboard_swagger:parse_spec_ref(?MODULE, Path, #{}) ), ok. diff --git a/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl b/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl index 753aaad7a..cda533cc2 100644 --- a/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl @@ -277,11 +277,11 @@ t_bad_ref(_Config) -> t_none_ref(_Config) -> Path = "/ref/none", - ?assertThrow( - {error, #{ + ?assertError( + #{ mfa := {?MODULE, schema, ["/ref/none"]}, reason := function_clause - }}, + }, validate(Path, #{}, []) ), ok. 
From 9d15247dd54b6eb9565f0d019442971388c34d35 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Fri, 14 Apr 2023 17:33:25 -0300 Subject: [PATCH 022/263] feat(schema_registry): add support for protobuf schemas Fixes https://emqx.atlassian.net/browse/EMQX-9470 --- build | 5 +- changes/ee/feat-10409.en.md | 1 + .../include/emqx_ee_schema_registry.hrl | 21 +- lib-ee/emqx_ee_schema_registry/rebar.config | 3 +- .../src/emqx_ee_schema_registry.app.src | 3 +- .../src/emqx_ee_schema_registry.erl | 9 +- .../src/emqx_ee_schema_registry_schema.erl | 20 +- .../src/emqx_ee_schema_registry_serde.erl | 132 ++++++ .../test/emqx_ee_schema_registry_SUITE.erl | 390 +++++++++++++++--- ...emqx_ee_schema_registry_http_api_SUITE.erl | 88 +++- .../emqx_ee_schema_registry_serde_SUITE.erl | 53 ++- mix.exs | 2 +- rebar.config | 2 +- rel/i18n/emqx_ee_schema_registry_schema.hocon | 6 + .../zh/emqx_ee_schema_registry_schema.hocon | 6 + 15 files changed, 640 insertions(+), 101 deletions(-) create mode 100644 changes/ee/feat-10409.en.md diff --git a/build b/build index 021cc80a5..77d4dbfc8 100755 --- a/build +++ b/build @@ -124,10 +124,7 @@ make_docs() { } assert_no_compile_time_only_deps() { - if [ "$("$FIND" "_build/$PROFILE/rel/emqx/lib/" -maxdepth 1 -name 'gpb-*' -type d)" != "" ]; then - echo "gpb should not be included in the release" - exit 1 - fi + : } make_rel() { diff --git a/changes/ee/feat-10409.en.md b/changes/ee/feat-10409.en.md new file mode 100644 index 000000000..dfa9bfa76 --- /dev/null +++ b/changes/ee/feat-10409.en.md @@ -0,0 +1 @@ +Add support for [Protocol Buffers](https://protobuf.dev/) schemas in Schema Registry. diff --git a/lib-ee/emqx_ee_schema_registry/include/emqx_ee_schema_registry.hrl b/lib-ee/emqx_ee_schema_registry/include/emqx_ee_schema_registry.hrl index af49db6dd..058abf007 100644 --- a/lib-ee/emqx_ee_schema_registry/include/emqx_ee_schema_registry.hrl +++ b/lib-ee/emqx_ee_schema_registry/include/emqx_ee_schema_registry.hrl @@ -10,14 +10,19 @@ -define(SCHEMA_REGISTRY_SHARD, emqx_ee_schema_registry_shard). -define(SERDE_TAB, emqx_ee_schema_registry_serde_tab). +-define(PROTOBUF_CACHE_TAB, emqx_ee_schema_registry_protobuf_cache_tab). -type schema_name() :: binary(). -type schema_source() :: binary(). -type encoded_data() :: iodata(). -type decoded_data() :: map(). --type serializer() :: fun((decoded_data()) -> encoded_data()). --type deserializer() :: fun((encoded_data()) -> decoded_data()). +-type serializer() :: + fun((decoded_data()) -> encoded_data()) + | fun((decoded_data(), term()) -> encoded_data()). +-type deserializer() :: + fun((encoded_data()) -> decoded_data()) + | fun((encoded_data(), term()) -> decoded_data()). -type destructor() :: fun(() -> ok). -type serde_type() :: avro. -type serde_opts() :: map(). @@ -29,6 +34,18 @@ destructor :: destructor() }). -type serde() :: #serde{}. + +-record(protobuf_cache, { + fingerprint, + module, + module_binary +}). +-type protobuf_cache() :: #protobuf_cache{ + fingerprint :: binary(), + module :: module(), + module_binary :: binary() +}. 
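%% A hedged illustration of the two serializer shapes typed above; the serde
%% names, payloads and the 'Person' message are examples only.
%% e.g.
%%   {ok, #{serializer := AvroSer}} = emqx_ee_schema_registry:get_serde(my_avro),
%%   AvroBin = AvroSer(#{<<"i">> => 10, <<"s">> => <<"text">>}),
%%
%%   {ok, #{serializer := ProtoSer}} = emqx_ee_schema_registry:get_serde(my_proto),
%%   ProtoBin = ProtoSer(#{<<"name">> => <<"x">>, <<"id">> => 1}, <<"Person">>),
%% i.e. an avro serde is a fun/1 over the decoded data, while a protobuf serde
%% is a fun/2 that also needs the name of a message defined in the .proto source.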
+ -type serde_map() :: #{ name := schema_name(), serializer := serializer(), diff --git a/lib-ee/emqx_ee_schema_registry/rebar.config b/lib-ee/emqx_ee_schema_registry/rebar.config index 223ebf533..e42ff7278 100644 --- a/lib-ee/emqx_ee_schema_registry/rebar.config +++ b/lib-ee/emqx_ee_schema_registry/rebar.config @@ -4,7 +4,8 @@ {deps, [ {emqx, {path, "../../apps/emqx"}}, {emqx_utils, {path, "../../apps/emqx_utils"}}, - {erlavro, {git, "https://github.com/klarna/erlavro.git", {tag, "2.9.8"}}} + {erlavro, {git, "https://github.com/klarna/erlavro.git", {tag, "2.9.8"}}}, + {gpb, "4.19.7"} ]}. {shell, [ diff --git a/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.app.src b/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.app.src index c40fb808a..21f51b361 100644 --- a/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.app.src +++ b/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.app.src @@ -6,7 +6,8 @@ {applications, [ kernel, stdlib, - erlavro + erlavro, + gpb ]}, {env, []}, {modules, []}, diff --git a/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.erl b/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.erl index 4c00903d5..59a224fc7 100644 --- a/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.erl +++ b/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.erl @@ -176,7 +176,14 @@ create_tables() -> {record_name, serde}, {attributes, record_info(fields, serde)} ]), - ok = mria:wait_for_tables([?SERDE_TAB]), + ok = mria:create_table(?PROTOBUF_CACHE_TAB, [ + {type, set}, + {rlog_shard, ?SCHEMA_REGISTRY_SHARD}, + {storage, disc_only_copies}, + {record_name, protobuf_cache}, + {attributes, record_info(fields, protobuf_cache)} + ]), + ok = mria:wait_for_tables([?SERDE_TAB, ?PROTOBUF_CACHE_TAB]), ok. do_build_serdes(Schemas) -> diff --git a/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry_schema.erl b/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry_schema.erl index bcdc63166..237ec706f 100644 --- a/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry_schema.erl +++ b/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry_schema.erl @@ -53,10 +53,20 @@ fields(avro) -> mk(emqx_schema:json_binary(), #{required => true, desc => ?DESC("schema_source")})}, {description, mk(binary(), #{default => <<>>, desc => ?DESC("schema_description")})} ]; +fields(protobuf) -> + [ + {type, mk(protobuf, #{required => true, desc => ?DESC("schema_type")})}, + {source, mk(binary(), #{required => true, desc => ?DESC("schema_source")})}, + {description, mk(binary(), #{default => <<>>, desc => ?DESC("schema_description")})} + ]; fields("get_avro") -> [{name, mk(binary(), #{required => true, desc => ?DESC("schema_name")})} | fields(avro)]; +fields("get_protobuf") -> + [{name, mk(binary(), #{required => true, desc => ?DESC("schema_name")})} | fields(protobuf)]; fields("put_avro") -> fields(avro); +fields("put_protobuf") -> + fields(protobuf); fields("post_" ++ Type) -> fields("get_" ++ Type). @@ -64,6 +74,8 @@ desc(?CONF_KEY_ROOT) -> ?DESC("schema_registry_root"); desc(avro) -> ?DESC("avro_type"); +desc(protobuf) -> + ?DESC("protobuf_type"); desc(_) -> undefined. @@ -96,7 +108,7 @@ mk(Type, Meta) -> hoconsc:mk(Type, Meta). ref(Name) -> hoconsc:ref(?MODULE, Name). supported_serde_types() -> - [avro]. + [avro, protobuf]. refs() -> [ref(Type) || Type <- supported_serde_types()]. 
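%% A hedged example of a schema map matching the protobuf fields above; the
%% serde name and the .proto source are illustrative only.
%% e.g.
%%   ok = emqx_ee_schema_registry:add_schema(
%%       my_serde,
%%       #{
%%           type => protobuf,
%%           source => <<"message Person {required string name = 1; required int32 id = 2;}">>
%%       }
%%   ),
%% after which the serde can be fetched with get_serde/1 and used from rule SQL
%% via schema_encode/3 and schema_decode/3, as exercised in the test suite
%% further down.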
@@ -105,6 +117,8 @@ refs(#{<<"type">> := TypeAtom} = Value) when is_atom(TypeAtom) -> refs(Value#{<<"type">> := atom_to_binary(TypeAtom)}); refs(#{<<"type">> := <<"avro">>}) -> [ref(avro)]; +refs(#{<<"type">> := <<"protobuf">>}) -> + [ref(protobuf)]; refs(_) -> Expected = lists:join(" | ", [atom_to_list(T) || T <- supported_serde_types()]), throw(#{ @@ -113,12 +127,14 @@ refs(_) -> }). refs_get_api() -> - [ref("get_avro")]. + [ref("get_avro"), ref("get_protobuf")]. refs_get_api(#{<<"type">> := TypeAtom} = Value) when is_atom(TypeAtom) -> refs(Value#{<<"type">> := atom_to_binary(TypeAtom)}); refs_get_api(#{<<"type">> := <<"avro">>}) -> [ref("get_avro")]; +refs_get_api(#{<<"type">> := <<"protobuf">>}) -> + [ref("get_protobuf")]; refs_get_api(_) -> Expected = lists:join(" | ", [atom_to_list(T) || T <- supported_serde_types()]), throw(#{ diff --git a/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry_serde.erl b/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry_serde.erl index 9835ec7c2..c65574032 100644 --- a/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry_serde.erl +++ b/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry_serde.erl @@ -4,8 +4,11 @@ -module(emqx_ee_schema_registry_serde). -include("emqx_ee_schema_registry.hrl"). +-include_lib("emqx/include/logger.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-elvis([{elvis_style, invalid_dynamic_call, #{ignore => [emqx_ee_schema_registry_serde]}}]). + %% API -export([ decode/2, @@ -55,6 +58,27 @@ make_serde(avro, Name, Source0) -> ?tp(serde_destroyed, #{type => avro, name => Name}), ok end, + {Serializer, Deserializer, Destructor}; +make_serde(protobuf, Name, Source) -> + SerdeMod = make_protobuf_serde_mod(Name, Source), + Serializer = + fun(DecodedData0, MessageName0) -> + DecodedData = emqx_utils_maps:safe_atom_key_map(DecodedData0), + MessageName = binary_to_existing_atom(MessageName0, utf8), + SerdeMod:encode_msg(DecodedData, MessageName) + end, + Deserializer = + fun(EncodedData, MessageName0) -> + MessageName = binary_to_existing_atom(MessageName0, utf8), + Decoded = SerdeMod:decode_msg(EncodedData, MessageName), + emqx_utils_maps:binary_key_map(Decoded) + end, + Destructor = + fun() -> + unload_code(SerdeMod), + ?tp(serde_destroyed, #{type => protobuf, name => Name}), + ok + end, {Serializer, Deserializer, Destructor}. %%------------------------------------------------------------------------------ @@ -68,3 +92,111 @@ inject_avro_name(Name, Source0) -> Schema0 = emqx_utils_json:decode(Source0, [return_maps]), Schema = Schema0#{<<"name">> => Name}, emqx_utils_json:encode(Schema). + +-spec make_protobuf_serde_mod(schema_name(), schema_source()) -> module(). +make_protobuf_serde_mod(Name, Source) -> + {SerdeMod0, SerdeModFileName} = protobuf_serde_mod_name(Name), + case lazy_generate_protobuf_code(SerdeMod0, Source) of + {ok, SerdeMod, ModBinary} -> + load_code(SerdeMod, SerdeModFileName, ModBinary), + SerdeMod; + {error, #{error := Error, warnings := Warnings}} -> + ?SLOG( + warning, + #{ + msg => "error_generating_protobuf_code", + error => Error, + warnings => Warnings + } + ), + error({invalid_protobuf_schema, Error}) + end. + +-spec protobuf_serde_mod_name(schema_name()) -> {module(), string()}. +protobuf_serde_mod_name(Name) -> + %% must be a string (list) + SerdeModName = "$schema_parser_" ++ binary_to_list(Name), + SerdeMod = list_to_atom(SerdeModName), + %% the "path" to the module, for `code:load_binary'. 
+ SerdeModFileName = SerdeModName ++ ".memory", + {SerdeMod, SerdeModFileName}. + +-spec lazy_generate_protobuf_code(module(), schema_source()) -> + {ok, module(), binary()} | {error, #{error := term(), warnings := [term()]}}. +lazy_generate_protobuf_code(SerdeMod0, Source) -> + %% We run this inside a transaction with locks to avoid running + %% the compile on all nodes; only one will get the lock, compile + %% the schema, and other nodes will simply read the final result. + {atomic, Res} = mria:transaction( + ?SCHEMA_REGISTRY_SHARD, + fun lazy_generate_protobuf_code_trans/2, + [SerdeMod0, Source] + ), + Res. + +-spec lazy_generate_protobuf_code_trans(module(), schema_source()) -> + {ok, module(), binary()} | {error, #{error := term(), warnings := [term()]}}. +lazy_generate_protobuf_code_trans(SerdeMod0, Source) -> + Fingerprint = erlang:md5(Source), + _ = mnesia:lock({record, ?PROTOBUF_CACHE_TAB, Fingerprint}, write), + case mnesia:read(?PROTOBUF_CACHE_TAB, Fingerprint) of + [#protobuf_cache{module = SerdeMod, module_binary = ModBinary}] -> + ?tp(schema_registry_protobuf_cache_hit, #{}), + {ok, SerdeMod, ModBinary}; + [] -> + ?tp(schema_registry_protobuf_cache_miss, #{}), + case generate_protobuf_code(SerdeMod0, Source) of + {ok, SerdeMod, ModBinary} -> + CacheEntry = #protobuf_cache{ + fingerprint = Fingerprint, + module = SerdeMod, + module_binary = ModBinary + }, + ok = mnesia:write(?PROTOBUF_CACHE_TAB, CacheEntry, write), + {ok, SerdeMod, ModBinary}; + {ok, SerdeMod, ModBinary, _Warnings} -> + CacheEntry = #protobuf_cache{ + fingerprint = Fingerprint, + module = SerdeMod, + module_binary = ModBinary + }, + ok = mnesia:write(?PROTOBUF_CACHE_TAB, CacheEntry, write), + {ok, SerdeMod, ModBinary}; + error -> + {error, #{error => undefined, warnings => []}}; + {error, Error} -> + {error, #{error => Error, warnings => []}}; + {error, Error, Warnings} -> + {error, #{error => Error, warnings => Warnings}} + end + end. + +generate_protobuf_code(SerdeMod, Source) -> + gpb_compile:string( + SerdeMod, + Source, + [ + binary, + strings_as_binaries, + {maps, true}, + %% Fixme: currently, some bug in `gpb' prevents this + %% option from working with `oneof' types... We're then + %% forced to use atom key maps. + %% {maps_key_type, binary}, + {maps_oneof, flat}, + {verify, always}, + {maps_unset_optional, omitted} + ] + ). + +-spec load_code(module(), string(), binary()) -> ok. +load_code(SerdeMod, SerdeModFileName, ModBinary) -> + _ = code:purge(SerdeMod), + {module, SerdeMod} = code:load_binary(SerdeMod, SerdeModFileName, ModBinary), + ok. + +-spec unload_code(module()) -> ok. +unload_code(SerdeMod) -> + _ = code:purge(SerdeMod), + _ = code:delete(SerdeMod), + ok. diff --git a/lib-ee/emqx_ee_schema_registry/test/emqx_ee_schema_registry_SUITE.erl b/lib-ee/emqx_ee_schema_registry/test/emqx_ee_schema_registry_SUITE.erl index 5bfba34b3..7ad01fa06 100644 --- a/lib-ee/emqx_ee_schema_registry/test/emqx_ee_schema_registry_SUITE.erl +++ b/lib-ee/emqx_ee_schema_registry/test/emqx_ee_schema_registry_SUITE.erl @@ -21,11 +21,19 @@ %%------------------------------------------------------------------------------ all() -> - [{group, avro}]. + [{group, avro}, {group, protobuf}]. groups() -> - TCs = emqx_common_test_helpers:all(?MODULE), - [{avro, TCs}]. + AllTCs = emqx_common_test_helpers:all(?MODULE), + ProtobufOnlyTCs = protobuf_only_tcs(), + TCs = AllTCs -- ProtobufOnlyTCs, + [{avro, TCs}, {protobuf, AllTCs}]. + +protobuf_only_tcs() -> + [ + t_protobuf_union_encode, + t_protobuf_union_decode + ]. 
init_per_suite(Config) -> emqx_config:save_schema_mod_and_names(emqx_ee_schema_registry_schema), @@ -38,6 +46,8 @@ end_per_suite(_Config) -> init_per_group(avro, Config) -> [{serde_type, avro} | Config]; +init_per_group(protobuf, Config) -> + [{serde_type, protobuf} | Config]; init_per_group(_Group, Config) -> Config. @@ -95,8 +105,12 @@ create_rule_http(RuleParams) -> Path = emqx_mgmt_api_test_util:api_path(["rules"]), AuthHeader = emqx_mgmt_api_test_util:auth_header_(), case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of - {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; - Error -> Error + {ok, Res0} -> + Res = #{<<"id">> := RuleId} = emqx_utils_json:decode(Res0, [return_maps]), + on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), + {ok, Res}; + Error -> + Error end. schema_params(avro) -> @@ -108,35 +122,174 @@ schema_params(avro) -> ] }, SourceBin = emqx_utils_json:encode(Source), - #{type => avro, source => SourceBin}. + #{type => avro, source => SourceBin}; +schema_params(protobuf) -> + SourceBin = + << + "message Person {\n" + " required string name = 1;\n" + " required int32 id = 2;\n" + " optional string email = 3;\n" + " }\n" + "message UnionValue {\n" + " oneof u {\n" + " int32 a = 1;\n" + " string b = 2;\n" + " }\n" + "}\n" + >>, + #{type => protobuf, source => SourceBin}. create_serde(SerdeType, SerdeName) -> Schema = schema_params(SerdeType), ok = emqx_ee_schema_registry:add_schema(SerdeName, Schema), ok. -sql_for(avro, encode_decode1) -> - << - "select\n" - " schema_decode('my_serde',\n" - " schema_encode('my_serde', json_decode(payload))) as decoded,\n" - " decoded.i as decoded_int,\n" - " decoded.s as decoded_string\n" - " from t" - >>; -sql_for(avro, encode1) -> - << - "select\n" - " schema_encode('my_serde', json_decode(payload)) as encoded\n" - " from t" - >>; -sql_for(avro, decode1) -> - << - "select\n" - " schema_decode('my_serde', payload) as decoded\n" - " from t" - >>; -sql_for(Type, Name) -> +test_params_for(avro, encode_decode1) -> + SQL = + << + "select\n" + " schema_decode('my_serde',\n" + " schema_encode('my_serde', json_decode(payload))) as decoded,\n\n" + " decoded.i as decoded_int,\n" + " decoded.s as decoded_string\n" + "from t\n" + >>, + Payload = #{<<"i">> => 10, <<"s">> => <<"text">>}, + ExpectedRuleOutput = + #{ + <<"decoded">> => #{<<"i">> => 10, <<"s">> => <<"text">>}, + <<"decoded_int">> => 10, + <<"decoded_string">> => <<"text">> + }, + ExtraArgs = [], + #{ + sql => SQL, + payload => Payload, + expected_rule_output => ExpectedRuleOutput, + extra_args => ExtraArgs + }; +test_params_for(avro, encode1) -> + SQL = + << + "select\n" + " schema_encode('my_serde', json_decode(payload)) as encoded\n" + "from t\n" + >>, + Payload = #{<<"i">> => 10, <<"s">> => <<"text">>}, + ExtraArgs = [], + #{ + sql => SQL, + payload => Payload, + extra_args => ExtraArgs + }; +test_params_for(avro, decode1) -> + SQL = + << + "select\n" + " schema_decode('my_serde', payload) as decoded\n" + "from t\n" + >>, + Payload = #{<<"i">> => 10, <<"s">> => <<"text">>}, + ExtraArgs = [], + #{ + sql => SQL, + payload => Payload, + extra_args => ExtraArgs + }; +test_params_for(protobuf, encode_decode1) -> + SQL = + << + "select\n" + " schema_decode('my_serde',\n" + " schema_encode('my_serde', json_decode(payload), 'Person'),\n" + " 'Person') as decoded,\n" + " decoded.name as decoded_name,\n" + " decoded.email as decoded_email,\n" + " decoded.id as decoded_id\n" + "from t\n" + >>, + Payload = #{<<"name">> => <<"some name">>, 
<<"id">> => 10, <<"email">> => <<"emqx@emqx.io">>}, + ExpectedRuleOutput = + #{ + <<"decoded">> => + #{ + <<"email">> => <<"emqx@emqx.io">>, + <<"id">> => 10, + <<"name">> => <<"some name">> + }, + <<"decoded_email">> => <<"emqx@emqx.io">>, + <<"decoded_id">> => 10, + <<"decoded_name">> => <<"some name">> + }, + ExtraArgs = [<<"Person">>], + #{ + sql => SQL, + payload => Payload, + extra_args => ExtraArgs, + expected_rule_output => ExpectedRuleOutput + }; +test_params_for(protobuf, decode1) -> + SQL = + << + "select\n" + " schema_decode('my_serde', payload, 'Person') as decoded\n" + "from t\n" + >>, + Payload = #{<<"name">> => <<"some name">>, <<"id">> => 10, <<"email">> => <<"emqx@emqx.io">>}, + ExtraArgs = [<<"Person">>], + #{ + sql => SQL, + payload => Payload, + extra_args => ExtraArgs + }; +test_params_for(protobuf, encode1) -> + SQL = + << + "select\n" + " schema_encode('my_serde', json_decode(payload), 'Person') as encoded\n" + "from t\n" + >>, + Payload = #{<<"name">> => <<"some name">>, <<"id">> => 10, <<"email">> => <<"emqx@emqx.io">>}, + ExtraArgs = [<<"Person">>], + #{ + sql => SQL, + payload => Payload, + extra_args => ExtraArgs + }; +test_params_for(protobuf, union1) -> + SQL = + << + "select\n" + " schema_decode('my_serde', payload, 'UnionValue') as decoded,\n" + " decoded.a as decoded_a,\n" + " decoded.b as decoded_b\n" + "from t\n" + >>, + PayloadA = #{<<"a">> => 10}, + PayloadB = #{<<"b">> => <<"string">>}, + ExtraArgs = [<<"UnionValue">>], + #{ + sql => SQL, + payload => #{a => PayloadA, b => PayloadB}, + extra_args => ExtraArgs + }; +test_params_for(protobuf, union2) -> + SQL = + << + "select\n" + " schema_encode('my_serde', json_decode(payload), 'UnionValue') as encoded\n" + "from t\n" + >>, + PayloadA = #{<<"a">> => 10}, + PayloadB = #{<<"b">> => <<"string">>}, + ExtraArgs = [<<"UnionValue">>], + #{ + sql => SQL, + payload => #{a => PayloadA, b => PayloadB}, + extra_args => ExtraArgs + }; +test_params_for(Type, Name) -> ct:fail("unimplemented: ~p", [{Type, Name}]). clear_schemas() -> @@ -238,6 +391,40 @@ wait_for_cluster_rpc(Node) -> true = is_pid(erpc:call(Node, erlang, whereis, [emqx_config_handler])) ). +serde_deletion_calls_destructor_spec(#{serde_type := SerdeType}, Trace) -> + ?assert( + ?strict_causality( + #{?snk_kind := will_delete_schema}, + #{?snk_kind := serde_destroyed, type := SerdeType}, + Trace + ) + ), + ok. + +protobuf_unique_cache_hit_spec(#{serde_type := protobuf} = Res, Trace) -> + #{nodes := Nodes} = Res, + CacheEvents = ?of_kind( + [ + schema_registry_protobuf_cache_hit, + schema_registry_protobuf_cache_miss + ], + Trace + ), + ?assertMatch( + [ + schema_registry_protobuf_cache_hit, + schema_registry_protobuf_cache_miss + ], + lists:sort(?projection(?snk_kind, CacheEvents)) + ), + ?assertEqual( + lists:usort(Nodes), + lists:usort([N || #{?snk_meta := #{node := N}} <- CacheEvents]) + ), + ok; +protobuf_unique_cache_hit_spec(_Res, _Trace) -> + ok. 
+ %%------------------------------------------------------------------------------ %% Testcases %%------------------------------------------------------------------------------ @@ -259,27 +446,16 @@ t_encode_decode(Config) -> SerdeType = ?config(serde_type, Config), SerdeName = my_serde, ok = create_serde(SerdeType, SerdeName), - {ok, #{<<"id">> := RuleId}} = create_rule_http(#{sql => sql_for(SerdeType, encode_decode1)}), - on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - Payload = #{<<"i">> => 10, <<"s">> => <<"text">>}, + #{ + sql := SQL, + payload := Payload, + expected_rule_output := ExpectedRuleOutput + } = test_params_for(SerdeType, encode_decode1), + {ok, _} = create_rule_http(#{sql => SQL}), PayloadBin = emqx_utils_json:encode(Payload), emqx:publish(emqx_message:make(<<"t">>, PayloadBin)), Res = receive_action_results(), - ?assertMatch( - #{ - data := - #{ - <<"decoded">> := - #{ - <<"i">> := 10, - <<"s">> := <<"text">> - }, - <<"decoded_int">> := 10, - <<"decoded_string">> := <<"text">> - } - }, - Res - ), + ?assertMatch(#{data := ExpectedRuleOutput}, Res), ok. t_delete_serde(Config) -> @@ -308,9 +484,12 @@ t_encode(Config) -> SerdeType = ?config(serde_type, Config), SerdeName = my_serde, ok = create_serde(SerdeType, SerdeName), - {ok, #{<<"id">> := RuleId}} = create_rule_http(#{sql => sql_for(SerdeType, encode1)}), - on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - Payload = #{<<"i">> => 10, <<"s">> => <<"text">>}, + #{ + sql := SQL, + payload := Payload, + extra_args := ExtraArgs + } = test_params_for(SerdeType, encode1), + {ok, _} = create_rule_http(#{sql => SQL}), PayloadBin = emqx_utils_json:encode(Payload), emqx:publish(emqx_message:make(<<"t">>, PayloadBin)), Published = receive_published(?LINE), @@ -320,18 +499,21 @@ t_encode(Config) -> ), #{payload := #{<<"encoded">> := Encoded}} = Published, {ok, #{deserializer := Deserializer}} = emqx_ee_schema_registry:get_serde(SerdeName), - ?assertEqual(Payload, Deserializer(Encoded)), + ?assertEqual(Payload, apply(Deserializer, [Encoded | ExtraArgs])), ok. t_decode(Config) -> SerdeType = ?config(serde_type, Config), SerdeName = my_serde, ok = create_serde(SerdeType, SerdeName), - {ok, #{<<"id">> := RuleId}} = create_rule_http(#{sql => sql_for(SerdeType, decode1)}), - on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - Payload = #{<<"i">> => 10, <<"s">> => <<"text">>}, + #{ + sql := SQL, + payload := Payload, + extra_args := ExtraArgs + } = test_params_for(SerdeType, decode1), + {ok, _} = create_rule_http(#{sql => SQL}), {ok, #{serializer := Serializer}} = emqx_ee_schema_registry:get_serde(SerdeName), - EncodedBin = Serializer(Payload), + EncodedBin = apply(Serializer, [Payload | ExtraArgs]), emqx:publish(emqx_message:make(<<"t">>, EncodedBin)), Published = receive_published(?LINE), ?assertMatch( @@ -342,6 +524,76 @@ t_decode(Config) -> ?assertEqual(Payload, Decoded), ok. 
+t_protobuf_union_encode(Config) -> + SerdeType = ?config(serde_type, Config), + ?assertEqual(protobuf, SerdeType), + SerdeName = my_serde, + ok = create_serde(SerdeType, SerdeName), + #{ + sql := SQL, + payload := #{a := PayloadA, b := PayloadB}, + extra_args := ExtraArgs + } = test_params_for(SerdeType, union1), + {ok, _} = create_rule_http(#{sql => SQL}), + {ok, #{serializer := Serializer}} = emqx_ee_schema_registry:get_serde(SerdeName), + + EncodedBinA = apply(Serializer, [PayloadA | ExtraArgs]), + emqx:publish(emqx_message:make(<<"t">>, EncodedBinA)), + PublishedA = receive_published(?LINE), + ?assertMatch( + #{payload := #{<<"decoded">> := _}}, + PublishedA + ), + #{payload := #{<<"decoded">> := DecodedA}} = PublishedA, + ?assertEqual(PayloadA, DecodedA), + + EncodedBinB = apply(Serializer, [PayloadB | ExtraArgs]), + emqx:publish(emqx_message:make(<<"t">>, EncodedBinB)), + PublishedB = receive_published(?LINE), + ?assertMatch( + #{payload := #{<<"decoded">> := _}}, + PublishedB + ), + #{payload := #{<<"decoded">> := DecodedB}} = PublishedB, + ?assertEqual(PayloadB, DecodedB), + + ok. + +t_protobuf_union_decode(Config) -> + SerdeType = ?config(serde_type, Config), + ?assertEqual(protobuf, SerdeType), + SerdeName = my_serde, + ok = create_serde(SerdeType, SerdeName), + #{ + sql := SQL, + payload := #{a := PayloadA, b := PayloadB}, + extra_args := ExtraArgs + } = test_params_for(SerdeType, union2), + {ok, _} = create_rule_http(#{sql => SQL}), + {ok, #{deserializer := Deserializer}} = emqx_ee_schema_registry:get_serde(SerdeName), + + EncodedBinA = emqx_utils_json:encode(PayloadA), + emqx:publish(emqx_message:make(<<"t">>, EncodedBinA)), + PublishedA = receive_published(?LINE), + ?assertMatch( + #{payload := #{<<"encoded">> := _}}, + PublishedA + ), + #{payload := #{<<"encoded">> := EncodedA}} = PublishedA, + ?assertEqual(PayloadA, apply(Deserializer, [EncodedA | ExtraArgs])), + + EncodedBinB = emqx_utils_json:encode(PayloadB), + emqx:publish(emqx_message:make(<<"t">>, EncodedBinB)), + PublishedB = receive_published(?LINE), + ?assertMatch( + #{payload := #{<<"encoded">> := _}}, + PublishedB + ), + #{payload := #{<<"encoded">> := EncodedB}} = PublishedB, + ?assertEqual(PayloadB, apply(Deserializer, [EncodedB | ExtraArgs])), + + ok. 
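%% ---------------------------------------------------------------------------
%% Editor's note (not part of the patch): the two union tests above depend on
%% the `{maps_oneof, flat}' option passed to `gpb_compile:string/3'. Assuming
%% gpb's documented behaviour, a decoded `UnionValue' carries the chosen
%% `oneof' member as a single flat key:
%%     #{a => 10}          or    #{b => <<"string">>}
%% whereas gpb's default tuple representation would nest it under `u':
%%     #{u => {a, 10}}     or    #{u => {b, <<"string">>}}
%% which is why the tests compare the decoded value directly against the
%% original single-key payload maps.
%% ---------------------------------------------------------------------------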
+ t_fail_rollback(Config) -> SerdeType = ?config(serde_type, Config), OkSchema = emqx_utils_maps:binary_key_map(schema_params(SerdeType)), @@ -369,6 +621,10 @@ t_cluster_serde_build(Config) -> Cluster = cluster(Config), SerdeName = my_serde, Schema = schema_params(SerdeType), + #{ + payload := Payload, + extra_args := ExtraArgs + } = test_params_for(SerdeType, encode_decode1), ?check_trace( begin Nodes = [N1, N2 | _] = start_cluster(Cluster), @@ -385,8 +641,14 @@ t_cluster_serde_build(Config) -> Res0 = emqx_ee_schema_registry:get_serde(SerdeName), ?assertMatch({ok, #{}}, Res0, #{node => N}), {ok, #{serializer := Serializer, deserializer := Deserializer}} = Res0, - Payload = #{<<"i">> => 10, <<"s">> => <<"text">>}, - ?assertEqual(Payload, Deserializer(Serializer(Payload)), #{node => N}), + ?assertEqual( + Payload, + apply( + Deserializer, + [apply(Serializer, [Payload | ExtraArgs]) | ExtraArgs] + ), + #{node => N} + ), ok end) end, @@ -417,17 +679,11 @@ t_cluster_serde_build(Config) -> end, Nodes ), - ok + #{serde_type => SerdeType, nodes => Nodes} end, - fun(Trace) -> - ?assert( - ?strict_causality( - #{?snk_kind := will_delete_schema}, - #{?snk_kind := serde_destroyed, type := SerdeType}, - Trace - ) - ), - ok - end + [ + {"destructor is always called", fun ?MODULE:serde_deletion_calls_destructor_spec/2}, + {"protobuf is only built on one node", fun ?MODULE:protobuf_unique_cache_hit_spec/2} + ] ), ok. diff --git a/lib-ee/emqx_ee_schema_registry/test/emqx_ee_schema_registry_http_api_SUITE.erl b/lib-ee/emqx_ee_schema_registry/test/emqx_ee_schema_registry_http_api_SUITE.erl index e7034d562..ee6a693db 100644 --- a/lib-ee/emqx_ee_schema_registry/test/emqx_ee_schema_registry_http_api_SUITE.erl +++ b/lib-ee/emqx_ee_schema_registry/test/emqx_ee_schema_registry_http_api_SUITE.erl @@ -19,7 +19,17 @@ %%------------------------------------------------------------------------------ all() -> - emqx_common_test_helpers:all(?MODULE). + [ + {group, avro}, + {group, protobuf} + ]. + +groups() -> + AllTCs = emqx_common_test_helpers:all(?MODULE), + [ + {avro, AllTCs}, + {protobuf, AllTCs} + ]. init_per_suite(Config) -> emqx_config:save_schema_mod_and_names(emqx_ee_schema_registry_schema), @@ -30,6 +40,48 @@ end_per_suite(_Config) -> emqx_mgmt_api_test_util:end_suite(lists:reverse(?APPS)), ok. +init_per_group(avro, Config) -> + Source = #{ + type => record, + fields => [ + #{name => <<"i">>, type => <<"int">>}, + #{name => <<"s">>, type => <<"string">>} + ] + }, + SourceBin = emqx_utils_json:encode(Source), + InvalidSourceBin = <<"{}">>, + [ + {serde_type, avro}, + {schema_source, SourceBin}, + {invalid_schema_source, InvalidSourceBin} + | Config + ]; +init_per_group(protobuf, Config) -> + SourceBin = + << + "message Person {\n" + " required string name = 1;\n" + " required int32 id = 2;\n" + " optional string email = 3;\n" + " }\n" + "message UnionValue {\n" + " oneof u {\n" + " int32 a = 1;\n" + " string b = 2;\n" + " }\n" + "}\n" + >>, + InvalidSourceBin = <<"xxxx">>, + [ + {serde_type, protobuf}, + {schema_source, SourceBin}, + {invalid_schema_source, InvalidSourceBin} + | Config + ]. + +end_per_group(_Group, _Config) -> + ok. 
+ init_per_testcase(_TestCase, Config) -> clear_schemas(), ok = snabbkaffe:start_trace(), @@ -93,18 +145,14 @@ clear_schemas() -> %% Testcases %%------------------------------------------------------------------------------ -t_crud(_Config) -> - SchemaName = <<"my_avro_schema">>, - Source = #{ - type => record, - fields => [ - #{name => <<"i">>, type => <<"int">>}, - #{name => <<"s">>, type => <<"string">>} - ] - }, - SourceBin = emqx_utils_json:encode(Source), +t_crud(Config) -> + SerdeType = ?config(serde_type, Config), + SourceBin = ?config(schema_source, Config), + InvalidSourceBin = ?config(invalid_schema_source, Config), + SerdeTypeBin = atom_to_binary(SerdeType), + SchemaName = <<"my_schema">>, Params = #{ - <<"type">> => <<"avro">>, + <<"type">> => SerdeTypeBin, <<"source">> => SourceBin, <<"name">> => SchemaName, <<"description">> => <<"My schema">> @@ -138,7 +186,7 @@ t_crud(_Config) -> %% create a schema ?assertMatch( {ok, 201, #{ - <<"type">> := <<"avro">>, + <<"type">> := SerdeTypeBin, <<"source">> := SourceBin, <<"name">> := SchemaName, <<"description">> := <<"My schema">> @@ -147,7 +195,7 @@ t_crud(_Config) -> ), ?assertMatch( {ok, 200, #{ - <<"type">> := <<"avro">>, + <<"type">> := SerdeTypeBin, <<"source">> := SourceBin, <<"name">> := SchemaName, <<"description">> := <<"My schema">> @@ -157,7 +205,7 @@ t_crud(_Config) -> ?assertMatch( {ok, 200, [ #{ - <<"type">> := <<"avro">>, + <<"type">> := SerdeTypeBin, <<"source">> := SourceBin, <<"name">> := SchemaName, <<"description">> := <<"My schema">> @@ -168,7 +216,7 @@ t_crud(_Config) -> UpdateParams1 = UpdateParams#{<<"description">> := <<"My new schema">>}, ?assertMatch( {ok, 200, #{ - <<"type">> := <<"avro">>, + <<"type">> := SerdeTypeBin, <<"source">> := SourceBin, <<"name">> := SchemaName, <<"description">> := <<"My new schema">> @@ -188,9 +236,9 @@ t_crud(_Config) -> {ok, 400, #{ <<"code">> := <<"BAD_REQUEST">>, <<"message">> := - <<"{post_config_update,emqx_ee_schema_registry,{not_found,<<\"type\">>}}">> + <<"{post_config_update,emqx_ee_schema_registry,", _/binary>> }}, - request({put, SchemaName, UpdateParams#{<<"source">> := <<"{}">>}}) + request({put, SchemaName, UpdateParams#{<<"source">> := InvalidSourceBin}}) ), ?assertMatch( @@ -229,9 +277,9 @@ t_crud(_Config) -> {ok, 400, #{ <<"code">> := <<"BAD_REQUEST">>, <<"message">> := - <<"{post_config_update,emqx_ee_schema_registry,{not_found,<<\"type\">>}}">> + <<"{post_config_update,emqx_ee_schema_registry,", _/binary>> }}, - request({post, Params#{<<"source">> := <<"{}">>}}) + request({post, Params#{<<"source">> := InvalidSourceBin}}) ), %% unknown serde type diff --git a/lib-ee/emqx_ee_schema_registry/test/emqx_ee_schema_registry_serde_SUITE.erl b/lib-ee/emqx_ee_schema_registry/test/emqx_ee_schema_registry_serde_SUITE.erl index 12798c6a2..1ab5e3c01 100644 --- a/lib-ee/emqx_ee_schema_registry/test/emqx_ee_schema_registry_serde_SUITE.erl +++ b/lib-ee/emqx_ee_schema_registry/test/emqx_ee_schema_registry_serde_SUITE.erl @@ -60,7 +60,25 @@ schema_params(avro) -> ] }, SourceBin = emqx_utils_json:encode(Source), - #{type => avro, source => SourceBin}. + #{type => avro, source => SourceBin}; +schema_params(protobuf) -> + SourceBin = + << + "\n" + " message Person {\n" + " required string name = 1;\n" + " required int32 id = 2;\n" + " optional string email = 3;\n" + " }\n" + " message UnionValue {\n" + " oneof u {\n" + " int32 a = 1;\n" + " string b = 2;\n" + " }\n" + " }\n" + " " + >>, + #{type => protobuf, source => SourceBin}. 
assert_roundtrip(SerdeName, Original) -> Encoded = emqx_ee_schema_registry_serde:encode(SerdeName, Original), @@ -119,3 +137,36 @@ t_serde_not_found(_Config) -> emqx_ee_schema_registry_serde:decode(NonexistentSerde, Original) ), ok. + +t_roundtrip_protobuf(_Config) -> + SerdeName = my_serde, + Params = schema_params(protobuf), + ok = emqx_ee_schema_registry:add_schema(SerdeName, Params), + ExtraArgsPerson = [<<"Person">>], + + Original0 = #{<<"name">> => <<"some name">>, <<"id">> => 10, <<"email">> => <<"emqx@emqx.io">>}, + assert_roundtrip(SerdeName, Original0, ExtraArgsPerson, ExtraArgsPerson), + + %% removing optional field + Original1 = #{<<"name">> => <<"some name">>, <<"id">> => 10}, + assert_roundtrip(SerdeName, Original1, ExtraArgsPerson, ExtraArgsPerson), + + %% `oneof' fields + ExtraArgsUnion = [<<"UnionValue">>], + Original2 = #{<<"a">> => 1}, + assert_roundtrip(SerdeName, Original2, ExtraArgsUnion, ExtraArgsUnion), + + Original3 = #{<<"b">> => <<"string">>}, + assert_roundtrip(SerdeName, Original3, ExtraArgsUnion, ExtraArgsUnion), + + ok. + +t_protobuf_invalid_schema(_Config) -> + SerdeName = my_serde, + Params = schema_params(protobuf), + WrongParams = Params#{source := <<"xxxx">>}, + ?assertMatch( + {error, {post_config_update, _, {invalid_protobuf_schema, _}}}, + emqx_ee_schema_registry:add_schema(SerdeName, WrongParams) + ), + ok. diff --git a/mix.exs b/mix.exs index e358edfcc..36ea9e157 100644 --- a/mix.exs +++ b/mix.exs @@ -94,7 +94,7 @@ defmodule EMQXUmbrella.MixProject do {:ranch, github: "ninenines/ranch", ref: "a692f44567034dacf5efcaa24a24183788594eb7", override: true}, # in conflict by grpc and eetcd - {:gpb, "4.19.5", override: true, runtime: false}, + {:gpb, "4.19.7", override: true, runtime: false}, {:hackney, github: "emqx/hackney", tag: "1.18.1-1", override: true} ] ++ emqx_apps(profile_info, version) ++ diff --git a/rebar.config b/rebar.config index 040b1a7c0..edb544298 100644 --- a/rebar.config +++ b/rebar.config @@ -53,7 +53,7 @@ [ {lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}} , {redbug, "2.0.8"} , {covertool, {git, "https://github.com/zmstone/covertool", {tag, "2.0.4.1"}}} - , {gpb, "4.19.5"} %% gpb only used to build, but not for release, pin it here to avoid fetching a wrong version due to rebar plugins scattered in all the deps + , {gpb, "4.19.7"} , {typerefl, {git, "https://github.com/ieQu1/typerefl", {tag, "0.9.1"}}} , {gun, {git, "https://github.com/emqx/gun", {tag, "1.3.9"}}} , {ehttpc, {git, "https://github.com/emqx/ehttpc", {tag, "0.4.7"}}} diff --git a/rel/i18n/emqx_ee_schema_registry_schema.hocon b/rel/i18n/emqx_ee_schema_registry_schema.hocon index 667c4c0a4..3d1ce4072 100644 --- a/rel/i18n/emqx_ee_schema_registry_schema.hocon +++ b/rel/i18n/emqx_ee_schema_registry_schema.hocon @@ -6,6 +6,12 @@ avro_type.desc: avro_type.label: """Apache Avro""" +protobuf_type.desc: +"""[Protocol Buffers](https://protobuf.dev/) serialization format.""" + +protobuf_type.label: +"""Protocol Buffers""" + schema_description.desc: """A description for this schema.""" diff --git a/rel/i18n/zh/emqx_ee_schema_registry_schema.hocon b/rel/i18n/zh/emqx_ee_schema_registry_schema.hocon index 3bf0a7dc8..2f4c972ec 100644 --- a/rel/i18n/zh/emqx_ee_schema_registry_schema.hocon +++ b/rel/i18n/zh/emqx_ee_schema_registry_schema.hocon @@ -6,6 +6,12 @@ avro_type.desc: avro_type.label: """阿帕奇-阿夫罗""" +protobuf_type.desc: +"""[协议缓冲器](https://protobuf.dev/) 序列化格式。""" + +protobuf_type.label: +"""协议缓冲器""" + schema_description.desc: """对该模式的描述。""" From 
88ca94b4171c4880013f15312fed2d33541d4f8e Mon Sep 17 00:00:00 2001 From: Ilya Averyanov Date: Mon, 17 Apr 2023 20:00:57 +0300 Subject: [PATCH 023/263] fix(auth): fix uri path handling Fix uri path handling `emqx_connector_http`, HTTP authentication and authorization backends. --- apps/emqx_authn/src/emqx_authn_utils.erl | 13 ++++ .../src/simple_authn/emqx_authn_http.erl | 14 ++-- .../emqx_authn/test/emqx_authn_http_SUITE.erl | 49 +++++++++++++- apps/emqx_authz/src/emqx_authz_http.erl | 14 ++-- apps/emqx_authz/src/emqx_authz_utils.erl | 14 +++- .../emqx_authz/test/emqx_authz_http_SUITE.erl | 10 +-- .../src/emqx_connector_http.erl | 65 +++++++++++++++---- changes/ce/fix-10420.en.md | 3 + changes/ce/fix-10420.zh.md | 0 9 files changed, 146 insertions(+), 36 deletions(-) create mode 100644 changes/ce/fix-10420.en.md create mode 100644 changes/ce/fix-10420.zh.md diff --git a/apps/emqx_authn/src/emqx_authn_utils.erl b/apps/emqx_authn/src/emqx_authn_utils.erl index 1352e3daf..12520251e 100644 --- a/apps/emqx_authn/src/emqx_authn_utils.erl +++ b/apps/emqx_authn/src/emqx_authn_utils.erl @@ -28,6 +28,7 @@ parse_sql/2, render_deep/2, render_str/2, + render_urlencoded_str/2, render_sql_params/2, is_superuser/1, bin/1, @@ -129,6 +130,13 @@ render_str(Template, Credential) -> #{return => full_binary, var_trans => fun handle_var/2} ). +render_urlencoded_str(Template, Credential) -> + emqx_placeholder:proc_tmpl( + Template, + mapping_credential(Credential), + #{return => full_binary, var_trans => fun urlencode_var/2} + ). + render_sql_params(ParamList, Credential) -> emqx_placeholder:proc_tmpl( ParamList, @@ -217,6 +225,11 @@ without_password(Credential, [Name | Rest]) -> without_password(Credential, Rest) end. +urlencode_var({var, _} = Var, Value) -> + emqx_http_lib:uri_encode(handle_var(Var, Value)); +urlencode_var(Var, Value) -> + handle_var(Var, Value). + handle_var({var, _Name}, undefined) -> <<>>; handle_var({var, <<"peerhost">>}, PeerHost) -> diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl index 3c34d878e..502562e2c 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl @@ -285,9 +285,9 @@ parse_url(Url) -> BaseUrl = iolist_to_binary([Scheme, "//", HostPort]), case string:split(Remaining, "?", leading) of [Path, QueryString] -> - {BaseUrl, Path, QueryString}; + {BaseUrl, <<"/", Path/binary>>, QueryString}; [Path] -> - {BaseUrl, Path, <<>>} + {BaseUrl, <<"/", Path/binary>>, <<>>} end; [HostPort] -> {iolist_to_binary([Scheme, "//", HostPort]), <<>>, <<>>} @@ -328,7 +328,7 @@ generate_request(Credential, #{ body_template := BodyTemplate }) -> Headers = maps:to_list(Headers0), - Path = emqx_authn_utils:render_str(BasePathTemplate, Credential), + Path = emqx_authn_utils:render_urlencoded_str(BasePathTemplate, Credential), Query = emqx_authn_utils:render_deep(BaseQueryTemplate, Credential), Body = emqx_authn_utils:render_deep(BodyTemplate, Credential), case Method of @@ -343,9 +343,9 @@ generate_request(Credential, #{ end. append_query(Path, []) -> - encode_path(Path); + Path; append_query(Path, Query) -> - encode_path(Path) ++ "?" ++ binary_to_list(qs(Query)). + Path ++ "?" ++ binary_to_list(qs(Query)). qs(KVs) -> qs(KVs, []). @@ -407,10 +407,6 @@ parse_body(ContentType, _) -> uri_encode(T) -> emqx_http_lib:uri_encode(to_list(T)). 
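%% ---------------------------------------------------------------------------
%% Editor's illustration (not part of the patch): the behaviour exercised by
%% `t_authenticate_path_placeholders/1' above, assuming the internal
%% `parse_url/1' and `emqx_authn_utils:render_urlencoded_str/2' changed in
%% this commit.
%%
%%   URL: "http://127.0.0.1:32333/my/p%20ath//${username}/auth//"
%%     base URL       : <<"http://127.0.0.1:32333">>
%%     path template  : <<"/my/p%20ath//${username}/auth//">> (leading "/"
%%                      restored, otherwise kept verbatim)
%%     query template : <<>>
%%
%%   Rendering the path for #{username => <<"us er">>} percent-encodes only
%%   the placeholder value, so the request path becomes
%%     <<"/my/p%20ath//us%20er/auth//">>
%%   Literal segments are no longer re-encoded or normalized, so pre-encoded
%%   characters ("%20"), empty segments ("//") and the trailing "/" survive.
%% ---------------------------------------------------------------------------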
-encode_path(Path) -> - Parts = string:split(Path, "/", all), - lists:flatten(["/" ++ Part || Part <- lists:map(fun uri_encode/1, Parts)]). - request_for_log(Credential, #{url := Url} = State) -> SafeCredential = emqx_authn_utils:without_password(Credential), case generate_request(SafeCredential, State) of diff --git a/apps/emqx_authn/test/emqx_authn_http_SUITE.erl b/apps/emqx_authn/test/emqx_authn_http_SUITE.erl index 851e80f6d..b08167a5b 100644 --- a/apps/emqx_authn/test/emqx_authn_http_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_http_SUITE.erl @@ -47,7 +47,6 @@ }) ). --define(SERVER_RESPONSE_URLENCODE(Result), ?SERVER_RESPONSE_URLENCODE(Result, false)). -define(SERVER_RESPONSE_URLENCODE(Result, IsSuperuser), list_to_binary( "result=" ++ @@ -166,6 +165,54 @@ test_user_auth(#{ ?GLOBAL ). +t_authenticate_path_placeholders(_Config) -> + ok = emqx_authn_http_test_server:stop(), + {ok, _} = emqx_authn_http_test_server:start_link(?HTTP_PORT, <<"/[...]">>), + ok = emqx_authn_http_test_server:set_handler( + fun(Req0, State) -> + Req = + case cowboy_req:path(Req0) of + <<"/my/p%20ath//us%20er/auth//">> -> + cowboy_req:reply( + 200, + #{<<"content-type">> => <<"application/json">>}, + emqx_utils_json:encode(#{result => allow, is_superuser => false}), + Req0 + ); + Path -> + ct:pal("Unexpected path: ~p", [Path]), + cowboy_req:reply(403, Req0) + end, + {ok, Req, State} + end + ), + + Credentials = ?CREDENTIALS#{ + username => <<"us er">> + }, + + AuthConfig = maps:merge( + raw_http_auth_config(), + #{ + <<"url">> => <<"http://127.0.0.1:32333/my/p%20ath//${username}/auth//">>, + <<"body">> => #{} + } + ), + {ok, _} = emqx:update_config( + ?PATH, + {create_authenticator, ?GLOBAL, AuthConfig} + ), + + ?assertMatch( + {ok, #{is_superuser := false}}, + emqx_access_control:authenticate(Credentials) + ), + + _ = emqx_authn_test_lib:delete_authenticators( + [authentication], + ?GLOBAL + ). + t_no_value_for_placeholder(_Config) -> Handler = fun(Req0, State) -> {ok, RawBody, Req1} = cowboy_req:read_body(Req0), diff --git a/apps/emqx_authz/src/emqx_authz_http.erl b/apps/emqx_authz/src/emqx_authz_http.erl index 53378d9c2..5747e6eeb 100644 --- a/apps/emqx_authz/src/emqx_authz_http.erl +++ b/apps/emqx_authz/src/emqx_authz_http.erl @@ -161,9 +161,9 @@ parse_url(Url) -> BaseUrl = iolist_to_binary([Scheme, "//", HostPort]), case string:split(Remaining, "?", leading) of [Path, QueryString] -> - {BaseUrl, Path, QueryString}; + {BaseUrl, <<"/", Path/binary>>, QueryString}; [Path] -> - {BaseUrl, Path, <<>>} + {BaseUrl, <<"/", Path/binary>>, <<>>} end; [HostPort] -> {iolist_to_binary([Scheme, "//", HostPort]), <<>>, <<>>} @@ -185,7 +185,7 @@ generate_request( } ) -> Values = client_vars(Client, PubSub, Topic), - Path = emqx_authz_utils:render_str(BasePathTemplate, Values), + Path = emqx_authz_utils:render_urlencoded_str(BasePathTemplate, Values), Query = emqx_authz_utils:render_deep(BaseQueryTemplate, Values), Body = emqx_authz_utils:render_deep(BodyTemplate, Values), case Method of @@ -202,9 +202,9 @@ generate_request( end. append_query(Path, []) -> - encode_path(Path); + to_list(Path); append_query(Path, Query) -> - encode_path(Path) ++ "?" ++ to_list(query_string(Query)). + to_list(Path) ++ "?" ++ to_list(query_string(Query)). query_string(Body) -> query_string(Body, []). @@ -222,10 +222,6 @@ query_string([{K, V} | More], Acc) -> uri_encode(T) -> emqx_http_lib:uri_encode(to_list(T)). 
-encode_path(Path) -> - Parts = string:split(Path, "/", all), - lists:flatten(["/" ++ Part || Part <- lists:map(fun uri_encode/1, Parts)]). - serialize_body(<<"application/json">>, Body) -> emqx_utils_json:encode(Body); serialize_body(<<"application/x-www-form-urlencoded">>, Body) -> diff --git a/apps/emqx_authz/src/emqx_authz_utils.erl b/apps/emqx_authz/src/emqx_authz_utils.erl index 560141d0a..c01505680 100644 --- a/apps/emqx_authz/src/emqx_authz_utils.erl +++ b/apps/emqx_authz/src/emqx_authz_utils.erl @@ -16,7 +16,6 @@ -module(emqx_authz_utils). --include_lib("emqx/include/emqx_placeholder.hrl"). -include_lib("emqx_authz.hrl"). -export([ @@ -28,6 +27,7 @@ update_config/2, parse_deep/2, parse_str/2, + render_urlencoded_str/2, parse_sql/3, render_deep/2, render_str/2, @@ -128,6 +128,13 @@ render_str(Template, Values) -> #{return => full_binary, var_trans => fun handle_var/2} ). +render_urlencoded_str(Template, Values) -> + emqx_placeholder:proc_tmpl( + Template, + client_vars(Values), + #{return => full_binary, var_trans => fun urlencode_var/2} + ). + render_sql_params(ParamList, Values) -> emqx_placeholder:proc_tmpl( ParamList, @@ -181,6 +188,11 @@ convert_client_var({dn, DN}) -> {cert_subject, DN}; convert_client_var({protocol, Proto}) -> {proto_name, Proto}; convert_client_var(Other) -> Other. +urlencode_var({var, _} = Var, Value) -> + emqx_http_lib:uri_encode(handle_var(Var, Value)); +urlencode_var(Var, Value) -> + handle_var(Var, Value). + handle_var({var, _Name}, undefined) -> <<>>; handle_var({var, <<"peerhost">>}, IpAddr) -> diff --git a/apps/emqx_authz/test/emqx_authz_http_SUITE.erl b/apps/emqx_authz/test/emqx_authz_http_SUITE.erl index 9ff84b805..702bf2756 100644 --- a/apps/emqx_authz/test/emqx_authz_http_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_http_SUITE.erl @@ -199,7 +199,7 @@ t_query_params(_Config) -> peerhost := <<"127.0.0.1">>, proto_name := <<"MQTT">>, mountpoint := <<"MOUNTPOINT">>, - topic := <<"t">>, + topic := <<"t/1">>, action := <<"publish">> } = cowboy_req:match_qs( [ @@ -241,7 +241,7 @@ t_query_params(_Config) -> ?assertEqual( allow, - emqx_access_control:authorize(ClientInfo, publish, <<"t">>) + emqx_access_control:authorize(ClientInfo, publish, <<"t/1">>) ). t_path(_Config) -> @@ -249,13 +249,13 @@ t_path(_Config) -> fun(Req0, State) -> ?assertEqual( << - "/authz/users/" + "/authz/use%20rs/" "user%20name/" "client%20id/" "127.0.0.1/" "MQTT/" "MOUNTPOINT/" - "t/1/" + "t%2F1/" "publish" >>, cowboy_req:path(Req0) @@ -264,7 +264,7 @@ t_path(_Config) -> end, #{ <<"url">> => << - "http://127.0.0.1:33333/authz/users/" + "http://127.0.0.1:33333/authz/use%20rs/" "${username}/" "${clientid}/" "${peerhost}/" diff --git a/apps/emqx_connector/src/emqx_connector_http.erl b/apps/emqx_connector/src/emqx_connector_http.erl index ef2e11eb7..610632e36 100644 --- a/apps/emqx_connector/src/emqx_connector_http.erl +++ b/apps/emqx_connector/src/emqx_connector_http.erl @@ -47,7 +47,7 @@ namespace/0 ]). --export([check_ssl_opts/2, validate_method/1]). +-export([check_ssl_opts/2, validate_method/1, join_paths/2]). -type connect_timeout() :: emqx_schema:duration() | infinity. -type pool_type() :: random | hash. 
@@ -458,7 +458,7 @@ preprocess_request( } = Req ) -> #{ - method => emqx_plugin_libs_rule:preproc_tmpl(bin(Method)), + method => emqx_plugin_libs_rule:preproc_tmpl(to_bin(Method)), path => emqx_plugin_libs_rule:preproc_tmpl(Path), body => maybe_preproc_tmpl(body, Req), headers => preproc_headers(Headers), @@ -471,8 +471,8 @@ preproc_headers(Headers) when is_map(Headers) -> fun(K, V, Acc) -> [ { - emqx_plugin_libs_rule:preproc_tmpl(bin(K)), - emqx_plugin_libs_rule:preproc_tmpl(bin(V)) + emqx_plugin_libs_rule:preproc_tmpl(to_bin(K)), + emqx_plugin_libs_rule:preproc_tmpl(to_bin(V)) } | Acc ] @@ -484,8 +484,8 @@ preproc_headers(Headers) when is_list(Headers) -> lists:map( fun({K, V}) -> { - emqx_plugin_libs_rule:preproc_tmpl(bin(K)), - emqx_plugin_libs_rule:preproc_tmpl(bin(V)) + emqx_plugin_libs_rule:preproc_tmpl(to_bin(K)), + emqx_plugin_libs_rule:preproc_tmpl(to_bin(V)) } end, Headers @@ -553,15 +553,41 @@ formalize_request(Method, BasePath, {Path, Headers, _Body}) when -> formalize_request(Method, BasePath, {Path, Headers}); formalize_request(_Method, BasePath, {Path, Headers, Body}) -> - {filename:join(BasePath, Path), Headers, Body}; + {join_paths(BasePath, Path), Headers, Body}; formalize_request(_Method, BasePath, {Path, Headers}) -> - {filename:join(BasePath, Path), Headers}. + {join_paths(BasePath, Path), Headers}. -bin(Bin) when is_binary(Bin) -> +%% By default, we cannot treat HTTP paths as "file" or "resource" paths, +%% because an HTTP server may handle paths like +%% "/a/b/c/", "/a/b/c" and "/a//b/c" differently. +%% +%% So we try to avoid unneccessary path normalization. +%% +%% See also: `join_paths_test_/0` +join_paths(Path1, Path2) -> + do_join_paths(lists:reverse(to_list(Path1)), to_list(Path2)). + +%% "abc/" + "/cde" +do_join_paths([$/ | Path1], [$/ | Path2]) -> + lists:reverse(Path1) ++ [$/ | Path2]; +%% "abc/" + "cde" +do_join_paths([$/ | Path1], Path2) -> + lists:reverse(Path1) ++ [$/ | Path2]; +%% "abc" + "/cde" +do_join_paths(Path1, [$/ | Path2]) -> + lists:reverse(Path1) ++ [$/ | Path2]; +%% "abc" + "cde" +do_join_paths(Path1, Path2) -> + lists:reverse(Path1) ++ [$/ | Path2]. + +to_list(List) when is_list(List) -> List; +to_list(Bin) when is_binary(Bin) -> binary_to_list(Bin). + +to_bin(Bin) when is_binary(Bin) -> Bin; -bin(Str) when is_list(Str) -> +to_bin(Str) when is_list(Str) -> list_to_binary(Str); -bin(Atom) when is_atom(Atom) -> +to_bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8). reply_delegator(ReplyFunAndArgs, Result) -> @@ -642,4 +668,21 @@ redact_test_() -> ?_assertNotEqual(TestData2, redact(TestData2)) ]. +join_paths_test_() -> + [ + ?_assertEqual("abc/cde", join_paths("abc", "cde")), + ?_assertEqual("abc/cde", join_paths("abc", "/cde")), + ?_assertEqual("abc/cde", join_paths("abc/", "cde")), + ?_assertEqual("abc/cde", join_paths("abc/", "/cde")), + + ?_assertEqual("/", join_paths("", "")), + ?_assertEqual("/cde", join_paths("", "cde")), + ?_assertEqual("/cde", join_paths("", "/cde")), + ?_assertEqual("/cde", join_paths("/", "cde")), + ?_assertEqual("/cde", join_paths("/", "/cde")), + + ?_assertEqual("//cde/", join_paths("/", "//cde/")), + ?_assertEqual("abc///cde/", join_paths("abc//", "//cde/")) + ]. + -endif. diff --git a/changes/ce/fix-10420.en.md b/changes/ce/fix-10420.en.md new file mode 100644 index 000000000..70afd8b45 --- /dev/null +++ b/changes/ce/fix-10420.en.md @@ -0,0 +1,3 @@ +Fix HTTP path handling when composing the URL for the HTTP requests in authentication and authorization modules. 
+* Avoid unnecessary URL normalization since we cannot assume that external servers treat original and normalized URLs equally. This led to bugs like [#10411](https://github.com/emqx/emqx/issues/10411). +* Fix the issue that path segments could be HTTP encoded twice. diff --git a/changes/ce/fix-10420.zh.md b/changes/ce/fix-10420.zh.md new file mode 100644 index 000000000..e69de29bb From 933e6727ba492067d7dd9d08ce2a30b820b11cfa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9F=90=E6=96=87?= Date: Sun, 9 Apr 2023 21:35:46 +0800 Subject: [PATCH 024/263] feat: node array support array(atom()) and comma_separated_atoms --- apps/emqx_conf/src/emqx_conf_schema.erl | 7 +++- .../emqx_conf/test/emqx_conf_schema_tests.erl | 40 +++++++++++++++++++ 2 files changed, 45 insertions(+), 2 deletions(-) diff --git a/apps/emqx_conf/src/emqx_conf_schema.erl b/apps/emqx_conf/src/emqx_conf_schema.erl index 362fc1587..ae3691682 100644 --- a/apps/emqx_conf/src/emqx_conf_schema.erl +++ b/apps/emqx_conf/src/emqx_conf_schema.erl @@ -135,7 +135,7 @@ fields("cluster") -> )}, {"core_nodes", sc( - emqx_schema:comma_separated_atoms(), + node_array(), #{ mapping => "mria.core_nodes", default => [], @@ -203,7 +203,7 @@ fields(cluster_static) -> [ {"seeds", sc( - hoconsc:array(atom()), + node_array(), #{ default => [], desc => ?DESC(cluster_static_seeds), @@ -1312,3 +1312,6 @@ validator_string_re(Val, RE, Error) -> catch _:_ -> {error, Error} end. + +node_array() -> + hoconsc:union([emqx_schema:comma_separated_atoms(), hoconsc:array(atom())]). diff --git a/apps/emqx_conf/test/emqx_conf_schema_tests.erl b/apps/emqx_conf/test/emqx_conf_schema_tests.erl index 3653b9d19..453aca7a8 100644 --- a/apps/emqx_conf/test/emqx_conf_schema_tests.erl +++ b/apps/emqx_conf/test/emqx_conf_schema_tests.erl @@ -5,6 +5,46 @@ -module(emqx_conf_schema_tests). -include_lib("eunit/include/eunit.hrl"). +array_nodes_test() -> + ExpectNodes = ['emqx1@127.0.0.1', 'emqx2@127.0.0.1'], + BaseConf = + "" + "\n" + " node {\n" + " name = \"emqx1@127.0.0.1\"\n" + " cookie = \"emqxsecretcookie\"\n" + " data_dir = \"data\"\n" + " }\n" + " cluster {\n" + " name = emqxcl\n" + " discovery_strategy = static\n" + " static.seeds = ~p\n" + " core_nodes = ~p\n" + " }\n" + " " + "", + lists:foreach( + fun(Nodes) -> + ConfFile = iolist_to_binary(io_lib:format(BaseConf, [Nodes, Nodes])), + {ok, Conf} = hocon:binary(ConfFile, #{format => richmap}), + ConfList = hocon_tconf:generate(emqx_conf_schema, Conf), + ClusterDiscovery = proplists:get_value( + cluster_discovery, proplists:get_value(ekka, ConfList) + ), + ?assertEqual( + {static, [{seeds, ExpectNodes}]}, + ClusterDiscovery, + Nodes + ), + ?assertEqual( + ExpectNodes, + proplists:get_value(core_nodes, proplists:get_value(mria, ConfList)), + Nodes + ) + end, + [["emqx1@127.0.0.1", "emqx2@127.0.0.1"], "emqx1@127.0.0.1, emqx2@127.0.0.1"] + ), + ok. doc_gen_test() -> %% the json file too large to encode. From 1ef74f1adac67566e62bd29434626dcbc9bdfc6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9F=90=E6=96=87?= Date: Thu, 13 Apr 2023 20:19:39 +0800 Subject: [PATCH 025/263] chore: upgrade hocon to 0.39.1 --- changes/ce/feat-10389.en.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 changes/ce/feat-10389.en.md diff --git a/changes/ce/feat-10389.en.md b/changes/ce/feat-10389.en.md new file mode 100644 index 000000000..51d48bdfb --- /dev/null +++ b/changes/ce/feat-10389.en.md @@ -0,0 +1,2 @@ +Now `cluster.core_nodes` and `cluster.statics.seeds` are specified in the same way. 
+configure them as `["emqx1@127.0.0.1", "emqx2@127.0.0.1"]` and `"emqx1@127.0.0.1,emqx2@127.0.0.1"` has the same effect. From ad3e529994d22cc6519c7cc73d080460980897da Mon Sep 17 00:00:00 2001 From: zhongwencool Date: Thu, 13 Apr 2023 21:02:37 +0800 Subject: [PATCH 026/263] chore: update changes/ce/feat-10389.en.md Co-authored-by: Thales Macedo Garitezi --- apps/emqx/rebar.config | 2 +- changes/ce/feat-10389.en.md | 2 +- mix.exs | 2 +- rebar.config | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index 5945ccc7c..6788b4f40 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -29,7 +29,7 @@ {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}}, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.6"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, - {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.1"}}}, + {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.2"}}}, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}}, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}, {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}}, diff --git a/changes/ce/feat-10389.en.md b/changes/ce/feat-10389.en.md index 51d48bdfb..d6b8d236f 100644 --- a/changes/ce/feat-10389.en.md +++ b/changes/ce/feat-10389.en.md @@ -1,2 +1,2 @@ Now `cluster.core_nodes` and `cluster.statics.seeds` are specified in the same way. -configure them as `["emqx1@127.0.0.1", "emqx2@127.0.0.1"]` and `"emqx1@127.0.0.1,emqx2@127.0.0.1"` has the same effect. +Configuring them as `["emqx1@127.0.0.1", "emqx2@127.0.0.1"]` and `"emqx1@127.0.0.1,emqx2@127.0.0.1"` has the same effect. diff --git a/mix.exs b/mix.exs index 36ea9e157..1dbde8980 100644 --- a/mix.exs +++ b/mix.exs @@ -72,7 +72,7 @@ defmodule EMQXUmbrella.MixProject do # in conflict by emqtt and hocon {:getopt, "1.0.2", override: true}, {:snabbkaffe, github: "kafka4beam/snabbkaffe", tag: "1.0.7", override: true}, - {:hocon, github: "emqx/hocon", tag: "0.39.1", override: true}, + {:hocon, github: "emqx/hocon", tag: "0.39.2", override: true}, {:emqx_http_lib, github: "emqx/emqx_http_lib", tag: "0.5.2", override: true}, {:esasl, github: "emqx/esasl", tag: "0.2.0"}, {:jose, github: "potatosalad/erlang-jose", tag: "1.11.2"}, diff --git a/rebar.config b/rebar.config index edb544298..de520f124 100644 --- a/rebar.config +++ b/rebar.config @@ -75,7 +75,7 @@ , {system_monitor, {git, "https://github.com/ieQu1/system_monitor", {tag, "3.0.3"}}} , {getopt, "1.0.2"} , {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.7"}}} - , {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.1"}}} + , {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.2"}}} , {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}} , {esasl, {git, "https://github.com/emqx/esasl", {tag, "0.2.0"}}} , {jose, {git, "https://github.com/potatosalad/erlang-jose", {tag, "1.11.2"}}} From 7eacbffae9a1c3c2536f57403ffbf71068c090b9 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Fri, 14 Apr 2023 17:16:50 +0800 Subject: [PATCH 027/263] chore: create dirs and README template for all ee bridges --- apps/emqx_bridge_cassandra/BSL.txt | 94 +++++++++++++++++++++++++++ apps/emqx_bridge_cassandra/README.md | 39 +++++++++++ apps/emqx_bridge_clickhouse/BSL.txt | 94 +++++++++++++++++++++++++++ apps/emqx_bridge_clickhouse/README.md | 37 +++++++++++ 
apps/emqx_bridge_dynamo/BSL.txt | 94 +++++++++++++++++++++++++++ apps/emqx_bridge_dynamo/README.md | 40 ++++++++++++ apps/emqx_bridge_gcp_pubsub/BSL.txt | 94 +++++++++++++++++++++++++++ apps/emqx_bridge_gcp_pubsub/README.md | 35 ++++++++++ apps/emqx_bridge_hstreamdb/BSL.txt | 94 +++++++++++++++++++++++++++ apps/emqx_bridge_hstreamdb/README.md | 38 +++++++++++ apps/emqx_bridge_influxdb/BSL.txt | 94 +++++++++++++++++++++++++++ apps/emqx_bridge_influxdb/README.md | 49 ++++++++++++++ apps/emqx_bridge_matrix/BSL.txt | 94 +++++++++++++++++++++++++++ apps/emqx_bridge_matrix/README.md | 36 ++++++++++ apps/emqx_bridge_mongodb/BSL.txt | 94 +++++++++++++++++++++++++++ apps/emqx_bridge_mongodb/README.md | 39 +++++++++++ apps/emqx_bridge_mysql/BSL.txt | 94 +++++++++++++++++++++++++++ apps/emqx_bridge_mysql/README.md | 36 ++++++++++ apps/emqx_bridge_pgsql/BSL.txt | 94 +++++++++++++++++++++++++++ apps/emqx_bridge_pgsql/README.md | 38 +++++++++++ apps/emqx_bridge_redis/BSL.txt | 94 +++++++++++++++++++++++++++ apps/emqx_bridge_redis/README.md | 37 +++++++++++ apps/emqx_bridge_rocketmq/BSL.txt | 94 +++++++++++++++++++++++++++ apps/emqx_bridge_rocketmq/README.md | 37 +++++++++++ apps/emqx_bridge_tdengine/BSL.txt | 94 +++++++++++++++++++++++++++ apps/emqx_bridge_tdengine/README.md | 39 +++++++++++ apps/emqx_bridge_timescale/BSL.txt | 94 +++++++++++++++++++++++++++ apps/emqx_bridge_timescale/README.md | 38 +++++++++++ 28 files changed, 1854 insertions(+) create mode 100644 apps/emqx_bridge_cassandra/BSL.txt create mode 100644 apps/emqx_bridge_cassandra/README.md create mode 100644 apps/emqx_bridge_clickhouse/BSL.txt create mode 100644 apps/emqx_bridge_clickhouse/README.md create mode 100644 apps/emqx_bridge_dynamo/BSL.txt create mode 100644 apps/emqx_bridge_dynamo/README.md create mode 100644 apps/emqx_bridge_gcp_pubsub/BSL.txt create mode 100644 apps/emqx_bridge_gcp_pubsub/README.md create mode 100644 apps/emqx_bridge_hstreamdb/BSL.txt create mode 100644 apps/emqx_bridge_hstreamdb/README.md create mode 100644 apps/emqx_bridge_influxdb/BSL.txt create mode 100644 apps/emqx_bridge_influxdb/README.md create mode 100644 apps/emqx_bridge_matrix/BSL.txt create mode 100644 apps/emqx_bridge_matrix/README.md create mode 100644 apps/emqx_bridge_mongodb/BSL.txt create mode 100644 apps/emqx_bridge_mongodb/README.md create mode 100644 apps/emqx_bridge_mysql/BSL.txt create mode 100644 apps/emqx_bridge_mysql/README.md create mode 100644 apps/emqx_bridge_pgsql/BSL.txt create mode 100644 apps/emqx_bridge_pgsql/README.md create mode 100644 apps/emqx_bridge_redis/BSL.txt create mode 100644 apps/emqx_bridge_redis/README.md create mode 100644 apps/emqx_bridge_rocketmq/BSL.txt create mode 100644 apps/emqx_bridge_rocketmq/README.md create mode 100644 apps/emqx_bridge_tdengine/BSL.txt create mode 100644 apps/emqx_bridge_tdengine/README.md create mode 100644 apps/emqx_bridge_timescale/BSL.txt create mode 100644 apps/emqx_bridge_timescale/README.md diff --git a/apps/emqx_bridge_cassandra/BSL.txt b/apps/emqx_bridge_cassandra/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_cassandra/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. 
+Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. 
To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_cassandra/README.md b/apps/emqx_bridge_cassandra/README.md new file mode 100644 index 000000000..fbf57660d --- /dev/null +++ b/apps/emqx_bridge_cassandra/README.md @@ -0,0 +1,39 @@ +# EMQX Cassandra Bridge + +[Apache Cassandra](https://github.com/apache/cassandra) is an open-source, distributed +NoSQL database management system that is designed to manage large amounts of structured +and semi-structured data across many commodity servers, providing high availability +with no single point of failure. +It is commonly used in web and mobile applications, IoT, and other systems that +require storing, querying, and analyzing large amounts of data. + +The application is used to connect EMQX and Cassandra. User can create a rule +and easily ingest IoT data into Cassandra by leveraging the +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documention + +- Refer to [Ingest data into Cassandra](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-cassa.html) + for how to use EMQX dashboard to ingest IoT data into Cassandra. +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). + diff --git a/apps/emqx_bridge_clickhouse/BSL.txt b/apps/emqx_bridge_clickhouse/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_clickhouse/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. 
+ +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. 
diff --git a/apps/emqx_bridge_clickhouse/README.md b/apps/emqx_bridge_clickhouse/README.md new file mode 100644 index 000000000..fb61fea9c --- /dev/null +++ b/apps/emqx_bridge_clickhouse/README.md @@ -0,0 +1,37 @@ +# EMQX ClickHouse Bridge + +[ClickHouse](https://github.com/ClickHouse/ClickHouse)is an open-source, column-based +database management system. It is designed for real-time processing of large volumes of +data and is known for its high performance and scalability. + +The application is used to connect EMQX and ClickHouse. +User can create a rule and easily ingest IoT data into ClickHouse by leveraging +the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documention + +- Refer to [Ingest data into ClickHouse](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-clickhouse.html) + for how to use EMQX dashboard to ingest IoT data into ClickHouse. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_dynamo/BSL.txt b/apps/emqx_bridge_dynamo/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_dynamo/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. 
+ +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_dynamo/README.md b/apps/emqx_bridge_dynamo/README.md new file mode 100644 index 000000000..deb9d1879 --- /dev/null +++ b/apps/emqx_bridge_dynamo/README.md @@ -0,0 +1,40 @@ +# EMQX DynamoDB Bridge + +[Dynamodb](https://aws.amazon.com/dynamodb/) is a high-performance NoSQL database +service provided by Amazon that's designed for scalability and low-latency access +to structured data. + +It's often used in applications that require fast and reliable access to data, +such as mobile, ad tech, and IoT. + +The application is used to connect EMQX and DynamoDB. +User can create a rule and easily ingest IoT data into DynamoDB by leveraging +the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). 
+ + +# Documention + +- Refer to [Ingest data into DynamoDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-dynamo.html) + for how to use EMQX dashboard to ingest IoT data into DynamoDB. + +- Refer to [Rules engine](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_gcp_pubsub/BSL.txt b/apps/emqx_bridge_gcp_pubsub/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_gcp_pubsub/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. 
+
+Any use of the Licensed Work in violation of this License will automatically
+terminate your rights under this License for the current and all other
+versions of the Licensed Work.
+
+This License does not grant you any right in any trademark or logo of
+Licensor or its affiliates (provided that you may use a trademark or logo of
+Licensor as expressly required by this License).
+
+TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
+AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
+EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
+TITLE.
+
+MariaDB hereby grants you permission to use this License’s text to license
+your works, and to refer to it using the trademark “Business Source License”,
+as long as you comply with the Covenants of Licensor below.
+
+Covenants of Licensor
+
+In consideration of the right to use this License’s text and the “Business
+Source License” name and trademark, Licensor covenants to MariaDB, and to all
+other recipients of the licensed work to be provided by Licensor:
+
+1. To specify as the Change License the GPL Version 2.0 or any later version,
+   or a license that is compatible with GPL Version 2.0 or a later version,
+   where “compatible” means that software provided under the Change License can
+   be included in a program with software provided under GPL Version 2.0 or a
+   later version. Licensor may specify additional Change Licenses without
+   limitation.
+
+2. To either: (a) specify an additional grant of rights to use that does not
+   impose any additional restriction on the right granted in this License, as
+   the Additional Use Grant; or (b) insert the text “None”.
+
+3. To specify a Change Date.
+
+4. Not to modify this License in any other way.
diff --git a/apps/emqx_bridge_gcp_pubsub/README.md b/apps/emqx_bridge_gcp_pubsub/README.md
new file mode 100644
index 000000000..2e876ef0f
--- /dev/null
+++ b/apps/emqx_bridge_gcp_pubsub/README.md
@@ -0,0 +1,35 @@
+# EMQX GCP Pub/Sub Bridge
+
+[Google Cloud Pub/Sub](https://cloud.google.com/pubsub) is a messaging service provided by Google Cloud Platform (GCP).
+
+The application is used to connect EMQX and GCP Pub/Sub.
+Users can create a rule and easily ingest IoT data into GCP Pub/Sub by leveraging
+the [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
+
+
+# Documentation
+
+- Refer to [Ingest data into GCP Pub/Sub](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-gcp-pubsub.html)
+  for how to use EMQX dashboard to ingest IoT data into GCP Pub/Sub.
+
+- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
+  for the EMQX rules engine introduction.
+
+
+# HTTP APIs
+
+- Several APIs are provided for bridge management, which includes create bridge,
+  update bridge, get bridge, stop or restart bridge and list bridges etc.
+
+  Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
+  for more detailed information.
+
+
+# Contributing
+
+Please see our [contributing.md](../../CONTRIBUTING.md).
+
+
+# License
+
+EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).
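To make the bridge-management HTTP APIs mentioned above slightly more concrete, here is a minimal sketch that lists the configured bridges over the REST API. The host, port, API key/secret, and the response fields printed at the end are assumptions for illustration only; the API docs linked above remain the authoritative description of the endpoints.

```python
# Minimal sketch: list configured bridges through the management REST API.
# Host, port and API key/secret are placeholders; consult the linked API docs
# for the authoritative request and response schema.
import base64
import json
import urllib.request

API = "http://localhost:18083/api/v5"        # assumed dashboard/API listener
KEY, SECRET = "my-api-key", "my-api-secret"  # assumed API key created in the dashboard
AUTH = base64.b64encode(f"{KEY}:{SECRET}".encode()).decode()

req = urllib.request.Request(f"{API}/bridges", headers={"Authorization": f"Basic {AUTH}"})
with urllib.request.urlopen(req) as resp:
    for bridge in json.loads(resp.read()):
        # field names assumed from the bridge listing response
        print(bridge.get("type"), bridge.get("name"), bridge.get("status"))
```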
diff --git a/apps/emqx_bridge_hstreamdb/BSL.txt b/apps/emqx_bridge_hstreamdb/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_hstreamdb/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. 
+
+MariaDB hereby grants you permission to use this License’s text to license
+your works, and to refer to it using the trademark “Business Source License”,
+as long as you comply with the Covenants of Licensor below.
+
+Covenants of Licensor
+
+In consideration of the right to use this License’s text and the “Business
+Source License” name and trademark, Licensor covenants to MariaDB, and to all
+other recipients of the licensed work to be provided by Licensor:
+
+1. To specify as the Change License the GPL Version 2.0 or any later version,
+   or a license that is compatible with GPL Version 2.0 or a later version,
+   where “compatible” means that software provided under the Change License can
+   be included in a program with software provided under GPL Version 2.0 or a
+   later version. Licensor may specify additional Change Licenses without
+   limitation.
+
+2. To either: (a) specify an additional grant of rights to use that does not
+   impose any additional restriction on the right granted in this License, as
+   the Additional Use Grant; or (b) insert the text “None”.
+
+3. To specify a Change Date.
+
+4. Not to modify this License in any other way.
diff --git a/apps/emqx_bridge_hstreamdb/README.md b/apps/emqx_bridge_hstreamdb/README.md
new file mode 100644
index 000000000..0201fa087
--- /dev/null
+++ b/apps/emqx_bridge_hstreamdb/README.md
@@ -0,0 +1,38 @@
+# EMQX HStreamDB Bridge
+
+[HStreamDB](https://hstream.io/) is a streaming database purpose-built to ingest,
+store, process, and analyze massive data streams. It is a modern data infrastructure
+that unifies messaging, stream processing, and storage to help get value out of
+your data in real-time.
+
+The application is used to connect EMQX and HStreamDB.
+Users can create a rule and easily ingest IoT data into HStreamDB by leveraging
+the [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
+
+
+# Documentation
+
+- Refer to [Ingest data into HStreamDB](todo)
+  for how to use EMQX dashboard to ingest IoT data into HStreamDB.
+
+- Refer to [Rules engine](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
+  for the EMQX rules engine introduction.
+
+
+# HTTP APIs
+
+- Several APIs are provided for bridge management, which includes create bridge,
+  update bridge, get bridge, stop or restart bridge and list bridges etc.
+
+  Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges)
+  for more detailed information.
+
+
+# Contributing
+
+Please see our [contributing.md](../../CONTRIBUTING.md).
+
+
+# License
+
+EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).
diff --git a/apps/emqx_bridge_influxdb/BSL.txt b/apps/emqx_bridge_influxdb/BSL.txt
new file mode 100644
index 000000000..0acc0e696
--- /dev/null
+++ b/apps/emqx_bridge_influxdb/BSL.txt
@@ -0,0 +1,94 @@
+Business Source License 1.1
+
+Licensor:             Hangzhou EMQ Technologies Co., Ltd.
+Licensed Work:        EMQX Enterprise Edition
+                      The Licensed Work is (c) 2023
+                      Hangzhou EMQ Technologies Co., Ltd.
+Additional Use Grant: Students and educators are granted right to copy,
+                      modify, and create derivative work for research
+                      or education.
+Change Date:          2027-02-01
+Change License:       Apache License, Version 2.0
+
+For information about alternative licensing arrangements for the Software,
+please contact Licensor: https://www.emqx.com/en/contact
+
+Notice
+
+The Business Source License (this document, or the “License”) is not an Open
+Source license.
However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. 
To either: (a) specify an additional grant of rights to use that does not
+   impose any additional restriction on the right granted in this License, as
+   the Additional Use Grant; or (b) insert the text “None”.
+
+3. To specify a Change Date.
+
+4. Not to modify this License in any other way.
diff --git a/apps/emqx_bridge_influxdb/README.md b/apps/emqx_bridge_influxdb/README.md
new file mode 100644
index 000000000..e9ad733f9
--- /dev/null
+++ b/apps/emqx_bridge_influxdb/README.md
@@ -0,0 +1,49 @@
+# EMQX InfluxDB Bridge
+
+[InfluxDB](https://github.com/influxdata/influxdb) is an open-source time-series
+database that is optimized for storing, retrieving, and querying large volumes of
+time-stamped data.
+It is commonly used for monitoring and analysis of metrics, events, and real-time
+analytics.
+InfluxDB is designed to be fast, efficient, and scalable, and it has a SQL-like
+query language that makes it easy to extract insights from time-series data.
+
+The application is used to connect EMQX and InfluxDB. Users can create a rule and
+easily ingest IoT data into InfluxDB by leveraging the
+[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html).
+
+
+# Documentation
+
+- Refer to [Ingest data into InfluxDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-influxdb.html)
+  for how to use EMQX dashboard to ingest IoT data into InfluxDB.
+
+- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html)
+  for the EMQX rules engine introduction.
+
+
+# HTTP APIs
+
+- Several APIs are provided for bridge management, which includes create bridge,
+  update bridge, get bridge, stop or restart bridge and list bridges etc.
+
+  Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information.
+
+- [Create bridge API doc](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges/paths/~1bridges/post)
+  lists the required parameters for creating an InfluxDB bridge.
+  There are two types of InfluxDB API (`v1` and `v2`); please select the right
+  version of InfluxDB. Below are several important parameters for `v1`:
+  - `server`: The IPv4 or IPv6 address or the hostname to connect to.
+  - `database`: InfluxDB database name.
+  - `write_syntax`: Configuration of the InfluxDB line protocol for writing data points. It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point; placeholders are supported.
+
+
+# Contributing
+
+Please see our [contributing.md](../../CONTRIBUTING.md).
+
+
+# License
+
+EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).
+
diff --git a/apps/emqx_bridge_matrix/BSL.txt b/apps/emqx_bridge_matrix/BSL.txt
new file mode 100644
index 000000000..0acc0e696
--- /dev/null
+++ b/apps/emqx_bridge_matrix/BSL.txt
@@ -0,0 +1,94 @@
+Business Source License 1.1
+
+Licensor:             Hangzhou EMQ Technologies Co., Ltd.
+Licensed Work:        EMQX Enterprise Edition
+                      The Licensed Work is (c) 2023
+                      Hangzhou EMQ Technologies Co., Ltd.
+Additional Use Grant: Students and educators are granted right to copy,
+                      modify, and create derivative work for research
+                      or education.
+Change Date:          2027-02-01
+Change License:       Apache License, Version 2.0
+
+For information about alternative licensing arrangements for the Software,
+please contact Licensor: https://www.emqx.com/en/contact
+
+Notice
+
+The Business Source License (this document, or the “License”) is not an Open
+Source license.
However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. 
To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_matrix/README.md b/apps/emqx_bridge_matrix/README.md new file mode 100644 index 000000000..c0098e3f6 --- /dev/null +++ b/apps/emqx_bridge_matrix/README.md @@ -0,0 +1,36 @@ +# EMQX MatrixDB Bridge + +[MatrixDB](http://matrixdb.univ-lyon1.fr/) is a biological database focused on +molecular interactions between extracellular proteins and polysaccharides. + +The application is used to connect EMQX and MatrixDB. +User can create a rule and easily ingest IoT data into MatrixDB by leveraging +the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documention + +- Refer to [Ingest data into MatrixDB](todo) + for how to use EMQX dashboard to ingest IoT data into MatrixDB. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_mongodb/BSL.txt b/apps/emqx_bridge_mongodb/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_mongodb/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. 
+ +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_mongodb/README.md b/apps/emqx_bridge_mongodb/README.md new file mode 100644 index 000000000..4608b2e84 --- /dev/null +++ b/apps/emqx_bridge_mongodb/README.md @@ -0,0 +1,39 @@ +# EMQX MongoDB Bridge + +[MongoDB](https://github.com/mongodb/mongo) is a source-available cross-platform +document-oriented database. It is a NoSQL database that stores flexible JSON-like +documents for faster iteration and better data organization. +It provides high availability and scaling with its built-in replication and sharding +features, and is used in a variety of industries + +The application is used to connect EMQX and MongoDB. +User can create a rule and easily ingest IoT data into MongoDB by leveraging +the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). 
+ + +# Documention + +- Refer to [Ingest data into MongoDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-mongodb.html) + for how to use EMQX dashboard to ingest IoT data into MongoDB. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_mysql/BSL.txt b/apps/emqx_bridge_mysql/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_mysql/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. 
+ +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_mysql/README.md b/apps/emqx_bridge_mysql/README.md new file mode 100644 index 000000000..11d854d2d --- /dev/null +++ b/apps/emqx_bridge_mysql/README.md @@ -0,0 +1,36 @@ +# EMQX MySQL Bridge + +[MySQL](https://github.com/MySQL/MySQL) is a popular open-source relational database +management system. + +The application is used to connect EMQX and MySQL. +User can create a rule and easily ingest IoT data into MySQL by leveraging +the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documention + +- Refer to [Ingest data into MySQL](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-mysql.html) + for how to use EMQX dashboard to ingest IoT data into MySQL. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_pgsql/BSL.txt b/apps/emqx_bridge_pgsql/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_pgsql/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. 
+Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. 
To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_pgsql/README.md b/apps/emqx_bridge_pgsql/README.md new file mode 100644 index 000000000..3cc33e8ed --- /dev/null +++ b/apps/emqx_bridge_pgsql/README.md @@ -0,0 +1,38 @@ +# EMQX PostgreSQL Bridge + +[PostgreSQL](https://github.com/PostgreSQL/PostgreSQL) is an open-source relational +database management system (RDBMS) that uses and extends the SQL language. +It is known for its reliability, data integrity, and advanced features such as +support for JSON, XML, and other data formats. + +The application is used to connect EMQX and PostgreSQL. +User can create a rule and easily ingest IoT data into PostgreSQL by leveraging +the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documention + +- Refer to [Ingest data into PostgreSQL](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-pgsql.html) + for how to use EMQX dashboard to ingest IoT data into PostgreSQL. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_redis/BSL.txt b/apps/emqx_bridge_redis/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_redis/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. 
+ +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. 
diff --git a/apps/emqx_bridge_redis/README.md b/apps/emqx_bridge_redis/README.md new file mode 100644 index 000000000..6a2f4a46e --- /dev/null +++ b/apps/emqx_bridge_redis/README.md @@ -0,0 +1,37 @@ +# EMQX Redis Bridge + +[Redis](https://github.com/redis/redis) is an in-memory data structure store, +used as a distributed, in-memory key–value database, cache and message broker, +with optional durability. + +The application is used to connect EMQX and Redis. +User can create a rule and easily ingest IoT data into Redis by leveraging +the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documention + +- Refer to [Ingest data into Redis](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-redis.html) + for how to use EMQX dashboard to ingest IoT data into Redis. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_rocketmq/BSL.txt b/apps/emqx_bridge_rocketmq/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_rocketmq/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. 
+ +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_rocketmq/README.md b/apps/emqx_bridge_rocketmq/README.md new file mode 100644 index 000000000..b2337fcef --- /dev/null +++ b/apps/emqx_bridge_rocketmq/README.md @@ -0,0 +1,37 @@ +# EMQX RocketMQ Bridge + +[RocketMQ](https://github.com/apache/rocketmq) is a distributed messaging and +streaming platform developed by the Apache Software Foundation. +It provides reliable, scalable, and high-throughput messaging services for modern cloud-native applications + +The application is used to connect EMQX and RocketMQ. +User can create a rule and easily ingest IoT data into RocketMQ by leveraging +the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documention + +- Refer to [Ingest data into RocketMQ](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-rocketmq.html) + for how to use EMQX dashboard to ingest IoT data into RocketMQ. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. 
+ + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_tdengine/BSL.txt b/apps/emqx_bridge_tdengine/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_tdengine/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). 
+ +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_tdengine/README.md b/apps/emqx_bridge_tdengine/README.md new file mode 100644 index 000000000..65c3d415c --- /dev/null +++ b/apps/emqx_bridge_tdengine/README.md @@ -0,0 +1,39 @@ +# EMQX TDEngine Bridge + +[TDEngine](https://github.com/taosdata/TDengine) is an open-source, cloud-native +time series database (TSDB) optimized for Internet of Things (IoT), Connected Cars, +and Industrial IoT. +It enables efficient, real-time ingestion, processing, and monitoring of petabytes +of data per day, generated by billions of sensors and data collectors. + +The application is used to connect EMQX and TDEngine. +User can create a rule and easily ingest IoT data into TDEngine by leveraging +the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documention + +- Refer to [Ingest data into TDEngine](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-tdengine.html) + for how to use EMQX dashboard to ingest IoT data into TDEngine. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_timescale/BSL.txt b/apps/emqx_bridge_timescale/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_timescale/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. 
+Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. 
To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_timescale/README.md b/apps/emqx_bridge_timescale/README.md new file mode 100644 index 000000000..c8bcde173 --- /dev/null +++ b/apps/emqx_bridge_timescale/README.md @@ -0,0 +1,38 @@ +# EMQX TimescaleDB Bridge + +[TimescaleDB](https://github.com/timescaleDB/timescaleDB) is an open-source database +designed to make SQL scalable for time-series data. +It is engineered up from PostgreSQL and packaged as a PostgreSQL extension, +providing automatic partitioning across time and space (partitioning key), as well as full SQL support. + +The application is used to connect EMQX and TimescaleDB. +User can create a rule and easily ingest IoT data into TimescaleDB by leveraging +the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documention + +- Refer to [Ingest data into TimescaleDB](todo) + for how to use EMQX dashboard to ingest IoT data into TimescaleDB. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). 
From ec8d8b805fa3d0efd2bbf030a236ea231577976c Mon Sep 17 00:00:00 2001 From: JianBo He Date: Sun, 16 Apr 2023 14:32:24 +0800 Subject: [PATCH 028/263] chore: add ee bridge apps --- .../src/emqx_bridge_cassandra.app.src | 9 +++++++++ .../src/emqx_bridge_clickhouse.app.src | 9 +++++++++ apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src | 9 +++++++++ .../src/emqx_bridge_gcp_pubsub.app.src | 9 +++++++++ .../src/emqx_bridge_hstreamdb.app.src | 9 +++++++++ .../src/emqx_bridge_influxdb.app.src | 9 +++++++++ apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src | 9 +++++++++ apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src | 9 +++++++++ apps/emqx_bridge_mysql/src/emqx_bridge_mysql.app.src | 9 +++++++++ apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src | 9 +++++++++ apps/emqx_bridge_redis/src/emqx_bridge_redis.app.src | 9 +++++++++ .../src/emqx_bridge_rocketmq.app.src | 9 +++++++++ .../src/emqx_bridge_tdengine.app.src | 9 +++++++++ .../src/emqx_bridge_timescale.app.src | 9 +++++++++ 14 files changed, 126 insertions(+) create mode 100644 apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src create mode 100644 apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src create mode 100644 apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src create mode 100644 apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src create mode 100644 apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.app.src create mode 100644 apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.app.src create mode 100644 apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src create mode 100644 apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src create mode 100644 apps/emqx_bridge_mysql/src/emqx_bridge_mysql.app.src create mode 100644 apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src create mode 100644 apps/emqx_bridge_redis/src/emqx_bridge_redis.app.src create mode 100644 apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src create mode 100644 apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src create mode 100644 apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src new file mode 100644 index 000000000..1ed65ea9f --- /dev/null +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_cassandra, [ + {description, "EMQX Enterprise Cassandra Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src new file mode 100644 index 000000000..a0b409d5b --- /dev/null +++ b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_clickhouse, [ + {description, "EMQX Enterprise ClickHouse Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. 
diff --git a/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src new file mode 100644 index 000000000..51c717220 --- /dev/null +++ b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_dynamo, [ + {description, "EMQX Enterprise Dynamo Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src new file mode 100644 index 000000000..0e1427888 --- /dev/null +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_gcp_pubsub, [ + {description, "EMQX Enterprise GCP Pub/Sub Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.app.src b/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.app.src new file mode 100644 index 000000000..1cb3742b3 --- /dev/null +++ b/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_hstreamdb, [ + {description, "EMQX Enterprise HStreamDB Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.app.src b/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.app.src new file mode 100644 index 000000000..5443417c3 --- /dev/null +++ b/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_influxdb, [ + {description, "EMQX Enterprise InfluxDB Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src new file mode 100644 index 000000000..e2a17e070 --- /dev/null +++ b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_matrix, [ + {description, "EMQX Enterprise MatrixDB Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src new file mode 100644 index 000000000..008a9e164 --- /dev/null +++ b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_mongodb, [ + {description, "EMQX Enterprise MongoDB Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_mysql/src/emqx_bridge_mysql.app.src b/apps/emqx_bridge_mysql/src/emqx_bridge_mysql.app.src new file mode 100644 index 000000000..2e36587a7 --- /dev/null +++ b/apps/emqx_bridge_mysql/src/emqx_bridge_mysql.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_mysql, [ + {description, "EMQX Enterprise MySQL Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. 
diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src new file mode 100644 index 000000000..c695283f3 --- /dev/null +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_pgsql, [ + {description, "EMQX Enterprise PostgreSQL Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_redis/src/emqx_bridge_redis.app.src b/apps/emqx_bridge_redis/src/emqx_bridge_redis.app.src new file mode 100644 index 000000000..6b57c6cd7 --- /dev/null +++ b/apps/emqx_bridge_redis/src/emqx_bridge_redis.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_redis, [ + {description, "EMQX Enterprise Redis Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src new file mode 100644 index 000000000..202bb38f2 --- /dev/null +++ b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_rocketmq, [ + {description, "EMQX Enterprise RcoketMQ Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src new file mode 100644 index 000000000..05e8a6f9f --- /dev/null +++ b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_tdengine, [ + {description, "EMQX Enterprise TDEngine Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src new file mode 100644 index 000000000..5b4431f73 --- /dev/null +++ b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_timescale, [ + {description, "EMQX Enterprise TimescaleDB Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. 
From 862817408af0f7741aa3504c3b63db0a257d4be0 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Mon, 17 Apr 2023 14:37:25 +0800 Subject: [PATCH 029/263] chore: ignore no suites tests --- Makefile | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index fe75b01bc..0403a07e9 100644 --- a/Makefile +++ b/Makefile @@ -89,12 +89,17 @@ APPS=$(shell $(SCRIPTS)/find-apps.sh) .PHONY: $(APPS:%=%-ct) define gen-app-ct-target $1-ct: $(REBAR) - @$(SCRIPTS)/pre-compile.sh $(PROFILE) - @ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \ - --readable=$(CT_READABLE) \ - --name $(CT_NODE_NAME) \ - --cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1) \ - --suite $(shell $(SCRIPTS)/find-suites.sh $1) + $(eval SUITES := $(shell $(SCRIPTS)/find-suites.sh $1)) +ifneq ($(SUITES),) + @$(SCRIPTS)/pre-compile.sh $(PROFILE) + @ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \ + --readable=$(CT_READABLE) \ + --name $(CT_NODE_NAME) \ + --cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1) \ + --suite $(SUITES) +else + @echo 'No suites found for $1' +endif endef $(foreach app,$(APPS),$(eval $(call gen-app-ct-target,$(app)))) From 8cb9389d215de0f69a2b5a1fa4c165f25668aa13 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Tue, 18 Apr 2023 09:32:35 +0800 Subject: [PATCH 030/263] chore: apply suggestions from code review Co-authored-by: Thales Macedo Garitezi --- apps/emqx_bridge_cassandra/README.md | 2 +- apps/emqx_bridge_gcp_pubsub/README.md | 2 +- apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src | 2 +- apps/emqx_bridge_timescale/README.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/apps/emqx_bridge_cassandra/README.md b/apps/emqx_bridge_cassandra/README.md index fbf57660d..30dd9b6d5 100644 --- a/apps/emqx_bridge_cassandra/README.md +++ b/apps/emqx_bridge_cassandra/README.md @@ -12,7 +12,7 @@ and easily ingest IoT data into Cassandra by leveraging the [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). -# Documention +# Documentation - Refer to [Ingest data into Cassandra](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-cassa.html) for how to use EMQX dashboard to ingest IoT data into Cassandra. diff --git a/apps/emqx_bridge_gcp_pubsub/README.md b/apps/emqx_bridge_gcp_pubsub/README.md index 2e876ef0f..aa62ca0db 100644 --- a/apps/emqx_bridge_gcp_pubsub/README.md +++ b/apps/emqx_bridge_gcp_pubsub/README.md @@ -4,7 +4,7 @@ The application is used to connect EMQX and GCP Pub/Sub. User can create a rule and easily ingest IoT data into GCP Pub/Sub by leveraging -the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). +the [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). 
# Documention diff --git a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src index 202bb38f2..e1916034c 100644 --- a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src +++ b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src @@ -1,5 +1,5 @@ {application, emqx_bridge_rocketmq, [ - {description, "EMQX Enterprise RcoketMQ Bridge"}, + {description, "EMQX Enterprise RocketMQ Bridge"}, {vsn, "0.1.0"}, {registered, []}, {applications, [kernel, stdlib]}, diff --git a/apps/emqx_bridge_timescale/README.md b/apps/emqx_bridge_timescale/README.md index c8bcde173..ac631c003 100644 --- a/apps/emqx_bridge_timescale/README.md +++ b/apps/emqx_bridge_timescale/README.md @@ -12,7 +12,7 @@ the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/ # Documention -- Refer to [Ingest data into TimescaleDB](todo) +- Refer to [Ingest data into TimescaleDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-timescaledb.html) for how to use EMQX dashboard to ingest IoT data into TimescaleDB. - Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) From acb30c79573c9907acf1f161d015668d9baa7cbb Mon Sep 17 00:00:00 2001 From: zhongwencool Date: Tue, 18 Apr 2023 10:47:07 +0800 Subject: [PATCH 031/263] chore: update changes/ce/feat-10389.en.md Co-authored-by: JianBo He --- changes/ce/feat-10389.en.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/changes/ce/feat-10389.en.md b/changes/ce/feat-10389.en.md index d6b8d236f..b656f5362 100644 --- a/changes/ce/feat-10389.en.md +++ b/changes/ce/feat-10389.en.md @@ -1,2 +1,2 @@ -Now `cluster.core_nodes` and `cluster.statics.seeds` are specified in the same way. -Configuring them as `["emqx1@127.0.0.1", "emqx2@127.0.0.1"]` and `"emqx1@127.0.0.1,emqx2@127.0.0.1"` has the same effect. +Unify the config formats for `cluster.core_nodes` and `cluster.statics.seeds`. +Now they both support formats in arrays `["emqx1@127.0.0.1", "emqx2@127.0.0.1"]` or semicolon-separated strings `"emqx1@127.0.0.1,emqx2@127.0.0.1"`. From 77d300482cdf5144e3fa2db67a0ea45e412a1a43 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Tue, 18 Apr 2023 10:49:51 +0800 Subject: [PATCH 032/263] chore: update changes/ce/feat-10389.en.md --- changes/ce/feat-10389.en.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changes/ce/feat-10389.en.md b/changes/ce/feat-10389.en.md index b656f5362..3789e80ae 100644 --- a/changes/ce/feat-10389.en.md +++ b/changes/ce/feat-10389.en.md @@ -1,2 +1,2 @@ Unify the config formats for `cluster.core_nodes` and `cluster.statics.seeds`. -Now they both support formats in arrays `["emqx1@127.0.0.1", "emqx2@127.0.0.1"]` or semicolon-separated strings `"emqx1@127.0.0.1,emqx2@127.0.0.1"`. +Now they both support formats in array `["emqx1@127.0.0.1", "emqx2@127.0.0.1"]` or semicolon-separated string `"emqx1@127.0.0.1,emqx2@127.0.0.1"`. 
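For illustration, a minimal `emqx.conf` sketch of the two equivalent forms described in the changelog above (the node names are the changelog's own placeholders, and `cluster.static.seeds` is assumed to be the path the changelog writes as `cluster.statics.seeds`):

```
cluster {
  ## array form
  core_nodes = ["emqx1@127.0.0.1", "emqx2@127.0.0.1"]

  ## equivalent string form (same placeholder nodes), shown here for the static discovery seeds
  static.seeds = "emqx1@127.0.0.1,emqx2@127.0.0.1"
}
```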
From c53ccfea61e6a6e17b33f9b23aef9489d2932dcf Mon Sep 17 00:00:00 2001 From: JianBo He Date: Tue, 18 Apr 2023 09:36:14 +0800 Subject: [PATCH 033/263] chore: fix typos --- apps/emqx_bridge_cassandra/README.md | 2 +- apps/emqx_bridge_clickhouse/README.md | 6 +++--- apps/emqx_bridge_dynamo/README.md | 4 ++-- apps/emqx_bridge_gcp_pubsub/README.md | 7 ++++--- apps/emqx_bridge_hstreamdb/README.md | 8 ++++---- apps/emqx_bridge_influxdb/README.md | 4 ++-- apps/emqx_bridge_matrix/README.md | 4 ++-- apps/emqx_bridge_mongodb/README.md | 4 ++-- apps/emqx_bridge_mysql/README.md | 4 ++-- apps/emqx_bridge_pgsql/README.md | 4 ++-- apps/emqx_bridge_redis/README.md | 4 ++-- apps/emqx_bridge_rocketmq/README.md | 4 ++-- apps/emqx_bridge_tdengine/README.md | 4 ++-- apps/emqx_bridge_timescale/README.md | 4 ++-- apps/emqx_machine/README.md | 2 +- 15 files changed, 33 insertions(+), 32 deletions(-) diff --git a/apps/emqx_bridge_cassandra/README.md b/apps/emqx_bridge_cassandra/README.md index 30dd9b6d5..d26bd2fbb 100644 --- a/apps/emqx_bridge_cassandra/README.md +++ b/apps/emqx_bridge_cassandra/README.md @@ -8,7 +8,7 @@ It is commonly used in web and mobile applications, IoT, and other systems that require storing, querying, and analyzing large amounts of data. The application is used to connect EMQX and Cassandra. User can create a rule -and easily ingest IoT data into Cassandra by leveraging the +and easily ingest IoT data into Cassandra by leveraging [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). diff --git a/apps/emqx_bridge_clickhouse/README.md b/apps/emqx_bridge_clickhouse/README.md index fb61fea9c..ff870e87d 100644 --- a/apps/emqx_bridge_clickhouse/README.md +++ b/apps/emqx_bridge_clickhouse/README.md @@ -1,15 +1,15 @@ # EMQX ClickHouse Bridge -[ClickHouse](https://github.com/ClickHouse/ClickHouse)is an open-source, column-based +[ClickHouse](https://github.com/ClickHouse/ClickHouse) is an open-source, column-based database management system. It is designed for real-time processing of large volumes of data and is known for its high performance and scalability. The application is used to connect EMQX and ClickHouse. User can create a rule and easily ingest IoT data into ClickHouse by leveraging -the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). -# Documention +# Documentation - Refer to [Ingest data into ClickHouse](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-clickhouse.html) for how to use EMQX dashboard to ingest IoT data into ClickHouse. diff --git a/apps/emqx_bridge_dynamo/README.md b/apps/emqx_bridge_dynamo/README.md index deb9d1879..48dcb781d 100644 --- a/apps/emqx_bridge_dynamo/README.md +++ b/apps/emqx_bridge_dynamo/README.md @@ -9,10 +9,10 @@ such as mobile, ad tech, and IoT. The application is used to connect EMQX and DynamoDB. User can create a rule and easily ingest IoT data into DynamoDB by leveraging -the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). -# Documention +# Documentation - Refer to [Ingest data into DynamoDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-dynamo.html) for how to use EMQX dashboard to ingest IoT data into DynamoDB. 
diff --git a/apps/emqx_bridge_gcp_pubsub/README.md b/apps/emqx_bridge_gcp_pubsub/README.md index aa62ca0db..e33c5ab15 100644 --- a/apps/emqx_bridge_gcp_pubsub/README.md +++ b/apps/emqx_bridge_gcp_pubsub/README.md @@ -1,13 +1,14 @@ # EMQX GCP Pub/Sub Bridge -[Google Cloud Pub/Sub](https://cloud.google.com/pubsub) is a messaging service provided by Google Cloud Platform (GCP). +[Google Cloud Pub/Sub](https://cloud.google.com/pubsub) is a messaging service +provided by Google Cloud Platform (GCP). The application is used to connect EMQX and GCP Pub/Sub. User can create a rule and easily ingest IoT data into GCP Pub/Sub by leveraging -the [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). -# Documention +# Documentation - Refer to [Ingest data into GCP Pub/Sub](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-gcp-pubsub.html) for how to use EMQX dashboard to ingest IoT data into GCP Pub/Sub. diff --git a/apps/emqx_bridge_hstreamdb/README.md b/apps/emqx_bridge_hstreamdb/README.md index 0201fa087..3a7c6b49d 100644 --- a/apps/emqx_bridge_hstreamdb/README.md +++ b/apps/emqx_bridge_hstreamdb/README.md @@ -1,21 +1,21 @@ # EMQX HStreamDB Bridge -[HStreamDB](https://hstream.io/)is streaming database purpose-built to ingest, +[HStreamDB](https://hstream.io/) is streaming database purpose-built to ingest, store, process, and analyze massive data streams. It is a modern data infrastructure that unifies messaging, stream processing, and storage to help get value out of your data in real-time. The application is used to connect EMQX and HStreamDB. User can create a rule and easily ingest IoT data into HStreamDB by leveraging -the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). -# Documention +# Documentation - Refer to [Ingest data into HStreamDB](todo) for how to use EMQX dashboard to ingest IoT data into HStreamDB. -- Refer to [Rules engine](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) for the EMQX rules engine introduction. diff --git a/apps/emqx_bridge_influxdb/README.md b/apps/emqx_bridge_influxdb/README.md index e9ad733f9..fe0f14600 100644 --- a/apps/emqx_bridge_influxdb/README.md +++ b/apps/emqx_bridge_influxdb/README.md @@ -9,11 +9,11 @@ InfluxDB is designed to be fast, efficient, and scalable, and it has a SQL-like query language that makes it easy to extract insights from time-series data. The application is used to connect EMQX and InfluxDB. User can create a rule and -easily ingest IoT data into InfluxDB by leveraging the +easily ingest IoT data into InfluxDB by leveraging [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). -# Documention +# Documentation - Refer to [Ingest data into InfluxDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-influxdb.html) for how to use EMQX dashboard to ingest IoT data into InfluxDB. diff --git a/apps/emqx_bridge_matrix/README.md b/apps/emqx_bridge_matrix/README.md index c0098e3f6..976120ffe 100644 --- a/apps/emqx_bridge_matrix/README.md +++ b/apps/emqx_bridge_matrix/README.md @@ -5,10 +5,10 @@ molecular interactions between extracellular proteins and polysaccharides. The application is used to connect EMQX and MatrixDB. 
User can create a rule and easily ingest IoT data into MatrixDB by leveraging -the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). -# Documention +# Documentation - Refer to [Ingest data into MatrixDB](todo) for how to use EMQX dashboard to ingest IoT data into MatrixDB. diff --git a/apps/emqx_bridge_mongodb/README.md b/apps/emqx_bridge_mongodb/README.md index 4608b2e84..088c8467f 100644 --- a/apps/emqx_bridge_mongodb/README.md +++ b/apps/emqx_bridge_mongodb/README.md @@ -8,10 +8,10 @@ features, and is used in a variety of industries The application is used to connect EMQX and MongoDB. User can create a rule and easily ingest IoT data into MongoDB by leveraging -the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). -# Documention +# Documentation - Refer to [Ingest data into MongoDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-mongodb.html) for how to use EMQX dashboard to ingest IoT data into MongoDB. diff --git a/apps/emqx_bridge_mysql/README.md b/apps/emqx_bridge_mysql/README.md index 11d854d2d..73f6987b6 100644 --- a/apps/emqx_bridge_mysql/README.md +++ b/apps/emqx_bridge_mysql/README.md @@ -5,10 +5,10 @@ management system. The application is used to connect EMQX and MySQL. User can create a rule and easily ingest IoT data into MySQL by leveraging -the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). -# Documention +# Documentation - Refer to [Ingest data into MySQL](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-mysql.html) for how to use EMQX dashboard to ingest IoT data into MySQL. diff --git a/apps/emqx_bridge_pgsql/README.md b/apps/emqx_bridge_pgsql/README.md index 3cc33e8ed..fc0bd3c6f 100644 --- a/apps/emqx_bridge_pgsql/README.md +++ b/apps/emqx_bridge_pgsql/README.md @@ -7,10 +7,10 @@ support for JSON, XML, and other data formats. The application is used to connect EMQX and PostgreSQL. User can create a rule and easily ingest IoT data into PostgreSQL by leveraging -the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). -# Documention +# Documentation - Refer to [Ingest data into PostgreSQL](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-pgsql.html) for how to use EMQX dashboard to ingest IoT data into PostgreSQL. diff --git a/apps/emqx_bridge_redis/README.md b/apps/emqx_bridge_redis/README.md index 6a2f4a46e..73ec41f07 100644 --- a/apps/emqx_bridge_redis/README.md +++ b/apps/emqx_bridge_redis/README.md @@ -6,10 +6,10 @@ with optional durability. The application is used to connect EMQX and Redis. User can create a rule and easily ingest IoT data into Redis by leveraging -the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). -# Documention +# Documentation - Refer to [Ingest data into Redis](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-redis.html) for how to use EMQX dashboard to ingest IoT data into Redis. 
diff --git a/apps/emqx_bridge_rocketmq/README.md b/apps/emqx_bridge_rocketmq/README.md index b2337fcef..252e6beac 100644 --- a/apps/emqx_bridge_rocketmq/README.md +++ b/apps/emqx_bridge_rocketmq/README.md @@ -6,10 +6,10 @@ It provides reliable, scalable, and high-throughput messaging services for moder The application is used to connect EMQX and RocketMQ. User can create a rule and easily ingest IoT data into RocketMQ by leveraging -the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). -# Documention +# Documentation - Refer to [Ingest data into RocketMQ](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-rocketmq.html) for how to use EMQX dashboard to ingest IoT data into RocketMQ. diff --git a/apps/emqx_bridge_tdengine/README.md b/apps/emqx_bridge_tdengine/README.md index 65c3d415c..25faf4c14 100644 --- a/apps/emqx_bridge_tdengine/README.md +++ b/apps/emqx_bridge_tdengine/README.md @@ -8,10 +8,10 @@ of data per day, generated by billions of sensors and data collectors. The application is used to connect EMQX and TDEngine. User can create a rule and easily ingest IoT data into TDEngine by leveraging -the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). -# Documention +# Documentation - Refer to [Ingest data into TDEngine](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-tdengine.html) for how to use EMQX dashboard to ingest IoT data into TDEngine. diff --git a/apps/emqx_bridge_timescale/README.md b/apps/emqx_bridge_timescale/README.md index ac631c003..96f70f847 100644 --- a/apps/emqx_bridge_timescale/README.md +++ b/apps/emqx_bridge_timescale/README.md @@ -7,10 +7,10 @@ providing automatic partitioning across time and space (partitioning key), as we The application is used to connect EMQX and TimescaleDB. User can create a rule and easily ingest IoT data into TimescaleDB by leveraging -the EMQX [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). -# Documention +# Documentation - Refer to [Ingest data into TimescaleDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-timescaledb.html) for how to use EMQX dashboard to ingest IoT data into TimescaleDB. diff --git a/apps/emqx_machine/README.md b/apps/emqx_machine/README.md index 8c2bb6391..a8221ba73 100644 --- a/apps/emqx_machine/README.md +++ b/apps/emqx_machine/README.md @@ -31,7 +31,7 @@ It helps to shut down EMQX broker gracefully when it receives `SIGTERM` signal. Currently `emqx_machine` boots the business apps before starting autocluster, so a fresh node joining the cluster actually starts business application twice: first in the singleton mode, and then in clustered mode. 
-# Documention links +# Documentation links Configuration: [node.global_gc_interval](https://www.emqx.io/docs/en/v5.0/configuration/configuration-manual.html#node-and-cookie) From a947df1ea3312b188ec11d2bce4ccc507e835227 Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Tue, 11 Apr 2023 13:59:08 +0200 Subject: [PATCH 034/263] fix(emqx_management): Ignore results from the nodes that are down --- .../emqx_management/src/emqx_mgmt_api_stats.erl | 17 +++++++++++++---- changes/ce/fix-10369.en.md | 6 ++++++ 2 files changed, 19 insertions(+), 4 deletions(-) create mode 100644 changes/ce/fix-10369.en.md diff --git a/apps/emqx_management/src/emqx_mgmt_api_stats.erl b/apps/emqx_management/src/emqx_mgmt_api_stats.erl index 1e752aaac..080a37b4d 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_stats.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_stats.erl @@ -127,10 +127,19 @@ list(get, #{query_string := Qs}) -> true -> {200, emqx_mgmt:get_stats()}; _ -> - Data = [ - maps:from_list(emqx_mgmt:get_stats(Node) ++ [{node, Node}]) - || Node <- running_nodes() - ], + Data = lists:foldl( + fun(Node, Acc) -> + case emqx_mgmt:get_stats(Node) of + {error, _Err} -> + Acc; + Stats when is_list(Stats) -> + Data = maps:from_list([{node, Node} | Stats]), + [Data | Acc] + end + end, + [], + mria:running_nodes() + ), {200, Data} end. diff --git a/changes/ce/fix-10369.en.md b/changes/ce/fix-10369.en.md new file mode 100644 index 000000000..594e91fc9 --- /dev/null +++ b/changes/ce/fix-10369.en.md @@ -0,0 +1,6 @@ +Fix error `/api/v5/monitor_current` API endpoint when some EMQX nodes are down. + +Prior to this fix, sometimes the request returned HTTP code 500 and the following message: +``` +{"code":"INTERNAL_ERROR","message":"error, badarg, [{erlang,'++',[{error,nodedown},[{node,'emqx@10.42.0.150'}]], ... +``` From 282bfee8ff120bee4ad7da501142b2c63438ad3c Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Wed, 12 Apr 2023 10:56:03 +0200 Subject: [PATCH 035/263] feat(emqx): Add an API that returns the list of running nodes --- apps/emqx/src/emqx.erl | 18 ++++++ apps/emqx/test/emqx_SUITE.erl | 8 +++ apps/emqx_management/src/emqx_mgmt.erl | 57 +++++++++++-------- apps/emqx_management/src/emqx_mgmt_api.erl | 2 +- .../src/emqx_mgmt_api_cluster.erl | 2 +- .../src/emqx_mgmt_api_configs.erl | 2 +- .../src/emqx_mgmt_api_listeners.erl | 2 +- .../src/emqx_mgmt_api_metrics.erl | 2 +- .../src/emqx_mgmt_api_stats.erl | 2 +- .../src/emqx_mgmt_api_trace.erl | 10 ++-- apps/emqx_management/test/emqx_mgmt_SUITE.erl | 10 ++-- .../test/emqx_mgmt_api_SUITE.erl | 6 +- .../test/emqx_mgmt_api_configs_SUITE.erl | 4 +- .../test/emqx_mgmt_api_listeners_SUITE.erl | 4 +- .../test/emqx_mgmt_api_stats_SUITE.erl | 2 + apps/emqx_prometheus/src/emqx_prometheus.erl | 4 +- changes/ce/fix-10369.en.md | 2 +- 17 files changed, 86 insertions(+), 51 deletions(-) diff --git a/apps/emqx/src/emqx.erl b/apps/emqx/src/emqx.erl index ef870685a..ffee5fba7 100644 --- a/apps/emqx/src/emqx.erl +++ b/apps/emqx/src/emqx.erl @@ -30,6 +30,12 @@ stop/0 ]). +%% Cluster API +-export([ + cluster_nodes/1, + running_nodes/0 +]). + %% PubSub API -export([ subscribe/1, @@ -102,6 +108,18 @@ is_running() -> _ -> true end. +%%-------------------------------------------------------------------- +%% Cluster API +%%-------------------------------------------------------------------- + +-spec running_nodes() -> [node()]. +running_nodes() -> + mria:running_nodes(). 
+ +-spec cluster_nodes(all | running | cores | stopped) -> [node()]. +cluster_nodes(Type) -> + mria:cluster_nodes(Type). + %%-------------------------------------------------------------------- %% PubSub API %%-------------------------------------------------------------------- diff --git a/apps/emqx/test/emqx_SUITE.erl b/apps/emqx/test/emqx_SUITE.erl index 09d5d8017..64ed2ea19 100644 --- a/apps/emqx/test/emqx_SUITE.erl +++ b/apps/emqx/test/emqx_SUITE.erl @@ -148,6 +148,14 @@ t_run_hook(_) -> ?assertEqual(3, emqx:run_fold_hook(foldl_filter2_hook, [arg], 1)), ?assertEqual(2, emqx:run_fold_hook(foldl_filter2_hook, [arg1], 1)). +t_cluster_nodes(_) -> + Expected = [node()], + ?assertEqual(Expected, emqx:running_nodes()), + ?assertEqual(Expected, emqx:cluster_nodes(running)), + ?assertEqual(Expected, emqx:cluster_nodes(all)), + ?assertEqual(Expected, emqx:cluster_nodes(cores)), + ?assertEqual([], emqx:cluster_nodes(stopped)). + %%-------------------------------------------------------------------- %% Hook fun %%-------------------------------------------------------------------- diff --git a/apps/emqx_management/src/emqx_mgmt.erl b/apps/emqx_management/src/emqx_mgmt.erl index 5ba12646f..0b91817f0 100644 --- a/apps/emqx_management/src/emqx_mgmt.erl +++ b/apps/emqx_management/src/emqx_mgmt.erl @@ -112,8 +112,8 @@ %%-------------------------------------------------------------------- list_nodes() -> - Running = mria:cluster_nodes(running), - Stopped = mria:cluster_nodes(stopped), + Running = emqx:cluster_nodes(running), + Stopped = emqx:cluster_nodes(stopped), DownNodes = lists:map(fun stopped_node_info/1, Stopped), [{Node, Info} || #{node := Node} = Info <- node_info(Running)] ++ DownNodes. @@ -199,7 +199,7 @@ vm_stats() -> %%-------------------------------------------------------------------- list_brokers() -> - Running = mria:running_nodes(), + Running = emqx:running_nodes(), [{Node, Broker} || #{node := Node} = Broker <- broker_info(Running)]. lookup_broker(Node) -> @@ -223,7 +223,7 @@ broker_info(Nodes) -> %%-------------------------------------------------------------------- get_metrics() -> - nodes_info_count([get_metrics(Node) || Node <- mria:running_nodes()]). + nodes_info_count([get_metrics(Node) || Node <- emqx:running_nodes()]). get_metrics(Node) -> unwrap_rpc(emqx_proto_v1:get_metrics(Node)). @@ -238,13 +238,20 @@ get_stats() -> 'subscriptions.shared.count', 'subscriptions.shared.max' ], - CountStats = nodes_info_count([ - begin - Stats = get_stats(Node), - delete_keys(Stats, GlobalStatsKeys) - end - || Node <- mria:running_nodes() - ]), + CountStats = nodes_info_count( + lists:foldl( + fun(Node, Acc) -> + case get_stats(Node) of + {error, _} -> + Acc; + Stats -> + [delete_keys(Stats, GlobalStatsKeys) | Acc] + end + end, + [], + emqx:running_nodes() + ) + ), GlobalStats = maps:with(GlobalStatsKeys, maps:from_list(get_stats(node()))), maps:merge(CountStats, GlobalStats). @@ -275,12 +282,12 @@ nodes_info_count(PropList) -> lookup_client({clientid, ClientId}, FormatFun) -> lists:append([ lookup_client(Node, {clientid, ClientId}, FormatFun) - || Node <- mria:running_nodes() + || Node <- emqx:running_nodes() ]); lookup_client({username, Username}, FormatFun) -> lists:append([ lookup_client(Node, {username, Username}, FormatFun) - || Node <- mria:running_nodes() + || Node <- emqx:running_nodes() ]). 
lookup_client(Node, Key, FormatFun) -> @@ -307,7 +314,7 @@ kickout_client(ClientId) -> [] -> {error, not_found}; _ -> - Results = [kickout_client(Node, ClientId) || Node <- mria:running_nodes()], + Results = [kickout_client(Node, ClientId) || Node <- emqx:running_nodes()], check_results(Results) end. @@ -322,7 +329,7 @@ list_client_subscriptions(ClientId) -> [] -> {error, not_found}; _ -> - Results = [client_subscriptions(Node, ClientId) || Node <- mria:running_nodes()], + Results = [client_subscriptions(Node, ClientId) || Node <- emqx:running_nodes()], Filter = fun ({error, _}) -> @@ -340,18 +347,18 @@ client_subscriptions(Node, ClientId) -> {Node, unwrap_rpc(emqx_broker_proto_v1:list_client_subscriptions(Node, ClientId))}. clean_authz_cache(ClientId) -> - Results = [clean_authz_cache(Node, ClientId) || Node <- mria:running_nodes()], + Results = [clean_authz_cache(Node, ClientId) || Node <- emqx:running_nodes()], check_results(Results). clean_authz_cache(Node, ClientId) -> unwrap_rpc(emqx_proto_v1:clean_authz_cache(Node, ClientId)). clean_authz_cache_all() -> - Results = [{Node, clean_authz_cache_all(Node)} || Node <- mria:running_nodes()], + Results = [{Node, clean_authz_cache_all(Node)} || Node <- emqx:running_nodes()], wrap_results(Results). clean_pem_cache_all() -> - Results = [{Node, clean_pem_cache_all(Node)} || Node <- mria:running_nodes()], + Results = [{Node, clean_pem_cache_all(Node)} || Node <- emqx:running_nodes()], wrap_results(Results). wrap_results(Results) -> @@ -379,7 +386,7 @@ set_keepalive(_ClientId, _Interval) -> %% @private call_client(ClientId, Req) -> - Results = [call_client(Node, ClientId, Req) || Node <- mria:running_nodes()], + Results = [call_client(Node, ClientId, Req) || Node <- emqx:running_nodes()], Expected = lists:filter( fun ({error, _}) -> false; @@ -428,7 +435,7 @@ list_subscriptions(Node) -> list_subscriptions_via_topic(Topic, FormatFun) -> lists:append([ list_subscriptions_via_topic(Node, Topic, FormatFun) - || Node <- mria:running_nodes() + || Node <- emqx:running_nodes() ]). list_subscriptions_via_topic(Node, Topic, _FormatFun = {M, F}) -> @@ -442,7 +449,7 @@ list_subscriptions_via_topic(Node, Topic, _FormatFun = {M, F}) -> %%-------------------------------------------------------------------- subscribe(ClientId, TopicTables) -> - subscribe(mria:running_nodes(), ClientId, TopicTables). + subscribe(emqx:running_nodes(), ClientId, TopicTables). subscribe([Node | Nodes], ClientId, TopicTables) -> case unwrap_rpc(emqx_management_proto_v3:subscribe(Node, ClientId, TopicTables)) of @@ -467,7 +474,7 @@ publish(Msg) -> -spec unsubscribe(emqx_types:clientid(), emqx_types:topic()) -> {unsubscribe, _} | {error, channel_not_found}. unsubscribe(ClientId, Topic) -> - unsubscribe(mria:running_nodes(), ClientId, Topic). + unsubscribe(emqx:running_nodes(), ClientId, Topic). -spec unsubscribe([node()], emqx_types:clientid(), emqx_types:topic()) -> {unsubscribe, _} | {error, channel_not_found}. @@ -490,7 +497,7 @@ do_unsubscribe(ClientId, Topic) -> -spec unsubscribe_batch(emqx_types:clientid(), [emqx_types:topic()]) -> {unsubscribe, _} | {error, channel_not_found}. unsubscribe_batch(ClientId, Topics) -> - unsubscribe_batch(mria:running_nodes(), ClientId, Topics). + unsubscribe_batch(emqx:running_nodes(), ClientId, Topics). -spec unsubscribe_batch([node()], emqx_types:clientid(), [emqx_types:topic()]) -> {unsubscribe_batch, _} | {error, channel_not_found}. 
@@ -515,7 +522,7 @@ do_unsubscribe_batch(ClientId, Topics) -> %%-------------------------------------------------------------------- get_alarms(Type) -> - [{Node, get_alarms(Node, Type)} || Node <- mria:running_nodes()]. + [{Node, get_alarms(Node, Type)} || Node <- emqx:running_nodes()]. get_alarms(Node, Type) -> add_duration_field(unwrap_rpc(emqx_proto_v1:get_alarms(Node, Type))). @@ -524,7 +531,7 @@ deactivate(Node, Name) -> unwrap_rpc(emqx_proto_v1:deactivate_alarm(Node, Name)). delete_all_deactivated_alarms() -> - [delete_all_deactivated_alarms(Node) || Node <- mria:running_nodes()]. + [delete_all_deactivated_alarms(Node) || Node <- emqx:running_nodes()]. delete_all_deactivated_alarms(Node) -> unwrap_rpc(emqx_proto_v1:delete_all_deactivated_alarms(Node)). diff --git a/apps/emqx_management/src/emqx_mgmt_api.erl b/apps/emqx_management/src/emqx_mgmt_api.erl index c77752f7d..8365b983c 100644 --- a/apps/emqx_management/src/emqx_mgmt_api.erl +++ b/apps/emqx_management/src/emqx_mgmt_api.erl @@ -163,7 +163,7 @@ cluster_query(Tab, QString, QSchema, MsFun, FmtFun) -> {error, page_limit_invalid}; Meta -> {_CodCnt, NQString} = parse_qstring(QString, QSchema), - Nodes = mria:running_nodes(), + Nodes = emqx:running_nodes(), ResultAcc = init_query_result(), QueryState = init_query_state(Tab, NQString, MsFun, Meta), NResultAcc = do_cluster_query( diff --git a/apps/emqx_management/src/emqx_mgmt_api_cluster.erl b/apps/emqx_management/src/emqx_mgmt_api_cluster.erl index 68bb6c81d..e74b6c362 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_cluster.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_cluster.erl @@ -101,7 +101,7 @@ cluster_info(get, _) -> ClusterName = application:get_env(ekka, cluster_name, emqxcl), Info = #{ name => ClusterName, - nodes => mria:running_nodes(), + nodes => emqx:running_nodes(), self => node() }, {200, Info}. diff --git a/apps/emqx_management/src/emqx_mgmt_api_configs.erl b/apps/emqx_management/src/emqx_mgmt_api_configs.erl index c94dc17b6..0efaace20 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_configs.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_configs.erl @@ -260,7 +260,7 @@ configs(get, Params, _Req) -> QS = maps:get(query_string, Params, #{}), Node = maps:get(<<"node">>, QS, node()), case - lists:member(Node, mria:running_nodes()) andalso + lists:member(Node, emqx:running_nodes()) andalso emqx_management_proto_v2:get_full_config(Node) of false -> diff --git a/apps/emqx_management/src/emqx_mgmt_api_listeners.erl b/apps/emqx_management/src/emqx_mgmt_api_listeners.erl index d7f3ff321..de86700ef 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_listeners.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_listeners.erl @@ -483,7 +483,7 @@ err_msg_str(Reason) -> io_lib:format("~p", [Reason]). list_listeners() -> - [list_listeners(Node) || Node <- mria:running_nodes()]. + [list_listeners(Node) || Node <- emqx:running_nodes()]. list_listeners(Node) -> wrap_rpc(emqx_management_proto_v2:list_listeners(Node)). diff --git a/apps/emqx_management/src/emqx_mgmt_api_metrics.erl b/apps/emqx_management/src/emqx_mgmt_api_metrics.erl index 1c5c8f62a..0fcc45d8e 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_metrics.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_metrics.erl @@ -59,7 +59,7 @@ metrics(get, #{query_string := Qs}) -> maps:from_list( emqx_mgmt:get_metrics(Node) ++ [{node, Node}] ) - || Node <- mria:running_nodes() + || Node <- emqx:running_nodes() ], {200, Data} end. 
diff --git a/apps/emqx_management/src/emqx_mgmt_api_stats.erl b/apps/emqx_management/src/emqx_mgmt_api_stats.erl index 080a37b4d..19857f267 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_stats.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_stats.erl @@ -138,7 +138,7 @@ list(get, #{query_string := Qs}) -> end end, [], - mria:running_nodes() + emqx:running_nodes() ), {200, Data} end. diff --git a/apps/emqx_management/src/emqx_mgmt_api_trace.erl b/apps/emqx_management/src/emqx_mgmt_api_trace.erl index 5df641add..25cc2734f 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_trace.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_trace.erl @@ -390,7 +390,7 @@ trace(get, _Params) -> fun(#{start_at := A}, #{start_at := B}) -> A > B end, emqx_trace:format(List0) ), - Nodes = mria:running_nodes(), + Nodes = emqx:running_nodes(), TraceSize = wrap_rpc(emqx_mgmt_trace_proto_v2:get_trace_size(Nodes)), AllFileSize = lists:foldl(fun(F, Acc) -> maps:merge(Acc, F) end, #{}, TraceSize), Now = erlang:system_time(second), @@ -464,7 +464,7 @@ format_trace(Trace0) -> LogSize = lists:foldl( fun(Node, Acc) -> Acc#{Node => 0} end, #{}, - mria:running_nodes() + emqx:running_nodes() ), Trace2 = maps:without([enable, filter], Trace1), Trace2#{ @@ -560,13 +560,13 @@ group_trace_file(ZipDir, TraceLog, TraceFiles) -> ). collect_trace_file(undefined, TraceLog) -> - Nodes = mria:running_nodes(), + Nodes = emqx:running_nodes(), wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file(Nodes, TraceLog)); collect_trace_file(Node, TraceLog) -> wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file([Node], TraceLog)). collect_trace_file_detail(TraceLog) -> - Nodes = mria:running_nodes(), + Nodes = emqx:running_nodes(), wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file_detail(Nodes, TraceLog)). wrap_rpc({GoodRes, BadNodes}) -> @@ -696,7 +696,7 @@ parse_node(Query, Default) -> {ok, Default}; {ok, NodeBin} -> Node = binary_to_existing_atom(NodeBin), - true = lists:member(Node, mria:running_nodes()), + true = lists:member(Node, emqx:running_nodes()), {ok, Node} end catch diff --git a/apps/emqx_management/test/emqx_mgmt_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_SUITE.erl index 71b51b67c..3eb37060e 100644 --- a/apps/emqx_management/test/emqx_mgmt_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_SUITE.erl @@ -36,16 +36,16 @@ end_per_suite(_) -> emqx_mgmt_api_test_util:end_suite([emqx_management, emqx_conf]). init_per_testcase(TestCase, Config) -> - meck:expect(mria, running_nodes, 0, [node()]), + meck:expect(emqx, running_nodes, 0, [node()]), emqx_common_test_helpers:init_per_testcase(?MODULE, TestCase, Config). end_per_testcase(TestCase, Config) -> - meck:unload(mria), + meck:unload(emqx), emqx_common_test_helpers:end_per_testcase(?MODULE, TestCase, Config). t_list_nodes(init, Config) -> meck:expect( - mria, + emqx, cluster_nodes, fun (running) -> [node()]; @@ -125,7 +125,7 @@ t_lookup_client(_Config) -> emqx_mgmt:lookup_client({username, <<"user1">>}, ?FORMATFUN) ), ?assertEqual([], emqx_mgmt:lookup_client({clientid, <<"notfound">>}, ?FORMATFUN)), - meck:expect(mria, running_nodes, 0, [node(), 'fake@nonode']), + meck:expect(emqx, running_nodes, 0, [node(), 'fake@nonode']), ?assertMatch( [_ | {error, nodedown}], emqx_mgmt:lookup_client({clientid, <<"client1">>}, ?FORMATFUN) ). 
@@ -188,7 +188,7 @@ t_clean_cache(_Config) -> {error, _}, emqx_mgmt:clean_pem_cache_all() ), - meck:expect(mria, running_nodes, 0, [node(), 'fake@nonode']), + meck:expect(emqx, running_nodes, 0, [node(), 'fake@nonode']), ?assertMatch( {error, [{'fake@nonode', {error, _}}]}, emqx_mgmt:clean_authz_cache_all() diff --git a/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl index bacec718d..a53ffc9c4 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl @@ -179,14 +179,14 @@ t_bad_rpc(_) -> ClientLs1 = [start_emqtt_client(node(), I, 1883) || I <- lists:seq(1, 10)], Path = emqx_mgmt_api_test_util:api_path(["clients?limit=2&page=2"]), try - meck:expect(mria, running_nodes, 0, ['fake@nohost']), + meck:expect(emqx, running_nodes, 0, ['fake@nohost']), {error, {_, 500, _}} = emqx_mgmt_api_test_util:request_api(get, Path), %% good cop, bad cop - meck:expect(mria, running_nodes, 0, [node(), 'fake@nohost']), + meck:expect(emqx, running_nodes, 0, [node(), 'fake@nohost']), {error, {_, 500, _}} = emqx_mgmt_api_test_util:request_api(get, Path) after _ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs1), - meck:unload(mria), + meck:unload(emqx), emqx_mgmt_api_test_util:end_suite() end. diff --git a/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl index 1638b815a..5a0116a4d 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl @@ -246,7 +246,7 @@ t_dashboard(_Config) -> t_configs_node({'init', Config}) -> Node = node(), - meck:expect(mria, running_nodes, fun() -> [Node, bad_node, other_node] end), + meck:expect(emqx, running_nodes, fun() -> [Node, bad_node, other_node] end), meck:expect( emqx_management_proto_v2, get_full_config, @@ -258,7 +258,7 @@ t_configs_node({'init', Config}) -> ), Config; t_configs_node({'end', _}) -> - meck:unload([mria, emqx_management_proto_v2]); + meck:unload([emqx, emqx_management_proto_v2]); t_configs_node(_) -> Node = atom_to_list(node()), diff --git a/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl index 62f689a84..33cb66eb2 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl @@ -168,8 +168,8 @@ t_api_listeners_list_not_ready(Config) when is_list(Config) -> L3 = get_tcp_listeners(Node2), Comment = #{ - node1 => rpc:call(Node1, mria, running_nodes, []), - node2 => rpc:call(Node2, mria, running_nodes, []) + node1 => rpc:call(Node1, emqx, running_nodes, []), + node2 => rpc:call(Node2, emqx, running_nodes, []) }, ?assert(length(L1) > length(L2), Comment), diff --git a/apps/emqx_management/test/emqx_mgmt_api_stats_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_stats_SUITE.erl index 4099426b8..2550bdbe2 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_stats_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_stats_SUITE.erl @@ -24,10 +24,12 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> + meck:expect(emqx, running_nodes, 0, [node(), 'fake@node']), emqx_mgmt_api_test_util:init_suite(), Config. end_per_suite(_) -> + meck:unload(emqx), emqx_mgmt_api_test_util:end_suite(). 
t_stats_api(_) -> diff --git a/apps/emqx_prometheus/src/emqx_prometheus.erl b/apps/emqx_prometheus/src/emqx_prometheus.erl index 05d9508b6..d999f294e 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus.erl +++ b/apps/emqx_prometheus/src/emqx_prometheus.erl @@ -599,8 +599,8 @@ emqx_cluster() -> ]. emqx_cluster_data() -> - Running = mria:cluster_nodes(running), - Stopped = mria:cluster_nodes(stopped), + Running = emqx:cluster_nodes(running), + Stopped = emqx:cluster_nodes(stopped), [ {nodes_running, length(Running)}, {nodes_stopped, length(Stopped)} diff --git a/changes/ce/fix-10369.en.md b/changes/ce/fix-10369.en.md index 594e91fc9..3c32d33f3 100644 --- a/changes/ce/fix-10369.en.md +++ b/changes/ce/fix-10369.en.md @@ -1,4 +1,4 @@ -Fix error `/api/v5/monitor_current` API endpoint when some EMQX nodes are down. +Fix error in `/api/v5/monitor_current` API endpoint that happens when some EMQX nodes are down. Prior to this fix, sometimes the request returned HTTP code 500 and the following message: ``` From 721125a9f66d6a971a77823b889cc0ff696b598f Mon Sep 17 00:00:00 2001 From: firest Date: Tue, 18 Apr 2023 15:34:42 +0800 Subject: [PATCH 036/263] fix(retainer): mark `flow-control` as non-importance field --- apps/emqx_retainer/src/emqx_retainer_schema.erl | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/apps/emqx_retainer/src/emqx_retainer_schema.erl b/apps/emqx_retainer/src/emqx_retainer_schema.erl index dbe1ad9d5..74f1e2371 100644 --- a/apps/emqx_retainer/src/emqx_retainer_schema.erl +++ b/apps/emqx_retainer/src/emqx_retainer_schema.erl @@ -53,7 +53,8 @@ fields("retainer") -> sc( ?R_REF(flow_control), flow_control, - #{} + #{}, + ?IMPORTANCE_HIDDEN )}, {max_payload_size, sc( @@ -125,7 +126,9 @@ desc(_) -> %% hoconsc:mk(Type, #{desc => ?DESC(DescId)}). sc(Type, DescId, Default) -> - hoconsc:mk(Type, #{default => Default, desc => ?DESC(DescId)}). + sc(Type, DescId, Default, ?DEFAULT_IMPORTANCE). +sc(Type, DescId, Default, Importance) -> + hoconsc:mk(Type, #{default => Default, desc => ?DESC(DescId), importance => Importance}). backend_config() -> hoconsc:mk(hoconsc:ref(?MODULE, mnesia_config), #{desc => ?DESC(backend)}). 
From 6dd7befaab936706cd217ed928c7f5f2885991bd Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Mon, 17 Apr 2023 20:58:21 +0200 Subject: [PATCH 037/263] refactor: unify authn authz type names --- apps/emqx/src/emqx_schema.erl | 16 ++++-- .../emqx_enhanced_authn_scram_mnesia.erl | 12 ++-- .../src/simple_authn/emqx_authn_http.erl | 21 +++---- .../src/simple_authn/emqx_authn_jwt.erl | 57 +++++++++++++------ .../src/simple_authn/emqx_authn_mnesia.erl | 14 +++-- .../src/simple_authn/emqx_authn_mongodb.erl | 31 +++++----- .../src/simple_authn/emqx_authn_mysql.erl | 14 +++-- .../src/simple_authn/emqx_authn_pgsql.erl | 14 +++-- .../src/simple_authn/emqx_authn_redis.erl | 31 +++++----- .../test/emqx_authn_pgsql_SUITE.erl | 2 +- .../test/emqx_authn_redis_SUITE.erl | 2 +- .../test/emqx_authn_schema_SUITE.erl | 4 +- .../test/emqx_authn_schema_tests.erl | 2 +- apps/emqx_authz/src/emqx_authz_schema.erl | 15 +++-- apps/emqx_conf/src/emqx_conf_schema.erl | 4 +- 15 files changed, 140 insertions(+), 99 deletions(-) diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index 8335d69b8..8073e19b5 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -164,7 +164,7 @@ roots(high) -> } )}, {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, authentication(global)}, - %% NOTE: authorization schema here is only to keep emqx app prue + %% NOTE: authorization schema here is only to keep emqx app pure %% the full schema for EMQX node is injected in emqx_conf_schema. {?EMQX_AUTHORIZATION_CONFIG_ROOT_NAME, sc( @@ -2762,10 +2762,16 @@ str(S) when is_list(S) -> S. authentication(Which) -> - Desc = + {Importance, Desc} = case Which of - global -> ?DESC(global_authentication); - listener -> ?DESC(listener_authentication) + global -> + %% For root level authentication, it is recommended to configure + %% from the dashboard or API. + %% Hence it's considered a low-importance when it comes to + %% configuration importance. + {?IMPORTANCE_LOW, ?DESC(global_authentication)}; + listener -> + {?IMPORTANCE_HIDDEN, ?DESC(listener_authentication)} end, %% poor man's dependency injection %% this is due to the fact that authn is implemented outside of 'emqx' app. @@ -2781,7 +2787,7 @@ authentication(Which) -> hoconsc:mk(Type, #{ desc => Desc, converter => fun ensure_array/2, - importance => ?IMPORTANCE_HIDDEN + importance => Importance }). %% the older version schema allows individual element (instead of a chain) in config diff --git a/apps/emqx_authn/src/enhanced_authn/emqx_enhanced_authn_scram_mnesia.erl b/apps/emqx_authn/src/enhanced_authn/emqx_enhanced_authn_scram_mnesia.erl index 84f2c9525..b11b89081 100644 --- a/apps/emqx_authn/src/enhanced_authn/emqx_enhanced_authn_scram_mnesia.erl +++ b/apps/emqx_authn/src/enhanced_authn/emqx_enhanced_authn_scram_mnesia.erl @@ -105,14 +105,16 @@ mnesia(boot) -> %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-scram-builtin_db". +namespace() -> "authn". tags() -> [<<"Authentication">>]. -roots() -> [?CONF_NS]. +%% used for config check when the schema module is resolved +roots() -> + [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, scram))}]. -fields(?CONF_NS) -> +fields(scram) -> [ {mechanism, emqx_authn_schema:mechanism(scram)}, {backend, emqx_authn_schema:backend(built_in_database)}, @@ -120,7 +122,7 @@ fields(?CONF_NS) -> {iteration_count, fun iteration_count/1} ] ++ emqx_authn_schema:common_fields(). 
-desc(?CONF_NS) -> +desc(scram) -> "Settings for Salted Challenge Response Authentication Mechanism\n" "(SCRAM) authentication."; desc(_) -> @@ -141,7 +143,7 @@ iteration_count(_) -> undefined. %%------------------------------------------------------------------------------ refs() -> - [hoconsc:ref(?MODULE, ?CONF_NS)]. + [hoconsc:ref(?MODULE, scram)]. create( AuthenticatorID, diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl index 3c34d878e..27eb8cc6e 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl @@ -51,34 +51,35 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-http". +namespace() -> "authn". tags() -> [<<"Authentication">>]. +%% used for config check when the schema module is resolved roots() -> [ {?CONF_NS, hoconsc:mk( - hoconsc:union(fun union_member_selector/1), + hoconsc:union(fun ?MODULE:union_member_selector/1), #{} )} ]. -fields(get) -> +fields(http_get) -> [ {method, #{type => get, required => true, desc => ?DESC(method)}}, {headers, fun headers_no_content_type/1} ] ++ common_fields(); -fields(post) -> +fields(http_post) -> [ {method, #{type => post, required => true, desc => ?DESC(method)}}, {headers, fun headers/1} ] ++ common_fields(). -desc(get) -> +desc(http_get) -> ?DESC(get); -desc(post) -> +desc(http_post) -> ?DESC(post); desc(_) -> undefined. @@ -156,8 +157,8 @@ request_timeout(_) -> undefined. refs() -> [ - hoconsc:ref(?MODULE, get), - hoconsc:ref(?MODULE, post) + hoconsc:ref(?MODULE, http_get), + hoconsc:ref(?MODULE, http_post) ]. union_member_selector(all_union_members) -> @@ -166,9 +167,9 @@ union_member_selector({value, Value}) -> refs(Value). refs(#{<<"method">> := <<"get">>}) -> - [hoconsc:ref(?MODULE, get)]; + [hoconsc:ref(?MODULE, http_get)]; refs(#{<<"method">> := <<"post">>}) -> - [hoconsc:ref(?MODULE, post)]; + [hoconsc:ref(?MODULE, http_post)]; refs(_) -> throw(#{ field_name => method, diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_jwt.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_jwt.erl index a891a55e2..92c6970cc 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_jwt.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_jwt.erl @@ -43,36 +43,57 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-jwt". +namespace() -> "authn". tags() -> [<<"Authentication">>]. +%% used for config check when the schema module is resolved roots() -> [ {?CONF_NS, hoconsc:mk( - hoconsc:union(fun union_member_selector/1), + hoconsc:union(fun ?MODULE:union_member_selector/1), #{} )} ]. 
-fields('hmac-based') -> +fields(jwt_hmac) -> [ - {use_jwks, sc(hoconsc:enum([false]), #{required => true, desc => ?DESC(use_jwks)})}, + %% for hmac, it's the 'algorithm' field which selects this type + %% use_jwks field can be ignored (kept for backward compatibility) + {use_jwks, + sc( + hoconsc:enum([false]), + #{ + required => false, + desc => ?DESC(use_jwks), + importance => ?IMPORTANCE_HIDDEN + } + )}, {algorithm, sc(hoconsc:enum(['hmac-based']), #{required => true, desc => ?DESC(algorithm)})}, {secret, fun secret/1}, {secret_base64_encoded, fun secret_base64_encoded/1} ] ++ common_fields(); -fields('public-key') -> +fields(jwt_public_key) -> [ - {use_jwks, sc(hoconsc:enum([false]), #{required => true, desc => ?DESC(use_jwks)})}, + %% for public-key, it's the 'algorithm' field which selects this type + %% use_jwks field can be ignored (kept for backward compatibility) + {use_jwks, + sc( + hoconsc:enum([false]), + #{ + required => false, + desc => ?DESC(use_jwks), + importance => ?IMPORTANCE_HIDDEN + } + )}, {algorithm, sc(hoconsc:enum(['public-key']), #{required => true, desc => ?DESC(algorithm)})}, {public_key, fun public_key/1} ] ++ common_fields(); -fields('jwks') -> +fields(jwt_jwks) -> [ {use_jwks, sc(hoconsc:enum([true]), #{required => true, desc => ?DESC(use_jwks)})}, {endpoint, fun endpoint/1}, @@ -85,12 +106,12 @@ fields('jwks') -> }} ] ++ common_fields(). -desc('hmac-based') -> - ?DESC('hmac-based'); -desc('public-key') -> - ?DESC('public-key'); -desc('jwks') -> - ?DESC('jwks'); +desc(jwt_hmac) -> + ?DESC(jwt_hmac); +desc(jwt_public_key) -> + ?DESC(jwt_public_key); +desc(jwt_jwks) -> + ?DESC(jwt_jwks); desc(undefined) -> undefined. @@ -160,9 +181,9 @@ from(_) -> undefined. refs() -> [ - hoconsc:ref(?MODULE, 'hmac-based'), - hoconsc:ref(?MODULE, 'public-key'), - hoconsc:ref(?MODULE, 'jwks') + hoconsc:ref(?MODULE, jwt_hmac), + hoconsc:ref(?MODULE, jwt_public_key), + hoconsc:ref(?MODULE, jwt_jwks) ]. union_member_selector(all_union_members) -> @@ -181,9 +202,9 @@ boolean(Other) -> Other. select_ref(true, _) -> [hoconsc:ref(?MODULE, 'jwks')]; select_ref(false, #{<<"public_key">> := _}) -> - [hoconsc:ref(?MODULE, 'public-key')]; + [hoconsc:ref(?MODULE, jwt_public_key)]; select_ref(false, _) -> - [hoconsc:ref(?MODULE, 'hmac-based')]; + [hoconsc:ref(?MODULE, jwt_hmac)]; select_ref(_, _) -> throw(#{ field_name => use_jwks, diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_mnesia.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_mnesia.erl index 20b604e70..d57e9e00e 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_mnesia.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_mnesia.erl @@ -107,14 +107,16 @@ mnesia(boot) -> %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-builtin_db". +namespace() -> "authn". tags() -> [<<"Authentication">>]. -roots() -> [?CONF_NS]. +%% used for config check when the schema module is resolved +roots() -> + [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, builtin_db))}]. -fields(?CONF_NS) -> +fields(builtin_db) -> [ {mechanism, emqx_authn_schema:mechanism(password_based)}, {backend, emqx_authn_schema:backend(built_in_database)}, @@ -122,8 +124,8 @@ fields(?CONF_NS) -> {password_hash_algorithm, fun emqx_authn_password_hashing:type_rw/1} ] ++ emqx_authn_schema:common_fields(). -desc(?CONF_NS) -> - ?DESC(?CONF_NS); +desc(builtin_db) -> + ?DESC(builtin_db); desc(_) -> undefined. @@ -138,7 +140,7 @@ user_id_type(_) -> undefined. 
%%------------------------------------------------------------------------------ refs() -> - [hoconsc:ref(?MODULE, ?CONF_NS)]. + [hoconsc:ref(?MODULE, builtin_db)]. create(_AuthenticatorID, Config) -> create(Config). diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl index 22b930485..1a766b975 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl @@ -44,32 +44,33 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-mongodb". +namespace() -> "authn". tags() -> [<<"Authentication">>]. +%% used for config check when the schema module is resolved roots() -> [ {?CONF_NS, hoconsc:mk( - hoconsc:union(fun union_member_selector/1), + hoconsc:union(fun ?MODULE:union_member_selector/1), #{} )} ]. -fields(standalone) -> +fields(mongo_single) -> common_fields() ++ emqx_connector_mongo:fields(single); -fields('replica-set') -> +fields(mongo_rs) -> common_fields() ++ emqx_connector_mongo:fields(rs); -fields('sharded-cluster') -> +fields(mongo_sharded) -> common_fields() ++ emqx_connector_mongo:fields(sharded). -desc(standalone) -> - ?DESC(standalone); -desc('replica-set') -> +desc(mongo_single) -> + ?DESC(single); +desc(mongo_rs) -> ?DESC('replica-set'); -desc('sharded-cluster') -> +desc(mongo_sharded) -> ?DESC('sharded-cluster'); desc(_) -> undefined. @@ -126,9 +127,9 @@ is_superuser_field(_) -> undefined. refs() -> [ - hoconsc:ref(?MODULE, standalone), - hoconsc:ref(?MODULE, 'replica-set'), - hoconsc:ref(?MODULE, 'sharded-cluster') + hoconsc:ref(?MODULE, mongo_single), + hoconsc:ref(?MODULE, mongo_rs), + hoconsc:ref(?MODULE, mongo_sharded) ]. create(_AuthenticatorID, Config) -> @@ -254,11 +255,11 @@ union_member_selector({value, Value}) -> refs(Value). refs(#{<<"mongo_type">> := <<"single">>}) -> - [hoconsc:ref(?MODULE, standalone)]; + [hoconsc:ref(?MODULE, mongo_single)]; refs(#{<<"mongo_type">> := <<"rs">>}) -> - [hoconsc:ref(?MODULE, 'replica-set')]; + [hoconsc:ref(?MODULE, mongo_rs)]; refs(#{<<"mongo_type">> := <<"sharded">>}) -> - [hoconsc:ref(?MODULE, 'sharded-cluster')]; + [hoconsc:ref(?MODULE, mongo_sharded)]; refs(_) -> throw(#{ field_name => mongo_type, diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_mysql.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_mysql.erl index bedd169e2..d8e631885 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_mysql.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_mysql.erl @@ -45,14 +45,16 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-mysql". +namespace() -> "authn". tags() -> [<<"Authentication">>]. -roots() -> [?CONF_NS]. +%% used for config check when the schema module is resolved +roots() -> + [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, mysql))}]. -fields(?CONF_NS) -> +fields(mysql) -> [ {mechanism, emqx_authn_schema:mechanism(password_based)}, {backend, emqx_authn_schema:backend(mysql)}, @@ -62,8 +64,8 @@ fields(?CONF_NS) -> ] ++ emqx_authn_schema:common_fields() ++ proplists:delete(prepare_statement, emqx_connector_mysql:fields(config)). -desc(?CONF_NS) -> - ?DESC(?CONF_NS); +desc(mysql) -> + ?DESC(mysql); desc(_) -> undefined. @@ -82,7 +84,7 @@ query_timeout(_) -> undefined. %%------------------------------------------------------------------------------ refs() -> - [hoconsc:ref(?MODULE, ?CONF_NS)]. + [hoconsc:ref(?MODULE, mysql)]. 
create(_AuthenticatorID, Config) -> create(Config). diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_pgsql.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_pgsql.erl index 3d9e1e08f..d9526cc7b 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_pgsql.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_pgsql.erl @@ -49,14 +49,16 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-postgresql". +namespace() -> "authn". tags() -> [<<"Authentication">>]. -roots() -> [?CONF_NS]. +%% used for config check when the schema module is resolved +roots() -> + [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, postgresql))}]. -fields(?CONF_NS) -> +fields(postgresql) -> [ {mechanism, emqx_authn_schema:mechanism(password_based)}, {backend, emqx_authn_schema:backend(postgresql)}, @@ -66,8 +68,8 @@ fields(?CONF_NS) -> emqx_authn_schema:common_fields() ++ proplists:delete(prepare_statement, emqx_connector_pgsql:fields(config)). -desc(?CONF_NS) -> - ?DESC(?CONF_NS); +desc(postgresql) -> + ?DESC(postgresql); desc(_) -> undefined. @@ -81,7 +83,7 @@ query(_) -> undefined. %%------------------------------------------------------------------------------ refs() -> - [hoconsc:ref(?MODULE, ?CONF_NS)]. + [hoconsc:ref(?MODULE, postgresql)]. create(_AuthenticatorID, Config) -> create(Config). diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_redis.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_redis.erl index ff81fd4ca..27d8c540a 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_redis.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_redis.erl @@ -44,32 +44,33 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-redis". +namespace() -> "authn". tags() -> [<<"Authentication">>]. +%% used for config check when the schema module is resolved roots() -> [ {?CONF_NS, hoconsc:mk( - hoconsc:union(fun union_member_selector/1), + hoconsc:union(fun ?MODULE:union_member_selector/1), #{} )} ]. -fields(standalone) -> +fields(redis_single) -> common_fields() ++ emqx_connector_redis:fields(single); -fields(cluster) -> +fields(redis_cluster) -> common_fields() ++ emqx_connector_redis:fields(cluster); -fields(sentinel) -> +fields(redis_sentinel) -> common_fields() ++ emqx_connector_redis:fields(sentinel). -desc(standalone) -> - ?DESC(standalone); -desc(cluster) -> +desc(redis_single) -> + ?DESC(single); +desc(redis_cluster) -> ?DESC(cluster); -desc(sentinel) -> +desc(redis_sentinel) -> ?DESC(sentinel); desc(_) -> "". @@ -93,9 +94,9 @@ cmd(_) -> undefined. refs() -> [ - hoconsc:ref(?MODULE, standalone), - hoconsc:ref(?MODULE, cluster), - hoconsc:ref(?MODULE, sentinel) + hoconsc:ref(?MODULE, redis_single), + hoconsc:ref(?MODULE, redis_cluster), + hoconsc:ref(?MODULE, redis_sentinel) ]. union_member_selector(all_union_members) -> @@ -104,11 +105,11 @@ union_member_selector({value, Value}) -> refs(Value). 
refs(#{<<"redis_type">> := <<"single">>}) -> - [hoconsc:ref(?MODULE, standalone)]; + [hoconsc:ref(?MODULE, redis_single)]; refs(#{<<"redis_type">> := <<"cluster">>}) -> - [hoconsc:ref(?MODULE, cluster)]; + [hoconsc:ref(?MODULE, redis_cluster)]; refs(#{<<"redis_type">> := <<"sentinel">>}) -> - [hoconsc:ref(?MODULE, sentinel)]; + [hoconsc:ref(?MODULE, redis_sentinel)]; refs(_) -> throw(#{ field_name => redis_type, diff --git a/apps/emqx_authn/test/emqx_authn_pgsql_SUITE.erl b/apps/emqx_authn/test/emqx_authn_pgsql_SUITE.erl index 65c783298..075ae5cb7 100644 --- a/apps/emqx_authn/test/emqx_authn_pgsql_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_pgsql_SUITE.erl @@ -107,7 +107,7 @@ t_update_with_invalid_config(_Config) -> ?assertMatch( {error, #{ kind := validation_error, - matched_type := "authn-postgresql:authentication", + matched_type := "authn:postgresql", path := "authentication.1.server", reason := required_field }}, diff --git a/apps/emqx_authn/test/emqx_authn_redis_SUITE.erl b/apps/emqx_authn/test/emqx_authn_redis_SUITE.erl index 4f95bef93..1354e06cc 100644 --- a/apps/emqx_authn/test/emqx_authn_redis_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_redis_SUITE.erl @@ -162,7 +162,7 @@ t_create_invalid_config(_Config) -> ?assertMatch( {error, #{ kind := validation_error, - matched_type := "authn-redis:standalone", + matched_type := "authn:redis_single", path := "authentication.1.server", reason := required_field }}, diff --git a/apps/emqx_authn/test/emqx_authn_schema_SUITE.erl b/apps/emqx_authn/test/emqx_authn_schema_SUITE.erl index 7e67584ac..7a766281b 100644 --- a/apps/emqx_authn/test/emqx_authn_schema_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_schema_SUITE.erl @@ -53,7 +53,7 @@ t_check_schema(_Config) -> ?assertThrow( #{ path := "authentication.1.password_hash_algorithm.name", - matched_type := "authn-builtin_db:authentication/authn-hash:simple", + matched_type := "authn:builtin_db/authn-hash:simple", reason := unable_to_convert_to_enum_symbol }, Check(ConfigNotOk) @@ -72,7 +72,7 @@ t_check_schema(_Config) -> #{ path := "authentication.1.password_hash_algorithm", reason := "algorithm_name_missing", - matched_type := "authn-builtin_db:authentication" + matched_type := "authn:builtin_db" }, Check(ConfigMissingAlgoName) ). 
diff --git a/apps/emqx_authn/test/emqx_authn_schema_tests.erl b/apps/emqx_authn/test/emqx_authn_schema_tests.erl index 25fcd28e4..9de2c2e99 100644 --- a/apps/emqx_authn/test/emqx_authn_schema_tests.erl +++ b/apps/emqx_authn/test/emqx_authn_schema_tests.erl @@ -32,7 +32,7 @@ union_member_selector_mongo_test_() -> end}, {"single", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-mongodb:standalone"}), + ?ERR(#{matched_type := "authn:redis_single"}), Check("{mongo_type: single}") ) end}, diff --git a/apps/emqx_authz/src/emqx_authz_schema.erl b/apps/emqx_authz/src/emqx_authz_schema.erl index f03ae52a8..7aaa68b62 100644 --- a/apps/emqx_authz/src/emqx_authz_schema.erl +++ b/apps/emqx_authz/src/emqx_authz_schema.erl @@ -54,7 +54,7 @@ type_names() -> file, http_get, http_post, - mnesia, + builtin_db, mongo_single, mongo_rs, mongo_sharded, @@ -93,7 +93,7 @@ fields(http_post) -> {method, method(post)}, {headers, fun headers/1} ]; -fields(mnesia) -> +fields(builtin_db) -> authz_common_fields(built_in_database); fields(mongo_single) -> authz_common_fields(mongodb) ++ @@ -191,8 +191,8 @@ desc(http_get) -> ?DESC(http_get); desc(http_post) -> ?DESC(http_post); -desc(mnesia) -> - ?DESC(mnesia); +desc(builtin_db) -> + ?DESC(builtin_db); desc(mongo_single) -> ?DESC(mongo_single); desc(mongo_rs) -> @@ -459,7 +459,7 @@ select_union_member(#{<<"type">> := <<"http">>} = Value) -> }) end; select_union_member(#{<<"type">> := <<"built_in_database">>}) -> - ?R_REF(mnesia); + ?R_REF(builtin_db); select_union_member(#{<<"type">> := Type}) -> select_union_member_loop(Type, type_names()); select_union_member(_) -> @@ -494,7 +494,10 @@ authz_fields() -> default => [], desc => ?DESC(sources), %% doc_lift is force a root level reference instead of nesting sub-structs - extra => #{doc_lift => true} + extra => #{doc_lift => true}, + %% it is recommended to configure authz sources from dashboard + %% hance the importance level for config is low + importance => ?IMPORTANCE_LOW } )} ]. diff --git a/apps/emqx_conf/src/emqx_conf_schema.erl b/apps/emqx_conf/src/emqx_conf_schema.erl index ae3691682..f3f014321 100644 --- a/apps/emqx_conf/src/emqx_conf_schema.erl +++ b/apps/emqx_conf/src/emqx_conf_schema.erl @@ -100,7 +100,7 @@ roots() -> ?R_REF("rpc"), #{ translate_to => ["gen_rpc"], - importance => ?IMPORTANCE_HIDDEN + importance => ?IMPORTANCE_LOW } )} ] ++ @@ -1288,7 +1288,7 @@ emqx_schema_high_prio_roots() -> ?R_REF("authorization"), #{ desc => ?DESC(authorization), - importance => ?IMPORTANCE_HIDDEN + importance => ?IMPORTANCE_HIGH } )}, lists:keyreplace("authorization", 1, Roots, Authz). 
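The renames in this patch only affect the internal schema struct names, and therefore the `matched_type` strings reported by config validation and the struct names used in generated docs; the configuration keys themselves are unchanged. As a hedged illustration (the field set is assumed from `emqx_connector_redis:fields(single)` and the usual Redis command example, so details may differ), the same Redis authenticator declaration now resolves to the `authn:redis_single` struct instead of `authn-redis:standalone`:

```
authentication = [
  {
    mechanism = password_based
    backend = redis
    redis_type = single
    server = "127.0.0.1:6379"
    cmd = "HMGET mqtt_user:${username} password_hash salt"
  }
]
```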
From 48d8381a6df506b850d20fcbed734fb9f83b634f Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Mon, 17 Apr 2023 21:22:28 +0200 Subject: [PATCH 038/263] docs: update doc refs --- rel/i18n/emqx_authn_jwt.hocon | 6 +++--- rel/i18n/emqx_authn_mnesia.hocon | 2 +- rel/i18n/emqx_authn_mongodb.hocon | 2 +- rel/i18n/emqx_authn_mysql.hocon | 2 +- rel/i18n/emqx_authn_pgsql.hocon | 2 +- rel/i18n/emqx_authn_redis.hocon | 2 +- rel/i18n/emqx_authz_schema.hocon | 6 +++--- rel/i18n/zh/emqx_authn_jwt.hocon | 6 +++--- rel/i18n/zh/emqx_authn_mnesia.hocon | 2 +- rel/i18n/zh/emqx_authn_mongodb.hocon | 2 +- rel/i18n/zh/emqx_authn_mysql.hocon | 2 +- rel/i18n/zh/emqx_authn_pgsql.hocon | 2 +- rel/i18n/zh/emqx_authn_redis.hocon | 2 +- rel/i18n/zh/emqx_authz_schema.hocon | 6 +++--- 14 files changed, 22 insertions(+), 22 deletions(-) diff --git a/rel/i18n/emqx_authn_jwt.hocon b/rel/i18n/emqx_authn_jwt.hocon index e28213e37..f947dcf5d 100644 --- a/rel/i18n/emqx_authn_jwt.hocon +++ b/rel/i18n/emqx_authn_jwt.hocon @@ -42,10 +42,10 @@ from.desc: from.label: """From Field""" -hmac-based.desc: +jwt_hmac.desc: """Configuration when the JWT for authentication is issued using the HMAC algorithm.""" -jwks.desc: +jwt_jwks.desc: """Configuration when JWTs used for authentication need to be fetched from the JWKS endpoint.""" keyfile.desc: @@ -54,7 +54,7 @@ keyfile.desc: keyfile.label: """Key File""" -public-key.desc: +jwt_public_key.desc: """Configuration when the JWT for authentication is issued using RSA or ECDSA algorithm.""" public_key.desc: diff --git a/rel/i18n/emqx_authn_mnesia.hocon b/rel/i18n/emqx_authn_mnesia.hocon index 4cfab9493..6770df090 100644 --- a/rel/i18n/emqx_authn_mnesia.hocon +++ b/rel/i18n/emqx_authn_mnesia.hocon @@ -1,6 +1,6 @@ emqx_authn_mnesia { -authentication.desc: +builtin_db.desc: """Configuration of authenticator using built-in database as data source.""" user_id_type.desc: diff --git a/rel/i18n/emqx_authn_mongodb.hocon b/rel/i18n/emqx_authn_mongodb.hocon index 97311f751..6d851f58f 100644 --- a/rel/i18n/emqx_authn_mongodb.hocon +++ b/rel/i18n/emqx_authn_mongodb.hocon @@ -39,7 +39,7 @@ salt_field.label: sharded-cluster.desc: """Configuration of authenticator using MongoDB (Sharded Cluster) as authentication data source.""" -standalone.desc: +single.desc: """Configuration of authenticator using MongoDB (Standalone) as authentication data source.""" } diff --git a/rel/i18n/emqx_authn_mysql.hocon b/rel/i18n/emqx_authn_mysql.hocon index 45634a4ad..5eb2d23e9 100644 --- a/rel/i18n/emqx_authn_mysql.hocon +++ b/rel/i18n/emqx_authn_mysql.hocon @@ -1,6 +1,6 @@ emqx_authn_mysql { -authentication.desc: +mysql.desc: """Configuration of authenticator using MySQL as authentication data source.""" query.desc: diff --git a/rel/i18n/emqx_authn_pgsql.hocon b/rel/i18n/emqx_authn_pgsql.hocon index a9d727785..696a17861 100644 --- a/rel/i18n/emqx_authn_pgsql.hocon +++ b/rel/i18n/emqx_authn_pgsql.hocon @@ -1,6 +1,6 @@ emqx_authn_pgsql { -authentication.desc: +postgresql.desc: """Configuration of authenticator using PostgreSQL as authentication data source.""" query.desc: diff --git a/rel/i18n/emqx_authn_redis.hocon b/rel/i18n/emqx_authn_redis.hocon index f39d54061..e8d98b9a1 100644 --- a/rel/i18n/emqx_authn_redis.hocon +++ b/rel/i18n/emqx_authn_redis.hocon @@ -12,7 +12,7 @@ cmd.label: sentinel.desc: """Configuration of authenticator using Redis (Sentinel) as authentication data source.""" -standalone.desc: +single.desc: """Configuration of authenticator using Redis (Standalone) as authentication data source.""" } 
diff --git a/rel/i18n/emqx_authz_schema.hocon b/rel/i18n/emqx_authz_schema.hocon index 9e5339b2c..5318eb769 100644 --- a/rel/i18n/emqx_authz_schema.hocon +++ b/rel/i18n/emqx_authz_schema.hocon @@ -120,11 +120,11 @@ node_error.desc: node_error.label: """Error in Node""" -mnesia.desc: +builtin_db.desc: """Authorization using a built-in database (mnesia).""" -mnesia.label: -"""mnesia""" +builtin_db.label: +"""Builtin Database""" enable.desc: """Set to true or false to disable this ACL provider""" diff --git a/rel/i18n/zh/emqx_authn_jwt.hocon b/rel/i18n/zh/emqx_authn_jwt.hocon index 2aa27c1de..76b999101 100644 --- a/rel/i18n/zh/emqx_authn_jwt.hocon +++ b/rel/i18n/zh/emqx_authn_jwt.hocon @@ -42,10 +42,10 @@ from.desc: from.label: """源字段""" -hmac-based.desc: +jwt_hmac.desc: """用于认证的 JWT 使用 HMAC 算法签发时的配置。""" -jwks.desc: +jwt_jwks.desc: """用于认证的 JWTs 需要从 JWKS 端点获取时的配置。""" keyfile.desc: @@ -54,7 +54,7 @@ keyfile.desc: keyfile.label: """私钥文件""" -public-key.desc: +jwt_public_key.desc: """用于认证的 JWT 使用 RSA 或 ECDSA 算法签发时的配置。""" public_key.desc: diff --git a/rel/i18n/zh/emqx_authn_mnesia.hocon b/rel/i18n/zh/emqx_authn_mnesia.hocon index 1ba394627..78fc96c75 100644 --- a/rel/i18n/zh/emqx_authn_mnesia.hocon +++ b/rel/i18n/zh/emqx_authn_mnesia.hocon @@ -1,6 +1,6 @@ emqx_authn_mnesia { -authentication.desc: +builtin_db.desc: """使用内置数据库作为认证数据源的认证器的配置项。""" user_id_type.desc: diff --git a/rel/i18n/zh/emqx_authn_mongodb.hocon b/rel/i18n/zh/emqx_authn_mongodb.hocon index 01419e2b9..949322a76 100644 --- a/rel/i18n/zh/emqx_authn_mongodb.hocon +++ b/rel/i18n/zh/emqx_authn_mongodb.hocon @@ -39,7 +39,7 @@ salt_field.label: sharded-cluster.desc: """使用 MongoDB (Sharded Cluster) 作为认证数据源的认证器的配置项。""" -standalone.desc: +single.desc: """使用 MongoDB (Standalone) 作为认证数据源的认证器的配置项。""" } diff --git a/rel/i18n/zh/emqx_authn_mysql.hocon b/rel/i18n/zh/emqx_authn_mysql.hocon index e718ad723..c8b72d720 100644 --- a/rel/i18n/zh/emqx_authn_mysql.hocon +++ b/rel/i18n/zh/emqx_authn_mysql.hocon @@ -1,6 +1,6 @@ emqx_authn_mysql { -authentication.desc: +mysql.desc: """使用 MySQL 作为认证数据源的认证器的配置项。""" query.desc: diff --git a/rel/i18n/zh/emqx_authn_pgsql.hocon b/rel/i18n/zh/emqx_authn_pgsql.hocon index 97bf608d2..b9d47828b 100644 --- a/rel/i18n/zh/emqx_authn_pgsql.hocon +++ b/rel/i18n/zh/emqx_authn_pgsql.hocon @@ -1,6 +1,6 @@ emqx_authn_pgsql { -authentication.desc: +postgresql.desc: """使用 PostgreSQL 作为认证数据源的认证器的配置项。""" query.desc: diff --git a/rel/i18n/zh/emqx_authn_redis.hocon b/rel/i18n/zh/emqx_authn_redis.hocon index e3d6b9d96..58137f195 100644 --- a/rel/i18n/zh/emqx_authn_redis.hocon +++ b/rel/i18n/zh/emqx_authn_redis.hocon @@ -12,7 +12,7 @@ cmd.label: sentinel.desc: """使用 Redis (Sentinel) 作为认证数据源的认证器的配置项。""" -standalone.desc: +single.desc: """使用 Redis (Standalone) 作为认证数据源的认证器的配置项。""" } diff --git a/rel/i18n/zh/emqx_authz_schema.hocon b/rel/i18n/zh/emqx_authz_schema.hocon index 3dd6d1c01..f0f973a55 100644 --- a/rel/i18n/zh/emqx_authz_schema.hocon +++ b/rel/i18n/zh/emqx_authz_schema.hocon @@ -120,11 +120,11 @@ node_error.desc: node_error.label: """节点产生的错误""" -mnesia.desc: +builtin_db.desc: """使用内部数据库授权(mnesia)。""" -mnesia.label: -"""mnesia""" +builtin_db.label: +"""Buitin Database""" enable.desc: """设为 truefalse 以启用或禁用此访问控制数据源""" From cd1197925f8bee00c36bcfc4f95884e2bcc93962 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Tue, 18 Apr 2023 09:14:02 +0200 Subject: [PATCH 039/263] test: fix unit tests after type names changed --- .../src/simple_authn/emqx_authn_jwt.erl | 2 +- .../test/emqx_authn_schema_tests.erl | 22 
+++++++++---------- .../test/emqx_ee_bridge_influxdb_tests.erl | 12 ++++++++-- 3 files changed, 22 insertions(+), 14 deletions(-) diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_jwt.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_jwt.erl index 92c6970cc..0df9014b8 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_jwt.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_jwt.erl @@ -200,7 +200,7 @@ boolean(<<"false">>) -> false; boolean(Other) -> Other. select_ref(true, _) -> - [hoconsc:ref(?MODULE, 'jwks')]; + [hoconsc:ref(?MODULE, 'jwt_jwks')]; select_ref(false, #{<<"public_key">> := _}) -> [hoconsc:ref(?MODULE, jwt_public_key)]; select_ref(false, _) -> diff --git a/apps/emqx_authn/test/emqx_authn_schema_tests.erl b/apps/emqx_authn/test/emqx_authn_schema_tests.erl index 9de2c2e99..622655b2d 100644 --- a/apps/emqx_authn/test/emqx_authn_schema_tests.erl +++ b/apps/emqx_authn/test/emqx_authn_schema_tests.erl @@ -32,19 +32,19 @@ union_member_selector_mongo_test_() -> end}, {"single", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn:redis_single"}), + ?ERR(#{matched_type := "authn:mongo_single"}), Check("{mongo_type: single}") ) end}, {"replica-set", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-mongodb:replica-set"}), + ?ERR(#{matched_type := "authn:mongo_rs"}), Check("{mongo_type: rs}") ) end}, {"sharded", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-mongodb:sharded-cluster"}), + ?ERR(#{matched_type := "authn:mongo_sharded"}), Check("{mongo_type: sharded}") ) end} @@ -61,19 +61,19 @@ union_member_selector_jwt_test_() -> end}, {"jwks", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-jwt:jwks"}), + ?ERR(#{matched_type := "authn:jwt_jwks"}), Check("{use_jwks = true}") ) end}, {"publick-key", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-jwt:public-key"}), + ?ERR(#{matched_type := "authn:jwt_public_key"}), Check("{use_jwks = false, public_key = 1}") ) end}, {"hmac-based", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-jwt:hmac-based"}), + ?ERR(#{matched_type := "authn:jwt_hmac"}), Check("{use_jwks = false}") ) end} @@ -90,19 +90,19 @@ union_member_selector_redis_test_() -> end}, {"single", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-redis:standalone"}), + ?ERR(#{matched_type := "authn:redis_single"}), Check("{redis_type = single}") ) end}, {"cluster", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-redis:cluster"}), + ?ERR(#{matched_type := "authn:redis_cluster"}), Check("{redis_type = cluster}") ) end}, {"sentinel", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-redis:sentinel"}), + ?ERR(#{matched_type := "authn:redis_sentinel"}), Check("{redis_type = sentinel}") ) end} @@ -119,13 +119,13 @@ union_member_selector_http_test_() -> end}, {"get", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-http:get"}), + ?ERR(#{matched_type := "authn:http_get"}), Check("{method = get}") ) end}, {"post", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-http:post"}), + ?ERR(#{matched_type := "authn:http_post"}), Check("{method = post}") ) end} diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_influxdb_tests.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_influxdb_tests.erl index ce3a0b06f..1e065f6c8 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_influxdb_tests.erl +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_influxdb_tests.erl @@ -5,8 +5,6 @@ -include_lib("eunit/include/eunit.hrl"). --import(emqx_ee_bridge_influxdb, [to_influx_lines/1]). 
- -define(INVALID_LINES, [ " ", " \n", @@ -326,3 +324,13 @@ test_pairs(PairsList) -> join(Sep, LinesList) -> lists:flatten(lists:join(Sep, LinesList)). + +to_influx_lines(RawLines) -> + OldLevel = emqx_logger:get_primary_log_level(), + try + %% mute error logs from this call + emqx_logger:set_primary_log_level(none), + emqx_ee_bridge_influxdb:to_influx_lines(RawLines) + after + emqx_logger:set_primary_log_level(OldLevel) + end. From 61f3e62ba8b2682fc133afa856e61a99d5b956fe Mon Sep 17 00:00:00 2001 From: firest Date: Tue, 18 Apr 2023 15:49:34 +0800 Subject: [PATCH 040/263] chore: update changes --- changes/ce/perf-10430.en.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 changes/ce/perf-10430.en.md diff --git a/changes/ce/perf-10430.en.md b/changes/ce/perf-10430.en.md new file mode 100644 index 000000000..e01d8ba64 --- /dev/null +++ b/changes/ce/perf-10430.en.md @@ -0,0 +1,2 @@ +Simplify the configuration of the `retainer` feature. +- Mark `flow_control` as non-importance field. From 29584ca7215937578d969ce7dc13742df19f193d Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Fri, 14 Apr 2023 18:23:15 +0200 Subject: [PATCH 041/263] feat: add mongo_date functions to the rule engine This commit adds mong_date built-in functions to the rule engine SQL-like language. Corresponding functions already existed in EMQX 4.4 and this commit makes sure that EMQX 5.X also has these functions. Fixes: https://emqx.atlassian.net/browse/EMQX-9244 --- apps/emqx_rule_engine/src/emqx_rule_funcs.erl | 28 ++++++ changes/ee/feat-10408.en.md | 1 + .../test/emqx_ee_bridge_mongodb_SUITE.erl | 75 +++++++++++++++- .../src/emqx_ee_connector_mongodb.erl | 88 ++++++++++++++++++- 4 files changed, 188 insertions(+), 4 deletions(-) create mode 100644 changes/ee/feat-10408.en.md diff --git a/apps/emqx_rule_engine/src/emqx_rule_funcs.erl b/apps/emqx_rule_engine/src/emqx_rule_funcs.erl index 81435d9ac..dfb79a40c 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_funcs.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_funcs.erl @@ -230,6 +230,16 @@ date_to_unix_ts/4 ]). +%% MongoDB specific date functions. These functions return a date tuple. The +%% MongoDB bridge converts such date tuples to a MongoDB date type. The +%% following functions are therefore only useful for rules with at least one +%% MongoDB action. +-export([ + mongo_date/0, + mongo_date/1, + mongo_date/2 +]). + %% Proc Dict Func -export([ proc_dict_get/1, @@ -1135,3 +1145,21 @@ function_literal(Fun, [FArg | Args]) when is_atom(Fun), is_list(Args) -> ) ++ ")"; function_literal(Fun, Args) -> {invalid_func, {Fun, Args}}. + +mongo_date() -> + erlang:timestamp(). + +mongo_date(MillisecondsTimestamp) -> + convert_timestamp(MillisecondsTimestamp). + +mongo_date(Timestamp, Unit) -> + InsertedTimeUnit = time_unit(Unit), + ScaledEpoch = erlang:convert_time_unit(Timestamp, InsertedTimeUnit, millisecond), + convert_timestamp(ScaledEpoch). + +convert_timestamp(MillisecondsTimestamp) -> + MicroTimestamp = MillisecondsTimestamp * 1000, + MegaSecs = MicroTimestamp div 1000_000_000_000, + Secs = MicroTimestamp div 1000_000 - MegaSecs * 1000_000, + MicroSecs = MicroTimestamp rem 1000_000, + {MegaSecs, Secs, MicroSecs}. diff --git a/changes/ee/feat-10408.en.md b/changes/ee/feat-10408.en.md new file mode 100644 index 000000000..bc18b8a80 --- /dev/null +++ b/changes/ee/feat-10408.en.md @@ -0,0 +1 @@ +The rule engine SQL-like language has got three more built-in functions for creating values of the MongoDB date type. 
These functions are only useful for rules with at least one MongoDB bridge action. diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl index 0959e3c78..1c3c4a2c3 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl @@ -27,7 +27,8 @@ group_tests() -> t_setup_via_config_and_publish, t_setup_via_http_api_and_publish, t_payload_template, - t_collection_template + t_collection_template, + t_mongo_date_rule_engine_functions ]. groups() -> @@ -140,10 +141,11 @@ start_apps() -> %% we want to make sure they are loaded before %% ekka start in emqx_common_test_helpers:start_apps/1 emqx_common_test_helpers:render_and_load_app_config(emqx_conf), - ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]). + ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_rule_engine, emqx_bridge]). ensure_loaded() -> _ = application:load(emqx_ee_bridge), + _ = application:load(emqtt), _ = emqx_ee_bridge:module_info(), ok. @@ -282,6 +284,27 @@ find_all(Config) -> ResourceID = emqx_bridge_resource:resource_id(Type, Name), emqx_resource:simple_sync_query(ResourceID, {find, Collection, #{}, #{}}). +find_all_wait_until_non_empty(Config) -> + wait_until( + fun() -> + case find_all(Config) of + {ok, []} -> false; + _ -> true + end + end, + 5_000 + ), + find_all(Config). + +wait_until(Fun, Timeout) when Timeout >= 0 -> + case Fun() of + true -> + ok; + false -> + timer:sleep(100), + wait_until(Fun, Timeout - 100) + end. + send_message(Config, Payload) -> Name = ?config(mongo_name, Config), Type = mongo_type_bin(?config(mongo_type, Config)), @@ -376,3 +399,51 @@ t_collection_template(Config) -> find_all(Config) ), ok. + +t_mongo_date_rule_engine_functions(Config) -> + {ok, _} = + create_bridge( + Config, + #{ + <<"payload_template">> => + <<"{\"date_0\": ${date_0}, \"date_1\": ${date_1}, \"date_2\": ${date_2}}">> + } + ), + Type = mongo_type_bin(?config(mongo_type, Config)), + Name = ?config(mongo_name, Config), + SQL = + "SELECT mongo_date() as date_0, mongo_date(1000) as date_1, mongo_date(1, 'second') as date_2 FROM " + "\"t_mongo_date_rule_engine_functions/topic\"", + %% Remove rule if it already exists + RuleId = <<"rule:t_mongo_date_rule_engine_functions">>, + emqx_rule_engine:delete_rule(RuleId), + BridgeId = emqx_bridge_resource:bridge_id(Type, Name), + {ok, Rule} = emqx_rule_engine:create_rule( + #{ + id => <<"rule:t_mongo_date_rule_engine_functions">>, + sql => SQL, + actions => [ + BridgeId, + #{function => console} + ], + description => <<"to mongo bridge">> + } + ), + %% Send a message to topic + {ok, Client} = emqtt:start_link([{clientid, <<"pub-02">>}, {proto_ver, v5}]), + {ok, _} = emqtt:connect(Client), + emqtt:publish(Client, <<"t_mongo_date_rule_engine_functions/topic">>, #{}, <<"{\"x\":1}">>, [ + {qos, 2} + ]), + emqtt:stop(Client), + ?assertMatch( + {ok, [ + #{ + <<"date_0">> := {_, _, _}, + <<"date_1">> := {0, 1, 0}, + <<"date_2">> := {0, 1, 0} + } + ]}, + find_all_wait_until_non_empty(Config) + ), + ok. 
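The expected values in the test above follow from `convert_timestamp/1`: 1000 milliseconds after the epoch corresponds to the Erlang timestamp tuple `{0, 1, 0}`. Below is a minimal sketch of the three new built-ins called directly; the module-level calls and the binary unit argument are assumptions made for illustration, and inside a rule they are written exactly as in the SELECT statement above.

```erlang
%% Sketch only; in rule SQL: mongo_date(), mongo_date(1000), mongo_date(1, 'second').
emqx_rule_funcs:mongo_date().                %% "now" as an erlang:timestamp() tuple
emqx_rule_funcs:mongo_date(1000).            %% {0, 1, 0} -- 1000 ms since the epoch
emqx_rule_funcs:mongo_date(1, <<"second">>). %% {0, 1, 0} -- same instant, unit given explicitly
```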
diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_mongodb.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_mongodb.erl index 4e7adcd6e..90c422643 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_mongodb.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_mongodb.erl @@ -83,5 +83,89 @@ render_message(undefined = _PayloadTemplate, Message) -> render_message(PayloadTemplate, Message) -> %% Note: mongo expects a map as a document, so the rendered result %% must be JSON-serializable - Rendered = emqx_plugin_libs_rule:proc_tmpl(PayloadTemplate, Message), - emqx_utils_json:decode(Rendered, [return_maps]). + format_data(PayloadTemplate, Message). + +%% The following function was originally copied over from +%% https://github.com/emqx/emqx-enterprise/commit/50e3628129720f13f544053600ca1502731e29e0. +%% The rule engine has support for producing fields that are date tuples +%% (produced by the SQL language's built-in functions mongo_date/0, +%% mongo_date/1 and mongo_date/2) which the MongoDB driver recognizes and +%% converts to the MongoDB ISODate type +%% (https://www.compose.com/articles/understanding-dates-in-compose-mongodb/). +%% For this to work we have to replace the tuple values with references, make +%% an instance of the template, convert the instance to map with the help of +%% emqx_utils_json:decode and then finally replace the references with the +%% corresponding tuples in the resulting map. +format_data(PayloadTks, Msg) -> + % Check the Message for any tuples that need to be extracted before running the template though a json parser + PreparedTupleMap = create_mapping_of_references_to_tuple_values(Msg), + case maps:size(PreparedTupleMap) of + % If no tuples were found simply proceed with the json decoding and be done with it + 0 -> + emqx_utils_json:decode(emqx_plugin_libs_rule:proc_tmpl(PayloadTks, Msg), [return_maps]); + _ -> + % If tuples were found, replace the tuple values with the references created, run + % the modified message through the json parser, and then at the end replace the + % references with the actual tuple values. + ProcessedMessage = replace_message_values_with_references(Msg, PreparedTupleMap), + DecodedMap = emqx_utils_json:decode( + emqx_plugin_libs_rule:proc_tmpl(PayloadTks, ProcessedMessage), [return_maps] + ), + populate_map_with_tuple_values(PreparedTupleMap, DecodedMap) + end. + +replace_message_values_with_references(RawMessage, TupleMap) -> + % Iterate over every created reference/value pair and inject the reference into the message + maps:fold( + fun(Reference, OriginalValue, OriginalMessage) -> + % Iterate over the Message, which is a map, and look for the element which + % matches the Value in the map which holds the references/original values and replace + % with the reference + maps:fold( + fun(Key, Value, NewMap) -> + case Value == OriginalValue of + true -> + %% Wrap the reference in a string to make it JSON-serializable + StringRef = io_lib:format("\"~s\"", [Reference]), + WrappedRef = erlang:iolist_to_binary(StringRef), + maps:put(Key, WrappedRef, NewMap); + false -> + maps:put(Key, Value, NewMap) + end + end, + #{}, + OriginalMessage + ) + end, + RawMessage, + TupleMap + ). + +create_mapping_of_references_to_tuple_values(Message) -> + maps:fold( + fun + (_Key, Value, TupleMap) when is_tuple(Value) -> + Ref0 = emqx_guid:to_hexstr(emqx_guid:gen()), + Ref = <<"MONGO_DATE_REF_", Ref0/binary>>, + maps:put(Ref, Value, TupleMap); + (_key, _value, TupleMap) -> + TupleMap + end, + #{}, + Message + ). 
+ +populate_map_with_tuple_values(TupleMap, MapToMap) -> + MappingFun = + fun + (_Key, Value) when is_map(Value) -> + populate_map_with_tuple_values(TupleMap, Value); + (_Key, Value) -> + case maps:is_key(Value, TupleMap) of + true -> + maps:get(Value, TupleMap); + false -> + Value + end + end, + maps:map(MappingFun, MapToMap). From 88360113106fded2af1d11899d4cb313616fbe5d Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Mon, 17 Apr 2023 18:08:03 +0200 Subject: [PATCH 042/263] docs: better changelog entry for mongo_date functions Co-authored-by: Thales Macedo Garitezi --- changes/ee/feat-10408.en.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changes/ee/feat-10408.en.md b/changes/ee/feat-10408.en.md index bc18b8a80..75cc4b945 100644 --- a/changes/ee/feat-10408.en.md +++ b/changes/ee/feat-10408.en.md @@ -1 +1 @@ -The rule engine SQL-like language has got three more built-in functions for creating values of the MongoDB date type. These functions are only useful for rules with at least one MongoDB bridge action. +The rule engine SQL-like language has got three more built-in functions for creating values of the MongoDB date type. These functions are useful for rules with MongoDB bridge actions only and not supported in other actions. From d0f30d2f7ced0c0e89d056dd3854ad8f52b52123 Mon Sep 17 00:00:00 2001 From: Zhongwen Deng Date: Tue, 18 Apr 2023 16:13:17 +0800 Subject: [PATCH 043/263] feat: delete trace/limiter config's hot update api --- apps/emqx_management/src/emqx_mgmt_api_configs.erl | 2 -- 1 file changed, 2 deletions(-) diff --git a/apps/emqx_management/src/emqx_mgmt_api_configs.erl b/apps/emqx_management/src/emqx_mgmt_api_configs.erl index c94dc17b6..41bca45cf 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_configs.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_configs.erl @@ -42,8 +42,6 @@ <<"alarm">>, <<"sys_topics">>, <<"sysmon">>, - <<"limiter">>, - <<"trace">>, <<"log">>, <<"persistent_session_store">>, <<"zones">> From a6688ed07a7f4eb139f4535ce2df7f81ebf243f7 Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Tue, 18 Apr 2023 10:24:39 +0200 Subject: [PATCH 044/263] refactor(emqx_management): Use emqx:running_node function --- apps/emqx_management/src/emqx_mgmt_api_stats.erl | 9 --------- 1 file changed, 9 deletions(-) diff --git a/apps/emqx_management/src/emqx_mgmt_api_stats.erl b/apps/emqx_management/src/emqx_mgmt_api_stats.erl index 19857f267..5f4bbce65 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_stats.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_stats.erl @@ -145,12 +145,3 @@ list(get, #{query_string := Qs}) -> %%%============================================================================================== %% Internal - -running_nodes() -> - Nodes = erlang:nodes([visible, this]), - RpcResults = emqx_proto_v2:are_running(Nodes), - [ - Node - || {Node, IsRunning} <- lists:zip(Nodes, RpcResults), - IsRunning =:= {ok, true} - ]. 
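With the helper above removed, every call site in the management, trace, stats and Prometheus code goes through the `emqx` application's wrappers rather than calling `mria` or node RPC directly. Those wrappers are not part of this patch series; the sketch below only records the assumed shape of the API these call sites rely on, and the delegation to `mria` is an assumption.

```erlang
%% Assumed shape of the emqx-side API (not included in these patches):
running_nodes() ->
    mria:running_nodes().

cluster_nodes(Spec) ->
    %% Spec as used above: running | stopped
    mria:cluster_nodes(Spec).
```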
From 8a3fccb3309b64e6ecf562580281d7262e480ce0 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Tue, 18 Apr 2023 10:32:37 +0200 Subject: [PATCH 045/263] style: fix variable name style --- lib-ee/emqx_ee_connector/src/emqx_ee_connector_mongodb.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_mongodb.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_mongodb.erl index 90c422643..59f763904 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_mongodb.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_mongodb.erl @@ -148,7 +148,7 @@ create_mapping_of_references_to_tuple_values(Message) -> Ref0 = emqx_guid:to_hexstr(emqx_guid:gen()), Ref = <<"MONGO_DATE_REF_", Ref0/binary>>, maps:put(Ref, Value, TupleMap); - (_key, _value, TupleMap) -> + (_Key, _Value, TupleMap) -> TupleMap end, #{}, From f3614b2c6577108461d7d8f44cc5281a4fb1253e Mon Sep 17 00:00:00 2001 From: firest Date: Mon, 10 Apr 2023 10:59:46 +0800 Subject: [PATCH 046/263] chore: add README for slow subscriptions --- apps/emqx_slow_subs/README.md | 41 +++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 apps/emqx_slow_subs/README.md diff --git a/apps/emqx_slow_subs/README.md b/apps/emqx_slow_subs/README.md new file mode 100644 index 000000000..e4454a56c --- /dev/null +++ b/apps/emqx_slow_subs/README.md @@ -0,0 +1,41 @@ +# EMQX Slow Subscriptions + +This application can calculate the latency (time spent) of the message to be processed and transmitted since it arrives at EMQX. + +If the latency exceeds a specified threshold, this application will add the subscriber and topic information to a slow subscriptions list or update the existing record. + +# How To Use + +You can add the below section into `emqx.conf` to enable this application + + +```yaml +slow_subs { + enable = true + threshold = "500ms" + expire_interval = "300s" + top_k_num = 10 + stats_type = whole +} +``` + +**threshold**: Latency threshold for statistics, only messages with latency exceeding this value will be collected. + +Minimum value: 100ms +Default value: 500ms + +**expire_interval**: Eviction time of the record, will start the counting since the creation of the record, and the records that are not triggered again within this specified period will be removed from the rank list. + +Default: 300s + +**top_k_num**: Maximum number of records in the slow subscription statistics record table. 
+ +Maximum value: 1,000 +Default value: 10 + +**stats_type**: Calculation methods of the latency, which are +- **whole**: From the time the message arrives at EMQX until the message transmission completes +- **internal**: From when the message arrives at EMQX until when EMQX starts delivering the message +- **response**: From the time EMQX starts delivering the message until the message transmission completes + +Default value: whole From 69e334a77d9226c7c673e987008f53babc0b396e Mon Sep 17 00:00:00 2001 From: firest Date: Tue, 18 Apr 2023 17:25:11 +0800 Subject: [PATCH 047/263] chore: reorganize the README to follow template --- apps/emqx_slow_subs/README.md | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/apps/emqx_slow_subs/README.md b/apps/emqx_slow_subs/README.md index e4454a56c..cfc87f775 100644 --- a/apps/emqx_slow_subs/README.md +++ b/apps/emqx_slow_subs/README.md @@ -4,11 +4,12 @@ This application can calculate the latency (time spent) of the message to be pro If the latency exceeds a specified threshold, this application will add the subscriber and topic information to a slow subscriptions list or update the existing record. -# How To Use +More introduction: [Slow Subscriptions](https://www.emqx.io/docs/en/v5.0/observability/slow-subscribers-statistics.html) + +# Usage You can add the below section into `emqx.conf` to enable this application - ```yaml slow_subs { enable = true @@ -19,6 +20,8 @@ slow_subs { } ``` +# Configurations + **threshold**: Latency threshold for statistics, only messages with latency exceeding this value will be collected. Minimum value: 100ms @@ -28,7 +31,7 @@ Default value: 500ms Default: 300s -**top_k_num**: Maximum number of records in the slow subscription statistics record table. +**top_k_num**: Maximum number of records in the slow subscription statistics record table. Maximum value: 1,000 Default value: 10 @@ -39,3 +42,6 @@ Default value: 10 - **response**: From the time EMQX starts delivering the message until the message transmission completes Default value: whole + +# Contributing - [Mandatory] +Please see our [contributing.md](../../CONTRIBUTING.md). From 32e0a03c163f7d351bb4b8bd1bc72ddec3f22d89 Mon Sep 17 00:00:00 2001 From: Kinplemelon Date: Tue, 18 Apr 2023 17:51:09 +0800 Subject: [PATCH 048/263] chore: upgrade dashboard to v1.2.2 for ce --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index fe75b01bc..c6874234d 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2 export EMQX_DEFAULT_RUNNER = debian:11-slim export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh) export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh) -export EMQX_DASHBOARD_VERSION ?= v1.2.1 +export EMQX_DASHBOARD_VERSION ?= v1.2.2 export EMQX_EE_DASHBOARD_VERSION ?= e1.0.5 export EMQX_REL_FORM ?= tgz export QUICER_DOWNLOAD_FROM_RELEASE = 1 From 21e19a33ce5bf0c240c82509454ae8467a6739ae Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Wed, 12 Apr 2023 16:37:51 +0300 Subject: [PATCH 049/263] feat(respool): switch to `emqx_resource_pool` Which was previously known as `emqx_plugin_libs_pool`. This is part of the effort to get rid of `emqx_plugin_libs` application. 
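For each connector touched below the rewrite is mechanical: `emqx_plugin_libs_pool` becomes `emqx_resource_pool`; `start_pool/3`, `stop_pool/1` and `health_check_ecpool_workers/2,3` become `start/3`, `stop/1` and `health_check_workers/2,3`; the resource instance id is used directly as the pool name, dropping the `pool_name/1` indirection; and the connector state key `poolname` becomes `pool_name`. A sketch of the pattern, with hypothetical function names, as it appears in the hunks that follow:

```erlang
%% Hypothetical example functions illustrating the before/after call pattern.
start_example(InstId, Opts) ->
    %% was: PoolName = emqx_plugin_libs_pool:pool_name(InstId),
    %%      emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts)
    case emqx_resource_pool:start(InstId, ?MODULE, Opts) of
        ok -> {ok, #{pool_name => InstId}};
        {error, Reason} -> {error, Reason}
    end.

stop_example(#{pool_name := PoolName}) ->
    %% was: emqx_plugin_libs_pool:stop_pool(PoolName)
    emqx_resource_pool:stop(PoolName).
```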
--- .../emqx_authn_jwks_connector.erl | 22 +++-- .../src/emqx_connector_http.erl | 5 +- .../src/emqx_connector_ldap.erl | 11 ++- .../src/emqx_connector_mongo.erl | 22 ++--- .../src/emqx_connector_mysql.erl | 27 +++--- .../src/emqx_connector_pgsql.erl | 37 ++++---- .../src/emqx_connector_redis.erl | 21 ++--- .../test/emqx_connector_mongo_SUITE.erl | 40 ++++----- .../test/emqx_connector_mysql_SUITE.erl | 44 ++++----- .../test/emqx_connector_pgsql_SUITE.erl | 40 ++++----- .../test/emqx_connector_redis_SUITE.erl | 40 ++++----- .../src/emqx_resource_pool.erl} | 28 +++--- .../test/emqx_ee_bridge_gcp_pubsub_SUITE.erl | 10 +-- .../test/emqx_ee_bridge_mysql_SUITE.erl | 2 +- .../src/emqx_ee_connector_cassa.erl | 36 ++++---- .../src/emqx_ee_connector_clickhouse.erl | 79 +++++++--------- .../src/emqx_ee_connector_dynamo.erl | 14 +-- .../src/emqx_ee_connector_gcp_pubsub.erl | 90 +++++++------------ .../src/emqx_ee_connector_sqlserver.erl | 26 +++--- .../src/emqx_ee_connector_tdengine.erl | 14 +-- .../test/emqx_ee_connector_cassa_SUITE.erl | 38 ++++---- .../emqx_ee_connector_clickhouse_SUITE.erl | 38 ++++---- 22 files changed, 312 insertions(+), 372 deletions(-) rename apps/{emqx_plugin_libs/src/emqx_plugin_libs_pool.erl => emqx_resource/src/emqx_resource_pool.erl} (82%) diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl index 1fdd7cef7..fe0349b4a 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl @@ -35,18 +35,17 @@ callback_mode() -> always_sync. on_start(InstId, Opts) -> - PoolName = emqx_plugin_libs_pool:pool_name(InstId), PoolOpts = [ {pool_size, maps:get(pool_size, Opts, ?DEFAULT_POOL_SIZE)}, {connector_opts, Opts} ], - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, PoolOpts) of - ok -> {ok, #{pool_name => PoolName}}; + case emqx_resource_pool:start(InstId, ?MODULE, PoolOpts) of + ok -> {ok, #{pool_name => InstId}}; {error, Reason} -> {error, Reason} end. on_stop(_InstId, #{pool_name := PoolName}) -> - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). on_query(InstId, get_jwks, #{pool_name := PoolName}) -> Result = ecpool:pick_and_do(PoolName, {emqx_authn_jwks_client, get_jwks, []}, no_handover), @@ -72,18 +71,17 @@ on_query(_InstId, {update, Opts}, #{pool_name := PoolName}) -> ok. on_get_status(_InstId, #{pool_name := PoolName}) -> - Func = - fun(Conn) -> - case emqx_authn_jwks_client:get_jwks(Conn) of - {ok, _} -> true; - _ -> false - end - end, - case emqx_plugin_libs_pool:health_check_ecpool_workers(PoolName, Func) of + case emqx_resource_pool:health_check_workers(PoolName, fun health_check/1) of true -> connected; false -> disconnected end. +health_check(Conn) -> + case emqx_authn_jwks_client:get_jwks(Conn) of + {ok, _} -> true; + _ -> false + end. + connect(Opts) -> ConnectorOpts = proplists:get_value(connector_opts, Opts), emqx_authn_jwks_client:start_link(ConnectorOpts). 
diff --git a/apps/emqx_connector/src/emqx_connector_http.erl b/apps/emqx_connector/src/emqx_connector_http.erl index ef2e11eb7..411df7899 100644 --- a/apps/emqx_connector/src/emqx_connector_http.erl +++ b/apps/emqx_connector/src/emqx_connector_http.erl @@ -231,9 +231,8 @@ on_start( {transport_opts, NTransportOpts}, {enable_pipelining, maps:get(enable_pipelining, Config, ?DEFAULT_PIPELINE_SIZE)} ], - PoolName = emqx_plugin_libs_pool:pool_name(InstId), State = #{ - pool_name => PoolName, + pool_name => InstId, pool_type => PoolType, host => Host, port => Port, @@ -241,7 +240,7 @@ on_start( base_path => BasePath, request => preprocess_request(maps:get(request, Config, undefined)) }, - case ehttpc_sup:start_pool(PoolName, PoolOpts) of + case ehttpc_sup:start_pool(InstId, PoolOpts) of {ok, _} -> {ok, State}; {error, {already_started, _}} -> {ok, State}; {error, Reason} -> {error, Reason} diff --git a/apps/emqx_connector/src/emqx_connector_ldap.erl b/apps/emqx_connector/src/emqx_connector_ldap.erl index ac2af301e..e2121de22 100644 --- a/apps/emqx_connector/src/emqx_connector_ldap.erl +++ b/apps/emqx_connector/src/emqx_connector_ldap.erl @@ -87,20 +87,19 @@ on_start( {pool_size, PoolSize}, {auto_reconnect, ?AUTO_RECONNECT_INTERVAL} ], - PoolName = emqx_plugin_libs_pool:pool_name(InstId), - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ SslOpts) of - ok -> {ok, #{poolname => PoolName}}; + case emqx_resource_pool:start(InstId, ?MODULE, Opts ++ SslOpts) of + ok -> {ok, #{pool_name => InstId}}; {error, Reason} -> {error, Reason} end. -on_stop(InstId, #{poolname := PoolName}) -> +on_stop(InstId, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping_ldap_connector", connector => InstId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). -on_query(InstId, {search, Base, Filter, Attributes}, #{poolname := PoolName} = State) -> +on_query(InstId, {search, Base, Filter, Attributes}, #{pool_name := PoolName} = State) -> Request = {Base, Filter, Attributes}, ?TRACE( "QUERY", diff --git a/apps/emqx_connector/src/emqx_connector_mongo.erl b/apps/emqx_connector/src/emqx_connector_mongo.erl index a5873bcf6..a65a32842 100644 --- a/apps/emqx_connector/src/emqx_connector_mongo.erl +++ b/apps/emqx_connector/src/emqx_connector_mongo.erl @@ -182,12 +182,11 @@ on_start( {options, init_topology_options(maps:to_list(Topology), [])}, {worker_options, init_worker_options(maps:to_list(NConfig), SslOpts)} ], - PoolName = emqx_plugin_libs_pool:pool_name(InstId), Collection = maps:get(collection, Config, <<"mqtt">>), - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts) of + case emqx_resource_pool:start(InstId, ?MODULE, Opts) of ok -> {ok, #{ - poolname => PoolName, + pool_name => InstId, type => Type, collection => Collection }}; @@ -195,17 +194,17 @@ on_start( {error, Reason} end. -on_stop(InstId, #{poolname := PoolName}) -> +on_stop(InstId, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping_mongodb_connector", connector => InstId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). 
on_query( InstId, {send_message, Document}, - #{poolname := PoolName, collection := Collection} = State + #{pool_name := PoolName, collection := Collection} = State ) -> Request = {insert, Collection, Document}, ?TRACE( @@ -234,7 +233,7 @@ on_query( on_query( InstId, {Action, Collection, Filter, Projector}, - #{poolname := PoolName} = State + #{pool_name := PoolName} = State ) -> Request = {Action, Collection, Filter, Projector}, ?TRACE( @@ -263,8 +262,7 @@ on_query( {ok, Result} end. --dialyzer({nowarn_function, [on_get_status/2]}). -on_get_status(InstId, #{poolname := PoolName} = _State) -> +on_get_status(InstId, #{pool_name := PoolName}) -> case health_check(PoolName) of true -> ?tp(debug, emqx_connector_mongo_health_check, #{ @@ -281,8 +279,10 @@ on_get_status(InstId, #{poolname := PoolName} = _State) -> end. health_check(PoolName) -> - emqx_plugin_libs_pool:health_check_ecpool_workers( - PoolName, fun ?MODULE:check_worker_health/1, ?HEALTH_CHECK_TIMEOUT + timer:seconds(1) + emqx_resource_pool:health_check_workers( + PoolName, + fun ?MODULE:check_worker_health/1, + ?HEALTH_CHECK_TIMEOUT + timer:seconds(1) ). %% =================================================================== diff --git a/apps/emqx_connector/src/emqx_connector_mysql.erl b/apps/emqx_connector/src/emqx_connector_mysql.erl index 6600a5f77..45d459e70 100644 --- a/apps/emqx_connector/src/emqx_connector_mysql.erl +++ b/apps/emqx_connector/src/emqx_connector_mysql.erl @@ -51,7 +51,7 @@ -type sqls() :: #{atom() => binary()}. -type state() :: #{ - poolname := atom(), + pool_name := binary(), prepare_statement := prepares(), params_tokens := params_tokens(), batch_inserts := sqls(), @@ -123,13 +123,10 @@ on_start( {pool_size, PoolSize} ] ), - - PoolName = emqx_plugin_libs_pool:pool_name(InstId), - Prepares = parse_prepare_sql(Config), - State = maps:merge(#{poolname => PoolName}, Prepares), - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of + State = parse_prepare_sql(Config), + case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of ok -> - {ok, init_prepare(State)}; + {ok, init_prepare(State#{pool_name => InstId})}; {error, Reason} -> ?tp( mysql_connector_start_failed, @@ -143,12 +140,12 @@ maybe_add_password_opt(undefined, Options) -> maybe_add_password_opt(Password, Options) -> [{password, Password} | Options]. -on_stop(InstId, #{poolname := PoolName}) -> +on_stop(InstId, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping_mysql_connector", connector => InstId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). on_query(InstId, {TypeOrKey, SQLOrKey}, State) -> on_query(InstId, {TypeOrKey, SQLOrKey, [], default_timeout}, State); @@ -157,7 +154,7 @@ on_query(InstId, {TypeOrKey, SQLOrKey, Params}, State) -> on_query( InstId, {TypeOrKey, SQLOrKey, Params, Timeout}, - #{poolname := PoolName, prepare_statement := Prepares} = State + #{pool_name := PoolName, prepare_statement := Prepares} = State ) -> MySqlFunction = mysql_function(TypeOrKey), {SQLOrKey2, Data} = proc_sql_params(TypeOrKey, SQLOrKey, Params, State), @@ -216,8 +213,8 @@ mysql_function(prepared_query) -> mysql_function(_) -> mysql_function(prepared_query). 
-on_get_status(_InstId, #{poolname := Pool} = State) -> - case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of +on_get_status(_InstId, #{pool_name := PoolName} = State) -> + case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of true -> case do_check_prepares(State) of ok -> @@ -238,7 +235,7 @@ do_get_status(Conn) -> do_check_prepares(#{prepare_statement := Prepares}) when is_map(Prepares) -> ok; -do_check_prepares(State = #{poolname := PoolName, prepare_statement := {error, Prepares}}) -> +do_check_prepares(State = #{pool_name := PoolName, prepare_statement := {error, Prepares}}) -> %% retry to prepare case prepare_sql(Prepares, PoolName) of ok -> @@ -253,7 +250,7 @@ do_check_prepares(State = #{poolname := PoolName, prepare_statement := {error, P connect(Options) -> mysql:start_link(Options). -init_prepare(State = #{prepare_statement := Prepares, poolname := PoolName}) -> +init_prepare(State = #{prepare_statement := Prepares, pool_name := PoolName}) -> case maps:size(Prepares) of 0 -> State; @@ -409,7 +406,7 @@ on_sql_query( SQLOrKey, Params, Timeout, - #{poolname := PoolName} = State + #{pool_name := PoolName} = State ) -> LogMeta = #{connector => InstId, sql => SQLOrKey, state => State}, ?TRACE("QUERY", "mysql_connector_received", LogMeta), diff --git a/apps/emqx_connector/src/emqx_connector_pgsql.erl b/apps/emqx_connector/src/emqx_connector_pgsql.erl index 8796e00a5..ddbf9491d 100644 --- a/apps/emqx_connector/src/emqx_connector_pgsql.erl +++ b/apps/emqx_connector/src/emqx_connector_pgsql.erl @@ -56,7 +56,7 @@ -type state() :: #{ - poolname := atom(), + pool_name := binary(), prepare_sql := prepares(), params_tokens := params_tokens(), prepare_statement := epgsql:statement() @@ -120,13 +120,10 @@ on_start( {auto_reconnect, ?AUTO_RECONNECT_INTERVAL}, {pool_size, PoolSize} ], - PoolName = emqx_plugin_libs_pool:pool_name(InstId), - Prepares = parse_prepare_sql(Config), - InitState = #{poolname => PoolName, prepare_statement => #{}}, - State = maps:merge(InitState, Prepares), - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of + State = parse_prepare_sql(Config), + case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of ok -> - {ok, init_prepare(State)}; + {ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})}; {error, Reason} -> ?tp( pgsql_connector_start_failed, @@ -135,19 +132,19 @@ on_start( {error, Reason} end. -on_stop(InstId, #{poolname := PoolName}) -> +on_stop(InstId, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping postgresql connector", connector => InstId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). -on_query(InstId, {TypeOrKey, NameOrSQL}, #{poolname := _PoolName} = State) -> +on_query(InstId, {TypeOrKey, NameOrSQL}, State) -> on_query(InstId, {TypeOrKey, NameOrSQL, []}, State); on_query( InstId, {TypeOrKey, NameOrSQL, Params}, - #{poolname := PoolName} = State + #{pool_name := PoolName} = State ) -> ?SLOG(debug, #{ msg => "postgresql connector received sql query", @@ -174,7 +171,7 @@ pgsql_query_type(_) -> on_batch_query( InstId, BatchReq, - #{poolname := PoolName, params_tokens := Tokens, prepare_statement := Sts} = State + #{pool_name := PoolName, params_tokens := Tokens, prepare_statement := Sts} = State ) -> case BatchReq of [{Key, _} = Request | _] -> @@ -258,8 +255,8 @@ on_sql_query(InstId, PoolName, Type, NameOrSQL, Data) -> {error, {unrecoverable_error, invalid_request}} end. 
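Since the pool is now registered under the resource id itself, running a statement only needs that binary plus ecpool. A sketch of the call shape the SQL connectors build on: ecpool:pick_and_do/3 with an {M, F, A} prepends the picked worker's connection as the first argument (as in the JWKS and MongoDB hunks above), and epgsql:squery/2 is assumed here as the underlying driver call; run_on_pool/2 is an illustrative wrapper name.

%% Sketch: run one statement on an arbitrary worker of the pool.
run_on_pool(PoolName, SQL) ->
    %% the chosen worker's connection is prepended to the argument list,
    %% so this ends up as epgsql:squery(Conn, SQL)
    ecpool:pick_and_do(PoolName, {epgsql, squery, [SQL]}, no_handover).

%% e.g. run_on_pool(InstId, <<"SELECT 1">>) for an instance started as above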
-on_get_status(_InstId, #{poolname := Pool} = State) -> - case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of +on_get_status(_InstId, #{pool_name := PoolName} = State) -> + case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of true -> case do_check_prepares(State) of ok -> @@ -280,7 +277,7 @@ do_get_status(Conn) -> do_check_prepares(#{prepare_sql := Prepares}) when is_map(Prepares) -> ok; -do_check_prepares(State = #{poolname := PoolName, prepare_sql := {error, Prepares}}) -> +do_check_prepares(State = #{pool_name := PoolName, prepare_sql := {error, Prepares}}) -> %% retry to prepare case prepare_sql(Prepares, PoolName) of {ok, Sts} -> @@ -358,7 +355,7 @@ parse_prepare_sql([], Prepares, Tokens) -> params_tokens => Tokens }. -init_prepare(State = #{prepare_sql := Prepares, poolname := PoolName}) -> +init_prepare(State = #{prepare_sql := Prepares, pool_name := PoolName}) -> case maps:size(Prepares) of 0 -> State; @@ -389,17 +386,17 @@ prepare_sql(Prepares, PoolName) -> end. do_prepare_sql(Prepares, PoolName) -> - do_prepare_sql(ecpool:workers(PoolName), Prepares, PoolName, #{}). + do_prepare_sql(ecpool:workers(PoolName), Prepares, #{}). -do_prepare_sql([{_Name, Worker} | T], Prepares, PoolName, _LastSts) -> +do_prepare_sql([{_Name, Worker} | T], Prepares, _LastSts) -> {ok, Conn} = ecpool_worker:client(Worker), case prepare_sql_to_conn(Conn, Prepares) of {ok, Sts} -> - do_prepare_sql(T, Prepares, PoolName, Sts); + do_prepare_sql(T, Prepares, Sts); Error -> Error end; -do_prepare_sql([], _Prepares, _PoolName, LastSts) -> +do_prepare_sql([], _Prepares, LastSts) -> {ok, LastSts}. prepare_sql_to_conn(Conn, Prepares) -> diff --git a/apps/emqx_connector/src/emqx_connector_redis.erl b/apps/emqx_connector/src/emqx_connector_redis.erl index 4ef778e6b..e2155eb49 100644 --- a/apps/emqx_connector/src/emqx_connector_redis.erl +++ b/apps/emqx_connector/src/emqx_connector_redis.erl @@ -153,11 +153,10 @@ on_start( false -> [{ssl, false}] end ++ [{sentinel, maps:get(sentinel, Config, undefined)}], - PoolName = InstId, - State = #{poolname => PoolName, type => Type}, + State = #{pool_name => InstId, type => Type}, case Type of cluster -> - case eredis_cluster:start_pool(PoolName, Opts ++ [{options, Options}]) of + case eredis_cluster:start_pool(InstId, Opts ++ [{options, Options}]) of {ok, _} -> {ok, State}; {ok, _, _} -> @@ -166,22 +165,20 @@ on_start( {error, Reason} end; _ -> - case - emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ [{options, Options}]) - of + case emqx_resource_pool:start(InstId, ?MODULE, Opts ++ [{options, Options}]) of ok -> {ok, State}; {error, Reason} -> {error, Reason} end end. -on_stop(InstId, #{poolname := PoolName, type := Type}) -> +on_stop(InstId, #{pool_name := PoolName, type := Type}) -> ?SLOG(info, #{ msg => "stopping_redis_connector", connector => InstId }), case Type of cluster -> eredis_cluster:stop_pool(PoolName); - _ -> emqx_plugin_libs_pool:stop_pool(PoolName) + _ -> emqx_resource_pool:stop(PoolName) end. on_query(InstId, {cmd, _} = Query, State) -> @@ -189,7 +186,7 @@ on_query(InstId, {cmd, _} = Query, State) -> on_query(InstId, {cmds, _} = Query, State) -> do_query(InstId, Query, State). 
-do_query(InstId, Query, #{poolname := PoolName, type := Type} = State) -> +do_query(InstId, Query, #{pool_name := PoolName, type := Type} = State) -> ?TRACE( "QUERY", "redis_connector_received", @@ -227,7 +224,7 @@ is_unrecoverable_error({error, invalid_cluster_command}) -> is_unrecoverable_error(_) -> false. -on_get_status(_InstId, #{type := cluster, poolname := PoolName}) -> +on_get_status(_InstId, #{type := cluster, pool_name := PoolName}) -> case eredis_cluster:pool_exists(PoolName) of true -> Health = eredis_cluster:ping_all(PoolName), @@ -235,8 +232,8 @@ on_get_status(_InstId, #{type := cluster, poolname := PoolName}) -> false -> disconnected end; -on_get_status(_InstId, #{poolname := Pool}) -> - Health = emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1), +on_get_status(_InstId, #{pool_name := PoolName}) -> + Health = emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1), status_result(Health). do_get_status(Conn) -> diff --git a/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl b/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl index 2be30466c..9067c85de 100644 --- a/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl @@ -64,15 +64,15 @@ t_lifecycle(_Config) -> mongo_config() ). -perform_lifecycle_check(PoolName, InitialConfig) -> +perform_lifecycle_check(ResourceId, InitialConfig) -> {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?MONGO_RESOURCE_MOD, InitialConfig), {ok, #{ - state := #{poolname := ReturnedPoolName} = State, + state := #{pool_name := PoolName} = State, status := InitialStatus }} = emqx_resource:create_local( - PoolName, + ResourceId, ?CONNECTOR_RESOURCE_GROUP, ?MONGO_RESOURCE_MOD, CheckedConfig, @@ -84,39 +84,39 @@ perform_lifecycle_check(PoolName, InitialConfig) -> state := State, status := InitialStatus }} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), % % Perform query as further check that the resource is working as expected - ?assertMatch({ok, []}, emqx_resource:query(PoolName, test_query_find())), - ?assertMatch({ok, undefined}, emqx_resource:query(PoolName, test_query_find_one())), - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertMatch({ok, []}, emqx_resource:query(ResourceId, test_query_find())), + ?assertMatch({ok, undefined}, emqx_resource:query(ResourceId, test_query_find_one())), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. {ok, ?CONNECTOR_RESOURCE_GROUP, #{ state := State, status := StoppedStatus }} = - emqx_resource:get_instance(PoolName), + emqx_resource:get_instance(ResourceId), ?assertEqual(stopped, StoppedStatus), - ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. 
- ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Can call stop/1 again on an already stopped instance - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Make sure it can be restarted and the healthchecks and queries work properly - ?assertEqual(ok, emqx_resource:restart(PoolName)), + ?assertEqual(ok, emqx_resource:restart(ResourceId)), % async restart, need to wait resource timer:sleep(500), {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), - ?assertMatch({ok, []}, emqx_resource:query(PoolName, test_query_find())), - ?assertMatch({ok, undefined}, emqx_resource:query(PoolName, test_query_find_one())), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), + ?assertMatch({ok, []}, emqx_resource:query(ResourceId, test_query_find())), + ?assertMatch({ok, undefined}, emqx_resource:query(ResourceId, test_query_find_one())), % Stop and remove the resource in one go. - ?assertEqual(ok, emqx_resource:remove_local(PoolName)), - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual(ok, emqx_resource:remove_local(ResourceId)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Should not even be able to get the resource data out of ets now unlike just stopping. - ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)). % %%------------------------------------------------------------------------------ % %% Helpers diff --git a/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl b/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl index dc5826766..a0455c92c 100644 --- a/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl @@ -64,14 +64,14 @@ t_lifecycle(_Config) -> mysql_config() ). 
-perform_lifecycle_check(PoolName, InitialConfig) -> +perform_lifecycle_check(ResourceId, InitialConfig) -> {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?MYSQL_RESOURCE_MOD, InitialConfig), {ok, #{ - state := #{poolname := ReturnedPoolName} = State, + state := #{pool_name := PoolName} = State, status := InitialStatus }} = emqx_resource:create_local( - PoolName, + ResourceId, ?CONNECTOR_RESOURCE_GROUP, ?MYSQL_RESOURCE_MOD, CheckedConfig, @@ -83,53 +83,53 @@ perform_lifecycle_check(PoolName, InitialConfig) -> state := State, status := InitialStatus }} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), % % Perform query as further check that the resource is working as expected - ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_no_params())), - ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_with_params())), + ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_no_params())), + ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_with_params())), ?assertMatch( {ok, _, [[1]]}, emqx_resource:query( - PoolName, + ResourceId, test_query_with_params_and_timeout() ) ), - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. {ok, ?CONNECTOR_RESOURCE_GROUP, #{ state := State, status := StoppedStatus }} = - emqx_resource:get_instance(PoolName), + emqx_resource:get_instance(ResourceId), ?assertEqual(stopped, StoppedStatus), - ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Can call stop/1 again on an already stopped instance - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Make sure it can be restarted and the healthchecks and queries work properly - ?assertEqual(ok, emqx_resource:restart(PoolName)), + ?assertEqual(ok, emqx_resource:restart(ResourceId)), % async restart, need to wait resource timer:sleep(500), {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), - ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_no_params())), - ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_with_params())), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), + ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_no_params())), + ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_with_params())), ?assertMatch( {ok, _, [[1]]}, emqx_resource:query( - PoolName, + ResourceId, test_query_with_params_and_timeout() ) ), % Stop and remove the resource in one go. 
- ?assertEqual(ok, emqx_resource:remove_local(PoolName)), - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual(ok, emqx_resource:remove_local(ResourceId)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Should not even be able to get the resource data out of ets now unlike just stopping. - ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)). % %%------------------------------------------------------------------------------ % %% Helpers diff --git a/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl b/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl index 2f77ca38d..a4ac4f932 100644 --- a/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl @@ -64,15 +64,15 @@ t_lifecycle(_Config) -> pgsql_config() ). -perform_lifecycle_check(PoolName, InitialConfig) -> +perform_lifecycle_check(ResourceId, InitialConfig) -> {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?PGSQL_RESOURCE_MOD, InitialConfig), {ok, #{ - state := #{poolname := ReturnedPoolName} = State, + state := #{pool_name := PoolName} = State, status := InitialStatus }} = emqx_resource:create_local( - PoolName, + ResourceId, ?CONNECTOR_RESOURCE_GROUP, ?PGSQL_RESOURCE_MOD, CheckedConfig, @@ -84,39 +84,39 @@ perform_lifecycle_check(PoolName, InitialConfig) -> state := State, status := InitialStatus }} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), % % Perform query as further check that the resource is working as expected - ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_no_params())), - ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_with_params())), - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_no_params())), + ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_with_params())), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. {ok, ?CONNECTOR_RESOURCE_GROUP, #{ state := State, status := StoppedStatus }} = - emqx_resource:get_instance(PoolName), + emqx_resource:get_instance(ResourceId), ?assertEqual(stopped, StoppedStatus), - ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. 
- ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Can call stop/1 again on an already stopped instance - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Make sure it can be restarted and the healthchecks and queries work properly - ?assertEqual(ok, emqx_resource:restart(PoolName)), + ?assertEqual(ok, emqx_resource:restart(ResourceId)), % async restart, need to wait resource timer:sleep(500), {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), - ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_no_params())), - ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_with_params())), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), + ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_no_params())), + ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_with_params())), % Stop and remove the resource in one go. - ?assertEqual(ok, emqx_resource:remove_local(PoolName)), - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual(ok, emqx_resource:remove_local(ResourceId)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Should not even be able to get the resource data out of ets now unlike just stopping. - ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)). % %%------------------------------------------------------------------------------ % %% Helpers diff --git a/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl b/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl index 3a134ad35..e6df4f711 100644 --- a/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl @@ -102,14 +102,14 @@ t_sentinel_lifecycle(_Config) -> [<<"PING">>] ). 
-perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) -> +perform_lifecycle_check(ResourceId, InitialConfig, RedisCommand) -> {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?REDIS_RESOURCE_MOD, InitialConfig), {ok, #{ - state := #{poolname := ReturnedPoolName} = State, + state := #{pool_name := PoolName} = State, status := InitialStatus }} = emqx_resource:create_local( - PoolName, + ResourceId, ?CONNECTOR_RESOURCE_GROUP, ?REDIS_RESOURCE_MOD, CheckedConfig, @@ -121,49 +121,49 @@ perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) -> state := State, status := InitialStatus }} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), % Perform query as further check that the resource is working as expected - ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})), + ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(ResourceId, {cmd, RedisCommand})), ?assertEqual( {ok, [{ok, <<"PONG">>}, {ok, <<"PONG">>}]}, - emqx_resource:query(PoolName, {cmds, [RedisCommand, RedisCommand]}) + emqx_resource:query(ResourceId, {cmds, [RedisCommand, RedisCommand]}) ), ?assertMatch( {error, {unrecoverable_error, [{ok, <<"PONG">>}, {error, _}]}}, emqx_resource:query( - PoolName, + ResourceId, {cmds, [RedisCommand, [<<"INVALID_COMMAND">>]]}, #{timeout => 500} ) ), - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. {ok, ?CONNECTOR_RESOURCE_GROUP, #{ state := State, status := StoppedStatus }} = - emqx_resource:get_instance(PoolName), + emqx_resource:get_instance(ResourceId), ?assertEqual(stopped, StoppedStatus), - ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Can call stop/1 again on an already stopped instance - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Make sure it can be restarted and the healthchecks and queries work properly - ?assertEqual(ok, emqx_resource:restart(PoolName)), + ?assertEqual(ok, emqx_resource:restart(ResourceId)), % async restart, need to wait resource timer:sleep(500), {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), - ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), + ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(ResourceId, {cmd, RedisCommand})), % Stop and remove the resource in one go. 
- ?assertEqual(ok, emqx_resource:remove_local(PoolName)), - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual(ok, emqx_resource:remove_local(ResourceId)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Should not even be able to get the resource data out of ets now unlike just stopping. - ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)). % %%------------------------------------------------------------------------------ % %% Helpers diff --git a/apps/emqx_plugin_libs/src/emqx_plugin_libs_pool.erl b/apps/emqx_resource/src/emqx_resource_pool.erl similarity index 82% rename from apps/emqx_plugin_libs/src/emqx_plugin_libs_pool.erl rename to apps/emqx_resource/src/emqx_resource_pool.erl index a3048e5dd..913b29c86 100644 --- a/apps/emqx_plugin_libs/src/emqx_plugin_libs_pool.erl +++ b/apps/emqx_resource/src/emqx_resource_pool.erl @@ -14,31 +14,27 @@ %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_plugin_libs_pool). +-module(emqx_resource_pool). -export([ - start_pool/3, - stop_pool/1, - pool_name/1, - health_check_ecpool_workers/2, - health_check_ecpool_workers/3 + start/3, + stop/1, + health_check_workers/2, + health_check_workers/3 ]). -include_lib("emqx/include/logger.hrl"). -define(HEALTH_CHECK_TIMEOUT, 15000). -pool_name(ID) when is_binary(ID) -> - list_to_atom(binary_to_list(ID)). - -start_pool(Name, Mod, Options) -> +start(Name, Mod, Options) -> case ecpool:start_sup_pool(Name, Mod, Options) of {ok, _} -> ?SLOG(info, #{msg => "start_ecpool_ok", pool_name => Name}), ok; {error, {already_started, _Pid}} -> - stop_pool(Name), - start_pool(Name, Mod, Options); + stop(Name), + start(Name, Mod, Options); {error, Reason} -> NReason = parse_reason(Reason), ?SLOG(error, #{ @@ -49,7 +45,7 @@ start_pool(Name, Mod, Options) -> {error, {start_pool_failed, Name, NReason}} end. -stop_pool(Name) -> +stop(Name) -> case ecpool:stop_sup_pool(Name) of ok -> ?SLOG(info, #{msg => "stop_ecpool_ok", pool_name => Name}); @@ -64,10 +60,10 @@ stop_pool(Name) -> error({stop_pool_failed, Name, Reason}) end. -health_check_ecpool_workers(PoolName, CheckFunc) -> - health_check_ecpool_workers(PoolName, CheckFunc, ?HEALTH_CHECK_TIMEOUT). +health_check_workers(PoolName, CheckFunc) -> + health_check_workers(PoolName, CheckFunc, ?HEALTH_CHECK_TIMEOUT). 
-health_check_ecpool_workers(PoolName, CheckFunc, Timeout) -> +health_check_workers(PoolName, CheckFunc, Timeout) -> Workers = [Worker || {_WorkerName, Worker} <- ecpool:workers(PoolName)], DoPerWorker = fun(Worker) -> diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl index a785924d4..d2eb9ee73 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl @@ -917,7 +917,7 @@ t_invalid_private_key(Config) -> #{<<"private_key">> => InvalidPrivateKeyPEM} } ), - #{?snk_kind := gcp_pubsub_bridge_jwt_worker_failed_to_start}, + #{?snk_kind := "gcp_pubsub_bridge_jwt_worker_failed_to_start"}, 20_000 ), Res @@ -928,7 +928,7 @@ t_invalid_private_key(Config) -> [#{reason := Reason}] when Reason =:= noproc orelse Reason =:= {shutdown, {error, empty_key}}, - ?of_kind(gcp_pubsub_bridge_jwt_worker_failed_to_start, Trace) + ?of_kind("gcp_pubsub_bridge_jwt_worker_failed_to_start", Trace) ), ?assertMatch( [#{error := empty_key}], @@ -956,14 +956,14 @@ t_jwt_worker_start_timeout(Config) -> #{<<"private_key">> => InvalidPrivateKeyPEM} } ), - #{?snk_kind := gcp_pubsub_bridge_jwt_timeout}, + #{?snk_kind := "gcp_pubsub_bridge_jwt_timeout"}, 20_000 ), Res end, fun(Res, Trace) -> ?assertMatch({ok, _}, Res), - ?assertMatch([_], ?of_kind(gcp_pubsub_bridge_jwt_timeout, Trace)), + ?assertMatch([_], ?of_kind("gcp_pubsub_bridge_jwt_timeout", Trace)), ok end ), @@ -1329,7 +1329,7 @@ t_failed_to_start_jwt_worker(Config) -> fun(Trace) -> ?assertMatch( [#{reason := {error, restarting}}], - ?of_kind(gcp_pubsub_bridge_jwt_worker_failed_to_start, Trace) + ?of_kind("gcp_pubsub_bridge_jwt_worker_failed_to_start", Trace) ), ok end diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mysql_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mysql_SUITE.erl index be07a2bb7..e497e0a47 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mysql_SUITE.erl +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mysql_SUITE.erl @@ -265,7 +265,7 @@ unprepare(Config, Key) -> Name = ?config(mysql_name, Config), BridgeType = ?config(mysql_bridge_type, Config), ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - {ok, _, #{state := #{poolname := PoolName}}} = emqx_resource:get_instance(ResourceID), + {ok, _, #{state := #{pool_name := PoolName}}} = emqx_resource:get_instance(ResourceID), [ begin {ok, Conn} = ecpool_worker:client(Worker), diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_cassa.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_cassa.erl index 86b908038..397532f47 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_cassa.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_cassa.erl @@ -44,7 +44,7 @@ -type state() :: #{ - poolname := atom(), + pool_name := binary(), prepare_cql := prepares(), params_tokens := params_tokens(), %% returned by ecql:prepare/2 @@ -124,14 +124,10 @@ on_start( false -> [] end, - %% use InstaId of binary type as Pool name, which is supported in ecpool. 
- PoolName = InstId, - Prepares = parse_prepare_cql(Config), - InitState = #{poolname => PoolName, prepare_statement => #{}}, - State = maps:merge(InitState, Prepares), - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of + State = parse_prepare_cql(Config), + case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of ok -> - {ok, init_prepare(State)}; + {ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})}; {error, Reason} -> ?tp( cassandra_connector_start_failed, @@ -140,12 +136,12 @@ on_start( {error, Reason} end. -on_stop(InstId, #{poolname := PoolName}) -> +on_stop(InstId, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping_cassandra_connector", connector => InstId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). -type request() :: % emqx_bridge.erl @@ -184,7 +180,7 @@ do_single_query( InstId, Request, Async, - #{poolname := PoolName} = State + #{pool_name := PoolName} = State ) -> {Type, PreparedKeyOrSQL, Params} = parse_request_to_cql(Request), ?tp( @@ -232,7 +228,7 @@ do_batch_query( InstId, Requests, Async, - #{poolname := PoolName} = State + #{pool_name := PoolName} = State ) -> CQLs = lists:map( @@ -305,8 +301,8 @@ exec_cql_batch_query(InstId, PoolName, Async, CQLs) -> Result end. -on_get_status(_InstId, #{poolname := Pool} = State) -> - case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of +on_get_status(_InstId, #{pool_name := PoolName} = State) -> + case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of true -> case do_check_prepares(State) of ok -> @@ -327,7 +323,7 @@ do_get_status(Conn) -> do_check_prepares(#{prepare_cql := Prepares}) when is_map(Prepares) -> ok; -do_check_prepares(State = #{poolname := PoolName, prepare_cql := {error, Prepares}}) -> +do_check_prepares(State = #{pool_name := PoolName, prepare_cql := {error, Prepares}}) -> %% retry to prepare case prepare_cql(Prepares, PoolName) of {ok, Sts} -> @@ -397,7 +393,7 @@ parse_prepare_cql([], Prepares, Tokens) -> params_tokens => Tokens }. -init_prepare(State = #{prepare_cql := Prepares, poolname := PoolName}) -> +init_prepare(State = #{prepare_cql := Prepares, pool_name := PoolName}) -> case maps:size(Prepares) of 0 -> State; @@ -429,17 +425,17 @@ prepare_cql(Prepares, PoolName) -> end. do_prepare_cql(Prepares, PoolName) -> - do_prepare_cql(ecpool:workers(PoolName), Prepares, PoolName, #{}). + do_prepare_cql(ecpool:workers(PoolName), Prepares, #{}). -do_prepare_cql([{_Name, Worker} | T], Prepares, PoolName, _LastSts) -> +do_prepare_cql([{_Name, Worker} | T], Prepares, _LastSts) -> {ok, Conn} = ecpool_worker:client(Worker), case prepare_cql_to_conn(Conn, Prepares) of {ok, Sts} -> - do_prepare_cql(T, Prepares, PoolName, Sts); + do_prepare_cql(T, Prepares, Sts); Error -> Error end; -do_prepare_cql([], _Prepares, _PoolName, LastSts) -> +do_prepare_cql([], _Prepares, LastSts) -> {ok, LastSts}. prepare_cql_to_conn(Conn, Prepares) -> diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_clickhouse.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_clickhouse.erl index 2872e8cf0..a7afcd6d5 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_clickhouse.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_clickhouse.erl @@ -62,7 +62,8 @@ -type state() :: #{ templates := templates(), - poolname := atom() + pool_name := binary(), + connect_timeout := pos_integer() }. -type clickhouse_config() :: map(). 
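Both the PostgreSQL and Cassandra prepare helpers above walk every pool worker and prepare the statements on each connection; the change here only drops the PoolName argument that the loop never used. Reduced to its shape, the traversal looks like the sketch below (for_each_connection/2 is an illustrative name; Fun is expected to return ok or {error, Reason}):

%% Sketch: apply Fun to every worker connection of an ecpool pool, carrying the
%% first error through -- the shape of do_prepare_sql/do_prepare_cql above.
for_each_connection(PoolName, Fun) ->
    lists:foldl(
        fun
            ({_Name, Worker}, ok) ->
                {ok, Conn} = ecpool_worker:client(Worker),
                Fun(Conn);
            (_WorkerEntry, Error) ->
                Error
        end,
        ok,
        ecpool:workers(PoolName)
    ).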
@@ -141,7 +142,6 @@ on_start( connector => InstanceID, config => emqx_utils:redact(Config) }), - PoolName = emqx_plugin_libs_pool:pool_name(InstanceID), Options = [ {url, URL}, {user, maps:get(username, Config, "default")}, @@ -149,46 +149,43 @@ on_start( {database, DB}, {auto_reconnect, ?AUTO_RECONNECT_INTERVAL}, {pool_size, PoolSize}, - {pool, PoolName} + {pool, InstanceID} ], - InitState = #{ - poolname => PoolName, - connect_timeout => ConnectTimeout - }, try Templates = prepare_sql_templates(Config), - State = maps:merge(InitState, #{templates => Templates}), - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options) of + State = #{ + pool_name => InstanceID, + templates => Templates, + connect_timeout => ConnectTimeout + }, + case emqx_resource_pool:start(InstanceID, ?MODULE, Options) of ok -> {ok, State}; {error, Reason} -> - log_start_error(Config, Reason, none), + ?tp( + info, + "clickhouse_connector_start_failed", + #{ + error => Reason, + config => emqx_utils:redact(Config) + } + ), {error, Reason} end catch _:CatchReason:Stacktrace -> - log_start_error(Config, CatchReason, Stacktrace), + ?tp( + info, + "clickhouse_connector_start_failed", + #{ + error => CatchReason, + stacktrace => Stacktrace, + config => emqx_utils:redact(Config) + } + ), {error, CatchReason} end. -log_start_error(Config, Reason, Stacktrace) -> - StacktraceMap = - case Stacktrace of - none -> #{}; - _ -> #{stacktrace => Stacktrace} - end, - LogMessage = - #{ - msg => "clickhouse_connector_start_failed", - error_reason => Reason, - config => emqx_utils:redact(Config) - }, - ?SLOG(info, maps:merge(LogMessage, StacktraceMap)), - ?tp( - clickhouse_connector_start_failed, - #{error => Reason} - ). - %% Helper functions to prepare SQL tempaltes prepare_sql_templates(#{ @@ -240,7 +237,7 @@ split_clickhouse_insert_sql(SQL) -> end. % This is a callback for ecpool which is triggered by the call to -% emqx_plugin_libs_pool:start_pool in on_start/2 +% emqx_resource_pool:start in on_start/2 connect(Options) -> URL = iolist_to_binary(emqx_http_lib:normalize(proplists:get_value(url, Options))), @@ -277,23 +274,20 @@ connect(Options) -> -spec on_stop(resource_id(), resource_state()) -> term(). -on_stop(ResourceID, #{poolname := PoolName}) -> +on_stop(InstanceID, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping clickouse connector", - connector => ResourceID + connector => InstanceID }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). 
%% ------------------------------------------------------------------- %% on_get_status emqx_resouce callback and related functions %% ------------------------------------------------------------------- on_get_status( - _InstId, - #{ - poolname := PoolName, - connect_timeout := Timeout - } = State + _InstanceID, + #{pool_name := PoolName, connect_timeout := Timeout} = State ) -> case do_get_status(PoolName, Timeout) of ok -> @@ -352,7 +346,7 @@ do_get_status(PoolName, Timeout) -> on_query( ResourceID, {RequestType, DataOrSQL}, - #{poolname := PoolName} = State + #{pool_name := PoolName} = State ) -> ?SLOG(debug, #{ msg => "clickhouse connector received sql query", @@ -391,16 +385,11 @@ query_type(send_message) -> on_batch_query( ResourceID, BatchReq, - State + #{pool_name := PoolName, templates := Templates} = _State ) -> %% Currently we only support batch requests with the send_message key {Keys, ObjectsToInsert} = lists:unzip(BatchReq), ensure_keys_are_of_type_send_message(Keys), - %% Pick out the SQL template - #{ - templates := Templates, - poolname := PoolName - } = State, %% Create batch insert SQL statement SQL = objects_to_sql(ObjectsToInsert, Templates), %% Do the actual query in the database diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl index 85daefbb7..1d273cdd7 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl @@ -114,23 +114,23 @@ on_start( Templates = parse_template(Config), State = #{ - poolname => InstanceId, + pool_name => InstanceId, database => Database, templates => Templates }, - case emqx_plugin_libs_pool:start_pool(InstanceId, ?MODULE, Options) of + case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of ok -> {ok, State}; Error -> Error end. -on_stop(InstanceId, #{poolname := PoolName} = _State) -> +on_stop(InstanceId, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping_dynamo_connector", connector => InstanceId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). on_query(InstanceId, Query, State) -> do_query(InstanceId, Query, handover, State). @@ -160,8 +160,8 @@ on_batch_query_async(InstanceId, [{send_message, _} | _] = Query, Reply, State) on_batch_query_async(_InstanceId, Query, _Reply, _State) -> {error, {unrecoverable_error, {invalid_request, Query}}}. -on_get_status(_InstanceId, #{poolname := Pool}) -> - Health = emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1), +on_get_status(_InstanceId, #{pool_name := PoolName}) -> + Health = emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1), status_result(Health). do_get_status(_Conn) -> @@ -183,7 +183,7 @@ do_query( InstanceId, Query, ApplyMode, - #{poolname := PoolName, templates := Templates, database := Database} = State + #{pool_name := PoolName, templates := Templates, database := Database} = State ) -> ?TRACE( "QUERY", diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_gcp_pubsub.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_gcp_pubsub.erl index 2295f63ab..7b068ec8f 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_gcp_pubsub.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_gcp_pubsub.erl @@ -26,7 +26,6 @@ ]). -export([reply_delegator/3]). --type bridge_id() :: binary(). -type jwt_worker() :: binary(). -type service_account_json() :: emqx_ee_bridge_gcp_pubsub:service_account_json(). 
-type config() :: #{ @@ -43,7 +42,7 @@ jwt_worker_id := jwt_worker(), max_retries := non_neg_integer(), payload_template := emqx_plugin_libs_rule:tmpl_token(), - pool_name := atom(), + pool_name := binary(), project_id := binary(), pubsub_topic := binary(), request_timeout := timer:time() @@ -102,14 +101,13 @@ on_start( jwt_worker_id := JWTWorkerId, project_id := ProjectId } = ensure_jwt_worker(InstanceId, Config), - PoolName = emqx_plugin_libs_pool:pool_name(InstanceId), State = #{ connect_timeout => ConnectTimeout, instance_id => InstanceId, jwt_worker_id => JWTWorkerId, max_retries => MaxRetries, payload_template => emqx_plugin_libs_rule:preproc_tmpl(PayloadTemplate), - pool_name => PoolName, + pool_name => InstanceId, project_id => ProjectId, pubsub_topic => PubSubTopic, request_timeout => RequestTimeout @@ -118,20 +116,20 @@ on_start( gcp_pubsub_on_start_before_starting_pool, #{ instance_id => InstanceId, - pool_name => PoolName, + pool_name => InstanceId, pool_opts => PoolOpts } ), - ?tp(gcp_pubsub_starting_ehttpc_pool, #{pool_name => PoolName}), - case ehttpc_sup:start_pool(PoolName, PoolOpts) of + ?tp(gcp_pubsub_starting_ehttpc_pool, #{pool_name => InstanceId}), + case ehttpc_sup:start_pool(InstanceId, PoolOpts) of {ok, _} -> {ok, State}; {error, {already_started, _}} -> - ?tp(gcp_pubsub_ehttpc_pool_already_started, #{pool_name => PoolName}), + ?tp(gcp_pubsub_ehttpc_pool_already_started, #{pool_name => InstanceId}), {ok, State}; {error, Reason} -> ?tp(gcp_pubsub_ehttpc_pool_start_failure, #{ - pool_name => PoolName, + pool_name => InstanceId, reason => Reason }), {error, Reason} @@ -140,10 +138,7 @@ on_start( -spec on_stop(manager_id(), state()) -> ok | {error, term()}. on_stop( InstanceId, - _State = #{ - jwt_worker_id := JWTWorkerId, - pool_name := PoolName - } + _State = #{jwt_worker_id := JWTWorkerId, pool_name := PoolName} ) -> ?tp(gcp_pubsub_stop, #{instance_id => InstanceId, jwt_worker_id => JWTWorkerId}), ?SLOG(info, #{ @@ -155,7 +150,7 @@ on_stop( ehttpc_sup:stop_pool(PoolName). -spec on_query( - bridge_id(), + resource_id(), {send_message, map()}, state() ) -> @@ -163,32 +158,32 @@ on_stop( | {ok, status_code(), headers(), body()} | {error, {recoverable_error, term()}} | {error, term()}. -on_query(BridgeId, {send_message, Selected}, State) -> +on_query(ResourceId, {send_message, Selected}, State) -> Requests = [{send_message, Selected}], ?TRACE( "QUERY_SYNC", "gcp_pubsub_received", - #{requests => Requests, connector => BridgeId, state => State} + #{requests => Requests, connector => ResourceId, state => State} ), - do_send_requests_sync(State, Requests, BridgeId). + do_send_requests_sync(State, Requests, ResourceId). -spec on_query_async( - bridge_id(), + resource_id(), {send_message, map()}, {ReplyFun :: function(), Args :: list()}, state() ) -> {ok, pid()}. -on_query_async(BridgeId, {send_message, Selected}, ReplyFunAndArgs, State) -> +on_query_async(ResourceId, {send_message, Selected}, ReplyFunAndArgs, State) -> Requests = [{send_message, Selected}], ?TRACE( "QUERY_ASYNC", "gcp_pubsub_received", - #{requests => Requests, connector => BridgeId, state => State} + #{requests => Requests, connector => ResourceId, state => State} ), - do_send_requests_async(State, Requests, ReplyFunAndArgs, BridgeId). + do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId). 
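For the asynchronous callbacks above, the contract is to return {ok, Pid} immediately and hand the eventual outcome back through the supplied {ReplyFun, Args} pair, which is what reply_delegator/3 further down does via emqx_resource:apply_reply_fun/2. A reduced sketch of that flow, not the GCP implementation itself; do_send/2 is a hypothetical stand-in for the real HTTP publish:

%% Reduced sketch of the async reply contract.
on_query_async(_ResourceId, {send_message, Selected}, ReplyFunAndArgs, State) ->
    Pid = spawn_link(fun() ->
        Result = do_send(Selected, State),
        emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result)
    end),
    {ok, Pid}.

%% hypothetical stand-in for the real request
do_send(_Selected, _State) ->
    ok.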
-spec on_batch_query( - bridge_id(), + resource_id(), [{send_message, map()}], state() ) -> @@ -196,34 +191,30 @@ on_query_async(BridgeId, {send_message, Selected}, ReplyFunAndArgs, State) -> | {ok, status_code(), headers(), body()} | {error, {recoverable_error, term()}} | {error, term()}. -on_batch_query(BridgeId, Requests, State) -> +on_batch_query(ResourceId, Requests, State) -> ?TRACE( "QUERY_SYNC", "gcp_pubsub_received", - #{requests => Requests, connector => BridgeId, state => State} + #{requests => Requests, connector => ResourceId, state => State} ), - do_send_requests_sync(State, Requests, BridgeId). + do_send_requests_sync(State, Requests, ResourceId). -spec on_batch_query_async( - bridge_id(), + resource_id(), [{send_message, map()}], {ReplyFun :: function(), Args :: list()}, state() ) -> {ok, pid()}. -on_batch_query_async(BridgeId, Requests, ReplyFunAndArgs, State) -> +on_batch_query_async(ResourceId, Requests, ReplyFunAndArgs, State) -> ?TRACE( "QUERY_ASYNC", "gcp_pubsub_received", - #{requests => Requests, connector => BridgeId, state => State} + #{requests => Requests, connector => ResourceId, state => State} ), - do_send_requests_async(State, Requests, ReplyFunAndArgs, BridgeId). + do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId). -spec on_get_status(manager_id(), state()) -> connected | disconnected. -on_get_status(InstanceId, State) -> - #{ - connect_timeout := Timeout, - pool_name := PoolName - } = State, +on_get_status(InstanceId, #{connect_timeout := Timeout, pool_name := PoolName} = State) -> case do_get_status(InstanceId, PoolName, Timeout) of true -> connected; @@ -245,8 +236,7 @@ on_get_status(InstanceId, State) -> project_id := binary() }. ensure_jwt_worker(InstanceId, #{ - service_account_json := ServiceAccountJSON, - pubsub_topic := PubSubTopic + service_account_json := ServiceAccountJSON }) -> #{ project_id := ProjectId, @@ -276,14 +266,8 @@ ensure_jwt_worker(InstanceId, #{ {ok, Worker0} -> Worker0; Error -> - ?tp( - gcp_pubsub_bridge_jwt_worker_failed_to_start, - #{instance_id => InstanceId, reason => Error} - ), - ?SLOG(error, #{ - msg => "failed_to_start_gcp_pubsub_jwt_worker", - instance_id => InstanceId, - pubsub_topic => PubSubTopic, + ?tp(error, "gcp_pubsub_bridge_jwt_worker_failed_to_start", #{ + connector => InstanceId, reason => Error }), _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), @@ -301,26 +285,14 @@ ensure_jwt_worker(InstanceId, #{ demonitor(MRef, [flush]), ok; {'DOWN', MRef, process, Worker, Reason} -> - ?tp( - gcp_pubsub_bridge_jwt_worker_failed_to_start, - #{ - resource_id => InstanceId, - reason => Reason - } - ), - ?SLOG(error, #{ - msg => "gcp_pubsub_bridge_jwt_worker_failed_to_start", + ?tp(error, "gcp_pubsub_bridge_jwt_worker_failed_to_start", #{ connector => InstanceId, reason => Reason }), _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), throw(failed_to_start_jwt_worker) after 10_000 -> - ?tp(gcp_pubsub_bridge_jwt_timeout, #{resource_id => InstanceId}), - ?SLOG(warning, #{ - msg => "gcp_pubsub_bridge_jwt_timeout", - connector => InstanceId - }), + ?tp(warning, "gcp_pubsub_bridge_jwt_timeout", #{connector => InstanceId}), demonitor(MRef, [flush]), _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), throw(timeout_creating_jwt) @@ -569,7 +541,7 @@ reply_delegator(_ResourceId, ReplyFunAndArgs, Result) -> emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result) end. --spec do_get_status(manager_id(), atom(), timer:time()) -> boolean(). 
+-spec do_get_status(manager_id(), binary(), timer:time()) -> boolean(). do_get_status(InstanceId, PoolName, Timeout) -> Workers = [Worker || {_WorkerName, Worker} <- ehttpc:workers(PoolName)], DoPerWorker = diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_sqlserver.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_sqlserver.erl index f11441a3b..70bd76d14 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_sqlserver.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_sqlserver.erl @@ -126,7 +126,7 @@ %% -type size() :: integer(). -type state() :: #{ - poolname := binary(), + pool_name := binary(), resource_opts := map(), sql_templates := map() }. @@ -208,17 +208,16 @@ on_start( {password, Password}, {driver, Driver}, {database, Database}, - {pool_size, PoolSize}, - {poolname, PoolName} + {pool_size, PoolSize} ], State = #{ %% also InstanceId - poolname => PoolName, + pool_name => PoolName, sql_templates => parse_sql_template(Config), resource_opts => ResourceOpts }, - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options) of + case emqx_resource_pool:start(PoolName, ?MODULE, Options) of ok -> {ok, State}; {error, Reason} -> @@ -229,12 +228,12 @@ on_start( {error, Reason} end. -on_stop(InstanceId, #{poolname := PoolName} = _State) -> +on_stop(InstanceId, #{pool_name := PoolName} = _State) -> ?SLOG(info, #{ msg => "stopping_sqlserver_connector", connector => InstanceId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). -spec on_query( manager_id(), @@ -265,7 +264,6 @@ on_query_async( InstanceId, {?ACTION_SEND_MESSAGE, _Msg} = Query, ReplyFunAndArgs, - %% #{poolname := PoolName, sql_templates := Templates} = State State ) -> ?TRACE( @@ -306,10 +304,12 @@ on_batch_query_async(InstanceId, Requests, ReplyFunAndArgs, State) -> ), do_query(InstanceId, Requests, ?ASYNC_QUERY_MODE(ReplyFunAndArgs), State). -on_get_status(_InstanceId, #{poolname := Pool, resource_opts := ResourceOpts} = _State) -> +on_get_status(_InstanceId, #{pool_name := PoolName, resource_opts := ResourceOpts} = _State) -> RequestTimeout = ?REQUEST_TIMEOUT(ResourceOpts), - Health = emqx_plugin_libs_pool:health_check_ecpool_workers( - Pool, {?MODULE, do_get_status, [RequestTimeout]}, RequestTimeout + Health = emqx_resource_pool:health_check_workers( + PoolName, + {?MODULE, do_get_status, [RequestTimeout]}, + RequestTimeout ), status_result(Health). @@ -382,7 +382,7 @@ do_query( InstanceId, Query, ApplyMode, - #{poolname := PoolName, sql_templates := Templates} = State + #{pool_name := PoolName, sql_templates := Templates} = State ) -> ?TRACE( "SINGLE_QUERY_SYNC", @@ -425,7 +425,7 @@ do_query( end. 
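The SQL Server hunk above also shows the second calling shape of emqx_resource_pool:health_check_workers: instead of a fun it passes an {M, F, A} check together with an explicit timeout, while the 2-arity form used elsewhere falls back to the module's 15-second default. Both shapes side by side, as a sketch; do_get_status/2 here is a stub that is assumed, like the other ecpool MFA calls in this patch, to receive the worker's connection as its first argument:

%% Sketch: the two health_check_workers call shapes used in this series.
simple_status(PoolName) ->
    case emqx_resource_pool:health_check_workers(PoolName, fun(_Conn) -> true end) of
        true -> connected;
        false -> disconnected
    end.

status_with_timeout(PoolName, Timeout) ->
    Healthy = emqx_resource_pool:health_check_workers(
        PoolName,
        {?MODULE, do_get_status, [Timeout]},
        Timeout
    ),
    case Healthy of
        true -> connected;
        false -> disconnected
    end.

%% stub; a real connector pings its driver within Timeout
do_get_status(_Conn, _Timeout) ->
    true.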
worker_do_insert( - Conn, SQL, #{resource_opts := ResourceOpts, poolname := InstanceId} = State + Conn, SQL, #{resource_opts := ResourceOpts, pool_name := InstanceId} = State ) -> LogMeta = #{connector => InstanceId, state => State}, try diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_tdengine.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_tdengine.erl index 9b6718882..f9ca21ad7 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_tdengine.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_tdengine.erl @@ -107,20 +107,20 @@ on_start( ], Prepares = parse_prepare_sql(Config), - State = maps:merge(Prepares, #{poolname => InstanceId, query_opts => query_opts(Config)}), - case emqx_plugin_libs_pool:start_pool(InstanceId, ?MODULE, Options) of + State = Prepares#{pool_name => InstanceId, query_opts => query_opts(Config)}, + case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of ok -> {ok, State}; Error -> Error end. -on_stop(InstanceId, #{poolname := PoolName} = _State) -> +on_stop(InstanceId, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping_tdengine_connector", connector => InstanceId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). on_query(InstanceId, {query, SQL}, State) -> do_query(InstanceId, SQL, State); @@ -150,8 +150,8 @@ on_batch_query( {error, {unrecoverable_error, invalid_request}} end. -on_get_status(_InstanceId, #{poolname := Pool}) -> - Health = emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1), +on_get_status(_InstanceId, #{pool_name := PoolName}) -> + Health = emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1), status_result(Health). do_get_status(Conn) -> @@ -171,7 +171,7 @@ do_batch_insert(InstanceId, BatchReqs, InsertPart, Tokens, State) -> SQL = emqx_plugin_libs_rule:proc_batch_sql(BatchReqs, InsertPart, Tokens), do_query(InstanceId, SQL, State). -do_query(InstanceId, Query, #{poolname := PoolName, query_opts := Opts} = State) -> +do_query(InstanceId, Query, #{pool_name := PoolName, query_opts := Opts} = State) -> ?TRACE( "QUERY", "tdengine_connector_received", diff --git a/lib-ee/emqx_ee_connector/test/emqx_ee_connector_cassa_SUITE.erl b/lib-ee/emqx_ee_connector/test/emqx_ee_connector_cassa_SUITE.erl index 52ed03a62..f2647d756 100644 --- a/lib-ee/emqx_ee_connector/test/emqx_ee_connector_cassa_SUITE.erl +++ b/lib-ee/emqx_ee_connector/test/emqx_ee_connector_cassa_SUITE.erl @@ -101,15 +101,15 @@ show(Label, What) -> erlang:display({Label, What}), What. 
-perform_lifecycle_check(PoolName, InitialConfig) -> +perform_lifecycle_check(ResourceId, InitialConfig) -> {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?CASSANDRA_RESOURCE_MOD, InitialConfig), {ok, #{ - state := #{poolname := ReturnedPoolName} = State, + state := #{pool_name := PoolName} = State, status := InitialStatus }} = emqx_resource:create_local( - PoolName, + ResourceId, ?CONNECTOR_RESOURCE_GROUP, ?CASSANDRA_RESOURCE_MOD, CheckedConfig, @@ -121,45 +121,45 @@ perform_lifecycle_check(PoolName, InitialConfig) -> state := State, status := InitialStatus }} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), % % Perform query as further check that the resource is working as expected (fun() -> - erlang:display({pool_name, PoolName}), - QueryNoParamsResWrapper = emqx_resource:query(PoolName, test_query_no_params()), + erlang:display({pool_name, ResourceId}), + QueryNoParamsResWrapper = emqx_resource:query(ResourceId, test_query_no_params()), ?assertMatch({ok, _}, QueryNoParamsResWrapper) end)(), - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. {ok, ?CONNECTOR_RESOURCE_GROUP, #{ state := State, status := StoppedStatus }} = - emqx_resource:get_instance(PoolName), + emqx_resource:get_instance(ResourceId), ?assertEqual(stopped, StoppedStatus), - ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Can call stop/1 again on an already stopped instance - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Make sure it can be restarted and the healthchecks and queries work properly - ?assertEqual(ok, emqx_resource:restart(PoolName)), + ?assertEqual(ok, emqx_resource:restart(ResourceId)), % async restart, need to wait resource timer:sleep(500), {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), (fun() -> QueryNoParamsResWrapper = - emqx_resource:query(PoolName, test_query_no_params()), + emqx_resource:query(ResourceId, test_query_no_params()), ?assertMatch({ok, _}, QueryNoParamsResWrapper) end)(), % Stop and remove the resource in one go. - ?assertEqual(ok, emqx_resource:remove_local(PoolName)), - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual(ok, emqx_resource:remove_local(ResourceId)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Should not even be able to get the resource data out of ets now unlike just stopping. - ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)). 
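After the renames, the suites consistently juggle two handles: the ResourceId the resource was created under (used for every emqx_resource call) and the pool name read back from the connector state (used only to poke ecpool directly). The lifecycle exercised above condenses to the sketch below; ?CONNECTOR_RESOURCE_GROUP is the group macro the suites already use, and the config and module arguments are placeholders:

%% Condensed sketch of the lifecycle checked by the suites above.
lifecycle_sketch(ResourceId, CheckedConfig, ResourceMod) ->
    {ok, #{state := #{pool_name := PoolName}}} =
        emqx_resource:create_local(
            ResourceId,
            ?CONNECTOR_RESOURCE_GROUP,
            ResourceMod,
            CheckedConfig,
            #{}
        ),
    {ok, connected} = emqx_resource:health_check(ResourceId),
    ok = emqx_resource:stop(ResourceId),
    %% once stopped, the underlying ecpool pool is gone as well
    {error, not_found} = ecpool:stop_sup_pool(PoolName),
    %% stop/1 is idempotent; restart/1 is async, so the suites sleep before re-checking
    ok = emqx_resource:stop(ResourceId),
    ok = emqx_resource:restart(ResourceId),
    ok = emqx_resource:remove_local(ResourceId).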
%%-------------------------------------------------------------------- %% utils diff --git a/lib-ee/emqx_ee_connector/test/emqx_ee_connector_clickhouse_SUITE.erl b/lib-ee/emqx_ee_connector/test/emqx_ee_connector_clickhouse_SUITE.erl index 73018e14f..e704a2c0c 100644 --- a/lib-ee/emqx_ee_connector/test/emqx_ee_connector_clickhouse_SUITE.erl +++ b/lib-ee/emqx_ee_connector/test/emqx_ee_connector_clickhouse_SUITE.erl @@ -95,15 +95,15 @@ show(Label, What) -> erlang:display({Label, What}), What. -perform_lifecycle_check(PoolName, InitialConfig) -> +perform_lifecycle_check(ResourceID, InitialConfig) -> {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?CLICKHOUSE_RESOURCE_MOD, InitialConfig), {ok, #{ - state := #{poolname := ReturnedPoolName} = State, + state := #{pool_name := PoolName} = State, status := InitialStatus }} = emqx_resource:create_local( - PoolName, + ResourceID, ?CONNECTOR_RESOURCE_GROUP, ?CLICKHOUSE_RESOURCE_MOD, CheckedConfig, @@ -115,49 +115,49 @@ perform_lifecycle_check(PoolName, InitialConfig) -> state := State, status := InitialStatus }} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceID), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceID)), % % Perform query as further check that the resource is working as expected (fun() -> - erlang:display({pool_name, PoolName}), - QueryNoParamsResWrapper = emqx_resource:query(PoolName, test_query_no_params()), + erlang:display({pool_name, ResourceID}), + QueryNoParamsResWrapper = emqx_resource:query(ResourceID, test_query_no_params()), ?assertMatch({ok, _}, QueryNoParamsResWrapper), {_, QueryNoParamsRes} = QueryNoParamsResWrapper, ?assertMatch(<<"1">>, string:trim(QueryNoParamsRes)) end)(), - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceID)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. {ok, ?CONNECTOR_RESOURCE_GROUP, #{ state := State, status := StoppedStatus }} = - emqx_resource:get_instance(PoolName), + emqx_resource:get_instance(ResourceID), ?assertEqual(stopped, StoppedStatus), - ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceID)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. 
- ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Can call stop/1 again on an already stopped instance - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceID)), % Make sure it can be restarted and the healthchecks and queries work properly - ?assertEqual(ok, emqx_resource:restart(PoolName)), + ?assertEqual(ok, emqx_resource:restart(ResourceID)), % async restart, need to wait resource timer:sleep(500), {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceID), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceID)), (fun() -> QueryNoParamsResWrapper = - emqx_resource:query(PoolName, test_query_no_params()), + emqx_resource:query(ResourceID, test_query_no_params()), ?assertMatch({ok, _}, QueryNoParamsResWrapper), {_, QueryNoParamsRes} = QueryNoParamsResWrapper, ?assertMatch(<<"1">>, string:trim(QueryNoParamsRes)) end)(), % Stop and remove the resource in one go. - ?assertEqual(ok, emqx_resource:remove_local(PoolName)), - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual(ok, emqx_resource:remove_local(ResourceID)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Should not even be able to get the resource data out of ets now unlike just stopping. - ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceID)). % %%------------------------------------------------------------------------------ % %% Helpers From a08c0002229aa079af013954b8b64c9c328fa51f Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Wed, 12 Apr 2023 12:37:36 +0300 Subject: [PATCH 050/263] test(mongo): do not rely on internals to clean database --- .../test/emqx_ee_bridge_mongodb_SUITE.erl | 29 ++++++++++++------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl index 0959e3c78..fc4270fd8 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl @@ -147,6 +147,16 @@ ensure_loaded() -> _ = emqx_ee_bridge:module_info(), ok. +mongo_type(Config) -> + case ?config(mongo_type, Config) of + rs -> + {rs, maps:get(<<"replica_set_name">>, ?config(mongo_config, Config))}; + sharded -> + sharded; + single -> + single + end. + mongo_type_bin(rs) -> <<"mongodb_rs">>; mongo_type_bin(sharded) -> @@ -263,17 +273,14 @@ create_bridge_http(Params) -> end. clear_db(Config) -> - Type = mongo_type_bin(?config(mongo_type, Config)), - Name = ?config(mongo_name, Config), - #{<<"collection">> := Collection} = ?config(mongo_config, Config), - ResourceID = emqx_bridge_resource:resource_id(Type, Name), - {ok, _, #{state := #{connector_state := #{poolname := PoolName}}}} = - emqx_resource:get_instance(ResourceID), - Selector = #{}, - {true, _} = ecpool:pick_and_do( - PoolName, {mongo_api, delete, [Collection, Selector]}, no_handover - ), - ok. 
+ Type = mongo_type(Config), + Host = ?config(mongo_host, Config), + Port = ?config(mongo_port, Config), + Server = Host ++ ":" ++ integer_to_list(Port), + #{<<"database">> := Db, <<"collection">> := Collection} = ?config(mongo_config, Config), + {ok, Client} = mongo_api:connect(Type, [Server], [], [{database, Db}, {w_mode, unsafe}]), + {true, _} = mongo_api:delete(Client, Collection, _Selector = #{}), + mongo_api:disconnect(Client). find_all(Config) -> Type = mongo_type_bin(?config(mongo_type, Config)), From e3d6fa1f21a3997da02f2af78d0a67cac187d6c8 Mon Sep 17 00:00:00 2001 From: Zhongwen Deng Date: Tue, 18 Apr 2023 14:24:08 +0800 Subject: [PATCH 051/263] chore: update config's changelog and emqx_conf.template --- apps/emqx_conf/etc/emqx_conf.conf | 17 ++++++++++------- changes/ce/feat-10358.en.md | 2 -- changes/ce/feat-10381.en.md | 1 - changes/ce/feat-10385.en.md | 1 - changes/ce/feat-10391.en.md | 1 - changes/ce/feat-10426.en.md | 10 ++++++++++ rel/emqx_conf.template.en.md | 11 +++++------ rel/emqx_conf.template.zh.md | 6 +++--- 8 files changed, 28 insertions(+), 21 deletions(-) delete mode 100644 changes/ce/feat-10358.en.md delete mode 100644 changes/ce/feat-10381.en.md delete mode 100644 changes/ce/feat-10385.en.md delete mode 100644 changes/ce/feat-10391.en.md create mode 100644 changes/ce/feat-10426.en.md diff --git a/apps/emqx_conf/etc/emqx_conf.conf b/apps/emqx_conf/etc/emqx_conf.conf index 86147bf25..45733d158 100644 --- a/apps/emqx_conf/etc/emqx_conf.conf +++ b/apps/emqx_conf/etc/emqx_conf.conf @@ -1,11 +1,14 @@ ## NOTE: -## Configs in this file might be overridden by: -## 1. Environment variables which start with 'EMQX_' prefix -## 2. File $EMQX_NODE__DATA_DIR/configs/cluster-override.conf -## 3. File $EMQX_NODE__DATA_DIR/configs/local-override.conf -## -## The *-override.conf files are overwritten at runtime when changes -## are made from EMQX dashboard UI, management HTTP API, or CLI. +## The order of priority for configuration is: +## environment variables, 'etc/emqx.conf', and 'data/configs/cluster.hocon'. +## 1. Settings in environment variables starting with 'EMQX_' are given the highest priority. +## 2. Configuration settings in the 'etc/emqx.conf' file may be overwritten by environment variables. +## 3. Changes made through the EMQX dashboard UI, management HTTP API, or CLI +## overwrite the 'data/configs/cluster.hocon' file at runtime. +## 4. If you modify a configuration setting using the API, the change takes effect immediately. +## 5. However, if the same setting is configured differently in the 'etc/emqx.conf' file, +## 'etc/emqx.conf' takes priority after a reboot. + ## All configuration details can be found in emqx.conf.example node { diff --git a/changes/ce/feat-10358.en.md b/changes/ce/feat-10358.en.md deleted file mode 100644 index e6d05c84b..000000000 --- a/changes/ce/feat-10358.en.md +++ /dev/null @@ -1,2 +0,0 @@ -Hide `flapping_detect/conn_congestion/stats` configuration. -Deprecate `flapping_detect.enable`. diff --git a/changes/ce/feat-10381.en.md b/changes/ce/feat-10381.en.md deleted file mode 100644 index 3ea11188f..000000000 --- a/changes/ce/feat-10381.en.md +++ /dev/null @@ -1 +0,0 @@ -Hide the `auto_subscribe` configuration items so that they can be modified later only through the HTTP API. 
diff --git a/changes/ce/feat-10385.en.md b/changes/ce/feat-10385.en.md deleted file mode 100644 index 667e01890..000000000 --- a/changes/ce/feat-10385.en.md +++ /dev/null @@ -1 +0,0 @@ -Hide data items(rule_engine/bridge/authz/authn) from configuration files and documentation. diff --git a/changes/ce/feat-10391.en.md b/changes/ce/feat-10391.en.md deleted file mode 100644 index a64b01221..000000000 --- a/changes/ce/feat-10391.en.md +++ /dev/null @@ -1 +0,0 @@ -hide exhook/rewrite/topic_metric/persistent_session_store/overload_protection from the docs and configuration file. diff --git a/changes/ce/feat-10426.en.md b/changes/ce/feat-10426.en.md new file mode 100644 index 000000000..660d4baa2 --- /dev/null +++ b/changes/ce/feat-10426.en.md @@ -0,0 +1,10 @@ +1. Changes in configuration priority: + For new EMQX installations, configuration priority is `ENV > emqx.conf > HTTP API`. + For upgrades from an older version with cluster-override.conf, configuration priority remains the same: `HTTP API > ENV > emqx.conf`. +2. Deprecation of `data/configs/local-override.conf`. +3. Simplified configuration items, hidden some advanced items + The hidden configurations include: exhook,rewrite,topic_metric,persistent_session_store,overload_protection, + flapping_detect,conn_congestion,stats,auto_subscribe,broker_perf,rule_engine,bridge,shared_subscription_group,slow_subs + and some advance items in node/dashboard. +4. This hidden update doesn't change the functionality of the original configuration, + but it sets the stage for an improved presentation of configuration documentation in future versions. diff --git a/rel/emqx_conf.template.en.md b/rel/emqx_conf.template.en.md index 8740e4319..b63b50a14 100644 --- a/rel/emqx_conf.template.en.md +++ b/rel/emqx_conf.template.en.md @@ -9,18 +9,18 @@ From bottom up: 1. Immutable base: `emqx.conf` + `EMQX_` prefixed environment variables.
Changes in this layer require a full node restart to take effect. -1. Cluster overrides: `$EMQX_NODE__DATA_DIR/configs/cluster-override.conf` +2. Cluster overrides: `$EMQX_NODE__DATA_DIR/configs/cluster.hocon` When environment variable `$EMQX_NODE__DATA_DIR` is not set, config `node.data_dir` is used. -The `cluster-override.conf` file is overwritten at runtime when changes +The `cluster.hocon` file is overwritten at runtime when changes are made from dashboard UI, management HTTP API, or CLI. When clustered, after EMQX restarts, it copies the file from the node which has the greatest `uptime`. :::tip Tip Some of the configs (such as `node.name`) are boot-only configs and not overridable. -Config values from `*-override.conf` are **not** mapped to boot configs for +Config values from `cluster.hocon` are **not** mapped to boot configs for the config fields attributed with `mapping: path.to.boot.config.key` ::: @@ -148,7 +148,7 @@ export EMQX_LISTENERS__SSL__L1__AUTHENTICATION__SSL__CIPHERS='["TLS_AES_256_GCM_ However this also means a string value should be quoted if it happens to contain special characters such as `=` and `:`. -For example, a string value `"localhost:1883"` would be +For example, a string value `"localhost:1883"` would be parsed into object (struct): `{"localhost": 1883}`. To keep it as a string, one should quote the value like below: @@ -226,7 +226,7 @@ Arrays in EMQX config have two different representations Dot-separated paths with number in it are parsed to indexed-maps e.g. `authentication.1={...}` is parsed as `authentication={"1": {...}}` -This feature makes it easy to override array elment values. For example: +This feature makes it easy to override array element values. For example: ``` authentication=[{enable=true, backend="built_in_database", mechanism="password_based"}] @@ -322,4 +322,3 @@ ciphers = "PSK-AES128-CBC-SHA" ] ``` - diff --git a/rel/emqx_conf.template.zh.md b/rel/emqx_conf.template.zh.md index 9402760a2..916eed38d 100644 --- a/rel/emqx_conf.template.zh.md +++ b/rel/emqx_conf.template.zh.md @@ -7,18 +7,18 @@ EMQX的配置文件可分为二层,自底向上依次是: 1. 不可变的基础层 `emqx.conf` 加上 `EMQX_` 前缀的环境变量。
修改这一层的配置之后,需要重启节点来使之生效。 -1. 集群范围重载层:`$EMQX_NODE__DATA_DIR/configs/cluster-override.conf` +2. 集群范围重载层:`$EMQX_NODE__DATA_DIR/configs/cluster.hocon` 如果环境变量 `$EMQX_NODE__DATA_DIR` 没有设置,那么该目录会从 `emqx.conf` 的 `node.data_dir` 配置中读取。 -配置文件 `cluster-override.conf` 的内容会在运行时被EMQX重写。 +配置文件 `cluster.hocon` 的内容会在运行时被EMQX重写。 这些重写发生在 dashboard UI,管理HTTP API,或者CLI对集群配置进行修改时。 当EMQX运行在集群中时,一个EMQX节点重启之后,会从集群中其他节点复制该文件内容到本地。 :::tip Tip 有些配置项是不能被重载的(例如 `node.name`)。 配置项如果有 `mapping: path.to.boot.config.key` 这个属性, -则不能被添加到重载文件 `*-override.conf` 中。 +则不能被添加到重载文件 `cluster.hocon` 中。 ::: 更多的重载规则,请参考下文 [配置重载规则](#配置重载规则)。 From 22a1d05d7b64859249b182373c10a6c6976f66c1 Mon Sep 17 00:00:00 2001 From: Zhongwen Deng Date: Tue, 18 Apr 2023 14:38:17 +0800 Subject: [PATCH 052/263] feat: hide ssl_options.user_lookup_fun --- apps/emqx/src/emqx_schema.erl | 2 ++ changes/ce/feat-10426.en.md | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index 8073e19b5..38566f3cb 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -2225,6 +2225,7 @@ common_ssl_opts_schema(Defaults) -> #{ default => AvailableVersions, desc => ?DESC(common_ssl_opts_schema_versions), + importance => ?IMPORTANCE_HIGH, validator => fun(Inputs) -> validate_tls_versions(AvailableVersions, Inputs) end } )}, @@ -2235,6 +2236,7 @@ common_ssl_opts_schema(Defaults) -> #{ default => <<"emqx_tls_psk:lookup">>, converter => fun ?MODULE:user_lookup_fun_tr/2, + importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(common_ssl_opts_schema_user_lookup_fun) } )}, diff --git a/changes/ce/feat-10426.en.md b/changes/ce/feat-10426.en.md index 660d4baa2..b169dd19b 100644 --- a/changes/ce/feat-10426.en.md +++ b/changes/ce/feat-10426.en.md @@ -5,6 +5,6 @@ 3. Simplified configuration items, hidden some advanced items The hidden configurations include: exhook,rewrite,topic_metric,persistent_session_store,overload_protection, flapping_detect,conn_congestion,stats,auto_subscribe,broker_perf,rule_engine,bridge,shared_subscription_group,slow_subs - and some advance items in node/dashboard. + ssl_options.user_lookup_fun and some advance items in node/dashboard. 4. This hidden update doesn't change the functionality of the original configuration, but it sets the stage for an improved presentation of configuration documentation in future versions. From e3a84e0010eb3ea07e8b4e2f1b243d666c4d8e43 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Tue, 18 Apr 2023 15:50:08 +0800 Subject: [PATCH 053/263] chore: improve the changes for configuration --- changes/ce/feat-10391.en.md | 7 +++++++ changes/ce/feat-10426.en.md | 14 ++++---------- 2 files changed, 11 insertions(+), 10 deletions(-) create mode 100644 changes/ce/feat-10391.en.md diff --git a/changes/ce/feat-10391.en.md b/changes/ce/feat-10391.en.md new file mode 100644 index 000000000..5c37436dd --- /dev/null +++ b/changes/ce/feat-10391.en.md @@ -0,0 +1,7 @@ +Hide a large number of advanced options to simplify the configuration file. + +That includes `exhook`, `rewrite`, `topic_metric`, `persistent_session_store`, `overload_protection`, +`flapping_detect`, `conn_congestion`, `stats,auto_subscribe`, `broker_perf`, `rule_engine`, `bridge`, +`shared_subscription_group`, `slow_subs`, `ssl_options.user_lookup_fun` and some advance items +in `node` and `dashboard` section, [#10358](https://github.com/emqx/emqx/pull/10358), +[#10381](https://github.com/emqx/emqx/pull/10381), [#10385](https://github.com/emqx/emqx/pull/10385). 
diff --git a/changes/ce/feat-10426.en.md b/changes/ce/feat-10426.en.md index b169dd19b..8575347d6 100644 --- a/changes/ce/feat-10426.en.md +++ b/changes/ce/feat-10426.en.md @@ -1,10 +1,4 @@ -1. Changes in configuration priority: - For new EMQX installations, configuration priority is `ENV > emqx.conf > HTTP API`. - For upgrades from an older version with cluster-override.conf, configuration priority remains the same: `HTTP API > ENV > emqx.conf`. -2. Deprecation of `data/configs/local-override.conf`. -3. Simplified configuration items, hidden some advanced items - The hidden configurations include: exhook,rewrite,topic_metric,persistent_session_store,overload_protection, - flapping_detect,conn_congestion,stats,auto_subscribe,broker_perf,rule_engine,bridge,shared_subscription_group,slow_subs - ssl_options.user_lookup_fun and some advance items in node/dashboard. -4. This hidden update doesn't change the functionality of the original configuration, - but it sets the stage for an improved presentation of configuration documentation in future versions. +Optimize the configuration priority mechanism to fix the issue where the configuration +changes made to `etc/emqx.conf` do not take effect after restarting EMQX. + +More introduction about the new mechanism: [Configure Override Rules](https://www.emqx.io/docs/en/v5.0/configuration/configuration.html#configure-override-rules) From fbadfc06e421c749f08b33a0e09decda9e801801 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9F=90=E6=96=87?= Date: Tue, 18 Apr 2023 20:15:19 +0800 Subject: [PATCH 054/263] feat: change exhook, rule_engine, bridge to low importance level instead of hidden --- apps/emqx_bridge/src/schema/emqx_bridge_schema.erl | 2 +- apps/emqx_exhook/src/emqx_exhook_schema.erl | 2 +- apps/emqx_rule_engine/src/emqx_rule_engine_schema.erl | 2 +- changes/ce/feat-10391.en.md | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl b/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl index e5def2d64..4b9b7e3fe 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl @@ -137,7 +137,7 @@ namespace() -> "bridge". tags() -> [<<"Bridge">>]. -roots() -> [{bridges, ?HOCON(?R_REF(bridges), #{importance => ?IMPORTANCE_HIDDEN})}]. +roots() -> [{bridges, ?HOCON(?R_REF(bridges), #{importance => ?IMPORTANCE_LOW})}]. fields(bridges) -> [ diff --git a/apps/emqx_exhook/src/emqx_exhook_schema.erl b/apps/emqx_exhook/src/emqx_exhook_schema.erl index 708e164fc..f6cc896f3 100644 --- a/apps/emqx_exhook/src/emqx_exhook_schema.erl +++ b/apps/emqx_exhook/src/emqx_exhook_schema.erl @@ -32,7 +32,7 @@ namespace() -> exhook. roots() -> - [{exhook, ?HOCON(?R_REF(exhook), #{importance => ?IMPORTANCE_HIDDEN})}]. + [{exhook, ?HOCON(?R_REF(exhook), #{importance => ?IMPORTANCE_LOW})}]. fields(exhook) -> [ diff --git a/apps/emqx_rule_engine/src/emqx_rule_engine_schema.erl b/apps/emqx_rule_engine/src/emqx_rule_engine_schema.erl index 242c86c71..bc8cae07a 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_engine_schema.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_engine_schema.erl @@ -38,7 +38,7 @@ namespace() -> rule_engine. tags() -> [<<"Rule Engine">>]. -roots() -> [{"rule_engine", ?HOCON(?R_REF("rule_engine"), #{importance => ?IMPORTANCE_HIDDEN})}]. +roots() -> [{"rule_engine", ?HOCON(?R_REF("rule_engine"), #{importance => ?IMPORTANCE_LOW})}]. 
fields("rule_engine") -> rule_engine_settings() ++ diff --git a/changes/ce/feat-10391.en.md b/changes/ce/feat-10391.en.md index 5c37436dd..f33757404 100644 --- a/changes/ce/feat-10391.en.md +++ b/changes/ce/feat-10391.en.md @@ -1,7 +1,7 @@ Hide a large number of advanced options to simplify the configuration file. -That includes `exhook`, `rewrite`, `topic_metric`, `persistent_session_store`, `overload_protection`, -`flapping_detect`, `conn_congestion`, `stats,auto_subscribe`, `broker_perf`, `rule_engine`, `bridge`, +That includes `rewrite`, `topic_metric`, `persistent_session_store`, `overload_protection`, +`flapping_detect`, `conn_congestion`, `stats,auto_subscribe`, `broker_perf`, `shared_subscription_group`, `slow_subs`, `ssl_options.user_lookup_fun` and some advance items in `node` and `dashboard` section, [#10358](https://github.com/emqx/emqx/pull/10358), [#10381](https://github.com/emqx/emqx/pull/10381), [#10385](https://github.com/emqx/emqx/pull/10385). From 444196922cb9523e6bf8eae915ed2b49c4298ea4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9F=90=E6=96=87?= Date: Tue, 18 Apr 2023 20:18:46 +0800 Subject: [PATCH 055/263] chore: update emqx.conf's note --- apps/emqx_conf/etc/emqx_conf.conf | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/apps/emqx_conf/etc/emqx_conf.conf b/apps/emqx_conf/etc/emqx_conf.conf index 45733d158..8ad84fd7c 100644 --- a/apps/emqx_conf/etc/emqx_conf.conf +++ b/apps/emqx_conf/etc/emqx_conf.conf @@ -1,13 +1,17 @@ ## NOTE: -## The order of priority for configuration is: -## environment variables, 'etc/emqx.conf', and 'data/configs/cluster.hocon'. -## 1. Settings in environment variables starting with 'EMQX_' are given the highest priority. -## 2. Configuration settings in the 'etc/emqx.conf' file may be overwritten by environment variables. -## 3. Changes made through the EMQX dashboard UI, management HTTP API, or CLI -## overwrite the 'data/configs/cluster.hocon' file at runtime. -## 4. If you modify a configuration setting using the API, the change takes effect immediately. -## 5. However, if the same setting is configured differently in the 'etc/emqx.conf' file, -## 'etc/emqx.conf' takes priority after a reboot. +## The EMQX configuration is prioritized (overlayed) in the following order: +## `cluster.hocon < emqx.conf < environment variables`. + +## Settings in environment variables that begin with 'EMQX_' have the highest priority +## and will override any settings in the `etc/emqx.conf` file. + +## Changes made through the EMQX dashboard UI, management HTTP API, or CLI +## will be written into the `data/configs/cluster.hocon` file at runtime and will take effect immediately. + +## However, if the same configuration items are set differently in the `etc/emqx.conf` file, +## the runtime updates will be overridden by the settings in `etc/emqx.conf` after the node restarts. + +## To avoid confusion, it is highly recommend NOT to have the same config keys in both `cluster.hocon` and `emqx.conf`. 
## All configuration details can be found in emqx.conf.example From 9ca09383ba725887e2d00829fd6ae153a7a1550a Mon Sep 17 00:00:00 2001 From: zhongwencool Date: Tue, 18 Apr 2023 21:07:02 +0800 Subject: [PATCH 056/263] chore: remove desc from emqx.conf Co-authored-by: Zaiming (Stone) Shi --- apps/emqx_conf/etc/emqx_conf.conf | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/apps/emqx_conf/etc/emqx_conf.conf b/apps/emqx_conf/etc/emqx_conf.conf index 8ad84fd7c..dea04e9e4 100644 --- a/apps/emqx_conf/etc/emqx_conf.conf +++ b/apps/emqx_conf/etc/emqx_conf.conf @@ -2,18 +2,6 @@ ## The EMQX configuration is prioritized (overlayed) in the following order: ## `cluster.hocon < emqx.conf < environment variables`. -## Settings in environment variables that begin with 'EMQX_' have the highest priority -## and will override any settings in the `etc/emqx.conf` file. - -## Changes made through the EMQX dashboard UI, management HTTP API, or CLI -## will be written into the `data/configs/cluster.hocon` file at runtime and will take effect immediately. - -## However, if the same configuration items are set differently in the `etc/emqx.conf` file, -## the runtime updates will be overridden by the settings in `etc/emqx.conf` after the node restarts. - -## To avoid confusion, it is highly recommend NOT to have the same config keys in both `cluster.hocon` and `emqx.conf`. - -## All configuration details can be found in emqx.conf.example node { name = "emqx@127.0.0.1" From 59182ee0fce6e5be75c54508af645b44268dedbd Mon Sep 17 00:00:00 2001 From: zhongwencool Date: Tue, 18 Apr 2023 21:07:16 +0800 Subject: [PATCH 057/263] chore: update apps/emqx_conf/etc/emqx_conf.conf Co-authored-by: Zaiming (Stone) Shi --- apps/emqx_conf/etc/emqx_conf.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/emqx_conf/etc/emqx_conf.conf b/apps/emqx_conf/etc/emqx_conf.conf index dea04e9e4..a54894dcd 100644 --- a/apps/emqx_conf/etc/emqx_conf.conf +++ b/apps/emqx_conf/etc/emqx_conf.conf @@ -1,6 +1,6 @@ ## NOTE: ## The EMQX configuration is prioritized (overlayed) in the following order: -## `cluster.hocon < emqx.conf < environment variables`. +## `data/configs/cluster.hocon < etc/emqx.conf < environment variables`. node { From 97b8f00508a57824191d9d56833e9b032c4c94fe Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Tue, 18 Apr 2023 15:07:57 +0200 Subject: [PATCH 058/263] test: pick random port number for gcp pubsub mock server --- .../test/emqx_connector_web_hook_server.erl | 26 +++++++++++++- .../test/emqx_ee_bridge_gcp_pubsub_SUITE.erl | 35 +++++++++---------- 2 files changed, 42 insertions(+), 19 deletions(-) diff --git a/apps/emqx_connector/test/emqx_connector_web_hook_server.erl b/apps/emqx_connector/test/emqx_connector_web_hook_server.erl index b68ebcbba..bdc6e100c 100644 --- a/apps/emqx_connector/test/emqx_connector_web_hook_server.erl +++ b/apps/emqx_connector/test/emqx_connector_web_hook_server.erl @@ -29,7 +29,14 @@ start_link(Port, Path) -> start_link(Port, Path, false). start_link(Port, Path, SSLOpts) -> - supervisor:start_link({local, ?MODULE}, ?MODULE, [Port, Path, SSLOpts]). + case Port of + random -> + PickedPort = pick_port_number(56000), + {ok, Pid} = supervisor:start_link({local, ?MODULE}, ?MODULE, [PickedPort, Path, SSLOpts]), + {ok, {PickedPort, Pid}}; + _ -> + supervisor:start_link({local, ?MODULE}, ?MODULE, [Port, Path, SSLOpts]) + end. stop() -> try @@ -103,3 +110,20 @@ default_handler(Req0, State) -> Req0 ), {ok, Req, State}. 
+ +pick_port_number(Port) -> + case is_port_in_use(Port) of + true -> + pick_port_number(Port + 1); + false -> + Port + end. + +is_port_in_use(Port) -> + case gen_tcp:listen(Port, [{reuseaddr, true}, {active, false}]) of + {ok, ListenSocket} -> + gen_tcp:close(ListenSocket), + false; + {error, eaddrinuse} -> + true + end. diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl index a785924d4..b8377d814 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl @@ -70,22 +70,13 @@ init_per_suite(Config) -> ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_bridge, emqx_rule_engine]), {ok, _} = application:ensure_all_started(emqx_connector), emqx_mgmt_api_test_util:init_suite(), - HTTPHost = "localhost", - HTTPPort = 56000, - HostPort = HTTPHost ++ ":" ++ integer_to_list(HTTPPort), - true = os:putenv("PUBSUB_EMULATOR_HOST", HostPort), - [ - {http_host, HTTPHost}, - {http_port, HTTPPort} - | Config - ]. + Config. end_per_suite(_Config) -> emqx_mgmt_api_test_util:end_suite(), ok = emqx_common_test_helpers:stop_apps([emqx_conf]), ok = emqx_connector_test_helpers:stop_apps([emqx_bridge, emqx_resource, emqx_rule_engine]), _ = application:stop(emqx_connector), - os:unsetenv("PUBSUB_EMULATOR_HOST"), ok. init_per_group(sync_query, Config) -> @@ -113,26 +104,26 @@ init_per_testcase(TestCase, Config0) when 1 -> [{skip_due_to_no_batching, true}]; _ -> - {ok, _} = start_echo_http_server(), delete_all_bridges(), Tid = install_telemetry_handler(TestCase), Config = generate_config(Config0), put(telemetry_table, Tid), - [{telemetry_table, Tid} | Config] + {ok, HttpServer} = start_echo_http_server(), + [{telemetry_table, Tid}, {http_server, HttpServer} | Config] end; init_per_testcase(TestCase, Config0) -> ct:timetrap({seconds, 30}), - {ok, _} = start_echo_http_server(), + {ok, HttpServer} = start_echo_http_server(), delete_all_bridges(), Tid = install_telemetry_handler(TestCase), Config = generate_config(Config0), put(telemetry_table, Tid), - [{telemetry_table, Tid} | Config]. + [{telemetry_table, Tid}, {http_server, HttpServer} | Config]. end_per_testcase(_TestCase, _Config) -> ok = snabbkaffe:stop(), delete_all_bridges(), - ok = emqx_connector_web_hook_server:stop(), + ok = stop_echo_http_server(), emqx_common_test_helpers:call_janitor(), ok. @@ -242,7 +233,6 @@ success_http_handler() -> start_echo_http_server() -> HTTPHost = "localhost", - HTTPPort = 56000, HTTPPath = <<"/v1/projects/myproject/topics/mytopic:publish">>, ServerSSLOpts = [ @@ -250,14 +240,23 @@ start_echo_http_server() -> {versions, ['tlsv1.2', 'tlsv1.3']}, {ciphers, ["ECDHE-RSA-AES256-GCM-SHA384", "TLS_CHACHA20_POLY1305_SHA256"]} ] ++ certs(), - {ok, _} = emqx_connector_web_hook_server:start_link(HTTPPort, HTTPPath, ServerSSLOpts), + {ok, {HTTPPort, _Pid}} = emqx_connector_web_hook_server:start_link( + random, HTTPPath, ServerSSLOpts + ), ok = emqx_connector_web_hook_server:set_handler(success_http_handler()), + HTTPHost = "localhost", + HostPort = HTTPHost ++ ":" ++ integer_to_list(HTTPPort), + true = os:putenv("PUBSUB_EMULATOR_HOST", HostPort), {ok, #{ - host_port => HTTPHost ++ ":" ++ integer_to_list(HTTPPort), + host_port => HostPort, host => HTTPHost, port => HTTPPort }}. +stop_echo_http_server() -> + os:unsetenv("PUBSUB_EMULATOR_HOST"), + ok = emqx_connector_web_hook_server:stop(). 
+ certs() -> CertsPath = emqx_common_test_helpers:deps_path(emqx, "etc/certs"), [ From 3cd9ca706795b2ccd5c3a14f0270a77a031450c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9F=90=E6=96=87?= Date: Tue, 18 Apr 2023 21:10:40 +0800 Subject: [PATCH 059/263] chore: change config seq --- rel/emqx_conf.template.en.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/rel/emqx_conf.template.en.md b/rel/emqx_conf.template.en.md index b63b50a14..bded20da6 100644 --- a/rel/emqx_conf.template.en.md +++ b/rel/emqx_conf.template.en.md @@ -7,9 +7,10 @@ and a superset of JSON. EMQX configuration consists of two layers. From bottom up: -1. Immutable base: `emqx.conf` + `EMQX_` prefixed environment variables.
+1. Cluster configs: `$EMQX_NODE__DATA_DIR/configs/cluster.hocon` +2. `emqx.conf` + `EMQX_` prefixed environment variables.
Changes in this layer require a full node restart to take effect. -2. Cluster overrides: `$EMQX_NODE__DATA_DIR/configs/cluster.hocon` + When environment variable `$EMQX_NODE__DATA_DIR` is not set, config `node.data_dir` is used. @@ -144,8 +145,7 @@ For example, this environment variable sets an array value. ``` export EMQX_LISTENERS__SSL__L1__AUTHENTICATION__SSL__CIPHERS='["TLS_AES_256_GCM_SHA384"]' ``` - -However this also means a string value should be quoted if it happens to contain special +However, this also means a string value should be quoted if it happens to contain special characters such as `=` and `:`. For example, a string value `"localhost:1883"` would be From 87c517069608724d72f08a199ff11ac9f2342d13 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Tue, 18 Apr 2023 21:33:41 +0800 Subject: [PATCH 060/263] chore: fix flaky test --- .../test/emqx_ee_bridge_cassa_SUITE.erl | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl index 3e442a926..4711d1981 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl @@ -530,15 +530,16 @@ t_write_failure(Config) -> fun(Trace0) -> ct:pal("trace: ~p", [Trace0]), Trace = ?of_kind(buffer_worker_flush_nack, Trace0), - ?assertMatch([#{result := {async_return, {error, _}}} | _], Trace), - [#{result := {async_return, {error, Error}}} | _] = Trace, - case Error of - {resource_error, _} -> + [#{result := Result} | _] = Trace, + case Result of + {async_return, {error, {resource_error, _}}} -> ok; - {recoverable_error, disconnected} -> + {async_return, {error, {recoverable_error, disconnected}}} -> + ok; + {error, {resource_error, _}} -> ok; _ -> - ct:fail("unexpected error: ~p", [Error]) + ct:fail("unexpected error: ~p", [Result]) end end ), From 89cd6cfede44b08b6e28d26bdbb6ff6f2001e746 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Mon, 17 Apr 2023 17:01:39 -0300 Subject: [PATCH 061/263] fix(plugins): attempt to extract plugin from current node on startup Fixes https://emqx.atlassian.net/browse/EMQX-9605 Fixes https://github.com/emqx/emqx-elixir-plugin/issues/25 If an user happens to configure a plugin in a lone-node cluster via environment variables, it would fail to start up as there are no other nodes to copy the plugin from. Here, we attempt to check if the package is present in the current node but not yet extracted. 
--- apps/emqx/test/emqx_common_test_helpers.erl | 1 + apps/emqx_plugins/src/emqx_plugins.app.src | 2 +- apps/emqx_plugins/src/emqx_plugins.erl | 37 ++++++--- apps/emqx_plugins/test/emqx_plugins_SUITE.erl | 77 ++++++++++++++++++- changes/ce/fix-10422.en.md | 1 + 5 files changed, 106 insertions(+), 12 deletions(-) create mode 100644 changes/ce/fix-10422.en.md diff --git a/apps/emqx/test/emqx_common_test_helpers.erl b/apps/emqx/test/emqx_common_test_helpers.erl index e9ddc61a8..8603be879 100644 --- a/apps/emqx/test/emqx_common_test_helpers.erl +++ b/apps/emqx/test/emqx_common_test_helpers.erl @@ -764,6 +764,7 @@ setup_node(Node, Opts) when is_map(Opts) -> load_apps => LoadApps, apps => Apps, env => Env, + join_to => JoinTo, start_apps => StartApps } ] diff --git a/apps/emqx_plugins/src/emqx_plugins.app.src b/apps/emqx_plugins/src/emqx_plugins.app.src index c0372c003..d5c16ea59 100644 --- a/apps/emqx_plugins/src/emqx_plugins.app.src +++ b/apps/emqx_plugins/src/emqx_plugins.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_plugins, [ {description, "EMQX Plugin Management"}, - {vsn, "0.1.3"}, + {vsn, "0.1.4"}, {modules, []}, {mod, {emqx_plugins_app, []}}, {applications, [kernel, stdlib, emqx]}, diff --git a/apps/emqx_plugins/src/emqx_plugins.erl b/apps/emqx_plugins/src/emqx_plugins.erl index 264247086..04faa44e9 100644 --- a/apps/emqx_plugins/src/emqx_plugins.erl +++ b/apps/emqx_plugins/src/emqx_plugins.erl @@ -479,22 +479,39 @@ ensure_exists_and_installed(NameVsn) -> case filelib:is_dir(dir(NameVsn)) of true -> ok; - _ -> - Nodes = [N || N <- mria:running_nodes(), N /= node()], - case get_from_any_node(Nodes, NameVsn, []) of + false -> + %% Do we have the package, but it's not extracted yet? + case get_tar(NameVsn) of {ok, TarContent} -> ok = file:write_file(pkg_file(NameVsn), TarContent), ok = do_ensure_installed(NameVsn); - {error, NodeErrors} -> - ?SLOG(error, #{ - msg => "failed_to_copy_plugin_from_other_nodes", - name_vsn => NameVsn, - node_errors => NodeErrors - }), - {error, plugin_not_found} + _ -> + %% If not, try to get it from the cluster. + do_get_from_cluster(NameVsn) end end. +do_get_from_cluster(NameVsn) -> + Nodes = [N || N <- mria:running_nodes(), N /= node()], + case get_from_any_node(Nodes, NameVsn, []) of + {ok, TarContent} -> + ok = file:write_file(pkg_file(NameVsn), TarContent), + ok = do_ensure_installed(NameVsn); + {error, NodeErrors} when Nodes =/= [] -> + ?SLOG(error, #{ + msg => "failed_to_copy_plugin_from_other_nodes", + name_vsn => NameVsn, + node_errors => NodeErrors + }), + {error, plugin_not_found}; + {error, _} -> + ?SLOG(error, #{ + msg => "no_nodes_to_copy_plugin_from", + name_vsn => NameVsn + }), + {error, plugin_not_found} + end. + get_from_any_node([], _NameVsn, Errors) -> {error, Errors}; get_from_any_node([Node | T], NameVsn, Errors) -> diff --git a/apps/emqx_plugins/test/emqx_plugins_SUITE.erl b/apps/emqx_plugins/test/emqx_plugins_SUITE.erl index 260ad1681..14d6d06fc 100644 --- a/apps/emqx_plugins/test/emqx_plugins_SUITE.erl +++ b/apps/emqx_plugins/test/emqx_plugins_SUITE.erl @@ -45,7 +45,10 @@ all() -> groups() -> [ - {copy_plugin, [sequence], [group_t_copy_plugin_to_a_new_node]}, + {copy_plugin, [sequence], [ + group_t_copy_plugin_to_a_new_node, + group_t_copy_plugin_to_a_new_node_single_node + ]}, {create_tar_copy_plugin, [sequence], [group_t_copy_plugin_to_a_new_node]} ]. @@ -601,6 +604,78 @@ group_t_copy_plugin_to_a_new_node(Config) -> rpc:call(CopyToNode, emqx_plugins, describe, [NameVsn]) ). 
+%% checks that we can start a cluster with a lone node. +group_t_copy_plugin_to_a_new_node_single_node({init, Config}) -> + PrivDataDir = ?config(priv_dir, Config), + ToInstallDir = filename:join(PrivDataDir, "plugins_copy_to"), + file:del_dir_r(ToInstallDir), + ok = filelib:ensure_path(ToInstallDir), + #{package := Package, release_name := PluginName} = get_demo_plugin_package(ToInstallDir), + NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX), + [{CopyTo, CopyToOpts}] = + emqx_common_test_helpers:emqx_cluster( + [ + {core, plugins_copy_to} + ], + #{ + apps => [emqx_conf, emqx_plugins], + env => [ + {emqx, init_config_load_done, false}, + {emqx, boot_modules, []} + ], + env_handler => fun + (emqx_plugins) -> + ok = emqx_plugins:put_config(install_dir, ToInstallDir), + %% this is to simulate an user setting the state + %% via environment variables before starting the node + ok = emqx_plugins:put_config( + states, + [#{name_vsn => NameVsn, enable => true}] + ), + ok; + (_) -> + ok + end, + priv_data_dir => PrivDataDir, + schema_mod => emqx_conf_schema, + peer_mod => slave, + load_schema => true + } + ), + [ + {to_install_dir, ToInstallDir}, + {copy_to_node_name, CopyTo}, + {copy_to_opts, CopyToOpts}, + {name_vsn, NameVsn}, + {plugin_name, PluginName} + | Config + ]; +group_t_copy_plugin_to_a_new_node_single_node({'end', Config}) -> + CopyToNode = proplists:get_value(copy_to_node, Config), + ok = emqx_common_test_helpers:stop_slave(CopyToNode), + ok = file:del_dir_r(proplists:get_value(to_install_dir, Config)), + ok; +group_t_copy_plugin_to_a_new_node_single_node(Config) -> + CopyTo = ?config(copy_to_node_name, Config), + CopyToOpts = ?config(copy_to_opts, Config), + ToInstallDir = ?config(to_install_dir, Config), + NameVsn = proplists:get_value(name_vsn, Config), + %% Start the node for the first time. The plugin should start + %% successfully even if it's not extracted yet. Simply starting + %% the node would crash if not working properly. + CopyToNode = emqx_common_test_helpers:start_slave(CopyTo, CopyToOpts), + ct:pal("~p config:\n ~p", [ + CopyToNode, erpc:call(CopyToNode, emqx_plugins, get_config, [[], #{}]) + ]), + ct:pal("~p install_dir:\n ~p", [ + CopyToNode, erpc:call(CopyToNode, file, list_dir, [ToInstallDir]) + ]), + ?assertMatch( + {ok, #{running_status := running, config_status := enabled}}, + rpc:call(CopyToNode, emqx_plugins, describe, [NameVsn]) + ), + ok. + make_tar(Cwd, NameWithVsn) -> make_tar(Cwd, NameWithVsn, NameWithVsn). diff --git a/changes/ce/fix-10422.en.md b/changes/ce/fix-10422.en.md new file mode 100644 index 000000000..7c18ccf32 --- /dev/null +++ b/changes/ce/fix-10422.en.md @@ -0,0 +1 @@ +Fixed a bug where external plugins could not be configured via environment variables in a lone-node cluster. From acea64790be45101ec8c70dbcc583721d914023b Mon Sep 17 00:00:00 2001 From: JianBo He Date: Tue, 18 Apr 2023 17:00:37 +0800 Subject: [PATCH 062/263] chore: add README for emqx_modoules --- apps/emqx_modules/README.md | 54 +++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 apps/emqx_modules/README.md diff --git a/apps/emqx_modules/README.md b/apps/emqx_modules/README.md new file mode 100644 index 000000000..7cb1db0c5 --- /dev/null +++ b/apps/emqx_modules/README.md @@ -0,0 +1,54 @@ +# EMQX Modules + +The application provide some minor functional modules that are not included in the MQTT +protocol standard, including "Delayed Publish", "Topic Rewrite", "Topic Metrics" and "Telemetry". 
+ + +## Delayed Publish + +After enabling this module, messages sent by the user with the prefix +`$delayed/{Interval}/{Topic}` will be delayed by `{Interval}` seconds before +being published to the `{Topic}`. + +More introduction about [Delayed Publish](https://www.emqx.io/docs/en/v5.0/mqtt/mqtt-delayed-publish.html). + +See [Enabling/Disabling Delayed Publish via HTTP API](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1delayed/put). + + +## Topic Rewrite + +Topic Rewrite allows users to configure rules to change the topic strings that +the client requests to subscribe or publish. + +This feature is very useful when designing topics that are compatible with different +client versions. For example, an old device that has already been issued and cannot +be upgraded may use old topic rules, but the production environment need to apply +a new design rules for the topics. + +More introduction about [Topic Rewrite](https://www.emqx.io/docs/en/v5.0/mqtt/mqtt-topic-rewrite.html). + +See [List all rewrite rules](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1topic_rewrite/get) +and [Create or Update rewrite rules](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1topic_rewrite/put). + + +## Topic Metrics + +Topic Metrics is used for users to specify monitoring of certain topics and to +count the number of messages, QoS distribution, and rate for all messages on that topic. + +More introduction about [Topic Metrics](https://www.emqx.io/docs/en/v5.0/dashboard/diagnose.html#topic-metrics). + +See HTTP API docs to [List all monitored topics](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1topic_metrics/get), +[Create topic metrics](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1topic_metrics/post) +and [Get the monitored result](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1topic_metrics~1%7Btopic%7D/get). + + +## Telemetry + +Telemetry is used for collecting non-sensitive information about the EMQX cluster. + +More introduction about [Telemetry](https://www.emqx.io/docs/en/v5.0/telemetry/telemetry.html#telemetry). + +See HTTP API docs to [Enable/Disable telemetry](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Telemetry/paths/~1telemetry~1status/put), +[Get the enabled status](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Telemetry/paths/~1telemetry~1status/get) +and [Get the data of the module collected](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Telemetry/paths/~1telemetry~1data/get). 
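
For readers of the README added above, here is a minimal sketch (not part of the patch itself) of how a client might use the Delayed Publish prefix it describes, using the `emqtt` Erlang client. The broker address, port, client ID, topic and payload are illustrative assumptions only.

```erlang
%% Minimal sketch, assuming an EMQX broker on localhost:1883 with the delayed
%% publish module enabled, and the `emqtt` client library on the code path.
%% Broker address, client ID, topic and payload below are illustrative only.
{ok, Client} = emqtt:start_link([{host, "localhost"}, {port, 1883}, {clientid, <<"delayed_pub_demo">>}]),
{ok, _ConnProps} = emqtt:connect(Client),
%% `$delayed/{Interval}/{Topic}`: EMQX holds this message for 15 seconds and
%% then publishes the payload to the real topic `sensors/temp`.
ok = emqtt:publish(Client, <<"$delayed/15/sensors/temp">>, <<"21.5">>),
ok = emqtt:disconnect(Client).
```
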
From 7de6c5b3921a2c6d4ead2a7557760af48e5a2a24 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Tue, 18 Apr 2023 17:38:36 +0800 Subject: [PATCH 063/263] chore: update prometheus README --- apps/emqx_prometheus/README.md | 283 ++------------------------------- 1 file changed, 10 insertions(+), 273 deletions(-) diff --git a/apps/emqx_prometheus/README.md b/apps/emqx_prometheus/README.md index ddff9774c..c008adc81 100644 --- a/apps/emqx_prometheus/README.md +++ b/apps/emqx_prometheus/README.md @@ -1,279 +1,16 @@ -# emqx-prometheus +# EMQX Prometheus Agent -EMQX Prometheus Agent - -## push emqx stats/metrics to prometheus PushGateway - -``` -prometheus.push.gateway.server = http://127.0.0.1:9091 - -prometheus.interval = 15000 -``` - -## pull emqx stats/metrics - -``` -Method: GET -Path: api/v4/emqx_prometheus?type=prometheus -params: type: [prometheus| json] - -prometheus data - -# TYPE erlang_vm_ets_limit gauge -erlang_vm_ets_limit 256000 -# TYPE erlang_vm_logical_processors gauge -erlang_vm_logical_processors 4 -# TYPE erlang_vm_logical_processors_available gauge -erlang_vm_logical_processors_available NaN -# TYPE erlang_vm_logical_processors_online gauge -erlang_vm_logical_processors_online 4 -# TYPE erlang_vm_port_count gauge -erlang_vm_port_count 17 -# TYPE erlang_vm_port_limit gauge -erlang_vm_port_limit 1048576 +This application provides the ability to integrate with Prometheus. It provides +an HTTP API for collecting metrics of the current node +and also supports configuring a Push Gateway URL address for pushing these metrics. -json data +More introduction about [Integrate with Prometheus](https://www.emqx.io/docs/en/v5.0/observability/prometheus.html#integrate-with-prometheus) -{ - "stats": {key:value}, - "metrics": {key:value}, - "packets": {key:value}, - "messages": {key:value}, - "delivery": {key:value}, - "client": {key:value}, - "session": {key:value} -} - -``` +See HTTP API docs to learn how to +[Update Prometheus config](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Monitor/paths/~1prometheus/put) +and [Get all metrics data](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Monitor/paths/~1prometheus~1stats/get). 
-## Before EMQX v4.0.0 -The prometheus data simple is: - - -```bash -# TYPE erlang_vm_ets_limit gauge -erlang_vm_ets_limit 2097152 -# TYPE erlang_vm_logical_processors gauge -erlang_vm_logical_processors 2 -# TYPE erlang_vm_logical_processors_available gauge -erlang_vm_logical_processors_available 2 -# TYPE erlang_vm_logical_processors_online gauge -erlang_vm_logical_processors_online 2 -# TYPE erlang_vm_port_count gauge -erlang_vm_port_count 19 -# TYPE erlang_vm_port_limit gauge -erlang_vm_port_limit 1048576 -# TYPE erlang_vm_process_count gauge -erlang_vm_process_count 460 -# TYPE erlang_vm_process_limit gauge -erlang_vm_process_limit 2097152 -# TYPE erlang_vm_schedulers gauge -erlang_vm_schedulers 2 -# TYPE erlang_vm_schedulers_online gauge -erlang_vm_schedulers_online 2 -# TYPE erlang_vm_smp_support untyped -erlang_vm_smp_support 1 -# TYPE erlang_vm_threads untyped -erlang_vm_threads 1 -# TYPE erlang_vm_thread_pool_size gauge -erlang_vm_thread_pool_size 32 -# TYPE erlang_vm_time_correction untyped -erlang_vm_time_correction 1 -# TYPE erlang_vm_statistics_context_switches counter -erlang_vm_statistics_context_switches 39850 -# TYPE erlang_vm_statistics_garbage_collection_number_of_gcs counter -erlang_vm_statistics_garbage_collection_number_of_gcs 17116 -# TYPE erlang_vm_statistics_garbage_collection_words_reclaimed counter -erlang_vm_statistics_garbage_collection_words_reclaimed 55711819 -# TYPE erlang_vm_statistics_garbage_collection_bytes_reclaimed counter -erlang_vm_statistics_garbage_collection_bytes_reclaimed 445694552 -# TYPE erlang_vm_statistics_bytes_received_total counter -erlang_vm_statistics_bytes_received_total 400746 -# TYPE erlang_vm_statistics_bytes_output_total counter -erlang_vm_statistics_bytes_output_total 337197 -# TYPE erlang_vm_statistics_reductions_total counter -erlang_vm_statistics_reductions_total 21157980 -# TYPE erlang_vm_statistics_run_queues_length_total gauge -erlang_vm_statistics_run_queues_length_total 0 -# TYPE erlang_vm_statistics_runtime_milliseconds counter -erlang_vm_statistics_runtime_milliseconds 6559 -# TYPE erlang_vm_statistics_wallclock_time_milliseconds counter -erlang_vm_statistics_wallclock_time_milliseconds 261243 -# TYPE erlang_vm_memory_atom_bytes_total gauge -erlang_vm_memory_atom_bytes_total{usage="used"} 1814822 -erlang_vm_memory_atom_bytes_total{usage="free"} 22459 -# TYPE erlang_vm_memory_bytes_total gauge -erlang_vm_memory_bytes_total{kind="system"} 109820104 -erlang_vm_memory_bytes_total{kind="processes"} 44983656 -# TYPE erlang_vm_dets_tables gauge -erlang_vm_dets_tables 1 -# TYPE erlang_vm_ets_tables gauge -erlang_vm_ets_tables 139 -# TYPE erlang_vm_memory_processes_bytes_total gauge -erlang_vm_memory_processes_bytes_total{usage="used"} 44983656 -erlang_vm_memory_processes_bytes_total{usage="free"} 0 -# TYPE erlang_vm_memory_system_bytes_total gauge -erlang_vm_memory_system_bytes_total{usage="atom"} 1837281 -erlang_vm_memory_system_bytes_total{usage="binary"} 595872 -erlang_vm_memory_system_bytes_total{usage="code"} 40790577 -erlang_vm_memory_system_bytes_total{usage="ets"} 37426896 -erlang_vm_memory_system_bytes_total{usage="other"} 29169478 -# TYPE erlang_mnesia_held_locks gauge -erlang_mnesia_held_locks 0 -# TYPE erlang_mnesia_lock_queue gauge -erlang_mnesia_lock_queue 0 -# TYPE erlang_mnesia_transaction_participants gauge -erlang_mnesia_transaction_participants 0 -# TYPE erlang_mnesia_transaction_coordinators gauge -erlang_mnesia_transaction_coordinators 0 -# TYPE erlang_mnesia_failed_transactions counter 
-erlang_mnesia_failed_transactions 2 -# TYPE erlang_mnesia_committed_transactions counter -erlang_mnesia_committed_transactions 239 -# TYPE erlang_mnesia_logged_transactions counter -erlang_mnesia_logged_transactions 60 -# TYPE erlang_mnesia_restarted_transactions counter -erlang_mnesia_restarted_transactions 0 -# TYPE emqx_packets_auth_received counter -emqx_packets_auth_received 0 -# TYPE emqx_packets_auth_sent counter -emqx_packets_auth_sent 0 -# TYPE emqx_packets_received counter -emqx_packets_received 0 -# TYPE emqx_packets_sent counter -emqx_packets_sent 0 -# TYPE emqx_packets_connect counter -emqx_packets_connect 0 -# TYPE emqx_packets_connack_sent counter -emqx_packets_connack_sent 0 -# TYPE emqx_packets_connack_error counter -emqx_packets_connack_error 0 -# TYPE emqx_packets_connack_auth_error counter -emqx_packets_connack_auth_error 0 -# TYPE emqx_packets_disconnect_received counter -emqx_packets_disconnect_received 0 -# TYPE emqx_packets_disconnect_sent counter -emqx_packets_disconnect_sent 0 -# TYPE emqx_packets_subscribe counter -emqx_packets_subscribe 0 -# TYPE emqx_packets_subscribe_error counter -emqx_packets_subscribe_error 0 -# TYPE emqx_packets_subscribe_auth_error counter -emqx_packets_subscribe_auth_error 0 -# TYPE emqx_packets_suback counter -emqx_packets_suback 0 -# TYPE emqx_packets_unsubscribe counter -emqx_packets_unsubscribe 0 -# TYPE emqx_packets_unsubscribe_error counter -emqx_packets_unsubscribe_error 0 -# TYPE emqx_packets_unsuback counter -emqx_packets_unsuback 0 -# TYPE emqx_packets_publish_received counter -emqx_packets_publish_received 0 -# TYPE emqx_packets_publish_sent counter -emqx_packets_publish_sent 0 -# TYPE emqx_packets_publish_auth_error counter -emqx_packets_publish_auth_error 0 -# TYPE emqx_packets_publish_error counter -emqx_packets_publish_error 0 -# TYPE emqx_packets_puback_received counter -emqx_packets_puback_received 0 -# TYPE emqx_packets_puback_sent counter -emqx_packets_puback_sent 0 -# TYPE emqx_packets_puback_missed counter -emqx_packets_puback_missed 0 -# TYPE emqx_packets_pubrec_received counter -emqx_packets_pubrec_received 0 -# TYPE emqx_packets_pubrec_sent counter -emqx_packets_pubrec_sent 0 -# TYPE emqx_packets_pubrec_missed counter -emqx_packets_pubrec_missed 0 -# TYPE emqx_packets_pubrel_received counter -emqx_packets_pubrel_received 0 -# TYPE emqx_packets_pubrel_sent counter -emqx_packets_pubrel_sent 0 -# TYPE emqx_packets_pubrel_missed counter -emqx_packets_pubrel_missed 0 -# TYPE emqx_packets_pubcomp_received counter -emqx_packets_pubcomp_received 0 -# TYPE emqx_packets_pubcomp_sent counter -emqx_packets_pubcomp_sent 0 -# TYPE emqx_packets_pubcomp_missed counter -emqx_packets_pubcomp_missed 0 -# TYPE emqx_packets_pingreq counter -emqx_packets_pingreq 0 -# TYPE emqx_packets_pingresp counter -emqx_packets_pingresp 0 -# TYPE emqx_bytes_received counter -emqx_bytes_received 0 -# TYPE emqx_bytes_sent counter -emqx_bytes_sent 0 -# TYPE emqx_connections_count gauge -emqx_connections_count 0 -# TYPE emqx_connections_max gauge -emqx_connections_max 0 -# TYPE emqx_retained_count gauge -emqx_retained_count 3 -# TYPE emqx_retained_max gauge -emqx_retained_max 3 -# TYPE emqx_sessions_count gauge -emqx_sessions_count 0 -# TYPE emqx_sessions_max gauge -emqx_sessions_max 0 -# TYPE emqx_subscriptions_count gauge -emqx_subscriptions_count 0 -# TYPE emqx_subscriptions_max gauge -emqx_subscriptions_max 0 -# TYPE emqx_topics_count gauge -emqx_topics_count 0 -# TYPE emqx_topics_max gauge -emqx_topics_max 0 -# TYPE emqx_vm_cpu_use gauge 
-emqx_vm_cpu_use 100.0 -# TYPE emqx_vm_cpu_idle gauge -emqx_vm_cpu_idle 0.0 -# TYPE emqx_vm_run_queue gauge -emqx_vm_run_queue 1 -# TYPE emqx_vm_process_messages_in_queues gauge -emqx_vm_process_messages_in_queues 0 -# TYPE emqx_messages_received counter -emqx_messages_received 0 -# TYPE emqx_messages_sent counter -emqx_messages_sent 0 -# TYPE emqx_messages_dropped counter -emqx_messages_dropped 0 -# TYPE emqx_messages_retained counter -emqx_messages_retained 3 -# TYPE emqx_messages_qos0_received counter -emqx_messages_qos0_received 0 -# TYPE emqx_messages_qos0_sent counter -emqx_messages_qos0_sent 0 -# TYPE emqx_messages_qos1_received counter -emqx_messages_qos1_received 0 -# TYPE emqx_messages_qos1_sent counter -emqx_messages_qos1_sent 0 -# TYPE emqx_messages_qos2_received counter -emqx_messages_qos2_received 0 -# TYPE emqx_messages_qos2_expired counter -emqx_messages_qos2_expired 0 -# TYPE emqx_messages_qos2_sent counter -emqx_messages_qos2_sent 0 -# TYPE emqx_messages_qos2_dropped counter -emqx_messages_qos2_dropped 0 -# TYPE emqx_messages_forward counter -emqx_messages_forward 0 -``` - - -License -------- - -Apache License Version 2.0 - -Author ------- - -EMQX Team. - +Correspondingly, we have also provided a [Grafana template](https://grafana.com/grafana/dashboards/17446-emqx/) +for visualizing these metrics. From c858fba9d47eb41268d534fb88025b7004a4ab3a Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Tue, 18 Apr 2023 17:34:28 +0200 Subject: [PATCH 064/263] chore: v5.0.23 --- apps/emqx/include/emqx_release.hrl | 2 +- deploy/charts/emqx/Chart.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apps/emqx/include/emqx_release.hrl b/apps/emqx/include/emqx_release.hrl index ea79dcd0e..ec3883c77 100644 --- a/apps/emqx/include/emqx_release.hrl +++ b/apps/emqx/include/emqx_release.hrl @@ -32,7 +32,7 @@ %% `apps/emqx/src/bpapi/README.md' %% Community edition --define(EMQX_RELEASE_CE, "5.0.22"). +-define(EMQX_RELEASE_CE, "5.0.23"). %% Enterprise edition -define(EMQX_RELEASE_EE, "5.0.3-alpha.1"). diff --git a/deploy/charts/emqx/Chart.yaml b/deploy/charts/emqx/Chart.yaml index ecc211e35..312a9dfbe 100644 --- a/deploy/charts/emqx/Chart.yaml +++ b/deploy/charts/emqx/Chart.yaml @@ -14,8 +14,8 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 5.0.22 +version: 5.0.23 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. -appVersion: 5.0.22 +appVersion: 5.0.23 From f8ff2d6bc97bb81d1751a2e3a86c7d77cb869cc8 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Tue, 18 Apr 2023 18:09:24 +0200 Subject: [PATCH 065/263] docs: Generate changelog for v5.0.23 --- changes/v5.0.23.en.md | 62 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 changes/v5.0.23.en.md diff --git a/changes/v5.0.23.en.md b/changes/v5.0.23.en.md new file mode 100644 index 000000000..6d016d2da --- /dev/null +++ b/changes/v5.0.23.en.md @@ -0,0 +1,62 @@ +# v5.0.23 + +## Enhancements + +- [#10156](https://github.com/emqx/emqx/pull/10156) Change the priority of the configuration: + 1. If it is a new installation of EMQX, the priority of configuration is `ENV > emqx.conf > HTTP API`. + 2. 
If EMQX is upgraded from an old version (i.e., the cluster-override.conf file still exists in EMQX's data directory), then the configuration priority remains the same as before. That is, `HTTP API > ENV > emqx.conf`. + + Deprecated data/configs/local-override.conf. + + Stabilizing the HTTP API for hot updates. + +- [#10354](https://github.com/emqx/emqx/pull/10354) More specific error messages when configure with bad max_heap_size value. + Log current value and the max value when the `message_queue_too_long` error is thrown. + +- [#10359](https://github.com/emqx/emqx/pull/10359) Metrics now are not implicitly collected in places where API handlers don't make any use of them. Instead, a separate backplane RPC gathers cluster-wide metrics. + +- [#10373](https://github.com/emqx/emqx/pull/10373) Deprecate the trace.payload_encode configuration. + Add payload_encode=[text,hidden,hex] option when creating a trace via HTTP API. + +- [#10389](https://github.com/emqx/emqx/pull/10389) Unify the config formats for `cluster.core_nodes` and `cluster.statics.seeds`. + Now they both support formats in array `["emqx1@127.0.0.1", "emqx2@127.0.0.1"]` or semicolon-separated string `"emqx1@127.0.0.1,emqx2@127.0.0.1"`. + +- [#10391](https://github.com/emqx/emqx/pull/10391) Hide a large number of advanced options to simplify the configuration file. + + That includes `rewrite`, `topic_metric`, `persistent_session_store`, `overload_protection`, + `flapping_detect`, `conn_congestion`, `stats,auto_subscribe`, `broker_perf`, + `shared_subscription_group`, `slow_subs`, `ssl_options.user_lookup_fun` and some advance items + in `node` and `dashboard` section, [#10358](https://github.com/emqx/emqx/pull/10358), + [#10381](https://github.com/emqx/emqx/pull/10381), [#10385](https://github.com/emqx/emqx/pull/10385). + +- [#10392](https://github.com/emqx/emqx/pull/10392) A new function to convert a formatted date to an integer timestamp has been added: date_to_unix_ts/3 + +- [#10404](https://github.com/emqx/emqx/pull/10404) Change the default queue mode for buffer workers to `memory_only`. + Before this change, the default queue mode was `volatile_offload`. When under high message rate pressure and when the resource is not keeping up with such rate, the buffer performance degraded a lot due to the constant disk operations. + +- [#10426](https://github.com/emqx/emqx/pull/10426) Optimize the configuration priority mechanism to fix the issue where the configuration + changes made to `etc/emqx.conf` do not take effect after restarting EMQX. + + More introduction about the new mechanism: [Configure Override Rules](https://www.emqx.io/docs/en/v5.0/configuration/configuration.html#configure-override-rules) + +- [#10376](https://github.com/emqx/emqx/pull/10376) Simplify the configuration of the limiter feature and optimize some codes + - Rename `message_in` to `messages` + - Rename `bytes_in` to `bytes` + - Use `burst` instead of `capacity` + - Hide non-importance fields + - Optimize limiter instances in different rate settings + +- [#10430](https://github.com/emqx/emqx/pull/10430) Simplify the configuration of the `retainer` feature. + - Mark `flow_control` as non-importance field. + +## Bug Fixes + +- [#10369](https://github.com/emqx/emqx/pull/10369) Fix error in `/api/v5/monitor_current` API endpoint that happens when some EMQX nodes are down. 
+ + Prior to this fix, sometimes the request returned HTTP code 500 and the following message: + ``` + {"code":"INTERNAL_ERROR","message":"error, badarg, [{erlang,'++',[{error,nodedown},[{node,'emqx@10.42.0.150'}]], ... + ``` + +- [#10410](https://github.com/emqx/emqx/pull/10410) Fix config check failed when gateways are configured in emqx.conf. + This issue was first introduced in v5.0.22 via [#10278](https://github.com/emqx/emqx/pull/10278), the boot-time config check was missing. From 9a4af6bd7663e667da0bec3254b1e53e7c8dcc54 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Tue, 18 Apr 2023 18:23:21 +0200 Subject: [PATCH 066/263] docs: update config docs --- rel/emqx_conf.template.en.md | 19 ++++++++++++------- rel/emqx_conf.template.zh.md | 16 ++++++++++------ 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/rel/emqx_conf.template.en.md b/rel/emqx_conf.template.en.md index bded20da6..c1259869c 100644 --- a/rel/emqx_conf.template.en.md +++ b/rel/emqx_conf.template.en.md @@ -7,22 +7,27 @@ and a superset of JSON. EMQX configuration consists of two layers. From bottom up: -1. Cluster configs: `$EMQX_NODE__DATA_DIR/configs/cluster.hocon` -2. `emqx.conf` + `EMQX_` prefixed environment variables.
- Changes in this layer require a full node restart to take effect. +1. Cluster-synced configs: `$EMQX_NODE__DATA_DIR/configs/cluster.hocon`. +2. Local node configs: `emqx.conf` + `EMQX_` prefixed environment variables. +:::tip Tip +Prior to v5.0.23 and e5.0.3, the cluster-synced configs are stored in +`cluster-override.conf` which is applied on top of the local configs. + +If upgraded from an earlier version, as long as `cluster-override.conf` exists, +`cluster.hocon` will not be created, and `cluster-override.conf` will stay on +top of the overriding layers. +::: When environment variable `$EMQX_NODE__DATA_DIR` is not set, config `node.data_dir` is used. The `cluster.hocon` file is overwritten at runtime when changes -are made from dashboard UI, management HTTP API, or CLI. When clustered, +are made from Dashboard, management HTTP API, or CLI. When clustered, after EMQX restarts, it copies the file from the node which has the greatest `uptime`. :::tip Tip -Some of the configs (such as `node.name`) are boot-only configs and not overridable. -Config values from `cluster.hocon` are **not** mapped to boot configs for -the config fields attributed with `mapping: path.to.boot.config.key` +To avoid confusion, don't add the same keys in both `cluster.hocon` and `emqx.conf`. ::: For detailed override rules, see [Config Overlay Rules](#config-overlay-rules). diff --git a/rel/emqx_conf.template.zh.md b/rel/emqx_conf.template.zh.md index 916eed38d..a9df27f63 100644 --- a/rel/emqx_conf.template.zh.md +++ b/rel/emqx_conf.template.zh.md @@ -5,9 +5,15 @@ HOCON(Human-Optimized Config Object Notation)是一个JSON的超集,非常 EMQX的配置文件可分为二层,自底向上依次是: -1. 不可变的基础层 `emqx.conf` 加上 `EMQX_` 前缀的环境变量。
- 修改这一层的配置之后,需要重启节点来使之生效。 -2. 集群范围重载层:`$EMQX_NODE__DATA_DIR/configs/cluster.hocon` +1. 集群同步配置:`$EMQX_NODE__DATA_DIR/configs/cluster.hocon`。 +2. 本地节点配置:`emqx.conf` 加上 `EMQX_` 前缀的环境变量。 + +:::tip Tip +在 v5.0.23 或 e5.0.3 之前,集群同步配置保存在文件 `cluster-override.conf` 中,并且它覆盖在配置的最上层。 + +如果从之前的版本升级上来,只要 `cluster-override.conf` 文件存在, +EMQX 就不会创建 `cluster.hocon`,并且 `cluster-override.conf` 会继续覆盖在配置的最上层。 +::: 如果环境变量 `$EMQX_NODE__DATA_DIR` 没有设置,那么该目录会从 `emqx.conf` 的 `node.data_dir` 配置中读取。 @@ -16,9 +22,7 @@ EMQX的配置文件可分为二层,自底向上依次是: 当EMQX运行在集群中时,一个EMQX节点重启之后,会从集群中其他节点复制该文件内容到本地。 :::tip Tip -有些配置项是不能被重载的(例如 `node.name`)。 -配置项如果有 `mapping: path.to.boot.config.key` 这个属性, -则不能被添加到重载文件 `cluster.hocon` 中。 +为避免歧义,应尽量避免让 `cluster.hocon` 和 `emqx.conf` 出现配置交集。 ::: 更多的重载规则,请参考下文 [配置重载规则](#配置重载规则)。 From 199cbc60d910022e48e57dc546faeb4fa70d42c9 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Tue, 18 Apr 2023 10:19:09 -0300 Subject: [PATCH 067/263] chore: un-hide ocsp stapling config Undoing https://github.com/emqx/emqx/pull/10160 --- apps/emqx/src/emqx.app.src | 2 +- apps/emqx/src/emqx_schema.erl | 3 +-- apps/emqx/test/emqx_ocsp_cache_SUITE.erl | 8 ++++++-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/apps/emqx/src/emqx.app.src b/apps/emqx/src/emqx.app.src index b2dfca9e1..d42478fea 100644 --- a/apps/emqx/src/emqx.app.src +++ b/apps/emqx/src/emqx.app.src @@ -3,7 +3,7 @@ {id, "emqx"}, {description, "EMQX Core"}, % strict semver, bump manually! - {vsn, "5.0.23"}, + {vsn, "5.0.24"}, {modules, []}, {registered, []}, {applications, [ diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index 38566f3cb..ace6d3332 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -2322,8 +2322,6 @@ server_ssl_opts_schema(Defaults, IsRanchListener) -> ref("ocsp"), #{ required => false, - %% TODO: remove after e5.0.2 - importance => ?IMPORTANCE_HIDDEN, validator => fun ocsp_inner_validator/1 } )}, @@ -2332,6 +2330,7 @@ server_ssl_opts_schema(Defaults, IsRanchListener) -> boolean(), #{ default => false, + importance => ?IMPORTANCE_MEDIUM, desc => ?DESC("server_ssl_opts_schema_enable_crl_check") } )} diff --git a/apps/emqx/test/emqx_ocsp_cache_SUITE.erl b/apps/emqx/test/emqx_ocsp_cache_SUITE.erl index dff8ce5a7..15ca29853 100644 --- a/apps/emqx/test/emqx_ocsp_cache_SUITE.erl +++ b/apps/emqx/test/emqx_ocsp_cache_SUITE.erl @@ -677,8 +677,12 @@ do_t_update_listener(Config) -> %% no ocsp at first ListenerId = "ssl:default", {ok, {{_, 200, _}, _, ListenerData0}} = get_listener_via_api(ListenerId), - ?assertEqual( - undefined, + ?assertMatch( + #{ + <<"enable_ocsp_stapling">> := false, + <<"refresh_http_timeout">> := _, + <<"refresh_interval">> := _ + }, emqx_utils_maps:deep_get([<<"ssl_options">>, <<"ocsp">>], ListenerData0, undefined) ), assert_no_http_get(), From d947b663271890d4ceafde55254e83afa935add2 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Wed, 19 Apr 2023 09:51:19 +0800 Subject: [PATCH 068/263] chore: update apps/emqx_slow_subs/README.md --- apps/emqx_slow_subs/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/emqx_slow_subs/README.md b/apps/emqx_slow_subs/README.md index cfc87f775..8b83508c2 100644 --- a/apps/emqx_slow_subs/README.md +++ b/apps/emqx_slow_subs/README.md @@ -43,5 +43,5 @@ Default value: 10 Default value: whole -# Contributing - [Mandatory] +# Contributing Please see our [contributing.md](../../CONTRIBUTING.md). 
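As a concrete illustration of the configuration overlay described in the `emqx_conf.template` patches above, here is a minimal sketch for a fresh v5.0.23 install where, per the v5.0.23 changelog, the priority is `ENV > emqx.conf > HTTP API` (HTTP API changes are persisted in `cluster.hocon`). The listener key is the stock `listeners.tcp.default` that appears elsewhere in this series; the concrete values are illustrative assumptions only.

```
# etc/emqx.conf -- local node layer
listeners.tcp.default {
  bind = "0.0.0.0:1883"
  max_connections = 1024000
}

# data/configs/cluster.hocon -- cluster-synced layer,
# written at runtime by Dashboard / HTTP API / CLI
listeners.tcp.default {
  max_connections = 512000
}

# Effective value on this freshly installed node: max_connections = 1024000,
# because the local emqx.conf layer sits above the cluster-synced layer.
# As the updated docs advise, avoid putting the same key in both files.
```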
From 5455500647785e0d780590d26835df92f8f3c162 Mon Sep 17 00:00:00 2001 From: firest Date: Wed, 19 Apr 2023 14:17:43 +0800 Subject: [PATCH 069/263] fix(limiter): fix compatibility problem of configuration --- .../emqx_limiter/src/emqx_limiter_schema.erl | 76 +++++++++--------- apps/emqx/test/emqx_ratelimiter_SUITE.erl | 80 +++++++++++++++++-- .../src/emqx_dashboard_swagger.erl | 2 +- 3 files changed, 111 insertions(+), 47 deletions(-) diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl index 730559f80..c762a0f1d 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl @@ -24,6 +24,7 @@ fields/1, to_rate/1, to_capacity/1, + to_burst/1, default_period/0, to_burst_rate/1, to_initial/1, @@ -54,8 +55,10 @@ -type bucket_name() :: atom(). -type rate() :: infinity | float(). -type burst_rate() :: 0 | float(). +%% this is a compatible type for the deprecated field and type `capacity`. +-type burst() :: burst_rate(). %% the capacity of the token bucket --type capacity() :: non_neg_integer(). +%%-type capacity() :: non_neg_integer(). %% initial capacity of the token bucket -type initial() :: non_neg_integer(). -type bucket_path() :: list(atom()). @@ -72,13 +75,13 @@ -typerefl_from_string({rate/0, ?MODULE, to_rate}). -typerefl_from_string({burst_rate/0, ?MODULE, to_burst_rate}). --typerefl_from_string({capacity/0, ?MODULE, to_capacity}). +-typerefl_from_string({burst/0, ?MODULE, to_burst}). -typerefl_from_string({initial/0, ?MODULE, to_initial}). -reflect_type([ rate/0, burst_rate/0, - capacity/0, + burst/0, initial/0, failure_strategy/0, bucket_name/0 @@ -130,39 +133,9 @@ fields(node_opts) -> fields(client_fields) -> client_fields(types(), #{default => #{}}); fields(bucket_infinity) -> - [ - {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"infinity">>})}, - {burst, - ?HOCON(capacity(), #{ - desc => ?DESC(capacity), - default => <<"0">>, - importance => ?IMPORTANCE_HIDDEN, - aliases => [capacity] - })}, - {initial, - ?HOCON(initial(), #{ - default => <<"0">>, - desc => ?DESC(initial), - importance => ?IMPORTANCE_HIDDEN - })} - ]; + fields_of_bucket(<<"infinity">>); fields(bucket_limit) -> - [ - {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"1000/s">>})}, - {burst, - ?HOCON(capacity(), #{ - desc => ?DESC(burst), - default => <<"0">>, - importance => ?IMPORTANCE_HIDDEN, - aliases => [capacity] - })}, - {initial, - ?HOCON(initial(), #{ - default => <<"0">>, - desc => ?DESC(initial), - importance => ?IMPORTANCE_HIDDEN - })} - ]; + fields_of_bucket(<<"1000/s">>); fields(client_opts) -> [ {rate, ?HOCON(rate(), #{default => <<"infinity">>, desc => ?DESC(rate)})}, @@ -186,7 +159,7 @@ fields(client_opts) -> } )}, {burst, - ?HOCON(capacity(), #{ + ?HOCON(burst(), #{ desc => ?DESC(burst), default => <<"0">>, importance => ?IMPORTANCE_HIDDEN, @@ -265,8 +238,6 @@ types() -> calc_capacity(#{rate := infinity}) -> infinity; -calc_capacity(#{burst := infinity}) -> - infinity; calc_capacity(#{rate := Rate, burst := Burst}) -> erlang:floor(1000 * Rate / default_period()) + Burst. @@ -277,6 +248,17 @@ calc_capacity(#{rate := Rate, burst := Burst}) -> to_burst_rate(Str) -> to_rate(Str, false, true). 
+%% The default value of `capacity` is `infinity`, +%% but we have changed `capacity` to `burst` which should not be `infinity` +%% and its default value is 0, so we should convert `infinity` to 0 +to_burst(Str) -> + case to_rate(Str, true, true) of + {ok, infinity} -> + {ok, 0}; + Any -> + Any + end. + %% rate can be: 10 10MB 10MB/s 10MB/2s infinity %% e.g. the bytes_in regex tree is: %% @@ -415,6 +397,24 @@ composite_bucket_fields(Types, ClientRef) -> )} ]. +fields_of_bucket(Default) -> + [ + {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => Default})}, + {burst, + ?HOCON(burst(), #{ + desc => ?DESC(burst), + default => <<"0">>, + importance => ?IMPORTANCE_HIDDEN, + aliases => [capacity] + })}, + {initial, + ?HOCON(initial(), #{ + default => <<"0">>, + desc => ?DESC(initial), + importance => ?IMPORTANCE_HIDDEN + })} + ]. + client_fields(Types, Meta) -> [ {Type, diff --git a/apps/emqx/test/emqx_ratelimiter_SUITE.erl b/apps/emqx/test/emqx_ratelimiter_SUITE.erl index 7288dcf7c..26048873e 100644 --- a/apps/emqx/test/emqx_ratelimiter_SUITE.erl +++ b/apps/emqx/test/emqx_ratelimiter_SUITE.erl @@ -220,7 +220,7 @@ t_try_restore_agg(_) -> }, Cli2 = Cli#{ rate := infinity, - burst := infinity, + burst := 0, divisible := true, max_retry_time := 100, failure_strategy := force @@ -264,11 +264,11 @@ t_rate(_) -> Bucket2 = Bucket#{ rate := ?RATE("100/100ms"), initial := 0, - burst := infinity + burst := 0 }, Cli2 = Cli#{ rate := infinity, - burst := infinity, + burst := 0, initial := 0 }, Bucket2#{client := Cli2} @@ -295,7 +295,7 @@ t_capacity(_) -> }, Cli2 = Cli#{ rate := infinity, - burst := infinity, + burst := 0, initial := 0 }, Bucket2#{client := Cli2} @@ -403,11 +403,11 @@ t_limit_global_with_unlimit_other(_) -> Bucket2 = Bucket#{ rate := infinity, initial := 0, - burst := infinity + burst := 0 }, Cli2 = Cli#{ rate := infinity, - burst := infinity, + burst := 0, initial := 0 }, Bucket2#{client := Cli2} @@ -574,6 +574,66 @@ t_schema_unit(_) -> ?assertEqual({ok, 100 * 1024 * 1024 * 1024}, M:to_capacity("100GB")), ok. +compatibility_for_capacity(_) -> + CfgStr = << + "" + "\n" + "listeners.tcp.default {\n" + " bind = \"0.0.0.0:1883\"\n" + " max_connections = 1024000\n" + " limiter.messages.capacity = infinity\n" + " limiter.client.messages.capacity = infinity\n" + "}\n" + "" + >>, + ?assertMatch( + #{ + messages := #{burst := 0}, + client := #{messages := #{burst := 0}} + }, + parse_and_check(CfgStr) + ). + +compatibility_for_message_in(_) -> + CfgStr = << + "" + "\n" + "listeners.tcp.default {\n" + " bind = \"0.0.0.0:1883\"\n" + " max_connections = 1024000\n" + " limiter.message_in.rate = infinity\n" + " limiter.client.message_in.rate = infinity\n" + "}\n" + "" + >>, + ?assertMatch( + #{ + messages := #{rate := infinity}, + client := #{messages := #{rate := infinity}} + }, + parse_and_check(CfgStr) + ). + +compatibility_for_bytes_in(_) -> + CfgStr = << + "" + "\n" + "listeners.tcp.default {\n" + " bind = \"0.0.0.0:1883\"\n" + " max_connections = 1024000\n" + " limiter.bytes_in.rate = infinity\n" + " limiter.client.bytes_in.rate = infinity\n" + "}\n" + "" + >>, + ?assertMatch( + #{ + bytes := #{rate := infinity}, + client := #{bytes := #{rate := infinity}} + }, + parse_and_check(CfgStr) + ). 
+ %%-------------------------------------------------------------------- %%% Internal functions %%-------------------------------------------------------------------- @@ -753,13 +813,13 @@ make_limiter_cfg() -> Client = #{ rate => infinity, initial => 0, - burst => infinity, + burst => 0, low_watermark => 0, divisible => false, max_retry_time => timer:seconds(5), failure_strategy => force }, - #{client => Client, rate => infinity, initial => 0, burst => infinity}. + #{client => Client, rate => infinity, initial => 0, burst => 0}. add_bucket(Cfg) -> add_bucket(?MODULE, Cfg). @@ -813,3 +873,7 @@ apply_modifier(Pairs, #{default := Template}) -> Acc#{N => M(Template)} end, lists:foldl(Fun, #{}, Pairs). + +parse_and_check(ConfigString) -> + ok = emqx_common_test_helpers:load_config(emqx_schema, ConfigString), + emqx:get_config([listeners, tcp, default, limiter]). diff --git a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl index f700ec146..e471486e5 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl @@ -768,7 +768,7 @@ typename_to_spec("log_level()", _Mod) -> }; typename_to_spec("rate()", _Mod) -> #{type => string, example => <<"10MB">>}; -typename_to_spec("capacity()", _Mod) -> +typename_to_spec("burst()", _Mod) -> #{type => string, example => <<"100MB">>}; typename_to_spec("burst_rate()", _Mod) -> %% 0/0s = no burst From 4f0c891aa6f34c2d623961ad25b72a1e4c150664 Mon Sep 17 00:00:00 2001 From: firest Date: Wed, 19 Apr 2023 15:23:19 +0800 Subject: [PATCH 070/263] chore: bump version && update changes --- apps/emqx/src/emqx.app.src | 2 +- apps/emqx_dashboard/src/emqx_dashboard.app.src | 2 +- changes/ce/fix-10448.en.md | 3 +++ 3 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changes/ce/fix-10448.en.md diff --git a/apps/emqx/src/emqx.app.src b/apps/emqx/src/emqx.app.src index b2dfca9e1..d42478fea 100644 --- a/apps/emqx/src/emqx.app.src +++ b/apps/emqx/src/emqx.app.src @@ -3,7 +3,7 @@ {id, "emqx"}, {description, "EMQX Core"}, % strict semver, bump manually! - {vsn, "5.0.23"}, + {vsn, "5.0.24"}, {modules, []}, {registered, []}, {applications, [ diff --git a/apps/emqx_dashboard/src/emqx_dashboard.app.src b/apps/emqx_dashboard/src/emqx_dashboard.app.src index b810f9c5f..8c7e424e0 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard.app.src +++ b/apps/emqx_dashboard/src/emqx_dashboard.app.src @@ -2,7 +2,7 @@ {application, emqx_dashboard, [ {description, "EMQX Web Dashboard"}, % strict semver, bump manually! - {vsn, "5.0.18"}, + {vsn, "5.0.19"}, {modules, []}, {registered, [emqx_dashboard_sup]}, {applications, [kernel, stdlib, mnesia, minirest, emqx, emqx_ctl]}, diff --git a/changes/ce/fix-10448.en.md b/changes/ce/fix-10448.en.md new file mode 100644 index 000000000..eaa0dc656 --- /dev/null +++ b/changes/ce/fix-10448.en.md @@ -0,0 +1,3 @@ +Fix a compatibility issue of limiter configuration introduced by v5.0.23 which broke the upgrade from previous versions if the `capacity` is `infinity`. + +Since v5.0.23, we had instead of `capacity` by `burst`, after this fix, a `capacity` with `infinity` value will be converted to a `burst` with a zero value. 
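To make the limiter compatibility fix above concrete, the sketch below mirrors the new `compatibility_for_capacity` test case: a listener config that still uses the deprecated `capacity` field is accepted and translated to the new `burst` field. The listener snippet is taken from the test string in the patch; the comments describe the expected outcome of config checking.

```
# Deprecated form, still accepted after this fix:
listeners.tcp.default {
  bind = "0.0.0.0:1883"
  max_connections = 1024000
  limiter.messages.capacity = infinity
  limiter.client.messages.capacity = infinity
}

# After parsing and checking, this is equivalent to the new form:
#   listeners.tcp.default.limiter.messages.burst = 0
#   listeners.tcp.default.limiter.client.messages.burst = 0
# i.e. `capacity = infinity` is mapped to `burst = 0` by to_burst/1.
```

The mapping to `0` follows the schema note in the patch: `burst` has no `infinity` value and defaults to `0`, so the old default (`capacity = infinity`) is folded onto the new default.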
From 8219af2fd439acb1a75c3f3dca0399361082b31e Mon Sep 17 00:00:00 2001 From: firest Date: Wed, 19 Apr 2023 16:45:53 +0800 Subject: [PATCH 071/263] chore: improve changelog --- changes/ce/fix-10448.en.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changes/ce/fix-10448.en.md b/changes/ce/fix-10448.en.md index eaa0dc656..c35ecdd8b 100644 --- a/changes/ce/fix-10448.en.md +++ b/changes/ce/fix-10448.en.md @@ -1,3 +1,3 @@ Fix a compatibility issue of limiter configuration introduced by v5.0.23 which broke the upgrade from previous versions if the `capacity` is `infinity`. -Since v5.0.23, we had instead of `capacity` by `burst`, after this fix, a `capacity` with `infinity` value will be converted to a `burst` with a zero value. +In v5.0.23 we have replaced `capacity` with `burst`. After this fix, a `capacity = infinity` config will be automatically converted to equivalent `burst = 0`. From 39c213d2d7f0f9e684426beee19da30a7e996f87 Mon Sep 17 00:00:00 2001 From: Kinplemelon Date: Wed, 19 Apr 2023 18:12:43 +0800 Subject: [PATCH 072/263] chore: upgrade dashboard to v1.2.3 for ce --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c874b81ce..10e6d1424 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2 export EMQX_DEFAULT_RUNNER = debian:11-slim export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh) export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh) -export EMQX_DASHBOARD_VERSION ?= v1.2.2 +export EMQX_DASHBOARD_VERSION ?= v1.2.3 export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6-beta.1 export EMQX_REL_FORM ?= tgz export QUICER_DOWNLOAD_FROM_RELEASE = 1 From 13f50b2ba9e92d77c233d453f87d93192e43d07c Mon Sep 17 00:00:00 2001 From: JianBo He Date: Wed, 19 Apr 2023 21:05:10 +0800 Subject: [PATCH 073/263] chore: update README files --- apps/emqx_conf/README.md | 16 ++++++++++++++++ apps/emqx_plugins/README.md | 12 ++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 apps/emqx_conf/README.md create mode 100644 apps/emqx_plugins/README.md diff --git a/apps/emqx_conf/README.md b/apps/emqx_conf/README.md new file mode 100644 index 000000000..726fd72cd --- /dev/null +++ b/apps/emqx_conf/README.md @@ -0,0 +1,16 @@ +# Configuration Management + +This application provides configuration management capabilities for EMQX. + +This includes, during compilation: +- Read all configuration schemas and generate the following files: + * `config-en.md`: documentation for all configuration options. + * `schema-en.json`: JSON description of all configuration schema options. + * `emqx.conf.example`: an example of a complete configuration file. + +At runtime, it provides: +- Cluster configuration synchronization capability. + Responsible for synchronizing hot-update configurations from the HTTP API to the entire cluster + and ensuring consistency. + +In addition, this application manages system-level configurations such as `cluster`, `node`, `log`. diff --git a/apps/emqx_plugins/README.md b/apps/emqx_plugins/README.md new file mode 100644 index 000000000..9c8faccd1 --- /dev/null +++ b/apps/emqx_plugins/README.md @@ -0,0 +1,12 @@ +# Plugins Management + +This application provides the feature for users to upload and install custom, Erlang-based plugins. 
+ +More introduction about [Plugins](https://www.emqx.io/docs/en/v5.0/extensions/plugins.html#develop-emqx-plugins) + +See HTTP API to learn how to [Install/Uninstall a Plugin](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Plugins) + +## Plugin Template + +We provide a [plugin template](https://github.com/emqx/emqx-plugin-template) that +you can use to learn how to write and package custom plugins. From bd935e34e67283ebc0f0f4a1924df152138bdea3 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Wed, 19 Apr 2023 21:12:24 +0800 Subject: [PATCH 074/263] chore: update apps/emqx_modules/README.md --- apps/emqx_modules/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/emqx_modules/README.md b/apps/emqx_modules/README.md index 7cb1db0c5..65ceb12e1 100644 --- a/apps/emqx_modules/README.md +++ b/apps/emqx_modules/README.md @@ -6,7 +6,7 @@ protocol standard, including "Delayed Publish", "Topic Rewrite", "Topic Metrics" ## Delayed Publish -After enabling this module, messages sent by the user with the prefix +After enabling this module, messages sent by the clients with the topic prefixed with `$delayed/{Interval}/{Topic}` will be delayed by `{Interval}` seconds before being published to the `{Topic}`. From eda2f0819d1560226038cd01a64eca77f65325b3 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Wed, 19 Apr 2023 21:22:01 +0800 Subject: [PATCH 075/263] chore: apply suggestions from code review --- apps/emqx_modules/README.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/apps/emqx_modules/README.md b/apps/emqx_modules/README.md index 65ceb12e1..567b1ac71 100644 --- a/apps/emqx_modules/README.md +++ b/apps/emqx_modules/README.md @@ -20,10 +20,9 @@ See [Enabling/Disabling Delayed Publish via HTTP API](https://www.emqx.io/docs/e Topic Rewrite allows users to configure rules to change the topic strings that the client requests to subscribe or publish. -This feature is very useful when designing topics that are compatible with different -client versions. For example, an old device that has already been issued and cannot -be upgraded may use old topic rules, but the production environment need to apply -a new design rules for the topics. +This feature is very useful when it need to compatibility with different versions of topic designs. +For example, an old device that has already been issued and cannot +be upgraded may use old topic designs, but for some reason, we adjusted the format of topics. We can use this feature to rewrite the old topics as the new format to eliminate these differences. More introduction about [Topic Rewrite](https://www.emqx.io/docs/en/v5.0/mqtt/mqtt-topic-rewrite.html). From a9bd91fcfffa2d6cb44fc7772e2937bff3bbc6a8 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Wed, 19 Apr 2023 10:23:04 -0300 Subject: [PATCH 076/263] refactor(gcp_pubsub): move GCP PubSub Bridge to its own app Fixes https://emqx.atlassian.net/browse/EMQX-9536 Note: since GCP PubSub is not shared by any authn/authz backend, there's no need to separate its connector into another app. 
--- apps/emqx_bridge_gcp_pubsub/rebar.config | 10 ++++++++++ .../src/emqx_bridge_gcp_pubsub.app.src | 6 +++++- .../src/emqx_bridge_gcp_pubsub.erl | 2 +- .../src/emqx_bridge_gcp_pubsub_connector.erl | 4 ++-- .../test/emqx_bridge_gcp_pubsub_SUITE.erl | 2 +- lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src | 5 +++-- lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl | 8 ++++---- lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src | 2 +- mix.exs | 4 +++- rebar.config.erl | 2 ++ ...e_gcp_pubsub.hocon => emqx_bridge_gcp_pubsub.hocon} | 2 +- ...e_gcp_pubsub.hocon => emqx_bridge_gcp_pubsub.hocon} | 2 +- scripts/find-apps.sh | 3 +++ 13 files changed, 37 insertions(+), 15 deletions(-) create mode 100644 apps/emqx_bridge_gcp_pubsub/rebar.config rename lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_gcp_pubsub.erl => apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.erl (99%) rename lib-ee/emqx_ee_connector/src/emqx_ee_connector_gcp_pubsub.erl => apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl (99%) rename lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl => apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_SUITE.erl (99%) rename rel/i18n/{emqx_ee_bridge_gcp_pubsub.hocon => emqx_bridge_gcp_pubsub.hocon} (98%) rename rel/i18n/zh/{emqx_ee_bridge_gcp_pubsub.hocon => emqx_bridge_gcp_pubsub.hocon} (98%) diff --git a/apps/emqx_bridge_gcp_pubsub/rebar.config b/apps/emqx_bridge_gcp_pubsub/rebar.config new file mode 100644 index 000000000..2fd264fc0 --- /dev/null +++ b/apps/emqx_bridge_gcp_pubsub/rebar.config @@ -0,0 +1,10 @@ +%% -*- mode: erlang; -*- +{erl_opts, [debug_info]}. +{deps, [ {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + {apps, [emqx_bridge_gcp_pubsub]} +]}. diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src index 0e1427888..86627eb2a 100644 --- a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src @@ -2,7 +2,11 @@ {description, "EMQX Enterprise GCP Pub/Sub Bridge"}, {vsn, "0.1.0"}, {registered, []}, - {applications, [kernel, stdlib]}, + {applications, [ + kernel, + stdlib, + ehttpc + ]}, {env, []}, {modules, []}, {links, []} diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_gcp_pubsub.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.erl similarity index 99% rename from lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_gcp_pubsub.erl rename to apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.erl index 180640d65..70109a0ea 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_gcp_pubsub.erl +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_bridge_gcp_pubsub). +-module(emqx_bridge_gcp_pubsub). -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). 
diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_gcp_pubsub.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl similarity index 99% rename from lib-ee/emqx_ee_connector/src/emqx_ee_connector_gcp_pubsub.erl rename to apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl index 7b068ec8f..a3f0ef36b 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_gcp_pubsub.erl +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_connector_gcp_pubsub). +-module(emqx_bridge_gcp_pubsub_connector). -behaviour(emqx_resource). @@ -27,7 +27,7 @@ -export([reply_delegator/3]). -type jwt_worker() :: binary(). --type service_account_json() :: emqx_ee_bridge_gcp_pubsub:service_account_json(). +-type service_account_json() :: emqx_bridge_gcp_pubsub:service_account_json(). -type config() :: #{ connect_timeout := emqx_schema:duration_ms(), max_retries := non_neg_integer(), diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_SUITE.erl similarity index 99% rename from lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl rename to apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_SUITE.erl index 57c06fa7f..55527bf1f 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl +++ b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_SUITE.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_bridge_gcp_pubsub_SUITE). +-module(emqx_bridge_gcp_pubsub_SUITE). -compile(nowarn_export_all). -compile(export_all). diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src index d6c59c716..440889d02 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src @@ -1,13 +1,14 @@ {application, emqx_ee_bridge, [ {description, "EMQX Enterprise data bridges"}, - {vsn, "0.1.10"}, + {vsn, "0.1.11"}, {registered, []}, {applications, [ kernel, stdlib, emqx_ee_connector, telemetry, - emqx_bridge_kafka + emqx_bridge_kafka, + emqx_bridge_gcp_pubsub ]}, {env, []}, {modules, []}, diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl index 3ad5cbbb4..9465464d9 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl @@ -14,7 +14,7 @@ api_schemas(Method) -> [ - ref(emqx_ee_bridge_gcp_pubsub, Method), + ref(emqx_bridge_gcp_pubsub, Method), ref(emqx_bridge_kafka, Method ++ "_consumer"), ref(emqx_bridge_kafka, Method ++ "_producer"), ref(emqx_ee_bridge_mysql, Method), @@ -42,7 +42,7 @@ schema_modules() -> [ emqx_bridge_kafka, emqx_ee_bridge_hstreamdb, - emqx_ee_bridge_gcp_pubsub, + emqx_bridge_gcp_pubsub, emqx_ee_bridge_influxdb, emqx_ee_bridge_mongodb, emqx_ee_bridge_mysql, @@ -76,7 +76,7 @@ resource_type(kafka_consumer) -> emqx_bridge_kafka_impl_consumer; %% to hocon; keeping this as just `kafka' for backwards compatibility. 
resource_type(kafka) -> emqx_bridge_kafka_impl_producer; resource_type(hstreamdb) -> emqx_ee_connector_hstreamdb; -resource_type(gcp_pubsub) -> emqx_ee_connector_gcp_pubsub; +resource_type(gcp_pubsub) -> emqx_bridge_gcp_pubsub_connector; resource_type(mongodb_rs) -> emqx_ee_connector_mongodb; resource_type(mongodb_sharded) -> emqx_ee_connector_mongodb; resource_type(mongodb_single) -> emqx_ee_connector_mongodb; @@ -108,7 +108,7 @@ fields(bridges) -> )}, {gcp_pubsub, mk( - hoconsc:map(name, ref(emqx_ee_bridge_gcp_pubsub, "config")), + hoconsc:map(name, ref(emqx_bridge_gcp_pubsub, "config")), #{ desc => <<"EMQX Enterprise Config">>, required => false diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src b/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src index 7ebc320e5..ced7ae86a 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src @@ -1,6 +1,6 @@ {application, emqx_ee_connector, [ {description, "EMQX Enterprise connectors"}, - {vsn, "0.1.10"}, + {vsn, "0.1.11"}, {registered, []}, {applications, [ kernel, diff --git a/mix.exs b/mix.exs index 1dbde8980..f3149a584 100644 --- a/mix.exs +++ b/mix.exs @@ -154,7 +154,8 @@ defmodule EMQXUmbrella.MixProject do # need to remove those when listing `/apps/`... defp enterprise_umbrella_apps() do MapSet.new([ - :emqx_bridge_kafka + :emqx_bridge_kafka, + :emqx_bridge_gcp_pubsub ]) end @@ -344,6 +345,7 @@ defmodule EMQXUmbrella.MixProject do emqx_ee_connector: :permanent, emqx_ee_bridge: :permanent, emqx_bridge_kafka: :permanent, + emqx_bridge_gcp_pubsub: :permanent, emqx_ee_schema_registry: :permanent ], else: [] diff --git a/rebar.config.erl b/rebar.config.erl index 80f126096..9bdbfb848 100644 --- a/rebar.config.erl +++ b/rebar.config.erl @@ -79,6 +79,7 @@ is_enterprise(ce) -> false; is_enterprise(ee) -> true. is_community_umbrella_app("apps/emqx_bridge_kafka") -> false; +is_community_umbrella_app("apps/emqx_bridge_gcp_pubsub") -> false; is_community_umbrella_app(_) -> true. 
is_jq_supported() -> @@ -439,6 +440,7 @@ relx_apps_per_edition(ee) -> emqx_ee_connector, emqx_ee_bridge, emqx_bridge_kafka, + emqx_bridge_gcp_pubsub, emqx_ee_schema_registry ]; relx_apps_per_edition(ce) -> diff --git a/rel/i18n/emqx_ee_bridge_gcp_pubsub.hocon b/rel/i18n/emqx_bridge_gcp_pubsub.hocon similarity index 98% rename from rel/i18n/emqx_ee_bridge_gcp_pubsub.hocon rename to rel/i18n/emqx_bridge_gcp_pubsub.hocon index 6f864a524..cc255aec3 100644 --- a/rel/i18n/emqx_ee_bridge_gcp_pubsub.hocon +++ b/rel/i18n/emqx_bridge_gcp_pubsub.hocon @@ -1,4 +1,4 @@ -emqx_ee_bridge_gcp_pubsub { +emqx_bridge_gcp_pubsub { connect_timeout.desc: """The timeout when connecting to the HTTP server.""" diff --git a/rel/i18n/zh/emqx_ee_bridge_gcp_pubsub.hocon b/rel/i18n/zh/emqx_bridge_gcp_pubsub.hocon similarity index 98% rename from rel/i18n/zh/emqx_ee_bridge_gcp_pubsub.hocon rename to rel/i18n/zh/emqx_bridge_gcp_pubsub.hocon index 4318211c9..19bd7058e 100644 --- a/rel/i18n/zh/emqx_ee_bridge_gcp_pubsub.hocon +++ b/rel/i18n/zh/emqx_bridge_gcp_pubsub.hocon @@ -1,4 +1,4 @@ -emqx_ee_bridge_gcp_pubsub { +emqx_bridge_gcp_pubsub { connect_timeout.desc: """连接 HTTP 服务器的超时时间。""" diff --git a/scripts/find-apps.sh b/scripts/find-apps.sh index 66990ae12..eed2acdb8 100755 --- a/scripts/find-apps.sh +++ b/scripts/find-apps.sh @@ -75,6 +75,9 @@ describe_app() { apps/emqx_bridge_kafka) profile='emqx-enterprise' ;; + apps/emqx_bridge_gcp_pubsub) + profile='emqx-enterprise' + ;; apps/*) profile='emqx' ;; From cb995e20330d89aa44270c925058318b5a9e128b Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Wed, 19 Apr 2023 17:42:27 -0300 Subject: [PATCH 077/263] fix(buffer_worker): avoid sending late reply messages to callers Fixes https://emqx.atlassian.net/browse/EMQX-9635 During a sync call from process `A` to a buffer worker `B`, its call to the underlying resource `C` can be very slow. In those cases, `A` will receive a timeout response and expect no more messages from `B` nor `C`. However, prior to this fix, if `B` is stuck in a long sync call to `C` and then gets its response after `A` timed out, `B` would still send the late response to `A`, polluting its mailbox. --- apps/emqx_resource/src/emqx_resource.app.src | 2 +- .../src/emqx_resource_buffer_worker.erl | 17 +++++-- .../test/emqx_connector_demo.erl | 6 ++- .../test/emqx_resource_SUITE.erl | 45 +++++++++++++++++++ changes/ce/fix-10455.en.md | 9 ++++ 5 files changed, 73 insertions(+), 6 deletions(-) create mode 100644 changes/ce/fix-10455.en.md diff --git a/apps/emqx_resource/src/emqx_resource.app.src b/apps/emqx_resource/src/emqx_resource.app.src index 00c315714..2553e6dd8 100644 --- a/apps/emqx_resource/src/emqx_resource.app.src +++ b/apps/emqx_resource/src/emqx_resource.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_resource, [ {description, "Manager for all external resources"}, - {vsn, "0.1.13"}, + {vsn, "0.1.14"}, {registered, []}, {mod, {emqx_resource_app, []}}, {applications, [ diff --git a/apps/emqx_resource/src/emqx_resource_buffer_worker.erl b/apps/emqx_resource/src/emqx_resource_buffer_worker.erl index e34cf5d0a..2e2cd5631 100644 --- a/apps/emqx_resource/src/emqx_resource_buffer_worker.erl +++ b/apps/emqx_resource/src/emqx_resource_buffer_worker.erl @@ -52,7 +52,7 @@ -export([queue_item_marshaller/1, estimate_size/1]). --export([handle_async_reply/2, handle_async_batch_reply/2]). +-export([handle_async_reply/2, handle_async_batch_reply/2, reply_call/2]). -export([clear_disk_queue_dir/2]). 
@@ -293,10 +293,8 @@ code_change(_OldVsn, State, _Extra) -> pick_call(Id, Key, Query, Timeout) -> ?PICK(Id, Key, Pid, begin - Caller = self(), MRef = erlang:monitor(process, Pid, [{alias, reply_demonitor}]), - From = {Caller, MRef}, - ReplyTo = {fun gen_statem:reply/2, [From]}, + ReplyTo = {fun ?MODULE:reply_call/2, [MRef]}, erlang:send(Pid, ?SEND_REQ(ReplyTo, Query)), receive {MRef, Response} -> @@ -1703,6 +1701,17 @@ default_resume_interval(_RequestTimeout = infinity, HealthCheckInterval) -> default_resume_interval(RequestTimeout, HealthCheckInterval) -> max(1, min(HealthCheckInterval, RequestTimeout div 3)). +-spec reply_call(reference(), term()) -> ok. +reply_call(Alias, Response) -> + %% Since we use a reference created with `{alias, + %% reply_demonitor}', after we `demonitor' it in case of a + %% timeout, we won't send any more messages that the caller is not + %% expecting anymore. Using `gen_statem:reply({pid(), + %% reference()}, _)' would still send a late reply even after the + %% demonitor. + erlang:send(Alias, {Alias, Response}), + ok. + -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). adjust_batch_time_test_() -> diff --git a/apps/emqx_resource/test/emqx_connector_demo.erl b/apps/emqx_resource/test/emqx_connector_demo.erl index a1393c574..5be854e93 100644 --- a/apps/emqx_resource/test/emqx_connector_demo.erl +++ b/apps/emqx_resource/test/emqx_connector_demo.erl @@ -144,7 +144,11 @@ on_query(_InstId, {sleep_before_reply, For}, #{pid := Pid}) -> Result after 1000 -> {error, timeout} - end. + end; +on_query(_InstId, {sync_sleep_before_reply, SleepFor}, _State) -> + %% This simulates a slow sync call + timer:sleep(SleepFor), + {ok, slept}. on_query_async(_InstId, block, ReplyFun, #{pid := Pid}) -> Pid ! {block, ReplyFun}, diff --git a/apps/emqx_resource/test/emqx_resource_SUITE.erl b/apps/emqx_resource/test/emqx_resource_SUITE.erl index 385b4cb91..e098c2e1c 100644 --- a/apps/emqx_resource/test/emqx_resource_SUITE.erl +++ b/apps/emqx_resource/test/emqx_resource_SUITE.erl @@ -2751,6 +2751,51 @@ t_volatile_offload_mode(_Config) -> end ). +t_late_call_reply(_Config) -> + emqx_connector_demo:set_callback_mode(always_sync), + RequestTimeout = 500, + ?assertMatch( + {ok, _}, + emqx_resource:create( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource}, + #{ + buffer_mode => memory_only, + request_timeout => RequestTimeout, + query_mode => sync + } + ) + ), + ?check_trace( + begin + %% Sleep for longer than the request timeout; the call reply will + %% have been already returned (a timeout), but the resource will + %% still send a message with the reply. + %% The demo connector will reply with `{error, timeout}' after 1 s. + SleepFor = RequestTimeout + 500, + ?assertMatch( + {error, {resource_error, #{reason := timeout}}}, + emqx_resource:query( + ?ID, + {sync_sleep_before_reply, SleepFor}, + #{timeout => RequestTimeout} + ) + ), + %% Our process shouldn't receive any late messages. + receive + LateReply -> + ct:fail("received late reply: ~p", [LateReply]) + after SleepFor -> + ok + end, + ok + end, + [] + ), + ok. + %%------------------------------------------------------------------------------ %% Helpers %%------------------------------------------------------------------------------ diff --git a/changes/ce/fix-10455.en.md b/changes/ce/fix-10455.en.md new file mode 100644 index 000000000..07d8c71db --- /dev/null +++ b/changes/ce/fix-10455.en.md @@ -0,0 +1,9 @@ +Fixed an issue that could cause (otherwise harmless) noise in the logs. 
+ +During some particularly slow synchronous calls to bridges, some late replies could be sent to connections processes that were no longer expecting a reply, and then emit an error log like: + +``` +2023-04-19T18:24:35.350233+00:00 [error] msg: unexpected_info, mfa: emqx_channel:handle_info/2, line: 1278, peername: 172.22.0.1:36384, clientid: caribdis_bench_sub_1137967633_4788, info: {#Ref<0.408802983.1941504010.189402>,{ok,200,[{<<"cache-control">>,<<"max-age=0, ...">>}} +``` + +Those logs are harmless, but they could flood and worry the users without need. From 712f7b74540af5e0c0b1e3352b836feaa2083665 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Thu, 20 Apr 2023 14:15:09 +0800 Subject: [PATCH 078/263] chore: deprecate statsd --- apps/emqx_statsd/src/emqx_statsd_api.erl | 2 ++ apps/emqx_statsd/src/emqx_statsd_schema.erl | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/apps/emqx_statsd/src/emqx_statsd_api.erl b/apps/emqx_statsd/src/emqx_statsd_api.erl index e65c93432..4ee144d57 100644 --- a/apps/emqx_statsd/src/emqx_statsd_api.erl +++ b/apps/emqx_statsd/src/emqx_statsd_api.erl @@ -49,6 +49,7 @@ schema("/statsd") -> 'operationId' => statsd, get => #{ + deprecated => true, description => ?DESC(get_statsd_config_api), tags => ?API_TAG_STATSD, responses => @@ -56,6 +57,7 @@ schema("/statsd") -> }, put => #{ + deprecated => true, description => ?DESC(update_statsd_config_api), tags => ?API_TAG_STATSD, 'requestBody' => statsd_config_schema(), diff --git a/apps/emqx_statsd/src/emqx_statsd_schema.erl b/apps/emqx_statsd/src/emqx_statsd_schema.erl index e44f94954..01decc6f7 100644 --- a/apps/emqx_statsd/src/emqx_statsd_schema.erl +++ b/apps/emqx_statsd/src/emqx_statsd_schema.erl @@ -32,7 +32,8 @@ namespace() -> "statsd". -roots() -> ["statsd"]. +roots() -> + [{"statsd", hoconsc:mk(hoconsc:ref(?MODULE, "statsd"), #{importance => ?IMPORTANCE_HIDDEN})}]. fields("statsd") -> [ From 794ddd8d73ab62fade2c2998d470db7efc3b03c8 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Thu, 20 Apr 2023 14:23:55 +0800 Subject: [PATCH 079/263] chore: update changes --- apps/emqx_statsd/src/emqx_statsd.app.src | 2 +- changes/ce/feat-10457.en.md | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changes/ce/feat-10457.en.md diff --git a/apps/emqx_statsd/src/emqx_statsd.app.src b/apps/emqx_statsd/src/emqx_statsd.app.src index 412e0b685..87fc8c596 100644 --- a/apps/emqx_statsd/src/emqx_statsd.app.src +++ b/apps/emqx_statsd/src/emqx_statsd.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_statsd, [ {description, "EMQX Statsd"}, - {vsn, "5.0.8"}, + {vsn, "5.0.9"}, {registered, []}, {mod, {emqx_statsd_app, []}}, {applications, [ diff --git a/changes/ce/feat-10457.en.md b/changes/ce/feat-10457.en.md new file mode 100644 index 000000000..d6a44bd53 --- /dev/null +++ b/changes/ce/feat-10457.en.md @@ -0,0 +1,4 @@ +Deprecates the integration with StatsD. + +Since StatsD is not used a lot. 
So we will deprecate it in the next release +and plan to remove it in 5.1 From adc422d0635a8ecebfb1220bd10cea29f786f94a Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Thu, 20 Apr 2023 11:11:35 +0200 Subject: [PATCH 080/263] chore: re-split dynamo i18n file --- rel/i18n/emqx_ee_connector_dynamo.hocon | 50 ++++++++-------------- rel/i18n/zh/emqx_ee_connector_dynamo.hocon | 18 ++++++++ scripts/split-i18n-files.escript | 12 ++++-- 3 files changed, 45 insertions(+), 35 deletions(-) diff --git a/rel/i18n/emqx_ee_connector_dynamo.hocon b/rel/i18n/emqx_ee_connector_dynamo.hocon index a0c7aacbe..29b6bf99e 100644 --- a/rel/i18n/emqx_ee_connector_dynamo.hocon +++ b/rel/i18n/emqx_ee_connector_dynamo.hocon @@ -1,41 +1,27 @@ emqx_ee_connector_dynamo { +aws_access_key_id.desc: +"""Access Key ID for connecting to DynamoDB.""" + +aws_access_key_id.label: +"""AWS Access Key ID""" + +aws_secret_access_key.desc: +"""AWS Secret Access Key for connecting to DynamoDB.""" + +aws_secret_access_key.label: +"""AWS Secret Access Key""" + +table.desc: +"""DynamoDB Table.""" + +table.label: +"""Table """ + url.desc: """The url of DynamoDB endpoint.""" url.label: """DynamoDB Endpoint""" - table { - desc { - en: """DynamoDB Table.""" - zh: """DynamoDB 的表。""" - } - label: { - en: "Table " - zh: "表" - } - } - - aws_access_key_id { - desc { - en: """Access Key ID for connecting to DynamoDB.""" - zh: """DynamoDB 的访问 ID。""" - } - label: { - en: "AWS Access Key ID" - zh: "连接访问 ID" - } - } - - aws_secret_access_key { - desc { - en: """AWS Secret Access Key for connecting to DynamoDB.""" - zh: """DynamoDB 的访问密钥。""" - } - label: { - en: "AWS Secret Access Key" - zh: "连接访问密钥" - } - } } diff --git a/rel/i18n/zh/emqx_ee_connector_dynamo.hocon b/rel/i18n/zh/emqx_ee_connector_dynamo.hocon index 540d79dd0..e7b911c1e 100644 --- a/rel/i18n/zh/emqx_ee_connector_dynamo.hocon +++ b/rel/i18n/zh/emqx_ee_connector_dynamo.hocon @@ -1,5 +1,23 @@ emqx_ee_connector_dynamo { +aws_access_key_id.desc: +"""DynamoDB 的访问 ID。""" + +aws_access_key_id.label: +"""连接访问 ID""" + +aws_secret_access_key.desc: +"""DynamoDB 的访问密钥。""" + +aws_secret_access_key.label: +"""连接访问密钥""" + +table.desc: +"""DynamoDB 的表。""" + +table.label: +"""表""" + url.desc: """DynamoDB 的地址。""" diff --git a/scripts/split-i18n-files.escript b/scripts/split-i18n-files.escript index 5910db667..b9f558925 100755 --- a/scripts/split-i18n-files.escript +++ b/scripts/split-i18n-files.escript @@ -27,9 +27,13 @@ add_ebin(Dir) -> split_file(Path) -> {ok, DescMap} = hocon:load(Path), [{Module, Descs}] = maps:to_list(DescMap), - ok = split(Path, Module, <<"en">>, Descs), - ok = split(Path, Module, <<"zh">>, Descs), - ok. + try + ok = split(Path, Module, <<"en">>, Descs), + ok = split(Path, Module, <<"zh">>, Descs) + catch + throw : already_done -> + ok + end. split(Path, Module, Lang, Fields) when is_map(Fields) -> split(Path, Module, Lang, maps:to_list(Fields)); @@ -54,6 +58,8 @@ rename(FilePath, Lang) -> BaseName = filename:basename(FilePath), filename:join([Dir, Lang, BaseName]). 
+do_split(_Path, _Name, _Lang, #{<<"desc">> := Desc}) when is_binary(Desc) -> + throw(already_done); do_split(Path, Name, Lang, #{<<"desc">> := Desc} = D) -> try Label = maps:get(<<"label">>, D, #{}), From bcc8f4313bf0f26e43dd5148f43ef465337bc149 Mon Sep 17 00:00:00 2001 From: Zhongwen Deng Date: Thu, 20 Apr 2023 18:00:09 +0800 Subject: [PATCH 081/263] chore: make plugins config to low level --- apps/emqx_plugins/src/emqx_plugins_schema.erl | 15 +++++++++------ .../emqx_rule_engine/src/emqx_rule_engine.app.src | 2 +- changes/ce/feat-10458.en.md | 3 +++ .../src/emqx_ee_connector.app.src | 2 +- 4 files changed, 14 insertions(+), 8 deletions(-) create mode 100644 changes/ce/feat-10458.en.md diff --git a/apps/emqx_plugins/src/emqx_plugins_schema.erl b/apps/emqx_plugins/src/emqx_plugins_schema.erl index 9d9d045de..b86f6b6c1 100644 --- a/apps/emqx_plugins/src/emqx_plugins_schema.erl +++ b/apps/emqx_plugins/src/emqx_plugins_schema.erl @@ -29,7 +29,7 @@ namespace() -> "plugin". -roots() -> [?CONF_ROOT]. +roots() -> [{?CONF_ROOT, ?HOCON(?R_REF(?CONF_ROOT), #{importance => ?IMPORTANCE_LOW})}]. fields(?CONF_ROOT) -> #{ @@ -73,16 +73,19 @@ states(type) -> ?ARRAY(?R_REF(state)); states(required) -> false; states(default) -> []; states(desc) -> ?DESC(states); +states(importance) -> ?IMPORTANCE_HIGH; states(_) -> undefined. install_dir(type) -> string(); install_dir(required) -> false; -%% runner's root dir +%% runner's root dir todo move to data dir in 5.1 install_dir(default) -> <<"plugins">>; -install_dir(T) when T =/= desc -> undefined; -install_dir(desc) -> ?DESC(install_dir). +install_dir(desc) -> ?DESC(install_dir); +install_dir(importance) -> ?IMPORTANCE_LOW; +install_dir(_) -> undefined. check_interval(type) -> emqx_schema:duration(); check_interval(default) -> <<"5s">>; -check_interval(T) when T =/= desc -> undefined; -check_interval(desc) -> ?DESC(check_interval). +check_interval(desc) -> ?DESC(check_interval); +check_interval(deprecated) -> {since, "5.0.24"}; +check_interval(_) -> undefined. diff --git a/apps/emqx_rule_engine/src/emqx_rule_engine.app.src b/apps/emqx_rule_engine/src/emqx_rule_engine.app.src index 133138252..932ebc5ed 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_engine.app.src +++ b/apps/emqx_rule_engine/src/emqx_rule_engine.app.src @@ -2,7 +2,7 @@ {application, emqx_rule_engine, [ {description, "EMQX Rule Engine"}, % strict semver, bump manually! - {vsn, "5.0.14"}, + {vsn, "5.0.15"}, {modules, []}, {registered, [emqx_rule_engine_sup, emqx_rule_engine]}, {applications, [kernel, stdlib, rulesql, getopt, emqx_ctl]}, diff --git a/changes/ce/feat-10458.en.md b/changes/ce/feat-10458.en.md new file mode 100644 index 000000000..655885145 --- /dev/null +++ b/changes/ce/feat-10458.en.md @@ -0,0 +1,3 @@ +Set the level of plugin configuration options to low level, +in most cases, users only need to manage plugins on the dashboard +without the need for manual modification, so we lowered the level. 
diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src b/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src index 7ebc320e5..ced7ae86a 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src @@ -1,6 +1,6 @@ {application, emqx_ee_connector, [ {description, "EMQX Enterprise connectors"}, - {vsn, "0.1.10"}, + {vsn, "0.1.11"}, {registered, []}, {applications, [ kernel, From 4464a31fabe99846df88f60d8cd307fc26a70018 Mon Sep 17 00:00:00 2001 From: Zhongwen Deng Date: Thu, 20 Apr 2023 18:01:27 +0800 Subject: [PATCH 082/263] chore: remove *_collector for prometheus api's example --- apps/emqx_prometheus/src/emqx_prometheus.app.src | 2 +- apps/emqx_prometheus/src/emqx_prometheus_api.erl | 8 +------- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/apps/emqx_prometheus/src/emqx_prometheus.app.src b/apps/emqx_prometheus/src/emqx_prometheus.app.src index ae879da8f..f94b22d81 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus.app.src +++ b/apps/emqx_prometheus/src/emqx_prometheus.app.src @@ -2,7 +2,7 @@ {application, emqx_prometheus, [ {description, "Prometheus for EMQX"}, % strict semver, bump manually! - {vsn, "5.0.9"}, + {vsn, "5.0.10"}, {modules, []}, {registered, [emqx_prometheus_sup]}, {applications, [kernel, stdlib, prometheus, emqx, emqx_management]}, diff --git a/apps/emqx_prometheus/src/emqx_prometheus_api.erl b/apps/emqx_prometheus/src/emqx_prometheus_api.erl index 945c6eba9..d3bfc0224 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus_api.erl +++ b/apps/emqx_prometheus/src/emqx_prometheus_api.erl @@ -122,13 +122,7 @@ prometheus_config_example() -> interval => "15s", push_gateway_server => <<"http://127.0.0.1:9091">>, headers => #{'header-name' => 'header-value'}, - job_name => <<"${name}/instance/${name}~${host}">>, - vm_dist_collector => enabled, - mnesia_collector => enabled, - vm_statistics_collector => enabled, - vm_system_info_collector => enabled, - vm_memory_collector => enabled, - vm_msacc_collector => enabled + job_name => <<"${name}/instance/${name}~${host}">> }. 
prometheus_data_schema() -> From 21473e7ca59aa60eef38b7ef99f371ffff591c3e Mon Sep 17 00:00:00 2001 From: firest Date: Thu, 20 Apr 2023 18:16:04 +0800 Subject: [PATCH 083/263] fix(dynamo): fix field name errors --- lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl index f132438b6..ebb86f577 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl @@ -107,7 +107,7 @@ on_start( Templates = parse_template(Config), State = #{ - poolname => InstanceId, + pool_name => InstanceId, table => Table, templates => Templates }, @@ -176,7 +176,7 @@ do_query( InstanceId, Query, ApplyMode, - #{poolname := PoolName, templates := Templates, table := Table} = State + #{pool_name := PoolName, templates := Templates, table := Table} = State ) -> ?TRACE( "QUERY", From a55017ffaae40fda1eefb653b134e9d4e110f439 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Wed, 19 Apr 2023 10:39:42 -0300 Subject: [PATCH 084/263] ci: check if Elixir files are formatted in pre-commit hook --- scripts/git-hook-pre-commit.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/git-hook-pre-commit.sh b/scripts/git-hook-pre-commit.sh index b0e0699b9..cebd92b22 100755 --- a/scripts/git-hook-pre-commit.sh +++ b/scripts/git-hook-pre-commit.sh @@ -4,6 +4,9 @@ set -euo pipefail OPT="${1:--c}" +# mix format check is quite fast +mix format --check-formatted + files_dirty="$(git diff --name-only | grep -E '.*\.erl' || true)" files_cached="$(git diff --cached --name-only | grep -E '.*\.erl' || true)" if [[ "${files_dirty}" == '' ]] && [[ "${files_cached}" == '' ]]; then From b5eda9f0d1112d9343fcc85f01b4aa059beaddc3 Mon Sep 17 00:00:00 2001 From: Serge Tupchii Date: Fri, 14 Apr 2023 19:09:07 +0300 Subject: [PATCH 085/263] perf(emqx_resource): don't reactivate alarms on reoccurring errors Avoid unnecessary calls to activate an alarm if it has been already activated. Fixes: EMQX-9529/#10357 --- .../src/emqx_resource_manager.erl | 47 ++++++++++++------- .../test/emqx_connector_demo.erl | 2 + .../test/emqx_resource_SUITE.erl | 37 ++++++++++++++- 3 files changed, 68 insertions(+), 18 deletions(-) diff --git a/apps/emqx_resource/src/emqx_resource_manager.erl b/apps/emqx_resource/src/emqx_resource_manager.erl index 7ecf56c18..b35bade77 100644 --- a/apps/emqx_resource/src/emqx_resource_manager.erl +++ b/apps/emqx_resource/src/emqx_resource_manager.erl @@ -511,10 +511,10 @@ start_resource(Data, From) -> id => Data#data.id, reason => Reason }), - _ = maybe_alarm(disconnected, Data#data.id, Data#data.error), + _ = maybe_alarm(disconnected, Data#data.id, Err, Data#data.error), %% Keep track of the error reason why the connection did not work %% so that the Reason can be returned when the verification call is made. - UpdatedData = Data#data{status = disconnected, error = Reason}, + UpdatedData = Data#data{status = disconnected, error = Err}, Actions = maybe_reply(retry_actions(UpdatedData), From, Err), {next_state, disconnected, update_state(UpdatedData, Data), Actions} end. 
@@ -582,11 +582,11 @@ handle_connected_health_check(Data) -> with_health_check(#data{state = undefined} = Data, Func) -> Func(disconnected, Data); -with_health_check(Data, Func) -> +with_health_check(#data{error = PrevError} = Data, Func) -> ResId = Data#data.id, HCRes = emqx_resource:call_health_check(Data#data.manager_id, Data#data.mod, Data#data.state), {Status, NewState, Err} = parse_health_check_result(HCRes, Data), - _ = maybe_alarm(Status, ResId, Err), + _ = maybe_alarm(Status, ResId, Err, PrevError), ok = maybe_resume_resource_workers(ResId, Status), UpdatedData = Data#data{ state = NewState, status = Status, error = Err @@ -605,21 +605,25 @@ update_state(Data, _DataWas) -> health_check_interval(Opts) -> maps:get(health_check_interval, Opts, ?HEALTHCHECK_INTERVAL). -maybe_alarm(connected, _ResId, _Error) -> +maybe_alarm(connected, _ResId, _Error, _PrevError) -> ok; -maybe_alarm(_Status, <>, _Error) -> +maybe_alarm(_Status, <>, _Error, _PrevError) -> ok; -maybe_alarm(_Status, ResId, Error) -> +%% Assume that alarm is already active +maybe_alarm(_Status, _ResId, Error, Error) -> + ok; +maybe_alarm(_Status, ResId, Error, _PrevError) -> HrError = case Error of - undefined -> <<"Unknown reason">>; - _Else -> emqx_utils:readable_error_msg(Error) + {error, undefined} -> <<"Unknown reason">>; + {error, Reason} -> emqx_utils:readable_error_msg(Reason) end, emqx_alarm:activate( ResId, #{resource_id => ResId, reason => resource_down}, <<"resource down: ", HrError/binary>> - ). + ), + ?tp(resource_activate_alarm, #{resource_id => ResId}). maybe_resume_resource_workers(ResId, connected) -> lists:foreach( @@ -635,11 +639,11 @@ maybe_clear_alarm(ResId) -> emqx_alarm:deactivate(ResId). parse_health_check_result(Status, Data) when ?IS_STATUS(Status) -> - {Status, Data#data.state, undefined}; + {Status, Data#data.state, status_to_error(Status)}; parse_health_check_result({Status, NewState}, _Data) when ?IS_STATUS(Status) -> - {Status, NewState, undefined}; + {Status, NewState, status_to_error(Status)}; parse_health_check_result({Status, NewState, Error}, _Data) when ?IS_STATUS(Status) -> - {Status, NewState, Error}; + {Status, NewState, {error, Error}}; parse_health_check_result({error, Error}, Data) -> ?SLOG( error, @@ -649,7 +653,16 @@ parse_health_check_result({error, Error}, Data) -> reason => Error } ), - {disconnected, Data#data.state, Error}. + {disconnected, Data#data.state, {error, Error}}. + +status_to_error(connected) -> + undefined; +status_to_error(_) -> + {error, undefined}. + +%% Compatibility +external_error({error, Reason}) -> Reason; +external_error(Other) -> Other. 
maybe_reply(Actions, undefined, _Reply) -> Actions; @@ -660,7 +673,7 @@ maybe_reply(Actions, From, Reply) -> data_record_to_external_map(Data) -> #{ id => Data#data.id, - error => Data#data.error, + error => external_error(Data#data.error), mod => Data#data.mod, callback_mode => Data#data.callback_mode, query_mode => Data#data.query_mode, @@ -679,8 +692,8 @@ do_wait_for_ready(ResId, Retry) -> case read_cache(ResId) of {_Group, #data{status = connected}} -> ok; - {_Group, #data{status = disconnected, error = Reason}} -> - {error, Reason}; + {_Group, #data{status = disconnected, error = Err}} -> + {error, external_error(Err)}; _ -> timer:sleep(?WAIT_FOR_RESOURCE_DELAY), do_wait_for_ready(ResId, Retry - 1) diff --git a/apps/emqx_resource/test/emqx_connector_demo.erl b/apps/emqx_resource/test/emqx_connector_demo.erl index 5be854e93..96e22c6b6 100644 --- a/apps/emqx_resource/test/emqx_connector_demo.erl +++ b/apps/emqx_resource/test/emqx_connector_demo.erl @@ -62,6 +62,7 @@ set_callback_mode(Mode) -> persistent_term:put(?CM_KEY, Mode). on_start(_InstId, #{create_error := true}) -> + ?tp(connector_demo_start_error, #{}), error("some error"); on_start(InstId, #{name := Name} = Opts) -> Register = maps:get(register, Opts, false), @@ -243,6 +244,7 @@ batch_big_payload({async, ReplyFunAndArgs}, InstId, Batch, State = #{pid := Pid} {ok, Pid}. on_get_status(_InstId, #{health_check_error := true}) -> + ?tp(connector_demo_health_check_error, #{}), disconnected; on_get_status(_InstId, #{pid := Pid}) -> timer:sleep(300), diff --git a/apps/emqx_resource/test/emqx_resource_SUITE.erl b/apps/emqx_resource/test/emqx_resource_SUITE.erl index e098c2e1c..f8ddd56b5 100644 --- a/apps/emqx_resource/test/emqx_resource_SUITE.erl +++ b/apps/emqx_resource/test/emqx_resource_SUITE.erl @@ -2635,7 +2635,6 @@ t_call_mode_uncoupled_from_query_mode(_Config) -> Trace2 ) ), - ok end ). @@ -2796,6 +2795,42 @@ t_late_call_reply(_Config) -> ), ok. +t_resource_create_error_activate_alarm_once(_) -> + do_t_resource_activate_alarm_once( + #{name => test_resource, create_error => true}, + connector_demo_start_error + ). + +t_resource_health_check_error_activate_alarm_once(_) -> + do_t_resource_activate_alarm_once( + #{name => test_resource, health_check_error => true}, + connector_demo_health_check_error + ). + +do_t_resource_activate_alarm_once(ResourceConfig, SubscribeEvent) -> + ?check_trace( + begin + ?wait_async_action( + emqx_resource:create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + ResourceConfig, + #{auto_restart_interval => 100, health_check_interval => 100} + ), + #{?snk_kind := resource_activate_alarm, resource_id := ?ID} + ), + ?assertMatch([#{activated := true, name := ?ID}], emqx_alarm:get_alarms(activated)), + {ok, SubRef} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := SubscribeEvent}), 4, 7000 + ), + ?assertMatch({ok, [_, _, _, _]}, snabbkaffe:receive_events(SubRef)) + end, + fun(Trace) -> + ?assertMatch([_], ?of_kind(resource_activate_alarm, Trace)) + end + ). 
+ %%------------------------------------------------------------------------------ %% Helpers %%------------------------------------------------------------------------------ From 662206fd33fbbf3e7114980430b36db8994f51cf Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 20 Apr 2023 10:53:12 -0300 Subject: [PATCH 086/263] ci: simplify find-apps.sh for ee apps --- scripts/find-apps.sh | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/scripts/find-apps.sh b/scripts/find-apps.sh index eed2acdb8..bfb6ba2cc 100755 --- a/scripts/find-apps.sh +++ b/scripts/find-apps.sh @@ -72,14 +72,12 @@ describe_app() { runner="docker" fi case "${app}" in - apps/emqx_bridge_kafka) - profile='emqx-enterprise' - ;; - apps/emqx_bridge_gcp_pubsub) - profile='emqx-enterprise' - ;; apps/*) - profile='emqx' + if [[ -f "${app}/BSL.txt" ]]; then + profile='emqx-enterprise' + else + profile='emqx' + fi ;; lib-ee/*) profile='emqx-enterprise' From b960d2ecb39100b17141a5409e7ef6c35cc674be Mon Sep 17 00:00:00 2001 From: Serge Tupchii Date: Wed, 19 Apr 2023 20:42:14 +0300 Subject: [PATCH 087/263] perf(emqx_alarm): use dirty Mnesia operations to activate an alarm Alarms are stored in a local content shard and all 'activate' operations are serialized as they are called by one process ('emqx_alarm' gen_server), so using dirty operations gives performance gain without sacrificing consistency. Fixes: EMQX-9529/#10357 --- apps/emqx/src/emqx_alarm.erl | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/apps/emqx/src/emqx_alarm.erl b/apps/emqx/src/emqx_alarm.erl index 6aa3cb95d..7255e96da 100644 --- a/apps/emqx/src/emqx_alarm.erl +++ b/apps/emqx/src/emqx_alarm.erl @@ -57,7 +57,6 @@ %% Internal exports (RPC) -export([ - create_activate_alarm/3, do_get_alarms/0 ]). @@ -218,17 +217,12 @@ init([]) -> {ok, #{}, get_validity_period()}. handle_call({activate_alarm, Name, Details, Message}, _From, State) -> - Res = mria:transaction( - mria:local_content_shard(), - fun ?MODULE:create_activate_alarm/3, - [Name, Details, Message] - ), - case Res of - {atomic, Alarm} -> + case create_activate_alarm(Name, Details, Message) of + {ok, Alarm} -> do_actions(activate, Alarm, emqx:get_config([alarm, actions])), {reply, ok, State, get_validity_period()}; - {aborted, Reason} -> - {reply, Reason, State, get_validity_period()} + Err -> + {reply, Err, State, get_validity_period()} end; handle_call({deactivate_alarm, Name, Details, Message}, _From, State) -> case mnesia:dirty_read(?ACTIVATED_ALARM, Name) of @@ -283,9 +277,9 @@ get_validity_period() -> emqx:get_config([alarm, validity_period]). create_activate_alarm(Name, Details, Message) -> - case mnesia:read(?ACTIVATED_ALARM, Name) of + case mnesia:dirty_read(?ACTIVATED_ALARM, Name) of [#activated_alarm{name = Name}] -> - mnesia:abort({error, already_existed}); + {error, already_existed}; [] -> Alarm = #activated_alarm{ name = Name, @@ -293,8 +287,8 @@ create_activate_alarm(Name, Details, Message) -> message = normalize_message(Name, iolist_to_binary(Message)), activate_at = erlang:system_time(microsecond) }, - ok = mnesia:write(?ACTIVATED_ALARM, Alarm, write), - Alarm + ok = mria:dirty_write(?ACTIVATED_ALARM, Alarm), + {ok, Alarm} end. 
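%% Illustrative note (not part of the hunk above): as the commit message says,
%% the dirty_read/dirty_write pair in create_activate_alarm/3 stays consistent
%% because every activation is serialized through the emqx_alarm gen_server,
%% e.g.
%%
%%     activate(Name, Details, Message) ->
%%         gen_server:call(?MODULE, {activate_alarm, Name, Details, Message}).
%%
%% so concurrent activations cannot race the read-then-write on the local
%% content table.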
do_get_alarms() -> From 423a30fbb3392538807b1289a401e0f4b7b2ded2 Mon Sep 17 00:00:00 2001 From: Serge Tupchii Date: Wed, 19 Apr 2023 21:35:56 +0300 Subject: [PATCH 088/263] fix(emqx_alarm): add safe call API to activate/deactivate alarms and use it in resource_manager Don't let 'emqx_resource_manager' crash because of emqx_alarm timeouts. Fixes: EMQX-9529/#10357 --- apps/emqx/src/emqx_alarm.erl | 26 ++++++++++++++++++- .../src/emqx_resource_manager.erl | 6 ++--- changes/ce/fix-10407.en.md | 7 +++++ 3 files changed, 35 insertions(+), 4 deletions(-) create mode 100644 changes/ce/fix-10407.en.md diff --git a/apps/emqx/src/emqx_alarm.erl b/apps/emqx/src/emqx_alarm.erl index 7255e96da..056f36050 100644 --- a/apps/emqx/src/emqx_alarm.erl +++ b/apps/emqx/src/emqx_alarm.erl @@ -42,7 +42,9 @@ get_alarms/0, get_alarms/1, format/1, - format/2 + format/2, + safe_activate/3, + safe_deactivate/1 ]). %% gen_server callbacks @@ -122,6 +124,9 @@ activate(Name, Details) -> activate(Name, Details, Message) -> gen_server:call(?MODULE, {activate_alarm, Name, Details, Message}). +safe_activate(Name, Details, Message) -> + safe_call({activate_alarm, Name, Details, Message}). + -spec ensure_deactivated(binary() | atom()) -> ok. ensure_deactivated(Name) -> ensure_deactivated(Name, no_details). @@ -154,6 +159,9 @@ deactivate(Name, Details) -> deactivate(Name, Details, Message) -> gen_server:call(?MODULE, {deactivate_alarm, Name, Details, Message}). +safe_deactivate(Name) -> + safe_call({deactivate_alarm, Name, no_details, <<"">>}). + -spec delete_all_deactivated_alarms() -> ok. delete_all_deactivated_alarms() -> gen_server:call(?MODULE, delete_all_deactivated_alarms). @@ -468,3 +476,19 @@ normalize_message(Name, <<"">>) -> list_to_binary(io_lib:format("~p", [Name])); normalize_message(_Name, Message) -> Message. + +safe_call(Req) -> + try + gen_server:call(?MODULE, Req) + catch + _:{timeout, _} = Reason -> + ?SLOG(warning, #{msg => "emqx_alarm_safe_call_timeout", reason => Reason}), + {error, timeout}; + _:Reason:St -> + ?SLOG(error, #{ + msg => "emqx_alarm_safe_call_exception", + reason => Reason, + stacktrace => St + }), + {error, Reason} + end. diff --git a/apps/emqx_resource/src/emqx_resource_manager.erl b/apps/emqx_resource/src/emqx_resource_manager.erl index b35bade77..877b35fff 100644 --- a/apps/emqx_resource/src/emqx_resource_manager.erl +++ b/apps/emqx_resource/src/emqx_resource_manager.erl @@ -375,7 +375,7 @@ handle_event(state_timeout, health_check, connecting, Data) -> %% and successful health_checks handle_event(enter, _OldState, connected = State, Data) -> ok = log_state_consistency(State, Data), - _ = emqx_alarm:deactivate(Data#data.id), + _ = emqx_alarm:safe_deactivate(Data#data.id), ?tp(resource_connected_enter, #{}), {keep_state_and_data, health_check_actions(Data)}; handle_event(state_timeout, health_check, connected, Data) -> @@ -618,7 +618,7 @@ maybe_alarm(_Status, ResId, Error, _PrevError) -> {error, undefined} -> <<"Unknown reason">>; {error, Reason} -> emqx_utils:readable_error_msg(Reason) end, - emqx_alarm:activate( + emqx_alarm:safe_activate( ResId, #{resource_id => ResId, reason => resource_down}, <<"resource down: ", HrError/binary>> @@ -636,7 +636,7 @@ maybe_resume_resource_workers(_, _) -> maybe_clear_alarm(<>) -> ok; maybe_clear_alarm(ResId) -> - emqx_alarm:deactivate(ResId). + emqx_alarm:safe_deactivate(ResId). 
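%% Illustrative note (not part of the hunk above): safe_activate/3 and
%% safe_deactivate/1 wrap the gen_server call in try/catch (see the emqx_alarm
%% change in this patch), so an alarm-server timeout surfaces here as
%% {error, timeout} instead of crashing the resource manager process.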
parse_health_check_result(Status, Data) when ?IS_STATUS(Status) -> {Status, Data#data.state, status_to_error(Status)}; diff --git a/changes/ce/fix-10407.en.md b/changes/ce/fix-10407.en.md new file mode 100644 index 000000000..d9df9ce69 --- /dev/null +++ b/changes/ce/fix-10407.en.md @@ -0,0 +1,7 @@ +Improve 'emqx_alarm' performance by using Mnesia dirty operations and avoiding +unnecessary calls from 'emqx_resource_manager' to reactivate alarms that have been already activated. +Use new safe 'emqx_alarm' API to activate/deactivate alarms to ensure that emqx_resource_manager +doesn't crash because of alarm timeouts. +The crashes were possible when the following conditions co-occurred: + - a relatively high number of failing resources, e.g. bridges tried to activate alarms on re-occurring errors; + - the system experienced a very high load. From 45254d7d85c9dd2c0f75052b5216563718c73d96 Mon Sep 17 00:00:00 2001 From: Serge Tupchii Date: Thu, 20 Apr 2023 16:30:43 +0300 Subject: [PATCH 089/263] fix(emqx_bridge): validate Webhook bad URL and return 'BAD_REQUEST' if it's invalid Fixes: EMQX-9310 --- apps/emqx_bridge/src/emqx_bridge.app.src | 2 +- apps/emqx_bridge/src/emqx_bridge_api.erl | 4 +- apps/emqx_bridge/src/emqx_bridge_resource.erl | 47 ++++++++++------ .../test/emqx_bridge_api_SUITE.erl | 56 +++++++++++++++++++ changes/ce/fix-10463.en.md | 2 + 5 files changed, 93 insertions(+), 18 deletions(-) create mode 100644 changes/ce/fix-10463.en.md diff --git a/apps/emqx_bridge/src/emqx_bridge.app.src b/apps/emqx_bridge/src/emqx_bridge.app.src index 04f2b58a7..d6c140fef 100644 --- a/apps/emqx_bridge/src/emqx_bridge.app.src +++ b/apps/emqx_bridge/src/emqx_bridge.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_bridge, [ {description, "EMQX bridges"}, - {vsn, "0.1.16"}, + {vsn, "0.1.17"}, {registered, [emqx_bridge_sup]}, {mod, {emqx_bridge_app, []}}, {applications, [ diff --git a/apps/emqx_bridge/src/emqx_bridge_api.erl b/apps/emqx_bridge/src/emqx_bridge_api.erl index c87ea8ea6..09d1159bd 100644 --- a/apps/emqx_bridge/src/emqx_bridge_api.erl +++ b/apps/emqx_bridge/src/emqx_bridge_api.erl @@ -64,7 +64,7 @@ {BridgeType, BridgeName} -> EXPR catch - throw:{invalid_bridge_id, Reason} -> + throw:#{reason := Reason} -> ?NOT_FOUND(<<"Invalid bridge ID, ", Reason/binary>>) end ). @@ -546,6 +546,8 @@ schema("/bridges_probe") -> case emqx_bridge_resource:create_dry_run(ConnType, maps:remove(<<"type">>, Params1)) of ok -> ?NO_CONTENT; + {error, #{kind := validation_error} = Reason} -> + ?BAD_REQUEST('TEST_FAILED', map_to_json(Reason)); {error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' -> ?BAD_REQUEST('TEST_FAILED', Reason) end; diff --git a/apps/emqx_bridge/src/emqx_bridge_resource.erl b/apps/emqx_bridge/src/emqx_bridge_resource.erl index 347f9d973..1ad024c40 100644 --- a/apps/emqx_bridge/src/emqx_bridge_resource.erl +++ b/apps/emqx_bridge/src/emqx_bridge_resource.erl @@ -87,7 +87,7 @@ parse_bridge_id(BridgeId) -> [Type, Name] -> {to_type_atom(Type), validate_name(Name)}; _ -> - invalid_bridge_id( + invalid_data( <<"should be of pattern {type}:{name}, but got ", BridgeId/binary>> ) end. @@ -108,14 +108,14 @@ validate_name(Name0) -> true -> Name0; false -> - invalid_bridge_id(<<"bad name: ", Name0/binary>>) + invalid_data(<<"bad name: ", Name0/binary>>) end; false -> - invalid_bridge_id(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>) + invalid_data(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>) end. --spec invalid_bridge_id(binary()) -> no_return(). 
-invalid_bridge_id(Reason) -> throw({?FUNCTION_NAME, Reason}). +-spec invalid_data(binary()) -> no_return(). +invalid_data(Reason) -> throw(#{kind => validation_error, reason => Reason}). is_id_char(C) when C >= $0 andalso C =< $9 -> true; is_id_char(C) when C >= $a andalso C =< $z -> true; @@ -130,7 +130,7 @@ to_type_atom(Type) -> erlang:binary_to_existing_atom(Type, utf8) catch _:_ -> - invalid_bridge_id(<<"unknown type: ", Type/binary>>) + invalid_data(<<"unknown bridge type: ", Type/binary>>) end. reset_metrics(ResourceId) -> @@ -243,12 +243,19 @@ create_dry_run(Type, Conf0) -> {error, Reason} -> {error, Reason}; {ok, ConfNew} -> - ParseConf = parse_confs(bin(Type), TmpPath, ConfNew), - Res = emqx_resource:create_dry_run_local( - bridge_to_resource_type(Type), ParseConf - ), - _ = maybe_clear_certs(TmpPath, ConfNew), - Res + try + ParseConf = parse_confs(bin(Type), TmpPath, ConfNew), + Res = emqx_resource:create_dry_run_local( + bridge_to_resource_type(Type), ParseConf + ), + Res + catch + %% validation errors + throw:Reason -> + {error, Reason} + after + _ = maybe_clear_certs(TmpPath, ConfNew) + end end. remove(BridgeId) -> @@ -300,10 +307,18 @@ parse_confs( max_retries := Retry } = Conf ) -> - {BaseUrl, Path} = parse_url(Url), - {ok, BaseUrl2} = emqx_http_lib:uri_parse(BaseUrl), + Url1 = bin(Url), + {BaseUrl, Path} = parse_url(Url1), + BaseUrl1 = + case emqx_http_lib:uri_parse(BaseUrl) of + {ok, BUrl} -> + BUrl; + {error, Reason} -> + Reason1 = emqx_utils:readable_error_msg(Reason), + invalid_data(<<"Invalid URL: ", Url1/binary, ", details: ", Reason1/binary>>) + end, Conf#{ - base_url => BaseUrl2, + base_url => BaseUrl1, request => #{ path => Path, @@ -338,7 +353,7 @@ parse_url(Url) -> {iolist_to_binary([Scheme, "//", HostPort]), <<>>} end; [Url] -> - error({invalid_url, Url}) + invalid_data(<<"Missing scheme in URL: ", Url/binary>>) end. str(Bin) when is_binary(Bin) -> binary_to_list(Bin); diff --git a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl index 3afe17080..d55b92138 100644 --- a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl @@ -414,6 +414,18 @@ t_http_crud_apis(Config) -> }, json(maps:get(<<"message">>, PutFail2)) ), + {ok, 400, _} = request_json( + put, + uri(["bridges", BridgeID]), + ?HTTP_BRIDGE(<<"localhost:1234/foo">>, Name), + Config + ), + {ok, 400, _} = request_json( + put, + uri(["bridges", BridgeID]), + ?HTTP_BRIDGE(<<"htpp://localhost:12341234/foo">>, Name), + Config + ), %% delete the bridge {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config), @@ -498,6 +510,22 @@ t_http_crud_apis(Config) -> %% Try create bridge with bad characters as name {ok, 400, _} = request(post, uri(["bridges"]), ?HTTP_BRIDGE(URL1, <<"隋达"/utf8>>), Config), + %% Missing scheme in URL + {ok, 400, _} = request( + post, + uri(["bridges"]), + ?HTTP_BRIDGE(<<"localhost:1234/foo">>, <<"missing_url_scheme">>), + Config + ), + + %% Invalid port + {ok, 400, _} = request( + post, + uri(["bridges"]), + ?HTTP_BRIDGE(<<"http://localhost:12341234/foo">>, <<"invalid_port">>), + Config + ), + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config). 
t_http_bridges_local_topic(Config) -> @@ -1016,6 +1044,34 @@ t_bridges_probe(Config) -> ) ), + %% Missing scheme in URL + ?assertMatch( + {ok, 400, #{ + <<"code">> := <<"TEST_FAILED">>, + <<"message">> := _ + }}, + request_json( + post, + uri(["bridges_probe"]), + ?HTTP_BRIDGE(<<"203.0.113.3:1234/foo">>), + Config + ) + ), + + %% Invalid port + ?assertMatch( + {ok, 400, #{ + <<"code">> := <<"TEST_FAILED">>, + <<"message">> := _ + }}, + request_json( + post, + uri(["bridges_probe"]), + ?HTTP_BRIDGE(<<"http://203.0.113.3:12341234/foo">>), + Config + ) + ), + {ok, 204, _} = request( post, uri(["bridges_probe"]), diff --git a/changes/ce/fix-10463.en.md b/changes/ce/fix-10463.en.md new file mode 100644 index 000000000..9d57bc1b0 --- /dev/null +++ b/changes/ce/fix-10463.en.md @@ -0,0 +1,2 @@ +Improve bridges API error handling. +If Webhook bridge URL is not valid, bridges API will return '400' error instead of '500'. From 5a7685a341b6466e9c1e18c84aa8b70df67004c5 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Thu, 20 Apr 2023 22:26:29 +0800 Subject: [PATCH 090/263] chore: apply suggestions from code review Co-authored-by: ieQu1 <99872536+ieQu1@users.noreply.github.com> Co-authored-by: Thales Macedo Garitezi --- apps/emqx_modules/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/emqx_modules/README.md b/apps/emqx_modules/README.md index 567b1ac71..dfa349514 100644 --- a/apps/emqx_modules/README.md +++ b/apps/emqx_modules/README.md @@ -1,6 +1,6 @@ # EMQX Modules -The application provide some minor functional modules that are not included in the MQTT +The application provides some minor functional modules that are not included in the MQTT protocol standard, including "Delayed Publish", "Topic Rewrite", "Topic Metrics" and "Telemetry". @@ -20,7 +20,7 @@ See [Enabling/Disabling Delayed Publish via HTTP API](https://www.emqx.io/docs/e Topic Rewrite allows users to configure rules to change the topic strings that the client requests to subscribe or publish. -This feature is very useful when it need to compatibility with different versions of topic designs. +This feature is very useful when there's a need to transform between different topic structures. For example, an old device that has already been issued and cannot be upgraded may use old topic designs, but for some reason, we adjusted the format of topics. We can use this feature to rewrite the old topics as the new format to eliminate these differences. 
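For reference, a minimal sketch of how the validation-error contract introduced in PATCH 089 above ("fix(emqx_bridge): validate Webhook bad URL ...") is meant to be consumed; the module and function names below are illustrative and are not taken from the patches:

-module(bridge_validation_example).
-export([parse/1]).

%% emqx_bridge_resource now throws #{kind => validation_error, reason => Reason}
%% for malformed bridge ids and invalid Webhook URLs; callers turn this into a
%% 4xx-style error instead of letting it escalate to a 500.
parse(BridgeId) when is_binary(BridgeId) ->
    try
        {_Type, _Name} = emqx_bridge_resource:parse_bridge_id(BridgeId),
        ok
    catch
        throw:#{kind := validation_error, reason := Reason} ->
            {error, Reason}
    end.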
From 8e8ba6ce7e94e84ae7b3e6f3e9a5aee0d3c601db Mon Sep 17 00:00:00 2001 From: Zhongwen Deng Date: Wed, 19 Apr 2023 16:42:54 +0800 Subject: [PATCH 091/263] fix: always check authn_http's header and ssl_option --- apps/emqx_authn/src/emqx_authn.app.src | 2 +- apps/emqx_authn/src/emqx_authn_schema.erl | 26 +++++ .../src/simple_authn/emqx_authn_http.erl | 52 ++++++--- .../test/emqx_authn_https_SUITE.erl | 17 +++ .../emqx_conf/test/emqx_conf_schema_tests.erl | 100 +++++++++++++++--- 5 files changed, 163 insertions(+), 34 deletions(-) diff --git a/apps/emqx_authn/src/emqx_authn.app.src b/apps/emqx_authn/src/emqx_authn.app.src index 063771e24..c1d48909c 100644 --- a/apps/emqx_authn/src/emqx_authn.app.src +++ b/apps/emqx_authn/src/emqx_authn.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_authn, [ {description, "EMQX Authentication"}, - {vsn, "0.1.17"}, + {vsn, "0.1.18"}, {modules, []}, {registered, [emqx_authn_sup, emqx_authn_registry]}, {applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]}, diff --git a/apps/emqx_authn/src/emqx_authn_schema.erl b/apps/emqx_authn/src/emqx_authn_schema.erl index 112ea2076..a7cdaac5f 100644 --- a/apps/emqx_authn/src/emqx_authn_schema.erl +++ b/apps/emqx_authn/src/emqx_authn_schema.erl @@ -18,10 +18,12 @@ -elvis([{elvis_style, invalid_dynamic_call, disable}]). -include_lib("hocon/include/hoconsc.hrl"). +-include("emqx_authn.hrl"). -export([ common_fields/0, roots/0, + validations/0, tags/0, fields/1, authenticator_type/0, @@ -207,3 +209,27 @@ array(Name) -> array(Name, DescId) -> {Name, ?HOCON(?R_REF(Name), #{desc => ?DESC(DescId)})}. + +validations() -> + [ + {check_http_ssl_opts, fun(Conf) -> + CheckFun = fun emqx_authn_http:check_ssl_opts/1, + validation(Conf, CheckFun) + end}, + {check_http_headers, fun(Conf) -> + CheckFun = fun emqx_authn_http:check_headers/1, + validation(Conf, CheckFun) + end} + ]. + +validation(Conf, CheckFun) when is_map(Conf) -> + validation(hocon_maps:get(?CONF_NS, Conf), CheckFun); +validation(undefined, _) -> + ok; +validation([], _) -> + ok; +validation([AuthN | Tail], CheckFun) -> + case CheckFun(#{?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY => AuthN}) of + ok -> validation(Tail, CheckFun); + Error -> Error + end. diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl index 33587a2db..33bd07efc 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl @@ -38,6 +38,8 @@ headers/1 ]). +-export([check_headers/1, check_ssl_opts/1]). + -export([ refs/0, union_member_selector/1, @@ -107,8 +109,8 @@ common_fields() -> validations() -> [ - {check_ssl_opts, fun check_ssl_opts/1}, - {check_headers, fun check_headers/1} + {check_ssl_opts, fun ?MODULE:check_ssl_opts/1}, + {check_headers, fun ?MODULE:check_headers/1} ]. url(type) -> binary(); @@ -262,21 +264,39 @@ transform_header_name(Headers) -> ). 
check_ssl_opts(Conf) -> - {BaseUrl, _Path, _Query} = parse_url(get_conf_val("url", Conf)), - case BaseUrl of - <<"https://", _/binary>> -> - case get_conf_val("ssl.enable", Conf) of - true -> ok; - false -> false - end; - <<"http://", _/binary>> -> - ok + case get_conf_val("url", Conf) of + undefined -> + ok; + Url -> + {BaseUrl, _Path, _Query} = parse_url(Url), + case BaseUrl of + <<"https://", _/binary>> -> + case get_conf_val("ssl.enable", Conf) of + true -> + ok; + false -> + <<"it's required to enable the TLS option to establish a https connection">> + end; + <<"http://", _/binary>> -> + ok + end end. check_headers(Conf) -> - Method = to_bin(get_conf_val("method", Conf)), - Headers = get_conf_val("headers", Conf), - Method =:= <<"post">> orelse (not maps:is_key(<<"content-type">>, Headers)). + case get_conf_val("headers", Conf) of + undefined -> + ok; + Headers -> + case to_bin(get_conf_val("method", Conf)) of + <<"post">> -> + ok; + <<"get">> -> + case maps:is_key(<<"content-type">>, Headers) of + false -> ok; + true -> <<"HTTP GET requests cannot include content-type header.">> + end + end + end. parse_url(Url) -> case string:split(Url, "//", leading) of @@ -311,7 +331,7 @@ parse_config( method => Method, path => Path, headers => ensure_header_name_type(Headers), - base_path_templete => emqx_authn_utils:parse_str(Path), + base_path_template => emqx_authn_utils:parse_str(Path), base_query_template => emqx_authn_utils:parse_deep( cow_qs:parse_qs(to_bin(Query)) ), @@ -324,7 +344,7 @@ parse_config( generate_request(Credential, #{ method := Method, headers := Headers0, - base_path_templete := BasePathTemplate, + base_path_template := BasePathTemplate, base_query_template := BaseQueryTemplate, body_template := BodyTemplate }) -> diff --git a/apps/emqx_authn/test/emqx_authn_https_SUITE.erl b/apps/emqx_authn/test/emqx_authn_https_SUITE.erl index c4315b69f..f23a160d1 100644 --- a/apps/emqx_authn/test/emqx_authn_https_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_https_SUITE.erl @@ -114,6 +114,22 @@ t_create_invalid_version(_Config) -> emqx_access_control:authenticate(?CREDENTIALS) ). +t_create_disable_ssl_opts_when_https(_Config) -> + {ok, _} = create_https_auth_with_ssl_opts( + #{ + <<"server_name_indication">> => <<"authn-server">>, + <<"verify">> => <<"verify_peer">>, + <<"versions">> => [<<"tlsv1.2">>], + <<"ciphers">> => [<<"ECDHE-RSA-AES256-GCM-SHA384">>], + <<"enable">> => <<"false">> + } + ), + + ?assertEqual( + {error, not_authorized}, + emqx_access_control:authenticate(?CREDENTIALS) + ). + t_create_invalid_ciphers(_Config) -> {ok, _} = create_https_auth_with_ssl_opts( #{ @@ -135,6 +151,7 @@ t_create_invalid_ciphers(_Config) -> create_https_auth_with_ssl_opts(SpecificSSLOpts) -> AuthConfig = raw_https_auth_config(SpecificSSLOpts), + ct:pal("111:~p~n", [AuthConfig]), emqx:update_config(?PATH, {create_authenticator, ?GLOBAL, AuthConfig}). raw_https_auth_config(SpecificSSLOpts) -> diff --git a/apps/emqx_conf/test/emqx_conf_schema_tests.erl b/apps/emqx_conf/test/emqx_conf_schema_tests.erl index 453aca7a8..79307c803 100644 --- a/apps/emqx_conf/test/emqx_conf_schema_tests.erl +++ b/apps/emqx_conf/test/emqx_conf_schema_tests.erl @@ -5,27 +5,27 @@ -module(emqx_conf_schema_tests). -include_lib("eunit/include/eunit.hrl"). 
+-define(BASE_CONF, + "" + "\n" + " node {\n" + " name = \"emqx1@127.0.0.1\"\n" + " cookie = \"emqxsecretcookie\"\n" + " data_dir = \"data\"\n" + " }\n" + " cluster {\n" + " name = emqxcl\n" + " discovery_strategy = static\n" + " static.seeds = ~p\n" + " core_nodes = ~p\n" + " }\n" + "" +). array_nodes_test() -> ExpectNodes = ['emqx1@127.0.0.1', 'emqx2@127.0.0.1'], - BaseConf = - "" - "\n" - " node {\n" - " name = \"emqx1@127.0.0.1\"\n" - " cookie = \"emqxsecretcookie\"\n" - " data_dir = \"data\"\n" - " }\n" - " cluster {\n" - " name = emqxcl\n" - " discovery_strategy = static\n" - " static.seeds = ~p\n" - " core_nodes = ~p\n" - " }\n" - " " - "", lists:foreach( fun(Nodes) -> - ConfFile = iolist_to_binary(io_lib:format(BaseConf, [Nodes, Nodes])), + ConfFile = iolist_to_binary(io_lib:format(?BASE_CONF, [Nodes, Nodes])), {ok, Conf} = hocon:binary(ConfFile, #{format => richmap}), ConfList = hocon_tconf:generate(emqx_conf_schema, Conf), ClusterDiscovery = proplists:get_value( @@ -46,6 +46,72 @@ array_nodes_test() -> ), ok. +authn_validations_test() -> + BaseConf = iolist_to_binary(io_lib:format(?BASE_CONF, ["emqx1@127.0.0.1", "emqx1@127.0.0.1"])), + DisableSSLWithHttps = + "" + "\n" + "authentication = [\n" + "{backend = \"http\"\n" + "body {password = \"${password}\", username = \"${username}\"}\n" + "connect_timeout = \"15s\"\n" + "enable_pipelining = 100\n" + "headers {\"content-type\" = \"application/json\"}\n" + "mechanism = \"password_based\"\n" + "method = \"post\"\n" + "pool_size = 8\n" + "request_timeout = \"5s\"\n" + "ssl {enable = false, verify = \"verify_peer\"}\n" + "url = \"https://127.0.0.1:8080\"\n" + "}\n" + "]\n" + "", + Conf = <>, + {ok, ConfMap} = hocon:binary(Conf, #{format => richmap}), + ?assertThrow( + {emqx_conf_schema, [ + #{ + kind := validation_error, + reason := integrity_validation_failure, + result := _, + validation_name := check_http_ssl_opts + } + ]}, + hocon_tconf:generate(emqx_conf_schema, ConfMap) + ), + BadHeader = + "" + "\n" + "authentication = [\n" + "{backend = \"http\"\n" + "body {password = \"${password}\", username = \"${username}\"}\n" + "connect_timeout = \"15s\"\n" + "enable_pipelining = 100\n" + "headers {\"content-type\" = \"application/json\"}\n" + "mechanism = \"password_based\"\n" + "method = \"get\"\n" + "pool_size = 8\n" + "request_timeout = \"5s\"\n" + "ssl {enable = false, verify = \"verify_peer\"}\n" + "url = \"http://127.0.0.1:8080\"\n" + "}\n" + "]\n" + "", + Conf1 = <>, + {ok, ConfMap1} = hocon:binary(Conf1, #{format => richmap}), + ?assertThrow( + {emqx_conf_schema, [ + #{ + kind := validation_error, + reason := integrity_validation_failure, + result := _, + validation_name := check_http_headers + } + ]}, + hocon_tconf:generate(emqx_conf_schema, ConfMap1) + ), + ok. + doc_gen_test() -> %% the json file too large to encode. 
{ From 2aef9ca21544e70f7a4917e3769bebd44d12ebf7 Mon Sep 17 00:00:00 2001 From: Zhongwen Deng Date: Wed, 19 Apr 2023 17:10:07 +0800 Subject: [PATCH 092/263] chore: add changlog for authn_http validation --- apps/emqx_authn/test/emqx_authn_https_SUITE.erl | 17 ----------------- changes/ce/fix-10449.en.md | 2 ++ 2 files changed, 2 insertions(+), 17 deletions(-) create mode 100644 changes/ce/fix-10449.en.md diff --git a/apps/emqx_authn/test/emqx_authn_https_SUITE.erl b/apps/emqx_authn/test/emqx_authn_https_SUITE.erl index f23a160d1..c4315b69f 100644 --- a/apps/emqx_authn/test/emqx_authn_https_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_https_SUITE.erl @@ -114,22 +114,6 @@ t_create_invalid_version(_Config) -> emqx_access_control:authenticate(?CREDENTIALS) ). -t_create_disable_ssl_opts_when_https(_Config) -> - {ok, _} = create_https_auth_with_ssl_opts( - #{ - <<"server_name_indication">> => <<"authn-server">>, - <<"verify">> => <<"verify_peer">>, - <<"versions">> => [<<"tlsv1.2">>], - <<"ciphers">> => [<<"ECDHE-RSA-AES256-GCM-SHA384">>], - <<"enable">> => <<"false">> - } - ), - - ?assertEqual( - {error, not_authorized}, - emqx_access_control:authenticate(?CREDENTIALS) - ). - t_create_invalid_ciphers(_Config) -> {ok, _} = create_https_auth_with_ssl_opts( #{ @@ -151,7 +135,6 @@ t_create_invalid_ciphers(_Config) -> create_https_auth_with_ssl_opts(SpecificSSLOpts) -> AuthConfig = raw_https_auth_config(SpecificSSLOpts), - ct:pal("111:~p~n", [AuthConfig]), emqx:update_config(?PATH, {create_authenticator, ?GLOBAL, AuthConfig}). raw_https_auth_config(SpecificSSLOpts) -> diff --git a/changes/ce/fix-10449.en.md b/changes/ce/fix-10449.en.md new file mode 100644 index 000000000..e10b52fb4 --- /dev/null +++ b/changes/ce/fix-10449.en.md @@ -0,0 +1,2 @@ +Validate the ssl_options and header configurations when creating authentication http (`authn_http`). +Prior to this, incorrect ssl_options configuration could result in successful creation but the entire authn being unusable. From 1e54d23d316bcf137ee63399018a15e2ed31982b Mon Sep 17 00:00:00 2001 From: Zhongwen Deng Date: Thu, 20 Apr 2023 18:20:51 +0800 Subject: [PATCH 093/263] test: add a test for authn {} --- .../emqx_conf/test/emqx_conf_schema_tests.erl | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/apps/emqx_conf/test/emqx_conf_schema_tests.erl b/apps/emqx_conf/test/emqx_conf_schema_tests.erl index 79307c803..e3033e4a7 100644 --- a/apps/emqx_conf/test/emqx_conf_schema_tests.erl +++ b/apps/emqx_conf/test/emqx_conf_schema_tests.erl @@ -110,6 +110,37 @@ authn_validations_test() -> ]}, hocon_tconf:generate(emqx_conf_schema, ConfMap1) ), + BadHeader2 = + "" + "\n" + "authentication = \n" + "{backend = \"http\"\n" + "body {password = \"${password}\", username = \"${username}\"}\n" + "connect_timeout = \"15s\"\n" + "enable_pipelining = 100\n" + "headers {\"content-type\" = \"application/json\"}\n" + "mechanism = \"password_based\"\n" + "method = \"get\"\n" + "pool_size = 8\n" + "request_timeout = \"5s\"\n" + "ssl {enable = false, verify = \"verify_peer\"}\n" + "url = \"http://127.0.0.1:8080\"\n" + "}\n" + "\n" + "", + Conf2 = <>, + {ok, ConfMap2} = hocon:binary(Conf2, #{format => richmap}), + ?assertThrow( + {emqx_conf_schema, [ + #{ + kind := validation_error, + reason := integrity_validation_failure, + result := _, + validation_name := check_http_headers + } + ]}, + hocon_tconf:generate(emqx_conf_schema, ConfMap2) + ), ok. 
doc_gen_test() -> From ad6090a77881ae85b0f0b2a84c61f4f6019a6710 Mon Sep 17 00:00:00 2001 From: zhongwencool Date: Fri, 21 Apr 2023 11:21:05 +0800 Subject: [PATCH 094/263] chore: update changes/ce/fix-10449.en.md Co-authored-by: JianBo He --- changes/ce/fix-10449.en.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changes/ce/fix-10449.en.md b/changes/ce/fix-10449.en.md index e10b52fb4..005dea73c 100644 --- a/changes/ce/fix-10449.en.md +++ b/changes/ce/fix-10449.en.md @@ -1,2 +1,2 @@ Validate the ssl_options and header configurations when creating authentication http (`authn_http`). -Prior to this, incorrect ssl_options configuration could result in successful creation but the entire authn being unusable. +Prior to this, incorrect `ssl` configuration could result in successful creation but the entire authn being unusable. From fdf9b2a3837fad852252df64f7fce1334c760147 Mon Sep 17 00:00:00 2001 From: Zhongwen Deng Date: Fri, 21 Apr 2023 11:54:11 +0800 Subject: [PATCH 095/263] chore: apply review suggestions --- .../src/simple_authn/emqx_authn_http.erl | 28 ++- apps/emqx_authz/src/emqx_authz.app.src | 2 +- .../emqx_conf/test/emqx_conf_schema_tests.erl | 186 ++++++++---------- .../emqx_connector/src/emqx_connector.app.src | 2 +- apps/emqx_prometheus/TODO | 2 - 5 files changed, 101 insertions(+), 119 deletions(-) delete mode 100644 apps/emqx_prometheus/TODO diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl index 33bd07efc..eddad92a3 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl @@ -264,10 +264,9 @@ transform_header_name(Headers) -> ). check_ssl_opts(Conf) -> - case get_conf_val("url", Conf) of - undefined -> - ok; - Url -> + case is_backend_http(Conf) of + true -> + Url = get_conf_val("url", Conf), {BaseUrl, _Path, _Query} = parse_url(Url), case BaseUrl of <<"https://", _/binary>> -> @@ -279,14 +278,15 @@ check_ssl_opts(Conf) -> end; <<"http://", _/binary>> -> ok - end + end; + false -> + ok end. check_headers(Conf) -> - case get_conf_val("headers", Conf) of - undefined -> - ok; - Headers -> + case is_backend_http(Conf) of + true -> + Headers = get_conf_val("headers", Conf), case to_bin(get_conf_val("method", Conf)) of <<"post">> -> ok; @@ -295,7 +295,15 @@ check_headers(Conf) -> false -> ok; true -> <<"HTTP GET requests cannot include content-type header.">> end - end + end; + false -> + ok + end. + +is_backend_http(Conf) -> + case get_conf_val("backend", Conf) of + http -> true; + _ -> false end. parse_url(Url) -> diff --git a/apps/emqx_authz/src/emqx_authz.app.src b/apps/emqx_authz/src/emqx_authz.app.src index a17ce5dea..dd658a6aa 100644 --- a/apps/emqx_authz/src/emqx_authz.app.src +++ b/apps/emqx_authz/src/emqx_authz.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_authz, [ {description, "An OTP application"}, - {vsn, "0.1.17"}, + {vsn, "0.1.18"}, {registered, []}, {mod, {emqx_authz_app, []}}, {applications, [ diff --git a/apps/emqx_conf/test/emqx_conf_schema_tests.erl b/apps/emqx_conf/test/emqx_conf_schema_tests.erl index e3033e4a7..667d1766f 100644 --- a/apps/emqx_conf/test/emqx_conf_schema_tests.erl +++ b/apps/emqx_conf/test/emqx_conf_schema_tests.erl @@ -5,27 +5,28 @@ -module(emqx_conf_schema_tests). -include_lib("eunit/include/eunit.hrl"). 
+ +%% erlfmt-ignore -define(BASE_CONF, - "" - "\n" - " node {\n" - " name = \"emqx1@127.0.0.1\"\n" - " cookie = \"emqxsecretcookie\"\n" - " data_dir = \"data\"\n" - " }\n" - " cluster {\n" - " name = emqxcl\n" - " discovery_strategy = static\n" - " static.seeds = ~p\n" - " core_nodes = ~p\n" - " }\n" - "" -). + """ + node { + name = \"emqx1@127.0.0.1\" + cookie = \"emqxsecretcookie\" + data_dir = \"data\" + } + cluster { + name = emqxcl + discovery_strategy = static + static.seeds = ~p + core_nodes = ~p + } + """). + array_nodes_test() -> ExpectNodes = ['emqx1@127.0.0.1', 'emqx2@127.0.0.1'], lists:foreach( fun(Nodes) -> - ConfFile = iolist_to_binary(io_lib:format(?BASE_CONF, [Nodes, Nodes])), + ConfFile = to_bin(?BASE_CONF, [Nodes, Nodes]), {ok, Conf} = hocon:binary(ConfFile, #{format => richmap}), ConfList = hocon_tconf:generate(emqx_conf_schema, Conf), ClusterDiscovery = proplists:get_value( @@ -46,101 +47,73 @@ array_nodes_test() -> ), ok. +%% erlfmt-ignore +-define(BASE_AUTHN_ARRAY, + """ + authentication = [ + {backend = \"http\" + body {password = \"${password}\", username = \"${username}\"} + connect_timeout = \"15s\" + enable_pipelining = 100 + headers {\"content-type\" = \"application/json\"} + mechanism = \"password_based\" + method = \"~p\" + pool_size = 8 + request_timeout = \"5s\" + ssl {enable = ~p, verify = \"verify_peer\"} + url = \"~ts\" + } + ] + """ +). + +-define(ERROR(Reason), + {emqx_conf_schema, [ + #{ + kind := validation_error, + reason := integrity_validation_failure, + result := _, + validation_name := Reason + } + ]} +). + authn_validations_test() -> - BaseConf = iolist_to_binary(io_lib:format(?BASE_CONF, ["emqx1@127.0.0.1", "emqx1@127.0.0.1"])), - DisableSSLWithHttps = - "" - "\n" - "authentication = [\n" - "{backend = \"http\"\n" - "body {password = \"${password}\", username = \"${username}\"}\n" - "connect_timeout = \"15s\"\n" - "enable_pipelining = 100\n" - "headers {\"content-type\" = \"application/json\"}\n" - "mechanism = \"password_based\"\n" - "method = \"post\"\n" - "pool_size = 8\n" - "request_timeout = \"5s\"\n" - "ssl {enable = false, verify = \"verify_peer\"}\n" - "url = \"https://127.0.0.1:8080\"\n" - "}\n" - "]\n" - "", - Conf = <>, - {ok, ConfMap} = hocon:binary(Conf, #{format => richmap}), - ?assertThrow( - {emqx_conf_schema, [ - #{ - kind := validation_error, - reason := integrity_validation_failure, - result := _, - validation_name := check_http_ssl_opts - } - ]}, - hocon_tconf:generate(emqx_conf_schema, ConfMap) - ), - BadHeader = - "" - "\n" - "authentication = [\n" - "{backend = \"http\"\n" - "body {password = \"${password}\", username = \"${username}\"}\n" - "connect_timeout = \"15s\"\n" - "enable_pipelining = 100\n" - "headers {\"content-type\" = \"application/json\"}\n" - "mechanism = \"password_based\"\n" - "method = \"get\"\n" - "pool_size = 8\n" - "request_timeout = \"5s\"\n" - "ssl {enable = false, verify = \"verify_peer\"}\n" - "url = \"http://127.0.0.1:8080\"\n" - "}\n" - "]\n" - "", - Conf1 = <>, + BaseConf = to_bin(?BASE_CONF, ["emqx1@127.0.0.1", "emqx1@127.0.0.1"]), + + OKHttps = to_bin(?BASE_AUTHN_ARRAY, [post, true, <<"https://127.0.0.1:8080">>]), + Conf0 = <>, + {ok, ConfMap0} = hocon:binary(Conf0, #{format => richmap}), + ?assert(is_list(hocon_tconf:generate(emqx_conf_schema, ConfMap0))), + + OKHttp = to_bin(?BASE_AUTHN_ARRAY, [post, false, <<"http://127.0.0.1:8080">>]), + Conf1 = <>, {ok, ConfMap1} = hocon:binary(Conf1, #{format => richmap}), - ?assertThrow( - {emqx_conf_schema, [ - #{ - kind := validation_error, - reason 
:= integrity_validation_failure, - result := _, - validation_name := check_http_headers - } - ]}, - hocon_tconf:generate(emqx_conf_schema, ConfMap1) - ), - BadHeader2 = - "" - "\n" - "authentication = \n" - "{backend = \"http\"\n" - "body {password = \"${password}\", username = \"${username}\"}\n" - "connect_timeout = \"15s\"\n" - "enable_pipelining = 100\n" - "headers {\"content-type\" = \"application/json\"}\n" - "mechanism = \"password_based\"\n" - "method = \"get\"\n" - "pool_size = 8\n" - "request_timeout = \"5s\"\n" - "ssl {enable = false, verify = \"verify_peer\"}\n" - "url = \"http://127.0.0.1:8080\"\n" - "}\n" - "\n" - "", - Conf2 = <>, + ?assert(is_list(hocon_tconf:generate(emqx_conf_schema, ConfMap1))), + + DisableSSLWithHttps = to_bin(?BASE_AUTHN_ARRAY, [post, false, <<"https://127.0.0.1:8080">>]), + Conf2 = <>, {ok, ConfMap2} = hocon:binary(Conf2, #{format => richmap}), ?assertThrow( - {emqx_conf_schema, [ - #{ - kind := validation_error, - reason := integrity_validation_failure, - result := _, - validation_name := check_http_headers - } - ]}, + ?ERROR(check_http_ssl_opts), hocon_tconf:generate(emqx_conf_schema, ConfMap2) ), + + BadHeader = to_bin(?BASE_AUTHN_ARRAY, [get, true, <<"https://127.0.0.1:8080">>]), + Conf3 = <>, + {ok, ConfMap3} = hocon:binary(Conf3, #{format => richmap}), + ?assertThrow( + ?ERROR(check_http_headers), + hocon_tconf:generate(emqx_conf_schema, ConfMap3) + ), + + BadHeaderWithTuple = binary:replace(BadHeader, [<<"[">>, <<"]">>], <<"">>, [global]), + Conf4 = <>, + {ok, ConfMap4} = hocon:binary(Conf4, #{format => richmap}), + ?assertThrow( + ?ERROR(check_http_headers), + hocon_tconf:generate(emqx_conf_schema, ConfMap4) + ), ok. doc_gen_test() -> @@ -163,3 +136,6 @@ doc_gen_test() -> ok end }. + +to_bin(Format, Args) -> + iolist_to_binary(io_lib:format(Format, Args)). diff --git a/apps/emqx_connector/src/emqx_connector.app.src b/apps/emqx_connector/src/emqx_connector.app.src index 38ca230b2..c0a19824c 100644 --- a/apps/emqx_connector/src/emqx_connector.app.src +++ b/apps/emqx_connector/src/emqx_connector.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_connector, [ {description, "EMQX Data Integration Connectors"}, - {vsn, "0.1.20"}, + {vsn, "0.1.21"}, {registered, []}, {mod, {emqx_connector_app, []}}, {applications, [ diff --git a/apps/emqx_prometheus/TODO b/apps/emqx_prometheus/TODO deleted file mode 100644 index a868fba7e..000000000 --- a/apps/emqx_prometheus/TODO +++ /dev/null @@ -1,2 +0,0 @@ -1. Add more VM Metrics -2. 
Add more emqx Metrics From bdce32e713f23234ecf650b80eb3ecaae46c585b Mon Sep 17 00:00:00 2001 From: JianBo He Date: Wed, 19 Apr 2023 13:05:25 +0800 Subject: [PATCH 096/263] refactor(cassandra): move cassandra bridge into its own app --- apps/emqx_bridge_cassandra/docker-ct | 2 + apps/emqx_bridge_cassandra/rebar.config | 11 +++++ .../src/emqx_bridge_cassandra.app.src | 2 +- .../src/emqx_bridge_cassandra.erl | 4 +- .../src/emqx_bridge_cassandra_impl.erl | 2 +- .../test/emqx_bridge_cassandra_SUITE.erl | 6 +-- .../test/emqx_bridge_cassandra_impl_SUITE.erl | 4 +- lib-ee/emqx_ee_bridge/docker-ct | 1 - lib-ee/emqx_ee_bridge/rebar.config | 3 +- lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl | 8 ++-- lib-ee/emqx_ee_connector/docker-ct | 1 - .../src/emqx_ee_connector.app.src | 1 - mix.exs | 13 ++++++ rebar.config.erl | 26 ++++++++++++ ...assa.hocon => emqx_bridge_cassandra.hocon} | 2 +- ...hocon => emqx_bridge_cassandra_impl.hocon} | 2 +- scripts/ct/run.sh | 42 +++++++++++++++++++ 17 files changed, 110 insertions(+), 20 deletions(-) create mode 100644 apps/emqx_bridge_cassandra/docker-ct create mode 100644 apps/emqx_bridge_cassandra/rebar.config rename lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_cassa.erl => apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl (97%) rename lib-ee/emqx_ee_connector/src/emqx_ee_connector_cassa.erl => apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_impl.erl (99%) rename lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl => apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl (99%) rename lib-ee/emqx_ee_connector/test/emqx_ee_connector_cassa_SUITE.erl => apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_impl_SUITE.erl (98%) rename rel/i18n/{emqx_ee_bridge_cassa.hocon => emqx_bridge_cassandra.hocon} (97%) rename rel/i18n/{emqx_ee_connector_cassa.hocon => emqx_bridge_cassandra_impl.hocon} (92%) diff --git a/apps/emqx_bridge_cassandra/docker-ct b/apps/emqx_bridge_cassandra/docker-ct new file mode 100644 index 000000000..2626b4068 --- /dev/null +++ b/apps/emqx_bridge_cassandra/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +cassandra diff --git a/apps/emqx_bridge_cassandra/rebar.config b/apps/emqx_bridge_cassandra/rebar.config new file mode 100644 index 000000000..b8bfc7dd6 --- /dev/null +++ b/apps/emqx_bridge_cassandra/rebar.config @@ -0,0 +1,11 @@ +%% -*- mode: erlang; -*- +{erl_opts, [debug_info]}. +{deps, [ {ecql, {git, "https://github.com/emqx/ecql.git", {tag, "v0.5.1"}}} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + {apps, [emqx_bridge_cassandra]} +]}. 
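As a quick sanity check that the extraction is self-contained, the new application should start on its own once compiled; a minimal sketch (module and function names are illustrative, not part of the patch):

-module(cassandra_app_check).
-export([check/0]).

%% Starts emqx_bridge_cassandra together with the applications it declares
%% (kernel, stdlib, ecql) and returns the version from its .app file.
check() ->
    {ok, _Started} = application:ensure_all_started(emqx_bridge_cassandra),
    {ok, Vsn} = application:get_key(emqx_bridge_cassandra, vsn),
    Vsn.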
diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src index 1ed65ea9f..58e4a1984 100644 --- a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src @@ -2,7 +2,7 @@ {description, "EMQX Enterprise Cassandra Bridge"}, {vsn, "0.1.0"}, {registered, []}, - {applications, [kernel, stdlib]}, + {applications, [kernel, stdlib, ecql]}, {env, []}, {modules, []}, {links, []} diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_cassa.erl b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl similarity index 97% rename from lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_cassa.erl rename to apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl index 26c6de04d..32dc11839 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_cassa.erl +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl @@ -1,7 +1,7 @@ %%-------------------------------------------------------------------- %% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_bridge_cassa). +-module(emqx_bridge_cassandra). -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). @@ -88,7 +88,7 @@ fields("config") -> #{desc => ?DESC("local_topic"), default => undefined} )} ] ++ emqx_resource_schema:fields("resource_opts") ++ - (emqx_ee_connector_cassa:fields(config) -- + (emqx_bridge_cassandra_impl:fields(config) -- emqx_connector_schema_lib:prepare_statement_fields()); fields("post") -> fields("post", cassandra); diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_cassa.erl b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_impl.erl similarity index 99% rename from lib-ee/emqx_ee_connector/src/emqx_ee_connector_cassa.erl rename to apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_impl.erl index 397532f47..8fe329a98 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_cassa.erl +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_impl.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_connector_cassa). +-module(emqx_bridge_cassandra_impl). -behaviour(emqx_resource). diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl similarity index 99% rename from lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl rename to apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl index 4711d1981..482f2c1f2 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl +++ b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_bridge_cassa_SUITE). +-module(emqx_bridge_cassandra_SUITE). -compile(nowarn_export_all). -compile(export_all). 
@@ -57,7 +57,7 @@ %% CASSA_TCP_HOST=127.0.0.1 CASSA_TCP_PORT=19042 \ %% CASSA_TLS_HOST=127.0.0.1 CASSA_TLS_PORT=19142 \ %% PROXY_HOST=127.0.0.1 ./rebar3 as test ct -c -v --name ct@127.0.0.1 \ -%% --suite lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl +%% --suite apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl %% %%------------------------------------------------------------------------------ @@ -590,7 +590,7 @@ t_missing_data(Config) -> {ok, _}, create_bridge(Config) ), - %% emqx_ee_connector_cassa will send missed data as a `null` atom + %% emqx_bridge_cassandra_impl will send missed data as a `null` atom %% to ecql driver ?check_trace( begin diff --git a/lib-ee/emqx_ee_connector/test/emqx_ee_connector_cassa_SUITE.erl b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_impl_SUITE.erl similarity index 98% rename from lib-ee/emqx_ee_connector/test/emqx_ee_connector_cassa_SUITE.erl rename to apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_impl_SUITE.erl index f2647d756..faaee3c63 100644 --- a/lib-ee/emqx_ee_connector/test/emqx_ee_connector_cassa_SUITE.erl +++ b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_impl_SUITE.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_connector_cassa_SUITE). +-module(emqx_bridge_cassandra_impl_SUITE). -compile(nowarn_export_all). -compile(export_all). @@ -16,7 +16,7 @@ %% Cassandra server defined at `.ci/docker-compose-file/docker-compose-cassandra-tcp.yaml` %% You can change it to `127.0.0.1`, if you run this SUITE locally -define(CASSANDRA_HOST, "cassandra"). --define(CASSANDRA_RESOURCE_MOD, emqx_ee_connector_cassa). +-define(CASSANDRA_RESOURCE_MOD, emqx_bridge_cassandra_impl). %% This test SUITE requires a running cassandra instance. If you don't want to %% bring up the whole CI infrastuctucture with the `scripts/ct/run.sh` script diff --git a/lib-ee/emqx_ee_bridge/docker-ct b/lib-ee/emqx_ee_bridge/docker-ct index 35d6b9d5b..aa19a495f 100644 --- a/lib-ee/emqx_ee_bridge/docker-ct +++ b/lib-ee/emqx_ee_bridge/docker-ct @@ -10,5 +10,4 @@ tdengine clickhouse dynamo rocketmq -cassandra sqlserver diff --git a/lib-ee/emqx_ee_bridge/rebar.config b/lib-ee/emqx_ee_bridge/rebar.config index b26df658a..358ff3bc8 100644 --- a/lib-ee/emqx_ee_bridge/rebar.config +++ b/lib-ee/emqx_ee_bridge/rebar.config @@ -1,6 +1,5 @@ {erl_opts, [debug_info]}. 
-{deps, [ {ecql, {git, "https://github.com/emqx/ecql.git", {tag, "v0.5.1"}}} - , {emqx_connector, {path, "../../apps/emqx_connector"}} +{deps, [ {emqx_connector, {path, "../../apps/emqx_connector"}} , {emqx_resource, {path, "../../apps/emqx_resource"}} , {emqx_bridge, {path, "../../apps/emqx_bridge"}} , {emqx_utils, {path, "../emqx_utils"}} diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl index 9465464d9..e8be79cdc 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl @@ -17,6 +17,7 @@ api_schemas(Method) -> ref(emqx_bridge_gcp_pubsub, Method), ref(emqx_bridge_kafka, Method ++ "_consumer"), ref(emqx_bridge_kafka, Method ++ "_producer"), + ref(emqx_bridge_cassandra, Method), ref(emqx_ee_bridge_mysql, Method), ref(emqx_ee_bridge_pgsql, Method), ref(emqx_ee_bridge_mongodb, Method ++ "_rs"), @@ -34,13 +35,13 @@ api_schemas(Method) -> ref(emqx_ee_bridge_clickhouse, Method), ref(emqx_ee_bridge_dynamo, Method), ref(emqx_ee_bridge_rocketmq, Method), - ref(emqx_ee_bridge_cassa, Method), ref(emqx_ee_bridge_sqlserver, Method) ]. schema_modules() -> [ emqx_bridge_kafka, + emqx_bridge_cassandra, emqx_ee_bridge_hstreamdb, emqx_bridge_gcp_pubsub, emqx_ee_bridge_influxdb, @@ -54,7 +55,6 @@ schema_modules() -> emqx_ee_bridge_clickhouse, emqx_ee_bridge_dynamo, emqx_ee_bridge_rocketmq, - emqx_ee_bridge_cassa, emqx_ee_bridge_sqlserver ]. @@ -75,6 +75,7 @@ resource_type(kafka_consumer) -> emqx_bridge_kafka_impl_consumer; %% TODO: rename this to `kafka_producer' after alias support is added %% to hocon; keeping this as just `kafka' for backwards compatibility. resource_type(kafka) -> emqx_bridge_kafka_impl_producer; +resource_type(cassandra) -> emqx_bridge_cassandra_impl; resource_type(hstreamdb) -> emqx_ee_connector_hstreamdb; resource_type(gcp_pubsub) -> emqx_bridge_gcp_pubsub_connector; resource_type(mongodb_rs) -> emqx_ee_connector_mongodb; @@ -93,7 +94,6 @@ resource_type(tdengine) -> emqx_ee_connector_tdengine; resource_type(clickhouse) -> emqx_ee_connector_clickhouse; resource_type(dynamo) -> emqx_ee_connector_dynamo; resource_type(rocketmq) -> emqx_ee_connector_rocketmq; -resource_type(cassandra) -> emqx_ee_connector_cassa; resource_type(sqlserver) -> emqx_ee_connector_sqlserver. 
fields(bridges) -> @@ -148,7 +148,7 @@ fields(bridges) -> )}, {cassandra, mk( - hoconsc:map(name, ref(emqx_ee_bridge_cassa, "config")), + hoconsc:map(name, ref(emqx_bridge_cassandra, "config")), #{ desc => <<"Cassandra Bridge Config">>, required => false diff --git a/lib-ee/emqx_ee_connector/docker-ct b/lib-ee/emqx_ee_connector/docker-ct index fc8e75e68..cb2f6f028 100644 --- a/lib-ee/emqx_ee_connector/docker-ct +++ b/lib-ee/emqx_ee_connector/docker-ct @@ -1,5 +1,4 @@ toxiproxy influxdb clickhouse -cassandra sqlserver diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src b/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src index ced7ae86a..82f556bdb 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src @@ -12,7 +12,6 @@ clickhouse, erlcloud, rocketmq, - ecql, odbc ]}, {env, []}, diff --git a/mix.exs b/mix.exs index f3149a584..c32fa798d 100644 --- a/mix.exs +++ b/mix.exs @@ -156,6 +156,19 @@ defmodule EMQXUmbrella.MixProject do MapSet.new([ :emqx_bridge_kafka, :emqx_bridge_gcp_pubsub + :emqx_bridge_cassandra, + :emqx_bridge_clickhouse, + :emqx_bridge_dynamo, + :emqx_bridge_hstreamdb, + :emqx_bridge_influxdb, + :emqx_bridge_matrix, + :emqx_bridge_mongodb, + :emqx_bridge_mysql, + :emqx_bridge_pgsql, + :emqx_bridge_redis, + :emqx_bridge_rocketmq, + :emqx_bridge_tdengine, + :emqx_bridge_timescale ]) end diff --git a/rebar.config.erl b/rebar.config.erl index 9bdbfb848..88471c39d 100644 --- a/rebar.config.erl +++ b/rebar.config.erl @@ -80,6 +80,19 @@ is_enterprise(ee) -> true. is_community_umbrella_app("apps/emqx_bridge_kafka") -> false; is_community_umbrella_app("apps/emqx_bridge_gcp_pubsub") -> false; +is_community_umbrella_app("apps/emqx_bridge_cassandra") -> false; +is_community_umbrella_app("apps/emqx_bridge_clickhouse") -> false; +is_community_umbrella_app("apps/emqx_bridge_dynamo") -> false; +is_community_umbrella_app("apps/emqx_bridge_hstreamdb") -> false; +is_community_umbrella_app("apps/emqx_bridge_influxdb") -> false; +is_community_umbrella_app("apps/emqx_bridge_matrix") -> false; +is_community_umbrella_app("apps/emqx_bridge_mongodb") -> false; +is_community_umbrella_app("apps/emqx_bridge_mysql") -> false; +is_community_umbrella_app("apps/emqx_bridge_pgsql") -> false; +is_community_umbrella_app("apps/emqx_bridge_redis") -> false; +is_community_umbrella_app("apps/emqx_bridge_rocketmq") -> false; +is_community_umbrella_app("apps/emqx_bridge_tdengine") -> false; +is_community_umbrella_app("apps/emqx_bridge_timescale") -> false; is_community_umbrella_app(_) -> true. 
is_jq_supported() -> @@ -441,6 +454,19 @@ relx_apps_per_edition(ee) -> emqx_ee_bridge, emqx_bridge_kafka, emqx_bridge_gcp_pubsub, + emqx_bridge_cassandra, + emqx_bridge_clickhouse, + emqx_bridge_dynamo, + emqx_bridge_hstreamdb, + emqx_bridge_influxdb, + emqx_bridge_matrix, + emqx_bridge_mongodb, + emqx_bridge_mysql, + emqx_bridge_pgsql, + emqx_bridge_redis, + emqx_bridge_rocketmq, + emqx_bridge_tdengine, + emqx_bridge_timescale, emqx_ee_schema_registry ]; relx_apps_per_edition(ce) -> diff --git a/rel/i18n/emqx_ee_bridge_cassa.hocon b/rel/i18n/emqx_bridge_cassandra.hocon similarity index 97% rename from rel/i18n/emqx_ee_bridge_cassa.hocon rename to rel/i18n/emqx_bridge_cassandra.hocon index d86c95a5f..d598d3921 100644 --- a/rel/i18n/emqx_ee_bridge_cassa.hocon +++ b/rel/i18n/emqx_bridge_cassandra.hocon @@ -1,4 +1,4 @@ -emqx_ee_bridge_cassa { +emqx_bridge_cassandra { config_enable.desc: """Enable or disable this bridge""" diff --git a/rel/i18n/emqx_ee_connector_cassa.hocon b/rel/i18n/emqx_bridge_cassandra_impl.hocon similarity index 92% rename from rel/i18n/emqx_ee_connector_cassa.hocon rename to rel/i18n/emqx_bridge_cassandra_impl.hocon index bd5fb544c..91157fc09 100644 --- a/rel/i18n/emqx_ee_connector_cassa.hocon +++ b/rel/i18n/emqx_bridge_cassandra_impl.hocon @@ -1,4 +1,4 @@ -emqx_ee_connector_cassa { +emqx_bridge_cassa_impl { keyspace.desc: """Keyspace name to connect to.""" diff --git a/scripts/ct/run.sh b/scripts/ct/run.sh index 4e79476e0..aca83ae86 100755 --- a/scripts/ct/run.sh +++ b/scripts/ct/run.sh @@ -112,6 +112,48 @@ case "${WHICH_APP}" in ## ensure enterprise profile when testing ee applications export PROFILE='emqx-enterprise' ;; + apps/emqx_bridge_cassandra) + export PROFILE='emqx-enterprise' + ;; + apps/emqx_bridge_clickhouse) + export PROFILE='emqx-enterprise' + ;; + apps/emqx_bridge_dynamo) + export PROFILE='emqx-enterprise' + ;; + apps/emqx_bridge_gcp_pubsub) + export PROFILE='emqx-enterprise' + ;; + apps/emqx_bridge_hstreamdb) + export PROFILE='emqx-enterprise' + ;; + apps/emqx_bridge_influxdb) + export PROFILE='emqx-enterprise' + ;; + apps/emqx_bridge_matrix) + export PROFILE='emqx-enterprise' + ;; + apps/emqx_bridge_mongodb) + export PROFILE='emqx-enterprise' + ;; + apps/emqx_bridge_mysql) + export PROFILE='emqx-enterprise' + ;; + apps/emqx_bridge_pgsql) + export PROFILE='emqx-enterprise' + ;; + apps/emqx_bridge_redis) + export PROFILE='emqx-enterprise' + ;; + apps/emqx_bridge_rocketmq) + export PROFILE='emqx-enterprise' + ;; + apps/emqx_bridge_tdengine) + export PROFILE='emqx-enterprise' + ;; + apps/emqx_bridge_timescale) + export PROFILE='emqx-enterprise' + ;; *) export PROFILE="${PROFILE:-emqx}" ;; From 5cc28a7b4539affd0de71420911e7437cc54978c Mon Sep 17 00:00:00 2001 From: JianBo He Date: Wed, 19 Apr 2023 13:21:31 +0800 Subject: [PATCH 097/263] chore: fix mix.exs checking --- .../include/emqx_bridge_cassandra.hrl | 5 +++++ .../src/emqx_bridge_cassandra_impl.erl | 2 +- .../test/emqx_bridge_cassandra_impl_SUITE.erl | 4 ++-- .../emqx_ee_connector/include/emqx_ee_connector.hrl | 1 - mix.exs | 13 +++++++++++++ rel/i18n/emqx_bridge_cassandra_impl.hocon | 2 +- 6 files changed, 22 insertions(+), 5 deletions(-) create mode 100644 apps/emqx_bridge_cassandra/include/emqx_bridge_cassandra.hrl diff --git a/apps/emqx_bridge_cassandra/include/emqx_bridge_cassandra.hrl b/apps/emqx_bridge_cassandra/include/emqx_bridge_cassandra.hrl new file mode 100644 index 000000000..eef7c5d2b --- /dev/null +++ b/apps/emqx_bridge_cassandra/include/emqx_bridge_cassandra.hrl @@ -0,0 +1,5 @@ 
+%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-define(CASSANDRA_DEFAULT_PORT, 9042). diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_impl.erl b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_impl.erl index 8fe329a98..a4247a8c7 100644 --- a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_impl.erl +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_impl.erl @@ -7,7 +7,7 @@ -behaviour(emqx_resource). -include_lib("emqx_connector/include/emqx_connector.hrl"). --include_lib("emqx_ee_connector/include/emqx_ee_connector.hrl"). +-include("emqx_bridge_cassandra.hrl"). -include_lib("typerefl/include/types.hrl"). -include_lib("emqx/include/logger.hrl"). -include_lib("hocon/include/hoconsc.hrl"). diff --git a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_impl_SUITE.erl b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_impl_SUITE.erl index faaee3c63..db91ca956 100644 --- a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_impl_SUITE.erl +++ b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_impl_SUITE.erl @@ -7,8 +7,8 @@ -compile(nowarn_export_all). -compile(export_all). --include("emqx_connector.hrl"). --include("emqx_ee_connector.hrl"). +-include("emqx_bridge_cassandra.hrl"). +-include("emqx_connector/include/emqx_connector.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("emqx/include/emqx.hrl"). -include_lib("stdlib/include/assert.hrl"). diff --git a/lib-ee/emqx_ee_connector/include/emqx_ee_connector.hrl b/lib-ee/emqx_ee_connector/include/emqx_ee_connector.hrl index 2a91d2524..4b6fbbd92 100644 --- a/lib-ee/emqx_ee_connector/include/emqx_ee_connector.hrl +++ b/lib-ee/emqx_ee_connector/include/emqx_ee_connector.hrl @@ -3,4 +3,3 @@ %%------------------------------------------------------------------- -define(INFLUXDB_DEFAULT_PORT, 8086). --define(CASSANDRA_DEFAULT_PORT, 9042). 
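With the macro relocated, code inside the bridge app includes the app-local header instead of emqx_ee_connector.hrl; a minimal sketch (illustrative module name, not part of the patch):

-module(cassandra_port_example).
-include("emqx_bridge_cassandra.hrl").
-export([default_server/0]).

%% ?CASSANDRA_DEFAULT_PORT (9042) now comes from the bridge app's own header.
default_server() ->
    {"127.0.0.1", ?CASSANDRA_DEFAULT_PORT}.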
diff --git a/mix.exs b/mix.exs index c32fa798d..02c239a99 100644 --- a/mix.exs +++ b/mix.exs @@ -359,6 +359,19 @@ defmodule EMQXUmbrella.MixProject do emqx_ee_bridge: :permanent, emqx_bridge_kafka: :permanent, emqx_bridge_gcp_pubsub: :permanent, + emqx_bridge_cassandra: :permanent, + emqx_bridge_clickhouse: :permanent, + emqx_bridge_dynamo: :permanent, + emqx_bridge_hstreamdb: :permanent, + emqx_bridge_influxdb: :permanent, + emqx_bridge_matrix: :permanent, + emqx_bridge_mongodb: :permanent, + emqx_bridge_mysql: :permanent, + emqx_bridge_pgsql: :permanent, + emqx_bridge_redis: :permanent, + emqx_bridge_rocketmq: :permanent, + emqx_bridge_tdengine: :permanent, + emqx_bridge_timescale: :permanent, emqx_ee_schema_registry: :permanent ], else: [] diff --git a/rel/i18n/emqx_bridge_cassandra_impl.hocon b/rel/i18n/emqx_bridge_cassandra_impl.hocon index 91157fc09..03f389edd 100644 --- a/rel/i18n/emqx_bridge_cassandra_impl.hocon +++ b/rel/i18n/emqx_bridge_cassandra_impl.hocon @@ -1,4 +1,4 @@ -emqx_bridge_cassa_impl { +emqx_bridge_cassandra_impl { keyspace.desc: """Keyspace name to connect to.""" From cea0502160ddf9cd9aed83580685eb3d34dd25b7 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Fri, 21 Apr 2023 16:23:47 +0800 Subject: [PATCH 098/263] chore: update apps/emqx_conf/README.md Co-authored-by: ieQu1 <99872536+ieQu1@users.noreply.github.com> --- apps/emqx_conf/README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/apps/emqx_conf/README.md b/apps/emqx_conf/README.md index 726fd72cd..f1efe7987 100644 --- a/apps/emqx_conf/README.md +++ b/apps/emqx_conf/README.md @@ -2,8 +2,7 @@ This application provides configuration management capabilities for EMQX. -This includes, during compilation: -- Read all configuration schemas and generate the following files: +At compile time it reads all configuration schemas and generates the following files: * `config-en.md`: documentation for all configuration options. * `schema-en.json`: JSON description of all configuration schema options. * `emqx.conf.example`: an example of a complete configuration file. 
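The same `emqx_conf_schema` module behind this compile-time generation can also be exercised directly against a configuration snippet, mirroring the eunit tests earlier in this series; a minimal sketch (illustrative module name, not part of the patches):

-module(conf_check_example).
-export([check/1]).

%% Parses a HOCON binary and runs it through the root schema; this throws
%% {emqx_conf_schema, Errors} when an integrity validation such as
%% check_http_ssl_opts fails, exactly as asserted in emqx_conf_schema_tests.
check(HoconBin) when is_binary(HoconBin) ->
    {ok, RichMap} = hocon:binary(HoconBin, #{format => richmap}),
    _ = hocon_tconf:generate(emqx_conf_schema, RichMap),
    ok.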
From b270623c46fc342f71b201b3c64ce25eb20db74a Mon Sep 17 00:00:00 2001 From: JianBo He Date: Fri, 21 Apr 2023 14:48:31 +0800 Subject: [PATCH 099/263] chore: rename cassandra_impl to cassandra_connector --- apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl | 2 +- ...cassandra_impl.erl => emqx_bridge_cassandra_connector.erl} | 2 +- .../test/emqx_bridge_cassandra_SUITE.erl | 2 +- ...pl_SUITE.erl => emqx_bridge_cassandra_connector_SUITE.erl} | 4 ++-- lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl | 2 +- mix.exs | 2 +- ...andra_impl.hocon => emqx_bridge_cassandra_connector.hocon} | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) rename apps/emqx_bridge_cassandra/src/{emqx_bridge_cassandra_impl.erl => emqx_bridge_cassandra_connector.erl} (99%) rename apps/emqx_bridge_cassandra/test/{emqx_bridge_cassandra_impl_SUITE.erl => emqx_bridge_cassandra_connector_SUITE.erl} (98%) rename rel/i18n/{emqx_bridge_cassandra_impl.hocon => emqx_bridge_cassandra_connector.hocon} (90%) diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl index 32dc11839..e8f7d50ce 100644 --- a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl @@ -88,7 +88,7 @@ fields("config") -> #{desc => ?DESC("local_topic"), default => undefined} )} ] ++ emqx_resource_schema:fields("resource_opts") ++ - (emqx_bridge_cassandra_impl:fields(config) -- + (emqx_bridge_cassandra_connector:fields(config) -- emqx_connector_schema_lib:prepare_statement_fields()); fields("post") -> fields("post", cassandra); diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_impl.erl b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl similarity index 99% rename from apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_impl.erl rename to apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl index a4247a8c7..cf6ddff9f 100644 --- a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_impl.erl +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_bridge_cassandra_impl). +-module(emqx_bridge_cassandra_connector). -behaviour(emqx_resource). 
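A minimal sanity check for the rename above, runnable from a remote console on a freshly built release; it simply assumes the old beam file is no longer shipped:

    %% Hedged sketch: verify the new module loads and the old name is gone.
    {module, emqx_bridge_cassandra_connector} =
        code:ensure_loaded(emqx_bridge_cassandra_connector),
    {error, nofile} = code:ensure_loaded(emqx_bridge_cassandra_impl).
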
diff --git a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl index 482f2c1f2..7865f0415 100644 --- a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl +++ b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl @@ -590,7 +590,7 @@ t_missing_data(Config) -> {ok, _}, create_bridge(Config) ), - %% emqx_bridge_cassandra_impl will send missed data as a `null` atom + %% emqx_bridge_cassandra_connector will send missed data as a `null` atom %% to ecql driver ?check_trace( begin diff --git a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_impl_SUITE.erl b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl similarity index 98% rename from apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_impl_SUITE.erl rename to apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl index db91ca956..f419283a8 100644 --- a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_impl_SUITE.erl +++ b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_bridge_cassandra_impl_SUITE). +-module(emqx_bridge_cassandra_connector_SUITE). -compile(nowarn_export_all). -compile(export_all). @@ -16,7 +16,7 @@ %% Cassandra server defined at `.ci/docker-compose-file/docker-compose-cassandra-tcp.yaml` %% You can change it to `127.0.0.1`, if you run this SUITE locally -define(CASSANDRA_HOST, "cassandra"). --define(CASSANDRA_RESOURCE_MOD, emqx_bridge_cassandra_impl). +-define(CASSANDRA_RESOURCE_MOD, emqx_bridge_cassandra_connector). %% This test SUITE requires a running cassandra instance. If you don't want to %% bring up the whole CI infrastuctucture with the `scripts/ct/run.sh` script diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl index e8be79cdc..7fdfbba99 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl @@ -75,7 +75,7 @@ resource_type(kafka_consumer) -> emqx_bridge_kafka_impl_consumer; %% TODO: rename this to `kafka_producer' after alias support is added %% to hocon; keeping this as just `kafka' for backwards compatibility. 
resource_type(kafka) -> emqx_bridge_kafka_impl_producer; -resource_type(cassandra) -> emqx_bridge_cassandra_impl; +resource_type(cassandra) -> emqx_bridge_cassandra_connector; resource_type(hstreamdb) -> emqx_ee_connector_hstreamdb; resource_type(gcp_pubsub) -> emqx_bridge_gcp_pubsub_connector; resource_type(mongodb_rs) -> emqx_ee_connector_mongodb; diff --git a/mix.exs b/mix.exs index 02c239a99..c5d6df804 100644 --- a/mix.exs +++ b/mix.exs @@ -155,7 +155,7 @@ defmodule EMQXUmbrella.MixProject do defp enterprise_umbrella_apps() do MapSet.new([ :emqx_bridge_kafka, - :emqx_bridge_gcp_pubsub + :emqx_bridge_gcp_pubsub, :emqx_bridge_cassandra, :emqx_bridge_clickhouse, :emqx_bridge_dynamo, diff --git a/rel/i18n/emqx_bridge_cassandra_impl.hocon b/rel/i18n/emqx_bridge_cassandra_connector.hocon similarity index 90% rename from rel/i18n/emqx_bridge_cassandra_impl.hocon rename to rel/i18n/emqx_bridge_cassandra_connector.hocon index 03f389edd..b149cce8a 100644 --- a/rel/i18n/emqx_bridge_cassandra_impl.hocon +++ b/rel/i18n/emqx_bridge_cassandra_connector.hocon @@ -1,4 +1,4 @@ -emqx_bridge_cassandra_impl { +emqx_bridge_cassandra_connector { keyspace.desc: """Keyspace name to connect to.""" From 701a1f65f994d6b152c341f235d749b2bdaa32ac Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Thu, 20 Apr 2023 13:57:47 +0200 Subject: [PATCH 100/263] chore: Hide config shared_dispatch_ack_enabled --- apps/emqx/src/emqx_schema.erl | 6 ++---- apps/emqx/src/emqx_shared_sub.erl | 4 ++-- apps/emqx/test/emqx_config_SUITE.erl | 1 - rel/i18n/emqx_schema.hocon | 6 +++--- rel/i18n/zh/emqx_schema.hocon | 5 +++-- 5 files changed, 10 insertions(+), 12 deletions(-) diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index ace6d3332..4c314456b 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -1489,10 +1489,8 @@ fields("broker") -> sc( boolean(), #{ - %% TODO: deprecated => {since, "5.1.0"} - %% in favor of session message re-dispatch at termination - %% we will stop supporting dispatch acks for shared - %% subscriptions. + deprecated => {since, "5.1.0"}, + importance => ?IMPORTANCE_HIDDEN, default => false, desc => ?DESC(broker_shared_dispatch_ack_enabled) } diff --git a/apps/emqx/src/emqx_shared_sub.erl b/apps/emqx/src/emqx_shared_sub.erl index d7dc8c5a6..997364898 100644 --- a/apps/emqx/src/emqx_shared_sub.erl +++ b/apps/emqx/src/emqx_shared_sub.erl @@ -165,7 +165,7 @@ strategy(Group) -> -spec ack_enabled() -> boolean(). ack_enabled() -> - emqx:get_config([broker, shared_dispatch_ack_enabled]). + emqx:get_config([broker, shared_dispatch_ack_enabled], false). 
do_dispatch(SubPid, _Group, Topic, Msg, _Type) when SubPid =:= self() -> %% Deadlock otherwise @@ -181,7 +181,7 @@ do_dispatch(SubPid, _Group, Topic, Msg, retry) -> do_dispatch(SubPid, Group, Topic, Msg, fresh) -> case ack_enabled() of true -> - %% FIXME: replace with `emqx_shared_sub_proto:dispatch_with_ack' in 5.2 + %% TODO: delete this clase after 5.1.0 do_dispatch_with_ack(SubPid, Group, Topic, Msg); false -> send(SubPid, Topic, {deliver, Topic, Msg}) diff --git a/apps/emqx/test/emqx_config_SUITE.erl b/apps/emqx/test/emqx_config_SUITE.erl index 7befd7a16..b54f67f07 100644 --- a/apps/emqx/test/emqx_config_SUITE.erl +++ b/apps/emqx/test/emqx_config_SUITE.erl @@ -50,7 +50,6 @@ t_fill_default_values(_) -> }, <<"route_batch_clean">> := false, <<"session_locking_strategy">> := quorum, - <<"shared_dispatch_ack_enabled">> := false, <<"shared_subscription_strategy">> := round_robin } }, diff --git a/rel/i18n/emqx_schema.hocon b/rel/i18n/emqx_schema.hocon index 9c2a1530d..76cce8e78 100644 --- a/rel/i18n/emqx_schema.hocon +++ b/rel/i18n/emqx_schema.hocon @@ -1373,9 +1373,9 @@ persistent_session_store_message_gc_interval.label: """Message GC interval""" broker_shared_dispatch_ack_enabled.desc: -"""Deprecated, will be removed in 5.1. -Enable/disable shared dispatch acknowledgement for QoS 1 and QoS 2 messages. -This should allow messages to be dispatched to a different subscriber in the group in case the picked (based on `shared_subscription_strategy`) subscriber is offline.""" +"""Deprecated. +This was designed to avoid dispatching messages to a shared-subscription session which has the client disconnected. +However it's no longer useful because the shared-subscrption messages in a expired session will be redispatched to other sessions in the group.""" base_listener_enable_authn.desc: """Set true (default) to enable client authentication on this listener, the authentication diff --git a/rel/i18n/zh/emqx_schema.hocon b/rel/i18n/zh/emqx_schema.hocon index 3616abe91..1e42a4589 100644 --- a/rel/i18n/zh/emqx_schema.hocon +++ b/rel/i18n/zh/emqx_schema.hocon @@ -1313,9 +1313,10 @@ persistent_session_store_message_gc_interval.label: """消息清理间隔""" broker_shared_dispatch_ack_enabled.desc: -"""该配置项已废弃,会在 5.1 中移除。 +"""该配置项已废弃。 启用/禁用 QoS 1 和 QoS 2 消息的共享派发确认。 -开启后,允许将消息从未及时回复 ACK 的订阅者 (例如,客户端离线) 重新派发给另外一个订阅者。""" +该配置最初设计用于避免将消息派发给客户端离线状态下的会话中去。 +但新版本中,已做增强:在一个会话结束时,会话中的消息会重新派发到组内的其他会话中 -- 使这个老配置失去存在的意义。""" base_listener_enable_authn.desc: """配置 true (默认值)启用客户端进行身份认证,通过检查认配置的认认证器链来决定是否允许接入。 From 895963c0f6e66c9399182d3ffbee2418fd2fd6aa Mon Sep 17 00:00:00 2001 From: JianBo He Date: Fri, 21 Apr 2023 20:00:26 +0800 Subject: [PATCH 101/263] chore: shorten ct/run.sh script --- scripts/ct/run.sh | 51 ++++++----------------------------------------- 1 file changed, 6 insertions(+), 45 deletions(-) diff --git a/scripts/ct/run.sh b/scripts/ct/run.sh index aca83ae86..ab7fff444 100755 --- a/scripts/ct/run.sh +++ b/scripts/ct/run.sh @@ -108,51 +108,12 @@ case "${WHICH_APP}" in ## ensure enterprise profile when testing lib-ee applications export PROFILE='emqx-enterprise' ;; - apps/emqx_bridge_kafka) - ## ensure enterprise profile when testing ee applications - export PROFILE='emqx-enterprise' - ;; - apps/emqx_bridge_cassandra) - export PROFILE='emqx-enterprise' - ;; - apps/emqx_bridge_clickhouse) - export PROFILE='emqx-enterprise' - ;; - apps/emqx_bridge_dynamo) - export PROFILE='emqx-enterprise' - ;; - apps/emqx_bridge_gcp_pubsub) - export PROFILE='emqx-enterprise' - ;; - apps/emqx_bridge_hstreamdb) - export 
PROFILE='emqx-enterprise' - ;; - apps/emqx_bridge_influxdb) - export PROFILE='emqx-enterprise' - ;; - apps/emqx_bridge_matrix) - export PROFILE='emqx-enterprise' - ;; - apps/emqx_bridge_mongodb) - export PROFILE='emqx-enterprise' - ;; - apps/emqx_bridge_mysql) - export PROFILE='emqx-enterprise' - ;; - apps/emqx_bridge_pgsql) - export PROFILE='emqx-enterprise' - ;; - apps/emqx_bridge_redis) - export PROFILE='emqx-enterprise' - ;; - apps/emqx_bridge_rocketmq) - export PROFILE='emqx-enterprise' - ;; - apps/emqx_bridge_tdengine) - export PROFILE='emqx-enterprise' - ;; - apps/emqx_bridge_timescale) - export PROFILE='emqx-enterprise' + apps/*) + if [[ -f "${WHICH_APP}/BSL.txt" ]]; then + export PROFILE='emqx-enterprise' + else + export PROFILE='emqx' + fi ;; *) export PROFILE="${PROFILE:-emqx}" From ceafc52ad6b3c8ef72abdbe92e960a4c8c1bbd6d Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Tue, 18 Apr 2023 19:59:23 +0200 Subject: [PATCH 102/263] refactor: use emqx_utils_ets for ets table creation --- apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl | 2 +- scripts/merge-config.escript | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl b/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl index 9d8d1905d..b503fed88 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl @@ -36,7 +36,7 @@ init() -> OtherLangDesc0 = filelib:wildcard("desc.*.hocon", WwwStaticDir), OtherLangDesc = lists:map(fun(F) -> filename:join([WwwStaticDir, F]) end, OtherLangDesc0), Files = [EngDesc | OtherLangDesc], - ?MODULE = ets:new(?MODULE, [named_table, public, set, {read_concurrency, true}]), + ok = emqx_utils_ets:new(?MODULE, [public, ordered_set, {read_concurrency, true}]), ok = lists:foreach(fun(F) -> load_desc(?MODULE, F) end, Files). %% @doc Load the description of the configuration items from the file. diff --git a/scripts/merge-config.escript b/scripts/merge-config.escript index b3c214dd7..14ec979f2 100755 --- a/scripts/merge-config.escript +++ b/scripts/merge-config.escript @@ -110,14 +110,14 @@ merge_desc_files_per_lang(Lang) -> BaseConf = <<"">>, Cfgs0 = get_all_desc_files(Lang), Conf = do_merge_desc_files_per_lang(BaseConf, Cfgs0), - OutputFile = case Lang of + OutputFile = case Lang of "en" -> %% en desc will always be in the priv dir of emqx_dashboard "apps/emqx_dashboard/priv/desc.en.hocon"; "zh" -> %% so far we inject zh desc as if it's extracted from dashboard package %% TODO: remove this when we have zh translation moved to dashboard package - "apps/emqx_dashboard/priv/www/static/desc.zh.hocon" + "apps/emqx_dashboard/priv/www/static/desc.zh.hocon" end, ok = filelib:ensure_dir(OutputFile), ok = file:write_file(OutputFile, Conf). From a6d72b178bb32bd90fdf8931b168db74afbebba3 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Tue, 18 Apr 2023 20:04:22 +0200 Subject: [PATCH 103/263] chore: delete old script split-i19n-files.escript is no longer needed --- scripts/split-i18n-files.escript | 84 -------------------------------- 1 file changed, 84 deletions(-) delete mode 100755 scripts/split-i18n-files.escript diff --git a/scripts/split-i18n-files.escript b/scripts/split-i18n-files.escript deleted file mode 100755 index b9f558925..000000000 --- a/scripts/split-i18n-files.escript +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env escript - -%% This script is for one-time use. -%% will be deleted after the migration is done. - --mode(compile). 
-
-main([]) ->
-    %% we need to parse hocon
-    %% so we'll just add all compiled libs to path
-    code:add_pathsz(find_ebin_paths("_build/default/lib/*")),
-    Files = filelib:wildcard("rel/i18n/*.hocon"),
-    ok = lists:foreach(fun split_file/1, Files),
-    ok.
-
-find_ebin_paths(DirPattern) ->
-    LibDirs = filelib:wildcard(DirPattern),
-    lists:filtermap(fun add_ebin/1, LibDirs).
-
-add_ebin(Dir) ->
-    EbinDir = filename:join(Dir, "ebin"),
-    case filelib:is_dir(EbinDir) of
-        true -> {true, EbinDir};
-        false -> false
-    end.
-
-split_file(Path) ->
-    {ok, DescMap} = hocon:load(Path),
-    [{Module, Descs}] = maps:to_list(DescMap),
-    try
-        ok = split(Path, Module, <<"en">>, Descs),
-        ok = split(Path, Module, <<"zh">>, Descs)
-    catch
-        throw : already_done ->
-            ok
-    end.
-
-split(Path, Module, Lang, Fields) when is_map(Fields) ->
-    split(Path, Module, Lang, maps:to_list(Fields));
-split(Path, Module, Lang, Fields) when is_list(Fields) ->
-    Split = lists:map(fun({Name, Desc})-> do_split(Path, Name, Lang, Desc) end, Fields),
-    IoData = [Module, " {\n\n", Split, "}\n"],
-    %% assert it's a valid HOCON object
-    {ok, _} = hocon:binary(IoData),
-    %io:format(user, "~s", [IoData]).
-    WritePath = case Lang of
-                    <<"en">> ->
-                        Path;
-                    <<"zh">> ->
-                        rename(Path, "zh")
-                end,
-    ok = filelib:ensure_dir(WritePath),
-    ok = file:write_file(WritePath, IoData),
-    ok.
-
-rename(FilePath, Lang) ->
-    Dir = filename:dirname(FilePath),
-    BaseName = filename:basename(FilePath),
-    filename:join([Dir, Lang, BaseName]).
-
-do_split(_Path, _Name, _Lang, #{<<"desc">> := Desc}) when is_binary(Desc) ->
-    throw(already_done);
-do_split(Path, Name, Lang, #{<<"desc">> := Desc} = D) ->
-    try
-        Label = maps:get(<<"label">>, D, #{}),
-        DescL = maps:get(Lang, Desc),
-        LabelL = maps:get(Lang, Label, undefined),
-        [fmt([Name, ".desc:\n"], DescL),
-         fmt([Name, ".label:\n"], LabelL)
-        ]
-    catch
-        C : E : S->
-            erlang:raise(C, {Path, Name, E}, S)
-    end.
-
-
-tq() ->
-    "\"\"\"".
-
-fmt(_Key, undefined) ->
-    [];
-fmt(Key, Content) ->
-    [Key, tq(), Content, tq(), "\n\n"].
-
From 81340edbca43f042ba76b9de1f4bf9d45a0ba05e Mon Sep 17 00:00:00 2001
From: "Zaiming (Stone) Shi"
Date: Sat, 22 Apr 2023 09:08:07 +0200
Subject: [PATCH 104/263] docs: add changelog

---
 changes/ce/fix-10462.en.md | 4 ++++
 1 file changed, 4 insertions(+)
 create mode 100644 changes/ce/fix-10462.en.md

diff --git a/changes/ce/fix-10462.en.md b/changes/ce/fix-10462.en.md
new file mode 100644
index 000000000..9e7922be2
--- /dev/null
+++ b/changes/ce/fix-10462.en.md
@@ -0,0 +1,4 @@
+Deprecate config `broker.shared_dispatch_ack_enabled`.
+This was designed to avoid dispatching messages to a shared-subscription session which has the client disconnected.
+However since v5.0.9, this feature is no longer useful because the shared-subscription messages in an expired session will be redispatched to other sessions in the group.
+See also: https://github.com/emqx/emqx/pull/9104 From 5074825075363047dacba56f11bd46ed166d1f31 Mon Sep 17 00:00:00 2001 From: firest Date: Sun, 23 Apr 2023 09:56:24 +0800 Subject: [PATCH 105/263] feat(opents): OpenTSDB integration --- apps/emqx_bridge_opents/.gitignore | 19 ++ apps/emqx_bridge_opents/BSL.txt | 94 +++++++++ apps/emqx_bridge_opents/README.md | 9 + .../etc/emqx_bridge_opents.conf | 0 apps/emqx_bridge_opents/rebar.config | 8 + .../src/emqx_bridge_opents.app.src | 15 ++ .../src/emqx_bridge_opents.erl | 85 ++++++++ .../emqx_ee_bridge/src/emqx_ee_bridge.app.src | 3 +- lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl | 17 +- .../src/emqx_ee_connector_opents.erl | 182 ++++++++++++++++++ rel/i18n/emqx_bridge_opents.hocon | 26 +++ rel/i18n/emqx_ee_connector_opents.hocon | 20 ++ rel/i18n/zh/emqx_bridge_opents.hocon | 26 +++ rel/i18n/zh/emqx_ee_connector_opents.hocon | 20 ++ 14 files changed, 520 insertions(+), 4 deletions(-) create mode 100644 apps/emqx_bridge_opents/.gitignore create mode 100644 apps/emqx_bridge_opents/BSL.txt create mode 100644 apps/emqx_bridge_opents/README.md create mode 100644 apps/emqx_bridge_opents/etc/emqx_bridge_opents.conf create mode 100644 apps/emqx_bridge_opents/rebar.config create mode 100644 apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src create mode 100644 apps/emqx_bridge_opents/src/emqx_bridge_opents.erl create mode 100644 lib-ee/emqx_ee_connector/src/emqx_ee_connector_opents.erl create mode 100644 rel/i18n/emqx_bridge_opents.hocon create mode 100644 rel/i18n/emqx_ee_connector_opents.hocon create mode 100644 rel/i18n/zh/emqx_bridge_opents.hocon create mode 100644 rel/i18n/zh/emqx_ee_connector_opents.hocon diff --git a/apps/emqx_bridge_opents/.gitignore b/apps/emqx_bridge_opents/.gitignore new file mode 100644 index 000000000..f1c455451 --- /dev/null +++ b/apps/emqx_bridge_opents/.gitignore @@ -0,0 +1,19 @@ +.rebar3 +_* +.eunit +*.o +*.beam +*.plt +*.swp +*.swo +.erlang.cookie +ebin +log +erl_crash.dump +.rebar +logs +_build +.idea +*.iml +rebar3.crashdump +*~ diff --git a/apps/emqx_bridge_opents/BSL.txt b/apps/emqx_bridge_opents/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_opents/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. 
+ +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. 
diff --git a/apps/emqx_bridge_opents/README.md b/apps/emqx_bridge_opents/README.md new file mode 100644 index 000000000..a172cba15 --- /dev/null +++ b/apps/emqx_bridge_opents/README.md @@ -0,0 +1,9 @@ +emqx_bridge_opentsdb +===== + +An OTP application + +Build +----- + + $ rebar3 compile diff --git a/apps/emqx_bridge_opents/etc/emqx_bridge_opents.conf b/apps/emqx_bridge_opents/etc/emqx_bridge_opents.conf new file mode 100644 index 000000000..e69de29bb diff --git a/apps/emqx_bridge_opents/rebar.config b/apps/emqx_bridge_opents/rebar.config new file mode 100644 index 000000000..d7bd4560f --- /dev/null +++ b/apps/emqx_bridge_opents/rebar.config @@ -0,0 +1,8 @@ +{erl_opts, [debug_info]}. + +{deps, [ + {opentsdb, {git, "https://github.com/emqx/opentsdb-client-erl", {tag, "v0.5.1"}}}, + {emqx_connector, {path, "../../apps/emqx_connector"}}, + {emqx_resource, {path, "../../apps/emqx_resource"}}, + {emqx_bridge, {path, "../../apps/emqx_bridge"}} +]}. diff --git a/apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src b/apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src new file mode 100644 index 000000000..d001446b3 --- /dev/null +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src @@ -0,0 +1,15 @@ +{application, emqx_bridge_opents, [ + {description, "EMQX Enterprise OpenTSDB Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + opentsdb + ]}, + {env, []}, + {modules, []}, + + {licenses, ["BSL"]}, + {links, []} +]}. diff --git a/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl b/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl new file mode 100644 index 000000000..9001e391c --- /dev/null +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl @@ -0,0 +1,85 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_opents). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%% ------------------------------------------------------------------------------------------------- +%% api +conn_bridge_examples(Method) -> + [ + #{ + <<"opents">> => #{ + summary => <<"OpenTSDB Bridge">>, + value => values(Method) + } + } + ]. + +values(_Method) -> + #{ + enable => true, + type => opents, + name => <<"foo">>, + server => <<"http://127.0.0.1:4242">>, + pool_size => 8, + resource_opts => #{ + worker_pool_size => 1, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => async, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_opents". + +roots() -> []. + +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})} + ] ++ emqx_resource_schema:fields("resource_opts") ++ + emqx_ee_connector_opents:fields(config); +fields("post") -> + [type_field(), name_field() | fields("config")]; +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"). 
+ +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for OpenTSDB using `", string:to_upper(Method), "` method."]; +desc(_) -> + undefined. + +%% ------------------------------------------------------------------------------------------------- +%% internal + +type_field() -> + {type, mk(enum([opents]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src index 440889d02..7dc8882b3 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src @@ -8,7 +8,8 @@ emqx_ee_connector, telemetry, emqx_bridge_kafka, - emqx_bridge_gcp_pubsub + emqx_bridge_gcp_pubsub, + emqx_bridge_opents ]}, {env, []}, {modules, []}, diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl index 7fdfbba99..636166d90 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl @@ -35,7 +35,8 @@ api_schemas(Method) -> ref(emqx_ee_bridge_clickhouse, Method), ref(emqx_ee_bridge_dynamo, Method), ref(emqx_ee_bridge_rocketmq, Method), - ref(emqx_ee_bridge_sqlserver, Method) + ref(emqx_ee_bridge_sqlserver, Method), + ref(emqx_bridge_opents, Method) ]. schema_modules() -> @@ -55,7 +56,8 @@ schema_modules() -> emqx_ee_bridge_clickhouse, emqx_ee_bridge_dynamo, emqx_ee_bridge_rocketmq, - emqx_ee_bridge_sqlserver + emqx_ee_bridge_sqlserver, + emqx_bridge_opents ]. examples(Method) -> @@ -94,7 +96,8 @@ resource_type(tdengine) -> emqx_ee_connector_tdengine; resource_type(clickhouse) -> emqx_ee_connector_clickhouse; resource_type(dynamo) -> emqx_ee_connector_dynamo; resource_type(rocketmq) -> emqx_ee_connector_rocketmq; -resource_type(sqlserver) -> emqx_ee_connector_sqlserver. +resource_type(sqlserver) -> emqx_ee_connector_sqlserver; +resource_type(opents) -> emqx_ee_connector_opents. fields(bridges) -> [ @@ -153,6 +156,14 @@ fields(bridges) -> desc => <<"Cassandra Bridge Config">>, required => false } + )}, + {opents, + mk( + hoconsc:map(name, ref(emqx_bridge_opents, "config")), + #{ + desc => <<"OpenTSDB Bridge Config">>, + required => false + } )} ] ++ kafka_structs() ++ mongodb_structs() ++ influxdb_structs() ++ redis_structs() ++ pgsql_structs() ++ clickhouse_structs() ++ sqlserver_structs(). diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_opents.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_opents.erl new file mode 100644 index 000000000..457fde0a0 --- /dev/null +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_opents.erl @@ -0,0 +1,182 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_ee_connector_opents). + +-behaviour(emqx_resource). + +-include_lib("emqx_resource/include/emqx_resource.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-export([roots/0, fields/1]). + +%% `emqx_resource' API +-export([ + callback_mode/0, + is_buffer_supported/0, + on_start/2, + on_stop/2, + on_query/3, + on_batch_query/3, + on_get_status/2 +]). + +-export([connect/1]). 
+ +-import(hoconsc, [mk/2, enum/1, ref/2]). + +%%===================================================================== +%% Hocon schema +roots() -> + [{config, #{type => hoconsc:ref(?MODULE, config)}}]. + +fields(config) -> + [ + {server, mk(binary(), #{required => true, desc => ?DESC("server")})}, + {pool_size, fun emqx_connector_schema_lib:pool_size/1}, + {summary, mk(boolean(), #{default => true, desc => ?DESC("summary")})}, + {details, mk(boolean(), #{default => false, desc => ?DESC("details")})}, + {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1} + ]. + +%%======================================================================================== +%% `emqx_resource' API +%%======================================================================================== + +callback_mode() -> always_sync. + +is_buffer_supported() -> false. + +on_start( + InstanceId, + #{ + server := Server, + pool_size := PoolSize, + summary := Summary, + details := Details, + resource_opts := #{batch_size := BatchSize} + } = Config +) -> + ?SLOG(info, #{ + msg => "starting_opents_connector", + connector => InstanceId, + config => emqx_utils:redact(Config) + }), + + Options = [ + {server, to_str(Server)}, + {summary, Summary}, + {details, Details}, + {max_batch_size, BatchSize}, + {pool_size, PoolSize} + ], + + State = #{poolname => InstanceId, server => Server}, + case opentsdb_connectivity(Server) of + ok -> + case emqx_plugin_libs_pool:start_pool(InstanceId, ?MODULE, Options) of + ok -> + {ok, State}; + Error -> + Error + end; + {error, Reason} = Error -> + ?SLOG(error, #{msg => "Initiate resource failed", reason => Reason}), + Error + end. + +on_stop(InstanceId, #{poolname := PoolName} = _State) -> + ?SLOG(info, #{ + msg => "stopping_opents_connector", + connector => InstanceId + }), + emqx_plugin_libs_pool:stop_pool(PoolName). + +on_query(InstanceId, Request, State) -> + on_batch_query(InstanceId, [Request], State). + +on_batch_query( + InstanceId, + BatchReq, + State +) -> + Datas = [format_opentsdb_msg(Msg) || {_Key, Msg} <- BatchReq], + do_query(InstanceId, Datas, State). + +on_get_status(_InstanceId, #{server := Server}) -> + case opentsdb_connectivity(Server) of + ok -> + connected; + {error, Reason} -> + ?SLOG(error, #{msg => "OpenTSDB lost connection", reason => Reason}), + connecting + end. + +%%======================================================================================== +%% Helper fns +%%======================================================================================== + +do_query(InstanceId, Query, #{poolname := PoolName} = State) -> + ?TRACE( + "QUERY", + "opents_connector_received", + #{connector => InstanceId, query => Query, state => State} + ), + Result = ecpool:pick_and_do(PoolName, {opentsdb, put, [Query]}, no_handover), + + case Result of + {error, Reason} -> + ?tp( + opents_connector_query_return, + #{error => Reason} + ), + ?SLOG(error, #{ + msg => "opents_connector_do_query_failed", + connector => InstanceId, + query => Query, + reason => Reason + }), + Result; + _ -> + ?tp( + opents_connector_query_return, + #{result => Result} + ), + Result + end. + +connect(Opts) -> + opentsdb:start_link(Opts). + +to_str(List) when is_list(List) -> + List; +to_str(Bin) when is_binary(Bin) -> + erlang:binary_to_list(Bin). + +opentsdb_connectivity(Server) -> + SvrUrl = + case Server of + <<"http://", _/binary>> -> Server; + <<"https://", _/binary>> -> Server; + _ -> "http://" ++ Server + end, + emqx_plugin_libs_rule:http_connectivity(SvrUrl). 
+ +format_opentsdb_msg(Msg) -> + maps:with( + [ + timestamp, + metric, + tags, + value, + <<"timestamp">>, + <<"metric">>, + <<"tags">>, + <<"value">> + ], + Msg + ). diff --git a/rel/i18n/emqx_bridge_opents.hocon b/rel/i18n/emqx_bridge_opents.hocon new file mode 100644 index 000000000..ff44a9e18 --- /dev/null +++ b/rel/i18n/emqx_bridge_opents.hocon @@ -0,0 +1,26 @@ +emqx_bridge_opents { + + config_enable.desc: + """Enable or disable this bridge""" + + config_enable.label: + "Enable Or Disable Bridge" + + desc_config.desc: + """Configuration for an OpenTSDB bridge.""" + + desc_config.label: + "OpenTSDB Bridge Configuration" + + desc_type.desc: + """The Bridge Type""" + + desc_type.label: + "Bridge Type" + + desc_name.desc: + """Bridge name.""" + + desc_name.label: + "Bridge Name" +} diff --git a/rel/i18n/emqx_ee_connector_opents.hocon b/rel/i18n/emqx_ee_connector_opents.hocon new file mode 100644 index 000000000..4e51454c9 --- /dev/null +++ b/rel/i18n/emqx_ee_connector_opents.hocon @@ -0,0 +1,20 @@ +emqx_ee_connector_opents { + + server.desc: + """The URL of OpenTSDB endpoint.""" + + server.label: + "URL" + + summary.desc: + """Whether or not to return summary information.""" + + summary.label: + "Summary" + + details.desc: + """Whether or not to return detailed information.""" + + details.label: + "Details" +} diff --git a/rel/i18n/zh/emqx_bridge_opents.hocon b/rel/i18n/zh/emqx_bridge_opents.hocon new file mode 100644 index 000000000..137e687df --- /dev/null +++ b/rel/i18n/zh/emqx_bridge_opents.hocon @@ -0,0 +1,26 @@ +emqx_bridge_opents { + + config_enable.desc: + """启用/禁用桥接""" + + config_enable.label: + "启用/禁用桥接" + + desc_config.desc: + """OpenTSDB 桥接配置""" + + desc_config.label: + "OpenTSDB 桥接配置" + + desc_type.desc: + """Bridge 类型""" + + desc_type.label: + "桥接类型" + + desc_name.desc: + """桥接名字""" + + desc_name.label: + "桥接名字" +} diff --git a/rel/i18n/zh/emqx_ee_connector_opents.hocon b/rel/i18n/zh/emqx_ee_connector_opents.hocon new file mode 100644 index 000000000..7e58da9bd --- /dev/null +++ b/rel/i18n/zh/emqx_ee_connector_opents.hocon @@ -0,0 +1,20 @@ +emqx_ee_connector_opents { + + server.desc: + """服务器的地址。""" + + server.label: + "服务器地址" + + summary.desc: + """是否返回摘要信息。""" + + summary.label: + "摘要信息" + + details.desc: + """是否返回详细信息。""" + + details.label: + "详细信息" +} From 0b46acda87716f89cff6ee1e5ec273bf6c11873e Mon Sep 17 00:00:00 2001 From: firest Date: Sun, 23 Apr 2023 09:57:47 +0800 Subject: [PATCH 106/263] test(opents): add test cases for OpenTSDB --- .ci/docker-compose-file/.env | 1 + .../docker-compose-opents.yaml | 9 + .../docker-compose-toxiproxy.yaml | 1 + .ci/docker-compose-file/toxiproxy.json | 6 + .github/workflows/run_test_cases.yaml | 1 + apps/emqx_bridge_opents/.gitignore | 19 - apps/emqx_bridge_opents/docker-ct | 2 + .../etc/emqx_bridge_opents.conf | 0 .../test/emqx_bridge_opents_SUITE.erl | 363 ++++++++++++++++++ .../src/emqx_ee_connector_opents.erl | 16 +- mix.exs | 5 +- rebar.config.erl | 2 + ...con => emqx_bridge_opents_connector.hocon} | 2 +- scripts/ct/run.sh | 9 +- scripts/find-apps.sh | 3 + 15 files changed, 410 insertions(+), 29 deletions(-) create mode 100644 .ci/docker-compose-file/docker-compose-opents.yaml delete mode 100644 apps/emqx_bridge_opents/.gitignore create mode 100644 apps/emqx_bridge_opents/docker-ct delete mode 100644 apps/emqx_bridge_opents/etc/emqx_bridge_opents.conf create mode 100644 apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl rename rel/i18n/{emqx_ee_connector_opents.hocon => emqx_bridge_opents_connector.hocon} (91%) diff 
--git a/.ci/docker-compose-file/.env b/.ci/docker-compose-file/.env index d33637ea0..3b00b454f 100644 --- a/.ci/docker-compose-file/.env +++ b/.ci/docker-compose-file/.env @@ -7,6 +7,7 @@ INFLUXDB_TAG=2.5.0 TDENGINE_TAG=3.0.2.4 DYNAMO_TAG=1.21.0 CASSANDRA_TAG=3.11.6 +OPENTS_TAG=9aa7f88 MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server SQLSERVER_TAG=2019-CU19-ubuntu-20.04 diff --git a/.ci/docker-compose-file/docker-compose-opents.yaml b/.ci/docker-compose-file/docker-compose-opents.yaml new file mode 100644 index 000000000..545aeb015 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-opents.yaml @@ -0,0 +1,9 @@ +version: '3.9' + +services: + opents_server: + container_name: opents + image: petergrace/opentsdb-docker:${OPENTS_TAG} + restart: always + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml index ba5e831a5..a1ae41e2c 100644 --- a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml +++ b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml @@ -26,6 +26,7 @@ services: - 19876:9876 - 19042:9042 - 19142:9142 + - 14242:4242 command: - "-host=0.0.0.0" - "-config=/config/toxiproxy.json" diff --git a/.ci/docker-compose-file/toxiproxy.json b/.ci/docker-compose-file/toxiproxy.json index da2dff763..f6b31da4c 100644 --- a/.ci/docker-compose-file/toxiproxy.json +++ b/.ci/docker-compose-file/toxiproxy.json @@ -101,5 +101,11 @@ "listen": "0.0.0.0:1433", "upstream": "sqlserver:1433", "enabled": true + }, + { + "name": "opents", + "listen": "0.0.0.0:4242", + "upstream": "opents:4242", + "enabled": true } ] diff --git a/.github/workflows/run_test_cases.yaml b/.github/workflows/run_test_cases.yaml index fb4f264e7..f7b775f08 100644 --- a/.github/workflows/run_test_cases.yaml +++ b/.github/workflows/run_test_cases.yaml @@ -168,6 +168,7 @@ jobs: REDIS_TAG: "7.0" INFLUXDB_TAG: "2.5.0" TDENGINE_TAG: "3.0.2.4" + OPENTS_TAG: "9aa7f88" PROFILE: ${{ matrix.profile }} CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} run: ./scripts/ct/run.sh --ci --app ${{ matrix.app }} diff --git a/apps/emqx_bridge_opents/.gitignore b/apps/emqx_bridge_opents/.gitignore deleted file mode 100644 index f1c455451..000000000 --- a/apps/emqx_bridge_opents/.gitignore +++ /dev/null @@ -1,19 +0,0 @@ -.rebar3 -_* -.eunit -*.o -*.beam -*.plt -*.swp -*.swo -.erlang.cookie -ebin -log -erl_crash.dump -.rebar -logs -_build -.idea -*.iml -rebar3.crashdump -*~ diff --git a/apps/emqx_bridge_opents/docker-ct b/apps/emqx_bridge_opents/docker-ct new file mode 100644 index 000000000..fc68b978e --- /dev/null +++ b/apps/emqx_bridge_opents/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +opents diff --git a/apps/emqx_bridge_opents/etc/emqx_bridge_opents.conf b/apps/emqx_bridge_opents/etc/emqx_bridge_opents.conf deleted file mode 100644 index e69de29bb..000000000 diff --git a/apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl b/apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl new file mode 100644 index 000000000..6f444b93e --- /dev/null +++ b/apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl @@ -0,0 +1,363 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_opents_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). 
+-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +% DB defaults +-define(BATCH_SIZE, 10). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, with_batch}, + {group, without_batch} + ]. + +groups() -> + TCs = emqx_common_test_helpers:all(?MODULE), + [ + {with_batch, TCs}, + {without_batch, TCs} + ]. + +init_per_group(with_batch, Config0) -> + Config = [{batch_size, ?BATCH_SIZE} | Config0], + common_init(Config); +init_per_group(without_batch, Config0) -> + Config = [{batch_size, 1} | Config0], + common_init(Config); +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when Group =:= with_batch; Group =:= without_batch -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok; +end_per_group(_Group, _Config) -> + ok. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]), + ok. + +init_per_testcase(_Testcase, Config) -> + delete_bridge(Config), + snabbkaffe:start_trace(), + Config. + +end_per_testcase(_Testcase, Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok = snabbkaffe:stop(), + delete_bridge(Config), + ok. + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +common_init(ConfigT) -> + Host = os:getenv("OPENTS_HOST", "toxiproxy"), + Port = list_to_integer(os:getenv("OPENTS_PORT", "4242")), + + Config0 = [ + {opents_host, Host}, + {opents_port, Port}, + {proxy_name, "opents"} + | ConfigT + ], + + BridgeType = proplists:get_value(bridge_type, Config0, <<"opents">>), + case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of + true -> + % Setup toxiproxy + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + % Ensure EE bridge module is loaded + _ = application:load(emqx_ee_bridge), + _ = emqx_ee_bridge:module_info(), + ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), + emqx_mgmt_api_test_util:init_suite(), + {Name, OpenTSConf} = opents_config(BridgeType, Config0), + Config = + [ + {opents_config, OpenTSConf}, + {opents_bridge_type, BridgeType}, + {opents_name, Name}, + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort} + | Config0 + ], + Config; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_opents); + _ -> + {skip, no_opents} + end + end. + +opents_config(BridgeType, Config) -> + Port = integer_to_list(?config(opents_port, Config)), + Server = "http://" ++ ?config(opents_host, Config) ++ ":" ++ Port, + Name = atom_to_binary(?MODULE), + BatchSize = ?config(batch_size, Config), + ConfigString = + io_lib:format( + "bridges.~s.~s {\n" + " enable = true\n" + " server = ~p\n" + " resource_opts = {\n" + " request_timeout = 500ms\n" + " batch_size = ~b\n" + " query_mode = sync\n" + " }\n" + "}", + [ + BridgeType, + Name, + Server, + BatchSize + ] + ), + {Name, parse_and_check(ConfigString, BridgeType, Name)}. 
+ +parse_and_check(ConfigString, BridgeType, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{BridgeType := #{Name := Config}}} = RawConf, + Config. + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + BridgeType = ?config(opents_bridge_type, Config), + Name = ?config(opents_name, Config), + Config0 = ?config(opents_config, Config), + Config1 = emqx_utils_maps:deep_merge(Config0, Overrides), + emqx_bridge:create(BridgeType, Name, Config1). + +delete_bridge(Config) -> + BridgeType = ?config(opents_bridge_type, Config), + Name = ?config(opents_name, Config), + emqx_bridge:remove(BridgeType, Name). + +create_bridge_http(Params) -> + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +send_message(Config, Payload) -> + Name = ?config(opents_name, Config), + BridgeType = ?config(opents_bridge_type, Config), + BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name), + emqx_bridge:send_message(BridgeID, Payload). + +query_resource(Config, Request) -> + query_resource(Config, Request, 1_000). + +query_resource(Config, Request, Timeout) -> + Name = ?config(opents_name, Config), + BridgeType = ?config(opents_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + emqx_resource:query(ResourceID, Request, #{timeout => Timeout}). + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_setup_via_config_and_publish(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + SentData = make_data(), + ?check_trace( + begin + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, SentData), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?assertMatch( + {ok, 200, #{failed := 0, success := 1}}, Result + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(opents_connector_query_return, Trace0), + ?assertMatch([#{result := {ok, 200, #{failed := 0, success := 1}}}], Trace), + ok + end + ), + ok. + +t_setup_via_http_api_and_publish(Config) -> + BridgeType = ?config(opents_bridge_type, Config), + Name = ?config(opents_name, Config), + OpentsConfig0 = ?config(opents_config, Config), + OpentsConfig = OpentsConfig0#{ + <<"name">> => Name, + <<"type">> => BridgeType + }, + ?assertMatch( + {ok, _}, + create_bridge_http(OpentsConfig) + ), + SentData = make_data(), + ?check_trace( + begin + Request = {send_message, SentData}, + Res0 = query_resource(Config, Request, 2_500), + ?assertMatch( + {ok, 200, #{failed := 0, success := 1}}, Res0 + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(opents_connector_query_return, Trace0), + ?assertMatch([#{result := {ok, 200, #{failed := 0, success := 1}}}], Trace), + ok + end + ), + ok. + +t_get_status(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + + Name = ?config(opents_name, Config), + BridgeType = ?config(opents_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)), + ok. 
+ +t_create_disconnected(Config) -> + BridgeType = proplists:get_value(bridge_type, Config, <<"opents">>), + Config1 = lists:keyreplace(opents_port, 1, Config, {opents_port, 61234}), + {_Name, OpenTSConf} = opents_config(BridgeType, Config1), + + Config2 = lists:keyreplace(opents_config, 1, Config1, {opents_config, OpenTSConf}), + ?assertMatch({ok, _}, create_bridge(Config2)), + + Name = ?config(opents_name, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceID)), + ok. + +t_write_failure(Config) -> + ProxyName = ?config(proxy_name, Config), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + {ok, _} = create_bridge(Config), + SentData = make_data(), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, SentData), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?assertMatch({error, _}, Result), + ok + end), + ok. + +t_write_timeout(Config) -> + ProxyName = ?config(proxy_name, Config), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + {ok, _} = create_bridge( + Config, + #{ + <<"resource_opts">> => #{ + <<"request_timeout">> => 500, + <<"resume_interval">> => 100, + <<"health_check_interval">> => 100 + } + } + ), + SentData = make_data(), + emqx_common_test_helpers:with_failure( + timeout, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch( + {error, {resource_error, #{reason := timeout}}}, + query_resource(Config, {send_message, SentData}) + ) + end + ), + ok. + +t_missing_data(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, #{}), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?assertMatch( + {error, {400, #{failed := 1, success := 0}}}, + Result + ), + ok. + +t_bad_data(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Data = maps:without([metric], make_data()), + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, Data), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + + ?assertMatch( + {error, {400, #{failed := 1, success := 0}}}, Result + ), + ok. + +make_data() -> + make_data(<<"cpu">>, 12). + +make_data(Metric, Value) -> + #{ + metric => Metric, + tags => #{ + <<"host">> => <<"serverA">> + }, + value => Value + }. diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_opents.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_opents.erl index 457fde0a0..dfc960493 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_opents.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_opents.erl @@ -108,13 +108,15 @@ on_batch_query( do_query(InstanceId, Datas, State). on_get_status(_InstanceId, #{server := Server}) -> - case opentsdb_connectivity(Server) of - ok -> - connected; - {error, Reason} -> - ?SLOG(error, #{msg => "OpenTSDB lost connection", reason => Reason}), - connecting - end. + Result = + case opentsdb_connectivity(Server) of + ok -> + connected; + {error, Reason} -> + ?SLOG(error, #{msg => "OpenTSDB lost connection", reason => Reason}), + connecting + end, + Result. 
%%======================================================================================== %% Helper fns diff --git a/mix.exs b/mix.exs index c5d6df804..e2230d55d 100644 --- a/mix.exs +++ b/mix.exs @@ -157,6 +157,7 @@ defmodule EMQXUmbrella.MixProject do :emqx_bridge_kafka, :emqx_bridge_gcp_pubsub, :emqx_bridge_cassandra, + :emqx_bridge_opents, :emqx_bridge_clickhouse, :emqx_bridge_dynamo, :emqx_bridge_hstreamdb, @@ -182,7 +183,8 @@ defmodule EMQXUmbrella.MixProject do {:brod, github: "kafka4beam/brod", tag: "3.16.8"}, {:snappyer, "1.2.8", override: true}, {:crc32cer, "0.1.8", override: true}, - {:supervisor3, "1.1.12", override: true} + {:supervisor3, "1.1.12", override: true}, + {:opentsdb, github: "emqx/opentsdb-client-erl", tag: "v0.5.1", override: true} ] end @@ -360,6 +362,7 @@ defmodule EMQXUmbrella.MixProject do emqx_bridge_kafka: :permanent, emqx_bridge_gcp_pubsub: :permanent, emqx_bridge_cassandra: :permanent, + emqx_bridge_opents: :permanent, emqx_bridge_clickhouse: :permanent, emqx_bridge_dynamo: :permanent, emqx_bridge_hstreamdb: :permanent, diff --git a/rebar.config.erl b/rebar.config.erl index 88471c39d..3c863046f 100644 --- a/rebar.config.erl +++ b/rebar.config.erl @@ -81,6 +81,7 @@ is_enterprise(ee) -> true. is_community_umbrella_app("apps/emqx_bridge_kafka") -> false; is_community_umbrella_app("apps/emqx_bridge_gcp_pubsub") -> false; is_community_umbrella_app("apps/emqx_bridge_cassandra") -> false; +is_community_umbrella_app("apps/emqx_bridge_opents") -> false; is_community_umbrella_app("apps/emqx_bridge_clickhouse") -> false; is_community_umbrella_app("apps/emqx_bridge_dynamo") -> false; is_community_umbrella_app("apps/emqx_bridge_hstreamdb") -> false; @@ -455,6 +456,7 @@ relx_apps_per_edition(ee) -> emqx_bridge_kafka, emqx_bridge_gcp_pubsub, emqx_bridge_cassandra, + emqx_bridge_opents, emqx_bridge_clickhouse, emqx_bridge_dynamo, emqx_bridge_hstreamdb, diff --git a/rel/i18n/emqx_ee_connector_opents.hocon b/rel/i18n/emqx_bridge_opents_connector.hocon similarity index 91% rename from rel/i18n/emqx_ee_connector_opents.hocon rename to rel/i18n/emqx_bridge_opents_connector.hocon index 4e51454c9..cd82809d2 100644 --- a/rel/i18n/emqx_ee_connector_opents.hocon +++ b/rel/i18n/emqx_bridge_opents_connector.hocon @@ -1,4 +1,4 @@ -emqx_ee_connector_opents { +emqx_bridge_opents_connector { server.desc: """The URL of OpenTSDB endpoint.""" diff --git a/scripts/ct/run.sh b/scripts/ct/run.sh index ab7fff444..c1a01a593 100755 --- a/scripts/ct/run.sh +++ b/scripts/ct/run.sh @@ -115,7 +115,11 @@ case "${WHICH_APP}" in export PROFILE='emqx' fi ;; - *) + apps/emqx_bridge_opents) + ## ensure enterprise profile when testing ee applications + export PROFILE='emqx-enterprise' + ;; + *) export PROFILE="${PROFILE:-emqx}" ;; esac @@ -188,6 +192,9 @@ for dep in ${CT_DEPS}; do ODBC_REQUEST='yes' FILES+=( '.ci/docker-compose-file/docker-compose-sqlserver.yaml' ) ;; + opents) + FILES+=( '.ci/docker-compose-file/docker-compose-opents.yaml' ) + ;; *) echo "unknown_ct_dependency $dep" exit 1 diff --git a/scripts/find-apps.sh b/scripts/find-apps.sh index bfb6ba2cc..64d28529f 100755 --- a/scripts/find-apps.sh +++ b/scripts/find-apps.sh @@ -72,6 +72,9 @@ describe_app() { runner="docker" fi case "${app}" in + apps/emqx_bridge_opents) + profile='emqx-enterprise' + ;; apps/*) if [[ -f "${app}/BSL.txt" ]]; then profile='emqx-enterprise' From 540518eac308608a310f89782c4e1ca83852af98 Mon Sep 17 00:00:00 2001 From: firest Date: Tue, 18 Apr 2023 10:24:39 +0800 Subject: [PATCH 107/263] chore: add README for OpenTSDB 
bridge --- apps/emqx_bridge_opents/README.md | 39 ++++++++++++++++++++++++++----- scripts/ct/run.sh | 6 +---- scripts/find-apps.sh | 3 --- 3 files changed, 34 insertions(+), 14 deletions(-) diff --git a/apps/emqx_bridge_opents/README.md b/apps/emqx_bridge_opents/README.md index a172cba15..a1d6511ee 100644 --- a/apps/emqx_bridge_opents/README.md +++ b/apps/emqx_bridge_opents/README.md @@ -1,9 +1,36 @@ -emqx_bridge_opentsdb -===== +# EMQX OpenTSDB Bridge -An OTP application +[OpenTSDB](http://opentsdb.net) is a distributed, scalable Time Series Database (TSDB) written on top of HBase. -Build ------ +OpenTSDB was written to address a common need: store, index and serve metrics collected from computer systems (network gear, operating systems, applications) at a large scale, and make this data easily accessible and graphable. - $ rebar3 compile +OpenTSDB allows you to collect thousands of metrics from tens of thousands of hosts and applications, at a high rate (every few seconds). + +OpenTSDB will never delete or downsample data and can easily store hundreds of billions of data points. + +The application is used to connect EMQX and OpenTSDB. User can create a rule and easily ingest IoT data into OpenTSDB by leveraging the +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/scripts/ct/run.sh b/scripts/ct/run.sh index c1a01a593..c153669f4 100755 --- a/scripts/ct/run.sh +++ b/scripts/ct/run.sh @@ -115,11 +115,7 @@ case "${WHICH_APP}" in export PROFILE='emqx' fi ;; - apps/emqx_bridge_opents) - ## ensure enterprise profile when testing ee applications - export PROFILE='emqx-enterprise' - ;; - *) + *) export PROFILE="${PROFILE:-emqx}" ;; esac diff --git a/scripts/find-apps.sh b/scripts/find-apps.sh index 64d28529f..bfb6ba2cc 100755 --- a/scripts/find-apps.sh +++ b/scripts/find-apps.sh @@ -72,9 +72,6 @@ describe_app() { runner="docker" fi case "${app}" in - apps/emqx_bridge_opents) - profile='emqx-enterprise' - ;; apps/*) if [[ -f "${app}/BSL.txt" ]]; then profile='emqx-enterprise' From 6631fb7457efd61697656d4549886ca9fc4d4287 Mon Sep 17 00:00:00 2001 From: firest Date: Tue, 18 Apr 2023 10:41:30 +0800 Subject: [PATCH 108/263] chore: update changes --- changes/ee/feat-10425.en.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 changes/ee/feat-10425.en.md diff --git a/changes/ee/feat-10425.en.md b/changes/ee/feat-10425.en.md new file mode 100644 index 000000000..7144241df --- /dev/null +++ b/changes/ee/feat-10425.en.md @@ -0,0 +1 @@ +Implement OpenTSDB data bridge. 
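To summarize the payload shape the new bridge forwards, here is a small sketch mirroring make_data/2 from the test suite; format_opentsdb_msg/1 keeps only the metric, tags, value and optional timestamp keys before the pooled opentsdb:put call. The bridge id below is an assumed example name, not one defined in this patch series:

    %% Illustrative only: push one data point through the OpenTSDB bridge.
    Point = #{
        metric => <<"cpu">>,
        tags => #{<<"host">> => <<"serverA">>},
        value => 12
    },
    emqx_bridge:send_message(<<"opents:my_opents_bridge">>, Point).
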
From 932a327952d7fbaeeaecc89a10bc10440940187b Mon Sep 17 00:00:00 2001 From: firest Date: Thu, 20 Apr 2023 15:11:37 +0800 Subject: [PATCH 109/263] chore: make spellcheck and xref happy --- .../emqx_ee_connector/src/emqx_ee_connector_opents.erl | 10 +++++----- rel/i18n/emqx_bridge_opents_connector.hocon | 4 ++-- scripts/spellcheck/dicts/emqx.txt | 1 + 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_opents.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_opents.erl index dfc960493..633e120bd 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_opents.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_opents.erl @@ -75,10 +75,10 @@ on_start( {pool_size, PoolSize} ], - State = #{poolname => InstanceId, server => Server}, + State = #{pool_name => InstanceId, server => Server}, case opentsdb_connectivity(Server) of ok -> - case emqx_plugin_libs_pool:start_pool(InstanceId, ?MODULE, Options) of + case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of ok -> {ok, State}; Error -> @@ -89,12 +89,12 @@ on_start( Error end. -on_stop(InstanceId, #{poolname := PoolName} = _State) -> +on_stop(InstanceId, #{pool_name := PoolName} = _State) -> ?SLOG(info, #{ msg => "stopping_opents_connector", connector => InstanceId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). on_query(InstanceId, Request, State) -> on_batch_query(InstanceId, [Request], State). @@ -122,7 +122,7 @@ on_get_status(_InstanceId, #{server := Server}) -> %% Helper fns %%======================================================================================== -do_query(InstanceId, Query, #{poolname := PoolName} = State) -> +do_query(InstanceId, Query, #{pool_name := PoolName} = State) -> ?TRACE( "QUERY", "opents_connector_received", diff --git a/rel/i18n/emqx_bridge_opents_connector.hocon b/rel/i18n/emqx_bridge_opents_connector.hocon index cd82809d2..5c39d1e0e 100644 --- a/rel/i18n/emqx_bridge_opents_connector.hocon +++ b/rel/i18n/emqx_bridge_opents_connector.hocon @@ -7,13 +7,13 @@ emqx_bridge_opents_connector { "URL" summary.desc: - """Whether or not to return summary information.""" + """Whether to return summary information.""" summary.label: "Summary" details.desc: - """Whether or not to return detailed information.""" + """Whether to return detailed information.""" details.label: "Details" diff --git a/scripts/spellcheck/dicts/emqx.txt b/scripts/spellcheck/dicts/emqx.txt index 168275e1e..a9afcf6ca 100644 --- a/scripts/spellcheck/dicts/emqx.txt +++ b/scripts/spellcheck/dicts/emqx.txt @@ -274,3 +274,4 @@ clickhouse FormatType RocketMQ Keyspace +OpenTSDB From 6e1d6f1991d4115990bdcb741adf8c7c50d0cb64 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Sun, 23 Apr 2023 11:27:59 +0800 Subject: [PATCH 110/263] chore: hide bad links in README files --- apps/emqx_bridge_cassandra/README.md | 2 ++ apps/emqx_bridge_hstreamdb/README.md | 2 ++ apps/emqx_bridge_matrix/README.md | 2 ++ apps/emqx_bridge_timescale/README.md | 2 ++ 4 files changed, 8 insertions(+) diff --git a/apps/emqx_bridge_cassandra/README.md b/apps/emqx_bridge_cassandra/README.md index d26bd2fbb..c5a2609a5 100644 --- a/apps/emqx_bridge_cassandra/README.md +++ b/apps/emqx_bridge_cassandra/README.md @@ -11,6 +11,7 @@ The application is used to connect EMQX and Cassandra. User can create a rule and easily ingest IoT data into Cassandra by leveraging [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). 
+ # HTTP APIs diff --git a/apps/emqx_bridge_hstreamdb/README.md b/apps/emqx_bridge_hstreamdb/README.md index 3a7c6b49d..520817e82 100644 --- a/apps/emqx_bridge_hstreamdb/README.md +++ b/apps/emqx_bridge_hstreamdb/README.md @@ -9,6 +9,7 @@ The application is used to connect EMQX and HStreamDB. User can create a rule and easily ingest IoT data into HStreamDB by leveraging [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + # HTTP APIs diff --git a/apps/emqx_bridge_matrix/README.md b/apps/emqx_bridge_matrix/README.md index 976120ffe..339eb0605 100644 --- a/apps/emqx_bridge_matrix/README.md +++ b/apps/emqx_bridge_matrix/README.md @@ -7,6 +7,7 @@ The application is used to connect EMQX and MatrixDB. User can create a rule and easily ingest IoT data into MatrixDB by leveraging [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + # HTTP APIs diff --git a/apps/emqx_bridge_timescale/README.md b/apps/emqx_bridge_timescale/README.md index 96f70f847..071cb0fa6 100644 --- a/apps/emqx_bridge_timescale/README.md +++ b/apps/emqx_bridge_timescale/README.md @@ -9,6 +9,7 @@ The application is used to connect EMQX and TimescaleDB. User can create a rule and easily ingest IoT data into TimescaleDB by leveraging [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + # HTTP APIs From 5ad5d7ee8dc60c9ed3c9812a9261e83d6b5ce63e Mon Sep 17 00:00:00 2001 From: firest Date: Fri, 21 Apr 2023 18:32:14 +0800 Subject: [PATCH 111/263] fix(opents): adjust code structure --- apps/emqx_bridge_opents/src/emqx_bridge_opents.erl | 2 +- .../emqx_bridge_opents/src/emqx_bridge_opents_connector.erl | 2 +- lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl | 2 +- ...onnector_opents.hocon => emqx_bridge_opents_connector.hocon} | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) rename lib-ee/emqx_ee_connector/src/emqx_ee_connector_opents.erl => apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl (99%) rename rel/i18n/zh/{emqx_ee_connector_opents.hocon => emqx_bridge_opents_connector.hocon} (90%) diff --git a/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl b/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl index 9001e391c..2eb6a554f 100644 --- a/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl @@ -60,7 +60,7 @@ fields("config") -> [ {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})} ] ++ emqx_resource_schema:fields("resource_opts") ++ - emqx_ee_connector_opents:fields(config); + emqx_bridge_opents_connector:fields(config); fields("post") -> [type_field(), name_field() | fields("config")]; fields("put") -> diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_opents.erl b/apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl similarity index 99% rename from lib-ee/emqx_ee_connector/src/emqx_ee_connector_opents.erl rename to apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl index 633e120bd..0366c9dc2 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_opents.erl +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_connector_opents). +-module(emqx_bridge_opents_connector). -behaviour(emqx_resource). 
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl index 636166d90..4b83fda3f 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl @@ -97,7 +97,7 @@ resource_type(clickhouse) -> emqx_ee_connector_clickhouse; resource_type(dynamo) -> emqx_ee_connector_dynamo; resource_type(rocketmq) -> emqx_ee_connector_rocketmq; resource_type(sqlserver) -> emqx_ee_connector_sqlserver; -resource_type(opents) -> emqx_ee_connector_opents. +resource_type(opents) -> emqx_bridge_opents_connector. fields(bridges) -> [ diff --git a/rel/i18n/zh/emqx_ee_connector_opents.hocon b/rel/i18n/zh/emqx_bridge_opents_connector.hocon similarity index 90% rename from rel/i18n/zh/emqx_ee_connector_opents.hocon rename to rel/i18n/zh/emqx_bridge_opents_connector.hocon index 7e58da9bd..f8a80b10e 100644 --- a/rel/i18n/zh/emqx_ee_connector_opents.hocon +++ b/rel/i18n/zh/emqx_bridge_opents_connector.hocon @@ -1,4 +1,4 @@ -emqx_ee_connector_opents { +emqx_bridge_opents_connector { server.desc: """服务器的地址。""" From 7af9c18caa391aa14d4886f7b807bec0d757ca0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9F=90=E6=96=87?= Date: Sun, 23 Apr 2023 15:43:18 +0800 Subject: [PATCH 112/263] fix: copy cluster-override.conf from old version --- apps/emqx_conf/src/emqx_conf.app.src | 2 +- apps/emqx_conf/src/emqx_conf_app.erl | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/apps/emqx_conf/src/emqx_conf.app.src b/apps/emqx_conf/src/emqx_conf.app.src index 234690374..03cd36522 100644 --- a/apps/emqx_conf/src/emqx_conf.app.src +++ b/apps/emqx_conf/src/emqx_conf.app.src @@ -1,6 +1,6 @@ {application, emqx_conf, [ {description, "EMQX configuration management"}, - {vsn, "0.1.17"}, + {vsn, "0.1.18"}, {registered, []}, {mod, {emqx_conf_app, []}}, {applications, [kernel, stdlib, emqx_ctl]}, diff --git a/apps/emqx_conf/src/emqx_conf_app.erl b/apps/emqx_conf/src/emqx_conf_app.erl index 35a79ea6e..9d6bb35d7 100644 --- a/apps/emqx_conf/src/emqx_conf_app.erl +++ b/apps/emqx_conf/src/emqx_conf_app.erl @@ -175,7 +175,7 @@ copy_override_conf_from_core_node() -> _ -> [{ok, Info} | _] = lists:sort(fun conf_sort/2, Ready), #{node := Node, conf := RawOverrideConf, tnx_id := TnxId} = Info, - HasDeprecatedFile = maps:get(has_deprecated_file, Info, false), + HasDeprecatedFile = has_deprecated_file(Info), ?SLOG(debug, #{ msg => "copy_cluster_conf_from_core_node_success", node => Node, @@ -227,3 +227,16 @@ sync_data_from_node(Node) -> ?SLOG(emergency, #{node => Node, msg => "sync_data_from_node_failed", reason => Error}), error(Error) end. + +has_deprecated_file(#{node := Node} = Info) -> + case maps:find(has_deprecated_file, Info) of + {ok, HasDeprecatedFile} -> + HasDeprecatedFile; + error -> + %% The old version don't have emqx_config:has_deprecated_file/0 + Timeout = 5000, + {ok, File} = rpc:call( + Node, application, get_env, [emqx, cluster_override_conf_file], Timeout + ), + rpc:call(Node, filelib, is_regular, [File], Timeout) + end. 
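To make the peer-selection fallback above concrete (a sketch under assumed values, not code from the patch): a core node that already reports `has_deprecated_file` is trusted directly, while an older core node, whose info map lacks that key, is probed over RPC for its `cluster-override.conf`. The module and node names below are hypothetical.

-module(deprecated_file_fallback_sketch).
-export([demo/0]).

%% Mirrors the decision in has_deprecated_file/1 above, minus the RPC part.
decide(#{has_deprecated_file := Has}) -> {reported_by_peer, Has};
decide(#{node := Node}) -> {probe_cluster_override_conf_on, Node}.

demo() ->
    %% Peer on a release that already exposes emqx_config:has_deprecated_file/0.
    {reported_by_peer, true} =
        decide(#{node => 'emqx@core1', tnx_id => 3, has_deprecated_file => true}),
    %% Older peer: key absent, so the file has to be checked on that node.
    {probe_cluster_override_conf_on, 'emqx@core2'} =
        decide(#{node => 'emqx@core2', tnx_id => 3}),
    ok.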
From 38cebf2fdc5efadda7a6c46a9bf8cb58d0bfb46b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9F=90=E6=96=87?= Date: Sun, 23 Apr 2023 15:53:17 +0800 Subject: [PATCH 113/263] chore: add changelog for 10484 --- changes/ce/fix-10484.en.md | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 changes/ce/fix-10484.en.md diff --git a/changes/ce/fix-10484.en.md b/changes/ce/fix-10484.en.md new file mode 100644 index 000000000..d1a501384 --- /dev/null +++ b/changes/ce/fix-10484.en.md @@ -0,0 +1,3 @@ +Fix the issue that the priority of the configuration cannot be set during rolling upgrade. +For example, when authorization is modified in v5.0.21 and then upgraded v5.0.23 through rolling upgrade, +the authorization will be restored to the default. From 6dcecfed40b55a3b9f21323aca4561d72ca9db91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9F=90=E6=96=87?= Date: Sun, 23 Apr 2023 17:20:54 +0800 Subject: [PATCH 114/263] chore: make static_check happy --- apps/emqx/priv/bpapi.versions | 1 + apps/emqx_conf/src/emqx_conf_app.erl | 8 +- .../src/proto/emqx_conf_proto_v2.erl | 4 + .../src/proto/emqx_conf_proto_v3.erl | 114 ++++++++++++++++++ 4 files changed, 122 insertions(+), 5 deletions(-) create mode 100644 apps/emqx_conf/src/proto/emqx_conf_proto_v3.erl diff --git a/apps/emqx/priv/bpapi.versions b/apps/emqx/priv/bpapi.versions index db4765e3f..11bd4aa77 100644 --- a/apps/emqx/priv/bpapi.versions +++ b/apps/emqx/priv/bpapi.versions @@ -11,6 +11,7 @@ {emqx_cm,1}. {emqx_conf,1}. {emqx_conf,2}. +{emqx_conf,3}. {emqx_dashboard,1}. {emqx_delayed,1}. {emqx_exhook,1}. diff --git a/apps/emqx_conf/src/emqx_conf_app.erl b/apps/emqx_conf/src/emqx_conf_app.erl index 9d6bb35d7..fd0a56853 100644 --- a/apps/emqx_conf/src/emqx_conf_app.erl +++ b/apps/emqx_conf/src/emqx_conf_app.erl @@ -234,9 +234,7 @@ has_deprecated_file(#{node := Node} = Info) -> HasDeprecatedFile; error -> %% The old version don't have emqx_config:has_deprecated_file/0 - Timeout = 5000, - {ok, File} = rpc:call( - Node, application, get_env, [emqx, cluster_override_conf_file], Timeout - ), - rpc:call(Node, filelib, is_regular, [File], Timeout) + DataDir = emqx_conf_proto_v2:get_config(Node, [node, data_dir]), + File = filename:join([DataDir, "configs", "cluster-override.conf"]), + emqx_conf_proto_v3:file_exist(Node, File) end. diff --git a/apps/emqx_conf/src/proto/emqx_conf_proto_v2.erl b/apps/emqx_conf/src/proto/emqx_conf_proto_v2.erl index 97446ee9f..3bcf532f6 100644 --- a/apps/emqx_conf/src/proto/emqx_conf_proto_v2.erl +++ b/apps/emqx_conf/src/proto/emqx_conf_proto_v2.erl @@ -20,6 +20,7 @@ -export([ introduced_in/0, + deprecated_since/0, sync_data_from_node/1, get_config/2, get_config/3, @@ -41,6 +42,9 @@ introduced_in() -> "5.0.1". +deprecated_since() -> + "5.0.23". + -spec sync_data_from_node(node()) -> {ok, binary()} | emqx_rpc:badrpc(). sync_data_from_node(Node) -> rpc:call(Node, emqx_conf_app, sync_data_from_node, [], 20000). diff --git a/apps/emqx_conf/src/proto/emqx_conf_proto_v3.erl b/apps/emqx_conf/src/proto/emqx_conf_proto_v3.erl new file mode 100644 index 000000000..802436f98 --- /dev/null +++ b/apps/emqx_conf/src/proto/emqx_conf_proto_v3.erl @@ -0,0 +1,114 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_conf_proto_v3). + +-behaviour(emqx_bpapi). + +-export([ + introduced_in/0, + sync_data_from_node/1, + get_config/2, + get_config/3, + get_all/1, + + update/3, + update/4, + remove_config/2, + remove_config/3, + + reset/2, + reset/3, + + get_override_config_file/1, + file_exist/2 +]). + +-include_lib("emqx/include/bpapi.hrl"). + +introduced_in() -> + "5.0.24". + +-spec sync_data_from_node(node()) -> {ok, binary()} | emqx_rpc:badrpc(). +sync_data_from_node(Node) -> + rpc:call(Node, emqx_conf_app, sync_data_from_node, [], 20000). +-type update_config_key_path() :: [emqx_utils_maps:config_key(), ...]. + +-spec get_config(node(), emqx_utils_maps:config_key_path()) -> + term() | emqx_rpc:badrpc(). +get_config(Node, KeyPath) -> + rpc:call(Node, emqx, get_config, [KeyPath]). + +-spec get_config(node(), emqx_utils_maps:config_key_path(), _Default) -> + term() | emqx_rpc:badrpc(). +get_config(Node, KeyPath, Default) -> + rpc:call(Node, emqx, get_config, [KeyPath, Default]). + +-spec get_all(emqx_utils_maps:config_key_path()) -> emqx_rpc:multicall_result(). +get_all(KeyPath) -> + rpc:multicall(emqx_conf, get_node_and_config, [KeyPath], 5000). + +-spec update( + update_config_key_path(), + emqx_config:update_request(), + emqx_config:update_opts() +) -> {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. +update(KeyPath, UpdateReq, Opts) -> + emqx_cluster_rpc:multicall(emqx, update_config, [KeyPath, UpdateReq, Opts]). + +-spec update( + node(), + update_config_key_path(), + emqx_config:update_request(), + emqx_config:update_opts() +) -> + {ok, emqx_config:update_result()} + | {error, emqx_config:update_error()} + | emqx_rpc:badrpc(). +update(Node, KeyPath, UpdateReq, Opts) -> + rpc:call(Node, emqx, update_config, [KeyPath, UpdateReq, Opts], 5000). + +-spec remove_config(update_config_key_path(), emqx_config:update_opts()) -> + {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. +remove_config(KeyPath, Opts) -> + emqx_cluster_rpc:multicall(emqx, remove_config, [KeyPath, Opts]). + +-spec remove_config(node(), update_config_key_path(), emqx_config:update_opts()) -> + {ok, emqx_config:update_result()} + | {error, emqx_config:update_error()} + | emqx_rpc:badrpc(). +remove_config(Node, KeyPath, Opts) -> + rpc:call(Node, emqx, remove_config, [KeyPath, Opts], 5000). + +-spec reset(update_config_key_path(), emqx_config:update_opts()) -> + {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. +reset(KeyPath, Opts) -> + emqx_cluster_rpc:multicall(emqx, reset_config, [KeyPath, Opts]). + +-spec reset(node(), update_config_key_path(), emqx_config:update_opts()) -> + {ok, emqx_config:update_result()} + | {error, emqx_config:update_error()} + | emqx_rpc:badrpc(). +reset(Node, KeyPath, Opts) -> + rpc:call(Node, emqx, reset_config, [KeyPath, Opts]). + +-spec get_override_config_file([node()]) -> emqx_rpc:multicall_result(). +get_override_config_file(Nodes) -> + rpc:multicall(Nodes, emqx_conf_app, get_override_config_file, [], 20000). 
+ +-spec file_exist(node(), string()) -> emqx_rpc:badrpc() | boolean(). +file_exist(Node, File) -> + rpc:call(Node, filelib, is_regular, [File], 5000). From c2e35a42b0102db0c0053289b3129b7e05e065d8 Mon Sep 17 00:00:00 2001 From: firest Date: Sun, 23 Apr 2023 17:47:00 +0800 Subject: [PATCH 115/263] fix(limiter): optimize the instance of limiter We can reduce a limiter container with all types are `infinity` to just a `infinity` atom --- apps/emqx/src/emqx_channel.erl | 4 +- apps/emqx/src/emqx_connection.erl | 86 ++++++++++--------- .../src/emqx_limiter_container.erl | 45 +++++++--- apps/emqx/src/emqx_ws_connection.erl | 85 +++++++++--------- apps/emqx/test/emqx_connection_SUITE.erl | 30 ++++--- apps/emqx/test/emqx_ws_connection_SUITE.erl | 7 +- 6 files changed, 153 insertions(+), 104 deletions(-) diff --git a/apps/emqx/src/emqx_channel.erl b/apps/emqx/src/emqx_channel.erl index 8a936067e..862b72c06 100644 --- a/apps/emqx/src/emqx_channel.erl +++ b/apps/emqx/src/emqx_channel.erl @@ -89,7 +89,7 @@ %% Authentication Data Cache auth_cache :: maybe(map()), %% Quota checkers - quota :: maybe(emqx_limiter_container:limiter()), + quota :: emqx_limiter_container:limiter(), %% Timers timers :: #{atom() => disabled | maybe(reference())}, %% Conn State @@ -760,7 +760,7 @@ do_publish( handle_out(disconnect, RC, Channel) end. -ensure_quota(_, Channel = #channel{quota = undefined}) -> +ensure_quota(_, Channel = #channel{quota = infinity}) -> Channel; ensure_quota(PubRes, Channel = #channel{quota = Limiter}) -> Cnt = lists:foldl( diff --git a/apps/emqx/src/emqx_connection.erl b/apps/emqx/src/emqx_connection.erl index 27b6f3e84..79654e510 100644 --- a/apps/emqx/src/emqx_connection.erl +++ b/apps/emqx/src/emqx_connection.erl @@ -111,7 +111,7 @@ listener :: {Type :: atom(), Name :: atom()}, %% Limiter - limiter :: maybe(limiter()), + limiter :: limiter(), %% limiter buffer for overload use limiter_buffer :: queue:queue(pending_req()), @@ -974,55 +974,61 @@ handle_cast(Req, State) -> list(any()), state() ) -> _. 
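%% Reading note for the three clauses added below (comment added for clarity;
%% not part of the original patch): the first clause is the new fast path for
%% the collapsed `infinity' limiter, the second handles the normal case when
%% no retry timer is pending, and the third buffers the request in
%% limiter_buffer while a pause/retry timer is still running.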
+ +check_limiter( + _Needs, + Data, + WhenOk, + Msgs, + #state{limiter = infinity} = State +) -> + WhenOk(Data, Msgs, State); check_limiter( Needs, Data, WhenOk, Msgs, - #state{ - limiter = Limiter, - limiter_timer = LimiterTimer, - limiter_buffer = Cache - } = State -) when Limiter =/= undefined -> - case LimiterTimer of - undefined -> - case emqx_limiter_container:check_list(Needs, Limiter) of - {ok, Limiter2} -> - WhenOk(Data, Msgs, State#state{limiter = Limiter2}); - {pause, Time, Limiter2} -> - ?SLOG(debug, #{ - msg => "pause_time_dueto_rate_limit", - needs => Needs, - time_in_ms => Time - }), + #state{limiter_timer = undefined, limiter = Limiter} = State +) -> + case emqx_limiter_container:check_list(Needs, Limiter) of + {ok, Limiter2} -> + WhenOk(Data, Msgs, State#state{limiter = Limiter2}); + {pause, Time, Limiter2} -> + ?SLOG(debug, #{ + msg => "pause_time_dueto_rate_limit", + needs => Needs, + time_in_ms => Time + }), - Retry = #retry{ - types = [Type || {_, Type} <- Needs], - data = Data, - next = WhenOk - }, + Retry = #retry{ + types = [Type || {_, Type} <- Needs], + data = Data, + next = WhenOk + }, - Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), + Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), - TRef = start_timer(Time, limit_timeout), + TRef = start_timer(Time, limit_timeout), - {ok, State#state{ - limiter = Limiter3, - limiter_timer = TRef - }}; - {drop, Limiter2} -> - {ok, State#state{limiter = Limiter2}} - end; - _ -> - %% if there has a retry timer, - %% cache the operation and execute it after the retry is over - %% the maximum length of the cache queue is equal to the active_n - New = #pending_req{need = Needs, data = Data, next = WhenOk}, - {ok, State#state{limiter_buffer = queue:in(New, Cache)}} + {ok, State#state{ + limiter = Limiter3, + limiter_timer = TRef + }}; + {drop, Limiter2} -> + {ok, State#state{limiter = Limiter2}} end; -check_limiter(_, Data, WhenOk, Msgs, State) -> - WhenOk(Data, Msgs, State). +check_limiter( + Needs, + Data, + WhenOk, + _Msgs, + #state{limiter_buffer = Cache} = State +) -> + %% if there has a retry timer, + %% cache the operation and execute it after the retry is over + %% the maximum length of the cache queue is equal to the active_n + New = #pending_req{need = Needs, data = Data, next = WhenOk}, + {ok, State#state{limiter_buffer = queue:in(New, Cache)}}. %% try to perform a retry -spec retry_limiter(state()) -> _. diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_container.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_container.erl index ea02152a9..6a9101a0f 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_container.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_container.erl @@ -34,16 +34,18 @@ -export_type([container/0, check_result/0]). --type container() :: #{ - limiter_type() => undefined | limiter(), - %% the retry context of the limiter - retry_key() => - undefined - | retry_context() - | future(), - %% the retry context of the container - retry_ctx := undefined | any() -}. +-type container() :: + infinity + | #{ + limiter_type() => undefined | limiter(), + %% the retry context of the limiter + retry_key() => + undefined + | retry_context() + | future(), + %% the retry context of the container + retry_ctx := undefined | any() + }. -type future() :: pos_integer(). -type limiter_id() :: emqx_limiter_schema:limiter_id(). 
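%% Worked example of the collapsed representation (added comment; not part of
%% the original patch): when every limiter in the container is unlimited, the
%% container is just the atom `infinity', and the check/retry functions in the
%% following hunks short-circuit on it, e.g.
%%
%%   {ok, infinity} = emqx_limiter_container:check(1, bytes, infinity),
%%   {ok, infinity} = emqx_limiter_container:check_list([{10, messages}], infinity).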
@@ -78,7 +80,20 @@ get_limiter_by_types(Id, Types, BucketCfgs) -> {ok, Limiter} = emqx_limiter_server:connect(Id, Type, BucketCfgs), add_new(Type, Limiter, Acc) end, - lists:foldl(Init, #{retry_ctx => undefined}, Types). + Container = lists:foldl(Init, #{retry_ctx => undefined}, Types), + case + lists:all( + fun(Type) -> + maps:get(Type, Container) =:= infinity + end, + Types + ) + of + true -> + infinity; + _ -> + Container + end. -spec add_new(limiter_type(), limiter(), container()) -> container(). add_new(Type, Limiter, Container) -> @@ -89,11 +104,15 @@ add_new(Type, Limiter, Container) -> %% @doc check the specified limiter -spec check(pos_integer(), limiter_type(), container()) -> check_result(). +check(_Need, _Type, infinity) -> + {ok, infinity}; check(Need, Type, Container) -> check_list([{Need, Type}], Container). %% @doc check multiple limiters -spec check_list(list({pos_integer(), limiter_type()}), container()) -> check_result(). +check_list(_Need, infinity) -> + {ok, infinity}; check_list([{Need, Type} | T], Container) -> Limiter = maps:get(Type, Container), case emqx_htb_limiter:check(Need, Limiter) of @@ -121,11 +140,15 @@ check_list([], Container) -> %% @doc retry the specified limiter -spec retry(limiter_type(), container()) -> check_result(). +retry(_Type, infinity) -> + {ok, infinity}; retry(Type, Container) -> retry_list([Type], Container). %% @doc retry multiple limiters -spec retry_list(list(limiter_type()), container()) -> check_result(). +retry_list(_Types, infinity) -> + {ok, infinity}; retry_list([Type | T], Container) -> Key = ?RETRY_KEY(Type), case Container of diff --git a/apps/emqx/src/emqx_ws_connection.erl b/apps/emqx/src/emqx_ws_connection.erl index faf62f98d..00fe545eb 100644 --- a/apps/emqx/src/emqx_ws_connection.erl +++ b/apps/emqx/src/emqx_ws_connection.erl @@ -90,7 +90,7 @@ listener :: {Type :: atom(), Name :: atom()}, %% Limiter - limiter :: maybe(container()), + limiter :: container(), %% cache operation when overload limiter_cache :: queue:queue(cache()), @@ -579,54 +579,61 @@ handle_timeout(TRef, TMsg, State) -> list(any()), state() ) -> state(). 
+check_limiter( + _Needs, + Data, + WhenOk, + Msgs, + #state{limiter = infinity} = State +) -> + WhenOk(Data, Msgs, State); check_limiter( Needs, Data, WhenOk, Msgs, - #state{ - limiter = Limiter, - limiter_timer = LimiterTimer, - limiter_cache = Cache - } = State + #state{limiter_timer = undefined, limiter = Limiter} = State ) -> - case LimiterTimer of - undefined -> - case emqx_limiter_container:check_list(Needs, Limiter) of - {ok, Limiter2} -> - WhenOk(Data, Msgs, State#state{limiter = Limiter2}); - {pause, Time, Limiter2} -> - ?SLOG(debug, #{ - msg => "pause_time_due_to_rate_limit", - needs => Needs, - time_in_ms => Time - }), + case emqx_limiter_container:check_list(Needs, Limiter) of + {ok, Limiter2} -> + WhenOk(Data, Msgs, State#state{limiter = Limiter2}); + {pause, Time, Limiter2} -> + ?SLOG(debug, #{ + msg => "pause_time_due_to_rate_limit", + needs => Needs, + time_in_ms => Time + }), - Retry = #retry{ - types = [Type || {_, Type} <- Needs], - data = Data, - next = WhenOk - }, + Retry = #retry{ + types = [Type || {_, Type} <- Needs], + data = Data, + next = WhenOk + }, - Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), + Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), - TRef = start_timer(Time, limit_timeout), + TRef = start_timer(Time, limit_timeout), - enqueue( - {active, false}, - State#state{ - sockstate = blocked, - limiter = Limiter3, - limiter_timer = TRef - } - ); - {drop, Limiter2} -> - {ok, State#state{limiter = Limiter2}} - end; - _ -> - New = #cache{need = Needs, data = Data, next = WhenOk}, - State#state{limiter_cache = queue:in(New, Cache)} - end. + enqueue( + {active, false}, + State#state{ + sockstate = blocked, + limiter = Limiter3, + limiter_timer = TRef + } + ); + {drop, Limiter2} -> + {ok, State#state{limiter = Limiter2}} + end; +check_limiter( + Needs, + Data, + WhenOk, + _Msgs, + #state{limiter_cache = Cache} = State +) -> + New = #cache{need = Needs, data = Data, next = WhenOk}, + State#state{limiter_cache = queue:in(New, Cache)}. -spec retry_limiter(state()) -> state(). 
retry_limiter(#state{limiter = Limiter} = State) -> diff --git a/apps/emqx/test/emqx_connection_SUITE.erl b/apps/emqx/test/emqx_connection_SUITE.erl index f24c1c895..6b89227ab 100644 --- a/apps/emqx/test/emqx_connection_SUITE.erl +++ b/apps/emqx/test/emqx_connection_SUITE.erl @@ -38,8 +38,6 @@ init_per_suite(Config) -> ok = meck:new(emqx_cm, [passthrough, no_history, no_link]), ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end), ok = meck:expect(emqx_cm, mark_channel_disconnected, fun(_) -> ok end), - %% Meck Limiter - ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]), %% Meck Pd ok = meck:new(emqx_pd, [passthrough, no_history, no_link]), %% Meck Metrics @@ -67,7 +65,6 @@ end_per_suite(_Config) -> ok = meck:unload(emqx_transport), catch meck:unload(emqx_channel), ok = meck:unload(emqx_cm), - ok = meck:unload(emqx_htb_limiter), ok = meck:unload(emqx_pd), ok = meck:unload(emqx_metrics), ok = meck:unload(emqx_hooks), @@ -421,6 +418,14 @@ t_ensure_rate_limit(_) -> {ok, [], State1} = emqx_connection:check_limiter([], [], WhenOk, [], st(#{limiter => Limiter})), ?assertEqual(Limiter, emqx_connection:info(limiter, State1)), + ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]), + + ok = meck:expect( + emqx_htb_limiter, + make_infinity_limiter, + fun() -> non_infinity end + ), + ok = meck:expect( emqx_htb_limiter, check, @@ -431,10 +436,10 @@ t_ensure_rate_limit(_) -> [], WhenOk, [], - st(#{limiter => Limiter}) + st(#{limiter => init_limiter()}) ), meck:unload(emqx_htb_limiter), - ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]), + ?assertNotEqual(undefined, emqx_connection:info(limiter_timer, State2)). t_activate_socket(_) -> @@ -707,7 +712,14 @@ init_limiter() -> limiter_cfg() -> Cfg = bucket_cfg(), - Client = #{ + Client = client_cfg(), + #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}. + +bucket_cfg() -> + #{rate => infinity, initial => 0, burst => 0}. + +client_cfg() -> + #{ rate => infinity, initial => 0, burst => 0, @@ -715,11 +727,7 @@ limiter_cfg() -> divisible => false, max_retry_time => timer:seconds(5), failure_strategy => force - }, - #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}. - -bucket_cfg() -> - #{rate => infinity, initial => 0, burst => 0}. + }. 
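%% Why t_ensure_rate_limit above now mecks emqx_htb_limiter:make_infinity_limiter
%% to return a non-infinity value (explanatory comment; not part of the original
%% patch): with the container optimization earlier in this patch, a config in
%% which every rate is `infinity' collapses the whole limiter to the atom
%% `infinity', so check_limiter/5 would succeed immediately and the meck'd
%% emqx_htb_limiter:check/2 pause path would never run. Forcing a non-infinity
%% limiter keeps the container as a map and keeps the pause branch under test.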
add_bucket() -> Cfg = bucket_cfg(), diff --git a/apps/emqx/test/emqx_ws_connection_SUITE.erl b/apps/emqx/test/emqx_ws_connection_SUITE.erl index 1ae23361e..813656e6a 100644 --- a/apps/emqx/test/emqx_ws_connection_SUITE.erl +++ b/apps/emqx/test/emqx_ws_connection_SUITE.erl @@ -443,7 +443,12 @@ t_websocket_info_deliver(_) -> t_websocket_info_timeout_limiter(_) -> Ref = make_ref(), - LimiterT = init_limiter(), + {ok, Rate} = emqx_limiter_schema:to_rate("50MB"), + LimiterT = init_limiter(#{ + bytes => bucket_cfg(), + messages => bucket_cfg(), + client => #{bytes => client_cfg(Rate)} + }), Next = fun emqx_ws_connection:when_msg_in/3, Limiter = emqx_limiter_container:set_retry_context({retry, [], [], Next}, LimiterT), Event = {timeout, Ref, limit_timeout}, From 2f2f32ac7bffffeab9c409c5b3ee1b9fc5a634c3 Mon Sep 17 00:00:00 2001 From: firest Date: Mon, 24 Apr 2023 10:52:30 +0800 Subject: [PATCH 116/263] chore: update changes --- changes/ce/perf-10487.en.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 changes/ce/perf-10487.en.md diff --git a/changes/ce/perf-10487.en.md b/changes/ce/perf-10487.en.md new file mode 100644 index 000000000..6f2b2d156 --- /dev/null +++ b/changes/ce/perf-10487.en.md @@ -0,0 +1 @@ +Optimize the instance of limiter for whose rate is `infinity` to reduce memory and CPU usage. From 7b51a49f84df0c5f96872edd658ad25b11625798 Mon Sep 17 00:00:00 2001 From: firest Date: Mon, 24 Apr 2023 14:09:23 +0800 Subject: [PATCH 117/263] fix(limiter): remove the default limit of connect rate --- .../emqx_limiter/src/emqx_limiter_schema.erl | 29 ++++++++----------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl index c762a0f1d..ae8529470 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl @@ -36,11 +36,11 @@ ]). -define(KILOBYTE, 1024). --define(BUCKET_KEYS, [ - {bytes, bucket_infinity}, - {messages, bucket_infinity}, - {connection, bucket_limit}, - {message_routing, bucket_infinity} +-define(LISTENER_BUCKET_KEYS, [ + bytes, + messages, + connection, + message_routing ]). -type limiter_type() :: @@ -132,10 +132,8 @@ fields(node_opts) -> ]; fields(client_fields) -> client_fields(types(), #{default => #{}}); -fields(bucket_infinity) -> +fields(bucket_opts) -> fields_of_bucket(<<"infinity">>); -fields(bucket_limit) -> - fields_of_bucket(<<"1000/s">>); fields(client_opts) -> [ {rate, ?HOCON(rate(), #{default => <<"infinity">>, desc => ?DESC(rate)})}, @@ -194,10 +192,9 @@ fields(client_opts) -> )} ]; fields(listener_fields) -> - composite_bucket_fields(?BUCKET_KEYS, listener_client_fields); + composite_bucket_fields(?LISTENER_BUCKET_KEYS, listener_client_fields); fields(listener_client_fields) -> - {Types, _} = lists:unzip(?BUCKET_KEYS), - client_fields(Types, #{required => false}); + client_fields(?LISTENER_BUCKET_KEYS, #{required => false}); fields(Type) -> simple_bucket_field(Type). @@ -205,10 +202,8 @@ desc(limiter) -> "Settings for the rate limiter."; desc(node_opts) -> "Settings for the limiter of the node level."; -desc(bucket_infinity) -> +desc(bucket_opts) -> "Settings for the bucket."; -desc(bucket_limit) -> - desc(bucket_infinity); desc(client_opts) -> "Settings for the client in bucket level."; desc(client_fields) -> @@ -360,7 +355,7 @@ apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit). 
%% A bucket with only one type simple_bucket_field(Type) when is_atom(Type) -> - fields(bucket_infinity) ++ + fields(bucket_opts) ++ [ {client, ?HOCON( @@ -378,13 +373,13 @@ simple_bucket_field(Type) when is_atom(Type) -> composite_bucket_fields(Types, ClientRef) -> [ {Type, - ?HOCON(?R_REF(?MODULE, Opts), #{ + ?HOCON(?R_REF(?MODULE, bucket_opts), #{ desc => ?DESC(?MODULE, Type), required => false, importance => importance_of_type(Type), aliases => alias_of_type(Type) })} - || {Type, Opts} <- Types + || Type <- Types ] ++ [ {client, From 24cecae1f8fd5cbfb7025335a6a1dda2166c4222 Mon Sep 17 00:00:00 2001 From: firest Date: Mon, 24 Apr 2023 14:15:45 +0800 Subject: [PATCH 118/263] chore: update changes --- changes/ce/perf-10490.en.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 changes/ce/perf-10490.en.md diff --git a/changes/ce/perf-10490.en.md b/changes/ce/perf-10490.en.md new file mode 100644 index 000000000..5c1c183a5 --- /dev/null +++ b/changes/ce/perf-10490.en.md @@ -0,0 +1 @@ +Remove the default limit of connect rate which used to be `1000/s` From 0b1a2dd1939428af2131490007a492c598363898 Mon Sep 17 00:00:00 2001 From: Zhongwen Deng Date: Mon, 24 Apr 2023 14:40:30 +0800 Subject: [PATCH 119/263] feat: rename etcd.ssl to etcd.ssl_options --- apps/emqx_conf/src/emqx_conf.app.src | 2 +- apps/emqx_conf/src/emqx_conf_schema.erl | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/apps/emqx_conf/src/emqx_conf.app.src b/apps/emqx_conf/src/emqx_conf.app.src index 234690374..03cd36522 100644 --- a/apps/emqx_conf/src/emqx_conf.app.src +++ b/apps/emqx_conf/src/emqx_conf.app.src @@ -1,6 +1,6 @@ {application, emqx_conf, [ {description, "EMQX configuration management"}, - {vsn, "0.1.17"}, + {vsn, "0.1.18"}, {registered, []}, {mod, {emqx_conf_app, []}}, {applications, [kernel, stdlib, emqx_ctl]}, diff --git a/apps/emqx_conf/src/emqx_conf_schema.erl b/apps/emqx_conf/src/emqx_conf_schema.erl index f3f014321..abccca9fb 100644 --- a/apps/emqx_conf/src/emqx_conf_schema.erl +++ b/apps/emqx_conf/src/emqx_conf_schema.erl @@ -335,11 +335,12 @@ fields(cluster_etcd) -> desc => ?DESC(cluster_etcd_node_ttl) } )}, - {"ssl", + {"ssl_options", sc( ?R_REF(emqx_schema, "ssl_client_opts"), #{ desc => ?DESC(cluster_etcd_ssl), + alias => [ssl], 'readOnly' => true } )} From dacf92c4ab892867467d31f0d6cd947cd689a791 Mon Sep 17 00:00:00 2001 From: Zhongwen Deng Date: Mon, 24 Apr 2023 14:45:53 +0800 Subject: [PATCH 120/263] chore: rename etcd.ssl changelog --- changes/ce/feat-10491.en.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 changes/ce/feat-10491.en.md diff --git a/changes/ce/feat-10491.en.md b/changes/ce/feat-10491.en.md new file mode 100644 index 000000000..e1c38b6bb --- /dev/null +++ b/changes/ce/feat-10491.en.md @@ -0,0 +1 @@ +Rename `etcd.ssl` to `etcd.ssl_options` to keep all of SSL options consistent in the configuration file. 
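To make the effect of the rename concrete (an illustrative sketch, not taken from the patch; the values are made up): with `alias => [ssl]` kept on the new field, a config written against the old key should still load, while new configs use `ssl_options`.

%% Hypothetical raw configs; both shapes are meant to resolve to the same
%% schema field via the alias.
old_style_etcd_conf() ->
    #{<<"cluster">> => #{<<"etcd">> => #{<<"ssl">> => #{<<"enable">> => true}}}}.

new_style_etcd_conf() ->
    #{<<"cluster">> => #{<<"etcd">> => #{<<"ssl_options">> => #{<<"enable">> => true}}}}.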
From db0c951e3013e2e59adc7dd101cd70ebb04ad9f3 Mon Sep 17 00:00:00 2001 From: Zhongwen Deng Date: Mon, 24 Apr 2023 15:27:42 +0800 Subject: [PATCH 121/263] feat: don't do rpc call to check deprecated file --- apps/emqx/priv/bpapi.versions | 1 - apps/emqx_conf/src/emqx_conf_app.erl | 12 +- .../src/proto/emqx_conf_proto_v2.erl | 4 - .../src/proto/emqx_conf_proto_v3.erl | 114 ------------------ 4 files changed, 7 insertions(+), 124 deletions(-) delete mode 100644 apps/emqx_conf/src/proto/emqx_conf_proto_v3.erl diff --git a/apps/emqx/priv/bpapi.versions b/apps/emqx/priv/bpapi.versions index 11bd4aa77..db4765e3f 100644 --- a/apps/emqx/priv/bpapi.versions +++ b/apps/emqx/priv/bpapi.versions @@ -11,7 +11,6 @@ {emqx_cm,1}. {emqx_conf,1}. {emqx_conf,2}. -{emqx_conf,3}. {emqx_dashboard,1}. {emqx_delayed,1}. {emqx_exhook,1}. diff --git a/apps/emqx_conf/src/emqx_conf_app.erl b/apps/emqx_conf/src/emqx_conf_app.erl index fd0a56853..fbfb97a79 100644 --- a/apps/emqx_conf/src/emqx_conf_app.erl +++ b/apps/emqx_conf/src/emqx_conf_app.erl @@ -66,7 +66,8 @@ get_override_config_file() -> conf => Conf, tnx_id => TnxId, node => Node, - has_deprecated_file => HasDeprecateFile + has_deprecated_file => HasDeprecateFile, + release => emqx_app:get_release() } end, case mria:ro_transaction(?CLUSTER_RPC_SHARD, Fun) of @@ -180,6 +181,8 @@ copy_override_conf_from_core_node() -> msg => "copy_cluster_conf_from_core_node_success", node => Node, has_deprecated_file => HasDeprecatedFile, + local_release => emqx_app:get_release(), + remote_release => maps:get(release, Info, "before_v5.0.24|e5.0.3"), data_dir => emqx:data_dir(), tnx_id => TnxId }), @@ -228,13 +231,12 @@ sync_data_from_node(Node) -> error(Error) end. -has_deprecated_file(#{node := Node} = Info) -> +has_deprecated_file(#{conf := Conf} = Info) -> case maps:find(has_deprecated_file, Info) of {ok, HasDeprecatedFile} -> HasDeprecatedFile; error -> %% The old version don't have emqx_config:has_deprecated_file/0 - DataDir = emqx_conf_proto_v2:get_config(Node, [node, data_dir]), - File = filename:join([DataDir, "configs", "cluster-override.conf"]), - emqx_conf_proto_v3:file_exist(Node, File) + %% Conf is not empty if deprecated file is found. + Conf =/= #{} end. diff --git a/apps/emqx_conf/src/proto/emqx_conf_proto_v2.erl b/apps/emqx_conf/src/proto/emqx_conf_proto_v2.erl index 3bcf532f6..97446ee9f 100644 --- a/apps/emqx_conf/src/proto/emqx_conf_proto_v2.erl +++ b/apps/emqx_conf/src/proto/emqx_conf_proto_v2.erl @@ -20,7 +20,6 @@ -export([ introduced_in/0, - deprecated_since/0, sync_data_from_node/1, get_config/2, get_config/3, @@ -42,9 +41,6 @@ introduced_in() -> "5.0.1". -deprecated_since() -> - "5.0.23". - -spec sync_data_from_node(node()) -> {ok, binary()} | emqx_rpc:badrpc(). sync_data_from_node(Node) -> rpc:call(Node, emqx_conf_app, sync_data_from_node, [], 20000). diff --git a/apps/emqx_conf/src/proto/emqx_conf_proto_v3.erl b/apps/emqx_conf/src/proto/emqx_conf_proto_v3.erl deleted file mode 100644 index 802436f98..000000000 --- a/apps/emqx_conf/src/proto/emqx_conf_proto_v3.erl +++ /dev/null @@ -1,114 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. 
-%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%-------------------------------------------------------------------- - --module(emqx_conf_proto_v3). - --behaviour(emqx_bpapi). - --export([ - introduced_in/0, - sync_data_from_node/1, - get_config/2, - get_config/3, - get_all/1, - - update/3, - update/4, - remove_config/2, - remove_config/3, - - reset/2, - reset/3, - - get_override_config_file/1, - file_exist/2 -]). - --include_lib("emqx/include/bpapi.hrl"). - -introduced_in() -> - "5.0.24". - --spec sync_data_from_node(node()) -> {ok, binary()} | emqx_rpc:badrpc(). -sync_data_from_node(Node) -> - rpc:call(Node, emqx_conf_app, sync_data_from_node, [], 20000). --type update_config_key_path() :: [emqx_utils_maps:config_key(), ...]. - --spec get_config(node(), emqx_utils_maps:config_key_path()) -> - term() | emqx_rpc:badrpc(). -get_config(Node, KeyPath) -> - rpc:call(Node, emqx, get_config, [KeyPath]). - --spec get_config(node(), emqx_utils_maps:config_key_path(), _Default) -> - term() | emqx_rpc:badrpc(). -get_config(Node, KeyPath, Default) -> - rpc:call(Node, emqx, get_config, [KeyPath, Default]). - --spec get_all(emqx_utils_maps:config_key_path()) -> emqx_rpc:multicall_result(). -get_all(KeyPath) -> - rpc:multicall(emqx_conf, get_node_and_config, [KeyPath], 5000). - --spec update( - update_config_key_path(), - emqx_config:update_request(), - emqx_config:update_opts() -) -> {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. -update(KeyPath, UpdateReq, Opts) -> - emqx_cluster_rpc:multicall(emqx, update_config, [KeyPath, UpdateReq, Opts]). - --spec update( - node(), - update_config_key_path(), - emqx_config:update_request(), - emqx_config:update_opts() -) -> - {ok, emqx_config:update_result()} - | {error, emqx_config:update_error()} - | emqx_rpc:badrpc(). -update(Node, KeyPath, UpdateReq, Opts) -> - rpc:call(Node, emqx, update_config, [KeyPath, UpdateReq, Opts], 5000). - --spec remove_config(update_config_key_path(), emqx_config:update_opts()) -> - {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. -remove_config(KeyPath, Opts) -> - emqx_cluster_rpc:multicall(emqx, remove_config, [KeyPath, Opts]). - --spec remove_config(node(), update_config_key_path(), emqx_config:update_opts()) -> - {ok, emqx_config:update_result()} - | {error, emqx_config:update_error()} - | emqx_rpc:badrpc(). -remove_config(Node, KeyPath, Opts) -> - rpc:call(Node, emqx, remove_config, [KeyPath, Opts], 5000). - --spec reset(update_config_key_path(), emqx_config:update_opts()) -> - {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. -reset(KeyPath, Opts) -> - emqx_cluster_rpc:multicall(emqx, reset_config, [KeyPath, Opts]). - --spec reset(node(), update_config_key_path(), emqx_config:update_opts()) -> - {ok, emqx_config:update_result()} - | {error, emqx_config:update_error()} - | emqx_rpc:badrpc(). -reset(Node, KeyPath, Opts) -> - rpc:call(Node, emqx, reset_config, [KeyPath, Opts]). - --spec get_override_config_file([node()]) -> emqx_rpc:multicall_result(). -get_override_config_file(Nodes) -> - rpc:multicall(Nodes, emqx_conf_app, get_override_config_file, [], 20000). 
- --spec file_exist(node(), string()) -> emqx_rpc:badrpc() | boolean(). -file_exist(Node, File) -> - rpc:call(Node, filelib, is_regular, [File], 5000). From feeb3df994c992f4246c0bca5613346931822e0d Mon Sep 17 00:00:00 2001 From: firest Date: Mon, 24 Apr 2023 18:09:17 +0800 Subject: [PATCH 122/263] fix(api): add limiter API back which deleted by mistake --- apps/emqx_management/src/emqx_mgmt_api_configs.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/apps/emqx_management/src/emqx_mgmt_api_configs.erl b/apps/emqx_management/src/emqx_mgmt_api_configs.erl index af203dfe9..bc9aaf768 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_configs.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_configs.erl @@ -42,6 +42,7 @@ <<"alarm">>, <<"sys_topics">>, <<"sysmon">>, + <<"limiter">>, <<"log">>, <<"persistent_session_store">>, <<"zones">> From 6110aad23f135e819197c0600c48ce688118d0f6 Mon Sep 17 00:00:00 2001 From: firest Date: Mon, 24 Apr 2023 18:16:28 +0800 Subject: [PATCH 123/263] chore: bump version && update changes --- apps/emqx_management/src/emqx_management.app.src | 2 +- changes/ce/fix-10495.en.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changes/ce/fix-10495.en.md diff --git a/apps/emqx_management/src/emqx_management.app.src b/apps/emqx_management/src/emqx_management.app.src index f423213af..ec282b60b 100644 --- a/apps/emqx_management/src/emqx_management.app.src +++ b/apps/emqx_management/src/emqx_management.app.src @@ -2,7 +2,7 @@ {application, emqx_management, [ {description, "EMQX Management API and CLI"}, % strict semver, bump manually! - {vsn, "5.0.19"}, + {vsn, "5.0.20"}, {modules, []}, {registered, [emqx_management_sup]}, {applications, [kernel, stdlib, emqx_plugins, minirest, emqx, emqx_ctl]}, diff --git a/changes/ce/fix-10495.en.md b/changes/ce/fix-10495.en.md new file mode 100644 index 000000000..222f3dd5a --- /dev/null +++ b/changes/ce/fix-10495.en.md @@ -0,0 +1 @@ +Add the limiter API `/configs/limiter` which was deleted by mistake back. From 8cfb24b5b435936759b98fc0e6b53576755637bf Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 6 Apr 2023 15:04:44 -0300 Subject: [PATCH 124/263] docs(kafka_bridge): minor fixes to license and readme Fixes https://emqx.atlassian.net/browse/EMQX-9481 --- LICENSE | 6 +++++- apps/emqx_bridge_kafka/README.md | 15 +++++++++++++-- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/LICENSE b/LICENSE index 2a081b135..4ed190bda 100644 --- a/LICENSE +++ b/LICENSE @@ -4,4 +4,8 @@ For EMQX: Apache License 2.0, see APL.txt, which applies to all source files except for lib-ee sub-directory. For EMQX Enterprise (since version 5.0): Business Source License 1.1, -see lib-ee/BSL.txt, which applies to source code in lib-ee sub-directory. +see lib-ee/BSL.txt, which applies to source code in lib-ee +sub-directory and some of the apps under the apps directory. + +Source code under apps that uses BSL License: +- apps/emqx_bridge_kafka diff --git a/apps/emqx_bridge_kafka/README.md b/apps/emqx_bridge_kafka/README.md index 72cbeecc6..80978ff10 100644 --- a/apps/emqx_bridge_kafka/README.md +++ b/apps/emqx_bridge_kafka/README.md @@ -10,10 +10,21 @@ workers from `emqx_resource`. It implements the connection management and interaction without need for a separate connector app, since it's not used by authentication and authorization applications. -## Contributing +# Documentation links + +For more information on Apache Kafka, please see its [official +site](https://kafka.apache.org/). 
+ +# Configurations + +Please see [our official +documentation](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-kafka.html) +for more detailed info. + +# Contributing Please see our [contributing.md](../../CONTRIBUTING.md). -## License +# License See [BSL](./BSL.txt). From 26883eec02e52ee5c35afdf46f47803085c2b978 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 13 Apr 2023 15:38:18 -0300 Subject: [PATCH 125/263] test(kafka): fix innocuous test assertion --- .../test/emqx_bridge_kafka_impl_consumer_SUITE.erl | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl index 08fbf5e15..bb8930280 100644 --- a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl +++ b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl @@ -1156,11 +1156,12 @@ t_start_and_consume_ok(Config) -> ), %% Check that the bridge probe API doesn't leak atoms. - ProbeRes = probe_bridge_api(Config), - ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes), + ProbeRes0 = probe_bridge_api(Config), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), AtomsBefore = erlang:system_info(atom_count), %% Probe again; shouldn't have created more atoms. - ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes), + ProbeRes1 = probe_bridge_api(Config), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), AtomsAfter = erlang:system_info(atom_count), ?assertEqual(AtomsBefore, AtomsAfter), @@ -1259,11 +1260,12 @@ t_multiple_topic_mappings(Config) -> {ok, _} = snabbkaffe:receive_events(SRef0), %% Check that the bridge probe API doesn't leak atoms. - ProbeRes = probe_bridge_api(Config), - ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes), + ProbeRes0 = probe_bridge_api(Config), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), AtomsBefore = erlang:system_info(atom_count), %% Probe again; shouldn't have created more atoms. - ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes), + ProbeRes1 = probe_bridge_api(Config), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), AtomsAfter = erlang:system_info(atom_count), ?assertEqual(AtomsBefore, AtomsAfter), From 4bcfbea056719be91158be4d7eb2eff6299c83f4 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 13 Apr 2023 16:26:03 -0300 Subject: [PATCH 126/263] refactor(kafka_consumer): follow up refactoring requested from previous pull request Follow up from https://github.com/emqx/emqx/pull/10273 --- .../src/emqx_bridge_kafka_impl_consumer.erl | 29 +++++++++++-------- 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl index fdfa3300c..c549b3467 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl @@ -179,7 +179,12 @@ on_get_status(_InstanceID, State) -> kafka_client_id := ClientID, kafka_topics := KafkaTopics } = State, - do_get_status(State, ClientID, KafkaTopics, SubscriberId). + case do_get_status(ClientID, KafkaTopics, SubscriberId) of + {disconnected, Message} -> + {disconnected, State, Message}; + Res -> + Res + end. 
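%% Note on the refactor above (comment added for clarity; not part of the
%% original patch): do_get_status/3 no longer threads the resource State
%% through every clause; it returns `connected', `disconnected' or
%% {disconnected, Message}, and only the top-level on_get_status/2 wraps the
%% message form back into the {disconnected, State, Message} shape returned
%% to emqx_resource.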
%%------------------------------------------------------------------------------------- %% `brod_group_subscriber' API @@ -376,41 +381,41 @@ stop_client(ClientID) -> ), ok. -do_get_status(State, ClientID, [KafkaTopic | RestTopics], SubscriberId) -> +do_get_status(ClientID, [KafkaTopic | RestTopics], SubscriberId) -> case brod:get_partitions_count(ClientID, KafkaTopic) of {ok, NPartitions} -> - case do_get_status1(ClientID, KafkaTopic, SubscriberId, NPartitions) of - connected -> do_get_status(State, ClientID, RestTopics, SubscriberId); + case do_get_topic_status(ClientID, KafkaTopic, SubscriberId, NPartitions) of + connected -> do_get_status(ClientID, RestTopics, SubscriberId); disconnected -> disconnected end; {error, {client_down, Context}} -> case infer_client_error(Context) of auth_error -> Message = "Authentication error. " ++ ?CLIENT_DOWN_MESSAGE, - {disconnected, State, Message}; + {disconnected, Message}; {auth_error, Message0} -> Message = binary_to_list(Message0) ++ "; " ++ ?CLIENT_DOWN_MESSAGE, - {disconnected, State, Message}; + {disconnected, Message}; connection_refused -> Message = "Connection refused. " ++ ?CLIENT_DOWN_MESSAGE, - {disconnected, State, Message}; + {disconnected, Message}; _ -> - {disconnected, State, ?CLIENT_DOWN_MESSAGE} + {disconnected, ?CLIENT_DOWN_MESSAGE} end; {error, leader_not_available} -> Message = "Leader connection not available. Please check the Kafka topic used," " the connection parameters and Kafka cluster health", - {disconnected, State, Message}; + {disconnected, Message}; _ -> disconnected end; -do_get_status(_State, _ClientID, _KafkaTopics = [], _SubscriberId) -> +do_get_status(_ClientID, _KafkaTopics = [], _SubscriberId) -> connected. --spec do_get_status1(brod:client_id(), binary(), subscriber_id(), pos_integer()) -> +-spec do_get_topic_status(brod:client_id(), binary(), subscriber_id(), pos_integer()) -> connected | disconnected. -do_get_status1(ClientID, KafkaTopic, SubscriberId, NPartitions) -> +do_get_topic_status(ClientID, KafkaTopic, SubscriberId, NPartitions) -> Results = lists:map( fun(N) -> From dc480323092ccf2e2259c13ccba3598d59753b54 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Wed, 12 Apr 2023 10:21:57 -0300 Subject: [PATCH 127/263] feat(schema): add support for schemes in server parser/validator --- apps/emqx/src/emqx_schema.erl | 82 +++++++++++-- apps/emqx/test/emqx_schema_tests.erl | 167 ++++++++++++++++++++++++++- 2 files changed, 240 insertions(+), 9 deletions(-) diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index 272a1d0cd..248fdad7f 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -42,7 +42,12 @@ -type ip_port() :: tuple() | integer(). -type cipher() :: map(). -type port_number() :: 1..65536. --type server_parse_option() :: #{default_port => port_number(), no_port => boolean()}. +-type server_parse_option() :: #{ + default_port => port_number(), + no_port => boolean(), + supported_schemes => [string()], + default_scheme => string() +}. -type url() :: binary(). -type json_binary() :: binary(). @@ -2896,7 +2901,10 @@ servers_validator(Opts, Required) -> %% `no_port': by default it's `false', when set to `true', %% a `throw' exception is raised if the port is found. -spec parse_server(undefined | string() | binary(), server_parse_option()) -> - {string(), port_number()}. + string() + | {string(), port_number()} + | {string(), string()} + | {string(), string(), port_number()}. 
parse_server(Str, Opts) -> case parse_servers(Str, Opts) of undefined -> @@ -2910,7 +2918,12 @@ parse_server(Str, Opts) -> %% @doc Parse comma separated `host[:port][,host[:port]]' endpoints %% into a list of `{Host, Port}' tuples or just `Host' string. -spec parse_servers(undefined | string() | binary(), server_parse_option()) -> - [{string(), port_number()}]. + [ + string() + | {string(), port_number()} + | {string(), string()} + | {string(), string(), port_number()} + ]. parse_servers(undefined, _Opts) -> %% should not parse 'undefined' as string, %% not to throw exception either, @@ -2956,6 +2969,9 @@ split_host_port(Str) -> do_parse_server(Str, Opts) -> DefaultPort = maps:get(default_port, Opts, undefined), NotExpectingPort = maps:get(no_port, Opts, false), + DefaultScheme = maps:get(default_scheme, Opts, undefined), + SupportedSchemes = maps:get(supported_schemes, Opts, []), + NotExpectingScheme = (not is_list(DefaultScheme)) andalso length(SupportedSchemes) =:= 0, case is_integer(DefaultPort) andalso NotExpectingPort of true -> %% either provide a default port from schema, @@ -2964,24 +2980,74 @@ do_parse_server(Str, Opts) -> false -> ok end, + case is_list(DefaultScheme) andalso (not lists:member(DefaultScheme, SupportedSchemes)) of + true -> + %% inconsistent schema + error("bad_schema"); + false -> + ok + end, %% do not split with space, there should be no space allowed between host and port case string:tokens(Str, ":") of - [Hostname, Port] -> + [Scheme, "//" ++ Hostname, Port] -> NotExpectingPort andalso throw("not_expecting_port_number"), - {check_hostname(Hostname), parse_port(Port)}; - [Hostname] -> + NotExpectingScheme andalso throw("not_expecting_scheme"), + {check_scheme(Scheme, Opts), check_hostname(Hostname), parse_port(Port)}; + [Scheme, "//" ++ Hostname] -> + NotExpectingScheme andalso throw("not_expecting_scheme"), case is_integer(DefaultPort) of true -> - {check_hostname(Hostname), DefaultPort}; + {check_scheme(Scheme, Opts), check_hostname(Hostname), DefaultPort}; false when NotExpectingPort -> - check_hostname(Hostname); + {check_scheme(Scheme, Opts), check_hostname(Hostname)}; false -> throw("missing_port_number") end; + [Hostname, Port] -> + NotExpectingPort andalso throw("not_expecting_port_number"), + case is_list(DefaultScheme) of + false -> + {check_hostname(Hostname), parse_port(Port)}; + true -> + {DefaultScheme, check_hostname(Hostname), parse_port(Port)} + end; + [Hostname] -> + case is_integer(DefaultPort) orelse NotExpectingPort of + true -> + ok; + false -> + throw("missing_port_number") + end, + case is_list(DefaultScheme) orelse NotExpectingScheme of + true -> + ok; + false -> + throw("missing_scheme") + end, + case {is_integer(DefaultPort), is_list(DefaultScheme)} of + {true, true} -> + {DefaultScheme, check_hostname(Hostname), DefaultPort}; + {true, false} -> + {check_hostname(Hostname), DefaultPort}; + {false, true} -> + {DefaultScheme, check_hostname(Hostname)}; + {false, false} -> + check_hostname(Hostname) + end; _ -> throw("bad_host_port") end. +check_scheme(Str, Opts) -> + SupportedSchemes = maps:get(supported_schemes, Opts, []), + IsSupported = lists:member(Str, SupportedSchemes), + case IsSupported of + true -> + Str; + false -> + throw("unsupported_scheme") + end. 
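%% Quick reference for the scheme-aware options handled above (comment added
%% for clarity; not part of the original patch). These mirror the unit tests
%% added later in this series:
%%
%%   {"pulsar+ssl", "host", 6651} =
%%       emqx_schema:parse_server("pulsar+ssl://host:6651",
%%           #{default_port => 6650,
%%             supported_schemes => ["pulsar", "pulsar+ssl"]}),
%%
%%   %% no scheme in the input: default_scheme and default_port fill the gaps
%%   {"pulsar", "host", 6650} =
%%       emqx_schema:parse_server("host",
%%           #{default_port => 6650, default_scheme => "pulsar",
%%             supported_schemes => ["pulsar", "pulsar+ssl"]}),
%%
%%   %% an unsupported scheme is rejected with throw("unsupported_scheme")
%%   ok = try emqx_schema:parse_server("pulsar+quic://host:6651",
%%                #{default_port => 6650, supported_schemes => ["pulsar"]})
%%        catch throw:"unsupported_scheme" -> ok end.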
+ check_hostname(Str) -> %% not intended to use inet_parse:domain here %% only checking space because it interferes the parsing diff --git a/apps/emqx/test/emqx_schema_tests.erl b/apps/emqx/test/emqx_schema_tests.erl index 5176f4fad..c13dc8055 100644 --- a/apps/emqx/test/emqx_schema_tests.erl +++ b/apps/emqx/test/emqx_schema_tests.erl @@ -350,7 +350,7 @@ parse_server_test_() -> ) ), ?T( - "multiple servers wihtout port, mixed list(binary|string)", + "multiple servers without port, mixed list(binary|string)", ?assertEqual( ["host1", "host2"], Parse2([<<"host1">>, "host2"], #{no_port => true}) @@ -447,6 +447,171 @@ parse_server_test_() -> "bad_schema", emqx_schema:parse_server("whatever", #{default_port => 10, no_port => true}) ) + ), + ?T( + "scheme, hostname and port", + ?assertEqual( + {"pulsar+ssl", "host", 6651}, + emqx_schema:parse_server( + "pulsar+ssl://host:6651", + #{ + default_port => 6650, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "scheme and hostname, default port", + ?assertEqual( + {"pulsar", "host", 6650}, + emqx_schema:parse_server( + "pulsar://host", + #{ + default_port => 6650, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "scheme and hostname, no port", + ?assertEqual( + {"pulsar", "host"}, + emqx_schema:parse_server( + "pulsar://host", + #{ + no_port => true, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "scheme and hostname, missing port", + ?assertThrow( + "missing_port_number", + emqx_schema:parse_server( + "pulsar://host", + #{ + no_port => false, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "hostname, default scheme, no default port", + ?assertEqual( + {"pulsar", "host"}, + emqx_schema:parse_server( + "host", + #{ + default_scheme => "pulsar", + no_port => true, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "hostname, default scheme, default port", + ?assertEqual( + {"pulsar", "host", 6650}, + emqx_schema:parse_server( + "host", + #{ + default_port => 6650, + default_scheme => "pulsar", + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "just hostname, expecting missing scheme", + ?assertThrow( + "missing_scheme", + emqx_schema:parse_server( + "host", + #{ + no_port => true, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "hostname, default scheme, defined port", + ?assertEqual( + {"pulsar", "host", 6651}, + emqx_schema:parse_server( + "host:6651", + #{ + default_port => 6650, + default_scheme => "pulsar", + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "inconsistent scheme opts", + ?assertError( + "bad_schema", + emqx_schema:parse_server( + "pulsar+ssl://host:6651", + #{ + default_port => 6650, + default_scheme => "something", + supported_schemes => ["not", "supported"] + } + ) + ) + ), + ?T( + "hostname, default scheme, defined port", + ?assertEqual( + {"pulsar", "host", 6651}, + emqx_schema:parse_server( + "host:6651", + #{ + default_port => 6650, + default_scheme => "pulsar", + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "unsupported scheme", + ?assertThrow( + "unsupported_scheme", + emqx_schema:parse_server( + "pulsar+quic://host:6651", + #{ + default_port => 6650, + supported_schemes => ["pulsar"] + } + ) + ) + ), + ?T( + "multiple hostnames with schemes (1)", + ?assertEqual( + [ + {"pulsar", "host", 6649}, + {"pulsar+ssl", "other.host", 6651}, + {"pulsar", "yet.another", 6650} + ], + emqx_schema:parse_servers( + 
"pulsar://host:6649, pulsar+ssl://other.host:6651,pulsar://yet.another", + #{ + default_port => 6650, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) ) ]. From f6da118ebd16d445f8ae3df1b597e7d87e0a3084 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Mon, 17 Apr 2023 17:51:54 -0300 Subject: [PATCH 128/263] test: fix flaky test --- apps/emqx/test/emqx_connection_SUITE.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/apps/emqx/test/emqx_connection_SUITE.erl b/apps/emqx/test/emqx_connection_SUITE.erl index f24c1c895..6f0e3eb20 100644 --- a/apps/emqx/test/emqx_connection_SUITE.erl +++ b/apps/emqx/test/emqx_connection_SUITE.erl @@ -495,6 +495,7 @@ t_get_conn_info(_) -> end). t_oom_shutdown(init, Config) -> + ok = snabbkaffe:stop(), ok = snabbkaffe:start_trace(), ok = meck:new(emqx_utils, [non_strict, passthrough, no_history, no_link]), meck:expect( From ad4be08bb2bac7c4d730070dc268fc2b54afccc0 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Wed, 12 Apr 2023 10:22:36 -0300 Subject: [PATCH 129/263] feat: implement Pulsar Producer bridge (e5.0) Fixes https://emqx.atlassian.net/browse/EMQX-8398 --- .../docker-compose-pulsar-tcp.yaml | 32 + .ci/docker-compose-file/toxiproxy.json | 12 + LICENSE | 1 + apps/emqx/test/emqx_test_janitor.erl | 24 +- apps/emqx_bridge/src/emqx_bridge.erl | 3 +- apps/emqx_bridge/src/emqx_bridge_resource.erl | 2 + apps/emqx_bridge_pulsar/.gitignore | 19 + apps/emqx_bridge_pulsar/BSL.txt | 94 ++ apps/emqx_bridge_pulsar/README.md | 30 + apps/emqx_bridge_pulsar/docker-ct | 2 + .../etc/emqx_bridge_pulsar.conf | 0 .../include/emqx_bridge_pulsar.hrl | 14 + apps/emqx_bridge_pulsar/rebar.config | 13 + .../src/emqx_bridge_pulsar.app.src | 15 + .../src/emqx_bridge_pulsar.erl | 228 +++++ .../src/emqx_bridge_pulsar_app.erl | 14 + .../src/emqx_bridge_pulsar_impl_producer.erl | 396 +++++++++ .../src/emqx_bridge_pulsar_sup.erl | 33 + ...emqx_bridge_pulsar_impl_producer_SUITE.erl | 819 ++++++++++++++++++ .../pulsar_echo_consumer.erl | 25 + .../src/emqx_resource_manager.erl | 7 +- changes/ee/feat-10378.en.md | 1 + .../emqx_ee_bridge/src/emqx_ee_bridge.app.src | 4 +- lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl | 24 +- mix.exs | 4 +- rebar.config.erl | 1 + rel/i18n/emqx_bridge_pulsar.hocon | 176 ++++ rel/i18n/zh/emqx_bridge_pulsar.hocon | 173 ++++ scripts/ct/run.sh | 5 +- 29 files changed, 2160 insertions(+), 11 deletions(-) create mode 100644 .ci/docker-compose-file/docker-compose-pulsar-tcp.yaml create mode 100644 apps/emqx_bridge_pulsar/.gitignore create mode 100644 apps/emqx_bridge_pulsar/BSL.txt create mode 100644 apps/emqx_bridge_pulsar/README.md create mode 100644 apps/emqx_bridge_pulsar/docker-ct create mode 100644 apps/emqx_bridge_pulsar/etc/emqx_bridge_pulsar.conf create mode 100644 apps/emqx_bridge_pulsar/include/emqx_bridge_pulsar.hrl create mode 100644 apps/emqx_bridge_pulsar/rebar.config create mode 100644 apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src create mode 100644 apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl create mode 100644 apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_app.erl create mode 100644 apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl create mode 100644 apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_sup.erl create mode 100644 apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl create mode 100644 apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE_data/pulsar_echo_consumer.erl create mode 100644 changes/ee/feat-10378.en.md create mode 100644 
rel/i18n/emqx_bridge_pulsar.hocon create mode 100644 rel/i18n/zh/emqx_bridge_pulsar.hocon diff --git a/.ci/docker-compose-file/docker-compose-pulsar-tcp.yaml b/.ci/docker-compose-file/docker-compose-pulsar-tcp.yaml new file mode 100644 index 000000000..926000ae4 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-pulsar-tcp.yaml @@ -0,0 +1,32 @@ +version: '3' + +services: + pulsar: + container_name: pulsar + image: apachepulsar/pulsar:2.11.0 + # ports: + # - 6650:6650 + # - 8080:8080 + networks: + emqx_bridge: + volumes: + - ../../apps/emqx/etc/certs/cert.pem:/etc/certs/server.pem + - ../../apps/emqx/etc/certs/key.pem:/etc/certs/key.pem + - ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.pem + restart: always + command: + - bash + - "-c" + - | + sed -i 's/^advertisedAddress=/#advertisedAddress=/' conf/standalone.conf + sed -ie 's/^brokerServicePort=.*/brokerServicePort=6649/' conf/standalone.conf + sed -i 's/^bindAddress=/#bindAddress=/' conf/standalone.conf + sed -i 's#^bindAddresses=#bindAddresses=plain:pulsar://0.0.0.0:6650,ssl:pulsar+ssl://0.0.0.0:6651,toxiproxy:pulsar://0.0.0.0:6652,toxiproxy_ssl:pulsar+ssl://0.0.0.0:6653#' conf/standalone.conf + sed -i 's#^advertisedAddress=#advertisedAddress=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653#' conf/standalone.conf + sed -i 's#^tlsCertificateFilePath=#tlsCertificateFilePath=/etc/certs/server.pem#' conf/standalone.conf + sed -i 's#^tlsTrustCertsFilePath=#tlsTrustCertsFilePath=/etc/certs/ca.pem#' conf/standalone.conf + sed -i 's#^tlsKeyFilePath=#tlsKeyFilePath=/etc/certs/key.pem#' conf/standalone.conf + sed -i 's#^tlsProtocols=#tlsProtocols=TLSv1.3,TLSv1.2#' conf/standalone.conf + sed -i 's#^tlsCiphers=#tlsCiphers=TLS_AES_256_GCM_SHA384#' conf/standalone.conf + echo 'advertisedListeners=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653' >> conf/standalone.conf + bin/pulsar standalone -nfw -nss diff --git a/.ci/docker-compose-file/toxiproxy.json b/.ci/docker-compose-file/toxiproxy.json index f6b31da4c..9cefcb808 100644 --- a/.ci/docker-compose-file/toxiproxy.json +++ b/.ci/docker-compose-file/toxiproxy.json @@ -107,5 +107,17 @@ "listen": "0.0.0.0:4242", "upstream": "opents:4242", "enabled": true + }, + { + "name": "pulsar_plain", + "listen": "0.0.0.0:6652", + "upstream": "pulsar:6652", + "enabled": true + }, + { + "name": "pulsar_tls", + "listen": "0.0.0.0:6653", + "upstream": "pulsar:6653", + "enabled": true } ] diff --git a/LICENSE b/LICENSE index 4ed190bda..68bb18ce3 100644 --- a/LICENSE +++ b/LICENSE @@ -9,3 +9,4 @@ sub-directory and some of the apps under the apps directory. Source code under apps that uses BSL License: - apps/emqx_bridge_kafka +- apps/emqx_bridge_pulsar diff --git a/apps/emqx/test/emqx_test_janitor.erl b/apps/emqx/test/emqx_test_janitor.erl index c3f82a3e1..041b03fa7 100644 --- a/apps/emqx/test/emqx_test_janitor.erl +++ b/apps/emqx/test/emqx_test_janitor.erl @@ -60,12 +60,12 @@ init(Parent) -> {ok, #{callbacks => [], owner => Parent}}. terminate(_Reason, #{callbacks := Callbacks}) -> - lists:foreach(fun(Fun) -> catch Fun() end, Callbacks). + do_terminate(Callbacks). 
handle_call({push, Callback}, _From, State = #{callbacks := Callbacks}) -> {reply, ok, State#{callbacks := [Callback | Callbacks]}}; handle_call(terminate, _From, State = #{callbacks := Callbacks}) -> - lists:foreach(fun(Fun) -> catch Fun() end, Callbacks), + do_terminate(Callbacks), {stop, normal, ok, State}; handle_call(_Req, _From, State) -> {reply, error, State}. @@ -77,3 +77,23 @@ handle_info({'EXIT', Parent, _Reason}, State = #{owner := Parent}) -> {stop, normal, State}; handle_info(_Msg, State) -> {noreply, State}. + +%%---------------------------------------------------------------------------------- +%% Internal fns +%%---------------------------------------------------------------------------------- + +do_terminate(Callbacks) -> + lists:foreach( + fun(Fun) -> + try + Fun() + catch + K:E:S -> + ct:pal("error executing callback ~p: ~p", [Fun, {K, E}]), + ct:pal("stacktrace: ~p", [S]), + ok + end + end, + Callbacks + ), + ok. diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index 08b8222f2..fd4e16263 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -70,7 +70,8 @@ T == dynamo; T == rocketmq; T == cassandra; - T == sqlserver + T == sqlserver; + T == pulsar_producer ). load() -> diff --git a/apps/emqx_bridge/src/emqx_bridge_resource.erl b/apps/emqx_bridge/src/emqx_bridge_resource.erl index 1ad024c40..da98b073e 100644 --- a/apps/emqx_bridge/src/emqx_bridge_resource.erl +++ b/apps/emqx_bridge/src/emqx_bridge_resource.erl @@ -340,6 +340,8 @@ parse_confs(Type, Name, Conf) when ?IS_INGRESS_BRIDGE(Type) -> %% to hocon; keeping this as just `kafka' for backwards compatibility. parse_confs(<<"kafka">> = _Type, Name, Conf) -> Conf#{bridge_name => Name}; +parse_confs(<<"pulsar_producer">> = _Type, Name, Conf) -> + Conf#{bridge_name => Name}; parse_confs(_Type, _Name, Conf) -> Conf. diff --git a/apps/emqx_bridge_pulsar/.gitignore b/apps/emqx_bridge_pulsar/.gitignore new file mode 100644 index 000000000..f1c455451 --- /dev/null +++ b/apps/emqx_bridge_pulsar/.gitignore @@ -0,0 +1,19 @@ +.rebar3 +_* +.eunit +*.o +*.beam +*.plt +*.swp +*.swo +.erlang.cookie +ebin +log +erl_crash.dump +.rebar +logs +_build +.idea +*.iml +rebar3.crashdump +*~ diff --git a/apps/emqx_bridge_pulsar/BSL.txt b/apps/emqx_bridge_pulsar/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_pulsar/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. 
+ +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. 
diff --git a/apps/emqx_bridge_pulsar/README.md b/apps/emqx_bridge_pulsar/README.md new file mode 100644 index 000000000..09e17d8bb --- /dev/null +++ b/apps/emqx_bridge_pulsar/README.md @@ -0,0 +1,30 @@ +# Pulsar Data Integration Bridge + +This application houses the Pulsar Producer data integration bridge +for EMQX Enterprise Edition. It provides the means to connect to +Pulsar and publish messages to it. + +Currently, our Pulsar Producer library has its own `replayq` buffering +implementation, so this bridge does not require buffer workers from +`emqx_resource`. It implements the connection management and +interaction without need for a separate connector app, since it's not +used by authentication and authorization applications. + +# Documentation links + +For more information on Apache Pulsar, please see its [official +site](https://pulsar.apache.org/). + +# Configurations + +Please see [our official +documentation](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-pulsar.html) +for more detailed info. + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + +# License + +See [BSL](./BSL.txt). diff --git a/apps/emqx_bridge_pulsar/docker-ct b/apps/emqx_bridge_pulsar/docker-ct new file mode 100644 index 000000000..6324bb4f7 --- /dev/null +++ b/apps/emqx_bridge_pulsar/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +pulsar diff --git a/apps/emqx_bridge_pulsar/etc/emqx_bridge_pulsar.conf b/apps/emqx_bridge_pulsar/etc/emqx_bridge_pulsar.conf new file mode 100644 index 000000000..e69de29bb diff --git a/apps/emqx_bridge_pulsar/include/emqx_bridge_pulsar.hrl b/apps/emqx_bridge_pulsar/include/emqx_bridge_pulsar.hrl new file mode 100644 index 000000000..5ee87e48f --- /dev/null +++ b/apps/emqx_bridge_pulsar/include/emqx_bridge_pulsar.hrl @@ -0,0 +1,14 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-ifndef(EMQX_BRIDGE_PULSAR_HRL). +-define(EMQX_BRIDGE_PULSAR_HRL, true). + +-define(PULSAR_HOST_OPTIONS, #{ + default_port => 6650, + default_scheme => "pulsar", + supported_schemes => ["pulsar", "pulsar+ssl"] +}). + +-endif. diff --git a/apps/emqx_bridge_pulsar/rebar.config b/apps/emqx_bridge_pulsar/rebar.config new file mode 100644 index 000000000..3b9ae417d --- /dev/null +++ b/apps/emqx_bridge_pulsar/rebar.config @@ -0,0 +1,13 @@ +%% -*- mode: erlang; -*- + +{erl_opts, [debug_info]}. +{deps, [ {pulsar, {git, "https://github.com/emqx/pulsar-client-erl.git", {tag, "0.8.0"}}} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + % {config, "config/sys.config"}, + {apps, [emqx_bridge_pulsar]} +]}. diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src new file mode 100644 index 000000000..cd89f6867 --- /dev/null +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src @@ -0,0 +1,15 @@ +{application, emqx_bridge_pulsar, [ + {description, "EMQX Pulsar Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {mod, {emqx_bridge_pulsar_app, []}}, + {applications, [ + kernel, + stdlib, + pulsar + ]}, + {env, []}, + {modules, []}, + + {links, []} +]}. 
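For orientation, a minimal HOCON configuration for a `pulsar_producer` bridge might look like the sketch below. The field names come from the schema module that follows and from the config string built in the test suite further down; the bridge name, server address and topic names are placeholders, and the omitted fields (authentication, buffer, batching, SSL and resource options) fall back to the schema defaults.

    bridges.pulsar_producer.my_producer {
      enable = true
      servers = "pulsar://localhost:6650"
      pulsar_topic = "my-topic"
      local_topic = "mqtt/pulsar/forward"
      message {
        key = "${.clientid}"
        value = "${.}"
      }
    }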
diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl new file mode 100644 index 000000000..a3e50054e --- /dev/null +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl @@ -0,0 +1,228 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_pulsar). + +-include("emqx_bridge_pulsar.hrl"). +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +%% hocon_schema API +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). +%% emqx_ee_bridge "unofficial" API +-export([conn_bridge_examples/1]). + +%%------------------------------------------------------------------------------------------------- +%% `hocon_schema' API +%%------------------------------------------------------------------------------------------------- + +namespace() -> + "bridge_pulsar". + +roots() -> + []. + +fields(pulsar_producer) -> + fields(config) ++ fields(producer_opts); +fields(config) -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {servers, + mk( + binary(), + #{ + required => true, + desc => ?DESC("servers"), + validator => emqx_schema:servers_validator( + ?PULSAR_HOST_OPTIONS, _Required = true + ) + } + )}, + {authentication, + mk(hoconsc:union([none, ref(auth_basic), ref(auth_token)]), #{ + default => none, desc => ?DESC("authentication") + })} + ] ++ emqx_connector_schema_lib:ssl_fields(); +fields(producer_opts) -> + [ + {batch_size, + mk( + pos_integer(), + #{default => 100, desc => ?DESC("producer_batch_size")} + )}, + {compression, + mk( + hoconsc:enum([no_compression, snappy, zlib]), + #{default => no_compression, desc => ?DESC("producer_compression")} + )}, + {send_buffer, + mk(emqx_schema:bytesize(), #{ + default => <<"1MB">>, desc => ?DESC("producer_send_buffer") + })}, + {sync_timeout, + mk(emqx_schema:duration_ms(), #{ + default => <<"3s">>, desc => ?DESC("producer_sync_timeout") + })}, + {retention_period, + mk( + hoconsc:union([infinity, emqx_schema:duration_ms()]), + #{default => infinity, desc => ?DESC("producer_retention_period")} + )}, + {max_batch_bytes, + mk( + emqx_schema:bytesize(), + #{default => <<"900KB">>, desc => ?DESC("producer_max_batch_bytes")} + )}, + {local_topic, mk(binary(), #{required => false, desc => ?DESC("producer_local_topic")})}, + {pulsar_topic, mk(binary(), #{required => true, desc => ?DESC("producer_pulsar_topic")})}, + {strategy, + mk( + hoconsc:enum([random, roundrobin, first_key_dispatch]), + #{default => random, desc => ?DESC("producer_strategy")} + )}, + {buffer, mk(ref(producer_buffer), #{required => false, desc => ?DESC("producer_buffer")})}, + {message, + mk(ref(producer_pulsar_message), #{ + required => false, desc => ?DESC("producer_message_opts") + })}, + {resource_opts, + mk( + ref(producer_resource_opts), + #{ + required => false, + desc => ?DESC(emqx_resource_schema, "creation_opts") + } + )} + ]; +fields(producer_buffer) -> + [ + {mode, + mk( + hoconsc:enum([memory, disk, hybrid]), + #{default => memory, desc => ?DESC("buffer_mode")} + )}, + {per_partition_limit, + mk( + emqx_schema:bytesize(), + #{default => <<"2GB">>, desc => ?DESC("buffer_per_partition_limit")} + )}, + {segment_bytes, + mk( + emqx_schema:bytesize(), + #{default => <<"100MB">>, desc => 
?DESC("buffer_segment_bytes")} + )}, + {memory_overload_protection, + mk(boolean(), #{ + default => false, + desc => ?DESC("buffer_memory_overload_protection") + })} + ]; +fields(producer_pulsar_message) -> + [ + {key, + mk(string(), #{default => <<"${.clientid}">>, desc => ?DESC("producer_key_template")})}, + {value, mk(string(), #{default => <<"${.}">>, desc => ?DESC("producer_value_template")})} + ]; +fields(producer_resource_opts) -> + SupportedOpts = [ + health_check_interval, + resume_interval, + start_after_created, + start_timeout, + auto_restart_interval + ], + lists:filtermap( + fun + ({health_check_interval = Field, MetaFn}) -> + {true, {Field, override_default(MetaFn, 1_000)}}; + ({Field, _Meta}) -> + lists:member(Field, SupportedOpts) + end, + emqx_resource_schema:fields("creation_opts") + ); +fields(auth_basic) -> + [ + {username, mk(binary(), #{required => true, desc => ?DESC("auth_basic_username")})}, + {password, + mk(binary(), #{ + required => true, + desc => ?DESC("auth_basic_password"), + sensitive => true, + converter => fun emqx_schema:password_converter/2 + })} + ]; +fields(auth_token) -> + [ + {jwt, + mk(binary(), #{ + required => true, + desc => ?DESC("auth_token_jwt"), + sensitive => true, + converter => fun emqx_schema:password_converter/2 + })} + ]; +fields("get_" ++ Type) -> + emqx_bridge_schema:status_fields() ++ fields("post_" ++ Type); +fields("put_" ++ Type) -> + fields("config_" ++ Type); +fields("post_" ++ Type) -> + [type_field(), name_field() | fields("config_" ++ Type)]; +fields("config_producer") -> + fields(pulsar_producer). + +desc(pulsar_producer) -> + ?DESC(pulsar_producer_struct); +desc(producer_resource_opts) -> + ?DESC(emqx_resource_schema, "creation_opts"); +desc("get_" ++ Type) when Type =:= "producer" -> + ["Configuration for Pulsar using `GET` method."]; +desc("put_" ++ Type) when Type =:= "producer" -> + ["Configuration for Pulsar using `PUT` method."]; +desc("post_" ++ Type) when Type =:= "producer" -> + ["Configuration for Pulsar using `POST` method."]; +desc(Name) -> + lists:member(Name, struct_names()) orelse throw({missing_desc, Name}), + ?DESC(Name). + +conn_bridge_examples(_Method) -> + [ + #{ + <<"pulsar_producer">> => #{ + summary => <<"Pulsar Producer Bridge">>, + value => #{todo => true} + } + } + ]. + +%%------------------------------------------------------------------------------------------------- +%% Internal fns +%%------------------------------------------------------------------------------------------------- + +mk(Type, Meta) -> hoconsc:mk(Type, Meta). +ref(Name) -> hoconsc:ref(?MODULE, Name). + +type_field() -> + {type, mk(hoconsc:enum([pulsar_producer]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. + +struct_names() -> + [ + auth_basic, + auth_token, + producer_buffer, + producer_pulsar_message + ]. + +override_default(OriginalFn, NewDefault) -> + fun + (default) -> NewDefault; + (Field) -> OriginalFn(Field) + end. diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_app.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_app.erl new file mode 100644 index 000000000..bedf42cf6 --- /dev/null +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_app.erl @@ -0,0 +1,14 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_pulsar_app). 
+ +-behaviour(application). + +-export([start/2, stop/1]). + +start(_StartType, _StartArgs) -> + emqx_bridge_pulsar_sup:start_link(). + +stop(_State) -> + ok. diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl new file mode 100644 index 000000000..4bc390b91 --- /dev/null +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl @@ -0,0 +1,396 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_pulsar_impl_producer). + +-include("emqx_bridge_pulsar.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% `emqx_resource' API +-export([ + callback_mode/0, + is_buffer_supported/0, + on_start/2, + on_stop/2, + on_get_status/2, + on_query/3, + on_query_async/4 +]). + +-type pulsar_client_id() :: atom(). +-type state() :: #{ + pulsar_client_id := pulsar_client_id(), + producers := pulsar_producers:producers(), + sync_timeout := infinity | time:time(), + message_template := message_template() +}. +-type buffer_mode() :: memory | disk | hybrid. +-type compression_mode() :: no_compression | snappy | zlib. +-type partition_strategy() :: random | roundrobin | first_key_dispatch. +-type message_template_raw() :: #{ + key := binary(), + value := binary() +}. +-type message_template() :: #{ + key := emqx_plugin_libs_rule:tmpl_token(), + value := emqx_plugin_libs_rule:tmpl_token() +}. +-type config() :: #{ + authentication := _, + batch_size := pos_integer(), + bridge_name := atom(), + buffer := #{ + mode := buffer_mode(), + per_partition_limit := emqx_schema:byte_size(), + segment_bytes := emqx_schema:byte_size(), + memory_overload_protection := boolean() + }, + compression := compression_mode(), + max_batch_bytes := emqx_schema:bytesize(), + message := message_template_raw(), + pulsar_topic := binary(), + retention_period := infinity | emqx_schema:duration_ms(), + send_buffer := emqx_schema:bytesize(), + servers := binary(), + ssl := _, + strategy := partition_strategy(), + sync_timeout := emqx_schema:duration_ms() +}. + +%%------------------------------------------------------------------------------------- +%% `emqx_resource' API +%%------------------------------------------------------------------------------------- + +callback_mode() -> async_if_possible. + +%% there are no queries to be made to this bridge, so we say that +%% buffer is supported so we don't spawn unused resource buffer +%% workers. +is_buffer_supported() -> true. + +-spec on_start(manager_id(), config()) -> {ok, state()}. 
+on_start(InstanceId, Config) -> + #{ + authentication := _Auth, + bridge_name := BridgeName, + servers := Servers0, + ssl := SSL + } = Config, + Servers = format_servers(Servers0), + ClientId = make_client_id(InstanceId, BridgeName), + SSLOpts = emqx_tls_lib:to_client_opts(SSL), + ClientOpts = #{ + ssl_opts => SSLOpts, + conn_opts => conn_opts(Config) + }, + case pulsar:ensure_supervised_client(ClientId, Servers, ClientOpts) of + {ok, _Pid} -> + ?SLOG(info, #{ + msg => "pulsar_client_started", + instance_id => InstanceId, + pulsar_hosts => Servers + }); + {error, Error} -> + ?SLOG(error, #{ + msg => "failed_to_start_pulsar_client", + instance_id => InstanceId, + pulsar_hosts => Servers, + reason => Error + }), + throw(failed_to_start_pulsar_client) + end, + start_producer(Config, InstanceId, ClientId, ClientOpts). + +-spec on_stop(manager_id(), state()) -> ok. +on_stop(_InstanceId, State) -> + #{ + pulsar_client_id := ClientId, + producers := Producers + } = State, + stop_producers(ClientId, Producers), + stop_client(ClientId), + ?tp(pulsar_bridge_stopped, #{instance_id => _InstanceId}), + ok. + +-spec on_get_status(manager_id(), state()) -> connected | disconnected. +on_get_status(_InstanceId, State) -> + #{ + pulsar_client_id := ClientId, + producers := Producers + } = State, + case pulsar_client_sup:find_client(ClientId) of + {ok, Pid} -> + try pulsar_client:get_status(Pid) of + true -> + get_producer_status(Producers); + false -> + disconnected + catch + error:timeout -> + disconnected; + exit:{noproc, _} -> + disconnected + end; + {error, _} -> + disconnected + end. + +-spec on_query(manager_id(), {send_message, map()}, state()) -> ok | {error, timeout}. +on_query(_InstanceId, {send_message, Message}, State) -> + #{ + producers := Producers, + sync_timeout := SyncTimeout, + message_template := MessageTemplate + } = State, + PulsarMessage = render_message(Message, MessageTemplate), + try + pulsar:send_sync(Producers, [PulsarMessage], SyncTimeout) + catch + error:timeout -> + {error, timeout} + end. + +-spec on_query_async( + manager_id(), {send_message, map()}, {ReplyFun :: function(), Args :: list()}, state() +) -> + {ok, pid()}. +on_query_async(_InstanceId, {send_message, Message}, AsyncReplyFn, State) -> + #{ + producers := Producers, + message_template := MessageTemplate + } = State, + PulsarMessage = render_message(Message, MessageTemplate), + pulsar:send(Producers, [PulsarMessage], #{callback_fn => AsyncReplyFn}). + +%%------------------------------------------------------------------------------------- +%% Internal fns +%%------------------------------------------------------------------------------------- + +-spec to_bin(atom() | string() | binary()) -> binary(). +to_bin(A) when is_atom(A) -> + atom_to_binary(A); +to_bin(L) when is_list(L) -> + list_to_binary(L); +to_bin(B) when is_binary(B) -> + B. + +-spec format_servers(binary()) -> [string()]. +format_servers(Servers0) -> + Servers1 = emqx_schema:parse_servers(Servers0, ?PULSAR_HOST_OPTIONS), + lists:map( + fun({Scheme, Host, Port}) -> + Scheme ++ "://" ++ Host ++ ":" ++ integer_to_list(Port) + end, + Servers1 + ). + +-spec make_client_id(manager_id(), atom() | binary()) -> pulsar_client_id(). +make_client_id(InstanceId, BridgeName) -> + case is_dry_run(InstanceId) of + true -> + pulsar_producer_probe; + false -> + ClientIdBin = iolist_to_binary([ + <<"pulsar_producer:">>, + to_bin(BridgeName), + <<":">>, + to_bin(node()) + ]), + binary_to_atom(ClientIdBin) + end. + +-spec is_dry_run(manager_id()) -> boolean().
+is_dry_run(InstanceId) -> + TestIdStart = string:find(InstanceId, ?TEST_ID_PREFIX), + case TestIdStart of + nomatch -> + false; + _ -> + string:equal(TestIdStart, InstanceId) + end. + +conn_opts(#{authentication := none}) -> + #{}; +conn_opts(#{authentication := #{username := Username, password := Password}}) -> + #{ + auth_data => iolist_to_binary([Username, <<":">>, Password]), + auth_method_name => <<"basic">> + }; +conn_opts(#{authentication := #{jwt := JWT}}) -> + #{ + auth_data => JWT, + auth_method_name => <<"token">> + }. + +-spec replayq_dir(pulsar_client_id()) -> string(). +replayq_dir(ClientId) -> + filename:join([emqx:data_dir(), "pulsar", to_bin(ClientId)]). + +-spec producer_name(pulsar_client_id()) -> atom(). +producer_name(ClientId) -> + ClientIdBin = to_bin(ClientId), + binary_to_atom( + iolist_to_binary([ + <<"producer-">>, + ClientIdBin + ]) + ). + +-spec start_producer(config(), manager_id(), pulsar_client_id(), map()) -> {ok, state()}. +start_producer(Config, InstanceId, ClientId, ClientOpts) -> + #{ + conn_opts := ConnOpts, + ssl_opts := SSLOpts + } = ClientOpts, + #{ + batch_size := BatchSize, + buffer := #{ + mode := BufferMode, + per_partition_limit := PerPartitionLimit, + segment_bytes := SegmentBytes, + memory_overload_protection := MemOLP0 + }, + compression := Compression, + max_batch_bytes := MaxBatchBytes, + message := MessageTemplateOpts, + pulsar_topic := PulsarTopic0, + retention_period := RetentionPeriod, + send_buffer := SendBuffer, + strategy := Strategy, + sync_timeout := SyncTimeout + } = Config, + {OffloadMode, ReplayQDir} = + case BufferMode of + memory -> {false, false}; + disk -> {false, replayq_dir(ClientId)}; + hybrid -> {true, replayq_dir(ClientId)} + end, + MemOLP = + case os:type() of + {unix, linux} -> MemOLP0; + _ -> false + end, + ReplayQOpts = #{ + replayq_dir => ReplayQDir, + replayq_offload_mode => OffloadMode, + replayq_max_total_bytes => PerPartitionLimit, + replayq_seg_bytes => SegmentBytes, + drop_if_highmem => MemOLP + }, + ProducerName = producer_name(ClientId), + MessageTemplate = compile_message_template(MessageTemplateOpts), + ProducerOpts0 = + #{ + batch_size => BatchSize, + compression => Compression, + conn_opts => ConnOpts, + max_batch_bytes => MaxBatchBytes, + name => ProducerName, + retention_period => RetentionPeriod, + ssl_opts => SSLOpts, + strategy => Strategy, + tcp_opts => [{sndbuf, SendBuffer}] + }, + ProducerOpts = maps:merge(ReplayQOpts, ProducerOpts0), + PulsarTopic = binary_to_list(PulsarTopic0), + try pulsar:ensure_supervised_producers(ClientId, PulsarTopic, ProducerOpts) of + {ok, Producers} -> + State = #{ + pulsar_client_id => ClientId, + producers => Producers, + sync_timeout => SyncTimeout, + message_template => MessageTemplate + }, + ?tp(pulsar_producer_bridge_started, #{}), + {ok, State} + catch + Kind:Error:Stacktrace -> + ?SLOG(error, #{ + msg => "failed_to_start_pulsar_producer", + instance_id => InstanceId, + kind => Kind, + reason => Error, + stacktrace => Stacktrace + }), + stop_client(ClientId), + throw(failed_to_start_pulsar_producer) + end. + +-spec stop_client(pulsar_client_id()) -> ok. +stop_client(ClientId) -> + _ = log_when_error( + fun() -> + ok = pulsar:stop_and_delete_supervised_client(ClientId), + ?tp(pulsar_bridge_client_stopped, #{pulsar_client_id => ClientId}), + ok + end, + #{ + msg => "failed_to_delete_pulsar_client", + pulsar_client_id => ClientId + } + ), + ok. + +-spec stop_producers(pulsar_client_id(), pulsar_producers:producers()) -> ok. 
+stop_producers(ClientId, Producers) -> + _ = log_when_error( + fun() -> + ok = pulsar:stop_and_delete_supervised_producers(Producers), + ?tp(pulsar_bridge_producer_stopped, #{pulsar_client_id => ClientId}), + ok + end, + #{ + msg => "failed_to_delete_pulsar_producer", + pulsar_client_id => ClientId + } + ), + ok. + +log_when_error(Fun, Log) -> + try + Fun() + catch + C:E -> + ?SLOG(error, Log#{ + exception => C, + reason => E + }) + end. + +-spec compile_message_template(message_template_raw()) -> message_template(). +compile_message_template(TemplateOpts) -> + KeyTemplate = maps:get(key, TemplateOpts, <<"${.clientid}">>), + ValueTemplate = maps:get(value, TemplateOpts, <<"${.}">>), + #{ + key => preproc_tmpl(KeyTemplate), + value => preproc_tmpl(ValueTemplate) + }. + +preproc_tmpl(Template) -> + emqx_plugin_libs_rule:preproc_tmpl(Template). + +render_message( + Message, #{key := KeyTemplate, value := ValueTemplate} +) -> + #{ + key => render(Message, KeyTemplate), + value => render(Message, ValueTemplate) + }. + +render(Message, Template) -> + Opts = #{ + var_trans => fun + (undefined) -> <<"">>; + (X) -> emqx_plugin_libs_rule:bin(X) + end, + return => full_binary + }, + emqx_plugin_libs_rule:proc_tmpl(Template, Message, Opts). + +get_producer_status(Producers) -> + case pulsar_producers:all_connected(Producers) of + true -> connected; + false -> connecting + end. diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_sup.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_sup.erl new file mode 100644 index 000000000..17121beab --- /dev/null +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_sup.erl @@ -0,0 +1,33 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_pulsar_sup). + +-behaviour(supervisor). + +-export([start_link/0]). + +-export([init/1]). + +-define(SERVER, ?MODULE). + +start_link() -> + supervisor:start_link({local, ?SERVER}, ?MODULE, []). + +%% sup_flags() = #{strategy => strategy(), % optional +%% intensity => non_neg_integer(), % optional +%% period => pos_integer()} % optional +%% child_spec() = #{id => child_id(), % mandatory +%% start => mfargs(), % mandatory +%% restart => restart(), % optional +%% shutdown => shutdown(), % optional +%% type => worker(), % optional +%% modules => modules()} % optional +init([]) -> + SupFlags = #{ + strategy => one_for_all, + intensity => 0, + period => 1 + }, + ChildSpecs = [], + {ok, {SupFlags, ChildSpecs}}. diff --git a/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl new file mode 100644 index 000000000..f86dbc65d --- /dev/null +++ b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl @@ -0,0 +1,819 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_pulsar_impl_producer_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-import(emqx_common_test_helpers, [on_exit/1]). + +-define(BRIDGE_TYPE_BIN, <<"pulsar_producer">>). 
+-define(APPS, [emqx_bridge, emqx_resource, emqx_rule_engine, emqx_bridge_pulsar]). +-define(RULE_TOPIC, "mqtt/rule"). +-define(RULE_TOPIC_BIN, <<?RULE_TOPIC>>). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, plain}, + {group, tls} + ]. + +groups() -> + AllTCs = emqx_common_test_helpers:all(?MODULE), + OnlyOnceTCs = only_once_tests(), + TCs = AllTCs -- OnlyOnceTCs, + [ + {plain, AllTCs}, + {tls, TCs} + ]. + +only_once_tests() -> + [t_create_via_http]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?APPS)), + _ = application:stop(emqx_connector), + ok. + +init_per_group(plain = Type, Config) -> + PulsarHost = os:getenv("PULSAR_PLAIN_HOST", "toxiproxy"), + PulsarPort = list_to_integer(os:getenv("PULSAR_PLAIN_PORT", "6652")), + ProxyName = "pulsar_plain", + case emqx_common_test_helpers:is_tcp_server_available(PulsarHost, PulsarPort) of + true -> + Config1 = common_init_per_group(), + [ + {proxy_name, ProxyName}, + {pulsar_host, PulsarHost}, + {pulsar_port, PulsarPort}, + {pulsar_type, Type}, + {use_tls, false} + | Config1 ++ Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_pulsar); + _ -> + {skip, no_pulsar} + end + end; +init_per_group(tls = Type, Config) -> + PulsarHost = os:getenv("PULSAR_TLS_HOST", "toxiproxy"), + PulsarPort = list_to_integer(os:getenv("PULSAR_TLS_PORT", "6653")), + ProxyName = "pulsar_tls", + case emqx_common_test_helpers:is_tcp_server_available(PulsarHost, PulsarPort) of + true -> + Config1 = common_init_per_group(), + [ + {proxy_name, ProxyName}, + {pulsar_host, PulsarHost}, + {pulsar_port, PulsarPort}, + {pulsar_type, Type}, + {use_tls, true} + | Config1 ++ Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_pulsar); + _ -> + {skip, no_pulsar} + end + end; +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when + Group =:= plain +-> + common_end_per_group(Config), + ok; +end_per_group(_Group, _Config) -> + ok. + +common_init_per_group() -> + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + application:load(emqx_bridge), + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps(?APPS), + {ok, _} = application:ensure_all_started(emqx_connector), + emqx_mgmt_api_test_util:init_suite(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>, + [ + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {mqtt_topic, MQTTTopic} + ]. + +common_end_per_group(Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + ok. + +init_per_testcase(TestCase, Config) -> + common_init_per_testcase(TestCase, Config).
+ +end_per_testcase(_Testcase, Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + stop_consumer(Config), + %% in CI, apparently this needs more time since the + %% machines struggle with all the containers running... + emqx_common_test_helpers:call_janitor(60_000), + ok = snabbkaffe:stop(), + ok + end. + +common_init_per_testcase(TestCase, Config0) -> + ct:timetrap(timer:seconds(60)), + delete_all_bridges(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + PulsarTopic = + << + (atom_to_binary(TestCase))/binary, + UniqueNum/binary + >>, + PulsarType = ?config(pulsar_type, Config0), + Config1 = [{pulsar_topic, PulsarTopic} | Config0], + {Name, ConfigString, PulsarConfig} = pulsar_config( + TestCase, PulsarType, Config1 + ), + ConsumerConfig = start_consumer(TestCase, Config1), + Config = ConsumerConfig ++ Config1, + ok = snabbkaffe:start_trace(), + [ + {pulsar_name, Name}, + {pulsar_config_string, ConfigString}, + {pulsar_config, PulsarConfig} + | Config + ]. + +delete_all_bridges() -> + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +pulsar_config(TestCase, _PulsarType, Config) -> + UniqueNum = integer_to_binary(erlang:unique_integer()), + PulsarHost = ?config(pulsar_host, Config), + PulsarPort = ?config(pulsar_port, Config), + PulsarTopic = ?config(pulsar_topic, Config), + AuthType = proplists:get_value(sasl_auth_mechanism, Config, none), + UseTLS = proplists:get_value(use_tls, Config, false), + Name = << + (atom_to_binary(TestCase))/binary, UniqueNum/binary + >>, + MQTTTopic = proplists:get_value(mqtt_topic, Config, <<"mqtt/topic/", UniqueNum/binary>>), + Prefix = + case UseTLS of + true -> <<"pulsar+ssl://">>; + false -> <<"pulsar://">> + end, + ServerURL = iolist_to_binary([ + Prefix, + PulsarHost, + ":", + integer_to_binary(PulsarPort) + ]), + ConfigString = + io_lib:format( + "bridges.pulsar_producer.~s {\n" + " enable = true\n" + " servers = \"~s\"\n" + " sync_timeout = 5s\n" + " compression = no_compression\n" + " send_buffer = 1MB\n" + " retention_period = infinity\n" + " max_batch_bytes = 900KB\n" + " batch_size = 1\n" + " strategy = random\n" + " buffer {\n" + " mode = memory\n" + " per_partition_limit = 10MB\n" + " segment_bytes = 5MB\n" + " memory_overload_protection = true\n" + " }\n" + " message {\n" + " key = \"${.clientid}\"\n" + " value = \"${.}\"\n" + " }\n" + "~s" + " ssl {\n" + " enable = ~p\n" + " verify = verify_none\n" + " server_name_indication = \"auto\"\n" + " }\n" + " pulsar_topic = \"~s\"\n" + " local_topic = \"~s\"\n" + "}\n", + [ + Name, + ServerURL, + authentication(AuthType), + UseTLS, + PulsarTopic, + MQTTTopic + ] + ), + {Name, ConfigString, parse_and_check(ConfigString, Name)}. + +parse_and_check(ConfigString, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + TypeBin = ?BRIDGE_TYPE_BIN, + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf, + Config. + +authentication(_) -> + " authentication = none\n". 
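+%% Note: only the `none' authentication variant is generated by authentication/1 above;
+%% the `auth_basic' and `auth_token' variants defined in emqx_bridge_pulsar:fields/1 are
+%% not exercised by this suite.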
+ +resource_id(Config) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + emqx_bridge_resource:resource_id(Type, Name). + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + PulsarConfig0 = ?config(pulsar_config, Config), + PulsarConfig = emqx_utils_maps:deep_merge(PulsarConfig0, Overrides), + emqx_bridge:create(Type, Name, PulsarConfig). + +create_bridge_api(Config) -> + create_bridge_api(Config, _Overrides = #{}). + +create_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + PulsarConfig0 = ?config(pulsar_config, Config), + PulsarConfig = emqx_utils_maps:deep_merge(PulsarConfig0, Overrides), + Params = PulsarConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("creating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {Status, Headers, Body0}} -> + {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}}; + Error -> + Error + end, + ct:pal("bridge create result: ~p", [Res]), + Res. + +update_bridge_api(Config) -> + update_bridge_api(Config, _Overrides = #{}). + +update_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + PulsarConfig0 = ?config(pulsar_config, Config), + PulsarConfig = emqx_utils_maps:deep_merge(PulsarConfig0, Overrides), + BridgeId = emqx_bridge_resource:bridge_id(TypeBin, Name), + Params = PulsarConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("updating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, Params, Opts) of + {ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])}; + Error -> Error + end, + ct:pal("bridge update result: ~p", [Res]), + Res. + +probe_bridge_api(Config) -> + probe_bridge_api(Config, _Overrides = #{}). + +probe_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + PulsarConfig = ?config(pulsar_config, Config), + Params0 = PulsarConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Params = maps:merge(Params0, Overrides), + Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("probing bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0}; + Error -> Error + end, + ct:pal("bridge probe result: ~p", [Res]), + Res. + +start_consumer(TestCase, Config) -> + PulsarHost = ?config(pulsar_host, Config), + PulsarPort = ?config(pulsar_port, Config), + PulsarTopic = ?config(pulsar_topic, Config), + UseTLS = ?config(use_tls, Config), + %% FIXME: patch pulsar to accept binary urls... 
Scheme = + case UseTLS of + true -> <<"pulsar+ssl://">>; + false -> <<"pulsar://">> + end, + URL = + binary_to_list( + <<Scheme/binary, (list_to_binary(PulsarHost))/binary, ":", (integer_to_binary(PulsarPort))/binary>> + ), + ConnOpts = #{}, + ConsumerClientId = TestCase, + CertsPath = emqx_common_test_helpers:deps_path(emqx, "etc/certs"), + SSLOpts = #{ + enable => UseTLS, + keyfile => filename:join([CertsPath, "key.pem"]), + certfile => filename:join([CertsPath, "cert.pem"]), + cacertfile => filename:join([CertsPath, "cacert.pem"]) + }, + {ok, _ClientPid} = pulsar:ensure_supervised_client( + ConsumerClientId, + [URL], + #{ + conn_opts => ConnOpts, + ssl_opts => emqx_tls_lib:to_client_opts(SSLOpts) + } + ), + ConsumerOpts = #{ + cb_init_args => #{send_to => self()}, + cb_module => pulsar_echo_consumer, + sub_type => 'Shared', + subscription => atom_to_list(TestCase), + max_consumer_num => 1, + %% Note! This must not coincide with the client + %% id, or else weird bugs will happen, like the + %% consumer never starts... + name => test_consumer, + consumer_id => 1, + conn_opts => ConnOpts + }, + {ok, Consumer} = pulsar:ensure_supervised_consumers( + ConsumerClientId, + PulsarTopic, + ConsumerOpts + ), + %% since connection is async, and there's currently no way to + %% specify the subscription initial position as `Earliest', we + %% need to wait until the consumer is connected to avoid + %% flakiness. + ok = wait_until_consumer_connected(Consumer), + [ + {consumer_client_id, ConsumerClientId}, + {pulsar_consumer, Consumer} + ]. + +stop_consumer(Config) -> + ConsumerClientId = ?config(consumer_client_id, Config), + Consumer = ?config(pulsar_consumer, Config), + ok = pulsar:stop_and_delete_supervised_consumers(Consumer), + ok = pulsar:stop_and_delete_supervised_client(ConsumerClientId), + ok. + +wait_until_consumer_connected(Consumer) -> + ?retry( + _Sleep = 300, + _Attempts0 = 20, + true = pulsar_consumers:all_connected(Consumer) + ), + ok. + +wait_until_producer_connected() -> + wait_until_connected(pulsar_producers_sup, pulsar_producer). + +wait_until_connected(SupMod, Mod) -> + Pids = [ + P + || {_Name, SupPid, _Type, _Mods} <- supervisor:which_children(SupMod), + P <- element(2, process_info(SupPid, links)), + case proc_lib:initial_call(P) of + {Mod, init, _} -> true; + _ -> false + end + ], + ?retry( + _Sleep = 300, + _Attempts0 = 20, + lists:foreach(fun(P) -> {connected, _} = sys:get_state(P) end, Pids) + ), + ok. + +create_rule_and_action_http(Config) -> + PulsarName = ?config(pulsar_name, Config), + BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, PulsarName), + Params = #{ + enable => true, + sql => <<"SELECT * FROM \"", ?RULE_TOPIC, "\"">>, + actions => [BridgeId] + }, + Path = emqx_mgmt_api_test_util:api_path(["rules"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + ct:pal("rule action params: ~p", [Params]), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +receive_consumed(Timeout) -> + receive + {pulsar_message, #{payloads := Payloads}} -> + lists:map(fun try_decode_json/1, Payloads) + after Timeout -> + ct:pal("mailbox: ~p", [process_info(self(), messages)]), + ct:fail("no message consumed") + end. + +try_decode_json(Payload) -> + case emqx_utils_json:safe_decode(Payload, [return_maps]) of + {error, _} -> + Payload; + {ok, JSON} -> + JSON + end.
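+%% Cluster helpers for t_cluster/1: cluster/1 builds a two-node (core/core) cluster
+%% specification, and start_cluster/1 boots the peer nodes and registers their shutdown
+%% via on_exit/1.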
+ +cluster(Config) -> + PrivDataDir = ?config(priv_dir, Config), + PeerModule = + case os:getenv("IS_CI") of + false -> + slave; + _ -> + ct_slave + end, + Cluster = emqx_common_test_helpers:emqx_cluster( + [core, core], + [ + {apps, [emqx_conf, emqx_bridge, emqx_rule_engine, emqx_bridge_pulsar]}, + {listener_ports, []}, + {peer_mod, PeerModule}, + {priv_data_dir, PrivDataDir}, + {load_schema, true}, + {start_autocluster, true}, + {schema_mod, emqx_ee_conf_schema}, + {env_handler, fun + (emqx) -> + application:set_env(emqx, boot_modules, [broker, router]), + ok; + (emqx_conf) -> + ok; + (_) -> + ok + end} + ] + ), + ct:pal("cluster: ~p", [Cluster]), + Cluster. + +start_cluster(Cluster) -> + Nodes = + [ + emqx_common_test_helpers:start_slave(Name, Opts) + || {Name, Opts} <- Cluster + ], + on_exit(fun() -> + emqx_utils:pmap( + fun(N) -> + ct:pal("stopping ~p", [N]), + ok = emqx_common_test_helpers:stop_slave(N) + end, + Nodes + ) + end), + Nodes. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_start_and_produce_ok(Config) -> + MQTTTopic = ?config(mqtt_topic, Config), + ResourceId = resource_id(Config), + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + QoS = 0, + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + ?check_trace( + begin + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), + on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), + %% Publish using local topic. + Message0 = emqx_message:make(ClientId, QoS, MQTTTopic, Payload), + emqx:publish(Message0), + %% Publish using rule engine. + Message1 = emqx_message:make(ClientId, QoS, ?RULE_TOPIC_BIN, Payload), + emqx:publish(Message1), + + #{rule_id => RuleId} + end, + fun(#{rule_id := RuleId}, _Trace) -> + Data0 = receive_consumed(5_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := MQTTTopic + } + ], + Data0 + ), + Data1 = receive_consumed(5_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := ?RULE_TOPIC_BIN + } + ], + Data1 + ), + ?retry( + _Sleep = 100, + _Attempts0 = 20, + begin + ?assertMatch( + #{ + counters := #{ + dropped := 0, + failed := 0, + late_reply := 0, + matched := 2, + received := 0, + retried := 0, + success := 2 + } + }, + emqx_resource_manager:get_metrics(ResourceId) + ), + ?assertEqual( + 1, emqx_metrics_worker:get(rule_metrics, RuleId, 'actions.success') + ), + ?assertEqual( + 0, emqx_metrics_worker:get(rule_metrics, RuleId, 'actions.failed') + ), + ok + end + ), + ok + end + ), + ok. + +%% Under normal operations, the bridge will be called async via +%% `simple_async_query'. +t_sync_query(Config) -> + ResourceId = resource_id(Config), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + Message = {send_message, #{payload => Payload}}, + ?assertMatch( + {ok, #{sequence_id := _}}, emqx_resource:simple_sync_query(ResourceId, Message) + ), + ok + end, + [] + ), + ok. 
+ +t_create_via_http(Config) -> + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + + %% lightweight matrix testing some configs + ?assertMatch( + {ok, _}, + update_bridge_api( + Config, + #{ + <<"buffer">> => + #{<<"mode">> => <<"disk">>} + } + ) + ), + ?assertMatch( + {ok, _}, + update_bridge_api( + Config, + #{ + <<"buffer">> => + #{ + <<"mode">> => <<"hybrid">>, + <<"memory_overload_protection">> => true + } + } + ) + ), + ok + end, + [] + ), + ok. + +t_start_stop(Config) -> + PulsarName = ?config(pulsar_name, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + + %% Check that the bridge probe API doesn't leak atoms. + redbug:start( + [ + "emqx_resource_manager:health_check_interval -> return", + "emqx_resource_manager:with_health_check -> return" + ], + [{msgs, 100}, {time, 30_000}] + ), + ProbeRes0 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), + AtomsBefore = erlang:system_info(atom_count), + %% Probe again; shouldn't have created more atoms. + ProbeRes1 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), + AtomsAfter = erlang:system_info(atom_count), + ?assertEqual(AtomsBefore, AtomsAfter), + + %% Now stop the bridge. + ?assertMatch( + {{ok, _}, {ok, _}}, + ?wait_async_action( + emqx_bridge:disable_enable(disable, ?BRIDGE_TYPE_BIN, PulsarName), + #{?snk_kind := pulsar_bridge_stopped}, + 5_000 + ) + ), + + ok + end, + fun(Trace) -> + %% one for each probe, one for real + ?assertMatch([_, _, _], ?of_kind(pulsar_bridge_producer_stopped, Trace)), + ?assertMatch([_, _, _], ?of_kind(pulsar_bridge_client_stopped, Trace)), + ?assertMatch([_, _, _], ?of_kind(pulsar_bridge_stopped, Trace)), + ok + end + ), + ok. + +t_on_get_status(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = resource_id(Config), + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ct:sleep(500), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)) + end), + %% Check that it recovers itself. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + ok. 
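+%% t_cluster/1 creates the bridge on one core node, publishes through the other, and
+%% checks both that every node reports the resource as connected and that the message
+%% arrives at the Pulsar consumer.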
+ +t_cluster(Config) -> + MQTTTopic = ?config(mqtt_topic, Config), + ResourceId = resource_id(Config), + Cluster = cluster(Config), + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + QoS = 0, + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + ?check_trace( + begin + Nodes = [N1, N2 | _] = start_cluster(Cluster), + {ok, SRef0} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := pulsar_producer_bridge_started}), + length(Nodes), + 15_000 + ), + {ok, _} = erpc:call(N1, fun() -> create_bridge(Config) end), + {ok, _} = snabbkaffe:receive_events(SRef0), + lists:foreach( + fun(N) -> + ?retry( + _Sleep = 1_000, + _Attempts0 = 20, + ?assertEqual( + {ok, connected}, + erpc:call(N, emqx_resource_manager, health_check, [ResourceId]), + #{node => N} + ) + ) + end, + Nodes + ), + erpc:multicall(Nodes, fun wait_until_producer_connected/0), + Message0 = emqx_message:make(ClientId, QoS, MQTTTopic, Payload), + erpc:call(N2, emqx, publish, [Message0]), + + lists:foreach( + fun(N) -> + ?assertEqual( + {ok, connected}, + erpc:call(N, emqx_resource_manager, health_check, [ResourceId]), + #{node => N} + ) + end, + Nodes + ), + + ok + end, + fun(_Trace) -> + Data0 = receive_consumed(10_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := MQTTTopic + } + ], + Data0 + ), + ok + end + ), + ok. diff --git a/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE_data/pulsar_echo_consumer.erl b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE_data/pulsar_echo_consumer.erl new file mode 100644 index 000000000..834978851 --- /dev/null +++ b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE_data/pulsar_echo_consumer.erl @@ -0,0 +1,25 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(pulsar_echo_consumer). + +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% pulsar consumer API +-export([init/2, handle_message/3]). + +init(Topic, Args) -> + ct:pal("consumer init: ~p", [#{topic => Topic, args => Args}]), + SendTo = maps:get(send_to, Args), + ?tp(pulsar_echo_consumer_init, #{topic => Topic}), + {ok, #{topic => Topic, send_to => SendTo}}. + +handle_message(Message, Payloads, State) -> + #{send_to := SendTo, topic := Topic} = State, + ct:pal( + "pulsar consumer received:\n ~p", + [#{message => Message, payloads => Payloads}] + ), + SendTo ! {pulsar_message, #{topic => Topic, message => Message, payloads => Payloads}}, + ?tp(pulsar_echo_consumer_message, #{topic => Topic, message => Message, payloads => Payloads}), + {ok, 'Individual', State}. 
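Note: the test cases above read these forwarded messages through a receive_consumed/1 helper that is defined in a part of the suite not included in this excerpt. A minimal sketch of what such a helper is assumed to look like, based on the {pulsar_message, ...} tuple sent by pulsar_echo_consumer:handle_message/3 and on the decoded JSON maps asserted in t_start_and_produce_ok (the JSON-decoding call is an assumption, not taken from this patch):

    receive_consumed(Timeout) ->
        receive
            {pulsar_message, #{payloads := Payloads}} ->
                %% each payload is assumed to be a JSON-encoded rule-engine event;
                %% the exact decode helper used by the suite is not shown here
                [emqx_utils_json:decode(P, [return_maps]) || P <- Payloads]
        after Timeout ->
            ct:fail("no message consumed from Pulsar")
        end.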
diff --git a/apps/emqx_resource/src/emqx_resource_manager.erl b/apps/emqx_resource/src/emqx_resource_manager.erl index 877b35fff..f6a0ebebf 100644 --- a/apps/emqx_resource/src/emqx_resource_manager.erl +++ b/apps/emqx_resource/src/emqx_resource_manager.erl @@ -165,8 +165,13 @@ create(MgrId, ResId, Group, ResourceType, Config, Opts) -> create_dry_run(ResourceType, Config) -> ResId = make_test_id(), MgrId = set_new_owner(ResId), + Opts = + case is_map(Config) of + true -> maps:get(resource_opts, Config, #{}); + false -> #{} + end, ok = emqx_resource_manager_sup:ensure_child( - MgrId, ResId, <<"dry_run">>, ResourceType, Config, #{} + MgrId, ResId, <<"dry_run">>, ResourceType, Config, Opts ), case wait_for_ready(ResId, 5000) of ok -> diff --git a/changes/ee/feat-10378.en.md b/changes/ee/feat-10378.en.md new file mode 100644 index 000000000..ebdd299c8 --- /dev/null +++ b/changes/ee/feat-10378.en.md @@ -0,0 +1 @@ +Implement Pulsar Producer Bridge, which supports publishing messages to Pulsar from MQTT topics. diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src index 7dc8882b3..5544825f8 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src @@ -9,7 +9,9 @@ telemetry, emqx_bridge_kafka, emqx_bridge_gcp_pubsub, - emqx_bridge_opents + emqx_bridge_cassandra, + emqx_bridge_opents, + emqx_bridge_pulsar ]}, {env, []}, {modules, []}, diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl index 4b83fda3f..38f471ca2 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl @@ -36,7 +36,8 @@ api_schemas(Method) -> ref(emqx_ee_bridge_dynamo, Method), ref(emqx_ee_bridge_rocketmq, Method), ref(emqx_ee_bridge_sqlserver, Method), - ref(emqx_bridge_opents, Method) + ref(emqx_bridge_opents, Method), + ref(emqx_bridge_pulsar, Method ++ "_producer") ]. schema_modules() -> @@ -57,7 +58,8 @@ schema_modules() -> emqx_ee_bridge_dynamo, emqx_ee_bridge_rocketmq, emqx_ee_bridge_sqlserver, - emqx_bridge_opents + emqx_bridge_opents, + emqx_bridge_pulsar ]. examples(Method) -> @@ -97,7 +99,8 @@ resource_type(clickhouse) -> emqx_ee_connector_clickhouse; resource_type(dynamo) -> emqx_ee_connector_dynamo; resource_type(rocketmq) -> emqx_ee_connector_rocketmq; resource_type(sqlserver) -> emqx_ee_connector_sqlserver; -resource_type(opents) -> emqx_bridge_opents_connector. +resource_type(opents) -> emqx_bridge_opents_connector; +resource_type(pulsar_producer) -> emqx_bridge_pulsar_impl_producer. fields(bridges) -> [ @@ -165,7 +168,8 @@ fields(bridges) -> required => false } )} - ] ++ kafka_structs() ++ mongodb_structs() ++ influxdb_structs() ++ redis_structs() ++ + ] ++ kafka_structs() ++ pulsar_structs() ++ mongodb_structs() ++ influxdb_structs() ++ + redis_structs() ++ pgsql_structs() ++ clickhouse_structs() ++ sqlserver_structs(). mongodb_structs() -> @@ -202,6 +206,18 @@ kafka_structs() -> )} ]. +pulsar_structs() -> + [ + {pulsar_producer, + mk( + hoconsc:map(name, ref(emqx_bridge_pulsar, pulsar_producer)), + #{ + desc => <<"Pulsar Producer Bridge Config">>, + required => false + } + )} + ]. 
+ influxdb_structs() -> [ {Protocol, diff --git a/mix.exs b/mix.exs index e2230d55d..97fdd732a 100644 --- a/mix.exs +++ b/mix.exs @@ -169,7 +169,8 @@ defmodule EMQXUmbrella.MixProject do :emqx_bridge_redis, :emqx_bridge_rocketmq, :emqx_bridge_tdengine, - :emqx_bridge_timescale + :emqx_bridge_timescale, + :emqx_bridge_pulsar ]) end @@ -360,6 +361,7 @@ defmodule EMQXUmbrella.MixProject do emqx_ee_connector: :permanent, emqx_ee_bridge: :permanent, emqx_bridge_kafka: :permanent, + emqx_bridge_pulsar: :permanent, emqx_bridge_gcp_pubsub: :permanent, emqx_bridge_cassandra: :permanent, emqx_bridge_opents: :permanent, diff --git a/rebar.config.erl b/rebar.config.erl index 3c863046f..020285a44 100644 --- a/rebar.config.erl +++ b/rebar.config.erl @@ -454,6 +454,7 @@ relx_apps_per_edition(ee) -> emqx_ee_connector, emqx_ee_bridge, emqx_bridge_kafka, + emqx_bridge_pulsar, emqx_bridge_gcp_pubsub, emqx_bridge_cassandra, emqx_bridge_opents, diff --git a/rel/i18n/emqx_bridge_pulsar.hocon b/rel/i18n/emqx_bridge_pulsar.hocon new file mode 100644 index 000000000..1d2bb4599 --- /dev/null +++ b/rel/i18n/emqx_bridge_pulsar.hocon @@ -0,0 +1,176 @@ +emqx_bridge_pulsar { + auth_basic { + desc = "Parameters for basic authentication." + label = "Basic auth params" + } + + auth_basic_password { + desc = "Basic authentication password." + label = "Password" + } + + auth_basic_username { + desc = "Basic authentication username." + label = "Username" + } + + auth_token { + desc = "Parameters for token authentication." + label = "Token auth params" + } + + auth_token_jwt { + desc = "JWT authentication token." + label = "JWT" + } + + authentication { + desc = "Authentication configs." + label = "Authentication" + } + + buffer_memory_overload_protection { + desc = "Applicable when buffer mode is set to memory\n" + "EMQX will drop old buffered messages under high memory pressure." + " The high memory threshold is defined in config sysmon.os.sysmem_high_watermark." + " NOTE: This config only works on Linux." + label = "Memory Overload Protection" + } + + buffer_mode { + desc = "Message buffer mode.\n" + "\n" + "memory: Buffer all messages in memory. The messages will be lost" + " in case of EMQX node restart\ndisk: Buffer all messages on disk." + " The messages on disk are able to survive EMQX node restart.\n" + "hybrid: Buffer message in memory first, when up to certain limit" + " (see segment_bytes config for more information), then start offloading" + " messages to disk, Like memory mode, the messages will be lost in" + " case of EMQX node restart." + label = "Buffer Mode" + } + + buffer_per_partition_limit { + desc = "Number of bytes allowed to buffer for each Pulsar partition." + " When this limit is exceeded, old messages will be dropped in a trade for credits" + " for new messages to be buffered." + label = "Per-partition Buffer Limit" + } + + buffer_segment_bytes { + desc = "Applicable when buffer mode is set to disk or hybrid.\n" + "This value is to specify the size of each on-disk buffer file." + label = "Segment File Bytes" + } + + config_enable { + desc = "Enable (true) or disable (false) this Pulsar bridge." + label = "Enable or Disable" + } + + desc_name { + desc = "Bridge name, used as a human-readable description of the bridge." + label = "Bridge Name" + } + + desc_type { + desc = "The Bridge Type" + label = "Bridge Type" + } + + producer_batch_size { + desc = "Maximum number of individual requests to batch in a Pulsar message." 
+ label = "Batch size" + } + + producer_buffer { + desc = "Configure producer message buffer.\n\n" + "Tell Pulsar producer how to buffer messages when EMQX has more messages to" + " send than Pulsar can keep up, or when Pulsar is down." + label = "Message Buffer" + } + + producer_compression { + desc = "Compression method." + label = "Compression" + } + + producer_key_template { + desc = "Template to render Pulsar message key." + label = "Message Key" + } + + producer_local_topic { + desc = "MQTT topic or topic filter as data source (bridge input)." + " If rule action is used as data source, this config should be left empty," + " otherwise messages will be duplicated in Pulsar." + label = "Source MQTT Topic" + } + + producer_max_batch_bytes { + desc = "Maximum bytes to collect in a Pulsar message batch. Most of the Pulsar brokers" + " default to a limit of 5 MB batch size. EMQX's default value is less than 5 MB in" + " order to compensate Pulsar message encoding overheads (especially when each individual" + " message is very small). When a single message is over the limit, it is still" + " sent (as a single element batch)." + label = "Max Batch Bytes" + } + + producer_message_opts { + desc = "Template to render a Pulsar message." + label = "Pulsar Message Template" + } + + producer_pulsar_message { + desc = "Template to render a Pulsar message." + label = "Pulsar Message Template" + } + + producer_pulsar_topic { + desc = "Pulsar topic name" + label = "Pulsar topic name" + } + + producer_retention_period { + desc = "The amount of time messages will be buffered while there is no connection to" + " the Pulsar broker. Longer times mean that more memory/disk will be used" + label = "Retention Period" + } + + producer_send_buffer { + desc = "Fine tune the socket send buffer. The default value is tuned for high throughput." + label = "Socket Send Buffer Size" + } + + producer_strategy { + desc = "Partition strategy is to tell the producer how to dispatch messages to Pulsar partitions.\n" + "\n" + "random: Randomly pick a partition for each message.\n" + "roundrobin: Pick each available producer in turn for each message.\n" + "first_key_dispatch: Hash Pulsar message key of the first message in a batch" + " to a partition number." + label = "Partition Strategy" + } + + producer_sync_timeout { + desc = "Maximum wait time for receiving a receipt from Pulsar when publishing synchronously." + label = "Sync publish timeout" + } + + producer_value_template { + desc = "Template to render Pulsar message value." + label = "Message Value" + } + + pulsar_producer_struct { + desc = "Configuration for a Pulsar bridge." + label = "Pulsar Bridge Configuration" + } + + servers { + desc = "A comma separated list of Pulsar URLs in the form scheme://host[:port]" + " for the client to connect to. The supported schemes are pulsar:// (default)" + " and pulsar+ssl://. The default port is 6650." 
+ label = "Servers" + } +} diff --git a/rel/i18n/zh/emqx_bridge_pulsar.hocon b/rel/i18n/zh/emqx_bridge_pulsar.hocon new file mode 100644 index 000000000..3af8652a4 --- /dev/null +++ b/rel/i18n/zh/emqx_bridge_pulsar.hocon @@ -0,0 +1,173 @@ +emqx_bridge_pulsar { + + pulsar_producer_struct { + desc = "Pulsar 桥接配置" + label = "Pulsar 桥接配置" + } + + desc_type { + desc = "桥接类型" + label = "桥接类型" + } + + desc_name { + desc = "桥接名字,可读描述" + label = "桥接名字" + } + + config_enable { + desc = "启用(true)或停用该(false)Pulsar 数据桥接。" + label = "启用或停用" + } + + servers { + desc = "以scheme://host[:port]形式分隔的Pulsar URL列表," + "供客户端连接使用。支持的方案是 pulsar:// (默认)" + "和pulsar+ssl://。默认的端口是6650。" + label = "服务员" + } + + authentication { + desc = "认证参数。" + label = "认证" + } + + producer_batch_size { + desc = "在一个Pulsar消息中批处理的单个请求的最大数量。" + label = "批量大小" + } + + producer_compression { + desc = "压缩方法。" + label = "压缩" + } + + producer_send_buffer { + desc = "TCP socket 的发送缓存调优。默认值是针对高吞吐量的一个推荐值。" + label = "Socket 发送缓存大小" + } + + producer_sync_timeout { + desc = "同步发布时,从Pulsar接收发送回执的最长等待时间。" + label = "同步发布超时" + } + + auth_basic_username { + desc = "基本认证用户名。" + label = "用户名" + } + + auth_basic_password { + desc = "基本认证密码。" + label = "密码" + } + + auth_token_jwt { + desc = "JWT认证令牌。" + label = "JWT" + } + + producer_max_batch_bytes { + desc = "最大消息批量字节数。" + "大多数 Pulsar 环境的默认最低值是 5 MB,EMQX 的默认值比 5 MB 更小是因为需要" + "补偿 Pulsar 消息编码所需要的额外字节(尤其是当每条消息都很小的情况下)。" + "当单个消息的大小超过该限制时,它仍然会被发送,(相当于该批量中只有单个消息)。" + label = "最大批量字节数" + } + + producer_retention_period { + desc = "当没有连接到Pulsar代理时,信息将被缓冲的时间。 较长的时间意味着将使用更多的内存/磁盘" + label = "保留期" + } + + producer_local_topic { + desc = "MQTT 主题数据源由桥接指定,或留空由规则动作指定。" + label = "源 MQTT 主题" + } + + producer_pulsar_topic { + desc = "Pulsar 主题名称" + label = "Pulsar 主题名称" + } + + producer_strategy { + desc = "设置消息发布时应该如何选择 Pulsar 分区。\n\n" + "random: 为每个消息随机选择一个分区。\n" + "roundrobin: 依次为每条信息挑选可用的生产商。\n" + "first_key_dispatch: 将一批信息中的第一条信息的Pulsar信息密钥哈希到一个分区编号。" + label = "分区选择策略" + } + + producer_buffer { + desc = "配置消息缓存的相关参数。\n\n" + "当 EMQX 需要发送的消息超过 Pulsar 处理能力,或者当 Pulsar 临时下线时,EMQX 内部会将消息缓存起来。" + label = "消息缓存" + } + + buffer_mode { + desc = "消息缓存模式。\n" + "memory: 所有的消息都缓存在内存里。如果 EMQX 服务重启,缓存的消息会丢失。\n" + "disk: 缓存到磁盘上。EMQX 重启后会继续发送重启前未发送完成的消息。\n" + "hybrid: 先将消息缓存在内存中,当内存中的消息堆积超过一定限制" + "(配置项 segment_bytes 描述了该限制)后,后续的消息会缓存到磁盘上。" + "与 memory 模式一样,如果 EMQX 服务重启,缓存的消息会丢失。" + label = "缓存模式" + } + + buffer_per_partition_limit { + desc = "为每个 Pulsar 分区设置的最大缓存字节数。当超过这个上限之后,老的消息会被丢弃," + "为新的消息腾出空间。" + label = "Pulsar 分区缓存上限" + } + + buffer_segment_bytes { + desc = "当缓存模式是 diskhybrid 时适用。" + "该配置用于指定缓存到磁盘上的文件的大小。" + label = "缓存文件大小" + } + + buffer_memory_overload_protection { + desc = "缓存模式是 memoryhybrid 时适用。" + "当系统处于高内存压力时,从队列中丢弃旧的消息以减缓内存增长。" + "内存压力值由配置项 sysmon.os.sysmem_high_watermark 决定。" + "注意,该配置仅在 Linux 系统中有效。" + label = "内存过载保护" + } + + producer_message_opts { + desc = "用于生成 Pulsar 消息的模版。" + label = "Pulsar 消息模版" + } + + producer_key_template { + desc = "生成 Pulsar 消息 Key 的模版。" + label = "消息的 Key" + } + + producer_value_template { + desc = "生成 Pulsar 消息 Value 的模版。" + label = "消息的 Value" + } + + auth_basic { + desc = "基本认证的参数。" + label = "基本认证参数" + } + + auth_token { + desc = "令牌认证的参数。" + label = "Token auth params" + } + + producer_buffer { + desc = "配置消息缓存的相关参数。\n\n" + "当 EMQX 需要发送的消息超过 Pulsar 处理能力,或者当 Pulsar 临时下线时,EMQX 内部会将消息缓存起来。" + label = "消息缓存" + } + + producer_pulsar_message { + desc = "用于生成 Pulsar 消息的模版。" + label = "Pulsar 消息模版" + } + +} diff --git a/scripts/ct/run.sh b/scripts/ct/run.sh index 
c153669f4..307063e84 100755 --- a/scripts/ct/run.sh +++ b/scripts/ct/run.sh @@ -190,7 +190,10 @@ for dep in ${CT_DEPS}; do ;; opents) FILES+=( '.ci/docker-compose-file/docker-compose-opents.yaml' ) - ;; + ;; + pulsar) + FILES+=( '.ci/docker-compose-file/docker-compose-pulsar-tcp.yaml' ) + ;; *) echo "unknown_ct_dependency $dep" exit 1 From d06ce9c79da949da9a794d7f6413a20be2810dcc Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Tue, 18 Apr 2023 12:58:48 -0300 Subject: [PATCH 130/263] docs(license): change license contents after review https://github.com/emqx/emqx/pull/10378#discussion_r1170143535 --- LICENSE | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/LICENSE b/LICENSE index 68bb18ce3..8ff0a9060 100644 --- a/LICENSE +++ b/LICENSE @@ -1,12 +1,7 @@ Source code in this repository is variously licensed under below licenses. -For EMQX: Apache License 2.0, see APL.txt, -which applies to all source files except for lib-ee sub-directory. +For Default: Apache License 2.0, see APL.txt, +which applies to all source files except for folders applied with Business Source License. For EMQX Enterprise (since version 5.0): Business Source License 1.1, -see lib-ee/BSL.txt, which applies to source code in lib-ee -sub-directory and some of the apps under the apps directory. - -Source code under apps that uses BSL License: -- apps/emqx_bridge_kafka -- apps/emqx_bridge_pulsar +see apps/emqx_bridge_kafka/BSL.txt as an example, please check license files under sub directory of apps. From f4a3affd6fd9f98cb0aec107da9d76d0efbc4e08 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Tue, 18 Apr 2023 13:00:35 -0300 Subject: [PATCH 131/263] docs: change phrasing after review --- apps/emqx_bridge_kafka/README.md | 2 +- apps/emqx_bridge_pulsar/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/emqx_bridge_kafka/README.md b/apps/emqx_bridge_kafka/README.md index 80978ff10..f1b0d1f9a 100644 --- a/apps/emqx_bridge_kafka/README.md +++ b/apps/emqx_bridge_kafka/README.md @@ -27,4 +27,4 @@ Please see our [contributing.md](../../CONTRIBUTING.md). # License -See [BSL](./BSL.txt). +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_pulsar/README.md b/apps/emqx_bridge_pulsar/README.md index 09e17d8bb..fbd8bf81d 100644 --- a/apps/emqx_bridge_pulsar/README.md +++ b/apps/emqx_bridge_pulsar/README.md @@ -27,4 +27,4 @@ Please see our [contributing.md](../../CONTRIBUTING.md). # License -See [BSL](./BSL.txt). +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). From 180b6acd9e8d12076be8b1d9424150248db01876 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Tue, 18 Apr 2023 13:03:06 -0300 Subject: [PATCH 132/263] docs: remove auto-generated comment --- apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_sup.erl | 9 --------- 1 file changed, 9 deletions(-) diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_sup.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_sup.erl index 17121beab..ad7e2ae6f 100644 --- a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_sup.erl +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_sup.erl @@ -14,15 +14,6 @@ start_link() -> supervisor:start_link({local, ?SERVER}, ?MODULE, []). 
-%% sup_flags() = #{strategy => strategy(), % optional -%% intensity => non_neg_integer(), % optional -%% period => pos_integer()} % optional -%% child_spec() = #{id => child_id(), % mandatory -%% start => mfargs(), % mandatory -%% restart => restart(), % optional -%% shutdown => shutdown(), % optional -%% type => worker(), % optional -%% modules => modules()} % optional init([]) -> SupFlags = #{ strategy => one_for_all, From 1e8dd70a11f68505a01f6f61df7906a87429657c Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Wed, 19 Apr 2023 09:22:25 -0300 Subject: [PATCH 133/263] chore: fix error message and rename variable --- .../src/emqx_bridge_pulsar_impl_producer.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl index 4bc390b91..72363389e 100644 --- a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl @@ -92,12 +92,12 @@ on_start(InstanceId, Config) -> instance_id => InstanceId, pulsar_hosts => Servers }); - {error, Error} -> + {error, Reason} -> ?SLOG(error, #{ - msg => "failed_to_start_kafka_client", + msg => "failed_to_start_pulsar_client", instance_id => InstanceId, pulsar_hosts => Servers, - reason => Error + reason => Reason }), throw(failed_to_start_pulsar_client) end, From 120d3e70ea0f38cf92b92ba7e603665c7ee9a546 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Wed, 19 Apr 2023 09:28:58 -0300 Subject: [PATCH 134/263] chore: bump app vsns --- apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src index a4fbe5673..e5680cfc4 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_kafka, [ {description, "EMQX Enterprise Kafka Bridge"}, - {vsn, "0.1.0"}, + {vsn, "0.1.1"}, {registered, [emqx_bridge_kafka_consumer_sup]}, {applications, [ kernel, From 4af6e3eb6e44fde5e243f5c0320d53c99c38d823 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Wed, 19 Apr 2023 13:47:19 -0300 Subject: [PATCH 135/263] refactor: rm unused modules --- .../src/emqx_bridge_pulsar.app.src | 1 - .../src/emqx_bridge_pulsar_app.erl | 14 ----------- .../src/emqx_bridge_pulsar_sup.erl | 24 ------------------- 3 files changed, 39 deletions(-) delete mode 100644 apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_app.erl delete mode 100644 apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_sup.erl diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src index cd89f6867..ead7cb715 100644 --- a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src @@ -2,7 +2,6 @@ {description, "EMQX Pulsar Bridge"}, {vsn, "0.1.0"}, {registered, []}, - {mod, {emqx_bridge_pulsar_app, []}}, {applications, [ kernel, stdlib, diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_app.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_app.erl deleted file mode 100644 index bedf42cf6..000000000 --- a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_app.erl +++ /dev/null @@ -1,14 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ 
Technologies Co., Ltd. All Rights Reserved. -%%-------------------------------------------------------------------- --module(emqx_bridge_pulsar_app). - --behaviour(application). - --export([start/2, stop/1]). - -start(_StartType, _StartArgs) -> - emqx_bridge_pulsar_sup:start_link(). - -stop(_State) -> - ok. diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_sup.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_sup.erl deleted file mode 100644 index ad7e2ae6f..000000000 --- a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_sup.erl +++ /dev/null @@ -1,24 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. -%%-------------------------------------------------------------------- --module(emqx_bridge_pulsar_sup). - --behaviour(supervisor). - --export([start_link/0]). - --export([init/1]). - --define(SERVER, ?MODULE). - -start_link() -> - supervisor:start_link({local, ?SERVER}, ?MODULE, []). - -init([]) -> - SupFlags = #{ - strategy => one_for_all, - intensity => 0, - period => 1 - }, - ChildSpecs = [], - {ok, {SupFlags, ChildSpecs}}. From 631863d8432ebc798d4171b819761cb01df54304 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 20 Apr 2023 10:54:58 -0300 Subject: [PATCH 136/263] docs: improve descriptions Co-authored-by: Zaiming (Stone) Shi --- rel/i18n/emqx_bridge_pulsar.hocon | 2 +- rel/i18n/zh/emqx_bridge_pulsar.hocon | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/rel/i18n/emqx_bridge_pulsar.hocon b/rel/i18n/emqx_bridge_pulsar.hocon index 1d2bb4599..205e0271e 100644 --- a/rel/i18n/emqx_bridge_pulsar.hocon +++ b/rel/i18n/emqx_bridge_pulsar.hocon @@ -147,7 +147,7 @@ emqx_bridge_pulsar { "\n" "random: Randomly pick a partition for each message.\n" "roundrobin: Pick each available producer in turn for each message.\n" - "first_key_dispatch: Hash Pulsar message key of the first message in a batch" + "key_dispatch: Hash Pulsar message key of the first message in a batch" " to a partition number." 
label = "Partition Strategy" } diff --git a/rel/i18n/zh/emqx_bridge_pulsar.hocon b/rel/i18n/zh/emqx_bridge_pulsar.hocon index 3af8652a4..23643060b 100644 --- a/rel/i18n/zh/emqx_bridge_pulsar.hocon +++ b/rel/i18n/zh/emqx_bridge_pulsar.hocon @@ -16,13 +16,13 @@ emqx_bridge_pulsar { } config_enable { - desc = "启用(true)或停用该(false)Pulsar 数据桥接。" + desc = "启用(true)或停用(false)该 Pulsar 数据桥接。" label = "启用或停用" } servers { - desc = "以scheme://host[:port]形式分隔的Pulsar URL列表," - "供客户端连接使用。支持的方案是 pulsar:// (默认)" + desc = "以逗号分隔的 scheme://host[:port] 格式的 Pulsar URL 列表," + "支持的 scheme 有 pulsar:// (默认)" "和pulsar+ssl://。默认的端口是6650。" label = "服务员" } @@ -94,7 +94,7 @@ emqx_bridge_pulsar { desc = "设置消息发布时应该如何选择 Pulsar 分区。\n\n" "random: 为每个消息随机选择一个分区。\n" "roundrobin: 依次为每条信息挑选可用的生产商。\n" - "first_key_dispatch: 将一批信息中的第一条信息的Pulsar信息密钥哈希到一个分区编号。" + "key_dispatch: 将一批信息中的第一条信息的Pulsar信息密钥哈希到一个分区编号。" label = "分区选择策略" } From 4f2262129b338a1158774c603351ccdd5ded105c Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 20 Apr 2023 11:00:48 -0300 Subject: [PATCH 137/263] refactor: rename `{first_,}key_dispatch` partition strategy option --- apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl | 2 +- .../src/emqx_bridge_pulsar_impl_producer.erl | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl index a3e50054e..18faf0e3b 100644 --- a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl @@ -83,7 +83,7 @@ fields(producer_opts) -> {pulsar_topic, mk(binary(), #{required => true, desc => ?DESC("producer_pulsar_topic")})}, {strategy, mk( - hoconsc:enum([random, roundrobin, first_key_dispatch]), + hoconsc:enum([random, roundrobin, key_dispatch]), #{default => random, desc => ?DESC("producer_strategy")} )}, {buffer, mk(ref(producer_buffer), #{required => false, desc => ?DESC("producer_buffer")})}, diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl index 72363389e..b86124417 100644 --- a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl @@ -28,7 +28,7 @@ }. -type buffer_mode() :: memory | disk | hybrid. -type compression_mode() :: no_compression | snappy | zlib. --type partition_strategy() :: random | roundrobin | first_key_dispatch. +-type partition_strategy() :: random | roundrobin | key_dispatch. -type message_template_raw() :: #{ key := binary(), value := binary() @@ -290,7 +290,7 @@ start_producer(Config, InstanceId, ClientId, ClientOpts) -> name => ProducerName, retention_period => RetentionPeriod, ssl_opts => SSLOpts, - strategy => Strategy, + strategy => partition_strategy(Strategy), tcp_opts => [{sndbuf, SendBuffer}] }, ProducerOpts = maps:merge(ReplayQOpts, ProducerOpts0), @@ -394,3 +394,6 @@ get_producer_status(Producers) -> true -> connected; false -> connecting end. + +partition_strategy(key_dispatch) -> first_key_dispatch; +partition_strategy(Strategy) -> Strategy. 
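For reference, a producer bridge using the renamed strategy value could be configured roughly as sketched below. Field names follow the schema fields and i18n entries added in this patch set; the server address, topic and the particular choice of optional fields are illustrative assumptions only:

    bridges.pulsar_producer.my_producer {
      enable = true
      servers = "pulsar://localhost:6650"
      pulsar_topic = "persistent://public/default/mqtt-events"
      strategy = key_dispatch
      buffer {mode = memory}
      local_topic = "t/#"
    }

With strategy = key_dispatch, the connector translates the value to the underlying client's first_key_dispatch, as shown in partition_strategy/1 above.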
From cb149ac3458b6048f3559172a1ebeb5801b75000 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 20 Apr 2023 11:02:19 -0300 Subject: [PATCH 138/263] docs: improve descriptions Co-authored-by: Zaiming (Stone) Shi --- rel/i18n/emqx_bridge_pulsar.hocon | 1 - 1 file changed, 1 deletion(-) diff --git a/rel/i18n/emqx_bridge_pulsar.hocon b/rel/i18n/emqx_bridge_pulsar.hocon index 205e0271e..92294bb75 100644 --- a/rel/i18n/emqx_bridge_pulsar.hocon +++ b/rel/i18n/emqx_bridge_pulsar.hocon @@ -39,7 +39,6 @@ emqx_bridge_pulsar { buffer_mode { desc = "Message buffer mode.\n" - "\n" "memory: Buffer all messages in memory. The messages will be lost" " in case of EMQX node restart\ndisk: Buffer all messages on disk." " The messages on disk are able to survive EMQX node restart.\n" From 4aad5c74545a4bb75df9c771fb32e21327a273d4 Mon Sep 17 00:00:00 2001 From: JianBo He Date: Mon, 24 Apr 2023 21:44:21 +0800 Subject: [PATCH 139/263] chore: improve changes --- changes/ce/feat-10457.en.md | 4 ++-- changes/ce/fix-10420.zh.md | 0 2 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 changes/ce/fix-10420.zh.md diff --git a/changes/ce/feat-10457.en.md b/changes/ce/feat-10457.en.md index d6a44bd53..a11e9b424 100644 --- a/changes/ce/feat-10457.en.md +++ b/changes/ce/feat-10457.en.md @@ -1,4 +1,4 @@ Deprecates the integration with StatsD. -Since StatsD is not used a lot. So we will deprecate it in the next release -and plan to remove it in 5.1 +There seemd to be no user using StatsD integration, so we have decided to hide this feature +for now. We will either remove it based on requirements in the future. diff --git a/changes/ce/fix-10420.zh.md b/changes/ce/fix-10420.zh.md deleted file mode 100644 index e69de29bb..000000000 From 377b1433254c1f047ca689733169f4103ebf1577 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Mon, 24 Apr 2023 11:01:33 -0300 Subject: [PATCH 140/263] refactor: split `parse_server` into smaller functions, improve return type to use map --- apps/emqx/src/emqx_schema.erl | 173 ++++++++++++------ apps/emqx/test/emqx_schema_tests.erl | 74 +++++--- .../src/emqx_bridge_cassandra_connector.erl | 11 +- .../emqx_bridge_cassandra_connector_SUITE.erl | 11 +- .../src/emqx_bridge_gcp_pubsub_connector.erl | 2 +- .../src/emqx_bridge_pulsar_impl_producer.erl | 2 +- .../src/emqx_connector_ldap.erl | 12 +- .../src/emqx_connector_mongo.erl | 7 +- .../src/emqx_connector_mysql.erl | 2 +- .../src/emqx_connector_pgsql.erl | 2 +- .../src/emqx_connector_redis.erl | 8 +- .../src/mqtt/emqx_connector_mqtt_schema.erl | 3 +- apps/emqx_statsd/src/emqx_statsd.erl | 2 +- .../test/emqx_ee_bridge_redis_SUITE.erl | 11 +- .../src/emqx_ee_connector_dynamo.erl | 2 +- .../src/emqx_ee_connector_influxdb.erl | 2 +- .../src/emqx_ee_connector_rocketmq.erl | 2 +- .../src/emqx_ee_connector_sqlserver.erl | 2 +- .../src/emqx_ee_connector_tdengine.erl | 2 +- 19 files changed, 220 insertions(+), 110 deletions(-) diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index 248fdad7f..69f234e47 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -66,6 +66,12 @@ -typerefl_from_string({url/0, emqx_schema, to_url}). -typerefl_from_string({json_binary/0, emqx_schema, to_json_binary}). +-type parsed_server() :: #{ + hostname := string(), + port => port_number(), + scheme => string() +}. 
+ -export([ validate_heap_size/1, user_lookup_fun_tr/2, @@ -2901,10 +2907,7 @@ servers_validator(Opts, Required) -> %% `no_port': by default it's `false', when set to `true', %% a `throw' exception is raised if the port is found. -spec parse_server(undefined | string() | binary(), server_parse_option()) -> - string() - | {string(), port_number()} - | {string(), string()} - | {string(), string(), port_number()}. + undefined | parsed_server(). parse_server(Str, Opts) -> case parse_servers(Str, Opts) of undefined -> @@ -2918,12 +2921,7 @@ parse_server(Str, Opts) -> %% @doc Parse comma separated `host[:port][,host[:port]]' endpoints %% into a list of `{Host, Port}' tuples or just `Host' string. -spec parse_servers(undefined | string() | binary(), server_parse_option()) -> - [ - string() - | {string(), port_number()} - | {string(), string()} - | {string(), string(), port_number()} - ]. + undefined | [parsed_server()]. parse_servers(undefined, _Opts) -> %% should not parse 'undefined' as string, %% not to throw exception either, @@ -2988,55 +2986,112 @@ do_parse_server(Str, Opts) -> ok end, %% do not split with space, there should be no space allowed between host and port - case string:tokens(Str, ":") of - [Scheme, "//" ++ Hostname, Port] -> - NotExpectingPort andalso throw("not_expecting_port_number"), - NotExpectingScheme andalso throw("not_expecting_scheme"), - {check_scheme(Scheme, Opts), check_hostname(Hostname), parse_port(Port)}; - [Scheme, "//" ++ Hostname] -> - NotExpectingScheme andalso throw("not_expecting_scheme"), - case is_integer(DefaultPort) of - true -> - {check_scheme(Scheme, Opts), check_hostname(Hostname), DefaultPort}; - false when NotExpectingPort -> - {check_scheme(Scheme, Opts), check_hostname(Hostname)}; - false -> - throw("missing_port_number") - end; - [Hostname, Port] -> - NotExpectingPort andalso throw("not_expecting_port_number"), - case is_list(DefaultScheme) of - false -> - {check_hostname(Hostname), parse_port(Port)}; - true -> - {DefaultScheme, check_hostname(Hostname), parse_port(Port)} - end; - [Hostname] -> - case is_integer(DefaultPort) orelse NotExpectingPort of - true -> - ok; - false -> - throw("missing_port_number") - end, - case is_list(DefaultScheme) orelse NotExpectingScheme of - true -> - ok; - false -> - throw("missing_scheme") - end, - case {is_integer(DefaultPort), is_list(DefaultScheme)} of - {true, true} -> - {DefaultScheme, check_hostname(Hostname), DefaultPort}; - {true, false} -> - {check_hostname(Hostname), DefaultPort}; - {false, true} -> - {DefaultScheme, check_hostname(Hostname)}; - {false, false} -> - check_hostname(Hostname) - end; - _ -> - throw("bad_host_port") - end. + Tokens = string:tokens(Str, ":"), + Context = #{ + not_expecting_port => NotExpectingPort, + not_expecting_scheme => NotExpectingScheme, + default_port => DefaultPort, + default_scheme => DefaultScheme, + opts => Opts + }, + check_server_parts(Tokens, Context). 
+ +check_server_parts([Scheme, "//" ++ Hostname, Port], Context) -> + #{ + not_expecting_scheme := NotExpectingScheme, + not_expecting_port := NotExpectingPort, + opts := Opts + } = Context, + NotExpectingPort andalso throw("not_expecting_port_number"), + NotExpectingScheme andalso throw("not_expecting_scheme"), + #{ + scheme => check_scheme(Scheme, Opts), + hostname => check_hostname(Hostname), + port => parse_port(Port) + }; +check_server_parts([Scheme, "//" ++ Hostname], Context) -> + #{ + not_expecting_scheme := NotExpectingScheme, + not_expecting_port := NotExpectingPort, + default_port := DefaultPort, + opts := Opts + } = Context, + NotExpectingScheme andalso throw("not_expecting_scheme"), + case is_integer(DefaultPort) of + true -> + #{ + scheme => check_scheme(Scheme, Opts), + hostname => check_hostname(Hostname), + port => DefaultPort + }; + false when NotExpectingPort -> + #{ + scheme => check_scheme(Scheme, Opts), + hostname => check_hostname(Hostname) + }; + false -> + throw("missing_port_number") + end; +check_server_parts([Hostname, Port], Context) -> + #{ + not_expecting_port := NotExpectingPort, + default_scheme := DefaultScheme + } = Context, + NotExpectingPort andalso throw("not_expecting_port_number"), + case is_list(DefaultScheme) of + false -> + #{ + hostname => check_hostname(Hostname), + port => parse_port(Port) + }; + true -> + #{ + scheme => DefaultScheme, + hostname => check_hostname(Hostname), + port => parse_port(Port) + } + end; +check_server_parts([Hostname], Context) -> + #{ + not_expecting_scheme := NotExpectingScheme, + not_expecting_port := NotExpectingPort, + default_port := DefaultPort, + default_scheme := DefaultScheme + } = Context, + case is_integer(DefaultPort) orelse NotExpectingPort of + true -> + ok; + false -> + throw("missing_port_number") + end, + case is_list(DefaultScheme) orelse NotExpectingScheme of + true -> + ok; + false -> + throw("missing_scheme") + end, + case {is_integer(DefaultPort), is_list(DefaultScheme)} of + {true, true} -> + #{ + scheme => DefaultScheme, + hostname => check_hostname(Hostname), + port => DefaultPort + }; + {true, false} -> + #{ + hostname => check_hostname(Hostname), + port => DefaultPort + }; + {false, true} -> + #{ + scheme => DefaultScheme, + hostname => check_hostname(Hostname) + }; + {false, false} -> + #{hostname => check_hostname(Hostname)} + end; +check_server_parts(_Tokens, _Context) -> + throw("bad_host_port"). 
check_scheme(Str, Opts) -> SupportedSchemes = maps:get(supported_schemes, Opts, []), diff --git a/apps/emqx/test/emqx_schema_tests.erl b/apps/emqx/test/emqx_schema_tests.erl index c13dc8055..cb51aca46 100644 --- a/apps/emqx/test/emqx_schema_tests.erl +++ b/apps/emqx/test/emqx_schema_tests.erl @@ -219,112 +219,124 @@ parse_server_test_() -> ?T( "single server, binary, no port", ?assertEqual( - [{"localhost", DefaultPort}], + [#{hostname => "localhost", port => DefaultPort}], Parse(<<"localhost">>) ) ), ?T( "single server, string, no port", ?assertEqual( - [{"localhost", DefaultPort}], + [#{hostname => "localhost", port => DefaultPort}], Parse("localhost") ) ), ?T( "single server, list(string), no port", ?assertEqual( - [{"localhost", DefaultPort}], + [#{hostname => "localhost", port => DefaultPort}], Parse(["localhost"]) ) ), ?T( "single server, list(binary), no port", ?assertEqual( - [{"localhost", DefaultPort}], + [#{hostname => "localhost", port => DefaultPort}], Parse([<<"localhost">>]) ) ), ?T( "single server, binary, with port", ?assertEqual( - [{"localhost", 9999}], + [#{hostname => "localhost", port => 9999}], Parse(<<"localhost:9999">>) ) ), ?T( "single server, list(string), with port", ?assertEqual( - [{"localhost", 9999}], + [#{hostname => "localhost", port => 9999}], Parse(["localhost:9999"]) ) ), ?T( "single server, string, with port", ?assertEqual( - [{"localhost", 9999}], + [#{hostname => "localhost", port => 9999}], Parse("localhost:9999") ) ), ?T( "single server, list(binary), with port", ?assertEqual( - [{"localhost", 9999}], + [#{hostname => "localhost", port => 9999}], Parse([<<"localhost:9999">>]) ) ), ?T( "multiple servers, string, no port", ?assertEqual( - [{"host1", DefaultPort}, {"host2", DefaultPort}], + [ + #{hostname => "host1", port => DefaultPort}, + #{hostname => "host2", port => DefaultPort} + ], Parse("host1, host2") ) ), ?T( "multiple servers, binary, no port", ?assertEqual( - [{"host1", DefaultPort}, {"host2", DefaultPort}], + [ + #{hostname => "host1", port => DefaultPort}, + #{hostname => "host2", port => DefaultPort} + ], Parse(<<"host1, host2,,,">>) ) ), ?T( "multiple servers, list(string), no port", ?assertEqual( - [{"host1", DefaultPort}, {"host2", DefaultPort}], + [ + #{hostname => "host1", port => DefaultPort}, + #{hostname => "host2", port => DefaultPort} + ], Parse(["host1", "host2"]) ) ), ?T( "multiple servers, list(binary), no port", ?assertEqual( - [{"host1", DefaultPort}, {"host2", DefaultPort}], + [ + #{hostname => "host1", port => DefaultPort}, + #{hostname => "host2", port => DefaultPort} + ], Parse([<<"host1">>, <<"host2">>]) ) ), ?T( "multiple servers, string, with port", ?assertEqual( - [{"host1", 1234}, {"host2", 2345}], + [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}], Parse("host1:1234, host2:2345") ) ), ?T( "multiple servers, binary, with port", ?assertEqual( - [{"host1", 1234}, {"host2", 2345}], + [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}], Parse(<<"host1:1234, host2:2345, ">>) ) ), ?T( "multiple servers, list(string), with port", ?assertEqual( - [{"host1", 1234}, {"host2", 2345}], + [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}], Parse([" host1:1234 ", "host2:2345"]) ) ), ?T( "multiple servers, list(binary), with port", ?assertEqual( - [{"host1", 1234}, {"host2", 2345}], + [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}], Parse([<<"host1:1234">>, <<"host2:2345">>]) ) ), @@ -352,7 +364,7 @@ parse_server_test_() 
-> ?T( "multiple servers without port, mixed list(binary|string)", ?assertEqual( - ["host1", "host2"], + [#{hostname => "host1"}, #{hostname => "host2"}], Parse2([<<"host1">>, "host2"], #{no_port => true}) ) ), @@ -394,14 +406,18 @@ parse_server_test_() -> ?T( "single server map", ?assertEqual( - [{"host1.domain", 1234}], + [#{hostname => "host1.domain", port => 1234}], HoconParse("host1.domain:1234") ) ), ?T( "multiple servers map", ?assertEqual( - [{"host1.domain", 1234}, {"host2.domain", 2345}, {"host3.domain", 3456}], + [ + #{hostname => "host1.domain", port => 1234}, + #{hostname => "host2.domain", port => 2345}, + #{hostname => "host3.domain", port => 3456} + ], HoconParse("host1.domain:1234,host2.domain:2345,host3.domain:3456") ) ), @@ -451,7 +467,7 @@ parse_server_test_() -> ?T( "scheme, hostname and port", ?assertEqual( - {"pulsar+ssl", "host", 6651}, + #{scheme => "pulsar+ssl", hostname => "host", port => 6651}, emqx_schema:parse_server( "pulsar+ssl://host:6651", #{ @@ -464,7 +480,7 @@ parse_server_test_() -> ?T( "scheme and hostname, default port", ?assertEqual( - {"pulsar", "host", 6650}, + #{scheme => "pulsar", hostname => "host", port => 6650}, emqx_schema:parse_server( "pulsar://host", #{ @@ -477,7 +493,7 @@ parse_server_test_() -> ?T( "scheme and hostname, no port", ?assertEqual( - {"pulsar", "host"}, + #{scheme => "pulsar", hostname => "host"}, emqx_schema:parse_server( "pulsar://host", #{ @@ -503,7 +519,7 @@ parse_server_test_() -> ?T( "hostname, default scheme, no default port", ?assertEqual( - {"pulsar", "host"}, + #{scheme => "pulsar", hostname => "host"}, emqx_schema:parse_server( "host", #{ @@ -517,7 +533,7 @@ parse_server_test_() -> ?T( "hostname, default scheme, default port", ?assertEqual( - {"pulsar", "host", 6650}, + #{scheme => "pulsar", hostname => "host", port => 6650}, emqx_schema:parse_server( "host", #{ @@ -544,7 +560,7 @@ parse_server_test_() -> ?T( "hostname, default scheme, defined port", ?assertEqual( - {"pulsar", "host", 6651}, + #{scheme => "pulsar", hostname => "host", port => 6651}, emqx_schema:parse_server( "host:6651", #{ @@ -572,7 +588,7 @@ parse_server_test_() -> ?T( "hostname, default scheme, defined port", ?assertEqual( - {"pulsar", "host", 6651}, + #{scheme => "pulsar", hostname => "host", port => 6651}, emqx_schema:parse_server( "host:6651", #{ @@ -600,9 +616,9 @@ parse_server_test_() -> "multiple hostnames with schemes (1)", ?assertEqual( [ - {"pulsar", "host", 6649}, - {"pulsar+ssl", "other.host", 6651}, - {"pulsar", "yet.another", 6650} + #{scheme => "pulsar", hostname => "host", port => 6649}, + #{scheme => "pulsar+ssl", hostname => "other.host", port => 6651}, + #{scheme => "pulsar", hostname => "yet.another", port => 6650} ], emqx_schema:parse_servers( "pulsar://host:6649, pulsar+ssl://other.host:6651,pulsar://yet.another", diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl index cf6ddff9f..d0a1df7a8 100644 --- a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl @@ -92,7 +92,7 @@ callback_mode() -> async_if_possible. 
on_start( InstId, #{ - servers := Servers, + servers := Servers0, keyspace := Keyspace, username := Username, pool_size := PoolSize, @@ -104,9 +104,16 @@ on_start( connector => InstId, config => emqx_utils:redact(Config) }), + Servers = + lists:map( + fun(#{hostname := Host, port := Port}) -> + {Host, Port} + end, + emqx_schema:parse_servers(Servers0, ?DEFAULT_SERVER_OPTION) + ), Options = [ - {nodes, emqx_schema:parse_servers(Servers, ?DEFAULT_SERVER_OPTION)}, + {nodes, Servers}, {username, Username}, {password, emqx_secret:wrap(maps:get(password, Config, ""))}, {keyspace, Keyspace}, diff --git a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl index f419283a8..452db33a7 100644 --- a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl +++ b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl @@ -38,9 +38,14 @@ groups() -> []. cassandra_servers() -> - emqx_schema:parse_servers( - iolist_to_binary([?CASSANDRA_HOST, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]), - #{default_port => ?CASSANDRA_DEFAULT_PORT} + lists:map( + fun(#{hostname := Host, port := Port}) -> + {Host, Port} + end, + emqx_schema:parse_servers( + iolist_to_binary([?CASSANDRA_HOST, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]), + #{default_port => ?CASSANDRA_DEFAULT_PORT} + ) ). init_per_suite(Config) -> diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl index a3f0ef36b..98f3e497d 100644 --- a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl @@ -81,7 +81,7 @@ on_start( %% emulating the emulator behavior %% https://cloud.google.com/pubsub/docs/emulator HostPort = os:getenv("PUBSUB_EMULATOR_HOST", "pubsub.googleapis.com:443"), - {Host, Port} = emqx_schema:parse_server(HostPort, #{default_port => 443}), + #{hostname := Host, port := Port} = emqx_schema:parse_server(HostPort, #{default_port => 443}), PoolType = random, Transport = tls, TransportOpts = emqx_tls_lib:to_client_opts(#{enable => true, verify => verify_none}), diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl index b86124417..0b195df66 100644 --- a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl @@ -180,7 +180,7 @@ to_bin(B) when is_binary(B) -> format_servers(Servers0) -> Servers1 = emqx_schema:parse_servers(Servers0, ?PULSAR_HOST_OPTIONS), lists:map( - fun({Scheme, Host, Port}) -> + fun(#{scheme := Scheme, hostname := Host, port := Port}) -> Scheme ++ "://" ++ Host ++ ":" ++ integer_to_list(Port) end, Servers1 diff --git a/apps/emqx_connector/src/emqx_connector_ldap.erl b/apps/emqx_connector/src/emqx_connector_ldap.erl index e2121de22..c3e1db7d3 100644 --- a/apps/emqx_connector/src/emqx_connector_ldap.erl +++ b/apps/emqx_connector/src/emqx_connector_ldap.erl @@ -67,7 +67,17 @@ on_start( connector => InstId, config => emqx_utils:redact(Config) }), - Servers = emqx_schema:parse_servers(Servers0, ?LDAP_HOST_OPTIONS), + Servers1 = emqx_schema:parse_servers(Servers0, ?LDAP_HOST_OPTIONS), + Servers = + lists:map( + fun + (#{hostname := Host, port := Port0}) -> + {Host, Port0}; + (#{hostname := Host}) -> + Host + end, + Servers1 + ), 
SslOpts = case maps:get(enable, SSL) of true -> diff --git a/apps/emqx_connector/src/emqx_connector_mongo.erl b/apps/emqx_connector/src/emqx_connector_mongo.erl index a65a32842..dde8652f0 100644 --- a/apps/emqx_connector/src/emqx_connector_mongo.erl +++ b/apps/emqx_connector/src/emqx_connector_mongo.erl @@ -537,4 +537,9 @@ format_hosts(Hosts) -> lists:map(fun format_host/1, Hosts). parse_servers(HoconValue) -> - emqx_schema:parse_servers(HoconValue, ?MONGO_HOST_OPTIONS). + lists:map( + fun(#{hostname := Host, port := Port}) -> + {Host, Port} + end, + emqx_schema:parse_servers(HoconValue, ?MONGO_HOST_OPTIONS) + ). diff --git a/apps/emqx_connector/src/emqx_connector_mysql.erl b/apps/emqx_connector/src/emqx_connector_mysql.erl index 45d459e70..b8c1250fe 100644 --- a/apps/emqx_connector/src/emqx_connector_mysql.erl +++ b/apps/emqx_connector/src/emqx_connector_mysql.erl @@ -98,7 +98,7 @@ on_start( ssl := SSL } = Config ) -> - {Host, Port} = emqx_schema:parse_server(Server, ?MYSQL_HOST_OPTIONS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?MYSQL_HOST_OPTIONS), ?SLOG(info, #{ msg => "starting_mysql_connector", connector => InstId, diff --git a/apps/emqx_connector/src/emqx_connector_pgsql.erl b/apps/emqx_connector/src/emqx_connector_pgsql.erl index ddbf9491d..3b2375d04 100644 --- a/apps/emqx_connector/src/emqx_connector_pgsql.erl +++ b/apps/emqx_connector/src/emqx_connector_pgsql.erl @@ -91,7 +91,7 @@ on_start( ssl := SSL } = Config ) -> - {Host, Port} = emqx_schema:parse_server(Server, ?PGSQL_HOST_OPTIONS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?PGSQL_HOST_OPTIONS), ?SLOG(info, #{ msg => "starting_postgresql_connector", connector => InstId, diff --git a/apps/emqx_connector/src/emqx_connector_redis.erl b/apps/emqx_connector/src/emqx_connector_redis.erl index e2155eb49..32ac77226 100644 --- a/apps/emqx_connector/src/emqx_connector_redis.erl +++ b/apps/emqx_connector/src/emqx_connector_redis.erl @@ -131,7 +131,13 @@ on_start( _ -> servers end, Servers0 = maps:get(ConfKey, Config), - Servers = [{servers, emqx_schema:parse_servers(Servers0, ?REDIS_HOST_OPTIONS)}], + Servers1 = lists:map( + fun(#{hostname := Host, port := Port}) -> + {Host, Port} + end, + emqx_schema:parse_servers(Servers0, ?REDIS_HOST_OPTIONS) + ), + Servers = [{servers, Servers1}], Database = case Type of cluster -> []; diff --git a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl index e08804685..2a40980af 100644 --- a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl +++ b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl @@ -293,4 +293,5 @@ qos() -> hoconsc:union([emqx_schema:qos(), binary()]). parse_server(Str) -> - emqx_schema:parse_server(Str, ?MQTT_HOST_OPTS). + #{hostname := Host, port := Port} = emqx_schema:parse_server(Str, ?MQTT_HOST_OPTS), + {Host, Port}. 
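The call-site updates above all follow one pattern: parse_server/2 and parse_servers/2 now return maps, and callers that still need host/port tuples rebuild them locally, e.g. (a condensed sketch of the pattern, not code from this patch):

    Servers = [{Host, Port} || #{hostname := Host, port := Port} <- emqx_schema:parse_servers(Servers0, HostOpts)],

Returning a map keeps the optional parts (scheme, port) out of any fixed tuple shape, which is why emqx_connector_ldap above matches both #{hostname := Host, port := Port} and the port-less #{hostname := Host} variant.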
diff --git a/apps/emqx_statsd/src/emqx_statsd.erl b/apps/emqx_statsd/src/emqx_statsd.erl index c5a7fc1c8..b2d726b07 100644 --- a/apps/emqx_statsd/src/emqx_statsd.erl +++ b/apps/emqx_statsd/src/emqx_statsd.erl @@ -80,7 +80,7 @@ init(Conf) -> flush_time_interval := FlushTimeInterval } = Conf, FlushTimeInterval1 = flush_interval(FlushTimeInterval, SampleTimeInterval), - {Host, Port} = emqx_schema:parse_server(Server, ?SERVER_PARSE_OPTS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?SERVER_PARSE_OPTS), Tags = maps:fold(fun(K, V, Acc) -> [{to_bin(K), to_bin(V)} | Acc] end, [], TagsRaw), Opts = [{tags, Tags}, {host, Host}, {port, Port}, {prefix, <<"emqx">>}], {ok, Pid} = estatsd:start_link(Opts), diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_redis_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_redis_SUITE.erl index f0b70d21b..56f932aba 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_redis_SUITE.erl +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_redis_SUITE.erl @@ -449,9 +449,14 @@ all_test_hosts() -> ). parse_servers(Servers) -> - emqx_schema:parse_servers(Servers, #{ - default_port => 6379 - }). + lists:map( + fun(#{hostname := Host, port := Port}) -> + {Host, Port} + end, + emqx_schema:parse_servers(Servers, #{ + default_port => 6379 + }) + ). redis_connect_ssl_opts(Type) -> maps:merge( diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl index ebb86f577..f45f8ca2f 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl @@ -92,7 +92,7 @@ on_start( }), {Schema, Server} = get_host_schema(to_str(Url)), - {Host, Port} = emqx_schema:parse_server(Server, ?DYNAMO_HOST_OPTIONS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?DYNAMO_HOST_OPTIONS), Options = [ {config, #{ diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl index 700eb2a81..331577486 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl @@ -294,7 +294,7 @@ client_config( server := Server } ) -> - {Host, Port} = emqx_schema:parse_server(Server, ?INFLUXDB_HOST_OPTIONS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?INFLUXDB_HOST_OPTIONS), [ {host, str(Host)}, {port, Port}, diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_rocketmq.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_rocketmq.erl index 205359bb8..74fb4eedd 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_rocketmq.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_rocketmq.erl @@ -105,7 +105,7 @@ on_start( config => redact(Config1) }), Config = maps:merge(default_security_info(), Config1), - {Host, Port} = emqx_schema:parse_server(Server, ?ROCKETMQ_HOST_OPTIONS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?ROCKETMQ_HOST_OPTIONS), Server1 = [{Host, Port}], ClientId = client_id(InstanceId), diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_sqlserver.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_sqlserver.erl index 70bd76d14..8ea4429d0 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_sqlserver.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_sqlserver.erl @@ -355,7 +355,7 @@ conn_str([], Acc) -> conn_str([{driver, Driver} | Opts], Acc) -> conn_str(Opts, ["Driver=" ++ 
str(Driver) | Acc]); conn_str([{server, Server} | Opts], Acc) -> - {Host, Port} = emqx_schema:parse_server(Server, ?SQLSERVER_HOST_OPTIONS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?SQLSERVER_HOST_OPTIONS), conn_str(Opts, ["Server=" ++ str(Host) ++ "," ++ str(Port) | Acc]); conn_str([{database, Database} | Opts], Acc) -> conn_str(Opts, ["Database=" ++ str(Database) | Acc]); diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_tdengine.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_tdengine.erl index f9ca21ad7..09cbd8db8 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_tdengine.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_tdengine.erl @@ -96,7 +96,7 @@ on_start( config => emqx_utils:redact(Config) }), - {Host, Port} = emqx_schema:parse_server(Server, ?TD_HOST_OPTIONS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?TD_HOST_OPTIONS), Options = [ {host, to_bin(Host)}, {port, Port}, From 99f3965f4eb51d503ea63489448ee93c41f588a0 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Mon, 24 Apr 2023 14:24:42 -0300 Subject: [PATCH 141/263] feat(schema_registry): use rocksdb as table type for protobuf cache --- .../emqx_ee_schema_registry/src/emqx_ee_schema_registry.app.src | 2 +- lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.erl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.app.src b/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.app.src index 87f6e53d0..aa43cf248 100644 --- a/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.app.src +++ b/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.app.src @@ -1,6 +1,6 @@ {application, emqx_ee_schema_registry, [ {description, "EMQX Schema Registry"}, - {vsn, "0.1.1"}, + {vsn, "0.1.2"}, {registered, [emqx_ee_schema_registry_sup]}, {mod, {emqx_ee_schema_registry_app, []}}, {applications, [ diff --git a/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.erl b/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.erl index 59a224fc7..b1453914b 100644 --- a/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.erl +++ b/lib-ee/emqx_ee_schema_registry/src/emqx_ee_schema_registry.erl @@ -179,7 +179,7 @@ create_tables() -> ok = mria:create_table(?PROTOBUF_CACHE_TAB, [ {type, set}, {rlog_shard, ?SCHEMA_REGISTRY_SHARD}, - {storage, disc_only_copies}, + {storage, rocksdb_copies}, {record_name, protobuf_cache}, {attributes, record_info(fields, protobuf_cache)} ]), From 99e892b5c4de941139b480c6de89c0dd62d90857 Mon Sep 17 00:00:00 2001 From: Serge Tupchii Date: Mon, 24 Apr 2023 20:29:38 +0300 Subject: [PATCH 142/263] chore: bump ekka to 0.15.0 ekka 0.15.0 uses mria 0.5.0, which adds several fixes, enhancements and features: - protect `mria:join/1,2` with a global lock - implement new function `mria:sync_transaction/4,3,2`, which waits for a transaction replication to be ready on the local node (if the local node is a replicant) - optimize `mria:running_nodes/0` - optimize `mria:ro_transaction/2` when called on a replicant node. 
Fixes: EMQX-9588 (#10380), EMQX-9102, EMQX-9152, EMQX-9213 --- apps/emqx/rebar.config | 2 +- changes/ce/fix-10500.en.md | 12 ++++++++++++ mix.exs | 2 +- rebar.config | 2 +- 4 files changed, 15 insertions(+), 3 deletions(-) create mode 100644 changes/ce/fix-10500.en.md diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index 6788b4f40..21b2fd292 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -27,7 +27,7 @@ {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}}, - {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.6"}}}, + {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.0"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.2"}}}, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}}, diff --git a/changes/ce/fix-10500.en.md b/changes/ce/fix-10500.en.md new file mode 100644 index 000000000..730dfb6e5 --- /dev/null +++ b/changes/ce/fix-10500.en.md @@ -0,0 +1,12 @@ +Add several fixes, enhancements and features in Mria: + - protect `mria:join/1,2` with a global lock to prevent conflicts between + two nodes trying to join each other simultaneously + [Mria PR](https://github.com/emqx/mria/pull/137) + - implement new function `mria:sync_transaction/4,3,2`, which blocks the caller until + a transaction is imported to the local node (if the local node is a replicant, otherwise, + it behaves exactly the same as `mria:transaction/3,2`) + [Mria PR](https://github.com/emqx/mria/pull/136) + - optimize `mria:running_nodes/0` + [Mria PR](https://github.com/emqx/mria/pull/135) + - optimize `mria:ro_transaction/2` when called on a replicant node + [Mria PR](https://github.com/emqx/mria/pull/134). 
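A sketch of how the new sync_transaction API is expected to be used — the shard, table and values below are placeholders, and the argument list mirrors mria:transaction/3,2 as described above:

    %% On a replicant this returns only after the transaction has been
    %% replicated to and imported on the local node; on a core node it
    %% behaves exactly like mria:transaction/2.
    {atomic, ok} = mria:sync_transaction(my_shard, fun() ->
        mnesia:write({my_tab, Key, Value})
    end).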
diff --git a/mix.exs b/mix.exs index e2230d55d..ca4faabcd 100644 --- a/mix.exs +++ b/mix.exs @@ -55,7 +55,7 @@ defmodule EMQXUmbrella.MixProject do {:cowboy, github: "emqx/cowboy", tag: "2.9.0", override: true}, {:esockd, github: "emqx/esockd", tag: "5.9.6", override: true}, {:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.7.2-emqx-9", override: true}, - {:ekka, github: "emqx/ekka", tag: "0.14.6", override: true}, + {:ekka, github: "emqx/ekka", tag: "0.15.0", override: true}, {:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true}, {:grpc, github: "emqx/grpc-erl", tag: "0.6.7", override: true}, {:minirest, github: "emqx/minirest", tag: "1.3.8", override: true}, diff --git a/rebar.config b/rebar.config index de520f124..10adb3848 100644 --- a/rebar.config +++ b/rebar.config @@ -62,7 +62,7 @@ , {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}} , {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}} , {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.7.2-emqx-9"}}} - , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.6"}}} + , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.0"}}} , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}} , {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.7"}}} , {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.8"}}} From e9fde129131fc3e5f2bbc302cecccded0431b4f9 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Mon, 24 Apr 2023 15:19:33 -0300 Subject: [PATCH 143/263] test: attempt to fix flaky test Example failure: https://github.com/emqx/emqx/actions/runs/4789177314/jobs/8517116154#step:7:503 --- .../test/emqx_bridge_kafka_impl_consumer_SUITE.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl index 08fbf5e15..3d22c0698 100644 --- a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl +++ b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl @@ -1473,7 +1473,10 @@ do_t_receive_after_recovery(Config) -> ResourceId = resource_id(Config), ?check_trace( begin - {ok, _} = create_bridge(Config), + {ok, _} = create_bridge( + Config, + #{<<"kafka">> => #{<<"offset_reset_policy">> => <<"earliest">>}} + ), ping_until_healthy(Config, _Period = 1_500, _Timeout0 = 24_000), {ok, connected} = emqx_resource_manager:health_check(ResourceId), %% 0) ensure each partition commits its offset so it can From 09b17000c843a87c1e442701a26fb0a3becbed7d Mon Sep 17 00:00:00 2001 From: JianBo He Date: Tue, 25 Apr 2023 09:56:40 +0800 Subject: [PATCH 144/263] chore: update changes/ce/feat-10457.en.md Co-authored-by: Zaiming (Stone) Shi --- changes/ce/feat-10457.en.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changes/ce/feat-10457.en.md b/changes/ce/feat-10457.en.md index a11e9b424..966569a1c 100644 --- a/changes/ce/feat-10457.en.md +++ b/changes/ce/feat-10457.en.md @@ -1,4 +1,4 @@ Deprecates the integration with StatsD. There seemd to be no user using StatsD integration, so we have decided to hide this feature -for now. We will either remove it based on requirements in the future. +for now. We will either remove or revive it based on requirements in the future. 
From 308056f0fc30b28a95a9c352b6167b8fc8f030b1 Mon Sep 17 00:00:00 2001 From: Zhongwen Deng Date: Tue, 25 Apr 2023 18:08:34 +0800 Subject: [PATCH 145/263] feat: improved the storage format of Unicode characters in data files --- apps/emqx/rebar.config | 2 +- changes/ce/feat-10512.en.md | 3 +++ mix.exs | 2 +- rebar.config | 2 +- 4 files changed, 6 insertions(+), 3 deletions(-) create mode 100644 changes/ce/feat-10512.en.md diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index 21b2fd292..75be887ad 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -29,7 +29,7 @@ {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}}, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.0"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, - {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.2"}}}, + {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.3"}}}, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}}, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}, {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}}, diff --git a/changes/ce/feat-10512.en.md b/changes/ce/feat-10512.en.md new file mode 100644 index 000000000..e6c162742 --- /dev/null +++ b/changes/ce/feat-10512.en.md @@ -0,0 +1,3 @@ +Improved the storage format of Unicode characters in data files, +Now we can store Unicode characters normally. +For example: "SELECT * FROM \"t/1\" WHERE clientid = \"-测试专用-\"" diff --git a/mix.exs b/mix.exs index ca4faabcd..a05804a19 100644 --- a/mix.exs +++ b/mix.exs @@ -72,7 +72,7 @@ defmodule EMQXUmbrella.MixProject do # in conflict by emqtt and hocon {:getopt, "1.0.2", override: true}, {:snabbkaffe, github: "kafka4beam/snabbkaffe", tag: "1.0.7", override: true}, - {:hocon, github: "emqx/hocon", tag: "0.39.2", override: true}, + {:hocon, github: "emqx/hocon", tag: "0.39.3", override: true}, {:emqx_http_lib, github: "emqx/emqx_http_lib", tag: "0.5.2", override: true}, {:esasl, github: "emqx/esasl", tag: "0.2.0"}, {:jose, github: "potatosalad/erlang-jose", tag: "1.11.2"}, diff --git a/rebar.config b/rebar.config index 10adb3848..2b6f1b53c 100644 --- a/rebar.config +++ b/rebar.config @@ -75,7 +75,7 @@ , {system_monitor, {git, "https://github.com/ieQu1/system_monitor", {tag, "3.0.3"}}} , {getopt, "1.0.2"} , {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.7"}}} - , {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.2"}}} + , {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.3"}}} , {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}} , {esasl, {git, "https://github.com/emqx/esasl", {tag, "0.2.0"}}} , {jose, {git, "https://github.com/potatosalad/erlang-jose", {tag, "1.11.2"}}} From d6e46dcadb7ae38c110421b10acbb27a0be9dae9 Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Tue, 25 Apr 2023 14:48:34 +0200 Subject: [PATCH 146/263] ci: Add a script to generate erlang_ls config --- scripts/gen-erlang-ls-config.sh | 116 ++++++++++++++++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100755 scripts/gen-erlang-ls-config.sh diff --git a/scripts/gen-erlang-ls-config.sh b/scripts/gen-erlang-ls-config.sh new file mode 100755 index 000000000..0f5bcdecd --- /dev/null +++ b/scripts/gen-erlang-ls-config.sh @@ -0,0 +1,116 @@ +#!/bin/bash +set -eou pipefail +shopt -s nullglob + +# Our fork of rebar3 copies those directories from apps to +# 
_build/default/lib rather than just making a symlink. Now erlang_ls +# sees the same module twice and if you're just navigating through the +# call stack you accidentally end up editing files in +# _build/default/lib rather than apps + +usage() { + cat < Date: Tue, 25 Apr 2023 14:26:19 -0300 Subject: [PATCH 147/263] style: format rebar.config file --- apps/emqx_bridge_pulsar/rebar.config | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/apps/emqx_bridge_pulsar/rebar.config b/apps/emqx_bridge_pulsar/rebar.config index 3b9ae417d..be5f282df 100644 --- a/apps/emqx_bridge_pulsar/rebar.config +++ b/apps/emqx_bridge_pulsar/rebar.config @@ -1,13 +1,14 @@ %% -*- mode: erlang; -*- {erl_opts, [debug_info]}. -{deps, [ {pulsar, {git, "https://github.com/emqx/pulsar-client-erl.git", {tag, "0.8.0"}}} - , {emqx_connector, {path, "../../apps/emqx_connector"}} - , {emqx_resource, {path, "../../apps/emqx_resource"}} - , {emqx_bridge, {path, "../../apps/emqx_bridge"}} - ]}. +{deps, [ + {pulsar, {git, "https://github.com/emqx/pulsar-client-erl.git", {tag, "0.8.0"}}}, + {emqx_connector, {path, "../../apps/emqx_connector"}}, + {emqx_resource, {path, "../../apps/emqx_resource"}}, + {emqx_bridge, {path, "../../apps/emqx_bridge"}} +]}. {shell, [ - % {config, "config/sys.config"}, + % {config, "config/sys.config"}, {apps, [emqx_bridge_pulsar]} ]}. From f69ebdcd1a551fe717e6bc2bf9cda4c9abcd59b1 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Tue, 25 Apr 2023 14:27:07 -0300 Subject: [PATCH 148/263] test(pulsar): teardown tls group --- .../test/emqx_bridge_pulsar_impl_producer_SUITE.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl index f86dbc65d..d254b01fc 100644 --- a/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl +++ b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl @@ -99,7 +99,8 @@ init_per_group(_Group, Config) -> Config. 
end_per_group(Group, Config) when - Group =:= plain + Group =:= plain; + Group =:= tls -> common_end_per_group(Config), ok; From 56b884ab1776e5e864e14fd9c5835f575ae4ad0d Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Tue, 25 Apr 2023 14:28:00 -0300 Subject: [PATCH 149/263] style: change docker compose file name --- ...ocker-compose-pulsar-tcp.yaml => docker-compose-pulsar.yaml} | 0 scripts/ct/run.sh | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename .ci/docker-compose-file/{docker-compose-pulsar-tcp.yaml => docker-compose-pulsar.yaml} (100%) diff --git a/.ci/docker-compose-file/docker-compose-pulsar-tcp.yaml b/.ci/docker-compose-file/docker-compose-pulsar.yaml similarity index 100% rename from .ci/docker-compose-file/docker-compose-pulsar-tcp.yaml rename to .ci/docker-compose-file/docker-compose-pulsar.yaml diff --git a/scripts/ct/run.sh b/scripts/ct/run.sh index 307063e84..3a796821c 100755 --- a/scripts/ct/run.sh +++ b/scripts/ct/run.sh @@ -192,7 +192,7 @@ for dep in ${CT_DEPS}; do FILES+=( '.ci/docker-compose-file/docker-compose-opents.yaml' ) ;; pulsar) - FILES+=( '.ci/docker-compose-file/docker-compose-pulsar-tcp.yaml' ) + FILES+=( '.ci/docker-compose-file/docker-compose-pulsar.yaml' ) ;; *) echo "unknown_ct_dependency $dep" From b56a158a545d387f12942e5ebaf54b70059172d7 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Tue, 25 Apr 2023 14:29:40 -0300 Subject: [PATCH 150/263] fix(pulsar): fix function return typespec --- .../src/emqx_bridge_pulsar_impl_producer.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl index 0b195df66..2bd44d16a 100644 --- a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl @@ -137,7 +137,10 @@ on_get_status(_InstanceId, State) -> disconnected end. --spec on_query(manager_id(), {send_message, map()}, state()) -> ok | {error, timeout}. +-spec on_query(manager_id(), {send_message, map()}, state()) -> + {ok, term()} + | {error, timeout} + | {error, term()}. 
on_query(_InstanceId, {send_message, Message}, State) -> #{ producers := Producers, From 19b5ebff813513499b1bb09f5474a62c25825899 Mon Sep 17 00:00:00 2001 From: Serge Tupchii Date: Tue, 25 Apr 2023 23:04:42 +0300 Subject: [PATCH 151/263] chore: bump ekka to 0.15.1 ekka 0.15.1 uses mria 0.5.2, which includes the following changes: - fix(mria_membership): call `mria_rlog:role/1` safely - feat: add extra field to ?rlog_sync table (for future use) --- apps/emqx/rebar.config | 2 +- changes/ce/fix-10518.en.md | 6 ++++++ mix.exs | 2 +- rebar.config | 2 +- 4 files changed, 9 insertions(+), 3 deletions(-) create mode 100644 changes/ce/fix-10518.en.md diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index 21b2fd292..26ff4d1e2 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -27,7 +27,7 @@ {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}}, - {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.0"}}}, + {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.1"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.2"}}}, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}}, diff --git a/changes/ce/fix-10518.en.md b/changes/ce/fix-10518.en.md new file mode 100644 index 000000000..87d001e91 --- /dev/null +++ b/changes/ce/fix-10518.en.md @@ -0,0 +1,6 @@ +Add the following fixes and features in Mria: + - call `mria_rlog:role/1` safely in mria_membership to ensure that mria_membership + gen_server won't crash if RPC to another node fails + [Mria PR](https://github.com/emqx/mria/pull/139) + - Add extra field to ?rlog_sync table to facilitate extending this functionality in future + [Mria PR](https://github.com/emqx/mria/pull/138). 
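As a rough illustration of the kind of guard described above (the real fix lives inside Mria's mria_membership and may differ), a caller can treat a failed remote lookup as an unknown role rather than letting its own process crash:

    %% Hypothetical wrapper -- not the actual Mria code.
    safe_role(Node) ->
        try mria_rlog:role(Node) of
            Role -> {ok, Role}
        catch
            _:_ -> {error, unreachable}
        end.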
diff --git a/mix.exs b/mix.exs index ca4faabcd..5836d5b19 100644 --- a/mix.exs +++ b/mix.exs @@ -55,7 +55,7 @@ defmodule EMQXUmbrella.MixProject do {:cowboy, github: "emqx/cowboy", tag: "2.9.0", override: true}, {:esockd, github: "emqx/esockd", tag: "5.9.6", override: true}, {:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.7.2-emqx-9", override: true}, - {:ekka, github: "emqx/ekka", tag: "0.15.0", override: true}, + {:ekka, github: "emqx/ekka", tag: "0.15.1", override: true}, {:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true}, {:grpc, github: "emqx/grpc-erl", tag: "0.6.7", override: true}, {:minirest, github: "emqx/minirest", tag: "1.3.8", override: true}, diff --git a/rebar.config b/rebar.config index 10adb3848..b39d8a868 100644 --- a/rebar.config +++ b/rebar.config @@ -62,7 +62,7 @@ , {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}} , {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}} , {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.7.2-emqx-9"}}} - , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.0"}}} + , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.1"}}} , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}} , {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.7"}}} , {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.8"}}} From 687509886e8fab8439637ba5bf0a2a02cd619d81 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Tue, 25 Apr 2023 17:37:20 -0300 Subject: [PATCH 152/263] test: rm unused var warning --- lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl index 105f1fe75..e2006bc6d 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl @@ -425,7 +425,7 @@ t_mongo_date_rule_engine_functions(Config) -> RuleId = <<"rule:t_mongo_date_rule_engine_functions">>, emqx_rule_engine:delete_rule(RuleId), BridgeId = emqx_bridge_resource:bridge_id(Type, Name), - {ok, Rule} = emqx_rule_engine:create_rule( + {ok, _Rule} = emqx_rule_engine:create_rule( #{ id => <<"rule:t_mongo_date_rule_engine_functions">>, sql => SQL, From bc1bdae55d137cbd47a3b67570e47d7e72345320 Mon Sep 17 00:00:00 2001 From: Zhongwen Deng Date: Wed, 26 Apr 2023 11:27:31 +0800 Subject: [PATCH 153/263] chore: reslove confilt for sync release-50 to master --- .../src/emqx_ee_connector_dynamo.erl | 12 ++++++++---- .../src/emqx_ee_connector_rocketmq.erl | 6 ++++-- .../src/emqx_ee_connector_sqlserver.erl | 5 ++--- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl index b6270b1b6..5eee882ce 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl @@ -80,8 +80,10 @@ on_start( config => redact(Config) }), - {Schema, Server} = get_host_schema(to_str(Url)), - #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?DYNAMO_HOST_OPTIONS), + {Schema, Server, DefaultPort} = get_host_info(to_str(Url)), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, #{ + default_port => DefaultPort + }), Options = [ {config, #{ @@ -142,8 +144,10 @@ on_batch_query_async(InstanceId, [{send_message, _} | _] = Query, 
ReplyCtx, Stat on_batch_query_async(_InstanceId, Query, _Reply, _State) -> {error, {unrecoverable_error, {invalid_request, Query}}}. -on_get_status(_InstanceId, #{pool_name := PoolName}) -> - Health = emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1), +on_get_status(_InstanceId, #{pool_name := Pool}) -> + Health = emqx_resource_pool:health_check_workers( + Pool, {emqx_ee_connector_dynamo_client, is_connected, []} + ), status_result(Health). status_result(_Status = true) -> connected; diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_rocketmq.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_rocketmq.erl index e831b4f2f..2e1730b52 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_rocketmq.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_rocketmq.erl @@ -96,8 +96,10 @@ on_start( connector => InstanceId, config => redact(Config) }), - Config = maps:merge(default_security_info(), Config1), - #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?ROCKETMQ_HOST_OPTIONS), + Servers = lists:map( + fun(#{hostname := Host, port := Port}) -> {Host, Port} end, + emqx_schema:parse_servers(BinServers, ?ROCKETMQ_HOST_OPTIONS) + ), ClientId = client_id(InstanceId), TopicTks = emqx_plugin_libs_rule:preproc_tmpl(Topic), diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_sqlserver.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_sqlserver.erl index 180d1271c..90d90cb36 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_sqlserver.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_sqlserver.erl @@ -304,11 +304,10 @@ on_batch_query_async(InstanceId, Requests, ReplyFunAndArgs, State) -> ), do_query(InstanceId, Requests, ?ASYNC_QUERY_MODE(ReplyFunAndArgs), State). -on_get_status(_InstanceId, #{pool_name := PoolName, resource_opts := ResourceOpts} = _State) -> - RequestTimeout = ?REQUEST_TIMEOUT(ResourceOpts), +on_get_status(_InstanceId, #{pool_name := PoolName} = _State) -> Health = emqx_resource_pool:health_check_workers( PoolName, - {?MODULE, do_get_status, []}, + {?MODULE, do_get_status, []} ), status_result(Health). From f0cd5c98c731e76e77816d45267ec76f5d33dac1 Mon Sep 17 00:00:00 2001 From: Zhongwen Deng Date: Wed, 26 Apr 2023 13:57:15 +0800 Subject: [PATCH 154/263] chore: split i18n with script --- rel/i18n/emqx_ee_bridge_rocketmq.hocon | 12 +--- rel/i18n/emqx_ee_connector_rocketmq.hocon | 71 ++++++++++---------- rel/i18n/zh/emqx_ee_bridge_rocketmq.hocon | 5 +- rel/i18n/zh/emqx_ee_connector_rocketmq.hocon | 22 +++++- 4 files changed, 63 insertions(+), 47 deletions(-) diff --git a/rel/i18n/emqx_ee_bridge_rocketmq.hocon b/rel/i18n/emqx_ee_bridge_rocketmq.hocon index 4e4f8a99c..e079220b6 100644 --- a/rel/i18n/emqx_ee_bridge_rocketmq.hocon +++ b/rel/i18n/emqx_ee_bridge_rocketmq.hocon @@ -34,17 +34,11 @@ local_topic.label: template.desc: """Template, the default value is empty. When this value is empty the whole message will be stored in the RocketMQ.
-The template can be any valid string with placeholders, example:
-- ${id}, ${username}, ${clientid}, ${timestamp}
-- {\"id\" : ${id}, \"username\" : ${username}}""" + The template can be any valid string with placeholders, example:
+ - ${id}, ${username}, ${clientid}, ${timestamp}
+ - {"id" : ${id}, "username" : ${username}}""" template.label: """Template""" -config_enable.desc: -"""Enable or disable this bridge""" - -config_enable.label: -"""Enable Or Disable Bridge""" - } diff --git a/rel/i18n/emqx_ee_connector_rocketmq.hocon b/rel/i18n/emqx_ee_connector_rocketmq.hocon index 661cbe249..d3d59a389 100644 --- a/rel/i18n/emqx_ee_connector_rocketmq.hocon +++ b/rel/i18n/emqx_ee_connector_rocketmq.hocon @@ -1,52 +1,53 @@ emqx_ee_connector_rocketmq { +access_key.desc: +"""RocketMQ server `accessKey`.""" + +access_key.label: +"""AccessKey""" + +refresh_interval.desc: +"""RocketMQ Topic Route Refresh Interval.""" + +refresh_interval.label: +"""Topic Route Refresh Interval""" + +secret_key.desc: +"""RocketMQ server `secretKey`.""" + +secret_key.label: +"""SecretKey""" + +security_token.desc: +"""RocketMQ Server Security Token""" + +security_token.label: +"""Security Token""" + +send_buffer.desc: +"""The socket send buffer size of the RocketMQ driver client.""" + +send_buffer.label: +"""Send Buffer Size""" + servers.desc: """The IPv4 or IPv6 address or the hostname to connect to.
A host entry has the following form: `Host[:Port]`.
The RocketMQ default port 9876 is used if `[:Port]` is not specified.""" -servers.label: +servers.label: """Server Host""" -topic.desc: -"""RocketMQ Topic""" - -topic.label: -"""RocketMQ Topic""" - -access_key.desc: -"""RocketMQ server `accessKey`.""" - -access_key.label: -"""AccessKey""" - -secret_key.desc: -"""RocketMQ server `secretKey`.""" -secret_key.label: -"""SecretKey""" - sync_timeout.desc: """Timeout of RocketMQ driver synchronous call.""" -sync_timeout.label: +sync_timeout.label: """Sync Timeout""" - -refresh_interval.desc: -"""RocketMQ Topic Route Refresh Interval.""" - -refresh_interval.label: -"""Topic Route Refresh Interval""" -send_buffer.desc: -"""The socket send buffer size of the RocketMQ driver client.""" +topic.desc: +"""RocketMQ Topic""" -send_buffer.label: -"""Send Buffer Size""" - -security_token.desc: -"""RocketMQ Server Security Token""" - -security_token.label: -"""Security Token""" +topic.label: +"""RocketMQ Topic""" } diff --git a/rel/i18n/zh/emqx_ee_bridge_rocketmq.hocon b/rel/i18n/zh/emqx_ee_bridge_rocketmq.hocon index 924004361..445a54232 100644 --- a/rel/i18n/zh/emqx_ee_bridge_rocketmq.hocon +++ b/rel/i18n/zh/emqx_ee_bridge_rocketmq.hocon @@ -32,7 +32,10 @@ local_topic.label: """本地 Topic""" template.desc: -"""模板, 默认为空,为空时将会将整个消息转发给 RocketMQ""" +"""模板, 默认为空,为空时将会将整个消息转发给 RocketMQ。
+ 模板可以是任意带有占位符的合法字符串, 例如:
+ - ${id}, ${username}, ${clientid}, ${timestamp}
+ - {"id" : ${id}, "username" : ${username}}""" template.label: """模板""" diff --git a/rel/i18n/zh/emqx_ee_connector_rocketmq.hocon b/rel/i18n/zh/emqx_ee_connector_rocketmq.hocon index d32e6ea01..58a1f7ddb 100644 --- a/rel/i18n/zh/emqx_ee_connector_rocketmq.hocon +++ b/rel/i18n/zh/emqx_ee_connector_rocketmq.hocon @@ -1,11 +1,23 @@ emqx_ee_connector_rocketmq { +access_key.desc: +"""RocketMQ 服务器的 `accessKey`。""" + +access_key.label: +"""AccessKey""" + refresh_interval.desc: """RocketMQ 主题路由更新间隔。""" refresh_interval.label: """主题路由更新间隔""" +secret_key.desc: +"""RocketMQ 服务器的 `secretKey`。""" + +secret_key.label: +"""SecretKey""" + security_token.desc: """RocketMQ 服务器安全令牌""" @@ -18,14 +30,20 @@ send_buffer.desc: send_buffer.label: """发送消息的缓冲区大小""" -server.desc: +servers.desc: """将要连接的 IPv4 或 IPv6 地址,或者主机名。
主机名具有以下形式:`Host[:Port]`。
如果未指定 `[:Port]`,则使用 RocketMQ 默认端口 9876。""" -server.label: +servers.label: """服务器地址""" +sync_timeout.desc: +"""RocketMQ 驱动同步调用的超时时间。""" + +sync_timeout.label: +"""同步调用超时时间""" + topic.desc: """RocketMQ 主题""" From 5ed3c3a92cd78ecfd8feeafe953a84f840f219fb Mon Sep 17 00:00:00 2001 From: William Yang Date: Mon, 17 Apr 2023 14:18:19 +0200 Subject: [PATCH 155/263] perf(config): eliminate make_ref() calls in config get calls --- apps/emqx/src/emqx_config.erl | 20 +++++++++----------- apps/emqx_utils/src/emqx_utils.app.src | 2 +- apps/emqx_utils/src/emqx_utils_maps.erl | 9 ++++----- changes/ce/perf-10417.en.md | 4 ++++ 4 files changed, 18 insertions(+), 17 deletions(-) create mode 100644 changes/ce/perf-10417.en.md diff --git a/apps/emqx/src/emqx_config.erl b/apps/emqx/src/emqx_config.erl index 9561263ca..8ec8c11ab 100644 --- a/apps/emqx/src/emqx_config.erl +++ b/apps/emqx/src/emqx_config.erl @@ -103,6 +103,8 @@ -define(ZONE_CONF_PATH(ZONE, PATH), [zones, ZONE | PATH]). -define(LISTENER_CONF_PATH(TYPE, LISTENER, PATH), [listeners, TYPE, LISTENER | PATH]). +-define(CONFIG_NOT_FOUND_MAGIC, '$0tFound'). + -export_type([ update_request/0, raw_config/0, @@ -164,9 +166,8 @@ get(KeyPath, Default) -> do_get(?CONF, KeyPath, Default). -spec find(emqx_utils_maps:config_key_path()) -> {ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}. find([]) -> - Ref = make_ref(), - case do_get(?CONF, [], Ref) of - Ref -> {not_found, []}; + case do_get(?CONF, [], ?CONFIG_NOT_FOUND_MAGIC) of + ?CONFIG_NOT_FOUND_MAGIC -> {not_found, []}; Res -> {ok, Res} end; find(KeyPath) -> @@ -179,9 +180,8 @@ find(KeyPath) -> -spec find_raw(emqx_utils_maps:config_key_path()) -> {ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}. find_raw([]) -> - Ref = make_ref(), - case do_get_raw([], Ref) of - Ref -> {not_found, []}; + case do_get_raw([], ?CONFIG_NOT_FOUND_MAGIC) of + ?CONFIG_NOT_FOUND_MAGIC -> {not_found, []}; Res -> {ok, Res} end; find_raw(KeyPath) -> @@ -666,11 +666,9 @@ do_get_raw(Path, Default) -> do_get(?RAW_CONF, Path, Default). do_get(Type, KeyPath) -> - Ref = make_ref(), - Res = do_get(Type, KeyPath, Ref), - case Res =:= Ref of - true -> error({config_not_found, KeyPath}); - false -> Res + case do_get(Type, KeyPath, ?CONFIG_NOT_FOUND_MAGIC) of + ?CONFIG_NOT_FOUND_MAGIC -> error({config_not_found, KeyPath}); + Res -> Res end. do_get(Type, [], Default) -> diff --git a/apps/emqx_utils/src/emqx_utils.app.src b/apps/emqx_utils/src/emqx_utils.app.src index eb6371411..dff55bc86 100644 --- a/apps/emqx_utils/src/emqx_utils.app.src +++ b/apps/emqx_utils/src/emqx_utils.app.src @@ -2,7 +2,7 @@ {application, emqx_utils, [ {description, "Miscellaneous utilities for EMQX apps"}, % strict semver, bump manually! - {vsn, "5.0.0"}, + {vsn, "5.0.1"}, {modules, [ emqx_utils, emqx_utils_api, diff --git a/apps/emqx_utils/src/emqx_utils_maps.erl b/apps/emqx_utils/src/emqx_utils_maps.erl index 6bec32ae3..d1c3ed649 100644 --- a/apps/emqx_utils/src/emqx_utils_maps.erl +++ b/apps/emqx_utils/src/emqx_utils_maps.erl @@ -41,14 +41,13 @@ -type config_key_path() :: [config_key()]. -type convert_fun() :: fun((...) -> {K1 :: any(), V1 :: any()} | drop). +-define(CONFIG_NOT_FOUND_MAGIC, '$0tFound'). %%----------------------------------------------------------------- -spec deep_get(config_key_path(), map()) -> term(). 
deep_get(ConfKeyPath, Map) -> - Ref = make_ref(), - Res = deep_get(ConfKeyPath, Map, Ref), - case Res =:= Ref of - true -> error({config_not_found, ConfKeyPath}); - false -> Res + case deep_get(ConfKeyPath, Map, ?CONFIG_NOT_FOUND_MAGIC) of + ?CONFIG_NOT_FOUND_MAGIC -> error({config_not_found, ConfKeyPath}); + Res -> Res end. -spec deep_get(config_key_path(), map(), term()) -> term(). diff --git a/changes/ce/perf-10417.en.md b/changes/ce/perf-10417.en.md new file mode 100644 index 000000000..b9f1217c9 --- /dev/null +++ b/changes/ce/perf-10417.en.md @@ -0,0 +1,4 @@ +Improve get config performance + +eliminate make_ref calls + From 54c1a2b06d23ac38cdfab64f0ab03372dbc2d951 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Tue, 25 Apr 2023 17:21:36 +0200 Subject: [PATCH 156/263] ci: add performance test workflow --- .github/workflows/performance_test.yaml | 125 ++++++++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 .github/workflows/performance_test.yaml diff --git a/.github/workflows/performance_test.yaml b/.github/workflows/performance_test.yaml new file mode 100644 index 000000000..e8a2a321e --- /dev/null +++ b/.github/workflows/performance_test.yaml @@ -0,0 +1,125 @@ +name: Performance Test Suite + +on: + push: + branches: + - 'perf/**' + schedule: + - cron: '0 1 * * *' + workflow_dispatch: + inputs: + ref: + required: false + +jobs: + prepare: + runs-on: ubuntu-latest + container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu20.04 + outputs: + BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }} + PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }} + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: ${{ github.event.inputs.ref }} + - id: prepare + run: | + echo "EMQX_NAME=emqx" >> $GITHUB_ENV + echo "CODE_PATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV + echo "BENCH_ID=$(date --utc +%F)/emqx-$(./pkg-vsn.sh emqx)" >> $GITHUB_OUTPUT + - name: Work around https://github.com/actions/checkout/issues/766 + run: | + git config --global --add safe.directory "$GITHUB_WORKSPACE" + - name: Build deb package + run: | + make ${EMQX_NAME}-pkg + ./scripts/pkg-tests.sh ${EMQX_NAME}-pkg + - name: Get package file name + id: package_file + run: | + echo "PACKAGE_FILE=$(find _packages/emqx -name 'emqx-*.deb' | head -n 1 | xargs basename)" >> $GITHUB_OUTPUT + - uses: actions/upload-artifact@v3 + with: + name: emqx-ubuntu20.04 + path: _packages/emqx/${{ steps.package_file.outputs.PACKAGE_FILE }} + + tf_emqx_perf_test: + runs-on: ubuntu-latest + needs: + - prepare + env: + TF_VAR_bench_id: ${{ needs.prepare.outputs.BENCH_ID }} + TF_VAR_package_file: ${{ needs.prepare.outputs.PACKAGE_FILE }} + TF_VAR_test_duration_seconds: 300 + TF_VAR_grafana_api_key: ${{ secrets.TF_EMQX_PERF_TEST_GRAFANA_API_KEY }} + TF_AWS_REGION: eu-north-1 + + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PERF_TEST }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }} + aws-region: eu-north-1 + - name: Checkout tf-emqx-performance-test + uses: actions/checkout@v3 + with: + repository: emqx/tf-emqx-performance-test + path: tf-emqx-performance-test + - uses: actions/download-artifact@v3 + with: + name: emqx-ubuntu20.04 + path: tf-emqx-performance-test/ + - name: Setup Terraform + uses: hashicorp/setup-terraform@v2 + with: + terraform_wrapper: false + - name: terraform init + working-directory: ./tf-emqx-performance-test + run: | + terraform init + - name: terraform apply + 
working-directory: ./tf-emqx-performance-test + run: | + terraform apply -auto-approve + - name: Wait for test results + working-directory: ./tf-emqx-performance-test + id: test-results + run: | + sleep $TF_VAR_test_duration_seconds + until aws s3api head-object --bucket tf-emqx-performance-test --key "$TF_VAR_bench_id/DONE" > /dev/null 2>&1 + do + echo 'waiting' + sleep 10 + done + aws s3 cp "s3://tf-emqx-performance-test/$TF_VAR_bench_id/metrics.json" ./ + aws s3 cp "s3://tf-emqx-performance-test/$TF_VAR_bench_id/stats.json" ./ + echo MESSAGES_DELIVERED=$(cat metrics.json | jq '[.[]."messages.delivered"] | add') >> $GITHUB_OUTPUT + echo MESSAGES_DROPPED=$(cat metrics.json | jq '[.[]."messages.dropped"] | add') >> $GITHUB_OUTPUT + - name: Send notification to Slack + if: success() + uses: slackapi/slack-github-action@v1.23.0 + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + with: + payload: | + {"text": "EMQX performance test completed.\nMessages delivered: ${{ steps.test-results.outputs.MESSAGES_DELIVERED }}.\nMessages dropped: ${{ steps.test-results.outputs.MESSAGES_DROPPED }}.\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} + - name: terraform destroy + if: always() + working-directory: ./tf-emqx-performance-test + run: | + terraform destroy -auto-approve + - uses: actions/upload-artifact@v3 + if: success() + with: + name: test-results + path: "./tf-emqx-performance-test/*.json" + - uses: actions/upload-artifact@v3 + if: always() + with: + name: terraform + path: | + ./tf-emqx-performance-test/.terraform + ./tf-emqx-performance-test/*.tfstate From abf150518c1e9188bc2ac287a69baabcbd15ac5f Mon Sep 17 00:00:00 2001 From: William Yang Date: Wed, 26 Apr 2023 12:16:16 +0200 Subject: [PATCH 157/263] fix(test): avoid port collision Use OS selected free port to avoid port collision among the test runs. --- apps/emqx/test/emqx_common_test_helpers.erl | 34 +++++++++++++++- apps/emqx/test/emqx_listeners_SUITE.erl | 39 ++++++++++++------- .../test/emqx_quic_multistreams_SUITE.erl | 13 +------ 3 files changed, 60 insertions(+), 26 deletions(-) diff --git a/apps/emqx/test/emqx_common_test_helpers.erl b/apps/emqx/test/emqx_common_test_helpers.erl index 8603be879..ac03f4660 100644 --- a/apps/emqx/test/emqx_common_test_helpers.erl +++ b/apps/emqx/test/emqx_common_test_helpers.erl @@ -59,7 +59,8 @@ read_schema_configs/2, render_config_file/2, wait_for/4, - wait_mqtt_payload/1 + wait_mqtt_payload/1, + select_free_port/1 ]). -export([ @@ -1242,3 +1243,34 @@ get_or_spawn_janitor() -> on_exit(Fun) -> Janitor = get_or_spawn_janitor(), ok = emqx_test_janitor:push_on_exit_callback(Janitor, Fun). + +%%------------------------------------------------------------------------------- +%% Select a free transport port from the OS +%%------------------------------------------------------------------------------- +%% @doc get unused port from OS +-spec select_free_port(tcp | udp | ssl | quic) -> inets:port_number(). +select_free_port(tcp) -> + select_free_port(gen_tcp, listen); +select_free_port(udp) -> + select_free_port(gen_udp, open); +select_free_port(ssl) -> + select_free_port(tcp); +select_free_port(quic) -> + select_free_port(udp). 
+ +select_free_port(GenModule, Fun) when + GenModule == gen_tcp orelse + GenModule == gen_udp +-> + {ok, S} = GenModule:Fun(0, [{reuseaddr, true}]), + {ok, Port} = inet:port(S), + ok = GenModule:close(S), + case os:type() of + {unix, darwin} -> + %% in MacOS, still get address_in_use after close port + timer:sleep(500); + _ -> + skip + end, + ct:pal("Select free OS port: ~p", [Port]), + Port. diff --git a/apps/emqx/test/emqx_listeners_SUITE.erl b/apps/emqx/test/emqx_listeners_SUITE.erl index 107f3d4e7..f0c18fa30 100644 --- a/apps/emqx/test/emqx_listeners_SUITE.erl +++ b/apps/emqx/test/emqx_listeners_SUITE.erl @@ -47,13 +47,14 @@ init_per_testcase(Case, Config) when Case =:= t_max_conns_tcp; Case =:= t_current_conns_tcp -> catch emqx_config_handler:stop(), + Port = emqx_common_test_helpers:select_free_port(tcp), {ok, _} = emqx_config_handler:start_link(), PrevListeners = emqx_config:get([listeners], #{}), PureListeners = remove_default_limiter(PrevListeners), PureListeners2 = PureListeners#{ tcp => #{ listener_test => #{ - bind => {"127.0.0.1", 9999}, + bind => {"127.0.0.1", Port}, max_connections => 4321, limiter => #{} } @@ -63,19 +64,20 @@ init_per_testcase(Case, Config) when ok = emqx_listeners:start(), [ - {prev_listener_conf, PrevListeners} + {prev_listener_conf, PrevListeners}, + {tcp_port, Port} | Config ]; init_per_testcase(t_wss_conn, Config) -> catch emqx_config_handler:stop(), + Port = emqx_common_test_helpers:select_free_port(ssl), {ok, _} = emqx_config_handler:start_link(), - PrevListeners = emqx_config:get([listeners], #{}), PureListeners = remove_default_limiter(PrevListeners), PureListeners2 = PureListeners#{ wss => #{ listener_test => #{ - bind => {{127, 0, 0, 1}, 9998}, + bind => {{127, 0, 0, 1}, Port}, limiter => #{}, ssl_options => #{ cacertfile => ?CERTS_PATH("cacert.pem"), @@ -89,7 +91,8 @@ init_per_testcase(t_wss_conn, Config) -> ok = emqx_listeners:start(), [ - {prev_listener_conf, PrevListeners} + {prev_listener_conf, PrevListeners}, + {wss_port, Port} | Config ]; init_per_testcase(_, Config) -> @@ -171,20 +174,30 @@ t_restart_listeners_with_hibernate_after_disabled(_Config) -> ok = emqx_listeners:stop(), emqx_config:put([listeners], OldLConf). -t_max_conns_tcp(_) -> +t_max_conns_tcp(Config) -> %% Note: Using a string representation for the bind address like %% "127.0.0.1" does not work - ?assertEqual(4321, emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})). + ?assertEqual( + 4321, + emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, ?config(tcp_port, Config)}) + ). -t_current_conns_tcp(_) -> - ?assertEqual(0, emqx_listeners:current_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})). +t_current_conns_tcp(Config) -> + ?assertEqual( + 0, + emqx_listeners:current_conns('tcp:listener_test', { + {127, 0, 0, 1}, ?config(tcp_port, Config) + }) + ). -t_wss_conn(_) -> - {ok, Socket} = ssl:connect({127, 0, 0, 1}, 9998, [{verify, verify_none}], 1000), +t_wss_conn(Config) -> + {ok, Socket} = ssl:connect( + {127, 0, 0, 1}, ?config(wss_port, Config), [{verify, verify_none}], 1000 + ), ok = ssl:close(Socket). t_quic_conn(Config) -> - Port = 24568, + Port = emqx_common_test_helpers:select_free_port(quic), DataDir = ?config(data_dir, Config), SSLOpts = #{ password => ?SERVER_KEY_PASSWORD, @@ -207,7 +220,7 @@ t_quic_conn(Config) -> emqx_listeners:stop_listener(quic, ?FUNCTION_NAME, #{bind => Port}). 
t_ssl_password_cert(Config) -> - Port = 24568, + Port = emqx_common_test_helpers:select_free_port(ssl), DataDir = ?config(data_dir, Config), SSLOptsPWD = #{ password => ?SERVER_KEY_PASSWORD, diff --git a/apps/emqx/test/emqx_quic_multistreams_SUITE.erl b/apps/emqx/test/emqx_quic_multistreams_SUITE.erl index 4afd965bd..b55a28206 100644 --- a/apps/emqx/test/emqx_quic_multistreams_SUITE.erl +++ b/apps/emqx/test/emqx_quic_multistreams_SUITE.erl @@ -2026,18 +2026,7 @@ stop_emqx() -> %% select a random port picked by OS -spec select_port() -> inet:port_number(). select_port() -> - {ok, S} = gen_udp:open(0, [{reuseaddr, true}]), - {ok, {_, Port}} = inet:sockname(S), - gen_udp:close(S), - case os:type() of - {unix, darwin} -> - %% in MacOS, still get address_in_use after close port - timer:sleep(500); - _ -> - skip - end, - ct:pal("select port: ~p", [Port]), - Port. + emqx_common_test_helpers:select_free_port(quic). -spec via_stream({quic, quicer:connection_handle(), quicer:stream_handle()}) -> quicer:stream_handle(). From a79c741ee5e12d578e1a1f04a5a1b79614fba4ac Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Tue, 25 Apr 2023 18:02:10 +0200 Subject: [PATCH 158/263] build: fix docdir --- rebar.config.erl | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/rebar.config.erl b/rebar.config.erl index 3c863046f..dc445db54 100644 --- a/rebar.config.erl +++ b/rebar.config.erl @@ -509,7 +509,7 @@ etc_overlay(ReleaseType, Edition) -> [ {mkdir, "etc/"}, {copy, "{{base_dir}}/lib/emqx/etc/certs", "etc/"}, - {copy, "_build/docgen/" ++ name(Edition) ++ "/emqx.conf.example", "etc/emqx.conf.example"} + {copy, "_build/docgen/" ++ profile() ++ "/emqx.conf.example", "etc/emqx.conf.example"} ] ++ lists:map( fun @@ -646,5 +646,15 @@ list_dir(Dir) -> [] end. -name(ce) -> "emqx"; -name(ee) -> "emqx-enterprise". +profile() -> + case os:getenv("PROFILE") of + Profile = "emqx-enterprise" ++ _ -> + Profile; + Profile = "emqx" ++ _ -> + Profile; + false -> + "emqx-enterprise"; + Profile -> + io:format(standard_error, "ERROR: bad_PROFILE ~p~n", [Profile]), + exit(bad_PROFILE) + end. From d78312e10ebb7335ac0fd675b3f30aeef355ec64 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Tue, 25 Apr 2023 17:58:23 -0300 Subject: [PATCH 159/263] test(resource): fix flaky test --- .../src/emqx_resource_buffer_worker.erl | 2 +- .../test/emqx_resource_SUITE.erl | 56 ++++++++++++++++--- 2 files changed, 48 insertions(+), 10 deletions(-) diff --git a/apps/emqx_resource/src/emqx_resource_buffer_worker.erl b/apps/emqx_resource/src/emqx_resource_buffer_worker.erl index 2e2cd5631..7cb7f8198 100644 --- a/apps/emqx_resource/src/emqx_resource_buffer_worker.erl +++ b/apps/emqx_resource/src/emqx_resource_buffer_worker.erl @@ -1466,7 +1466,7 @@ mark_inflight_items_as_retriable(Data, WorkerMRef) -> end ), _NumAffected = ets:select_replace(InflightTID, MatchSpec), - ?tp(buffer_worker_worker_down_update, #{num_affected => _NumAffected}), + ?tp(buffer_worker_async_agent_down, #{num_affected => _NumAffected}), ok. %% used to update a batch after dropping expired individual queries. 
diff --git a/apps/emqx_resource/test/emqx_resource_SUITE.erl b/apps/emqx_resource/test/emqx_resource_SUITE.erl index f8ddd56b5..34781df6c 100644 --- a/apps/emqx_resource/test/emqx_resource_SUITE.erl +++ b/apps/emqx_resource/test/emqx_resource_SUITE.erl @@ -1766,12 +1766,6 @@ t_async_pool_worker_death(_Config) -> ?assertEqual(NumReqs, Inflight0), %% grab one of the worker pids and kill it - {ok, SRef1} = - snabbkaffe:subscribe( - ?match_event(#{?snk_kind := buffer_worker_worker_down_update}), - NumBufferWorkers, - 10_000 - ), {ok, #{pid := Pid0}} = emqx_resource:simple_sync_query(?ID, get_state), MRef = monitor(process, Pid0), ct:pal("will kill ~p", [Pid0]), @@ -1785,13 +1779,27 @@ t_async_pool_worker_death(_Config) -> end, %% inflight requests should have been marked as retriable - {ok, _} = snabbkaffe:receive_events(SRef1), + wait_until_all_marked_as_retriable(NumReqs), Inflight1 = emqx_resource_metrics:inflight_get(?ID), ?assertEqual(NumReqs, Inflight1), - ok + NumReqs end, - [] + fun(NumReqs, Trace) -> + Events = ?of_kind(buffer_worker_async_agent_down, Trace), + %% At least one buffer worker should have marked its + %% requests as retriable. If a single one has + %% received all requests, that's all we got. + ?assertMatch([_ | _], Events), + %% All requests distributed over all buffer workers + %% should have been marked as retriable, by the time + %% the inflight has been drained. + ?assertEqual( + NumReqs, + lists:sum([N || #{num_affected := N} <- Events]) + ), + ok + end ), ok. @@ -3017,3 +3025,33 @@ trace_between_span(Trace0, Marker) -> {Trace1, [_ | _]} = ?split_trace_at(#{?snk_kind := Marker, ?snk_span := {complete, _}}, Trace0), {[_ | _], [_ | Trace2]} = ?split_trace_at(#{?snk_kind := Marker, ?snk_span := start}, Trace1), Trace2. + +wait_until_all_marked_as_retriable(NumExpected) when NumExpected =< 0 -> + ok; +wait_until_all_marked_as_retriable(NumExpected) -> + Seen = #{}, + do_wait_until_all_marked_as_retriable(NumExpected, Seen). + +do_wait_until_all_marked_as_retriable(NumExpected, _Seen) when NumExpected =< 0 -> + ok; +do_wait_until_all_marked_as_retriable(NumExpected, Seen) -> + Res = ?block_until( + #{?snk_kind := buffer_worker_async_agent_down, ?snk_meta := #{pid := P}} when + not is_map_key(P, Seen), + 10_000 + ), + case Res of + {timeout, Evts} -> + ct:pal("events so far:\n ~p", [Evts]), + ct:fail("timeout waiting for events"); + {ok, #{num_affected := NumAffected, ?snk_meta := #{pid := Pid}}} -> + ct:pal("affected: ~p; pid: ~p", [NumAffected, Pid]), + case NumAffected >= NumExpected of + true -> + ok; + false -> + do_wait_until_all_marked_as_retriable(NumExpected - NumAffected, Seen#{ + Pid => true + }) + end + end. 
From 28a68a0ec78cebe4b6e7ddc3cd72fd105982963f Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Tue, 25 Apr 2023 11:40:27 +0200 Subject: [PATCH 160/263] refactor: stop i18n support in hotconf and bridges frontend team has decided to deal with translations all by themselves --- apps/emqx_conf/src/emqx_conf.erl | 64 ++++------------- .../src/emqx_dashboard_schema_api.erl | 24 +++---- .../src/emqx_dashboard_swagger.erl | 68 ++++++++++++++++++- 3 files changed, 88 insertions(+), 68 deletions(-) diff --git a/apps/emqx_conf/src/emqx_conf.erl b/apps/emqx_conf/src/emqx_conf.erl index 8632df139..8d67cfb57 100644 --- a/apps/emqx_conf/src/emqx_conf.erl +++ b/apps/emqx_conf/src/emqx_conf.erl @@ -31,8 +31,9 @@ %% TODO: move to emqx_dashboard when we stop building api schema at build time -export([ - hotconf_schema_json/1, - bridge_schema_json/1 + hotconf_schema_json/0, + bridge_schema_json/0, + hocon_schema_to_spec/2 ]). %% for rpc @@ -184,13 +185,13 @@ gen_api_schema_json(Dir, Lang) -> %% TODO: delete this function when we stop generating this JSON at build time. gen_api_schema_json_hotconf(Dir, Lang) -> File = schema_filename(Dir, "hot-config-schema-", Lang), - IoData = hotconf_schema_json(Lang), + IoData = hotconf_schema_json(), ok = write_api_schema_json_file(File, IoData). %% TODO: delete this function when we stop generating this JSON at build time. gen_api_schema_json_bridge(Dir, Lang) -> File = schema_filename(Dir, "bridge-api-", Lang), - IoData = bridge_schema_json(Lang), + IoData = bridge_schema_json(), ok = write_api_schema_json_file(File, IoData). %% TODO: delete this function when we stop generating this JSON at build time. @@ -199,14 +200,14 @@ write_api_schema_json_file(File, IoData) -> file:write_file(File, IoData). %% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time. -hotconf_schema_json(Lang) -> +hotconf_schema_json() -> SchemaInfo = #{title => <<"EMQX Hot Conf API Schema">>, version => <<"0.1.0">>}, - gen_api_schema_json_iodata(emqx_mgmt_api_configs, SchemaInfo, Lang). + gen_api_schema_json_iodata(emqx_mgmt_api_configs, SchemaInfo). %% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time. -bridge_schema_json(Lang) -> +bridge_schema_json() -> SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => <<"0.1.0">>}, - gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo, Lang). + gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo). schema_filename(Dir, Prefix, Lang) -> Filename = Prefix ++ Lang ++ ".json", @@ -270,50 +271,11 @@ gen_example(File, SchemaModule) -> Example = hocon_schema_example:gen(SchemaModule, Opts), file:write_file(File, Example). -%% TODO: move this to emqx_dashboard when we stop generating -%% this JSON at build time. 
-gen_api_schema_json_iodata(SchemaMod, SchemaInfo, Lang) -> - {ApiSpec0, Components0} = emqx_dashboard_swagger:spec( +gen_api_schema_json_iodata(SchemaMod, SchemaInfo) -> + emqx_dashboard_swagger:gen_api_schema_json_iodata( SchemaMod, - #{ - schema_converter => fun hocon_schema_to_spec/2, - i18n_lang => Lang - } - ), - ApiSpec = lists:foldl( - fun({Path, Spec, _, _}, Acc) -> - NewSpec = maps:fold( - fun(Method, #{responses := Responses}, SubAcc) -> - case Responses of - #{ - <<"200">> := - #{ - <<"content">> := #{ - <<"application/json">> := #{<<"schema">> := Schema} - } - } - } -> - SubAcc#{Method => Schema}; - _ -> - SubAcc - end - end, - #{}, - Spec - ), - Acc#{list_to_atom(Path) => NewSpec} - end, - #{}, - ApiSpec0 - ), - Components = lists:foldl(fun(M, Acc) -> maps:merge(M, Acc) end, #{}, Components0), - emqx_utils_json:encode( - #{ - info => SchemaInfo, - paths => ApiSpec, - components => #{schemas => Components} - }, - [pretty, force_utf8] + SchemaInfo, + fun ?MODULE:hocon_schema_to_spec/2 ). -define(TO_REF(_N_, _F_), iolist_to_binary([to_bin(_N_), ".", to_bin(_F_)])). diff --git a/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl b/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl index 898d95b3c..e4f2f0c1a 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl @@ -45,18 +45,11 @@ schema("/schemas/:name") -> 'operationId' => get_schema, get => #{ parameters => [ - {name, hoconsc:mk(hoconsc:enum([hotconf, bridges]), #{in => path})}, - {lang, - hoconsc:mk(typerefl:string(), #{ - in => query, - default => <<"en">>, - desc => <<"The language of the schema.">> - })} + {name, hoconsc:mk(hoconsc:enum([hotconf, bridges]), #{in => path})} ], desc => << "Get the schema JSON of the specified name. " - "NOTE: you should never need to make use of this API " - "unless you are building a multi-lang dashboaard." + "NOTE: only intended for EMQX Dashboard." >>, tags => ?TAGS, security => [], @@ -71,14 +64,13 @@ schema("/schemas/:name") -> %%-------------------------------------------------------------------- get_schema(get, #{ - bindings := #{name := Name}, - query_string := #{<<"lang">> := Lang} + bindings := #{name := Name} }) -> - {200, gen_schema(Name, iolist_to_binary(Lang))}; + {200, gen_schema(Name)}; get_schema(get, _) -> {400, ?BAD_REQUEST, <<"unknown">>}. -gen_schema(hotconf, Lang) -> - emqx_conf:hotconf_schema_json(Lang); -gen_schema(bridges, Lang) -> - emqx_conf:bridge_schema_json(Lang). +gen_schema(hotconf) -> + emqx_conf:hotconf_schema_json(); +gen_schema(bridges) -> + emqx_conf:bridge_schema_json(). diff --git a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl index e471486e5..fec9717ba 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl @@ -26,7 +26,11 @@ -export([error_codes/1, error_codes/2]). -export([file_schema/1]). --export([filter_check_request/2, filter_check_request_and_translate_body/2]). +-export([ + filter_check_request/2, + filter_check_request_and_translate_body/2, + gen_api_schema_json_iodata/3 +]). -ifdef(TEST). -export([ @@ -72,6 +76,8 @@ ]) ). +-define(SPECIAL_LANG_MSGID, <<"$msgid">>). + -define(MAX_ROW_LIMIT, 1000). -define(DEFAULT_ROW, 100). @@ -192,6 +198,50 @@ file_schema(FileName) -> } }. 
+gen_api_schema_json_iodata(SchemaMod, SchemaInfo, Converter) -> + {ApiSpec0, Components0} = emqx_dashboard_swagger:spec( + SchemaMod, + #{ + schema_converter => Converter, + i18n_lang => ?SPECIAL_LANG_MSGID + } + ), + ApiSpec = lists:foldl( + fun({Path, Spec, _, _}, Acc) -> + NewSpec = maps:fold( + fun(Method, #{responses := Responses}, SubAcc) -> + case Responses of + #{ + <<"200">> := + #{ + <<"content">> := #{ + <<"application/json">> := #{<<"schema">> := Schema} + } + } + } -> + SubAcc#{Method => Schema}; + _ -> + SubAcc + end + end, + #{}, + Spec + ), + Acc#{list_to_atom(Path) => NewSpec} + end, + #{}, + ApiSpec0 + ), + Components = lists:foldl(fun(M, Acc) -> maps:merge(M, Acc) end, #{}, Components0), + emqx_utils_json:encode( + #{ + info => SchemaInfo, + paths => ApiSpec, + components => #{schemas => Components} + }, + [pretty, force_utf8] + ). + %%------------------------------------------------------------------------------ %% Private functions %%------------------------------------------------------------------------------ @@ -482,6 +532,14 @@ maybe_add_summary_from_label(Spec, Hocon, Options) -> get_i18n(Tag, ?DESC(Namespace, Id), Default, Options) -> Lang = get_lang(Options), + case Lang of + ?SPECIAL_LANG_MSGID -> + make_msgid(Namespace, Id, Tag); + _ -> + get_i18n_text(Lang, Namespace, Id, Tag, Default) + end. + +get_i18n_text(Lang, Namespace, Id, Tag, Default) -> case emqx_dashboard_desc_cache:lookup(Lang, Namespace, Id, Tag) of undefined -> Default; @@ -489,6 +547,14 @@ get_i18n(Tag, ?DESC(Namespace, Id), Default, Options) -> Text end. +%% Format:$msgid:Namespace.Id.Tag +%% e.g. $msgid:emqx_schema.key.desc +%% $msgid:emqx_schema.key.label +%% if needed, the consumer of this schema JSON can use this msgid to +%% resolve the text in the i18n database. +make_msgid(Namespace, Id, Tag) -> + iolist_to_binary(["$msgid:", to_bin(Namespace), ".", to_bin(Id), ".", Tag]). + %% So far i18n_lang in options is only used at build time. %% At runtime, it's still the global config which controls the language. get_lang(#{i18n_lang := Lang}) -> Lang; From 55c488fa95262663da6475779bd15bb6e2a6cd07 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Tue, 25 Apr 2023 13:42:42 +0200 Subject: [PATCH 161/263] refactor: stop generating static hot-conf and bridges schema files --- apps/emqx_conf/src/emqx_conf.erl | 27 --------------------------- build | 7 ++----- 2 files changed, 2 insertions(+), 32 deletions(-) diff --git a/apps/emqx_conf/src/emqx_conf.erl b/apps/emqx_conf/src/emqx_conf.erl index 8d67cfb57..eaa16ab5a 100644 --- a/apps/emqx_conf/src/emqx_conf.erl +++ b/apps/emqx_conf/src/emqx_conf.erl @@ -150,7 +150,6 @@ dump_schema(Dir, SchemaModule) -> lists:foreach( fun(Lang) -> ok = gen_config_md(Dir, SchemaModule, Lang), - ok = gen_api_schema_json(Dir, Lang), ok = gen_schema_json(Dir, SchemaModule, Lang) end, ["en", "zh"] @@ -177,28 +176,6 @@ gen_schema_json(Dir, SchemaModule, Lang) -> IoData = emqx_utils_json:encode(JsonMap, [pretty, force_utf8]), ok = file:write_file(SchemaJsonFile, IoData). -%% TODO: delete this function when we stop generating this JSON at build time. -gen_api_schema_json(Dir, Lang) -> - gen_api_schema_json_hotconf(Dir, Lang), - gen_api_schema_json_bridge(Dir, Lang). - -%% TODO: delete this function when we stop generating this JSON at build time. -gen_api_schema_json_hotconf(Dir, Lang) -> - File = schema_filename(Dir, "hot-config-schema-", Lang), - IoData = hotconf_schema_json(), - ok = write_api_schema_json_file(File, IoData). 
- -%% TODO: delete this function when we stop generating this JSON at build time. -gen_api_schema_json_bridge(Dir, Lang) -> - File = schema_filename(Dir, "bridge-api-", Lang), - IoData = bridge_schema_json(), - ok = write_api_schema_json_file(File, IoData). - -%% TODO: delete this function when we stop generating this JSON at build time. -write_api_schema_json_file(File, IoData) -> - io:format(user, "===< Generating: ~s~n", [File]), - file:write_file(File, IoData). - %% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time. hotconf_schema_json() -> SchemaInfo = #{title => <<"EMQX Hot Conf API Schema">>, version => <<"0.1.0">>}, @@ -209,10 +186,6 @@ bridge_schema_json() -> SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => <<"0.1.0">>}, gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo). -schema_filename(Dir, Prefix, Lang) -> - Filename = Prefix ++ Lang ++ ".json", - filename:join([Dir, Filename]). - %% TODO: remove it and also remove hocon_md.erl and friends. %% markdown generation from schema is a failure and we are moving to an interactive %% viewer like swagger UI. diff --git a/build b/build index 77d4dbfc8..05246a359 100755 --- a/build +++ b/build @@ -92,7 +92,7 @@ log() { } make_docs() { - local libs_dir1 libs_dir2 libs_dir3 docdir dashboard_www_static + local libs_dir1 libs_dir2 libs_dir3 docdir libs_dir1="$("$FIND" "_build/$PROFILE/lib/" -maxdepth 2 -name ebin -type d)" if [ -d "_build/default/lib/" ]; then libs_dir2="$("$FIND" "_build/default/lib/" -maxdepth 2 -name ebin -type d)" @@ -113,14 +113,11 @@ make_docs() { ;; esac docdir="_build/docgen/$PROFILE" - dashboard_www_static='apps/emqx_dashboard/priv/www/static/' - mkdir -p "$docdir" "$dashboard_www_static" + mkdir -p "$docdir" # shellcheck disable=SC2086 erl -noshell -pa $libs_dir1 $libs_dir2 $libs_dir3 -eval \ "ok = emqx_conf:dump_schema('$docdir', $SCHEMA_MODULE), \ halt(0)." - cp "$docdir"/bridge-api-*.json "$dashboard_www_static" - cp "$docdir"/hot-config-schema-*.json "$dashboard_www_static" } assert_no_compile_time_only_deps() { From 9260b5ec6c55fc8ae6deb80c65a328216a745a79 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Tue, 25 Apr 2023 19:13:55 +0200 Subject: [PATCH 162/263] test(emqx_dashboard): add test case for api/v5/schemas API --- .../test/emqx_dashboard_schema_api_SUITE.erl | 52 +++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 apps/emqx_dashboard/test/emqx_dashboard_schema_api_SUITE.erl diff --git a/apps/emqx_dashboard/test/emqx_dashboard_schema_api_SUITE.erl b/apps/emqx_dashboard/test/emqx_dashboard_schema_api_SUITE.erl new file mode 100644 index 000000000..e4425aed8 --- /dev/null +++ b/apps/emqx_dashboard/test/emqx_dashboard_schema_api_SUITE.erl @@ -0,0 +1,52 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. 
+%%-------------------------------------------------------------------- + +-module(emqx_dashboard_schema_api_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("emqx/include/http_api.hrl"). + +-include_lib("eunit/include/eunit.hrl"). + +-define(SERVER, "http://127.0.0.1:18083/api/v5"). + +-import(emqx_mgmt_api_test_util, [request/2]). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + emqx_mgmt_api_test_util:init_suite([emqx_conf]), + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite([emqx_conf]). + +t_hotconf(_) -> + Url = ?SERVER ++ "/schemas/hotconf", + {ok, 200, Body} = request(get, Url), + %% assert it's a valid json + _ = emqx_utils_json:decode(Body), + ok. + +t_bridges(_) -> + Url = ?SERVER ++ "/schemas/bridges", + {ok, 200, Body} = request(get, Url), + %% assert it's a valid json + _ = emqx_utils_json:decode(Body), + ok. From ed7a8659d2022c50f1f33b86cd49eb271fdab433 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Tue, 25 Apr 2023 22:22:03 +0200 Subject: [PATCH 163/263] feat: add a json format support for the /status API --- .../src/emqx_mgmt_api_status.erl | 59 ++++++++++++-- .../test/emqx_mgmt_api_status_SUITE.erl | 79 ++++++++++++++++++- rel/i18n/emqx_mgmt_api_status.hocon | 31 ++++++-- rel/i18n/zh/emqx_mgmt_api_status.hocon | 22 ++++-- 4 files changed, 171 insertions(+), 20 deletions(-) diff --git a/apps/emqx_management/src/emqx_mgmt_api_status.erl b/apps/emqx_management/src/emqx_mgmt_api_status.erl index 7d5c18e59..c0ee42e2b 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_status.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_status.erl @@ -45,6 +45,17 @@ schema("/status") -> #{ 'operationId' => get_status, get => #{ + parameters => [ + {format, + hoconsc:mk( + string(), + #{ + in => query, + default => <<"text">>, + desc => ?DESC(get_status_api_format) + } + )} + ], description => ?DESC(get_status_api), tags => ?TAGS, security => [], @@ -70,7 +81,16 @@ path() -> "/status". init(Req0, State) -> - {Code, Headers, Body} = running_status(), + Format = + try + QS = cowboy_req:parse_qs(Req0), + {_, F} = lists:keyfind(<<"format">>, 1, QS), + F + catch + _:_ -> + <<"text">> + end, + {Code, Headers, Body} = running_status(Format), Req = cowboy_req:reply(Code, Headers, Body, Req0), {ok, Req, State}. @@ -78,29 +98,52 @@ init(Req0, State) -> %% API Handler funcs %%-------------------------------------------------------------------- -get_status(get, _Params) -> - running_status(). +get_status(get, Params) -> + Format = maps:get(<<"format">>, maps:get(query_string, Params, #{}), <<"text">>), + running_status(iolist_to_binary(Format)). -running_status() -> +running_status(Format) -> case emqx_dashboard_listener:is_ready(timer:seconds(20)) of true -> - BrokerStatus = broker_status(), AppStatus = application_status(), - Body = io_lib:format("Node ~ts is ~ts~nemqx is ~ts", [node(), BrokerStatus, AppStatus]), + Body = do_get_status(AppStatus, Format), StatusCode = case AppStatus of running -> 200; not_running -> 503 end, + ContentType = + case Format of + <<"json">> -> <<"applicatin/json">>; + _ -> <<"text/plain">> + end, Headers = #{ - <<"content-type">> => <<"text/plain">>, + <<"content-type">> => ContentType, <<"retry-after">> => <<"15">> }, - {StatusCode, Headers, list_to_binary(Body)}; + {StatusCode, Headers, iolist_to_binary(Body)}; false -> {503, #{<<"retry-after">> => <<"15">>}, <<>>} end. 
+do_get_status(AppStatus, <<"json">>) -> + BrokerStatus = broker_status(), + emqx_utils_json:encode(#{ + node_name => atom_to_binary(node(), utf8), + rel_vsn => vsn(), + broker_status => atom_to_binary(BrokerStatus), + app_status => atom_to_binary(AppStatus) + }); +do_get_status(AppStatus, _) -> + BrokerStatus = broker_status(), + io_lib:format("Node ~ts is ~ts~nemqx is ~ts", [node(), BrokerStatus, AppStatus]). + +vsn() -> + iolist_to_binary([ + emqx_release:edition_vsn_prefix(), + emqx_release:version() + ]). + broker_status() -> case emqx:is_running() of true -> diff --git a/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl index f0200c410..e8e0b4ac9 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl @@ -38,7 +38,10 @@ all() -> get_status_tests() -> [ t_status_ok, - t_status_not_ok + t_status_not_ok, + t_status_text_format, + t_status_json_format, + t_status_bad_format_qs ]. groups() -> @@ -87,8 +90,10 @@ do_request(Opts) -> headers := Headers, body := Body0 } = Opts, + QS = maps:get(qs, Opts, ""), URL = ?HOST ++ filename:join(Path0), - {ok, #{host := Host, port := Port, path := Path}} = emqx_http_lib:uri_parse(URL), + {ok, #{host := Host, port := Port, path := Path1}} = emqx_http_lib:uri_parse(URL), + Path = Path1 ++ QS, %% we must not use `httpc' here, because it keeps retrying when it %% receives a 503 with `retry-after' header, and there's no option %% to stop that behavior... @@ -165,3 +170,73 @@ t_status_not_ok(Config) -> Headers ), ok. + +t_status_text_format(Config) -> + Path = ?config(get_status_path, Config), + #{ + body := Resp, + status_code := StatusCode + } = do_request(#{ + method => get, + path => Path, + qs => "?format=text", + headers => [], + body => no_body + }), + ?assertEqual(200, StatusCode), + ?assertMatch( + {match, _}, + re:run(Resp, <<"emqx is running$">>) + ), + ok. + +t_status_json_format(Config) -> + Path = ?config(get_status_path, Config), + #{ + body := Resp, + status_code := StatusCode + } = do_request(#{ + method => get, + path => Path, + qs => "?format=json", + headers => [], + body => no_body + }), + ?assertEqual(200, StatusCode), + ?assertMatch( + #{<<"app_status">> := <<"running">>}, + emqx_utils_json:decode(Resp) + ), + ok. + +t_status_bad_format_qs(Config) -> + lists:foreach( + fun(QS) -> + test_status_bad_format_qs(QS, Config) + end, + [ + "?a=b", + "?format=", + "?format=x" + ] + ). + +%% when query-sting is invalid, fallback to text format +test_status_bad_format_qs(QS, Config) -> + Path = ?config(get_status_path, Config), + #{ + body := Resp, + status_code := StatusCode + } = do_request(#{ + method => get, + path => Path, + qs => QS, + headers => [], + body => no_body + }), + ?assertEqual(200, StatusCode), + ?assertMatch( + {match, _}, + re:run(Resp, <<"emqx is running$">>) + ), + ok. diff --git a/rel/i18n/emqx_mgmt_api_status.hocon b/rel/i18n/emqx_mgmt_api_status.hocon index 28278b747..2034d13bc 100644 --- a/rel/i18n/emqx_mgmt_api_status.hocon +++ b/rel/i18n/emqx_mgmt_api_status.hocon @@ -1,21 +1,42 @@ emqx_mgmt_api_status { get_status_api.desc: -"""Serves as a health check for the node. Returns a plain text response describing the status of the node. This endpoint requires no authentication. +"""Serves as a health check for the node. +Returns response to describe the status of the node and the application. + +This endpoint requires no authentication. 
Returns status code 200 if the EMQX application is up and running, 503 otherwise. This API was introduced in v5.0.10. -The GET `/status` endpoint (without the `/api/...` prefix) is also an alias to this endpoint and works in the same way. This alias has been available since v5.0.0.""" +The GET `/status` endpoint (without the `/api/...` prefix) is also an alias to this endpoint and works in the same way. +This alias has been available since v5.0.0. + +Starting from v5.0.25 or e5.0.4, you can also use 'format' parameter to get JSON format information. +""" get_status_api.label: """Service health check""" get_status_response200.desc: -"""Node emqx@127.0.0.1 is started +"""If 'format' parameter is 'json', then it returns a JSON like below:
+{ + "rel_vsn": "v5.0.23", + "node_name": "emqx@127.0.0.1", + "broker_status": "started", + "app_status": "running" +} +
+Otherwise it returns free text strings as below:
+Node emqx@127.0.0.1 is started emqx is running""" get_status_response503.desc: -"""Node emqx@127.0.0.1 is stopped -emqx is not_running""" +"""When EMQX application is temporary not running or being restarted, it may return 'emqx is not_running'. +If the 'format' parameter is provided 'json', the nthe 'app_status' field in the JSON object is 'not_running'. +""" + +get_status_api_format.desc: +"""Specify the response format, 'text' (default) to return the HTTP body in free text, +or 'json' to return the HTTP body with a JSON object.""" } diff --git a/rel/i18n/zh/emqx_mgmt_api_status.hocon b/rel/i18n/zh/emqx_mgmt_api_status.hocon index 3625db967..3938f47c1 100644 --- a/rel/i18n/zh/emqx_mgmt_api_status.hocon +++ b/rel/i18n/zh/emqx_mgmt_api_status.hocon @@ -1,22 +1,34 @@ emqx_mgmt_api_status { get_status_api.desc: -"""作为节点的健康检查。 返回一个纯文本的响应,描述节点的状态。 +"""节点的健康检查。 返回节点状态的描述信息。 如果 EMQX 应用程序已经启动并运行,返回状态代码 200,否则返回 503。 这个API是在v5.0.10中引入的。 -GET `/status`端点(没有`/api/...`前缀)也是这个端点的一个别名,工作方式相同。 这个别名从v5.0.0开始就有了。""" +GET `/status`端点(没有`/api/...`前缀)也是这个端点的一个别名,工作方式相同。 这个别名从v5.0.0开始就有了。 +自 v5.0.25 和 e5.0.4 开始,可以通过指定 'format' 参数来得到 JSON 格式的信息。""" get_status_api.label: """服务健康检查""" get_status_response200.desc: -"""Node emqx@127.0.0.1 is started +"""如果 'format' 参数为 'json',则返回如下JSON:
+{ + "rel_vsn": "v5.0.23", + "node_name": "emqx@127.0.0.1", + "broker_status": "started", + "app_status": "running" +} +
+否则返回2行自由格式的文本,第一行描述节点的状态,第二行描述 EMQX 应用运行状态。例如:
+Node emqx@127.0.0.1 is started emqx is running""" get_status_response503.desc: -"""Node emqx@127.0.0.1 is stopped -emqx is not_running""" +"""如果 EMQX 应用暂时没有启动,或正在重启,则可能返回 'emqx is not_running'""" + +get_status_api_format.desc: +"""指定返回的内容格式。使用 'text'(默认)则返回自由格式的字符串; 'json' 则返回 JSON 格式。""" } From 48e68b7c77ced9c1a243fbd09ddde8e9065c0cf9 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Tue, 25 Apr 2023 18:04:07 +0200 Subject: [PATCH 164/263] test: add smoke test to cover schemas api --- scripts/test/emqx-smoke-test.sh | 102 ++++++++++++++++++++++++-------- 1 file changed, 76 insertions(+), 26 deletions(-) diff --git a/scripts/test/emqx-smoke-test.sh b/scripts/test/emqx-smoke-test.sh index ce8116b39..09e7d6438 100755 --- a/scripts/test/emqx-smoke-test.sh +++ b/scripts/test/emqx-smoke-test.sh @@ -2,42 +2,92 @@ set -euo pipefail -[ $# -ne 2 ] && { echo "Usage: $0 ip port"; exit 1; } +[ $# -ne 2 ] && { echo "Usage: $0 host port"; exit 1; } -IP=$1 +HOST=$1 PORT=$2 -URL="http://$IP:$PORT/status" +BASE_URL="http://$HOST:$PORT" ## Check if EMQX is responding -ATTEMPTS=10 -while ! curl "$URL" >/dev/null 2>&1; do - if [ $ATTEMPTS -eq 0 ]; then - echo "emqx is not responding on $URL" - exit 1 +wait_for_emqx() { + local attempts=10 + local url="$BASE_URL"/status + while ! curl "$url" >/dev/null 2>&1; do + if [ $attempts -eq 0 ]; then + echo "emqx is not responding on $url" + exit 1 + fi + sleep 5 + attempts=$((attempts-1)) + done +} + +## Get the JSON format status which is jq friendly and includes a version string +json_status() { + local url="${BASE_URL}/status?format=json" + local resp + resp="$(curl -s "$url")" + if (echo "$resp" | jq . >/dev/null 2>&1); then + echo "$resp" + else + echo 'NOT_JSON' fi - sleep 5 - ATTEMPTS=$((ATTEMPTS-1)) -done +} ## Check if the API docs are available -API_DOCS_URL="http://$IP:$PORT/api-docs/index.html" -API_DOCS_STATUS="$(curl -s -o /dev/null -w "%{http_code}" "$API_DOCS_URL")" -if [ "$API_DOCS_STATUS" != "200" ]; then - echo "emqx is not responding on $API_DOCS_URL" - exit 1 -fi +check_api_docs() { + local url="$BASE_URL/api-docs/index.html" + local status + status="$(curl -s -o /dev/null -w "%{http_code}" "$url")" + if [ "$status" != "200" ]; then + echo "emqx is not responding on $API_DOCS_URL" + exit 1 + fi +} ## Check if the swagger.json contains hidden fields ## fail if it does -SWAGGER_JSON_URL="http://$IP:$PORT/api-docs/swagger.json" -## assert swagger.json is valid json -JSON="$(curl -s "$SWAGGER_JSON_URL")" -echo "$JSON" | jq . >/dev/null +check_swagger_json() { + local url="$BASE_URL/api-docs/swagger.json" + ## assert swagger.json is valid json + JSON="$(curl -s "$url")" + echo "$JSON" | jq . 
>/dev/null -if [ "${EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS:-yes}" = 'yes' ]; then - ## assert swagger.json does not contain trie_compaction (which is a hidden field) - if echo "$JSON" | grep -q trie_compaction; then - echo "swagger.json contains hidden fields" + if [ "${EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS:-yes}" = 'yes' ]; then + ## assert swagger.json does not contain trie_compaction (which is a hidden field) + if echo "$JSON" | grep -q trie_compaction; then + echo "swagger.json contains hidden fields" + exit 1 + fi + fi +} + +check_schema_json() { + local name="$1" + local expected_title="$2" + local url="$BASE_URL/api/v5/schemas/$name" + local json + json="$(curl -s "$url" | jq .)" + title="$(echo "$json" | jq -r '.info.title')" + if [[ "$title" != "$expected_title" ]]; then + echo "unexpected value from GET $url" + echo "expected: $expected_title" + echo "got : $title" exit 1 fi -fi +} + +main() { + wait_for_emqx + local JSON_STATUS + JSON_STATUS="$(json_status)" + check_api_docs + check_swagger_json + ## The json status feature was added after hotconf and bridges schema API + if [ "$JSON_STATUS" != 'NOT_JSON' ]; then + check_schema_json hotconf "EMQX Hot Conf API Schema" + check_schema_json bridges "EMQX Data Bridge API Schema" + fi +} + +main From 0bd30e039f300eb049009325d122895d356cbab9 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Tue, 25 Apr 2023 22:56:25 +0200 Subject: [PATCH 165/263] test: simplify swagger json check script --- .github/workflows/build_slim_packages.yaml | 3 --- scripts/test/emqx-smoke-test.sh | 13 +++++-------- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build_slim_packages.yaml b/.github/workflows/build_slim_packages.yaml index 9ae5ba944..06bcb98a2 100644 --- a/.github/workflows/build_slim_packages.yaml +++ b/.github/workflows/build_slim_packages.yaml @@ -194,15 +194,12 @@ jobs: run: | CID=$(docker run -d --rm -P $EMQX_IMAGE_TAG) HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' $CID) - export EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS='yes' ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT docker stop $CID - name: test two nodes cluster with proto_dist=inet_tls in docker run: | ./scripts/test/start-two-nodes-in-docker.sh -P $EMQX_IMAGE_TAG $EMQX_IMAGE_OLD_VERSION_TAG HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' haproxy) - # versions before 5.0.22 have hidden fields included in the API spec - export EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS='no' ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT # cleanup ./scripts/test/start-two-nodes-in-docker.sh -c diff --git a/scripts/test/emqx-smoke-test.sh b/scripts/test/emqx-smoke-test.sh index 09e7d6438..44df5b5bd 100755 --- a/scripts/test/emqx-smoke-test.sh +++ b/scripts/test/emqx-smoke-test.sh @@ -52,13 +52,10 @@ check_swagger_json() { ## assert swagger.json is valid json JSON="$(curl -s "$url")" echo "$JSON" | jq . 
>/dev/null - - if [ "${EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS:-yes}" = 'yes' ]; then - ## assert swagger.json does not contain trie_compaction (which is a hidden field) - if echo "$JSON" | grep -q trie_compaction; then - echo "swagger.json contains hidden fields" - exit 1 - fi + ## assert swagger.json does not contain trie_compaction (which is a hidden field) + if echo "$JSON" | grep -q trie_compaction; then + echo "swagger.json contains hidden fields" + exit 1 fi } @@ -82,9 +79,9 @@ main() { local JSON_STATUS JSON_STATUS="$(json_status)" check_api_docs - check_swagger_json ## The json status feature was added after hotconf and bridges schema API if [ "$JSON_STATUS" != 'NOT_JSON' ]; then + check_swagger_json check_schema_json hotconf "EMQX Hot Conf API Schema" check_schema_json bridges "EMQX Data Bridge API Schema" fi From 01770fab8597d9eda6afcb8fe7704206a4e56c14 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Wed, 26 Apr 2023 18:02:14 +0200 Subject: [PATCH 166/263] ci: fix pkg-vsn.sh in perf test --- .github/workflows/performance_test.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/performance_test.yaml b/.github/workflows/performance_test.yaml index e8a2a321e..2433f3621 100644 --- a/.github/workflows/performance_test.yaml +++ b/.github/workflows/performance_test.yaml @@ -24,14 +24,14 @@ jobs: with: fetch-depth: 0 ref: ${{ github.event.inputs.ref }} + - name: Work around https://github.com/actions/checkout/issues/766 + run: | + git config --global --add safe.directory "$GITHUB_WORKSPACE" - id: prepare run: | echo "EMQX_NAME=emqx" >> $GITHUB_ENV echo "CODE_PATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV echo "BENCH_ID=$(date --utc +%F)/emqx-$(./pkg-vsn.sh emqx)" >> $GITHUB_OUTPUT - - name: Work around https://github.com/actions/checkout/issues/766 - run: | - git config --global --add safe.directory "$GITHUB_WORKSPACE" - name: Build deb package run: | make ${EMQX_NAME}-pkg From 35c48ef009975201488373ba51f3cc9acb24ae16 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Wed, 26 Apr 2023 18:02:44 +0200 Subject: [PATCH 167/263] chore: v5.0.24 --- apps/emqx/include/emqx_release.hrl | 2 +- deploy/charts/emqx/Chart.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apps/emqx/include/emqx_release.hrl b/apps/emqx/include/emqx_release.hrl index eff228621..0cfba8fe3 100644 --- a/apps/emqx/include/emqx_release.hrl +++ b/apps/emqx/include/emqx_release.hrl @@ -32,7 +32,7 @@ %% `apps/emqx/src/bpapi/README.md' %% Community edition --define(EMQX_RELEASE_CE, "5.0.23"). +-define(EMQX_RELEASE_CE, "5.0.24"). %% Enterprise edition -define(EMQX_RELEASE_EE, "5.0.3-alpha.3"). diff --git a/deploy/charts/emqx/Chart.yaml b/deploy/charts/emqx/Chart.yaml index 312a9dfbe..9c23f7c15 100644 --- a/deploy/charts/emqx/Chart.yaml +++ b/deploy/charts/emqx/Chart.yaml @@ -14,8 +14,8 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 5.0.23 +version: 5.0.24 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 5.0.23 +appVersion: 5.0.24 From 50504a4cbfcb29c48908421876a92d09194e3e9f Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Wed, 26 Apr 2023 18:03:00 +0200 Subject: [PATCH 168/263] docs: Generate changelog for v5.0.24 --- changes/v5.0.24.en.md | 89 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 changes/v5.0.24.en.md diff --git a/changes/v5.0.24.en.md b/changes/v5.0.24.en.md new file mode 100644 index 000000000..4fa5cdd4f --- /dev/null +++ b/changes/v5.0.24.en.md @@ -0,0 +1,89 @@ +# v5.0.24 + +## Enhancements + +- [#10457](https://github.com/emqx/emqx/pull/10457) Deprecates the integration with StatsD. + + There seemd to be no user using StatsD integration, so we have decided to hide this feature + for now. We will either remove or revive it based on requirements in the future. + +- [#10458](https://github.com/emqx/emqx/pull/10458) Set the level of plugin configuration options to low level, + in most cases, users only need to manage plugins on the dashboard + without the need for manual modification, so we lowered the level. + +- [#10491](https://github.com/emqx/emqx/pull/10491) Rename `etcd.ssl` to `etcd.ssl_options` to keep all of SSL options consistent in the configuration file. + +- [#10512](https://github.com/emqx/emqx/pull/10512) Improved the storage format of Unicode characters in data files, + Now we can store Unicode characters normally. + For example: "SELECT * FROM \"t/1\" WHERE clientid = \"-测试专用-\"" + +- [#10487](https://github.com/emqx/emqx/pull/10487) Optimize the instance of limiter for whose rate is `infinity` to reduce memory and CPU usage. + +- [#10490](https://github.com/emqx/emqx/pull/10490) Remove the default limit of connect rate which used to be `1000/s` + +## Bug Fixes + +- [#10407](https://github.com/emqx/emqx/pull/10407) Improve 'emqx_alarm' performance by using Mnesia dirty operations and avoiding + unnecessary calls from 'emqx_resource_manager' to reactivate alarms that have been already activated. + Use new safe 'emqx_alarm' API to activate/deactivate alarms to ensure that emqx_resource_manager + doesn't crash because of alarm timeouts. + The crashes were possible when the following conditions co-occurred: + - a relatively high number of failing resources, e.g. bridges tried to activate alarms on re-occurring errors; + - the system experienced a very high load. + +- [#10420](https://github.com/emqx/emqx/pull/10420) Fix HTTP path handling when composing the URL for the HTTP requests in authentication and authorization modules. + * Avoid unnecessary URL normalization since we cannot assume that external servers treat original and normalized URLs equally. This led to bugs like [#10411](https://github.com/emqx/emqx/issues/10411). + * Fix the issue that path segments could be HTTP encoded twice. + +- [#10422](https://github.com/emqx/emqx/pull/10422) Fixed a bug where external plugins could not be configured via environment variables in a lone-node cluster. + +- [#10448](https://github.com/emqx/emqx/pull/10448) Fix a compatibility issue of limiter configuration introduced by v5.0.23 which broke the upgrade from previous versions if the `capacity` is `infinity`. + + In v5.0.23 we have replaced `capacity` with `burst`. After this fix, a `capacity = infinity` config will be automatically converted to equivalent `burst = 0`. + +- [#10449](https://github.com/emqx/emqx/pull/10449) Validate the ssl_options and header configurations when creating authentication http (`authn_http`). 
+  Prior to this, an incorrect `ssl` configuration could result in successful creation, but the authenticator would be entirely unusable.
+
+- [#10455](https://github.com/emqx/emqx/pull/10455) Fixed an issue that could cause (otherwise harmless) noise in the logs.
+
+  During some particularly slow synchronous calls to bridges, some late replies could be sent to connection processes that were no longer expecting a reply, which would then emit an error log like:
+
+  ```
+  2023-04-19T18:24:35.350233+00:00 [error] msg: unexpected_info, mfa: emqx_channel:handle_info/2, line: 1278, peername: 172.22.0.1:36384, clientid: caribdis_bench_sub_1137967633_4788, info: {#Ref<0.408802983.1941504010.189402>,{ok,200,[{<<"cache-control">>,<<"max-age=0, ...">>}}
+  ```
+
+  Those logs are harmless, but they could flood the log and needlessly worry users.
+
+- [#10462](https://github.com/emqx/emqx/pull/10462) Deprecate config `broker.shared_dispatch_ack_enabled`.
+  This was designed to avoid dispatching messages to a shared-subscription session whose client is disconnected.
+  However, since v5.0.9 this feature is no longer useful, because the shared-subscription messages in an expired session will be redispatched to other sessions in the group.
+  See also: https://github.com/emqx/emqx/pull/9104
+
+- [#10463](https://github.com/emqx/emqx/pull/10463) Improve bridges API error handling.
+  If the Webhook bridge URL is not valid, the bridges API will return a '400' error instead of '500'.
+
+- [#10484](https://github.com/emqx/emqx/pull/10484) Fix the issue that the priority of the configuration cannot be set during a rolling upgrade.
+  For example, when authorization is modified in v5.0.21 and the node is then upgraded to v5.0.23 through a rolling upgrade,
+  the authorization would be restored to the default.
+
+- [#10495](https://github.com/emqx/emqx/pull/10495) Add back the limiter API `/configs/limiter`, which was deleted by mistake.
+
+- [#10500](https://github.com/emqx/emqx/pull/10500) Add several fixes, enhancements and features in Mria:
+  - protect `mria:join/1,2` with a global lock to prevent conflicts between
+    two nodes trying to join each other simultaneously
+    [Mria PR](https://github.com/emqx/mria/pull/137)
+  - implement a new function `mria:sync_transaction/4,3,2`, which blocks the caller until
+    a transaction is imported to the local node (if the local node is a replicant; otherwise,
+    it behaves exactly the same as `mria:transaction/3,2`)
+    [Mria PR](https://github.com/emqx/mria/pull/136)
+  - optimize `mria:running_nodes/0`
+    [Mria PR](https://github.com/emqx/mria/pull/135)
+  - optimize `mria:ro_transaction/2` when called on a replicant node
+    [Mria PR](https://github.com/emqx/mria/pull/134).
+
+- [#10518](https://github.com/emqx/emqx/pull/10518) Add the following fixes and features in Mria:
+  - call `mria_rlog:role/1` safely in mria_membership to ensure that the mria_membership
+    gen_server won't crash if an RPC to another node fails
+    [Mria PR](https://github.com/emqx/mria/pull/139)
+  - add an extra field to the ?rlog_sync table to facilitate extending this functionality in the future
+    [Mria PR](https://github.com/emqx/mria/pull/138).

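As a quick manual check of the `format` parameter added to the status endpoint earlier in this series, something like the following sketch can be run against a running node (the `127.0.0.1:18083` dashboard address and the printed values are assumptions for illustration only; the field names match `do_get_status/2`):

```
# Default plain-text health check (two free-text lines).
curl -s 'http://127.0.0.1:18083/status'
# JSON variant introduced by this series; pretty-print with jq.
curl -s 'http://127.0.0.1:18083/status?format=json' | jq .
# Example output shape (values differ per node and release):
# {
#   "node_name": "emqx@127.0.0.1",
#   "rel_vsn": "v5.0.24",
#   "broker_status": "started",
#   "app_status": "running"
# }
```
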
From 51cd83e70fc12b8092b826c82eb9b4d43d53b961 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Wed, 18 Jan 2023 08:12:43 +0100 Subject: [PATCH 169/263] refactor: delete dead code --- apps/emqx_machine/src/emqx_machine.erl | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/apps/emqx_machine/src/emqx_machine.erl b/apps/emqx_machine/src/emqx_machine.erl index 6872b150c..aa8f03ae5 100644 --- a/apps/emqx_machine/src/emqx_machine.erl +++ b/apps/emqx_machine/src/emqx_machine.erl @@ -43,7 +43,7 @@ start() -> start_sysmon(), configure_shard_transports(), ekka:start(), - ok = print_otp_version_warning(). + ok. graceful_shutdown() -> emqx_machine_terminator:graceful_wait(). @@ -61,17 +61,6 @@ set_backtrace_depth() -> is_ready() -> emqx_machine_terminator:is_running(). --if(?OTP_RELEASE > 22). -print_otp_version_warning() -> ok. --else. -print_otp_version_warning() -> - ?ULOG( - "WARNING: Running on Erlang/OTP version ~p. Recommended: 23~n", - [?OTP_RELEASE] - ). -% OTP_RELEASE > 22 --endif. - start_sysmon() -> _ = application:load(system_monitor), application:set_env(system_monitor, node_status_fun, {?MODULE, node_status}), From d89975c6eedaa6bbc61c4799faa2356bd98f70da Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Wed, 18 Jan 2023 08:20:56 +0100 Subject: [PATCH 170/263] test: add a test case for emqx_machine:node_status --- apps/emqx_machine/src/emqx_machine.app.src | 2 +- apps/emqx_machine/test/emqx_machine_SUITE.erl | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/apps/emqx_machine/src/emqx_machine.app.src b/apps/emqx_machine/src/emqx_machine.app.src index 6bd36aab5..a44d2b36e 100644 --- a/apps/emqx_machine/src/emqx_machine.app.src +++ b/apps/emqx_machine/src/emqx_machine.app.src @@ -3,7 +3,7 @@ {id, "emqx_machine"}, {description, "The EMQX Machine"}, % strict semver, bump manually! - {vsn, "0.2.2"}, + {vsn, "0.2.3"}, {modules, []}, {registered, []}, {applications, [kernel, stdlib, emqx_ctl]}, diff --git a/apps/emqx_machine/test/emqx_machine_SUITE.erl b/apps/emqx_machine/test/emqx_machine_SUITE.erl index 691cda677..02d03d983 100644 --- a/apps/emqx_machine/test/emqx_machine_SUITE.erl +++ b/apps/emqx_machine/test/emqx_machine_SUITE.erl @@ -103,3 +103,13 @@ t_custom_shard_transports(_Config) -> emqx_machine:start(), ?assertEqual(distr, mria_config:shard_transport(Shard)), ok. + +t_node_status(_Config) -> + JSON = emqx_machine:node_status(), + ?assertMatch( + #{ + <<"backend">> := _, + <<"role">> := <<"core">> + }, + jsx:decode(JSON) + ). 
From 0c284ce5fed6e62dccf03289fc4458c5e86fb95f Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Wed, 26 Apr 2023 20:50:06 +0200 Subject: [PATCH 171/263] chore: bump app versions --- apps/emqx_conf/src/emqx_conf.app.src | 2 +- apps/emqx_dashboard/src/emqx_dashboard.app.src | 2 +- apps/emqx_management/src/emqx_management.app.src | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apps/emqx_conf/src/emqx_conf.app.src b/apps/emqx_conf/src/emqx_conf.app.src index 03cd36522..e6c3d9cd9 100644 --- a/apps/emqx_conf/src/emqx_conf.app.src +++ b/apps/emqx_conf/src/emqx_conf.app.src @@ -1,6 +1,6 @@ {application, emqx_conf, [ {description, "EMQX configuration management"}, - {vsn, "0.1.18"}, + {vsn, "0.1.19"}, {registered, []}, {mod, {emqx_conf_app, []}}, {applications, [kernel, stdlib, emqx_ctl]}, diff --git a/apps/emqx_dashboard/src/emqx_dashboard.app.src b/apps/emqx_dashboard/src/emqx_dashboard.app.src index 8c7e424e0..bd022f226 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard.app.src +++ b/apps/emqx_dashboard/src/emqx_dashboard.app.src @@ -2,7 +2,7 @@ {application, emqx_dashboard, [ {description, "EMQX Web Dashboard"}, % strict semver, bump manually! - {vsn, "5.0.19"}, + {vsn, "5.0.20"}, {modules, []}, {registered, [emqx_dashboard_sup]}, {applications, [kernel, stdlib, mnesia, minirest, emqx, emqx_ctl]}, diff --git a/apps/emqx_management/src/emqx_management.app.src b/apps/emqx_management/src/emqx_management.app.src index ec282b60b..34f3dd1fe 100644 --- a/apps/emqx_management/src/emqx_management.app.src +++ b/apps/emqx_management/src/emqx_management.app.src @@ -2,7 +2,7 @@ {application, emqx_management, [ {description, "EMQX Management API and CLI"}, % strict semver, bump manually! - {vsn, "5.0.20"}, + {vsn, "5.0.21"}, {modules, []}, {registered, [emqx_management_sup]}, {applications, [kernel, stdlib, emqx_plugins, minirest, emqx, emqx_ctl]}, From 262f0e8dd9cd4a8a08612cee293ad3e11c628639 Mon Sep 17 00:00:00 2001 From: firest Date: Thu, 27 Apr 2023 10:28:05 +0800 Subject: [PATCH 172/263] chore: update README --- apps/emqx_bridge_matrix/README.md | 3 +-- apps/emqx_bridge_mysql/README.md | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/apps/emqx_bridge_matrix/README.md b/apps/emqx_bridge_matrix/README.md index 339eb0605..0d9c4fc4a 100644 --- a/apps/emqx_bridge_matrix/README.md +++ b/apps/emqx_bridge_matrix/README.md @@ -1,7 +1,6 @@ # EMQX MatrixDB Bridge -[MatrixDB](http://matrixdb.univ-lyon1.fr/) is a biological database focused on -molecular interactions between extracellular proteins and polysaccharides. +[YMatrix](https://www.ymatrix.cn/) is a hyper-converged database product developed by YMatrix based on the PostgreSQL / Greenplum classic open source database. In addition to being able to handle time series scenarios with ease, it also supports classic scenarios such as online transaction processing (OLTP) and online analytical processing (OLAP). The application is used to connect EMQX and MatrixDB. User can create a rule and easily ingest IoT data into MatrixDB by leveraging diff --git a/apps/emqx_bridge_mysql/README.md b/apps/emqx_bridge_mysql/README.md index 73f6987b6..d7c9b5647 100644 --- a/apps/emqx_bridge_mysql/README.md +++ b/apps/emqx_bridge_mysql/README.md @@ -1,6 +1,6 @@ # EMQX MySQL Bridge -[MySQL](https://github.com/MySQL/MySQL) is a popular open-source relational database +[MySQL](https://github.com/mysql/mysql-server) is a popular open-source relational database management system. The application is used to connect EMQX and MySQL. 
From ce2f2217eecfd76884ddda9b1d0ebcefbe19b807 Mon Sep 17 00:00:00 2001 From: firest Date: Thu, 27 Apr 2023 10:36:50 +0800 Subject: [PATCH 173/263] chore: bump versions --- apps/emqx_conf/src/emqx_conf.app.src | 2 +- apps/emqx_dashboard/src/emqx_dashboard.app.src | 2 +- apps/emqx_management/src/emqx_management.app.src | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apps/emqx_conf/src/emqx_conf.app.src b/apps/emqx_conf/src/emqx_conf.app.src index 03cd36522..e6c3d9cd9 100644 --- a/apps/emqx_conf/src/emqx_conf.app.src +++ b/apps/emqx_conf/src/emqx_conf.app.src @@ -1,6 +1,6 @@ {application, emqx_conf, [ {description, "EMQX configuration management"}, - {vsn, "0.1.18"}, + {vsn, "0.1.19"}, {registered, []}, {mod, {emqx_conf_app, []}}, {applications, [kernel, stdlib, emqx_ctl]}, diff --git a/apps/emqx_dashboard/src/emqx_dashboard.app.src b/apps/emqx_dashboard/src/emqx_dashboard.app.src index 8c7e424e0..bd022f226 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard.app.src +++ b/apps/emqx_dashboard/src/emqx_dashboard.app.src @@ -2,7 +2,7 @@ {application, emqx_dashboard, [ {description, "EMQX Web Dashboard"}, % strict semver, bump manually! - {vsn, "5.0.19"}, + {vsn, "5.0.20"}, {modules, []}, {registered, [emqx_dashboard_sup]}, {applications, [kernel, stdlib, mnesia, minirest, emqx, emqx_ctl]}, diff --git a/apps/emqx_management/src/emqx_management.app.src b/apps/emqx_management/src/emqx_management.app.src index ec282b60b..34f3dd1fe 100644 --- a/apps/emqx_management/src/emqx_management.app.src +++ b/apps/emqx_management/src/emqx_management.app.src @@ -2,7 +2,7 @@ {application, emqx_management, [ {description, "EMQX Management API and CLI"}, % strict semver, bump manually! - {vsn, "5.0.20"}, + {vsn, "5.0.21"}, {modules, []}, {registered, [emqx_management_sup]}, {applications, [kernel, stdlib, emqx_plugins, minirest, emqx, emqx_ctl]}, From 99448151e9e1928c4d16303ca90dd8ff6b1e7dff Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Wed, 26 Apr 2023 09:38:15 -0300 Subject: [PATCH 174/263] test(crl): ensure ssl_manager is ready to avoid flakiness Example failure: https://github.com/emqx/emqx/actions/runs/4806430125/jobs/8555021522?pr=10524#step:8:49138 --- apps/emqx/test/emqx_crl_cache_SUITE.erl | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/apps/emqx/test/emqx_crl_cache_SUITE.erl b/apps/emqx/test/emqx_crl_cache_SUITE.erl index dd3eb29e7..1b8abb9c3 100644 --- a/apps/emqx/test/emqx_crl_cache_SUITE.erl +++ b/apps/emqx/test/emqx_crl_cache_SUITE.erl @@ -35,6 +35,7 @@ all() -> init_per_suite(Config) -> application:load(emqx), + {ok, _} = application:ensure_all_started(ssl), emqx_config:save_schema_mod_and_names(emqx_schema), emqx_common_test_helpers:boot_modules(all), Config. @@ -328,7 +329,15 @@ drain_msgs() -> clear_crl_cache() -> %% reset the CRL cache + Ref = monitor(process, whereis(ssl_manager)), exit(whereis(ssl_manager), kill), + receive + {'DOWN', Ref, process, _, _} -> + ok + after 1_000 -> + ct:fail("ssl_manager didn't die") + end, + ensure_ssl_manager_alive(), ok. force_cacertfile(Cacertfile) -> @@ -382,7 +391,6 @@ setup_crl_options(Config, #{is_cached := IsCached} = Opts) -> false -> %% ensure cache is empty clear_crl_cache(), - ct:sleep(200), ok end, drain_msgs(), @@ -459,6 +467,13 @@ of_kinds(Trace0, Kinds0) -> Trace0 ). +ensure_ssl_manager_alive() -> + ?retry( + _Sleep0 = 200, + _Attempts0 = 50, + true = is_pid(whereis(ssl_manager)) + ). 
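+%% Note: the helper above is needed because ssl_manager is restarted
+%% asynchronously by its supervisor after the kill in clear_crl_cache/0.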
+ %%-------------------------------------------------------------------- %% Test cases %%-------------------------------------------------------------------- From 0f37f38fda991e7c3943fd2fdda72bf5d9404698 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 27 Apr 2023 09:51:40 -0300 Subject: [PATCH 175/263] ci: set `IS_CI=yes` when running tests --- .github/workflows/run_test_cases.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/run_test_cases.yaml b/.github/workflows/run_test_cases.yaml index f7b775f08..0ebc67e13 100644 --- a/.github/workflows/run_test_cases.yaml +++ b/.github/workflows/run_test_cases.yaml @@ -14,6 +14,9 @@ on: - e* pull_request: +env: + IS_CI: "yes" + jobs: build-matrix: runs-on: ubuntu-22.04 From 7853a4c36ee7a40697faf121250ca4873e720d0b Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 27 Apr 2023 11:58:28 -0300 Subject: [PATCH 176/263] chore: bump app vsns --- apps/emqx_resource/src/emqx_resource.app.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/emqx_resource/src/emqx_resource.app.src b/apps/emqx_resource/src/emqx_resource.app.src index 2553e6dd8..3e264cb3e 100644 --- a/apps/emqx_resource/src/emqx_resource.app.src +++ b/apps/emqx_resource/src/emqx_resource.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_resource, [ {description, "Manager for all external resources"}, - {vsn, "0.1.14"}, + {vsn, "0.1.15"}, {registered, []}, {mod, {emqx_resource_app, []}}, {applications, [ From 521b54904985448021f99847783a84c1468a7e32 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 27 Apr 2023 11:58:40 -0300 Subject: [PATCH 177/263] test(peer): define cookie when using `ct_slave` module --- apps/emqx/test/emqx_common_test_helpers.erl | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/apps/emqx/test/emqx_common_test_helpers.erl b/apps/emqx/test/emqx_common_test_helpers.erl index ac03f4660..71e1bee84 100644 --- a/apps/emqx/test/emqx_common_test_helpers.erl +++ b/apps/emqx/test/emqx_common_test_helpers.erl @@ -271,6 +271,7 @@ app_schema(App) -> mustache_vars(App, Opts) -> ExtraMustacheVars = maps:get(extra_mustache_vars, Opts, #{}), Defaults = #{ + node_cookie => atom_to_list(erlang:get_cookie()), platform_data_dir => app_path(App, "data"), platform_etc_dir => app_path(App, "etc"), platform_log_dir => app_path(App, "log") @@ -667,6 +668,7 @@ start_slave(Name, Opts) when is_map(Opts) -> SlaveMod = maps:get(peer_mod, Opts, ct_slave), Node = node_name(Name), put_peer_mod(Node, SlaveMod), + Cookie = atom_to_list(erlang:get_cookie()), DoStart = fun() -> case SlaveMod of @@ -678,7 +680,11 @@ start_slave(Name, Opts) when is_map(Opts) -> {monitor_master, true}, {init_timeout, 20_000}, {startup_timeout, 20_000}, - {erl_flags, erl_flags()} + {erl_flags, erl_flags()}, + {env, [ + {"HOCON_ENV_OVERRIDE_PREFIX", "EMQX_"}, + {"EMQX_NODE__COOKIE", Cookie} + ]} ] ); slave -> From dd90b2f49827a6f479ff7cb526d1daa9a5e03862 Mon Sep 17 00:00:00 2001 From: Paulo Zulato Date: Mon, 24 Apr 2023 12:21:02 -0300 Subject: [PATCH 178/263] feat(oracle): Oracle Database integration --- .../docker-compose-oracle.yaml | 11 + .ci/docker-compose-file/toxiproxy.json | 6 + apps/emqx_bridge/src/emqx_bridge.app.src | 2 +- apps/emqx_bridge/src/emqx_bridge.erl | 3 +- apps/emqx_bridge_oracle/BSL.txt | 94 +++ apps/emqx_bridge_oracle/README.md | 28 + apps/emqx_bridge_oracle/docker-ct | 2 + .../etc/emqx_bridge_oracle.conf | 0 apps/emqx_bridge_oracle/rebar.config | 13 + 
.../src/emqx_bridge_oracle.app.src | 14 + .../src/emqx_bridge_oracle.erl | 109 ++++ .../test/emqx_bridge_oracle_SUITE.erl | 594 ++++++++++++++++++ apps/emqx_oracle/BSL.txt | 94 +++ apps/emqx_oracle/README.md | 14 + apps/emqx_oracle/rebar.config | 7 + apps/emqx_oracle/src/emqx_oracle.app.src | 14 + apps/emqx_oracle/src/emqx_oracle.erl | 434 +++++++++++++ apps/emqx_oracle/src/emqx_oracle_schema.erl | 33 + .../emqx_plugin_libs/src/emqx_placeholder.erl | 11 +- .../src/emqx_plugin_libs.app.src | 2 +- .../src/emqx_plugin_libs_rule.erl | 3 +- apps/emqx_resource/src/emqx_resource.app.src | 2 +- changes/ee/feat-10498.en.md | 1 + .../emqx_ee_bridge/src/emqx_ee_bridge.app.src | 2 +- lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl | 17 +- mix.exs | 6 +- rebar.config.erl | 4 + rel/i18n/emqx_bridge_oracle.hocon | 52 ++ rel/i18n/emqx_oracle.hocon | 15 + rel/i18n/zh/emqx_bridge_oracle.hocon | 51 ++ rel/i18n/zh/emqx_oracle.hocon | 15 + scripts/ct/run.sh | 3 + 32 files changed, 1642 insertions(+), 14 deletions(-) create mode 100644 .ci/docker-compose-file/docker-compose-oracle.yaml create mode 100644 apps/emqx_bridge_oracle/BSL.txt create mode 100644 apps/emqx_bridge_oracle/README.md create mode 100644 apps/emqx_bridge_oracle/docker-ct create mode 100644 apps/emqx_bridge_oracle/etc/emqx_bridge_oracle.conf create mode 100644 apps/emqx_bridge_oracle/rebar.config create mode 100644 apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src create mode 100644 apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl create mode 100644 apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl create mode 100644 apps/emqx_oracle/BSL.txt create mode 100644 apps/emqx_oracle/README.md create mode 100644 apps/emqx_oracle/rebar.config create mode 100644 apps/emqx_oracle/src/emqx_oracle.app.src create mode 100644 apps/emqx_oracle/src/emqx_oracle.erl create mode 100644 apps/emqx_oracle/src/emqx_oracle_schema.erl create mode 100644 changes/ee/feat-10498.en.md create mode 100644 rel/i18n/emqx_bridge_oracle.hocon create mode 100644 rel/i18n/emqx_oracle.hocon create mode 100644 rel/i18n/zh/emqx_bridge_oracle.hocon create mode 100644 rel/i18n/zh/emqx_oracle.hocon diff --git a/.ci/docker-compose-file/docker-compose-oracle.yaml b/.ci/docker-compose-file/docker-compose-oracle.yaml new file mode 100644 index 000000000..ea8965846 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-oracle.yaml @@ -0,0 +1,11 @@ +version: '3.9' + +services: + oracle_server: + container_name: oracle + image: oracleinanutshell/oracle-xe-11g:1.0.0 + restart: always + environment: + ORACLE_DISABLE_ASYNCH_IO: true + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/toxiproxy.json b/.ci/docker-compose-file/toxiproxy.json index 9cefcb808..e4fbfa62a 100644 --- a/.ci/docker-compose-file/toxiproxy.json +++ b/.ci/docker-compose-file/toxiproxy.json @@ -119,5 +119,11 @@ "listen": "0.0.0.0:6653", "upstream": "pulsar:6653", "enabled": true + }, + { + "name": "oracle", + "listen": "0.0.0.0:1521", + "upstream": "oracle:1521", + "enabled": true } ] diff --git a/apps/emqx_bridge/src/emqx_bridge.app.src b/apps/emqx_bridge/src/emqx_bridge.app.src index d6c140fef..e408250be 100644 --- a/apps/emqx_bridge/src/emqx_bridge.app.src +++ b/apps/emqx_bridge/src/emqx_bridge.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_bridge, [ {description, "EMQX bridges"}, - {vsn, "0.1.17"}, + {vsn, "0.1.18"}, {registered, [emqx_bridge_sup]}, {mod, {emqx_bridge_app, []}}, {applications, [ diff --git a/apps/emqx_bridge/src/emqx_bridge.erl 
b/apps/emqx_bridge/src/emqx_bridge.erl index fd4e16263..a37b6db3c 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -71,7 +71,8 @@ T == rocketmq; T == cassandra; T == sqlserver; - T == pulsar_producer + T == pulsar_producer; + T == oracle ). load() -> diff --git a/apps/emqx_bridge_oracle/BSL.txt b/apps/emqx_bridge_oracle/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_oracle/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. 
LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_oracle/README.md b/apps/emqx_bridge_oracle/README.md new file mode 100644 index 000000000..d2974b722 --- /dev/null +++ b/apps/emqx_bridge_oracle/README.md @@ -0,0 +1,28 @@ +# EMQX Oracle Database Bridge + +This application houses the Oracle Database bridge for EMQX Enterprise Edition. +It implements the data bridge APIs for interacting with an Oracle Database Bridge. + + +# Documentation + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information. + + +## Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +## License + +See [BSL](./BSL.txt). diff --git a/apps/emqx_bridge_oracle/docker-ct b/apps/emqx_bridge_oracle/docker-ct new file mode 100644 index 000000000..c24dc4bc9 --- /dev/null +++ b/apps/emqx_bridge_oracle/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +oracle diff --git a/apps/emqx_bridge_oracle/etc/emqx_bridge_oracle.conf b/apps/emqx_bridge_oracle/etc/emqx_bridge_oracle.conf new file mode 100644 index 000000000..e69de29bb diff --git a/apps/emqx_bridge_oracle/rebar.config b/apps/emqx_bridge_oracle/rebar.config new file mode 100644 index 000000000..c238546c4 --- /dev/null +++ b/apps/emqx_bridge_oracle/rebar.config @@ -0,0 +1,13 @@ +%% -*- mode: erlang; -*- + +{erl_opts, [debug_info]}. +{deps, [ {emqx_oracle, {path, "../../apps/emqx_oracle"}} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + % {config, "config/sys.config"}, + {apps, [emqx_bridge_oracle]} +]}. 
diff --git a/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src new file mode 100644 index 000000000..4f81c2110 --- /dev/null +++ b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src @@ -0,0 +1,14 @@ +{application, emqx_bridge_oracle, [ + {description, "EMQX Enterprise Oracle Database Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + emqx_oracle + ]}, + {env, []}, + {modules, []}, + + {links, []} +]}. diff --git a/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl new file mode 100644 index 000000000..8a87f02ba --- /dev/null +++ b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl @@ -0,0 +1,109 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_oracle). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-define(DEFAULT_SQL, << + "insert into t_mqtt_msg(msgid, topic, qos, payload)" + "values (${id}, ${topic}, ${qos}, ${payload})" +>>). + +conn_bridge_examples(Method) -> + [ + #{ + <<"oracle">> => #{ + summary => <<"Oracle Database Bridge">>, + value => values(Method) + } + } + ]. + +values(_Method) -> + #{ + enable => true, + type => oracle, + name => <<"foo">>, + server => <<"127.0.0.1:1521">>, + pool_size => 8, + database => <<"ORCL">>, + sid => <<"ORCL">>, + username => <<"root">>, + password => <<"******">>, + sql => ?DEFAULT_SQL, + local_topic => <<"local/topic/#">>, + resource_opts => #{ + worker_pool_size => 8, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => async, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions + +namespace() -> "bridge_oracle". + +roots() -> []. + +fields("config") -> + [ + {enable, + hoconsc:mk( + boolean(), + #{desc => ?DESC("config_enable"), default => true} + )}, + {sql, + hoconsc:mk( + binary(), + #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>} + )}, + {local_topic, + hoconsc:mk( + binary(), + #{desc => ?DESC("local_topic"), default => undefined} + )} + ] ++ emqx_resource_schema:fields("resource_opts") ++ + (emqx_oracle_schema:fields(config) -- + emqx_connector_schema_lib:prepare_statement_fields()); +fields("post") -> + fields("post", oracle); +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"). + +fields("post", Type) -> + [type_field(Type), name_field() | fields("config")]. + +desc("config") -> + ?DESC("desc_config"); +desc(_) -> + undefined. + +%% ------------------------------------------------------------------------------------------------- + +type_field(Type) -> + {type, hoconsc:mk(hoconsc:enum([Type]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, hoconsc:mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. 
diff --git a/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl b/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl new file mode 100644 index 000000000..de77b26de --- /dev/null +++ b/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl @@ -0,0 +1,594 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_oracle_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-import(emqx_common_test_helpers, [on_exit/1]). + +-define(BRIDGE_TYPE_BIN, <<"oracle">>). +-define(APPS, [emqx_bridge, emqx_resource, emqx_rule_engine, emqx_oracle, emqx_bridge_oracle]). +-define(DATABASE, "XE"). +-define(RULE_TOPIC, "mqtt/rule"). +% -define(RULE_TOPIC_BIN, <>). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, plain} + ]. + +groups() -> + AllTCs = emqx_common_test_helpers:all(?MODULE), + [ + {plain, AllTCs} + ]. + +only_once_tests() -> + [t_create_via_http]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?APPS)), + _ = application:stop(emqx_connector), + ok. + +init_per_group(plain = Type, Config) -> + OracleHost = os:getenv("ORACLE_PLAIN_HOST", "toxiproxy.emqx.net"), + OraclePort = list_to_integer(os:getenv("ORACLE_PLAIN_PORT", "1521")), + ProxyName = "oracle", + case emqx_common_test_helpers:is_tcp_server_available(OracleHost, OraclePort) of + true -> + Config1 = common_init_per_group(), + [ + {proxy_name, ProxyName}, + {oracle_host, OracleHost}, + {oracle_port, OraclePort}, + {connection_type, Type} + | Config1 ++ Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_oracle); + _ -> + {skip, no_oracle} + end + end; +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when + Group =:= plain +-> + common_end_per_group(Config), + ok; +end_per_group(_Group, _Config) -> + ok. + +common_init_per_group() -> + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + application:load(emqx_bridge), + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps(?APPS), + {ok, _} = application:ensure_all_started(emqx_connector), + emqx_mgmt_api_test_util:init_suite(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>, + [ + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {mqtt_topic, MQTTTopic} + ]. + +common_end_per_group(Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + ok. + +init_per_testcase(TestCase, Config) -> + common_init_per_testcase(TestCase, Config). + +end_per_testcase(_Testcase, Config) -> + common_end_per_testcase(_Testcase, Config). 
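+%% The wrappers above delegate to the helpers below: common_init_per_testcase/2
+%% renders a fresh bridge config (unique name and topic) via oracle_config/3 and
+%% starts a snabbkaffe trace; common_end_per_testcase/2 resets the proxy,
+%% deletes all bridges and stops the trace.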
+ +common_init_per_testcase(TestCase, Config0) -> + ct:timetrap(timer:seconds(60)), + delete_all_bridges(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + OracleTopic = + << + (atom_to_binary(TestCase))/binary, + UniqueNum/binary + >>, + ConnectionType = ?config(connection_type, Config0), + Config = [{oracle_topic, OracleTopic} | Config0], + {Name, ConfigString, OracleConfig} = oracle_config( + TestCase, ConnectionType, Config + ), + ok = snabbkaffe:start_trace(), + [ + {oracle_name, Name}, + {oracle_config_string, ConfigString}, + {oracle_config, OracleConfig} + | Config + ]. + +common_end_per_testcase(_Testcase, Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + %% in CI, apparently this needs more time since the + %% machines struggle with all the containers running... + emqx_common_test_helpers:call_janitor(60_000), + ok = snabbkaffe:stop(), + ok + end. + +delete_all_bridges() -> + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ +sql_insert_template_for_bridge() -> + "INSERT INTO mqtt_test(topic, msgid, payload, retain) VALUES (${topic}, ${id}, ${payload}, ${retain})". + +sql_create_table() -> + "CREATE TABLE mqtt_test (topic VARCHAR2(255), msgid VARCHAR2(64), payload NCLOB, retain NUMBER(1))". + +sql_drop_table() -> + "DROP TABLE mqtt_test". + +reset_table(Config) -> + ResourceId = resource_id(Config), + _ = emqx_resource:simple_sync_query(ResourceId, {sql, sql_drop_table()}), + {ok, [{proc_result, 0, _}]} = emqx_resource:simple_sync_query( + ResourceId, {sql, sql_create_table()} + ), + ok. + +drop_table(Config) -> + ResourceId = resource_id(Config), + emqx_resource:simple_sync_query(ResourceId, {query, sql_drop_table()}), + ok. + +oracle_config(TestCase, _ConnectionType, Config) -> + UniqueNum = integer_to_binary(erlang:unique_integer()), + OracleHost = ?config(oracle_host, Config), + OraclePort = ?config(oracle_port, Config), + Name = << + (atom_to_binary(TestCase))/binary, UniqueNum/binary + >>, + ServerURL = iolist_to_binary([ + OracleHost, + ":", + integer_to_binary(OraclePort) + ]), + ConfigString = + io_lib:format( + "bridges.oracle.~s {\n" + " enable = true\n" + " database = \"~s\"\n" + " sid = \"~s\"\n" + " server = \"~s\"\n" + " username = \"system\"\n" + " password = \"oracle\"\n" + " pool_size = 1\n" + " sql = \"~s\"\n" + " resource_opts = {\n" + " auto_restart_interval = 5000\n" + " request_timeout = 30000\n" + " query_mode = \"async\"\n" + " enable_batch = true\n" + " batch_size = 3\n" + " batch_time = \"3s\"\n" + " worker_pool_size = 1\n" + " }\n" + "}\n", + [ + Name, + ?DATABASE, + ?DATABASE, + ServerURL, + sql_insert_template_for_bridge() + ] + ), + {Name, ConfigString, parse_and_check(ConfigString, Name)}. + +parse_and_check(ConfigString, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + TypeBin = ?BRIDGE_TYPE_BIN, + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf, + Config. 
+ +resource_id(Config) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + emqx_bridge_resource:resource_id(Type, Name). + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + OracleConfig0 = ?config(oracle_config, Config), + OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides), + emqx_bridge:create(Type, Name, OracleConfig). + +create_bridge_api(Config) -> + create_bridge_api(Config, _Overrides = #{}). + +create_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + OracleConfig0 = ?config(oracle_config, Config), + OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides), + Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("creating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {Status, Headers, Body0}} -> + {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}}; + Error -> + Error + end, + ct:pal("bridge create result: ~p", [Res]), + Res. + +update_bridge_api(Config) -> + update_bridge_api(Config, _Overrides = #{}). + +update_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + OracleConfig0 = ?config(oracle_config, Config), + OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides), + BridgeId = emqx_bridge_resource:bridge_id(TypeBin, Name), + Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("updating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, Params, Opts) of + {ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])}; + Error -> Error + end, + ct:pal("bridge update result: ~p", [Res]), + Res. + +probe_bridge_api(Config) -> + probe_bridge_api(Config, _Overrides = #{}). + +probe_bridge_api(Config, _Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + OracleConfig = ?config(oracle_config, Config), + Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("probing bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0}; + Error -> Error + end, + ct:pal("bridge probe result: ~p", [Res]), + Res. 
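+%% The helper below creates a rule through the REST API whose single action is
+%% the Oracle bridge, selecting everything published to ?RULE_TOPIC.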
+ +create_rule_and_action_http(Config) -> + OracleName = ?config(oracle_name, Config), + BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, OracleName), + Params = #{ + enable => true, + sql => <<"SELECT * FROM \"", ?RULE_TOPIC, "\"">>, + actions => [BridgeId] + }, + Path = emqx_mgmt_api_test_util:api_path(["rules"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + ct:pal("rule action params: ~p", [Params]), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +% Under normal operations, the bridge will be called async via +% `simple_async_query'. +t_sync_query(Config) -> + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + reset_table(Config), + MsgId = erlang:unique_integer(), + Params = #{ + topic => ?config(mqtt_topic, Config), + id => MsgId, + payload => ?config(oracle_name, Config), + retain => true + }, + Message = {send_message, Params}, + ?assertEqual( + {ok, [{affected_rows, 1}]}, emqx_resource:simple_sync_query(ResourceId, Message) + ), + ok + end, + [] + ), + ok. + +t_async_query(Config) -> + Overrides = #{ + <<"resource_opts">> => #{ + <<"enable_batch">> => <<"false">>, + <<"batch_size">> => 1 + } + }, + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config, Overrides)), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + reset_table(Config), + MsgId = erlang:unique_integer(), + Params = #{ + topic => ?config(mqtt_topic, Config), + id => MsgId, + payload => ?config(oracle_name, Config), + retain => false + }, + Message = {send_message, Params}, + ?assertMatch( + { + ok, + {ok, #{result := {ok, [{affected_rows, 1}]}}} + }, + ?wait_async_action( + emqx_resource:query(ResourceId, Message), + #{?snk_kind := oracle_query}, + 5_000 + ) + ), + ok + end, + [] + ), + ok. + +t_batch_sync_query(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 30, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + reset_table(Config), + MsgId = erlang:unique_integer(), + Params = #{ + topic => ?config(mqtt_topic, Config), + id => MsgId, + payload => ?config(oracle_name, Config), + retain => false + }, + % Send 3 async messages while resource is down. When it comes back, these messages + % will be delivered in sync way. If we try to send sync messages directly, it will + % be sent async as callback_mode is set to async_if_possible. 
+ Message = {send_message, Params}, + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ct:sleep(1000), + emqx_resource:query(ResourceId, Message), + emqx_resource:query(ResourceId, Message), + emqx_resource:query(ResourceId, Message) + end), + ?retry( + _Sleep = 1_000, + _Attempts = 30, + ?assertMatch( + {ok, [{result_set, _, _, [[{3}]]}]}, + emqx_resource:simple_sync_query( + ResourceId, {query, "SELECT COUNT(*) FROM mqtt_test"} + ) + ) + ), + ok + end, + [] + ), + ok. + +t_batch_async_query(Config) -> + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + reset_table(Config), + MsgId = erlang:unique_integer(), + Params = #{ + topic => ?config(mqtt_topic, Config), + id => MsgId, + payload => ?config(oracle_name, Config), + retain => false + }, + Message = {send_message, Params}, + ?assertMatch( + { + ok, + {ok, #{result := {ok, [{affected_rows, 1}]}}} + }, + ?wait_async_action( + emqx_resource:query(ResourceId, Message), + #{?snk_kind := oracle_batch_query}, + 5_000 + ) + ), + ok + end, + [] + ), + ok. + +t_create_via_http(Config) -> + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + + %% lightweight matrix testing some configs + ?assertMatch( + {ok, _}, + update_bridge_api( + Config, + #{ + <<"resource_opts">> => + #{<<"batch_size">> => 4} + } + ) + ), + ?assertMatch( + {ok, _}, + update_bridge_api( + Config, + #{ + <<"resource_opts">> => + #{<<"batch_time">> => <<"4s">>} + } + ) + ), + ok + end, + [] + ), + ok. + +t_start_stop(Config) -> + OracleName = ?config(oracle_name, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge(Config)), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + + %% Check that the bridge probe API doesn't leak atoms. + ProbeRes0 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), + AtomsBefore = erlang:system_info(atom_count), + %% Probe again; shouldn't have created more atoms. + ProbeRes1 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), + AtomsAfter = erlang:system_info(atom_count), + ?assertEqual(AtomsBefore, AtomsAfter), + + %% Now stop the bridge. + ?assertMatch( + {{ok, _}, {ok, _}}, + ?wait_async_action( + emqx_bridge:disable_enable(disable, ?BRIDGE_TYPE_BIN, OracleName), + #{?snk_kind := oracle_bridge_stopped}, + 5_000 + ) + ), + + ok + end, + fun(Trace) -> + %% one for each probe, one for real + ?assertMatch([_, _, _], ?of_kind(oracle_bridge_stopped, Trace)), + ok + end + ), + ok. + +t_on_get_status(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = resource_id(Config), + ?assertMatch({ok, _}, create_bridge(Config)), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. 
+ ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ct:sleep(500), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)) + end), + %% Check that it recovers itself. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + ok. diff --git a/apps/emqx_oracle/BSL.txt b/apps/emqx_oracle/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_oracle/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). 
+ +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_oracle/README.md b/apps/emqx_oracle/README.md new file mode 100644 index 000000000..873d52259 --- /dev/null +++ b/apps/emqx_oracle/README.md @@ -0,0 +1,14 @@ +# Oracle Database Connector + +This application houses the Oracle Database connector for EMQX Enterprise Edition. +It provides the APIs to connect to Oracle Database. + +So far it is only used to insert messages as data bridge. + +## Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + +## License + +See [BSL](./BSL.txt). diff --git a/apps/emqx_oracle/rebar.config b/apps/emqx_oracle/rebar.config new file mode 100644 index 000000000..14461ba34 --- /dev/null +++ b/apps/emqx_oracle/rebar.config @@ -0,0 +1,7 @@ +%% -*- mode: erlang; -*- + +{erl_opts, [debug_info]}. +{deps, [ {jamdb_oracle, {git, "https://github.com/emqx/jamdb_oracle", {tag, "0.4.9.4"}}} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + ]}. diff --git a/apps/emqx_oracle/src/emqx_oracle.app.src b/apps/emqx_oracle/src/emqx_oracle.app.src new file mode 100644 index 000000000..fa48e8479 --- /dev/null +++ b/apps/emqx_oracle/src/emqx_oracle.app.src @@ -0,0 +1,14 @@ +{application, emqx_oracle, [ + {description, "EMQX Enterprise Oracle Database Connector"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + jamdb_oracle + ]}, + {env, []}, + {modules, []}, + + {links, []} +]}. diff --git a/apps/emqx_oracle/src/emqx_oracle.erl b/apps/emqx_oracle/src/emqx_oracle.erl new file mode 100644 index 000000000..c39a6a6d7 --- /dev/null +++ b/apps/emqx_oracle/src/emqx_oracle.erl @@ -0,0 +1,434 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_oracle). + +-behaviour(emqx_resource). + +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-define(ORACLE_DEFAULT_PORT, 1521). 
+ +%%==================================================================== +%% Exports +%%==================================================================== + +%% callbacks for behaviour emqx_resource +-export([ + callback_mode/0, + is_buffer_supported/0, + on_start/2, + on_stop/2, + on_query/3, + on_batch_query/3, + on_query_async/4, + on_batch_query_async/4, + on_get_status/2 +]). + +%% callbacks for ecpool +-export([connect/1, prepare_sql_to_conn/2]). + +%% Internal exports used to execute code with ecpool worker +-export([ + query/3, + execute_batch/3, + do_async_reply/2, + do_get_status/1 +]). + +-export([ + oracle_host_options/0 +]). + +-define(ACTION_SEND_MESSAGE, send_message). + +-define(SYNC_QUERY_MODE, no_handover). +-define(ASYNC_QUERY_MODE(REPLY), {handover_async, {?MODULE, do_async_reply, [REPLY]}}). + +-define(ORACLE_HOST_OPTIONS, #{ + default_port => ?ORACLE_DEFAULT_PORT +}). + +-define(MAX_CURSORS, 10). +-define(DEFAULT_POOL_SIZE, 8). +-define(OPT_TIMEOUT, 30000). + +-type prepares() :: #{atom() => binary()}. +-type params_tokens() :: #{atom() => list()}. + +-type state() :: + #{ + pool_name := binary(), + prepare_sql := prepares(), + params_tokens := params_tokens(), + batch_params_tokens := params_tokens() + }. + +callback_mode() -> async_if_possible. + +is_buffer_supported() -> false. + +-spec on_start(binary(), hoconsc:config()) -> {ok, state()} | {error, _}. +on_start( + InstId, + #{ + server := Server, + database := DB, + sid := Sid, + username := User + } = Config +) -> + ?SLOG(info, #{ + msg => "starting_oracle_connector", + connector => InstId, + config => emqx_utils:redact(Config) + }), + ?tp(oracle_bridge_started, #{instance_id => InstId, config => Config}), + {ok, _} = application:ensure_all_started(ecpool), + {ok, _} = application:ensure_all_started(jamdb_oracle), + jamdb_oracle_conn:set_max_cursors_number(?MAX_CURSORS), + + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, oracle_host_options()), + ServiceName = maps:get(<<"service_name">>, Config, Sid), + Options = [ + {host, Host}, + {port, Port}, + {user, emqx_plugin_libs_rule:str(User)}, + {password, emqx_secret:wrap(maps:get(password, Config, ""))}, + {sid, emqx_plugin_libs_rule:str(Sid)}, + {service_name, emqx_plugin_libs_rule:str(ServiceName)}, + {database, DB}, + {pool_size, maps:get(<<"pool_size">>, Config, ?DEFAULT_POOL_SIZE)}, + {timeout, ?OPT_TIMEOUT}, + {app_name, "EMQX Data To Oracle Database Action"} + ], + PoolName = InstId, + Prepares = parse_prepare_sql(Config), + InitState = #{pool_name => PoolName, prepare_statement => #{}}, + State = maps:merge(InitState, Prepares), + case emqx_resource_pool:start(InstId, ?MODULE, Options) of + ok -> + {ok, init_prepare(State)}; + {error, Reason} -> + ?tp( + oracle_connector_start_failed, + #{error => Reason} + ), + {error, Reason} + end. + +on_stop(InstId, #{pool_name := PoolName}) -> + ?SLOG(info, #{ + msg => "stopping_oracle_connector", + connector => InstId + }), + ?tp(oracle_bridge_stopped, #{instance_id => InstId}), + emqx_resource_pool:stop(PoolName). 
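on_start/2 above relies on emqx_schema:parse_server/2, driven by oracle_host_options/0, to split the configured server value and fill in the default port 1521. A minimal sketch of that step, assuming parse_server/2 returns the map shape matched in on_start/2 (the exact type of the hostname value is not spelled out in this patch):

    %% A minimal sketch; `oracle_endpoint' is not part of this module.
    oracle_endpoint(Server) ->
        #{hostname := Host, port := Port} =
            emqx_schema:parse_server(Server, emqx_oracle:oracle_host_options()),
        {Host, Port}.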
+ +on_query(InstId, {TypeOrKey, NameOrSQL}, #{pool_name := _PoolName} = State) -> + on_query(InstId, {TypeOrKey, NameOrSQL, []}, State); +on_query( + InstId, + {TypeOrKey, NameOrSQL, Params}, + #{pool_name := PoolName} = State +) -> + ?SLOG(debug, #{ + msg => "oracle database connector received sql query", + connector => InstId, + type => TypeOrKey, + sql => NameOrSQL, + state => State + }), + Type = query, + {NameOrSQL2, Data} = proc_sql_params(TypeOrKey, NameOrSQL, Params, State), + Res = on_sql_query(InstId, PoolName, Type, ?SYNC_QUERY_MODE, NameOrSQL2, Data), + handle_result(Res). + +on_query_async(InstId, {TypeOrKey, NameOrSQL}, Reply, State) -> + on_query_async(InstId, {TypeOrKey, NameOrSQL, []}, Reply, State); +on_query_async( + InstId, {TypeOrKey, NameOrSQL, Params} = Query, Reply, #{pool_name := PoolName} = State +) -> + ?SLOG(debug, #{ + msg => "oracle database connector received async sql query", + connector => InstId, + query => Query, + reply => Reply, + state => State + }), + ApplyMode = ?ASYNC_QUERY_MODE(Reply), + Type = query, + {NameOrSQL2, Data} = proc_sql_params(TypeOrKey, NameOrSQL, Params, State), + Res = on_sql_query(InstId, PoolName, Type, ApplyMode, NameOrSQL2, Data), + handle_result(Res). + +on_batch_query( + InstId, + BatchReq, + #{pool_name := PoolName, params_tokens := Tokens, prepare_statement := Sts} = State +) -> + case BatchReq of + [{Key, _} = Request | _] -> + BinKey = to_bin(Key), + case maps:get(BinKey, Tokens, undefined) of + undefined -> + Log = #{ + connector => InstId, + first_request => Request, + state => State, + msg => "batch prepare not implemented" + }, + ?SLOG(error, Log), + {error, {unrecoverable_error, batch_prepare_not_implemented}}; + TokenList -> + {_, Datas} = lists:unzip(BatchReq), + Datas2 = [emqx_plugin_libs_rule:proc_sql(TokenList, Data) || Data <- Datas], + St = maps:get(BinKey, Sts), + case + on_sql_query(InstId, PoolName, execute_batch, ?SYNC_QUERY_MODE, St, Datas2) + of + {ok, Results} -> + handle_batch_result(Results, 0); + Result -> + Result + end + end; + _ -> + Log = #{ + connector => InstId, + request => BatchReq, + state => State, + msg => "invalid request" + }, + ?SLOG(error, Log), + {error, {unrecoverable_error, invalid_request}} + end. + +on_batch_query_async( + InstId, + BatchReq, + Reply, + #{pool_name := PoolName, params_tokens := Tokens, prepare_statement := Sts} = State +) -> + case BatchReq of + [{Key, _} = Request | _] -> + BinKey = to_bin(Key), + case maps:get(BinKey, Tokens, undefined) of + undefined -> + Log = #{ + connector => InstId, + first_request => Request, + state => State, + msg => "batch prepare not implemented" + }, + ?SLOG(error, Log), + {error, {unrecoverable_error, batch_prepare_not_implemented}}; + TokenList -> + {_, Datas} = lists:unzip(BatchReq), + Datas2 = [emqx_plugin_libs_rule:proc_sql(TokenList, Data) || Data <- Datas], + St = maps:get(BinKey, Sts), + case + on_sql_query( + InstId, PoolName, execute_batch, ?ASYNC_QUERY_MODE(Reply), St, Datas2 + ) + of + {ok, Results} -> + handle_batch_result(Results, 0); + Result -> + Result + end + end; + _ -> + Log = #{ + connector => InstId, + request => BatchReq, + state => State, + msg => "invalid request" + }, + ?SLOG(error, Log), + {error, {unrecoverable_error, invalid_request}} + end. 
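Both batch callbacks above expect the buffered requests as `{Key, Data}` pairs sharing a single key (normally `send_message`), which selects the prepared statement and token list produced by parse_prepare_sql/1 further down. Roughly, the input they handle looks like this sketch (field values are illustrative):

    %% Illustrative only: a batch as assembled by the resource buffer workers.
    example_batch() ->
        [
            {send_message, #{topic => <<"t/1">>, id => <<"m1">>, payload => <<"p1">>, retain => false}},
            {send_message, #{topic => <<"t/2">>, id => <<"m2">>, payload => <<"p2">>, retain => true}}
        ].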
+ +proc_sql_params(query, SQLOrKey, Params, _State) -> + {SQLOrKey, Params}; +proc_sql_params(TypeOrKey, SQLOrData, Params, #{ + params_tokens := ParamsTokens, prepare_sql := PrepareSql +}) -> + Key = to_bin(TypeOrKey), + case maps:get(Key, ParamsTokens, undefined) of + undefined -> + {SQLOrData, Params}; + Tokens -> + case maps:get(Key, PrepareSql, undefined) of + undefined -> + {SQLOrData, Params}; + Sql -> + {Sql, emqx_plugin_libs_rule:proc_sql(Tokens, SQLOrData)} + end + end. + +on_sql_query(InstId, PoolName, Type, ApplyMode, NameOrSQL, Data) -> + case ecpool:pick_and_do(PoolName, {?MODULE, Type, [NameOrSQL, Data]}, ApplyMode) of + {error, Reason} = Result -> + ?tp( + oracle_connector_query_return, + #{error => Reason} + ), + ?SLOG(error, #{ + msg => "oracle database connector do sql query failed", + connector => InstId, + type => Type, + sql => NameOrSQL, + reason => Reason + }), + Result; + Result -> + ?tp( + oracle_connector_query_return, + #{result => Result} + ), + Result + end. + +on_get_status(_InstId, #{pool_name := Pool} = State) -> + case emqx_resource_pool:health_check_workers(Pool, fun ?MODULE:do_get_status/1) of + true -> + case do_check_prepares(State) of + ok -> + connected; + {ok, NState} -> + %% return new state with prepared statements + {connected, NState} + end; + false -> + disconnected + end. + +do_get_status(Conn) -> + ok == element(1, jamdb_oracle:sql_query(Conn, "select 1 from dual")). + +do_check_prepares(#{prepare_sql := Prepares}) when is_map(Prepares) -> + ok; +do_check_prepares(State = #{pool_name := PoolName, prepare_sql := {error, Prepares}}) -> + {ok, Sts} = prepare_sql(Prepares, PoolName), + {ok, State#{prepare_sql => Prepares, prepare_statement := Sts}}. + +%% =================================================================== + +oracle_host_options() -> + ?ORACLE_HOST_OPTIONS. + +connect(Opts) -> + Password = emqx_secret:unwrap(proplists:get_value(password, Opts)), + NewOpts = lists:keyreplace(password, 1, Opts, {password, Password}), + jamdb_oracle:start_link(NewOpts). + +sql_query_to_str(SqlQuery) -> + emqx_plugin_libs_rule:str(SqlQuery). + +sql_params_to_str(Params) when is_list(Params) -> + lists:map( + fun + (false) -> "0"; + (true) -> "1"; + (Value) -> emqx_plugin_libs_rule:str(Value) + end, + Params + ). + +query(Conn, SQL, Params) -> + Ret = jamdb_oracle:sql_query(Conn, {sql_query_to_str(SQL), sql_params_to_str(Params)}), + ?tp(oracle_query, #{conn => Conn, sql => SQL, params => Params, result => Ret}), + handle_result(Ret). + +execute_batch(Conn, SQL, ParamsList) -> + ParamsListStr = lists:map(fun sql_params_to_str/1, ParamsList), + Ret = jamdb_oracle:sql_query(Conn, {batch, sql_query_to_str(SQL), ParamsListStr}), + ?tp(oracle_batch_query, #{conn => Conn, sql => SQL, params => ParamsList, result => Ret}), + handle_result(Ret). + +parse_prepare_sql(Config) -> + SQL = + case maps:get(prepare_statement, Config, undefined) of + undefined -> + case maps:get(sql, Config, undefined) of + undefined -> #{}; + Template -> #{<<"send_message">> => Template} + end; + Any -> + Any + end, + parse_prepare_sql(maps:to_list(SQL), #{}, #{}). + +parse_prepare_sql([{Key, H} | T], Prepares, Tokens) -> + {PrepareSQL, ParamsTokens} = emqx_plugin_libs_rule:preproc_sql(H, ':n'), + parse_prepare_sql( + T, Prepares#{Key => PrepareSQL}, Tokens#{Key => ParamsTokens} + ); +parse_prepare_sql([], Prepares, Tokens) -> + #{ + prepare_sql => Prepares, + params_tokens => Tokens + }. 
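parse_prepare_sql/1 above stores the configured template under the `<<"send_message">>` key and preprocesses it with the ':n' mode, so `${...}` placeholders become Oracle-style positional bind variables. For the insert template used by the bridge tests earlier in this patch, the expected outcome is roughly as sketched here (the exact token representation is internal to emqx_plugin_libs_rule and is an assumption):

    %% A sketch of the expected preprocessing result; not part of this module.
    example_prepare() ->
        Template =
            <<"INSERT INTO mqtt_test(topic, msgid, payload, retain) "
              "VALUES (${topic}, ${id}, ${payload}, ${retain})">>,
        {PrepareSQL, Tokens} = emqx_plugin_libs_rule:preproc_sql(Template, ':n'),
        %% PrepareSQL is expected to read
        %%   "INSERT INTO mqtt_test(topic, msgid, payload, retain) VALUES (:1, :2, :3, :4)"
        %% and Tokens later feeds emqx_plugin_libs_rule:proc_sql/2 with the
        %% per-message values in that order.
        {PrepareSQL, Tokens}.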
+ +init_prepare(State = #{prepare_sql := Prepares, pool_name := PoolName}) -> + {ok, Sts} = prepare_sql(Prepares, PoolName), + State#{prepare_statement := Sts}. + +prepare_sql(Prepares, PoolName) when is_map(Prepares) -> + prepare_sql(maps:to_list(Prepares), PoolName); +prepare_sql(Prepares, PoolName) -> + Data = do_prepare_sql(Prepares, PoolName), + {ok, _Sts} = Data, + ecpool:add_reconnect_callback(PoolName, {?MODULE, prepare_sql_to_conn, [Prepares]}), + Data. + +do_prepare_sql(Prepares, PoolName) -> + do_prepare_sql(ecpool:workers(PoolName), Prepares, PoolName, #{}). + +do_prepare_sql([{_Name, Worker} | T], Prepares, PoolName, _LastSts) -> + {ok, Conn} = ecpool_worker:client(Worker), + {ok, Sts} = prepare_sql_to_conn(Conn, Prepares), + do_prepare_sql(T, Prepares, PoolName, Sts); +do_prepare_sql([], _Prepares, _PoolName, LastSts) -> + {ok, LastSts}. + +prepare_sql_to_conn(Conn, Prepares) -> + prepare_sql_to_conn(Conn, Prepares, #{}). + +prepare_sql_to_conn(Conn, [], Statements) when is_pid(Conn) -> {ok, Statements}; +prepare_sql_to_conn(Conn, [{Key, SQL} | PrepareList], Statements) when is_pid(Conn) -> + LogMeta = #{msg => "Oracle Database Prepare Statement", name => Key, prepare_sql => SQL}, + ?SLOG(info, LogMeta), + prepare_sql_to_conn(Conn, PrepareList, Statements#{Key => SQL}). + +to_bin(Bin) when is_binary(Bin) -> + Bin; +to_bin(Atom) when is_atom(Atom) -> + erlang:atom_to_binary(Atom). + +handle_result({error, disconnected}) -> + {error, {recoverable_error, disconnected}}; +handle_result({error, Error}) -> + {error, {unrecoverable_error, Error}}; +handle_result({error, socket, closed} = Error) -> + {error, {recoverable_error, Error}}; +handle_result({error, Type, Reason}) -> + {error, {unrecoverable_error, {Type, Reason}}}; +handle_result(Res) -> + Res. + +handle_batch_result([{affected_rows, RowCount} | Rest], Acc) -> + handle_batch_result(Rest, Acc + RowCount); +handle_batch_result([{proc_result, RetCode, _Rows} | Rest], Acc) when RetCode =:= 0 -> + handle_batch_result(Rest, Acc); +handle_batch_result([{proc_result, RetCode, Reason} | _Rest], _Acc) -> + {error, {unrecoverable_error, {RetCode, Reason}}}; +handle_batch_result([], Acc) -> + {ok, Acc}. + +do_async_reply(Result, {ReplyFun, [Context]}) -> + ReplyFun(Context, Result). diff --git a/apps/emqx_oracle/src/emqx_oracle_schema.erl b/apps/emqx_oracle/src/emqx_oracle_schema.erl new file mode 100644 index 000000000..cfa74054a --- /dev/null +++ b/apps/emqx_oracle/src/emqx_oracle_schema.erl @@ -0,0 +1,33 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_oracle_schema). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-define(REF_MODULE, emqx_oracle). + +%% Hocon config schema exports +-export([ + roots/0, + fields/1 +]). + +roots() -> + [{config, #{type => hoconsc:ref(?REF_MODULE, config)}}]. + +fields(config) -> + [{server, server()}, {sid, fun sid/1}] ++ + emqx_connector_schema_lib:relational_db_fields() ++ + emqx_connector_schema_lib:prepare_statement_fields(). + +server() -> + Meta = #{desc => ?DESC(?REF_MODULE, "server")}, + emqx_schema:servers_sc(Meta, (?REF_MODULE):oracle_host_options()). + +sid(type) -> binary(); +sid(desc) -> ?DESC(?REF_MODULE, "sid"); +sid(required) -> true; +sid(_) -> undefined. 
diff --git a/apps/emqx_plugin_libs/src/emqx_placeholder.erl b/apps/emqx_plugin_libs/src/emqx_placeholder.erl index 18ef9e8fb..dcd666f5b 100644 --- a/apps/emqx_plugin_libs/src/emqx_placeholder.erl +++ b/apps/emqx_plugin_libs/src/emqx_placeholder.erl @@ -69,7 +69,7 @@ -type preproc_sql_opts() :: #{ placeholders => list(binary()), - replace_with => '?' | '$n', + replace_with => '?' | '$n' | ':n', strip_double_quote => boolean() }. @@ -149,7 +149,7 @@ proc_cmd(Tokens, Data, Opts) -> preproc_sql(Sql) -> preproc_sql(Sql, '?'). --spec preproc_sql(binary(), '?' | '$n' | preproc_sql_opts()) -> +-spec preproc_sql(binary(), '?' | '$n' | ':n' | preproc_sql_opts()) -> {prepare_statement_key(), tmpl_token()}. preproc_sql(Sql, ReplaceWith) when is_atom(ReplaceWith) -> preproc_sql(Sql, #{replace_with => ReplaceWith}); @@ -316,13 +316,17 @@ preproc_tmpl_deep_map_key(Key, _) -> replace_with(Tmpl, RE, '?') -> re:replace(Tmpl, RE, "?", [{return, binary}, global]); replace_with(Tmpl, RE, '$n') -> + replace_with(Tmpl, RE, <<"$">>); +replace_with(Tmpl, RE, ':n') -> + replace_with(Tmpl, RE, <<":">>); +replace_with(Tmpl, RE, String) when is_binary(String) -> Parts = re:split(Tmpl, RE, [{return, binary}, trim, group]), {Res, _} = lists:foldl( fun ([Tkn, _Phld], {Acc, Seq}) -> Seq1 = erlang:integer_to_binary(Seq), - {<>, Seq + 1}; + {<>, Seq + 1}; ([Tkn], {Acc, Seq}) -> {<>, Seq} end, @@ -330,6 +334,7 @@ replace_with(Tmpl, RE, '$n') -> Parts ), Res. + parse_nested(<<".", R/binary>>) -> %% ignore the root . parse_nested(R); diff --git a/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src b/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src index 24b5a3240..bfd7e68fa 100644 --- a/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src +++ b/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_plugin_libs, [ {description, "EMQX Plugin utility libs"}, - {vsn, "4.3.9"}, + {vsn, "4.3.10"}, {modules, []}, {applications, [kernel, stdlib]}, {env, []} diff --git a/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl b/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl index 8844fe586..9a4c01a2b 100644 --- a/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl +++ b/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl @@ -105,9 +105,8 @@ proc_cmd(Tokens, Data, Opts) -> preproc_sql(Sql) -> emqx_placeholder:preproc_sql(Sql). --spec preproc_sql(Sql :: binary(), ReplaceWith :: '?' | '$n') -> +-spec preproc_sql(Sql :: binary(), ReplaceWith :: '?' | '$n' | ':n') -> {prepare_statement_key(), tmpl_token()}. - preproc_sql(Sql, ReplaceWith) -> emqx_placeholder:preproc_sql(Sql, ReplaceWith). diff --git a/apps/emqx_resource/src/emqx_resource.app.src b/apps/emqx_resource/src/emqx_resource.app.src index 2553e6dd8..3e264cb3e 100644 --- a/apps/emqx_resource/src/emqx_resource.app.src +++ b/apps/emqx_resource/src/emqx_resource.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_resource, [ {description, "Manager for all external resources"}, - {vsn, "0.1.14"}, + {vsn, "0.1.15"}, {registered, []}, {mod, {emqx_resource_app, []}}, {applications, [ diff --git a/changes/ee/feat-10498.en.md b/changes/ee/feat-10498.en.md new file mode 100644 index 000000000..7222f8957 --- /dev/null +++ b/changes/ee/feat-10498.en.md @@ -0,0 +1 @@ +Implement Oracle Database Bridge, which supports publishing messages to Oracle Database from MQTT topics. 
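The generalized replace_with/3 in the emqx_placeholder change above re-joins the split template with the chosen prefix ("$" for '$n', ":" for the new ':n' mode) followed by a running sequence number, while the '?' mode keeps using a plain re:replace. The binary construction inside the fold is condensed in the hunk above; written out as a standalone sketch (an approximation, not the literal diff content), the accumulation step looks roughly like this:

    %% A condensed sketch of the accumulation step in replace_with/3; `String'
    %% is the prefix binary (<<"$">> or <<":">>).
    accumulate([Tkn, _Phld], {Acc, Seq}, String) ->
        SeqBin = integer_to_binary(Seq),
        {<<Acc/binary, Tkn/binary, String/binary, SeqBin/binary>>, Seq + 1};
    accumulate([Tkn], {Acc, Seq}, _String) ->
        {<<Acc/binary, Tkn/binary>>, Seq}.

With this in place, the same template can be rendered into `?`, `$1..$n` or `:1..:n` style statements, which is the form the Oracle connector needs.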
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src index 5544825f8..825175038 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src @@ -1,6 +1,6 @@ {application, emqx_ee_bridge, [ {description, "EMQX Enterprise data bridges"}, - {vsn, "0.1.11"}, + {vsn, "0.1.12"}, {registered, []}, {applications, [ kernel, diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl index 38f471ca2..3baf056ec 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl @@ -37,7 +37,8 @@ api_schemas(Method) -> ref(emqx_ee_bridge_rocketmq, Method), ref(emqx_ee_bridge_sqlserver, Method), ref(emqx_bridge_opents, Method), - ref(emqx_bridge_pulsar, Method ++ "_producer") + ref(emqx_bridge_pulsar, Method ++ "_producer"), + ref(emqx_bridge_oracle, Method) ]. schema_modules() -> @@ -59,7 +60,8 @@ schema_modules() -> emqx_ee_bridge_rocketmq, emqx_ee_bridge_sqlserver, emqx_bridge_opents, - emqx_bridge_pulsar + emqx_bridge_pulsar, + emqx_bridge_oracle ]. examples(Method) -> @@ -100,7 +102,8 @@ resource_type(dynamo) -> emqx_ee_connector_dynamo; resource_type(rocketmq) -> emqx_ee_connector_rocketmq; resource_type(sqlserver) -> emqx_ee_connector_sqlserver; resource_type(opents) -> emqx_bridge_opents_connector; -resource_type(pulsar_producer) -> emqx_bridge_pulsar_impl_producer. +resource_type(pulsar_producer) -> emqx_bridge_pulsar_impl_producer; +resource_type(oracle) -> emqx_oracle. fields(bridges) -> [ @@ -167,6 +170,14 @@ fields(bridges) -> desc => <<"OpenTSDB Bridge Config">>, required => false } + )}, + {oracle, + mk( + hoconsc:map(name, ref(emqx_bridge_oracle, "config")), + #{ + desc => <<"Oracle Bridge Config">>, + required => false + } )} ] ++ kafka_structs() ++ pulsar_structs() ++ mongodb_structs() ++ influxdb_structs() ++ redis_structs() ++ diff --git a/mix.exs b/mix.exs index 2c391611e..8e100967b 100644 --- a/mix.exs +++ b/mix.exs @@ -170,7 +170,9 @@ defmodule EMQXUmbrella.MixProject do :emqx_bridge_rocketmq, :emqx_bridge_tdengine, :emqx_bridge_timescale, - :emqx_bridge_pulsar + :emqx_bridge_pulsar, + :emqx_oracle, + :emqx_bridge_oracle ]) end @@ -377,6 +379,8 @@ defmodule EMQXUmbrella.MixProject do emqx_bridge_rocketmq: :permanent, emqx_bridge_tdengine: :permanent, emqx_bridge_timescale: :permanent, + emqx_oracle: :permanent, + emqx_bridge_oracle: :permanent, emqx_ee_schema_registry: :permanent ], else: [] diff --git a/rebar.config.erl b/rebar.config.erl index fa7bdbdf3..bcc104b31 100644 --- a/rebar.config.erl +++ b/rebar.config.erl @@ -94,6 +94,8 @@ is_community_umbrella_app("apps/emqx_bridge_redis") -> false; is_community_umbrella_app("apps/emqx_bridge_rocketmq") -> false; is_community_umbrella_app("apps/emqx_bridge_tdengine") -> false; is_community_umbrella_app("apps/emqx_bridge_timescale") -> false; +is_community_umbrella_app("apps/emqx_bridge_oracle") -> false; +is_community_umbrella_app("apps/emqx_oracle") -> false; is_community_umbrella_app(_) -> true. 
is_jq_supported() -> @@ -470,6 +472,8 @@ relx_apps_per_edition(ee) -> emqx_bridge_rocketmq, emqx_bridge_tdengine, emqx_bridge_timescale, + emqx_oracle, + emqx_bridge_oracle, emqx_ee_schema_registry ]; relx_apps_per_edition(ce) -> diff --git a/rel/i18n/emqx_bridge_oracle.hocon b/rel/i18n/emqx_bridge_oracle.hocon new file mode 100644 index 000000000..95e0cf4af --- /dev/null +++ b/rel/i18n/emqx_bridge_oracle.hocon @@ -0,0 +1,52 @@ +emqx_bridge_oracle { + + local_topic { + desc = "The MQTT topic filter to be forwarded to Oracle Database. All MQTT 'PUBLISH' messages with the topic" + " matching the local_topic will be forwarded.
" + "NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is" + " configured, then both the data got from the rule and the MQTT messages that match local_topic" + " will be forwarded." + label = "Local Topic" + } + + sql_template { + desc = "SQL Template. The template string can contain placeholders" + " for message metadata and payload field. The placeholders are inserted" + " without any checking and special formatting, so it is important to" + " ensure that the inserted values are formatted and escaped correctly." + label = "SQL Template" + } + + server { + desc = "The IPv4 or IPv6 address or the hostname to connect to.
" + "A host entry has the following form: `Host[:Port]`.
" + "The Oracle Database default port 1521 is used if `[:Port]` is not specified." + label = "Server Host" + } + + sid { + desc = "Sid for Oracle Database" + label = "Oracle Database Sid." + } + + config_enable { + desc = "Enable or disable this bridge" + label = "Enable Or Disable Bridge" + } + + desc_config { + desc = "Configuration for an Oracle Database bridge." + label = "Oracle Database Bridge Configuration" + } + + desc_type { + desc = "The Bridge Type" + label = "Bridge Type" + } + + desc_name { + desc = "Bridge name." + label = "Bridge Name" + } + +} diff --git a/rel/i18n/emqx_oracle.hocon b/rel/i18n/emqx_oracle.hocon new file mode 100644 index 000000000..58de8e4c7 --- /dev/null +++ b/rel/i18n/emqx_oracle.hocon @@ -0,0 +1,15 @@ +emqx_oracle { + + server { + desc = "The IPv4 or IPv6 address or the hostname to connect to.
" + "A host entry has the following form: `Host[:Port]`.
" + "The Oracle Database default port 1521 is used if `[:Port]` is not specified." + label = "Server Host" + } + + sid { + desc = "Sid for Oracle Database." + label = "Oracle Database Sid" + } + +} diff --git a/rel/i18n/zh/emqx_bridge_oracle.hocon b/rel/i18n/zh/emqx_bridge_oracle.hocon new file mode 100644 index 000000000..290ac6d07 --- /dev/null +++ b/rel/i18n/zh/emqx_bridge_oracle.hocon @@ -0,0 +1,51 @@ +emqx_bridge_oracle { + + local_topic { + desc = "发送到 'local_topic' 的消息都会转发到 Oracle Database。
" + "注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。" + label = "本地 Topic" + } + + sql_template { + desc = "SQL模板。模板字符串可以包含消息元数据和有效载荷字段的占位符。占位符" + "的插入不需要任何检查和特殊格式化,因此必须确保插入的数值格式化和转义正确。模板字符串可以包含占位符" + "模板字符串可以包含消息元数据和有效载荷字段的占位符。这些占位符被插入" + "所以必须确保插入的值的格式正确。因此,确保插入的值格式化和转义正确是非常重要的。模板字符串可以包含占位符" + "模板字符串可以包含消息元数据和有效载荷字段的占位符。这些占位符被插入" + "所以必须确保插入的值的格式正确。确保插入的值被正确地格式化和转义。" + label = "SQL 模板" + } + + server { + desc = "将要连接的 IPv4 或 IPv6 地址,或者主机名。
" + "主机名具有以下形式:`Host[:Port]`。
" + "如果未指定 `[:Port]`,则使用 Oracle Database 默认端口 1521。" + label = "服务器地址" + } + + sid { + desc = "Oracle Database Sid 名称" + label = "Oracle Database Sid" + } + + config_enable { + desc = "启用/禁用桥接" + label = "启用/禁用桥接" + } + + desc_config { + desc = "Oracle Database 桥接配置" + label = "Oracle Database 桥接配置" + } + + desc_type { + desc = "Bridge 类型" + label = "桥接类型" + } + + desc_name { + desc = "桥接名字" + label = "桥接名字" + } + +} diff --git a/rel/i18n/zh/emqx_oracle.hocon b/rel/i18n/zh/emqx_oracle.hocon new file mode 100644 index 000000000..70c597cb1 --- /dev/null +++ b/rel/i18n/zh/emqx_oracle.hocon @@ -0,0 +1,15 @@ +emqx_oracle { + + server { + desc = "将要连接的 IPv4 或 IPv6 地址,或者主机名。
" + "主机名具有以下形式:`Host[:Port]`。
" + "如果未指定 `[:Port]`,则使用 Oracle Database 默认端口 1521。" + label = "服务器地址" + } + + sid { + desc = "Oracle Database Sid 名称" + label = "Oracle Database Sid" + } + +} diff --git a/scripts/ct/run.sh b/scripts/ct/run.sh index fec0d589c..ad0736bb3 100755 --- a/scripts/ct/run.sh +++ b/scripts/ct/run.sh @@ -193,6 +193,9 @@ for dep in ${CT_DEPS}; do ;; pulsar) FILES+=( '.ci/docker-compose-file/docker-compose-pulsar.yaml' ) + ;; + oracle) + FILES+=( '.ci/docker-compose-file/docker-compose-oracle.yaml' ) ;; *) echo "unknown_ct_dependency $dep" From 43bb6f00caaa0adda300a61e8b359c7f49f28d41 Mon Sep 17 00:00:00 2001 From: Paulo Zulato Date: Tue, 25 Apr 2023 19:51:15 -0300 Subject: [PATCH 179/263] fix(oracle): drop support for async queries jamdb_oracle does not provide interface for performing async queries and ecpool does not monitor the worker which calls jamdb_oracle, so it's safer to keep support for sync queries only. --- .../test/emqx_bridge_oracle_SUITE.erl | 80 ------------------- apps/emqx_oracle/src/emqx_oracle.erl | 75 +---------------- 2 files changed, 4 insertions(+), 151 deletions(-) diff --git a/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl b/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl index de77b26de..b50788277 100644 --- a/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl +++ b/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl @@ -330,8 +330,6 @@ create_rule_and_action_http(Config) -> %% Testcases %%------------------------------------------------------------------------------ -% Under normal operations, the bridge will be called async via -% `simple_async_query'. t_sync_query(Config) -> ResourceId = resource_id(Config), ?check_trace( @@ -360,48 +358,6 @@ t_sync_query(Config) -> ), ok. -t_async_query(Config) -> - Overrides = #{ - <<"resource_opts">> => #{ - <<"enable_batch">> => <<"false">>, - <<"batch_size">> => 1 - } - }, - ResourceId = resource_id(Config), - ?check_trace( - begin - ?assertMatch({ok, _}, create_bridge_api(Config, Overrides)), - ?retry( - _Sleep = 1_000, - _Attempts = 20, - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) - ), - reset_table(Config), - MsgId = erlang:unique_integer(), - Params = #{ - topic => ?config(mqtt_topic, Config), - id => MsgId, - payload => ?config(oracle_name, Config), - retain => false - }, - Message = {send_message, Params}, - ?assertMatch( - { - ok, - {ok, #{result := {ok, [{affected_rows, 1}]}}} - }, - ?wait_async_action( - emqx_resource:query(ResourceId, Message), - #{?snk_kind := oracle_query}, - 5_000 - ) - ), - ok - end, - [] - ), - ok. - t_batch_sync_query(Config) -> ProxyPort = ?config(proxy_port, Config), ProxyHost = ?config(proxy_host, Config), @@ -449,42 +405,6 @@ t_batch_sync_query(Config) -> ), ok. -t_batch_async_query(Config) -> - ResourceId = resource_id(Config), - ?check_trace( - begin - ?assertMatch({ok, _}, create_bridge_api(Config)), - ?retry( - _Sleep = 1_000, - _Attempts = 20, - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) - ), - reset_table(Config), - MsgId = erlang:unique_integer(), - Params = #{ - topic => ?config(mqtt_topic, Config), - id => MsgId, - payload => ?config(oracle_name, Config), - retain => false - }, - Message = {send_message, Params}, - ?assertMatch( - { - ok, - {ok, #{result := {ok, [{affected_rows, 1}]}}} - }, - ?wait_async_action( - emqx_resource:query(ResourceId, Message), - #{?snk_kind := oracle_batch_query}, - 5_000 - ) - ), - ok - end, - [] - ), - ok. 
- t_create_via_http(Config) -> ?check_trace( begin diff --git a/apps/emqx_oracle/src/emqx_oracle.erl b/apps/emqx_oracle/src/emqx_oracle.erl index c39a6a6d7..a0d7169f3 100644 --- a/apps/emqx_oracle/src/emqx_oracle.erl +++ b/apps/emqx_oracle/src/emqx_oracle.erl @@ -23,8 +23,6 @@ on_stop/2, on_query/3, on_batch_query/3, - on_query_async/4, - on_batch_query_async/4, on_get_status/2 ]). @@ -35,7 +33,6 @@ -export([ query/3, execute_batch/3, - do_async_reply/2, do_get_status/1 ]). @@ -46,7 +43,6 @@ -define(ACTION_SEND_MESSAGE, send_message). -define(SYNC_QUERY_MODE, no_handover). --define(ASYNC_QUERY_MODE(REPLY), {handover_async, {?MODULE, do_async_reply, [REPLY]}}). -define(ORACLE_HOST_OPTIONS, #{ default_port => ?ORACLE_DEFAULT_PORT @@ -67,7 +63,10 @@ batch_params_tokens := params_tokens() }. -callback_mode() -> async_if_possible. +% As ecpool is not monitoring the worker's PID when doing a handover_async, the +% request can be lost if worker crashes. Thus, it's better to force requests to +% be sync for now. +callback_mode() -> always_sync. is_buffer_supported() -> false. @@ -147,24 +146,6 @@ on_query( Res = on_sql_query(InstId, PoolName, Type, ?SYNC_QUERY_MODE, NameOrSQL2, Data), handle_result(Res). -on_query_async(InstId, {TypeOrKey, NameOrSQL}, Reply, State) -> - on_query_async(InstId, {TypeOrKey, NameOrSQL, []}, Reply, State); -on_query_async( - InstId, {TypeOrKey, NameOrSQL, Params} = Query, Reply, #{pool_name := PoolName} = State -) -> - ?SLOG(debug, #{ - msg => "oracle database connector received async sql query", - connector => InstId, - query => Query, - reply => Reply, - state => State - }), - ApplyMode = ?ASYNC_QUERY_MODE(Reply), - Type = query, - {NameOrSQL2, Data} = proc_sql_params(TypeOrKey, NameOrSQL, Params, State), - Res = on_sql_query(InstId, PoolName, Type, ApplyMode, NameOrSQL2, Data), - handle_result(Res). - on_batch_query( InstId, BatchReq, @@ -207,51 +188,6 @@ on_batch_query( {error, {unrecoverable_error, invalid_request}} end. -on_batch_query_async( - InstId, - BatchReq, - Reply, - #{pool_name := PoolName, params_tokens := Tokens, prepare_statement := Sts} = State -) -> - case BatchReq of - [{Key, _} = Request | _] -> - BinKey = to_bin(Key), - case maps:get(BinKey, Tokens, undefined) of - undefined -> - Log = #{ - connector => InstId, - first_request => Request, - state => State, - msg => "batch prepare not implemented" - }, - ?SLOG(error, Log), - {error, {unrecoverable_error, batch_prepare_not_implemented}}; - TokenList -> - {_, Datas} = lists:unzip(BatchReq), - Datas2 = [emqx_plugin_libs_rule:proc_sql(TokenList, Data) || Data <- Datas], - St = maps:get(BinKey, Sts), - case - on_sql_query( - InstId, PoolName, execute_batch, ?ASYNC_QUERY_MODE(Reply), St, Datas2 - ) - of - {ok, Results} -> - handle_batch_result(Results, 0); - Result -> - Result - end - end; - _ -> - Log = #{ - connector => InstId, - request => BatchReq, - state => State, - msg => "invalid request" - }, - ?SLOG(error, Log), - {error, {unrecoverable_error, invalid_request}} - end. - proc_sql_params(query, SQLOrKey, Params, _State) -> {SQLOrKey, Params}; proc_sql_params(TypeOrKey, SQLOrData, Params, #{ @@ -429,6 +365,3 @@ handle_batch_result([{proc_result, RetCode, Reason} | _Rest], _Acc) -> {error, {unrecoverable_error, {RetCode, Reason}}}; handle_batch_result([], Acc) -> {ok, Acc}. - -do_async_reply(Result, {ReplyFun, [Context]}) -> - ReplyFun(Context, Result). 
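With callback_mode/0 forced to always_sync above, the resource layer no longer routes requests through the removed *_async callbacks; the remaining test cases drive the connector through synchronous resource calls, along the lines of this sketch (ResourceId and Params as built by the suite helpers):

    %% Illustrative only: after this change every request takes the sync path.
    sync_roundtrip(ResourceId, Params) ->
        {ok, [{affected_rows, 1}]} =
            emqx_resource:simple_sync_query(ResourceId, {send_message, Params}).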
From d0c4c70f74331b4951bce0fef6de49e1945027ec Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 27 Apr 2023 13:19:26 -0300 Subject: [PATCH 180/263] test(banned): attempt to fix flaky test Example failure: https://github.com/emqx/emqx/actions/runs/4821105856/jobs/8587006829#step:8:4495 ``` - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - emqx_common_test_helpers:wait_for_down failed on line 434 Reason: {{t_session_taken,178,timeout},[{emqx_common_test_helpers,...},{...}|...]} - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Testing lib.emqx.emqx_banned_SUITE: *** FAILED test case 4 of 5 *** %%% emqx_banned_SUITE ==> t_session_taken: FAILED %%% emqx_banned_SUITE ==> {{t_session_taken,178,timeout}, [{emqx_common_test_helpers,wait_for_down,6, [{file,"/__w/emqx/emqx/source/apps/emqx/test/emqx_common_test_helpers.erl"}, {line,434}]}, {emqx_banned_SUITE,t_session_taken,1, [{file,"/__w/emqx/emqx/source/apps/emqx/test/emqx_banned_SUITE.erl"}, {line,176}]}, {test_server,ts_tc,3,[{file,"test_server.erl"},{line,1782}]}, {test_server,run_test_case_eval1,6,[{file,"test_server.erl"},{line,1291}]}, {test_server,run_test_case_eval,9,[{file,"test_server.erl"},{line,1223}]}]} ``` --- apps/emqx/test/emqx_banned_SUITE.erl | 2 +- apps/emqx_resource/src/emqx_resource.app.src | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/emqx/test/emqx_banned_SUITE.erl b/apps/emqx/test/emqx_banned_SUITE.erl index 0c14f64c9..9419ba4c3 100644 --- a/apps/emqx/test/emqx_banned_SUITE.erl +++ b/apps/emqx/test/emqx_banned_SUITE.erl @@ -186,7 +186,7 @@ t_session_taken(_) -> false end end, - 6000 + 15_000 ), Publish(), diff --git a/apps/emqx_resource/src/emqx_resource.app.src b/apps/emqx_resource/src/emqx_resource.app.src index 2553e6dd8..3e264cb3e 100644 --- a/apps/emqx_resource/src/emqx_resource.app.src +++ b/apps/emqx_resource/src/emqx_resource.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_resource, [ {description, "Manager for all external resources"}, - {vsn, "0.1.14"}, + {vsn, "0.1.15"}, {registered, []}, {mod, {emqx_resource_app, []}}, {applications, [ From 633eacad3b77d2d0860b75e9fc3d1490882770af Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 27 Apr 2023 09:42:05 -0300 Subject: [PATCH 181/263] test(pulsar): add more test cases for Pulsar Producer bridge Fixes https://emqx.atlassian.net/browse/EMQX-8400 --- apps/emqx_bridge_pulsar/rebar.config | 2 +- .../src/emqx_bridge_pulsar.app.src | 2 +- .../src/emqx_bridge_pulsar_impl_producer.erl | 47 ++-- ...emqx_bridge_pulsar_impl_producer_SUITE.erl | 201 +++++++++++++++++- 4 files changed, 235 insertions(+), 17 deletions(-) diff --git a/apps/emqx_bridge_pulsar/rebar.config b/apps/emqx_bridge_pulsar/rebar.config index be5f282df..d5a63f320 100644 --- a/apps/emqx_bridge_pulsar/rebar.config +++ b/apps/emqx_bridge_pulsar/rebar.config @@ -2,7 +2,7 @@ {erl_opts, [debug_info]}. 
{deps, [ - {pulsar, {git, "https://github.com/emqx/pulsar-client-erl.git", {tag, "0.8.0"}}}, + {pulsar, {git, "https://github.com/emqx/pulsar-client-erl.git", {tag, "0.8.1"}}}, {emqx_connector, {path, "../../apps/emqx_connector"}}, {emqx_resource, {path, "../../apps/emqx_resource"}}, {emqx_bridge, {path, "../../apps/emqx_bridge"}} diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src index ead7cb715..b169aa2c4 100644 --- a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_pulsar, [ {description, "EMQX Pulsar Bridge"}, - {vsn, "0.1.0"}, + {vsn, "0.1.1"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl index 2bd44d16a..27d50f077 100644 --- a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl @@ -87,11 +87,14 @@ on_start(InstanceId, Config) -> }, case pulsar:ensure_supervised_client(ClientId, Servers, ClientOpts) of {ok, _Pid} -> - ?SLOG(info, #{ - msg => "pulsar_client_started", - instance_id => InstanceId, - pulsar_hosts => Servers - }); + ?tp( + info, + "pulsar_client_started", + #{ + instance_id => InstanceId, + pulsar_hosts => Servers + } + ); {error, Reason} -> ?SLOG(error, #{ msg => "failed_to_start_pulsar_client", @@ -115,7 +118,7 @@ on_stop(_InstanceId, State) -> ok. -spec on_get_status(manager_id(), state()) -> connected | disconnected. -on_get_status(_InstanceId, State) -> +on_get_status(_InstanceId, State = #{}) -> #{ pulsar_client_id := ClientId, producers := Producers @@ -135,7 +138,11 @@ on_get_status(_InstanceId, State) -> end; {error, _} -> disconnected - end. + end; +on_get_status(_InstanceId, _State) -> + %% If a health check happens just after a concurrent request to + %% create the bridge is not quite finished, `State = undefined'. + connecting. -spec on_query(manager_id(), {send_message, map()}, state()) -> {ok, term()} @@ -160,6 +167,13 @@ on_query(_InstanceId, {send_message, Message}, State) -> ) -> {ok, pid()}. on_query_async(_InstanceId, {send_message, Message}, AsyncReplyFn, State) -> + ?tp_span( + pulsar_producer_on_query_async, + #{instance_id => _InstanceId, message => Message}, + do_on_query_async(Message, AsyncReplyFn, State) + ). 
+ +do_on_query_async(Message, AsyncReplyFn, State) -> #{ producers := Producers, message_template := MessageTemplate @@ -283,6 +297,7 @@ start_producer(Config, InstanceId, ClientId, ClientOpts) -> drop_if_highmem => MemOLP }, ProducerName = producer_name(ClientId), + ?tp(pulsar_producer_capture_name, #{producer_name => ProducerName}), MessageTemplate = compile_message_template(MessageTemplateOpts), ProducerOpts0 = #{ @@ -298,6 +313,7 @@ start_producer(Config, InstanceId, ClientId, ClientOpts) -> }, ProducerOpts = maps:merge(ReplayQOpts, ProducerOpts0), PulsarTopic = binary_to_list(PulsarTopic0), + ?tp(pulsar_producer_about_to_start_producers, #{producer_name => ProducerName}), try pulsar:ensure_supervised_producers(ClientId, PulsarTopic, ProducerOpts) of {ok, Producers} -> State = #{ @@ -310,13 +326,16 @@ start_producer(Config, InstanceId, ClientId, ClientOpts) -> {ok, State} catch Kind:Error:Stacktrace -> - ?SLOG(error, #{ - msg => "failed_to_start_pulsar_producer", - instance_id => InstanceId, - kind => Kind, - reason => Error, - stacktrace => Stacktrace - }), + ?tp( + error, + "failed_to_start_pulsar_producer", + #{ + instance_id => InstanceId, + kind => Kind, + reason => Error, + stacktrace => Stacktrace + } + ), stop_client(ClientId), throw(failed_to_start_pulsar_producer) end. diff --git a/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl index d254b01fc..be38f6625 100644 --- a/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl +++ b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl @@ -37,7 +37,14 @@ groups() -> ]. only_once_tests() -> - [t_create_via_http]. + [ + t_create_via_http, + t_start_when_down, + t_send_when_down, + t_send_when_timeout, + t_failure_to_start_producer, + t_producer_process_crash + ]. init_per_suite(Config) -> Config. @@ -753,6 +760,198 @@ t_on_get_status(Config) -> ), ok. +t_start_when_down(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + ok + end), + %% Should recover given enough time. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + ok + end, + [] + ), + ok. + +t_send_when_down(Config) -> + do_t_send_with_failure(Config, down). + +t_send_when_timeout(Config) -> + do_t_send_with_failure(Config, timeout). 
+ +do_t_send_with_failure(Config, FailureType) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + MQTTTopic = ?config(mqtt_topic, Config), + QoS = 0, + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + Message0 = emqx_message:make(ClientId, QoS, MQTTTopic, Payload), + + {{ok, _}, {ok, _}} = + ?wait_async_action( + create_bridge(Config), + #{?snk_kind := pulsar_producer_bridge_started}, + 10_000 + ), + ?check_trace( + begin + emqx_common_test_helpers:with_failure( + FailureType, ProxyName, ProxyHost, ProxyPort, fun() -> + {_, {ok, _}} = + ?wait_async_action( + emqx:publish(Message0), + #{ + ?snk_kind := pulsar_producer_on_query_async, + ?snk_span := {complete, _} + }, + 5_000 + ), + ok + end + ), + ok + end, + fun(_Trace) -> + %% Should recover given enough time. + Data0 = receive_consumed(20_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := MQTTTopic + } + ], + Data0 + ), + ok + end + ), + ok. + +%% Check that we correctly terminate the pulsar client when the pulsar +%% producer processes fail to start for whatever reason. +t_failure_to_start_producer(Config) -> + ?check_trace( + begin + ?force_ordering( + #{?snk_kind := name_registered}, + #{?snk_kind := pulsar_producer_about_to_start_producers} + ), + spawn_link(fun() -> + ?tp(will_register_name, #{}), + {ok, #{producer_name := ProducerName}} = ?block_until( + #{?snk_kind := pulsar_producer_capture_name}, 10_000 + ), + true = register(ProducerName, self()), + ?tp(name_registered, #{name => ProducerName}), + %% Just simulating another process so that starting the + %% producers fail. Currently it does a gen_server:call + %% with `infinity' timeout, so this is just to avoid + %% hanging. + receive + {'$gen_call', From, _Request} -> + gen_server:reply(From, {error, im_not, your_producer}) + end, + receive + die -> ok + end + end), + {{ok, _}, {ok, _}} = + ?wait_async_action( + create_bridge(Config), + #{?snk_kind := pulsar_bridge_client_stopped}, + 20_000 + ), + ok + end, + [] + ), + ok. + +%% Check the driver recovers itself if one of the producer processes +%% die for whatever reason. +t_producer_process_crash(Config) -> + MQTTTopic = ?config(mqtt_topic, Config), + ResourceId = resource_id(Config), + QoS = 0, + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + Message0 = emqx_message:make(ClientId, QoS, MQTTTopic, Payload), + ?check_trace( + begin + {{ok, _}, {ok, _}} = + ?wait_async_action( + create_bridge( + Config, + #{<<"buffer">> => #{<<"mode">> => <<"disk">>}} + ), + #{?snk_kind := pulsar_producer_bridge_started}, + 10_000 + ), + [ProducerPid | _] = [ + Pid + || {_Name, PS, _Type, _Mods} <- supervisor:which_children(pulsar_producers_sup), + Pid <- element(2, process_info(PS, links)), + case proc_lib:initial_call(Pid) of + {pulsar_producer, init, _} -> true; + _ -> false + end + ], + Ref = monitor(process, ProducerPid), + exit(ProducerPid, kill), + receive + {'DOWN', Ref, process, ProducerPid, _Killed} -> + ok + after 1_000 -> ct:fail("pid didn't die") + end, + ?assertEqual({ok, connecting}, emqx_resource_manager:health_check(ResourceId)), + %% Should recover given enough time. 
+ ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + {_, {ok, _}} = + ?wait_async_action( + emqx:publish(Message0), + #{?snk_kind := pulsar_producer_on_query_async, ?snk_span := {complete, _}}, + 5_000 + ), + Data0 = receive_consumed(20_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := MQTTTopic + } + ], + Data0 + ), + ok + end, + [] + ), + ok. + t_cluster(Config) -> MQTTTopic = ?config(mqtt_topic, Config), ResourceId = resource_id(Config), From ee61648368f65b52bac3c6d2d7de2e36fddcd4fb Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Fri, 28 Apr 2023 22:42:27 +0200 Subject: [PATCH 182/263] build: imporove speed to local run prior to this change, 'make run' has to wait for the release tar ball to be created. now it just copy the release files and run --- build | 9 +++++---- rebar.config | 2 +- rebar.config.erl | 5 +---- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/build b/build index 05246a359..eb75317cc 100755 --- a/build +++ b/build @@ -125,6 +125,7 @@ assert_no_compile_time_only_deps() { } make_rel() { + local release_or_tar="${1}" ./scripts/pre-compile.sh "$PROFILE" # make_elixir_rel always create rebar.lock # delete it to make git clone + checkout work because we use shallow close for rebar deps @@ -134,7 +135,7 @@ make_rel() { # generate docs (require beam compiled), generated to etc and priv dirs make_docs # now assemble the release tar - ./rebar3 as "$PROFILE" tar + ./rebar3 as "$PROFILE" "$release_or_tar" assert_no_compile_time_only_deps } @@ -220,7 +221,7 @@ make_tgz() { else # build the src_tarball again to ensure relup is included # elixir does not have relup yet. - make_rel + make_rel tar local relpath="_build/${PROFILE}/rel/emqx" full_vsn="$(./pkg-vsn.sh "$PROFILE" --long)" @@ -378,7 +379,7 @@ case "$ARTIFACT" in make_docs ;; rel) - make_rel + make_rel release ;; relup) make_relup @@ -397,7 +398,7 @@ case "$ARTIFACT" in if [ "${IS_ELIXIR:-}" = 'yes' ]; then make_elixir_rel else - make_rel + make_rel tar fi env EMQX_REL="$(pwd)" \ EMQX_BUILD="${PROFILE}" \ diff --git a/rebar.config b/rebar.config index bd8c3484c..05e9b9a28 100644 --- a/rebar.config +++ b/rebar.config @@ -45,7 +45,7 @@ emqx_ssl_crl_cache ]}. -{provider_hooks, [{pre, [{release, {relup_helper, gen_appups}}]}]}. +%{provider_hooks, [{pre, [{release, {relup_helper, gen_appups}}]}]}. {post_hooks,[]}. 
diff --git a/rebar.config.erl b/rebar.config.erl
index bcc104b31..5c83d1ea0 100644
--- a/rebar.config.erl
+++ b/rebar.config.erl
@@ -156,7 +156,7 @@ project_app_dirs(Edition) ->
 
 plugins() ->
     [
-        {relup_helper, {git, "https://github.com/emqx/relup_helper", {tag, "2.1.0"}}},
+        %{relup_helper, {git, "https://github.com/emqx/relup_helper", {tag, "2.1.0"}}},
        %% emqx main project does not require port-compiler
        %% pin at root level for deterministic
        {pc, "v1.14.0"}
@@ -495,11 +495,8 @@ relx_overlay(ReleaseType, Edition) ->
        {copy, "bin/emqx_cluster_rescue", "bin/emqx_cluster_rescue"},
        {copy, "bin/node_dump", "bin/node_dump"},
        {copy, "bin/install_upgrade.escript", "bin/install_upgrade.escript"},
-        %% for relup
        {copy, "bin/emqx", "bin/emqx-{{release_version}}"},
-        %% for relup
        {copy, "bin/emqx_ctl", "bin/emqx_ctl-{{release_version}}"},
-        %% for relup
        {copy, "bin/install_upgrade.escript", "bin/install_upgrade.escript-{{release_version}}"},
        {copy, "apps/emqx_gateway_lwm2m/lwm2m_xml", "etc/lwm2m_xml"},
        {copy, "apps/emqx_authz/etc/acl.conf", "etc/acl.conf"},

From b0eca5bc00f982bd58069278ffff0a4279880c06 Mon Sep 17 00:00:00 2001
From: Zhongwen Deng
Date: Thu, 27 Apr 2023 17:25:08 +0800
Subject: [PATCH 183/263] feat: aliases etcd.ssl to etcd.ssl_options

---
 apps/emqx_conf/src/emqx_conf_schema.erl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/apps/emqx_conf/src/emqx_conf_schema.erl b/apps/emqx_conf/src/emqx_conf_schema.erl
index abccca9fb..a7688baf9 100644
--- a/apps/emqx_conf/src/emqx_conf_schema.erl
+++ b/apps/emqx_conf/src/emqx_conf_schema.erl
@@ -340,7 +340,7 @@ fields(cluster_etcd) ->
                 ?R_REF(emqx_schema, "ssl_client_opts"),
                 #{
                     desc => ?DESC(cluster_etcd_ssl),
-                    alias => [ssl],
+                    aliases => [ssl],
                     'readOnly' => true
                 }
             )}
@@ -1247,7 +1247,7 @@ cluster_options(dns, Conf) ->
        {type, conf_get("cluster.dns.record_type", Conf)}
    ];
 cluster_options(etcd, Conf) ->
-    Namespace = "cluster.etcd.ssl",
+    Namespace = "cluster.etcd.ssl_options",
    SslOpts = fun(C) ->
        Options = keys(Namespace, C),
        lists:map(fun(Key) -> {to_atom(Key), conf_get([Namespace, Key], Conf)} end, Options)

From cc2d529562edd2d2f5da9e59bb8548d861f94523 Mon Sep 17 00:00:00 2001
From: Zhongwen Deng
Date: Thu, 27 Apr 2023 17:28:22 +0800
Subject: [PATCH 184/263] feat: remove tlsv1.1,tlsv1,dtlsv1 from default ssl version

---
 apps/emqx/src/emqx.app.src    | 2 +-
 apps/emqx/src/emqx_schema.erl | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/apps/emqx/src/emqx.app.src b/apps/emqx/src/emqx.app.src
index d42478fea..5ca8fc797 100644
--- a/apps/emqx/src/emqx.app.src
+++ b/apps/emqx/src/emqx.app.src
@@ -3,7 +3,7 @@
     {id, "emqx"},
     {description, "EMQX Core"},
     % strict semver, bump manually!
-    {vsn, "5.0.24"},
+    {vsn, "5.0.25"},
     {modules, []},
     {registered, []},
     {applications, [
diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl
index 69f234e47..1dd122ca6 100644
--- a/apps/emqx/src/emqx_schema.erl
+++ b/apps/emqx/src/emqx_schema.erl
@@ -2424,9 +2424,9 @@ client_ssl_opts_schema(Defaults) ->
         )}
     ].
 
 default_tls_vsns(dtls_all_available) ->
-    emqx_tls_lib:available_versions(dtls);
+    emqx_tls_lib:available_versions(dtls) -- [dtlsv1];
 default_tls_vsns(tls_all_available) ->
-    emqx_tls_lib:available_versions(tls).
+    emqx_tls_lib:available_versions(tls) -- ['tlsv1.1', tlsv1].
 
 -spec ciphers_schema(quic | dtls_all_available | tls_all_available | undefined) ->
     hocon_schema:field_schema().
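A minimal sketch of the resulting defaults, assuming emqx_tls_lib:available_versions/1 reports ['tlsv1.3', 'tlsv1.2', 'tlsv1.1', tlsv1] for TLS and ['dtlsv1.2', dtlsv1] for DTLS (the exact lists depend on the running OTP release):

    %% Outdated protocol versions are dropped from the default `versions' value only;
    %% they are still reported as available by emqx_tls_lib.
    ['tlsv1.3', 'tlsv1.2'] = emqx_tls_lib:available_versions(tls) -- ['tlsv1.1', tlsv1],
    ['dtlsv1.2'] = emqx_tls_lib:available_versions(dtls) -- [dtlsv1].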
From d8c4c6637ba36a50dfc0e4b9b91168f171381c4a Mon Sep 17 00:00:00 2001
From: Zhongwen Deng
Date: Thu, 27 Apr 2023 17:31:59 +0800
Subject: [PATCH 185/263] feat: mark ssl_options.password as low importance

---
 apps/emqx/src/emqx_schema.erl                     | 1 +
 apps/emqx_dashboard/src/emqx_dashboard_schema.erl | 4 +---
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl
index 1dd122ca6..b68b41760 100644
--- a/apps/emqx/src/emqx_schema.erl
+++ b/apps/emqx/src/emqx_schema.erl
@@ -2227,6 +2227,7 @@ common_ssl_opts_schema(Defaults) ->
                 example => <<"">>,
                 format => <<"password">>,
                 desc => ?DESC(common_ssl_opts_schema_password),
+                importance => ?IMPORTANCE_LOW,
                 converter => fun password_converter/2
             }
         )},
diff --git a/apps/emqx_dashboard/src/emqx_dashboard_schema.erl b/apps/emqx_dashboard/src/emqx_dashboard_schema.erl
index 319c9cee1..28bfb709a 100644
--- a/apps/emqx_dashboard/src/emqx_dashboard_schema.erl
+++ b/apps/emqx_dashboard/src/emqx_dashboard_schema.erl
@@ -102,9 +102,7 @@ fields("https") ->
 
 server_ssl_opts() ->
     Opts0 = emqx_schema:server_ssl_opts_schema(#{}, true),
-    Opts1 = exclude_fields(["fail_if_no_peer_cert"], Opts0),
-    {value, {_, Meta}, Opts2} = lists:keytake("password", 1, Opts1),
-    [{"password", Meta#{importance => ?IMPORTANCE_HIDDEN}} | Opts2].
+    exclude_fields(["fail_if_no_peer_cert"], Opts0).
 
 exclude_fields([], Fields) ->
     Fields;

From 2ab0e304898677ed37f1dc16c4f734ad0a574199 Mon Sep 17 00:00:00 2001
From: Zhongwen Deng
Date: Sun, 30 Apr 2023 10:48:46 +0800
Subject: [PATCH 186/263] chore: separate available and default TLS versions

---
 apps/emqx/src/emqx_schema.erl         | 21 +++++++++++++--------
 apps/emqx/test/emqx_tls_lib_tests.erl |  3 ++-
 2 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl
index b68b41760..589e05ddb 100644
--- a/apps/emqx/src/emqx_schema.erl
+++ b/apps/emqx/src/emqx_schema.erl
@@ -2165,7 +2165,7 @@ common_ssl_opts_schema(Defaults) ->
     D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end,
     Df = fun(Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end,
     Collection = maps:get(versions, Defaults, tls_all_available),
-    AvailableVersions = default_tls_vsns(Collection),
+    DefaultVersions = default_tls_vsns(Collection),
     [
         {"cacertfile",
             sc(
@@ -2235,10 +2235,10 @@ common_ssl_opts_schema(Defaults) ->
             sc(
                 hoconsc:array(typerefl:atom()),
                 #{
-                    default => AvailableVersions,
+                    default => DefaultVersions,
                     desc => ?DESC(common_ssl_opts_schema_versions),
                     importance => ?IMPORTANCE_HIGH,
-                    validator => fun(Inputs) -> validate_tls_versions(AvailableVersions, Inputs) end
+                    validator => fun(Input) -> validate_tls_versions(Collection, Input) end
                 }
             )},
        {"ciphers", ciphers_schema(D("ciphers"))},
@@ -2424,10 +2424,14 @@ client_ssl_opts_schema(Defaults) ->
         )}
     ].
 
-default_tls_vsns(dtls_all_available) ->
-    emqx_tls_lib:available_versions(dtls) -- [dtlsv1];
-default_tls_vsns(tls_all_available) ->
-    emqx_tls_lib:available_versions(tls) -- ['tlsv1.1', tlsv1].
+available_tls_vsns(dtls_all_available) -> emqx_tls_lib:available_versions(dtls);
+available_tls_vsns(tls_all_available) -> emqx_tls_lib:available_versions(tls).
+
+outdated_tls_vsn(dtls_all_available) -> [dtlsv1];
+outdated_tls_vsn(tls_all_available) -> ['tlsv1.1', tlsv1].
+
+default_tls_vsns(Key) ->
+    available_tls_vsns(Key) -- outdated_tls_vsn(Key).
 
 -spec ciphers_schema(quic | dtls_all_available | tls_all_available | undefined) ->
     hocon_schema:field_schema().
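+%% With the available and default version lists separated, the versions
+%% validator checks user input against the *available* list, so outdated
+%% versions (tlsv1.1, tlsv1, dtlsv1) can still be enabled explicitly even
+%% though they are no longer part of the defaults.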
@@ -2736,7 +2740,8 @@ validate_ciphers(Ciphers) ->
         Bad -> {error, {bad_ciphers, Bad}}
     end.
 
-validate_tls_versions(AvailableVersions, Versions) ->
+validate_tls_versions(Collection, Versions) ->
+    AvailableVersions = available_tls_vsns(Collection),
     case lists:filter(fun(V) -> not lists:member(V, AvailableVersions) end, Versions) of
         [] -> ok;
         Vs -> {error, {unsupported_tls_versions, Vs}}
diff --git a/apps/emqx/test/emqx_tls_lib_tests.erl b/apps/emqx/test/emqx_tls_lib_tests.erl
index ad9598107..0f5883b10 100644
--- a/apps/emqx/test/emqx_tls_lib_tests.erl
+++ b/apps/emqx/test/emqx_tls_lib_tests.erl
@@ -229,7 +229,8 @@ ssl_files_handle_non_generated_file_test() ->
     ok = emqx_tls_lib:delete_ssl_files(Dir, undefined, SSL2),
     %% verify the file is not delete and not changed, because it is not generated by
     %% emqx_tls_lib
-    ?assertEqual({ok, KeyFileContent}, file:read_file(TmpKeyFile)).
+    ?assertEqual({ok, KeyFileContent}, file:read_file(TmpKeyFile)),
+    ok = file:delete(TmpKeyFile).
 
 ssl_file_replace_test() ->
     Key1 = bin(test_key()),

From 516c52bdc783a7f044846adb0c920d7bc262425c Mon Sep 17 00:00:00 2001
From: "Zaiming (Stone) Shi"
Date: Sun, 30 Apr 2023 10:48:20 +0200
Subject: [PATCH 187/263] build: add an 'apps' build target which only compiles the code without building a release

---
 Makefile | 5 +++++
 build    | 12 +++++++++---
 2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/Makefile b/Makefile
index a39f44c07..e60e8e492 100644
--- a/Makefile
+++ b/Makefile
@@ -140,6 +140,11 @@ COMMON_DEPS := $(REBAR)
 $(REL_PROFILES:%=%): $(COMMON_DEPS)
 	@$(BUILD) $(@) rel
 
+.PHONY: compile $(PROFILES:%=compile-%)
+compile: $(PROFILES:%=compile-%)
+$(PROFILES:%=compile-%):
+	@$(BUILD) $(@:compile-%=%) apps
+
 ## Not calling rebar3 clean because
 ## 1. rebar3 clean relies on rebar3, meaning it reads config, fetches dependencies etc.
 ## 2. it's slow
diff --git a/build b/build
index eb75317cc..2924f8a6f 100755
--- a/build
+++ b/build
@@ -124,16 +124,19 @@ assert_no_compile_time_only_deps() {
     :
 }
 
-make_rel() {
-    local release_or_tar="${1}"
+just_compile() {
     ./scripts/pre-compile.sh "$PROFILE"
     # make_elixir_rel always create rebar.lock
     # delete it to make git clone + checkout work because we use shallow close for rebar deps
     rm -f rebar.lock
     # compile all beams
     ./rebar3 as "$PROFILE" compile
-    # generate docs (require beam compiled), generated to etc and priv dirs
     make_docs
+}
+
+make_rel() {
+    local release_or_tar="${1}"
+    just_compile
     # now assemble the release tar
     ./rebar3 as "$PROFILE" "$release_or_tar"
     assert_no_compile_time_only_deps
@@ -375,6 +378,9 @@ export_elixir_release_vars() {
 log "building artifact=$ARTIFACT for profile=$PROFILE"
 
 case "$ARTIFACT" in
+    apps)
+        just_compile
+        ;;
    doc|docs)
        make_docs
        ;;

From dbcb75f35bae1d4a00122b3244f2123cdb1051fb Mon Sep 17 00:00:00 2001
From: "Zaiming (Stone) Shi"
Date: Mon, 1 May 2023 21:34:33 +0200
Subject: [PATCH 188/263] build: add ./dev

---
 dev | 250 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 250 insertions(+)
 create mode 100644 dev

diff --git a/dev b/dev
new file mode 100755
index 000000000..69706bf6d
--- /dev/null
+++ b/dev
@@ -0,0 +1,250 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+usage() {
+cat <&2
+        exit 1
+        ;;
+    :)
+        echo "Option -$OPTARG requires an argument." >&2
+        exit 1
+        ;;
+    esac
+done
+shift $((OPTIND-1))
+
+case "${PROFILE}" in
+    ce|emqx)
+        PROFILE='emqx'
+        ;;
+    ee|emqx-enterprise)
+        PROFILE='emqx-enterprise'
+        ;;
+    *)
+        echo "Unknown profile $PROFILE"
+        exit 1
+        ;;
+esac
+export PROFILE
+
+case "${PROFILE}" in
+    emqx)
+        SCHEMA_MOD='emqx_conf_schema'
+        ;;
+    emqx-enterprise)
+        SCHEMA_MOD='emqx_ee_conf_schema'
+        ;;
+esac
+
+PROJ_ROOT="$(git rev-parse --show-toplevel)"
+cd "$PROJ_ROOT"
+BASE_DIR="_build/dev-run/$PROFILE"
+export EMQX_ETC_DIR="$BASE_DIR/etc"
+export EMQX_DATA_DIR="$BASE_DIR/data"
+export EMQX_LOG_DIR="$BASE_DIR/log"
+CONFIGS_DIR="$EMQX_DATA_DIR/configs"
+COOKIE='emqxsecretcookie'
+mkdir -p "$EMQX_ETC_DIR" "$EMQX_DATA_DIR/patches" "$EMQX_LOG_DIR" "$CONFIGS_DIR"
+
+## compile the profile if it's not compiled yet
+prepare_erl_libs() {
+    local profile="$1"
+    local libs_dir="_build/${profile}/lib"
+    local erl_libs=''
+    if [ $FORCE_COMPILE -eq 1 ] || [ ! -d "$libs_dir" ]; then
+        make "compile-${PROFILE}"
+    else
+        echo "Running from code in $libs_dir"
+    fi
+    for app in "${libs_dir}"/*; do
+        erl_libs="${erl_libs}:${app}"
+    done
+    export ERL_LIBS="$erl_libs"
+}
+
+## poor man's mustache templating
+mustache() {
+    local name="$1"
+    local value="$2"
+    local file="$3"
+    sed -i "s|{{\s*${name}\s*}}|${value}|g" "$file"
+}
+
+## render the merged boot conf file.
+## the merge action is done before the profile is compiled
+render_hocon_conf() {
+    input="apps/emqx_conf/etc/emqx.conf.all"
+    output="$EMQX_ETC_DIR/emqx.conf"
+    cp "$input" "$output"
+    mustache emqx_default_erlang_cookie "$COOKIE" "$output"
+    mustache platform_data_dir "${EMQX_DATA_DIR}" "$output"
+    mustache platform_log_dir "${EMQX_LOG_DIR}" "$output"
+    mustache platform_etc_dir "${EMQX_ETC_DIR}" "$output"
+}
+
+call_hocon() {
+    local in=("$@")
+    local args=''
+    for arg in "${in[@]}"; do
+        if [ -z "$args" ]; then
+            args="\"$arg\""
+        else
+            args="$args, \"$arg\""
+        fi
+    done
+    erl -noshell -eval "{ok, _} = application:ensure_all_started(hocon), ok = hocon_cli:main([$args]), init:stop()."
+}
+
+# Function to generate app.config and vm.args
+# sets two environment variables CONF_FILE and ARGS_FILE
+generate_app_conf() {
+    ## timestamp for each generation
+    local NOW_TIME
+    NOW_TIME="$(date +'%Y.%m.%d.%H.%M.%S')"
+
+    ## this command populates two files: app.