diff --git a/.ci/docker-compose-file/cassandra/cassandra.yaml b/.ci/docker-compose-file/cassandra/cassandra.yaml
index 968efe5f6..bc1bf3357 100644
--- a/.ci/docker-compose-file/cassandra/cassandra.yaml
+++ b/.ci/docker-compose-file/cassandra/cassandra.yaml
@@ -469,8 +469,8 @@ concurrent_materialized_view_writes: 32
# accepting writes when the limit is exceeded until a flush completes,
# and will trigger a flush based on memtable_cleanup_threshold
# If omitted, Cassandra will set both to 1/4 the size of the heap.
-# memtable_heap_space_in_mb: 2048
-# memtable_offheap_space_in_mb: 2048
+memtable_heap_space_in_mb: 2048
+memtable_offheap_space_in_mb: 2048
# memtable_cleanup_threshold is deprecated. The default calculation
# is the only reasonable choice. See the comments on memtable_flush_writers
diff --git a/.ci/docker-compose-file/docker-compose-cassandra.yaml b/.ci/docker-compose-file/docker-compose-cassandra.yaml
index a54f621c1..f7143f471 100644
--- a/.ci/docker-compose-file/docker-compose-cassandra.yaml
+++ b/.ci/docker-compose-file/docker-compose-cassandra.yaml
@@ -12,6 +12,8 @@ services:
environment:
CASSANDRA_BROADCAST_ADDRESS: "1.2.3.4"
CASSANDRA_RPC_ADDRESS: "0.0.0.0"
+ HEAP_NEWSIZE: "128M"
+ MAX_HEAP_SIZE: "2048M"
volumes:
- ./certs:/certs
#ports:
diff --git a/Makefile b/Makefile
index 13e3a6d43..a5adf0e0a 100644
--- a/Makefile
+++ b/Makefile
@@ -82,7 +82,7 @@ ct: $(REBAR) merge-config
static_checks:
@$(REBAR) as check do xref, dialyzer
@if [ "$${PROFILE}" = 'emqx-enterprise' ]; then $(REBAR) ct --suite apps/emqx/test/emqx_static_checks --readable $(CT_READABLE); fi
- @if [ "$${PROFILE}" = 'emqx-enterprise' ]; then ./scripts/check-i18n-style.sh; fi
+ ./scripts/check-i18n-style.sh
APPS=$(shell $(SCRIPTS)/find-apps.sh)
diff --git a/apps/emqx/include/emqx_release.hrl b/apps/emqx/include/emqx_release.hrl
index d246b4639..76920928b 100644
--- a/apps/emqx/include/emqx_release.hrl
+++ b/apps/emqx/include/emqx_release.hrl
@@ -35,7 +35,7 @@
-define(EMQX_RELEASE_CE, "5.0.21").
%% Enterprise edition
--define(EMQX_RELEASE_EE, "5.0.2-rc.2").
+-define(EMQX_RELEASE_EE, "5.0.2-rc.4").
%% the HTTP API version
-define(EMQX_API_VERSION, "5.0").
diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl
index 0f90677bd..6bfff38d3 100644
--- a/apps/emqx/src/emqx_schema.erl
+++ b/apps/emqx/src/emqx_schema.erl
@@ -1880,7 +1880,9 @@ mqtt_listener(Bind) ->
default => <<"3s">>
}
)},
- {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, authentication(listener)}
+ {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, (authentication(listener))#{
+ importance => ?IMPORTANCE_HIDDEN
+ }}
].
base_listener(Bind) ->
diff --git a/apps/emqx/test/emqx_banned_SUITE.erl b/apps/emqx/test/emqx_banned_SUITE.erl
index 605c1de6d..80427ac47 100644
--- a/apps/emqx/test/emqx_banned_SUITE.erl
+++ b/apps/emqx/test/emqx_banned_SUITE.erl
@@ -154,7 +154,12 @@ t_session_taken(_) ->
{clean_start, false},
{properties, #{'Session-Expiry-Interval' => 120}}
]),
- {ok, _} = emqtt:connect(C),
+ case emqtt:connect(C) of
+ {ok, _} ->
+ ok;
+ {error, econnrefused} ->
+ throw(mqtt_listener_not_ready)
+ end,
{ok, _, [0]} = emqtt:subscribe(C, Topic, []),
C
end,
@@ -168,9 +173,21 @@ t_session_taken(_) ->
lists:seq(1, MsgNum)
)
end,
-
- C1 = Connect(),
- ok = emqtt:disconnect(C1),
+ emqx_common_test_helpers:wait_for(
+ ?FUNCTION_NAME,
+ ?LINE,
+ fun() ->
+ try
+ C = Connect(),
+ emqtt:disconnect(C),
+ true
+ catch
+ throw:mqtt_listener_not_ready ->
+ false
+ end
+ end,
+ 3000
+ ),
Publish(),
diff --git a/apps/emqx/test/emqx_common_test_helpers.erl b/apps/emqx/test/emqx_common_test_helpers.erl
index 9a4461fac..077ebe138 100644
--- a/apps/emqx/test/emqx_common_test_helpers.erl
+++ b/apps/emqx/test/emqx_common_test_helpers.erl
@@ -660,6 +660,7 @@ start_slave(Name, Opts) when is_list(Opts) ->
start_slave(Name, Opts) when is_map(Opts) ->
SlaveMod = maps:get(peer_mod, Opts, ct_slave),
Node = node_name(Name),
+ put_peer_mod(Node, SlaveMod),
DoStart =
fun() ->
case SlaveMod of
@@ -669,8 +670,8 @@ start_slave(Name, Opts) when is_map(Opts) ->
[
{kill_if_fail, true},
{monitor_master, true},
- {init_timeout, 10000},
- {startup_timeout, 10000},
+ {init_timeout, 20_000},
+ {startup_timeout, 20_000},
{erl_flags, erl_flags()}
]
);
@@ -687,7 +688,6 @@ start_slave(Name, Opts) when is_map(Opts) ->
throw(Other)
end,
pong = net_adm:ping(Node),
- put_peer_mod(Node, SlaveMod),
setup_node(Node, Opts),
ok = snabbkaffe:forward_trace(Node),
Node.
diff --git a/apps/emqx/test/emqx_crl_cache_SUITE.erl b/apps/emqx/test/emqx_crl_cache_SUITE.erl
index 7a61f7835..01f9c7172 100644
--- a/apps/emqx/test/emqx_crl_cache_SUITE.erl
+++ b/apps/emqx/test/emqx_crl_cache_SUITE.erl
@@ -884,7 +884,20 @@ t_revoked(Config) ->
{port, 8883}
]),
process_flag(trap_exit, true),
- ?assertMatch({error, {{shutdown, {tls_alert, {certificate_revoked, _}}}, _}}, emqtt:connect(C)),
+ Res = emqtt:connect(C),
+    %% Apparently, there is sometimes a race condition in
+    %% `emqtt_sock:ssl_upgrade' when it calls
+    %% `ssl:controlling_process' and a badmatch happens at that
+    %% point.
+ case Res of
+ {error, {{shutdown, {tls_alert, {certificate_revoked, _}}}, _}} ->
+ ok;
+ {error, closed} ->
+ %% race condition?
+ ok;
+ _ ->
+ ct:fail("unexpected result: ~p", [Res])
+ end,
ok.
t_revoke_then_refresh(Config) ->
diff --git a/apps/emqx/test/emqx_test_janitor.erl b/apps/emqx/test/emqx_test_janitor.erl
index c9b297dc7..c3f82a3e1 100644
--- a/apps/emqx/test/emqx_test_janitor.erl
+++ b/apps/emqx/test/emqx_test_janitor.erl
@@ -65,7 +65,7 @@ terminate(_Reason, #{callbacks := Callbacks}) ->
handle_call({push, Callback}, _From, State = #{callbacks := Callbacks}) ->
{reply, ok, State#{callbacks := [Callback | Callbacks]}};
handle_call(terminate, _From, State = #{callbacks := Callbacks}) ->
- lists:foreach(fun(Fun) -> Fun() end, Callbacks),
+ lists:foreach(fun(Fun) -> catch Fun() end, Callbacks),
{stop, normal, ok, State};
handle_call(_Req, _From, State) ->
{reply, error, State}.
diff --git a/apps/emqx_authn/src/emqx_authn.app.src b/apps/emqx_authn/src/emqx_authn.app.src
index 6a3ffbdb4..caa59e455 100644
--- a/apps/emqx_authn/src/emqx_authn.app.src
+++ b/apps/emqx_authn/src/emqx_authn.app.src
@@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_authn, [
{description, "EMQX Authentication"},
- {vsn, "0.1.15"},
+ {vsn, "0.1.16"},
{modules, []},
{registered, [emqx_authn_sup, emqx_authn_registry]},
{applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]},
diff --git a/apps/emqx_authn/src/emqx_authn_api.erl b/apps/emqx_authn/src/emqx_authn_api.erl
index 0a7f67f5a..ad9cd8579 100644
--- a/apps/emqx_authn/src/emqx_authn_api.erl
+++ b/apps/emqx_authn/src/emqx_authn_api.erl
@@ -1419,14 +1419,14 @@ request_user_create_examples() ->
summary => <<"Regular user">>,
value => #{
user_id => <<"user1">>,
- password => <<"secret">>
+ password => <<"******">>
}
},
super_user => #{
summary => <<"Superuser">>,
value => #{
user_id => <<"user2">>,
- password => <<"secret">>,
+ password => <<"******">>,
is_superuser => true
}
}
@@ -1437,13 +1437,13 @@ request_user_update_examples() ->
regular_user => #{
summary => <<"Update regular user">>,
value => #{
- password => <<"newsecret">>
+ password => <<"******">>
}
},
super_user => #{
summary => <<"Update user and promote to superuser">>,
value => #{
- password => <<"newsecret">>,
+ password => <<"******">>,
is_superuser => true
}
}
diff --git a/apps/emqx_authz/src/emqx_authz.app.src b/apps/emqx_authz/src/emqx_authz.app.src
index 943978519..2f8b26894 100644
--- a/apps/emqx_authz/src/emqx_authz.app.src
+++ b/apps/emqx_authz/src/emqx_authz.app.src
@@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_authz, [
{description, "An OTP application"},
- {vsn, "0.1.15"},
+ {vsn, "0.1.16"},
{registered, []},
{mod, {emqx_authz_app, []}},
{applications, [
diff --git a/apps/emqx_authz/src/emqx_authz_schema.erl b/apps/emqx_authz/src/emqx_authz_schema.erl
index b15d4abd4..6630ed526 100644
--- a/apps/emqx_authz/src/emqx_authz_schema.erl
+++ b/apps/emqx_authz/src/emqx_authz_schema.erl
@@ -492,7 +492,9 @@ authz_fields() ->
?ARRAY(?UNION(UnionMemberSelector)),
#{
default => [],
- desc => ?DESC(sources)
+ desc => ?DESC(sources),
+            %% doc_lift forces a root-level reference instead of nesting sub-structs
+ extra => #{doc_lift => true}
}
)}
].
diff --git a/apps/emqx_bridge/src/emqx_bridge_api.erl b/apps/emqx_bridge/src/emqx_bridge_api.erl
index 586c66bef..44a478bca 100644
--- a/apps/emqx_bridge/src/emqx_bridge_api.erl
+++ b/apps/emqx_bridge/src/emqx_bridge_api.erl
@@ -235,7 +235,7 @@ mqtt_main_example() ->
server => <<"127.0.0.1:1883">>,
proto_ver => <<"v4">>,
username => <<"foo">>,
- password => <<"bar">>,
+ password => <<"******">>,
clean_start => true,
keepalive => <<"300s">>,
retry_interval => <<"15s">>,
diff --git a/apps/emqx_conf/src/emqx_cluster_rpc.erl b/apps/emqx_conf/src/emqx_cluster_rpc.erl
index f7c34031c..0382045d4 100644
--- a/apps/emqx_conf/src/emqx_cluster_rpc.erl
+++ b/apps/emqx_conf/src/emqx_cluster_rpc.erl
@@ -270,9 +270,6 @@ fast_forward_to_commit(Node, ToTnxId) ->
%% @private
init([Node, RetryMs]) ->
- %% Workaround for https://github.com/emqx/mria/issues/94:
- _ = mria_rlog:wait_for_shards([?CLUSTER_RPC_SHARD], 1000),
- _ = mria:wait_for_tables([?CLUSTER_MFA, ?CLUSTER_COMMIT]),
{ok, _} = mnesia:subscribe({table, ?CLUSTER_MFA, simple}),
State = #{node => Node, retry_interval => RetryMs},
%% The init transaction ID is set in emqx_conf_app after
@@ -286,6 +283,9 @@ init([Node, RetryMs]) ->
%% @private
handle_continue(?CATCH_UP, State) ->
+    %% The emqx app must be started before
+    %% trying to catch up on the cluster RPC commit logs.
+ ok = wait_for_emqx_ready(),
{noreply, State, catch_up(State)}.
handle_call(reset, _From, State) ->
@@ -572,3 +572,37 @@ maybe_init_tnx_id(_Node, TnxId) when TnxId < 0 -> ok;
maybe_init_tnx_id(Node, TnxId) ->
{atomic, _} = transaction(fun ?MODULE:commit/2, [Node, TnxId]),
ok.
+
+%% @private Cannot proceed until the emqx app is ready.
+%% Otherwise the committed transaction catch-up may fail.
+wait_for_emqx_ready() ->
+    %% wait up to 10 seconds for emqx to start
+ ok = do_wait_for_emqx_ready(10).
+
+%% Wait for the emqx app to be ready,
+%% logging a warning every second.
+do_wait_for_emqx_ready(0) ->
+ timeout;
+do_wait_for_emqx_ready(N) ->
+    %% the check interval is 100ms,
+    %% so each round waits up to 1 second
+ case do_wait_for_emqx_ready2(10) of
+ ok ->
+ ok;
+ timeout ->
+            ?SLOG(warning, #{msg => "still_waiting_for_emqx_app_to_be_ready"}),
+ do_wait_for_emqx_ready(N - 1)
+ end.
+
+%% Wait for the emqx app to be ready,
+%% checking every 100ms.
+do_wait_for_emqx_ready2(0) ->
+ timeout;
+do_wait_for_emqx_ready2(N) ->
+ case emqx:is_running() of
+ true ->
+ ok;
+ false ->
+ timer:sleep(100),
+ do_wait_for_emqx_ready2(N - 1)
+ end.
diff --git a/apps/emqx_conf/src/emqx_cluster_rpc_handler.erl b/apps/emqx_conf/src/emqx_cluster_rpc_cleaner.erl
similarity index 97%
rename from apps/emqx_conf/src/emqx_cluster_rpc_handler.erl
rename to apps/emqx_conf/src/emqx_cluster_rpc_cleaner.erl
index c3d946a91..bce866c2d 100644
--- a/apps/emqx_conf/src/emqx_cluster_rpc_handler.erl
+++ b/apps/emqx_conf/src/emqx_cluster_rpc_cleaner.erl
@@ -13,7 +13,9 @@
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
--module(emqx_cluster_rpc_handler).
+
+%% @doc This module is responsible for cleaning up the cluster RPC MFA.
+-module(emqx_cluster_rpc_cleaner).
-behaviour(gen_server).
diff --git a/apps/emqx_conf/src/emqx_conf_app.erl b/apps/emqx_conf/src/emqx_conf_app.erl
index 0896eb718..51fc5c2e2 100644
--- a/apps/emqx_conf/src/emqx_conf_app.erl
+++ b/apps/emqx_conf/src/emqx_conf_app.erl
@@ -95,19 +95,22 @@ init_load() ->
-endif.
init_conf() ->
+ %% Workaround for https://github.com/emqx/mria/issues/94:
+ _ = mria_rlog:wait_for_shards([?CLUSTER_RPC_SHARD], 1000),
+ _ = mria:wait_for_tables([?CLUSTER_MFA, ?CLUSTER_COMMIT]),
{ok, TnxId} = copy_override_conf_from_core_node(),
_ = emqx_app:set_init_tnx_id(TnxId),
ok = init_load(),
ok = emqx_app:set_init_config_load_done().
cluster_nodes() ->
- maps:get(running_nodes, ekka_cluster:info()) -- [node()].
+ mria:cluster_nodes(cores) -- [node()].
copy_override_conf_from_core_node() ->
case cluster_nodes() of
%% The first core nodes is self.
[] ->
- ?SLOG(debug, #{msg => "skip_copy_overide_conf_from_core_node"}),
+ ?SLOG(debug, #{msg => "skip_copy_override_conf_from_core_node"}),
{ok, ?DEFAULT_INIT_TXN_ID};
Nodes ->
{Results, Failed} = emqx_conf_proto_v2:get_override_config_file(Nodes),
@@ -141,7 +144,7 @@ copy_override_conf_from_core_node() ->
%% finish the boot sequence and load the
%% config for other nodes to copy it.
?SLOG(info, #{
- msg => "skip_copy_overide_conf_from_core_node",
+ msg => "skip_copy_override_conf_from_core_node",
loading_from_disk => true,
nodes => Nodes,
failed => Failed,
@@ -153,7 +156,7 @@ copy_override_conf_from_core_node() ->
Jitter = rand:uniform(2_000),
Timeout = 10_000 + Jitter,
?SLOG(info, #{
- msg => "copy_overide_conf_from_core_node_retry",
+ msg => "copy_override_conf_from_core_node_retry",
timeout => Timeout,
nodes => Nodes,
failed => Failed,
@@ -166,7 +169,7 @@ copy_override_conf_from_core_node() ->
[{ok, Info} | _] = lists:sort(fun conf_sort/2, Ready),
#{node := Node, conf := RawOverrideConf, tnx_id := TnxId} = Info,
?SLOG(debug, #{
- msg => "copy_overide_conf_from_core_node_success",
+ msg => "copy_override_conf_from_core_node_success",
node => Node,
cluster_override_conf_file => application:get_env(
emqx, cluster_override_conf_file
diff --git a/apps/emqx_conf/src/emqx_conf_sup.erl b/apps/emqx_conf/src/emqx_conf_sup.erl
index d4411af4b..6a3d795ae 100644
--- a/apps/emqx_conf/src/emqx_conf_sup.erl
+++ b/apps/emqx_conf/src/emqx_conf_sup.erl
@@ -36,7 +36,7 @@ init([]) ->
ChildSpecs =
[
child_spec(emqx_cluster_rpc, []),
- child_spec(emqx_cluster_rpc_handler, [])
+ child_spec(emqx_cluster_rpc_cleaner, [])
],
{ok, {SupFlags, ChildSpecs}}.
diff --git a/apps/emqx_conf/test/emqx_cluster_rpc_SUITE.erl b/apps/emqx_conf/test/emqx_cluster_rpc_SUITE.erl
index f7d3c76fd..8cdfcaeea 100644
--- a/apps/emqx_conf/test/emqx_cluster_rpc_SUITE.erl
+++ b/apps/emqx_conf/test/emqx_cluster_rpc_SUITE.erl
@@ -43,6 +43,7 @@ groups() -> [].
init_per_suite(Config) ->
application:load(emqx_conf),
ok = ekka:start(),
+ ok = emqx_common_test_helpers:start_apps([]),
ok = mria_rlog:wait_for_shards([?CLUSTER_RPC_SHARD], infinity),
ok = emqx_config:put([node, cluster_call, retry_interval], 1000),
meck:new(emqx_alarm, [non_strict, passthrough, no_link]),
@@ -53,6 +54,7 @@ init_per_suite(Config) ->
Config.
end_per_suite(_Config) ->
+ ok = emqx_common_test_helpers:stop_apps([]),
ekka:stop(),
mria:stop(),
meck:unload(mria),
@@ -255,13 +257,13 @@ t_fast_forward_commit(_Config) ->
),
ok.
-t_handler_unexpected_msg(_Config) ->
- Handler = emqx_cluster_rpc_handler,
- OldPid = erlang:whereis(Handler),
- ok = gen_server:cast(Handler, unexpected_cast_msg),
- ignore = gen_server:call(Handler, unexpected_cast_msg),
- erlang:send(Handler, unexpected_info_msg),
- NewPid = erlang:whereis(Handler),
+t_cleaner_unexpected_msg(_Config) ->
+    Cleaner = emqx_cluster_rpc_cleaner,
+ OldPid = erlang:whereis(Cleaner),
+ ok = gen_server:cast(Cleaner, unexpected_cast_msg),
+ ignore = gen_server:call(Cleaner, unexpected_cast_msg),
+ erlang:send(Cleaner, unexpected_info_msg),
+ NewPid = erlang:whereis(Cleaner),
?assertEqual(OldPid, NewPid),
ok.
@@ -279,8 +281,8 @@ start() ->
{ok, Pid1} = emqx_cluster_rpc:start_link(),
{ok, Pid2} = emqx_cluster_rpc:start_link({node(), ?NODE2}, ?NODE2, 500),
{ok, Pid3} = emqx_cluster_rpc:start_link({node(), ?NODE3}, ?NODE3, 500),
- {ok, Pid4} = emqx_cluster_rpc_handler:start_link(100, 500),
- true = erlang:register(emqx_cluster_rpc_handler, Pid4),
+ {ok, Pid4} = emqx_cluster_rpc_cleaner:start_link(100, 500),
+ true = erlang:register(emqx_cluster_rpc_cleaner, Pid4),
{ok, [Pid1, Pid2, Pid3, Pid4]}.
stop() ->
@@ -296,7 +298,7 @@ stop() ->
end
|| N <- [?NODE1, ?NODE2, ?NODE3]
],
- gen_server:stop(emqx_cluster_rpc_handler, normal, 5000).
+ gen_server:stop(emqx_cluster_rpc_cleaner, normal, 5000).
receive_msg(0, _Msg) ->
ok;
diff --git a/apps/emqx_connector/src/emqx_connector_mysql.erl b/apps/emqx_connector/src/emqx_connector_mysql.erl
index 68ec59894..fe495252a 100644
--- a/apps/emqx_connector/src/emqx_connector_mysql.erl
+++ b/apps/emqx_connector/src/emqx_connector_mysql.erl
@@ -172,10 +172,15 @@ on_query(
%% not return result, next loop will try again
on_query(InstId, {TypeOrKey, SQLOrKey, Params, Timeout}, State);
{error, Reason} ->
- LogMeta = #{connector => InstId, sql => SQLOrKey, state => State},
- ?SLOG(
+ ?tp(
error,
- LogMeta#{msg => "mysql_connector_do_prepare_failed", reason => Reason}
+ "mysql_connector_do_prepare_failed",
+ #{
+ connector => InstId,
+ sql => SQLOrKey,
+ state => State,
+ reason => Reason
+ }
),
{error, Reason}
end;
@@ -417,12 +422,10 @@ on_sql_query(
),
do_sql_query(SQLFunc, Conn, SQLOrKey, Params, Timeout, LogMeta);
{error, disconnected} ->
- ?SLOG(
+ ?tp(
error,
- LogMeta#{
- msg => "mysql_connector_do_sql_query_failed",
- reason => worker_is_disconnected
- }
+ "mysql_connector_do_sql_query_failed",
+ LogMeta#{reason => worker_is_disconnected}
),
{error, {recoverable_error, disconnected}}
end.
diff --git a/apps/emqx_connector/src/emqx_connector_pgsql.erl b/apps/emqx_connector/src/emqx_connector_pgsql.erl
index 1fc994275..14cbbc80f 100644
--- a/apps/emqx_connector/src/emqx_connector_pgsql.erl
+++ b/apps/emqx_connector/src/emqx_connector_pgsql.erl
@@ -44,7 +44,8 @@
execute_batch/3
]).
--export([do_get_status/1]).
+%% for ecpool workers usage
+-export([do_get_status/1, prepare_sql_to_conn/2]).
-define(PGSQL_HOST_OPTIONS, #{
default_port => ?PGSQL_DEFAULT_PORT
diff --git a/apps/emqx_gateway/src/emqx_gateway_schema.erl b/apps/emqx_gateway/src/emqx_gateway_schema.erl
index 84e9ee7e4..f0e65627f 100644
--- a/apps/emqx_gateway/src/emqx_gateway_schema.erl
+++ b/apps/emqx_gateway/src/emqx_gateway_schema.erl
@@ -91,20 +91,20 @@ fields(clientinfo_override) ->
];
fields(udp_listeners) ->
[
- {udp, sc(map(name, ref(udp_listener)), #{desc => ?DESC(udp_listener)})},
- {dtls, sc(map(name, ref(dtls_listener)), #{desc => ?DESC(dtls_listener)})}
+ {udp, sc(map(name, ref(udp_listener)), #{desc => ?DESC(listener_name_to_settings_map)})},
+ {dtls, sc(map(name, ref(dtls_listener)), #{desc => ?DESC(listener_name_to_settings_map)})}
];
fields(tcp_listeners) ->
[
- {tcp, sc(map(name, ref(tcp_listener)), #{desc => ?DESC(tcp_listener)})},
- {ssl, sc(map(name, ref(ssl_listener)), #{desc => ?DESC(ssl_listener)})}
+ {tcp, sc(map(name, ref(tcp_listener)), #{desc => ?DESC(listener_name_to_settings_map)})},
+ {ssl, sc(map(name, ref(ssl_listener)), #{desc => ?DESC(listener_name_to_settings_map)})}
];
fields(tcp_udp_listeners) ->
[
- {tcp, sc(map(name, ref(tcp_listener)), #{desc => ?DESC(tcp_listener)})},
- {ssl, sc(map(name, ref(ssl_listener)), #{desc => ?DESC(ssl_listener)})},
- {udp, sc(map(name, ref(udp_listener)), #{desc => ?DESC(udp_listener)})},
- {dtls, sc(map(name, ref(dtls_listener)), #{desc => ?DESC(dtls_listener)})}
+ {tcp, sc(map(name, ref(tcp_listener)), #{desc => ?DESC(listener_name_to_settings_map)})},
+ {ssl, sc(map(name, ref(ssl_listener)), #{desc => ?DESC(listener_name_to_settings_map)})},
+ {udp, sc(map(name, ref(udp_listener)), #{desc => ?DESC(listener_name_to_settings_map)})},
+ {dtls, sc(map(name, ref(dtls_listener)), #{desc => ?DESC(listener_name_to_settings_map)})}
];
fields(tcp_listener) ->
%% some special configs for tcp listener
@@ -167,19 +167,19 @@ desc(udp_listeners) ->
desc(tcp_listeners) ->
"Settings for the TCP listeners.";
desc(tcp_udp_listeners) ->
- "Settings for the listeners.";
+ "Settings for TCP and UDP listeners.";
desc(tcp_listener) ->
- "Settings for the TCP listener.";
+ "Settings for TCP listener.";
desc(ssl_listener) ->
- "Settings for the SSL listener.";
+ "Settings for SSL listener.";
desc(udp_listener) ->
- "Settings for the UDP listener.";
+ "Settings for UDP listener.";
desc(dtls_listener) ->
- "Settings for the DTLS listener.";
+ "Settings for DTLS listener.";
desc(udp_opts) ->
- "Settings for the UDP sockets.";
+ "Settings for UDP sockets.";
desc(dtls_opts) ->
- "Settings for the DTLS protocol.";
+ "Settings for DTLS protocol.";
desc(_) ->
undefined.
@@ -189,6 +189,8 @@ authentication_schema() ->
#{
required => {false, recursively},
desc => ?DESC(gateway_common_authentication),
+ %% we do not expose this to the user for now
+ importance => ?IMPORTANCE_HIDDEN,
examples => emqx_authn_api:authenticator_examples()
}
).
@@ -234,7 +236,7 @@ mountpoint(Default) ->
binary(),
#{
default => iolist_to_binary(Default),
- desc => ?DESC(gateway_common_mountpoint)
+ desc => ?DESC(gateway_mountpoint)
}
).
@@ -283,7 +285,7 @@ common_listener_opts() ->
binary(),
#{
default => undefined,
- desc => ?DESC(gateway_common_listener_mountpoint)
+ desc => ?DESC(gateway_mountpoint)
}
)},
{access_rules,
diff --git a/apps/emqx_management/README.md b/apps/emqx_management/README.md
index fa37d0f0f..aa5d0c606 100644
--- a/apps/emqx_management/README.md
+++ b/apps/emqx_management/README.md
@@ -1,12 +1,42 @@
-# emqx-management
+# EMQX Management
-EMQX Management API
+EMQX Management offers various interfaces for administrators to interact with
+the system: a remote console attached to a running node, a CLI (i.e.
+`./emqx ctl`), or its rich CRUD-style REST API (mostly used by the EMQX
+dashboard). It enables administrators to modify both cluster and
+individual node configurations, and provides the ability to view and reset
+various statistics and metrics.
-## How to Design RESTful API?
+## Functionality
-http://restful-api-design.readthedocs.io/en/latest/scope.html
+Among other things, it allows administrators to manage
-default application see:
-header:
-authorization: Basic YWRtaW46cHVibGlj
+* Alarms
+* API Keys
+* Banned clients, users or hosts
+* Clients (and sessions) including their topic subscriptions
+* Configurations
+* Plugins
+* Fixed subscriptions
+* Topics
+
+Moreover, it lets you
+
+* modify hot and non-hot updatable configuration values,
+* publish messages, as well as bulk messages,
+* create trace files,
+* and, last but not least, monitor system status.
+
+## Implementation Notes
+
+API endpoints are implemented using the `minirest` framework in combination with
+HOCON schema and OpenAPI 3.0 specifications.
+
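+As a rough, hypothetical sketch (the module name, path, and handler below are
+made up for illustration and are not part of the code base), an API module in
+this style typically exposes `api_spec/0`, `paths/0`, a `schema/1` clause per
+path, and one handler function per operation:
+
+```erlang
+%% Hypothetical example module -- not an actual module of this application.
+-module(emqx_mgmt_api_example).
+
+-behaviour(minirest_api).
+
+-export([api_spec/0, paths/0, schema/1, status/2]).
+
+%% Let the swagger helper derive the OpenAPI spec from this module's schema.
+api_spec() ->
+    emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}).
+
+%% REST paths served by this module.
+paths() ->
+    ["/example/status"].
+
+%% Per-path schema; the exact map format follows the emqx_dashboard_swagger
+%% conventions and is simplified here.
+schema("/example/status") ->
+    #{'operationId' => status, get => #{responses => #{200 => <<"OK">>}}}.
+
+%% Handler: called with the HTTP method and the request context.
+status(get, _Params) ->
+    {200, #{status => <<"running">>}}.
+```
+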
+## TODO/FIXME
+
+In its current state, there are some reverse dependencies from other applications
+that call directly into `emqx_mgmt`.
+
+Also, and somewhat related, its bpapi proto modules call directly into
+other applications.
diff --git a/apps/emqx_modules/src/emqx_delayed_api.erl b/apps/emqx_modules/src/emqx_delayed_api.erl
index d4e7e5b90..766d23d6b 100644
--- a/apps/emqx_modules/src/emqx_delayed_api.erl
+++ b/apps/emqx_modules/src/emqx_delayed_api.erl
@@ -52,7 +52,7 @@
-define(INVALID_NODE, 'INVALID_NODE').
api_spec() ->
- emqx_dashboard_swagger:spec(?MODULE).
+ emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}).
paths() ->
[
@@ -202,9 +202,9 @@ delayed_message(get, #{bindings := #{node := NodeBin, msgid := HexId}}) ->
{200, Message#{payload => base64:encode(Payload)}}
end;
{error, not_found} ->
- {404, generate_http_code_map(not_found, Id)};
+ {404, generate_http_code_map(not_found, HexId)};
{badrpc, _} ->
- {400, generate_http_code_map(invalid_node, Id)}
+ {400, generate_http_code_map(invalid_node, NodeBin)}
end
end
);
@@ -271,19 +271,19 @@ generate_http_code_map(id_schema_error, Id) ->
#{
code => ?MESSAGE_ID_SCHEMA_ERROR,
message =>
- iolist_to_binary(io_lib:format("Message ID ~p schema error", [Id]))
+ iolist_to_binary(io_lib:format("Message ID ~s schema error", [Id]))
};
generate_http_code_map(not_found, Id) ->
#{
code => ?MESSAGE_ID_NOT_FOUND,
message =>
- iolist_to_binary(io_lib:format("Message ID ~p not found", [Id]))
+ iolist_to_binary(io_lib:format("Message ID ~s not found", [Id]))
};
generate_http_code_map(invalid_node, Node) ->
#{
code => ?INVALID_NODE,
message =>
- iolist_to_binary(io_lib:format("The node name ~p is invalid", [Node]))
+ iolist_to_binary(io_lib:format("The node name ~s is invalid", [Node]))
}.
make_maybe(X, Error, Fun) ->
diff --git a/apps/emqx_modules/src/emqx_modules.app.src b/apps/emqx_modules/src/emqx_modules.app.src
index 4a9cb6723..fdc13f354 100644
--- a/apps/emqx_modules/src/emqx_modules.app.src
+++ b/apps/emqx_modules/src/emqx_modules.app.src
@@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_modules, [
{description, "EMQX Modules"},
- {vsn, "5.0.11"},
+ {vsn, "5.0.12"},
{modules, []},
{applications, [kernel, stdlib, emqx, emqx_ctl]},
{mod, {emqx_modules_app, []}},
diff --git a/apps/emqx_resource/src/emqx_resource.erl b/apps/emqx_resource/src/emqx_resource.erl
index 2e72c2a28..0f7e93a9b 100644
--- a/apps/emqx_resource/src/emqx_resource.erl
+++ b/apps/emqx_resource/src/emqx_resource.erl
@@ -356,7 +356,14 @@ is_buffer_supported(Module) ->
-spec call_start(manager_id(), module(), resource_config()) ->
{ok, resource_state()} | {error, Reason :: term()}.
call_start(MgrId, Mod, Config) ->
- ?SAFE_CALL(Mod:on_start(MgrId, Config)).
+ try
+ Mod:on_start(MgrId, Config)
+ catch
+ throw:Error ->
+ {error, Error};
+ Kind:Error:Stacktrace ->
+ {error, #{exception => Kind, reason => Error, stacktrace => Stacktrace}}
+ end.
-spec call_health_check(manager_id(), module(), resource_state()) ->
resource_status()
diff --git a/apps/emqx_resource/src/schema/emqx_resource_schema.erl b/apps/emqx_resource/src/schema/emqx_resource_schema.erl
index e89278e8c..647a40fed 100644
--- a/apps/emqx_resource/src/schema/emqx_resource_schema.erl
+++ b/apps/emqx_resource/src/schema/emqx_resource_schema.erl
@@ -30,18 +30,6 @@ namespace() -> "resource_schema".
roots() -> [].
-fields("resource_opts_sync_only") ->
- [
- {resource_opts,
- mk(
- ref(?MODULE, "creation_opts_sync_only"),
- resource_opts_meta()
- )}
- ];
-fields("creation_opts_sync_only") ->
- Fields = fields("creation_opts"),
- QueryMod = {query_mode, fun query_mode_sync_only/1},
- lists:keyreplace(query_mode, 1, Fields, QueryMod);
fields("resource_opts") ->
[
{resource_opts,
@@ -82,7 +70,7 @@ worker_pool_size(required) -> false;
worker_pool_size(_) -> undefined.
resume_interval(type) -> emqx_schema:duration_ms();
-resume_interval(importance) -> hidden;
+resume_interval(importance) -> ?IMPORTANCE_HIDDEN;
resume_interval(desc) -> ?DESC("resume_interval");
resume_interval(required) -> false;
resume_interval(_) -> undefined.
@@ -117,12 +105,6 @@ query_mode(default) -> async;
query_mode(required) -> false;
query_mode(_) -> undefined.
-query_mode_sync_only(type) -> enum([sync]);
-query_mode_sync_only(desc) -> ?DESC("query_mode_sync_only");
-query_mode_sync_only(default) -> sync;
-query_mode_sync_only(required) -> false;
-query_mode_sync_only(_) -> undefined.
-
request_timeout(type) -> hoconsc:union([infinity, emqx_schema:duration_ms()]);
request_timeout(desc) -> ?DESC("request_timeout");
request_timeout(default) -> <<"15s">>;
@@ -167,7 +149,4 @@ max_queue_bytes(default) -> ?DEFAULT_QUEUE_SIZE_RAW;
max_queue_bytes(required) -> false;
max_queue_bytes(_) -> undefined.
-desc("creation_opts") ->
- ?DESC("creation_opts");
-desc("creation_opts_sync_only") ->
- ?DESC("creation_opts").
+desc("creation_opts") -> ?DESC("creation_opts").
diff --git a/apps/emqx_rule_engine/src/emqx_rule_runtime.erl b/apps/emqx_rule_engine/src/emqx_rule_runtime.erl
index ed6cd22de..e8d807d38 100644
--- a/apps/emqx_rule_engine/src/emqx_rule_runtime.erl
+++ b/apps/emqx_rule_engine/src/emqx_rule_runtime.erl
@@ -508,12 +508,12 @@ nested_put(Alias, Val, Columns0) ->
emqx_rule_maps:nested_put(Alias, Val, Columns).
-define(IS_RES_DOWN(R), R == stopped; R == not_connected; R == not_found).
-inc_action_metrics(ok, RuleId) ->
- emqx_metrics_worker:inc(rule_metrics, RuleId, 'actions.success');
inc_action_metrics({error, {recoverable_error, _}}, RuleId) ->
emqx_metrics_worker:inc(rule_metrics, RuleId, 'actions.failed.out_of_service');
inc_action_metrics(?RESOURCE_ERROR_M(R, _), RuleId) when ?IS_RES_DOWN(R) ->
emqx_metrics_worker:inc(rule_metrics, RuleId, 'actions.failed.out_of_service');
+inc_action_metrics({error, {unrecoverable_error, _}}, RuleId) ->
+ emqx_metrics_worker:inc(rule_metrics, RuleId, 'actions.failed');
inc_action_metrics(R, RuleId) ->
case is_ok_result(R) of
false ->
@@ -527,5 +527,5 @@ is_ok_result(ok) ->
true;
is_ok_result(R) when is_tuple(R) ->
ok == erlang:element(1, R);
-is_ok_result(ok) ->
+is_ok_result(_) ->
false.
diff --git a/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl b/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl
index 209332fe7..94adb3506 100644
--- a/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl
+++ b/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl
@@ -687,10 +687,6 @@ t_jq(_) ->
got_timeout
end,
ConfigRootKey = emqx_rule_engine_schema:namespace(),
- DefaultTimeOut = emqx_config:get([
- ConfigRootKey,
- jq_function_default_timeout
- ]),
?assertThrow(
{jq_exception, {timeout, _}},
apply_func(jq, [TOProgram, <<"-2">>])
diff --git a/changes/ce/feat-10306.en.md b/changes/ce/feat-10306.en.md
new file mode 100644
index 000000000..11754c5c0
--- /dev/null
+++ b/changes/ce/feat-10306.en.md
@@ -0,0 +1,3 @@
+Add support for `async` query mode for most bridges.
+
+Before this change, some bridges (Cassandra, MongoDB, MySQL, Postgres, Redis, RocketMQ, TDengine) could only be created with the `sync` query mode.
diff --git a/changes/ce/fix-10313.en.md b/changes/ce/fix-10313.en.md
new file mode 100644
index 000000000..ca1a3b391
--- /dev/null
+++ b/changes/ce/fix-10313.en.md
@@ -0,0 +1,2 @@
+Ensure that when a core or replicant node starts, the `cluster-override.conf` file is only copied from a core node.
+Previously, when sorting nodes by startup time, a core node might have copied this file from a replicant node.
diff --git a/changes/ce/fix-10313.zh.md b/changes/ce/fix-10313.zh.md
new file mode 100644
index 000000000..94f118ece
--- /dev/null
+++ b/changes/ce/fix-10313.zh.md
@@ -0,0 +1,2 @@
+确保当 core 或 replicant 节点启动时,仅从 core 节点复制 `cluster-override.conf` 文件。
+此前按照节点启动时间排序时,core 节点可能从 replicant 节点复制该文件。
diff --git a/changes/ce/fix-10315.en.md b/changes/ce/fix-10315.en.md
new file mode 100644
index 000000000..67445252d
--- /dev/null
+++ b/changes/ce/fix-10315.en.md
@@ -0,0 +1 @@
+Fix a crash when checking the `limit` and `page` parameters in the `/mqtt/delayed/messages` API call.
diff --git a/changes/ce/fix-10317.en.md b/changes/ce/fix-10317.en.md
new file mode 100644
index 000000000..7a83dcaca
--- /dev/null
+++ b/changes/ce/fix-10317.en.md
@@ -0,0 +1 @@
+Do not expose listener-level authentication settings until they have been more extensively verified.
diff --git a/changes/ce/fix-10317.zh.md b/changes/ce/fix-10317.zh.md
new file mode 100644
index 000000000..69cf09901
--- /dev/null
+++ b/changes/ce/fix-10317.zh.md
@@ -0,0 +1 @@
+在大量验证完成前不暴露监听器级的认证功能。
diff --git a/changes/ce/fix-10323.en.md b/changes/ce/fix-10323.en.md
new file mode 100644
index 000000000..1bb678875
--- /dev/null
+++ b/changes/ce/fix-10323.en.md
@@ -0,0 +1,2 @@
+For security reasons, the value of the `password` field in the API examples is replaced with `******`.
+
diff --git a/changes/ce/fix-10323.zh.md b/changes/ce/fix-10323.zh.md
new file mode 100644
index 000000000..4f7acfc56
--- /dev/null
+++ b/changes/ce/fix-10323.zh.md
@@ -0,0 +1,2 @@
+出于安全原因,将 API 示例中 `password` 字段的值,统一更换为 `******`。
+
diff --git a/changes/ce/fix-10327.en.md b/changes/ce/fix-10327.en.md
new file mode 100644
index 000000000..4fa561779
--- /dev/null
+++ b/changes/ce/fix-10327.en.md
@@ -0,0 +1,4 @@
+Do not increment the 'actions.failed.unknown' rule metrics counter upon receiving unrecoverable bridge errors.
+This counter is displayed on the dashboard's rule overview tab ('Action statistics' - 'Unknown').
+The fix only applies to synchronous bridges, as all rule actions for asynchronous bridges
+are counted as successful (they increment 'actions.success', which is displayed as 'Action statistics' - 'Success').
diff --git a/changes/ee/fix-10201.en.md b/changes/ee/fix-10201.en.md
new file mode 100644
index 000000000..b3dd53150
--- /dev/null
+++ b/changes/ee/fix-10201.en.md
@@ -0,0 +1 @@
+Removed the redundant database name from the SQL template of the TDengine bridge.
diff --git a/changes/ee/fix-10201.zh.md b/changes/ee/fix-10201.zh.md
new file mode 100644
index 000000000..53b175551
--- /dev/null
+++ b/changes/ee/fix-10201.zh.md
@@ -0,0 +1 @@
+在 TDengine 桥接的 SQL 模板中,删除了多余的数据库表名。
diff --git a/changes/ee/fix-10289.en.md b/changes/ee/fix-10289.en.md
deleted file mode 100644
index 65eed7b5d..000000000
--- a/changes/ee/fix-10289.en.md
+++ /dev/null
@@ -1 +0,0 @@
-Clickhouse has got a fix that makes the error message better when users click the test button in the settings dialog.
diff --git a/changes/ee/fix-10289.zh.md b/changes/ee/fix-10289.zh.md
deleted file mode 100644
index d47278c16..000000000
--- a/changes/ee/fix-10289.zh.md
+++ /dev/null
@@ -1 +0,0 @@
-Clickhouse 已经修复了一个问题,当用户在设置对话框中点击测试按钮时,错误信息会更清晰。
diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_clickhouse.conf b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_clickhouse.conf
deleted file mode 100644
index 6a28b371a..000000000
--- a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_clickhouse.conf
+++ /dev/null
@@ -1,109 +0,0 @@
-emqx_ee_bridge_clickhouse {
-
- local_topic {
- desc {
- en: """The MQTT topic filter to be forwarded to Clickhouse. All MQTT 'PUBLISH' messages with the topic
-matching the local_topic will be forwarded.
-NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is
-configured, then both the data got from the rule and the MQTT messages that match local_topic
-will be forwarded.
-"""
- zh: """发送到 'local_topic' 的消息都会转发到 Clickhouse。
-注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。
-"""
- }
- label {
- en: "Local Topic"
- zh: "本地 Topic"
- }
- }
- sql_template {
- desc {
- en: """SQL Template. The template string can contain placeholders
-for message metadata and payload field. The placeholders are inserted
-without any checking and special formatting, so it is important to
-ensure that the inserted values are formatted and escaped correctly."""
- zh:
- """SQL模板。模板字符串可以包含消息元数据和有效载荷字段的占位符。占位符
-的插入不需要任何检查和特殊格式化,因此必须确保插入的数值格式化和转义正确。模板字符串可以包含占位符
-模板字符串可以包含消息元数据和有效载荷字段的占位符。这些占位符被插入
-所以必须确保插入的值的格式正确。因此,确保插入的值格式化和转义正确是非常重要的。模板字符串可以包含占位符
-模板字符串可以包含消息元数据和有效载荷字段的占位符。这些占位符被插入
-所以必须确保插入的值的格式正确。确保插入的值被正确地格式化和转义。"""
- }
- label {
- en: "SQL Template"
- zh: "SQL 模板"
- }
- }
- batch_value_separator {
- desc {
- en: """The bridge repeats what comes after the VALUES or FORMAT FormatType in the
-SQL template to form a batch request. The value specified with
-this parameter will be inserted between the values. The default
-value ',' works for the VALUES format, but other values
-might be needed if you specify some other format with the
-clickhouse FORMAT syntax.
-
-See https://clickhouse.com/docs/en/sql-reference/statements/insert-into/ and
-https://clickhouse.com/docs/en/interfaces/formats#formats for more information about
-the format syntax and the available formats."""
- zh: """桥接会重复 VALUES 或 FORMAT 格式类型之后的内容。中 VALUES 或
-FORMAT FormatType 后面的内容,以形成一个批处理请求。用这个参数指定的值
-这个参数指定的值将被插入到这些值之间。默认的
-默认值','适用于VALUES格式,但是如果你指定了其他的格式,可能需要其他的值。可能需要其他值,如果你用
-"clickhouse FORMAT "语法指定其他格式。语法指定其他格式。
-
-参见https://clickhouse.com/docs/en/sql-reference/statements/insert-into/ 和
-https://clickhouse.com/docs/en/interfaces/formats#formats 了解更多关于
-格式语法和可用的格式。"""
- }
- label {
- en: "Batch Value Separator"
- zh: "批量值分离器"
- }
- }
- config_enable {
- desc {
- en: """Enable or disable this bridge"""
- zh: """启用/禁用桥接"""
- }
- label {
- en: "Enable Or Disable Bridge"
- zh: "启用/禁用桥接"
- }
- }
-
- desc_config {
- desc {
- en: """Configuration for a Clickhouse bridge."""
- zh: """Clickhouse 桥接配置"""
- }
- label: {
- en: "Clickhouse Bridge Configuration"
- zh: "Clickhouse 桥接配置"
- }
- }
-
- desc_type {
- desc {
- en: """The Bridge Type"""
- zh: """Bridge 类型"""
- }
- label {
- en: "Bridge Type"
- zh: "桥接类型"
- }
- }
-
- desc_name {
- desc {
- en: """Bridge name."""
- zh: """桥接名字"""
- }
- label {
- en: "Bridge Name"
- zh: "桥接名字"
- }
- }
-}
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_cassa.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_cassa.erl
index 12f86fcf7..78db8352a 100644
--- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_cassa.erl
+++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_cassa.erl
@@ -86,21 +86,10 @@ fields("config") ->
mk(
binary(),
#{desc => ?DESC("local_topic"), default => undefined}
- )},
- {resource_opts,
- mk(
- ref(?MODULE, "creation_opts"),
- #{
- required => false,
- default => #{},
- desc => ?DESC(emqx_resource_schema, <<"resource_opts">>)
- }
)}
- ] ++
+ ] ++ emqx_resource_schema:fields("resource_opts") ++
(emqx_ee_connector_cassa:fields(config) --
emqx_connector_schema_lib:prepare_statement_fields());
-fields("creation_opts") ->
- emqx_resource_schema:fields("creation_opts_sync_only");
fields("post") ->
fields("post", cassandra);
fields("put") ->
@@ -115,8 +104,6 @@ desc("config") ->
?DESC("desc_config");
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
["Configuration for Cassandra using `", string:to_upper(Method), "` method."];
-desc("creation_opts" = Name) ->
- emqx_resource_schema:desc(Name);
desc(_) ->
undefined.
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_clickhouse.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_clickhouse.erl
index 80a317d2b..0b611c142 100644
--- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_clickhouse.erl
+++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_clickhouse.erl
@@ -50,7 +50,7 @@ values(_Method, Type) ->
database => <<"mqtt">>,
pool_size => 8,
username => <<"default">>,
- password => <<"public">>,
+ password => <<"******">>,
sql => ?DEFAULT_SQL,
batch_value_separator => ?DEFAULT_BATCH_VALUE_SEPARATOR,
local_topic => <<"local/topic/#">>,
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_dynamo.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_dynamo.erl
index e55be61e5..e6a3d1a58 100644
--- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_dynamo.erl
+++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_dynamo.erl
@@ -46,7 +46,7 @@ values(_Method) ->
database => <<"mqtt">>,
pool_size => 8,
username => <<"root">>,
- password => <<"public">>,
+ password => <<"******">>,
template => ?DEFAULT_TEMPLATE,
local_topic => <<"local/topic/#">>,
resource_opts => #{
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_influxdb.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_influxdb.erl
index 1ad3af23c..5693a1902 100644
--- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_influxdb.erl
+++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_influxdb.erl
@@ -61,7 +61,7 @@ values("influxdb_api_v1", post) ->
TypeOpts = #{
database => <<"example_database">>,
username => <<"example_username">>,
- password => <<"examlpe_password">>,
+ password => <<"******">>,
server => <<"127.0.0.1:8086">>
},
values(common, "influxdb_api_v1", SupportUint, TypeOpts);
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_kafka.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_kafka.erl
index e11ef1c93..f3dfa5964 100644
--- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_kafka.erl
+++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_kafka.erl
@@ -64,7 +64,7 @@ values(common_config) ->
authentication => #{
mechanism => <<"plain">>,
username => <<"username">>,
- password => <<"password">>
+ password => <<"******">>
},
bootstrap_hosts => <<"localhost:9092">>,
connect_timeout => <<"5s">>,
@@ -233,7 +233,7 @@ fields(socket_opts) ->
boolean(),
#{
default => true,
- hidden => true,
+ importance => ?IMPORTANCE_HIDDEN,
desc => ?DESC(socket_nodelay)
}
)}
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mongodb.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mongodb.erl
index bc450f39b..fec5a4a7f 100644
--- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mongodb.erl
+++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mongodb.erl
@@ -38,7 +38,7 @@ fields("config") ->
{enable, mk(boolean(), #{desc => ?DESC("enable"), default => true})},
{collection, mk(binary(), #{desc => ?DESC("collection"), default => <<"mqtt">>})},
{payload_template, mk(binary(), #{required => false, desc => ?DESC("payload_template")})}
- ] ++ emqx_resource_schema:fields("resource_opts_sync_only");
+ ] ++ emqx_resource_schema:fields("resource_opts");
fields(mongodb_rs) ->
emqx_connector_mongo:fields(rs) ++ fields("config");
fields(mongodb_sharded) ->
@@ -149,7 +149,7 @@ values(common, MongoType, Method, TypeOpts) ->
srv_record => false,
pool_size => 8,
username => <<"myuser">>,
- password => <<"mypass">>
+ password => <<"******">>
},
MethodVals = method_values(MongoType, Method),
Vals0 = maps:merge(MethodVals, Common),
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mysql.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mysql.erl
index eed4172ab..f3ed44247 100644
--- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mysql.erl
+++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mysql.erl
@@ -47,7 +47,7 @@ values(_Method) ->
database => <<"test">>,
pool_size => 8,
username => <<"root">>,
- password => <<"">>,
+ password => <<"******">>,
sql => ?DEFAULT_SQL,
local_topic => <<"local/topic/#">>,
resource_opts => #{
@@ -79,21 +79,10 @@ fields("config") ->
mk(
binary(),
#{desc => ?DESC("local_topic"), default => undefined}
- )},
- {resource_opts,
- mk(
- ref(?MODULE, "creation_opts"),
- #{
- required => false,
- default => #{},
- desc => ?DESC(emqx_resource_schema, <<"resource_opts">>)
- }
)}
- ] ++
+ ] ++ emqx_resource_schema:fields("resource_opts") ++
(emqx_connector_mysql:fields(config) --
emqx_connector_schema_lib:prepare_statement_fields());
-fields("creation_opts") ->
- emqx_resource_schema:fields("creation_opts_sync_only");
fields("post") ->
[type_field(), name_field() | fields("config")];
fields("put") ->
@@ -105,8 +94,6 @@ desc("config") ->
?DESC("desc_config");
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
["Configuration for MySQL using `", string:to_upper(Method), "` method."];
-desc("creation_opts" = Name) ->
- emqx_resource_schema:desc(Name);
desc(_) ->
undefined.
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_pgsql.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_pgsql.erl
index 46132bd99..958bc3449 100644
--- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_pgsql.erl
+++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_pgsql.erl
@@ -49,7 +49,7 @@ values(_Method, Type) ->
database => <<"mqtt">>,
pool_size => 8,
username => <<"root">>,
- password => <<"public">>,
+ password => <<"******">>,
sql => ?DEFAULT_SQL,
local_topic => <<"local/topic/#">>,
resource_opts => #{
@@ -81,21 +81,10 @@ fields("config") ->
mk(
binary(),
#{desc => ?DESC("local_topic"), default => undefined}
- )},
- {resource_opts,
- mk(
- ref(?MODULE, "creation_opts"),
- #{
- required => false,
- default => #{},
- desc => ?DESC(emqx_resource_schema, <<"resource_opts">>)
- }
)}
- ] ++
+ ] ++ emqx_resource_schema:fields("resource_opts") ++
(emqx_connector_pgsql:fields(config) --
emqx_connector_schema_lib:prepare_statement_fields());
-fields("creation_opts") ->
- emqx_resource_schema:fields("creation_opts_sync_only");
fields("post") ->
fields("post", pgsql);
fields("put") ->
@@ -110,8 +99,6 @@ desc("config") ->
?DESC("desc_config");
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
["Configuration for PostgreSQL using `", string:to_upper(Method), "` method."];
-desc("creation_opts" = Name) ->
- emqx_resource_schema:desc(Name);
desc(_) ->
undefined.
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_redis.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_redis.erl
index fa6958b6d..a728ecb7e 100644
--- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_redis.erl
+++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_redis.erl
@@ -77,7 +77,7 @@ values(common, RedisType, SpecificOpts) ->
enable => true,
local_topic => <<"local/topic/#">>,
pool_size => 8,
- password => <<"secret">>,
+ password => <<"******">>,
command_template => [<<"LPUSH">>, <<"MSGS">>, <<"${payload}">>],
resource_opts => values(resource_opts, RedisType, #{}),
ssl => #{enable => false}
@@ -180,10 +180,10 @@ resource_fields(Type) ->
resource_creation_fields("redis_cluster") ->
% TODO
% Cluster bridge is currently incompatible with batching.
- Fields = emqx_resource_schema:fields("creation_opts_sync_only"),
+ Fields = emqx_resource_schema:fields("creation_opts"),
lists:foldl(fun proplists:delete/2, Fields, [batch_size, batch_time, enable_batch]);
resource_creation_fields(_) ->
- emqx_resource_schema:fields("creation_opts_sync_only").
+ emqx_resource_schema:fields("creation_opts").
desc("config") ->
?DESC("desc_config");
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_rocketmq.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_rocketmq.erl
index 124e18069..78fd527d3 100644
--- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_rocketmq.erl
+++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_rocketmq.erl
@@ -80,21 +80,10 @@ fields("config") ->
mk(
binary(),
#{desc => ?DESC("local_topic"), required => false}
- )},
- {resource_opts,
- mk(
- ref(?MODULE, "creation_opts"),
- #{
- required => false,
- default => #{<<"request_timeout">> => ?DEFFAULT_REQ_TIMEOUT},
- desc => ?DESC(emqx_resource_schema, <<"resource_opts">>)
- }
)}
- ] ++
+ ] ++ emqx_resource_schema:fields("resource_opts") ++
(emqx_ee_connector_rocketmq:fields(config) --
emqx_connector_schema_lib:prepare_statement_fields());
-fields("creation_opts") ->
- emqx_resource_schema:fields("creation_opts_sync_only");
fields("post") ->
[type_field(), name_field() | fields("config")];
fields("put") ->
@@ -106,8 +95,6 @@ desc("config") ->
?DESC("desc_config");
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
["Configuration for RocketMQ using `", string:to_upper(Method), "` method."];
-desc("creation_opts" = Name) ->
- emqx_resource_schema:desc(Name);
desc(_) ->
undefined.
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_tdengine.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_tdengine.erl
index b72d79955..7a958d45f 100644
--- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_tdengine.erl
+++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_tdengine.erl
@@ -22,7 +22,7 @@
]).
-define(DEFAULT_SQL, <<
- "insert into mqtt.t_mqtt_msg(ts, msgid, mqtt_topic, qos, payload, arrived) "
+ "insert into t_mqtt_msg(ts, msgid, mqtt_topic, qos, payload, arrived) "
"values (${ts}, ${id}, ${topic}, ${qos}, ${payload}, ${timestamp})"
>>).
@@ -80,19 +80,8 @@ fields("config") ->
mk(
binary(),
#{desc => ?DESC("local_topic"), default => undefined}
- )},
- {resource_opts,
- mk(
- ref(?MODULE, "creation_opts"),
- #{
- required => false,
- default => #{},
- desc => ?DESC(emqx_resource_schema, <<"resource_opts">>)
- }
)}
- ] ++ emqx_ee_connector_tdengine:fields(config);
-fields("creation_opts") ->
- emqx_resource_schema:fields("creation_opts_sync_only");
+ ] ++ emqx_resource_schema:fields("resource_opts") ++ emqx_ee_connector_tdengine:fields(config);
fields("post") ->
[type_field(), name_field() | fields("config")];
fields("put") ->
@@ -104,8 +93,6 @@ desc("config") ->
?DESC("desc_config");
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
["Configuration for TDengine using `", string:to_upper(Method), "` method."];
-desc("creation_opts" = Name) ->
- emqx_resource_schema:desc(Name);
desc(_) ->
undefined.
diff --git a/lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka_consumer.erl b/lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka_consumer.erl
index a05f6ec13..f4dc3456e 100644
--- a/lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka_consumer.erl
+++ b/lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka_consumer.erl
@@ -95,6 +95,11 @@
commit_fun => brod_group_subscriber_v2:commit_fun()
}.
+-define(CLIENT_DOWN_MESSAGE,
+ "Failed to start Kafka client. Please check the logs for errors and check"
+ " the connection parameters."
+).
+
%%-------------------------------------------------------------------------------------
%% `emqx_resource' API
%%-------------------------------------------------------------------------------------
@@ -152,7 +157,7 @@ on_start(InstanceId, Config) ->
kafka_hosts => BootstrapHosts,
reason => emqx_misc:redact(Reason)
}),
- throw(failed_to_start_kafka_client)
+ throw(?CLIENT_DOWN_MESSAGE)
end,
start_consumer(Config, InstanceId, ClientID).
@@ -173,7 +178,7 @@ on_get_status(_InstanceID, State) ->
kafka_client_id := ClientID,
kafka_topics := KafkaTopics
} = State,
- do_get_status(ClientID, KafkaTopics, SubscriberId).
+ do_get_status(State, ClientID, KafkaTopics, SubscriberId).
%%-------------------------------------------------------------------------------------
%% `brod_group_subscriber' API
@@ -370,22 +375,41 @@ stop_client(ClientID) ->
),
ok.
-do_get_status(ClientID, [KafkaTopic | RestTopics], SubscriberId) ->
+do_get_status(State, ClientID, [KafkaTopic | RestTopics], SubscriberId) ->
case brod:get_partitions_count(ClientID, KafkaTopic) of
{ok, NPartitions} ->
- case do_get_status(ClientID, KafkaTopic, SubscriberId, NPartitions) of
- connected -> do_get_status(ClientID, RestTopics, SubscriberId);
+ case do_get_status1(ClientID, KafkaTopic, SubscriberId, NPartitions) of
+ connected -> do_get_status(State, ClientID, RestTopics, SubscriberId);
disconnected -> disconnected
end;
+ {error, {client_down, Context}} ->
+ case infer_client_error(Context) of
+ auth_error ->
+ Message = "Authentication error. " ++ ?CLIENT_DOWN_MESSAGE,
+ {disconnected, State, Message};
+ {auth_error, Message0} ->
+ Message = binary_to_list(Message0) ++ "; " ++ ?CLIENT_DOWN_MESSAGE,
+ {disconnected, State, Message};
+ connection_refused ->
+ Message = "Connection refused. " ++ ?CLIENT_DOWN_MESSAGE,
+ {disconnected, State, Message};
+ _ ->
+ {disconnected, State, ?CLIENT_DOWN_MESSAGE}
+ end;
+ {error, leader_not_available} ->
+ Message =
+ "Leader connection not available. Please check the Kafka topic used,"
+ " the connection parameters and Kafka cluster health",
+ {disconnected, State, Message};
_ ->
disconnected
end;
-do_get_status(_ClientID, _KafkaTopics = [], _SubscriberId) ->
+do_get_status(_State, _ClientID, _KafkaTopics = [], _SubscriberId) ->
connected.
--spec do_get_status(brod:client_id(), binary(), subscriber_id(), pos_integer()) ->
+-spec do_get_status1(brod:client_id(), binary(), subscriber_id(), pos_integer()) ->
connected | disconnected.
-do_get_status(ClientID, KafkaTopic, SubscriberId, NPartitions) ->
+do_get_status1(ClientID, KafkaTopic, SubscriberId, NPartitions) ->
Results =
lists:map(
fun(N) ->
@@ -504,3 +528,15 @@ encode(Value, base64) ->
to_bin(B) when is_binary(B) -> B;
to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8).
+
+infer_client_error(Error) ->
+ case Error of
+ [{_BrokerEndpoint, {econnrefused, _}} | _] ->
+ connection_refused;
+ [{_BrokerEndpoint, {{sasl_auth_error, Message}, _}} | _] when is_binary(Message) ->
+ {auth_error, Message};
+ [{_BrokerEndpoint, {{sasl_auth_error, _}, _}} | _] ->
+ auth_error;
+ _ ->
+ undefined
+ end.
diff --git a/lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka_producer.erl b/lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka_producer.erl
index 5703c69f5..09713a431 100644
--- a/lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka_producer.erl
+++ b/lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka_producer.erl
@@ -114,7 +114,10 @@ on_start(InstId, Config) ->
client_id => ClientId
}
),
- throw(failed_to_start_kafka_producer)
+ throw(
+ "Failed to start Kafka client. Please check the logs for errors and check"
+ " the connection parameters."
+ )
end.
on_stop(_InstanceID, #{client_id := ClientID, producers := Producers, resource_id := ResourceID}) ->
diff --git a/lib-ee/emqx_ee_bridge/test/emqx_bridge_impl_kafka_consumer_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_bridge_impl_kafka_consumer_SUITE.erl
index 02a4c3c3b..4019a9c42 100644
--- a/lib-ee/emqx_ee_bridge/test/emqx_bridge_impl_kafka_consumer_SUITE.erl
+++ b/lib-ee/emqx_ee_bridge/test/emqx_bridge_impl_kafka_consumer_SUITE.erl
@@ -388,7 +388,9 @@ end_per_testcase(_Testcase, Config) ->
maps:values(ProducersMapping)
),
ok = wolff:stop_and_delete_supervised_client(KafkaProducerClientId),
- emqx_common_test_helpers:call_janitor(),
+ %% in CI, apparently this needs more time since the
+ %% machines struggle with all the containers running...
+ emqx_common_test_helpers:call_janitor(60_000),
ok = snabbkaffe:stop(),
ok
end.
@@ -1664,7 +1666,7 @@ t_cluster_group(Config) ->
|| {Name, Opts} <- Cluster
],
on_exit(fun() ->
- lists:foreach(
+ emqx_misc:pmap(
fun(N) ->
ct:pal("stopping ~p", [N]),
ok = emqx_common_test_helpers:stop_slave(N)
@@ -1875,7 +1877,7 @@ t_cluster_node_down(Config) ->
Cluster
),
on_exit(fun() ->
- lists:foreach(
+ emqx_misc:pmap(
fun(N) ->
ct:pal("stopping ~p", [N]),
ok = emqx_common_test_helpers:stop_slave(N)
@@ -1894,10 +1896,14 @@ t_cluster_node_down(Config) ->
{ok, _} = snabbkaffe:receive_events(SRef0),
lists:foreach(
fun(N) ->
- ?assertMatch(
- {ok, _},
- erpc:call(N, emqx_bridge, lookup, [BridgeId]),
- #{node => N}
+ ?retry(
+ _Sleep1 = 100,
+ _Attempts1 = 50,
+ ?assertMatch(
+ {ok, _},
+ erpc:call(N, emqx_bridge, lookup, [BridgeId]),
+ #{node => N}
+ )
)
end,
Nodes
diff --git a/lib-ee/emqx_ee_bridge/test/emqx_bridge_impl_kafka_producer_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_bridge_impl_kafka_producer_SUITE.erl
index 4b9642442..9e32f818d 100644
--- a/lib-ee/emqx_ee_bridge/test/emqx_bridge_impl_kafka_producer_SUITE.erl
+++ b/lib-ee/emqx_ee_bridge/test/emqx_bridge_impl_kafka_producer_SUITE.erl
@@ -9,6 +9,7 @@
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("brod/include/brod.hrl").
-define(PRODUCER, emqx_bridge_impl_kafka_producer).
@@ -415,9 +416,11 @@ t_failed_creation_then_fix(Config) ->
Type, erlang:list_to_atom(Name), WrongConf
),
WrongConfigAtom = WrongConfigAtom1#{bridge_name => Name},
- ?assertThrow(failed_to_start_kafka_producer, ?PRODUCER:on_start(ResourceId, WrongConfigAtom)),
- %% before throwing, it should cleanup the client process.
- ?assertEqual([], supervisor:which_children(wolff_client_sup)),
+ ?assertThrow(Reason when is_list(Reason), ?PRODUCER:on_start(ResourceId, WrongConfigAtom)),
+    %% Before throwing, it should clean up the client process. We
+ %% retry because the supervisor might need some time to really
+ %% remove it from its tree.
+ ?retry(50, 10, ?assertEqual([], supervisor:which_children(wolff_client_sup))),
%% must succeed with correct config
{ok, #{config := ValidConfigAtom1}} = emqx_bridge:create(
Type, erlang:list_to_atom(Name), ValidConf
diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl
index d040000e2..f1ea6e930 100644
--- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl
+++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl
@@ -73,15 +73,16 @@ all() ->
groups() ->
TCs = emqx_common_test_helpers:all(?MODULE),
NonBatchCases = [t_write_timeout],
+ QueryModeGroups = [{group, async}, {group, sync}],
+ BatchingGroups = [
+ %{group, with_batch},
+ {group, without_batch}
+ ],
[
- {tcp, [
- %{group, with_batch},
- {group, without_batch}
- ]},
- {tls, [
- %{group, with_batch},
- {group, without_batch}
- ]},
+ {tcp, QueryModeGroups},
+ {tls, QueryModeGroups},
+ {async, BatchingGroups},
+ {sync, BatchingGroups},
{with_batch, TCs -- NonBatchCases},
{without_batch, TCs}
].
@@ -93,7 +94,6 @@ init_per_group(tcp, Config) ->
{cassa_host, Host},
{cassa_port, Port},
{enable_tls, false},
- {query_mode, sync},
{proxy_name, "cassa_tcp"}
| Config
];
@@ -104,10 +104,13 @@ init_per_group(tls, Config) ->
{cassa_host, Host},
{cassa_port, Port},
{enable_tls, true},
- {query_mode, sync},
{proxy_name, "cassa_tls"}
| Config
];
+init_per_group(async, Config) ->
+ [{query_mode, async} | Config];
+init_per_group(sync, Config) ->
+ [{query_mode, sync} | Config];
init_per_group(with_batch, Config0) ->
Config = [{enable_batch, true} | Config0],
common_init(Config);
@@ -139,14 +142,15 @@ end_per_suite(_Config) ->
init_per_testcase(_Testcase, Config) ->
connect_and_clear_table(Config),
delete_bridge(Config),
+ snabbkaffe:start_trace(),
Config.
end_per_testcase(_Testcase, Config) ->
ProxyHost = ?config(proxy_host, Config),
ProxyPort = ?config(proxy_port, Config),
emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
- connect_and_clear_table(Config),
ok = snabbkaffe:stop(),
+ connect_and_clear_table(Config),
delete_bridge(Config),
ok.
@@ -171,6 +175,7 @@ common_init(Config0) ->
ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]),
emqx_mgmt_api_test_util:init_suite(),
% Connect to cassnadra directly and create the table
+ catch connect_and_drop_table(Config0),
connect_and_create_table(Config0),
{Name, CassaConf} = cassa_config(BridgeType, Config0),
Config =
@@ -250,9 +255,13 @@ parse_and_check(ConfigString, BridgeType, Name) ->
Config.
create_bridge(Config) ->
+ create_bridge(Config, _Overrides = #{}).
+
+create_bridge(Config, Overrides) ->
BridgeType = ?config(cassa_bridge_type, Config),
Name = ?config(cassa_name, Config),
- BridgeConfig = ?config(cassa_config, Config),
+ BridgeConfig0 = ?config(cassa_config, Config),
+ BridgeConfig = emqx_map_lib:deep_merge(BridgeConfig0, Overrides),
emqx_bridge:create(BridgeType, Name, BridgeConfig).
delete_bridge(Config) ->
@@ -288,6 +297,27 @@ query_resource(Config, Request) ->
ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
emqx_resource:query(ResourceID, Request, #{timeout => 1_000}).
+query_resource_async(Config, Request) ->
+ Name = ?config(cassa_name, Config),
+ BridgeType = ?config(cassa_bridge_type, Config),
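+ %% One-shot process alias (OTP 24+): the reply callback below sends the
+ %% result to it, and the `reply' option deactivates the alias once a
+ %% reply has been received through it.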
+ Ref = alias([reply]),
+ AsyncReplyFun = fun(Result) -> Ref ! {result, Ref, Result} end,
+ ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
+ Return = emqx_resource:query(ResourceID, Request, #{
+ timeout => 500, async_reply_fun => {AsyncReplyFun, []}
+ }),
+ {Return, Ref}.
+
+receive_result(Ref, Timeout) when is_reference(Ref) ->
+ receive
+ {result, Ref, Result} ->
+ {ok, Result};
+ {Ref, Result} ->
+ {ok, Result}
+ after Timeout ->
+ timeout
+ end.
+
connect_direct_cassa(Config) ->
Opts = #{
nodes => [{?config(cassa_host, Config), ?config(cassa_port, Config)}],
@@ -546,15 +576,27 @@ t_write_failure(Config) ->
% ok.
t_simple_sql_query(Config) ->
+ EnableBatch = ?config(enable_batch, Config),
+ QueryMode = ?config(query_mode, Config),
?assertMatch(
{ok, _},
create_bridge(Config)
),
Request = {query, <<"SELECT count(1) AS T FROM system.local">>},
- Result = query_resource(Config, Request),
- case ?config(enable_batch, Config) of
- true -> ?assertEqual({error, {unrecoverable_error, batch_prepare_not_implemented}}, Result);
- false -> ?assertMatch({ok, {<<"system.local">>, _, [[1]]}}, Result)
+ Result =
+ case QueryMode of
+ sync ->
+ query_resource(Config, Request);
+ async ->
+ {_, Ref} = query_resource_async(Config, Request),
+ {ok, Res} = receive_result(Ref, 2_000),
+ Res
+ end,
+ case EnableBatch of
+ true ->
+ ?assertEqual({error, {unrecoverable_error, batch_prepare_not_implemented}}, Result);
+ false ->
+ ?assertMatch({ok, {<<"system.local">>, _, [[1]]}}, Result)
end,
ok.
@@ -565,22 +607,56 @@ t_missing_data(Config) ->
),
%% emqx_ee_connector_cassa sends missing data as the `null` atom
%% to the ecql driver
- Result = send_message(Config, #{}),
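+ %% `?wait_async_action/3' (snabbkaffe) returns {ActionResult, {ok, Event} | timeout};
+ %% assert on the buffer worker's flush-ack trace event instead of the
+ %% call's return value.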
+ {_, {ok, Event}} =
+ ?wait_async_action(
+ send_message(Config, #{}),
+ #{?snk_kind := buffer_worker_flush_ack},
+ 2_000
+ ),
?assertMatch(
%% TODO: match error msgs
- {error, {unrecoverable_error, {8704, <<"Expected 8 or 0 byte long for date (4)">>}}},
- Result
+ #{
+ result :=
+ {error, {unrecoverable_error, {8704, <<"Expected 8 or 0 byte long for date (4)">>}}}
+ },
+ Event
),
ok.
t_bad_sql_parameter(Config) ->
+ QueryMode = ?config(query_mode, Config),
+ EnableBatch = ?config(enable_batch, Config),
+ Name = ?config(cassa_name, Config),
+ ResourceId = emqx_bridge_resource:resource_id(cassandra, Name),
?assertMatch(
{ok, _},
- create_bridge(Config)
+ create_bridge(
+ Config,
+ #{
+ <<"resource_opts">> => #{
+ <<"request_timeout">> => 500,
+ <<"resume_interval">> => 100,
+ <<"health_check_interval">> => 100
+ }
+ }
+ )
),
Request = {query, <<"">>, [bad_parameter]},
- Result = query_resource(Config, Request),
- case ?config(enable_batch, Config) of
+ Result =
+ case QueryMode of
+ sync ->
+ query_resource(Config, Request);
+ async ->
+ {_, Ref} = query_resource_async(Config, Request),
+ case receive_result(Ref, 5_000) of
+ {ok, Res} ->
+ Res;
+ timeout ->
+ ct:pal("mailbox:\n ~p", [process_info(self(), messages)]),
+ ct:fail("no response received")
+ end
+ end,
+ case EnableBatch of
true ->
?assertEqual({error, {unrecoverable_error, invalid_request}}, Result);
false ->
diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl
index 9850c9529..116dcc729 100644
--- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl
+++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl
@@ -9,6 +9,7 @@
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
%%------------------------------------------------------------------------------
%% CT boilerplate
@@ -16,9 +17,8 @@
all() ->
[
- {group, rs},
- {group, sharded},
- {group, single}
+ {group, async},
+ {group, sync}
| (emqx_common_test_helpers:all(?MODULE) -- group_tests())
].
@@ -31,12 +31,23 @@ group_tests() ->
].
groups() ->
+ TypeGroups = [
+ {group, rs},
+ {group, sharded},
+ {group, single}
+ ],
[
+ {async, TypeGroups},
+ {sync, TypeGroups},
{rs, group_tests()},
{sharded, group_tests()},
{single, group_tests()}
].
+init_per_group(async, Config) ->
+ [{query_mode, async} | Config];
+init_per_group(sync, Config) ->
+ [{query_mode, sync} | Config];
init_per_group(Type = rs, Config) ->
MongoHost = os:getenv("MONGO_RS_HOST", "mongo1"),
MongoPort = list_to_integer(os:getenv("MONGO_RS_PORT", "27017")),
@@ -44,7 +55,7 @@ init_per_group(Type = rs, Config) ->
true ->
ok = start_apps(),
emqx_mgmt_api_test_util:init_suite(),
- {Name, MongoConfig} = mongo_config(MongoHost, MongoPort, Type),
+ {Name, MongoConfig} = mongo_config(MongoHost, MongoPort, Type, Config),
[
{mongo_host, MongoHost},
{mongo_port, MongoPort},
@@ -63,7 +74,7 @@ init_per_group(Type = sharded, Config) ->
true ->
ok = start_apps(),
emqx_mgmt_api_test_util:init_suite(),
- {Name, MongoConfig} = mongo_config(MongoHost, MongoPort, Type),
+ {Name, MongoConfig} = mongo_config(MongoHost, MongoPort, Type, Config),
[
{mongo_host, MongoHost},
{mongo_port, MongoPort},
@@ -82,7 +93,7 @@ init_per_group(Type = single, Config) ->
true ->
ok = start_apps(),
emqx_mgmt_api_test_util:init_suite(),
- {Name, MongoConfig} = mongo_config(MongoHost, MongoPort, Type),
+ {Name, MongoConfig} = mongo_config(MongoHost, MongoPort, Type, Config),
[
{mongo_host, MongoHost},
{mongo_port, MongoPort},
@@ -99,6 +110,7 @@ end_per_group(_Type, _Config) ->
ok.
init_per_suite(Config) ->
+ emqx_common_test_helpers:clear_screen(),
Config.
end_per_suite(_Config) ->
@@ -109,11 +121,13 @@ end_per_suite(_Config) ->
init_per_testcase(_Testcase, Config) ->
catch clear_db(Config),
delete_bridge(Config),
+ snabbkaffe:start_trace(),
Config.
end_per_testcase(_Testcase, Config) ->
catch clear_db(Config),
delete_bridge(Config),
+ snabbkaffe:stop(),
ok.
%%------------------------------------------------------------------------------
@@ -140,7 +154,8 @@ mongo_type_bin(sharded) ->
mongo_type_bin(single) ->
<<"mongodb_single">>.
-mongo_config(MongoHost, MongoPort0, rs = Type) ->
+mongo_config(MongoHost, MongoPort0, rs = Type, Config) ->
+ QueryMode = ?config(query_mode, Config),
MongoPort = integer_to_list(MongoPort0),
Servers = MongoHost ++ ":" ++ MongoPort,
Name = atom_to_binary(?MODULE),
@@ -154,13 +169,19 @@ mongo_config(MongoHost, MongoPort0, rs = Type) ->
" w_mode = safe\n"
" database = mqtt\n"
" resource_opts = {\n"
+ " query_mode = ~s\n"
" worker_pool_size = 1\n"
" }\n"
"}",
- [Name, Servers]
+ [
+ Name,
+ Servers,
+ QueryMode
+ ]
),
{Name, parse_and_check(ConfigString, Type, Name)};
-mongo_config(MongoHost, MongoPort0, sharded = Type) ->
+mongo_config(MongoHost, MongoPort0, sharded = Type, Config) ->
+ QueryMode = ?config(query_mode, Config),
MongoPort = integer_to_list(MongoPort0),
Servers = MongoHost ++ ":" ++ MongoPort,
Name = atom_to_binary(?MODULE),
@@ -173,13 +194,19 @@ mongo_config(MongoHost, MongoPort0, sharded = Type) ->
" w_mode = safe\n"
" database = mqtt\n"
" resource_opts = {\n"
+ " query_mode = ~s\n"
" worker_pool_size = 1\n"
" }\n"
"}",
- [Name, Servers]
+ [
+ Name,
+ Servers,
+ QueryMode
+ ]
),
{Name, parse_and_check(ConfigString, Type, Name)};
-mongo_config(MongoHost, MongoPort0, single = Type) ->
+mongo_config(MongoHost, MongoPort0, single = Type, Config) ->
+ QueryMode = ?config(query_mode, Config),
MongoPort = integer_to_list(MongoPort0),
Server = MongoHost ++ ":" ++ MongoPort,
Name = atom_to_binary(?MODULE),
@@ -192,10 +219,15 @@ mongo_config(MongoHost, MongoPort0, single = Type) ->
" w_mode = safe\n"
" database = mqtt\n"
" resource_opts = {\n"
+ " query_mode = ~s\n"
" worker_pool_size = 1\n"
" }\n"
"}",
- [Name, Server]
+ [
+ Name,
+ Server,
+ QueryMode
+ ]
),
{Name, parse_and_check(ConfigString, Type, Name)}.
@@ -248,7 +280,7 @@ find_all(Config) ->
Name = ?config(mongo_name, Config),
#{<<"collection">> := Collection} = ?config(mongo_config, Config),
ResourceID = emqx_bridge_resource:resource_id(Type, Name),
- emqx_resource:query(ResourceID, {find, Collection, #{}, #{}}).
+ emqx_resource:simple_sync_query(ResourceID, {find, Collection, #{}, #{}}).
send_message(Config, Payload) ->
Name = ?config(mongo_name, Config),
@@ -266,7 +298,12 @@ t_setup_via_config_and_publish(Config) ->
create_bridge(Config)
),
Val = erlang:unique_integer(),
- ok = send_message(Config, #{key => Val}),
+ {ok, {ok, _}} =
+ ?wait_async_action(
+ send_message(Config, #{key => Val}),
+ #{?snk_kind := mongo_ee_connector_on_query_return},
+ 5_000
+ ),
?assertMatch(
{ok, [#{<<"key">> := Val}]},
find_all(Config)
@@ -286,7 +323,12 @@ t_setup_via_http_api_and_publish(Config) ->
create_bridge_http(MongoConfig)
),
Val = erlang:unique_integer(),
- ok = send_message(Config, #{key => Val}),
+ {ok, {ok, _}} =
+ ?wait_async_action(
+ send_message(Config, #{key => Val}),
+ #{?snk_kind := mongo_ee_connector_on_query_return},
+ 5_000
+ ),
?assertMatch(
{ok, [#{<<"key">> := Val}]},
find_all(Config)
@@ -297,7 +339,12 @@ t_payload_template(Config) ->
{ok, _} = create_bridge(Config, #{<<"payload_template">> => <<"{\"foo\": \"${clientid}\"}">>}),
Val = erlang:unique_integer(),
ClientId = emqx_guid:to_hexstr(emqx_guid:gen()),
- ok = send_message(Config, #{key => Val, clientid => ClientId}),
+ {ok, {ok, _}} =
+ ?wait_async_action(
+ send_message(Config, #{key => Val, clientid => ClientId}),
+ #{?snk_kind := mongo_ee_connector_on_query_return},
+ 5_000
+ ),
?assertMatch(
{ok, [#{<<"foo">> := ClientId}]},
find_all(Config)
@@ -314,11 +361,16 @@ t_collection_template(Config) ->
),
Val = erlang:unique_integer(),
ClientId = emqx_guid:to_hexstr(emqx_guid:gen()),
- ok = send_message(Config, #{
- key => Val,
- clientid => ClientId,
- mycollectionvar => <<"mycol">>
- }),
+ {ok, {ok, _}} =
+ ?wait_async_action(
+ send_message(Config, #{
+ key => Val,
+ clientid => ClientId,
+ mycollectionvar => <<"mycol">>
+ }),
+ #{?snk_kind := mongo_ee_connector_on_query_return},
+ 5_000
+ ),
?assertMatch(
{ok, [#{<<"foo">> := ClientId}]},
find_all(Config)
diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mysql_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mysql_SUITE.erl
index 93e9e6fee..38e31c7ae 100644
--- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mysql_SUITE.erl
+++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mysql_SUITE.erl
@@ -45,15 +45,16 @@ all() ->
groups() ->
TCs = emqx_common_test_helpers:all(?MODULE),
NonBatchCases = [t_write_timeout, t_uninitialized_prepared_statement],
+ BatchingGroups = [
+ {group, with_batch},
+ {group, without_batch}
+ ],
+ QueryModeGroups = [{group, async}, {group, sync}],
[
- {tcp, [
- {group, with_batch},
- {group, without_batch}
- ]},
- {tls, [
- {group, with_batch},
- {group, without_batch}
- ]},
+ {tcp, QueryModeGroups},
+ {tls, QueryModeGroups},
+ {async, BatchingGroups},
+ {sync, BatchingGroups},
{with_batch, TCs -- NonBatchCases},
{without_batch, TCs}
].
@@ -65,7 +66,6 @@ init_per_group(tcp, Config) ->
{mysql_host, MysqlHost},
{mysql_port, MysqlPort},
{enable_tls, false},
- {query_mode, sync},
{proxy_name, "mysql_tcp"}
| Config
];
@@ -76,10 +76,13 @@ init_per_group(tls, Config) ->
{mysql_host, MysqlHost},
{mysql_port, MysqlPort},
{enable_tls, true},
- {query_mode, sync},
{proxy_name, "mysql_tls"}
| Config
];
+init_per_group(async, Config) ->
+ [{query_mode, async} | Config];
+init_per_group(sync, Config) ->
+ [{query_mode, sync} | Config];
init_per_group(with_batch, Config0) ->
Config = [{batch_size, 100} | Config0],
common_init(Config);
@@ -99,6 +102,7 @@ end_per_group(_Group, _Config) ->
ok.
init_per_suite(Config) ->
+ emqx_common_test_helpers:clear_screen(),
Config.
end_per_suite(_Config) ->
@@ -109,6 +113,7 @@ end_per_suite(_Config) ->
init_per_testcase(_Testcase, Config) ->
connect_and_clear_table(Config),
delete_bridge(Config),
+ snabbkaffe:start_trace(),
Config.
end_per_testcase(_Testcase, Config) ->
@@ -237,6 +242,25 @@ query_resource(Config, Request) ->
ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
emqx_resource:query(ResourceID, Request, #{timeout => 500}).
+query_resource_async(Config, Request) ->
+ Name = ?config(mysql_name, Config),
+ BridgeType = ?config(mysql_bridge_type, Config),
+ Ref = alias([reply]),
+ AsyncReplyFun = fun(Result) -> Ref ! {result, Ref, Result} end,
+ ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
+ Return = emqx_resource:query(ResourceID, Request, #{
+ timeout => 500, async_reply_fun => {AsyncReplyFun, []}
+ }),
+ {Return, Ref}.
+
+receive_result(Ref, Timeout) ->
+ receive
+ {result, Ref, Result} ->
+ {ok, Result}
+ after Timeout ->
+ timeout
+ end.
+
unprepare(Config, Key) ->
Name = ?config(mysql_name, Config),
BridgeType = ?config(mysql_bridge_type, Config),
@@ -409,17 +433,29 @@ t_write_failure(Config) ->
Val = integer_to_binary(erlang:unique_integer()),
SentData = #{payload => Val, timestamp => 1668602148000},
?check_trace(
- emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
- case QueryMode of
- sync ->
- ?assertMatch(
- {error, {resource_error, #{reason := timeout}}},
+ begin
+ %% For some unknown reason, `?wait_async_action' and `subscribe'
+ %% hang and time out if called inside `with_failure', yet the event
+ %% still fires and is emitted after the test pid dies.
+ {ok, SRef} = snabbkaffe:subscribe(
+ ?match_event(#{?snk_kind := buffer_worker_flush_nack}),
+ 2_000
+ ),
+ emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
+ case QueryMode of
+ sync ->
+ ?assertMatch(
+ {error, {resource_error, #{reason := timeout}}},
+ send_message(Config, SentData)
+ );
+ async ->
send_message(Config, SentData)
- );
- async ->
- send_message(Config, SentData)
- end
- end),
+ end,
+ ?assertMatch({ok, [#{result := {error, _}}]}, snabbkaffe:receive_events(SRef)),
+ ok
+ end),
+ ok
+ end,
fun(Trace0) ->
ct:pal("trace: ~p", [Trace0]),
Trace = ?of_kind(buffer_worker_flush_nack, Trace0),
@@ -443,27 +479,52 @@ t_write_timeout(Config) ->
ProxyName = ?config(proxy_name, Config),
ProxyPort = ?config(proxy_port, Config),
ProxyHost = ?config(proxy_host, Config),
+ QueryMode = ?config(query_mode, Config),
{ok, _} = create_bridge(Config),
Val = integer_to_binary(erlang:unique_integer()),
SentData = #{payload => Val, timestamp => 1668602148000},
Timeout = 1000,
+ %% For some unknown reason, `?wait_async_action' and `subscribe'
+ %% hang and time out if called inside `with_failure', yet the event
+ %% still fires and is emitted after the test pid dies.
+ {ok, SRef} = snabbkaffe:subscribe(
+ ?match_event(#{?snk_kind := buffer_worker_flush_nack}),
+ 2 * Timeout
+ ),
emqx_common_test_helpers:with_failure(timeout, ProxyName, ProxyHost, ProxyPort, fun() ->
- ?assertMatch(
- {error, {resource_error, #{reason := timeout}}},
- query_resource(Config, {send_message, SentData, [], Timeout})
- )
+ case QueryMode of
+ sync ->
+ ?assertMatch(
+ {error, {resource_error, #{reason := timeout}}},
+ query_resource(Config, {send_message, SentData, [], Timeout})
+ );
+ async ->
+ query_resource(Config, {send_message, SentData, [], Timeout}),
+ ok
+ end,
+ ok
end),
+ ?assertMatch({ok, [#{result := {error, _}}]}, snabbkaffe:receive_events(SRef)),
ok.
t_simple_sql_query(Config) ->
+ QueryMode = ?config(query_mode, Config),
+ BatchSize = ?config(batch_size, Config),
+ IsBatch = BatchSize > 1,
?assertMatch(
{ok, _},
create_bridge(Config)
),
Request = {sql, <<"SELECT count(1) AS T">>},
- Result = query_resource(Config, Request),
- BatchSize = ?config(batch_size, Config),
- IsBatch = BatchSize > 1,
+ Result =
+ case QueryMode of
+ sync ->
+ query_resource(Config, Request);
+ async ->
+ {_, Ref} = query_resource_async(Config, Request),
+ {ok, Res} = receive_result(Ref, 2_000),
+ Res
+ end,
case IsBatch of
true -> ?assertEqual({error, {unrecoverable_error, batch_select_not_implemented}}, Result);
false -> ?assertEqual({ok, [<<"T">>], [[1]]}, Result)
@@ -471,25 +532,37 @@ t_simple_sql_query(Config) ->
ok.
t_missing_data(Config) ->
+ BatchSize = ?config(batch_size, Config),
+ IsBatch = BatchSize > 1,
?assertMatch(
{ok, _},
create_bridge(Config)
),
- Result = send_message(Config, #{}),
- BatchSize = ?config(batch_size, Config),
- IsBatch = BatchSize > 1,
+ {ok, SRef} = snabbkaffe:subscribe(
+ ?match_event(#{?snk_kind := buffer_worker_flush_ack}),
+ 2_000
+ ),
+ send_message(Config, #{}),
+ {ok, [Event]} = snabbkaffe:receive_events(SRef),
case IsBatch of
true ->
?assertMatch(
- {error,
- {unrecoverable_error,
- {1292, _, <<"Truncated incorrect DOUBLE value: 'undefined'">>}}},
- Result
+ #{
+ result :=
+ {error,
+ {unrecoverable_error,
+ {1292, _, <<"Truncated incorrect DOUBLE value: 'undefined'">>}}}
+ },
+ Event
);
false ->
?assertMatch(
- {error, {unrecoverable_error, {1048, _, <<"Column 'arrived' cannot be null">>}}},
- Result
+ #{
+ result :=
+ {error,
+ {unrecoverable_error, {1048, _, <<"Column 'arrived' cannot be null">>}}}
+ },
+ Event
)
end,
ok.
@@ -500,14 +573,22 @@ t_bad_sql_parameter(Config) ->
create_bridge(Config)
),
Request = {sql, <<"">>, [bad_parameter]},
- Result = query_resource(Config, Request),
+ {_, {ok, Event}} =
+ ?wait_async_action(
+ query_resource(Config, Request),
+ #{?snk_kind := buffer_worker_flush_ack},
+ 2_000
+ ),
BatchSize = ?config(batch_size, Config),
IsBatch = BatchSize > 1,
case IsBatch of
true ->
- ?assertEqual({error, {unrecoverable_error, invalid_request}}, Result);
+ ?assertMatch(#{result := {error, {unrecoverable_error, invalid_request}}}, Event);
false ->
- ?assertEqual({error, {unrecoverable_error, {invalid_params, [bad_parameter]}}}, Result)
+ ?assertMatch(
+ #{result := {error, {unrecoverable_error, {invalid_params, [bad_parameter]}}}},
+ Event
+ )
end,
ok.
@@ -515,7 +596,12 @@ t_nasty_sql_string(Config) ->
?assertMatch({ok, _}, create_bridge(Config)),
Payload = list_to_binary(lists:seq(0, 255)),
Message = #{payload => Payload, timestamp => erlang:system_time(millisecond)},
- Result = send_message(Config, Message),
+ {Result, {ok, _}} =
+ ?wait_async_action(
+ send_message(Config, Message),
+ #{?snk_kind := mysql_connector_query_return},
+ 1_000
+ ),
?assertEqual(ok, Result),
?assertMatch(
{ok, [<<"payload">>], [[Payload]]},
@@ -561,12 +647,22 @@ t_unprepared_statement_query(Config) ->
create_bridge(Config)
),
Request = {prepared_query, unprepared_query, []},
- Result = query_resource(Config, Request),
+ {_, {ok, Event}} =
+ ?wait_async_action(
+ query_resource(Config, Request),
+ #{?snk_kind := buffer_worker_flush_ack},
+ 2_000
+ ),
BatchSize = ?config(batch_size, Config),
IsBatch = BatchSize > 1,
case IsBatch of
- true -> ?assertEqual({error, {unrecoverable_error, invalid_request}}, Result);
- false -> ?assertEqual({error, {unrecoverable_error, prepared_statement_invalid}}, Result)
+ true ->
+ ?assertMatch(#{result := {error, {unrecoverable_error, invalid_request}}}, Event);
+ false ->
+ ?assertMatch(
+ #{result := {error, {unrecoverable_error, prepared_statement_invalid}}},
+ Event
+ )
end,
ok.
@@ -582,7 +678,13 @@ t_uninitialized_prepared_statement(Config) ->
unprepare(Config, send_message),
?check_trace(
begin
- ?assertEqual(ok, send_message(Config, SentData)),
+ {Res, {ok, _}} =
+ ?wait_async_action(
+ send_message(Config, SentData),
+ #{?snk_kind := mysql_connector_query_return},
+ 2_000
+ ),
+ ?assertEqual(ok, Res),
ok
end,
fun(Trace) ->
diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_pgsql_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_pgsql_SUITE.erl
index 10359a128..83cb8b1f3 100644
--- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_pgsql_SUITE.erl
+++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_pgsql_SUITE.erl
@@ -42,19 +42,18 @@ all() ->
groups() ->
TCs = emqx_common_test_helpers:all(?MODULE),
NonBatchCases = [t_write_timeout],
+ BatchVariantGroups = [
+ {group, with_batch},
+ {group, without_batch},
+ {group, matrix},
+ {group, timescale}
+ ],
+ QueryModeGroups = [{async, BatchVariantGroups}, {sync, BatchVariantGroups}],
[
- {tcp, [
- {group, with_batch},
- {group, without_batch},
- {group, matrix},
- {group, timescale}
- ]},
- {tls, [
- {group, with_batch},
- {group, without_batch},
- {group, matrix},
- {group, timescale}
- ]},
+ {tcp, QueryModeGroups},
+ {tls, QueryModeGroups},
+ {async, BatchVariantGroups},
+ {sync, BatchVariantGroups},
{with_batch, TCs -- NonBatchCases},
{without_batch, TCs},
{matrix, [t_setup_via_config_and_publish, t_setup_via_http_api_and_publish]},
@@ -68,7 +67,6 @@ init_per_group(tcp, Config) ->
{pgsql_host, Host},
{pgsql_port, Port},
{enable_tls, false},
- {query_mode, sync},
{proxy_name, "pgsql_tcp"}
| Config
];
@@ -79,10 +77,13 @@ init_per_group(tls, Config) ->
{pgsql_host, Host},
{pgsql_port, Port},
{enable_tls, true},
- {query_mode, sync},
{proxy_name, "pgsql_tls"}
| Config
];
+init_per_group(async, Config) ->
+ [{query_mode, async} | Config];
+init_per_group(sync, Config) ->
+ [{query_mode, sync} | Config];
init_per_group(with_batch, Config0) ->
Config = [{enable_batch, true} | Config0],
common_init(Config);
@@ -118,6 +119,7 @@ end_per_suite(_Config) ->
init_per_testcase(_Testcase, Config) ->
connect_and_clear_table(Config),
delete_bridge(Config),
+ snabbkaffe:start_trace(),
Config.
end_per_testcase(_Testcase, Config) ->
@@ -221,9 +223,13 @@ parse_and_check(ConfigString, BridgeType, Name) ->
Config.
create_bridge(Config) ->
+ create_bridge(Config, _Overrides = #{}).
+
+create_bridge(Config, Overrides) ->
BridgeType = ?config(pgsql_bridge_type, Config),
Name = ?config(pgsql_name, Config),
- PGConfig = ?config(pgsql_config, Config),
+ PGConfig0 = ?config(pgsql_config, Config),
+ PGConfig = emqx_map_lib:deep_merge(PGConfig0, Overrides),
emqx_bridge:create(BridgeType, Name, PGConfig).
delete_bridge(Config) ->
@@ -251,6 +257,27 @@ query_resource(Config, Request) ->
ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
emqx_resource:query(ResourceID, Request, #{timeout => 1_000}).
+query_resource_async(Config, Request) ->
+ Name = ?config(pgsql_name, Config),
+ BridgeType = ?config(pgsql_bridge_type, Config),
+ Ref = alias([reply]),
+ AsyncReplyFun = fun(Result) -> Ref ! {result, Ref, Result} end,
+ ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
+ Return = emqx_resource:query(ResourceID, Request, #{
+ timeout => 500, async_reply_fun => {AsyncReplyFun, []}
+ }),
+ {Return, Ref}.
+
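+%% The reply may arrive either as {result, Ref, Result} (tagged by the reply
+%% callback above) or as {Ref, Result} (sent directly to the alias), so accept
+%% both shapes.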
+receive_result(Ref, Timeout) ->
+ receive
+ {result, Ref, Result} ->
+ {ok, Result};
+ {Ref, Result} ->
+ {ok, Result}
+ after Timeout ->
+ timeout
+ end.
+
connect_direct_pgsql(Config) ->
Opts = #{
host => ?config(pgsql_host, Config),
@@ -308,11 +335,12 @@ t_setup_via_config_and_publish(Config) ->
SentData = #{payload => Val, timestamp => 1668602148000},
?check_trace(
begin
- ?wait_async_action(
- ?assertEqual({ok, 1}, send_message(Config, SentData)),
- #{?snk_kind := pgsql_connector_query_return},
- 10_000
- ),
+ {_, {ok, _}} =
+ ?wait_async_action(
+ send_message(Config, SentData),
+ #{?snk_kind := pgsql_connector_query_return},
+ 10_000
+ ),
?assertMatch(
Val,
connect_and_get_payload(Config)
@@ -336,6 +364,7 @@ t_setup_via_http_api_and_publish(Config) ->
BridgeType = ?config(pgsql_bridge_type, Config),
Name = ?config(pgsql_name, Config),
PgsqlConfig0 = ?config(pgsql_config, Config),
+ QueryMode = ?config(query_mode, Config),
PgsqlConfig = PgsqlConfig0#{
<<"name">> => Name,
<<"type">> => BridgeType
@@ -348,11 +377,18 @@ t_setup_via_http_api_and_publish(Config) ->
SentData = #{payload => Val, timestamp => 1668602148000},
?check_trace(
begin
- ?wait_async_action(
- ?assertEqual({ok, 1}, send_message(Config, SentData)),
- #{?snk_kind := pgsql_connector_query_return},
- 10_000
- ),
+ {Res, {ok, _}} =
+ ?wait_async_action(
+ send_message(Config, SentData),
+ #{?snk_kind := pgsql_connector_query_return},
+ 10_000
+ ),
+ case QueryMode of
+ async ->
+ ok;
+ sync ->
+ ?assertEqual({ok, 1}, Res)
+ end,
?assertMatch(
Val,
connect_and_get_payload(Config)
@@ -457,28 +493,71 @@ t_write_timeout(Config) ->
ProxyName = ?config(proxy_name, Config),
ProxyPort = ?config(proxy_port, Config),
ProxyHost = ?config(proxy_host, Config),
- {ok, _} = create_bridge(Config),
+ QueryMode = ?config(query_mode, Config),
+ {ok, _} = create_bridge(
+ Config,
+ #{
+ <<"resource_opts">> => #{
+ <<"request_timeout">> => 500,
+ <<"resume_interval">> => 100,
+ <<"health_check_interval">> => 100
+ }
+ }
+ ),
Val = integer_to_binary(erlang:unique_integer()),
SentData = #{payload => Val, timestamp => 1668602148000},
- Timeout = 1000,
- emqx_common_test_helpers:with_failure(timeout, ProxyName, ProxyHost, ProxyPort, fun() ->
- ?assertMatch(
- {error, {resource_error, #{reason := timeout}}},
- query_resource(Config, {send_message, SentData, [], Timeout})
- )
- end),
+ {ok, SRef} = snabbkaffe:subscribe(
+ ?match_event(#{?snk_kind := call_query_enter}),
+ 2_000
+ ),
+ Res0 =
+ emqx_common_test_helpers:with_failure(timeout, ProxyName, ProxyHost, ProxyPort, fun() ->
+ Res1 =
+ case QueryMode of
+ async ->
+ query_resource_async(Config, {send_message, SentData});
+ sync ->
+ query_resource(Config, {send_message, SentData})
+ end,
+ ?assertMatch({ok, [_]}, snabbkaffe:receive_events(SRef)),
+ Res1
+ end),
+ case Res0 of
+ {_, Ref} when is_reference(Ref) ->
+ case receive_result(Ref, 15_000) of
+ {ok, Res} ->
+ ?assertMatch({error, {unrecoverable_error, _}}, Res);
+ timeout ->
+ ct:pal("mailbox:\n ~p", [process_info(self(), messages)]),
+ ct:fail("no response received")
+ end;
+ _ ->
+ ?assertMatch({error, {resource_error, #{reason := timeout}}}, Res0)
+ end,
ok.
t_simple_sql_query(Config) ->
+ EnableBatch = ?config(enable_batch, Config),
+ QueryMode = ?config(query_mode, Config),
?assertMatch(
{ok, _},
create_bridge(Config)
),
Request = {sql, <<"SELECT count(1) AS T">>},
- Result = query_resource(Config, Request),
- case ?config(enable_batch, Config) of
- true -> ?assertEqual({error, {unrecoverable_error, batch_prepare_not_implemented}}, Result);
- false -> ?assertMatch({ok, _, [{1}]}, Result)
+ Result =
+ case QueryMode of
+ sync ->
+ query_resource(Config, Request);
+ async ->
+ {_, Ref} = query_resource_async(Config, Request),
+ {ok, Res} = receive_result(Ref, 2_000),
+ Res
+ end,
+ case EnableBatch of
+ true ->
+ ?assertEqual({error, {unrecoverable_error, batch_prepare_not_implemented}}, Result);
+ false ->
+ ?assertMatch({ok, _, [{1}]}, Result)
end,
ok.
@@ -487,21 +566,40 @@ t_missing_data(Config) ->
{ok, _},
create_bridge(Config)
),
- Result = send_message(Config, #{}),
+ {_, {ok, Event}} =
+ ?wait_async_action(
+ send_message(Config, #{}),
+ #{?snk_kind := buffer_worker_flush_ack},
+ 2_000
+ ),
?assertMatch(
- {error, {unrecoverable_error, {error, error, <<"23502">>, not_null_violation, _, _}}},
- Result
+ #{
+ result :=
+ {error,
+ {unrecoverable_error, {error, error, <<"23502">>, not_null_violation, _, _}}}
+ },
+ Event
),
ok.
t_bad_sql_parameter(Config) ->
+ QueryMode = ?config(query_mode, Config),
+ EnableBatch = ?config(enable_batch, Config),
?assertMatch(
{ok, _},
create_bridge(Config)
),
Request = {sql, <<"">>, [bad_parameter]},
- Result = query_resource(Config, Request),
- case ?config(enable_batch, Config) of
+ Result =
+ case QueryMode of
+ sync ->
+ query_resource(Config, Request);
+ async ->
+ {_, Ref} = query_resource_async(Config, Request),
+ {ok, Res} = receive_result(Ref, 2_000),
+ Res
+ end,
+ case EnableBatch of
true ->
?assertEqual({error, {unrecoverable_error, invalid_request}}, Result);
false ->
@@ -515,5 +613,10 @@ t_nasty_sql_string(Config) ->
?assertMatch({ok, _}, create_bridge(Config)),
Payload = list_to_binary(lists:seq(1, 127)),
Message = #{payload => Payload, timestamp => erlang:system_time(millisecond)},
- ?assertEqual({ok, 1}, send_message(Config, Message)),
+ {_, {ok, _}} =
+ ?wait_async_action(
+ send_message(Config, Message),
+ #{?snk_kind := pgsql_connector_query_return},
+ 1_000
+ ),
?assertEqual(Payload, connect_and_get_payload(Config)).
diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_redis_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_redis_SUITE.erl
index 5431cbb03..f0b70d21b 100644
--- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_redis_SUITE.erl
+++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_redis_SUITE.erl
@@ -64,14 +64,17 @@ groups() ->
{group, batch_on},
{group, batch_off}
],
+ QueryModeGroups = [{group, async}, {group, sync}],
[
{rest, TCs},
{transports, [
{group, tcp},
{group, tls}
]},
- {tcp, TypeGroups},
- {tls, TypeGroups},
+ {tcp, QueryModeGroups},
+ {tls, QueryModeGroups},
+ {async, TypeGroups},
+ {sync, TypeGroups},
{redis_single, BatchGroups},
{redis_sentinel, BatchGroups},
{redis_cluster, BatchGroups},
@@ -79,6 +82,10 @@ groups() ->
{batch_off, ResourceSpecificTCs}
].
+init_per_group(async, Config) ->
+ [{query_mode, async} | Config];
+init_per_group(sync, Config) ->
+ [{query_mode, sync} | Config];
init_per_group(Group, Config) when
Group =:= redis_single; Group =:= redis_sentinel; Group =:= redis_cluster
->
@@ -149,8 +156,9 @@ init_per_testcase(_Testcase, Config) ->
{skip, "Batching is not supported by 'redis_cluster' bridge type"};
{RedisType, BatchMode} ->
Transport = ?config(transport, Config),
+ QueryMode = ?config(query_mode, Config),
#{RedisType := #{Transport := RedisConnConfig}} = redis_connect_configs(),
- #{BatchMode := ResourceConfig} = resource_configs(),
+ #{BatchMode := ResourceConfig} = resource_configs(#{query_mode => QueryMode}),
IsBatch = (BatchMode =:= batch_on),
BridgeConfig0 = maps:merge(RedisConnConfig, ?COMMON_REDIS_OPTS),
BridgeConfig1 = BridgeConfig0#{<<"resource_opts">> => ResourceConfig},
@@ -301,7 +309,7 @@ t_permanent_error(_Config) ->
?wait_async_action(
publish_message(Topic, Payload),
#{?snk_kind := redis_ee_connector_send_done},
- 10000
+ 10_000
)
end,
fun(Trace) ->
@@ -529,14 +537,14 @@ invalid_command_bridge_config() ->
<<"command_template">> => [<<"BAD">>, <<"COMMAND">>, <<"${payload}">>]
}.
-resource_configs() ->
+resource_configs(#{query_mode := QueryMode}) ->
#{
batch_off => #{
- <<"query_mode">> => <<"sync">>,
+ <<"query_mode">> => atom_to_binary(QueryMode),
<<"start_timeout">> => <<"15s">>
},
batch_on => #{
- <<"query_mode">> => <<"sync">>,
+ <<"query_mode">> => atom_to_binary(QueryMode),
<<"worker_pool_size">> => <<"1">>,
<<"batch_size">> => integer_to_binary(?BATCH_SIZE),
<<"start_timeout">> => <<"15s">>,
diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_rocketmq_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_rocketmq_SUITE.erl
index cd02b65d0..95ec47e7f 100644
--- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_rocketmq_SUITE.erl
+++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_rocketmq_SUITE.erl
@@ -24,17 +24,24 @@
all() ->
[
- {group, with_batch},
- {group, without_batch}
+ {group, async},
+ {group, sync}
].
groups() ->
TCs = emqx_common_test_helpers:all(?MODULE),
+ BatchingGroups = [{group, with_batch}, {group, without_batch}],
[
+ {async, BatchingGroups},
+ {sync, BatchingGroups},
{with_batch, TCs},
{without_batch, TCs}
].
+init_per_group(async, Config) ->
+ [{query_mode, async} | Config];
+init_per_group(sync, Config) ->
+ [{query_mode, sync} | Config];
init_per_group(with_batch, Config0) ->
Config = [{batch_size, ?BATCH_SIZE} | Config0],
common_init(Config);
@@ -84,7 +91,6 @@ common_init(ConfigT) ->
Config0 = [
{host, Host},
{port, Port},
- {query_mode, sync},
{proxy_name, "rocketmq"}
| ConfigT
],
diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_tdengine_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_tdengine_SUITE.erl
index 3b580ec61..c956a93c6 100644
--- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_tdengine_SUITE.erl
+++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_tdengine_SUITE.erl
@@ -46,18 +46,25 @@
all() ->
[
- {group, with_batch},
- {group, without_batch}
+ {group, async},
+ {group, sync}
].
groups() ->
TCs = emqx_common_test_helpers:all(?MODULE),
NonBatchCases = [t_write_timeout],
+ BatchingGroups = [{group, with_batch}, {group, without_batch}],
[
+ {async, BatchingGroups},
+ {sync, BatchingGroups},
{with_batch, TCs -- NonBatchCases},
{without_batch, TCs}
].
+init_per_group(async, Config) ->
+ [{query_mode, async} | Config];
+init_per_group(sync, Config) ->
+ [{query_mode, sync} | Config];
init_per_group(with_batch, Config0) ->
Config = [{enable_batch, true} | Config0],
common_init(Config);
@@ -87,6 +94,7 @@ end_per_suite(_Config) ->
init_per_testcase(_Testcase, Config) ->
connect_and_clear_table(Config),
delete_bridge(Config),
+ snabbkaffe:start_trace(),
Config.
end_per_testcase(_Testcase, Config) ->
@@ -109,7 +117,6 @@ common_init(ConfigT) ->
Config0 = [
{td_host, Host},
{td_port, Port},
- {query_mode, sync},
{proxy_name, "tdengine_restful"}
| ConfigT
],
@@ -194,9 +201,13 @@ parse_and_check(ConfigString, BridgeType, Name) ->
Config.
create_bridge(Config) ->
+ create_bridge(Config, _Overrides = #{}).
+
+create_bridge(Config, Overrides) ->
BridgeType = ?config(tdengine_bridge_type, Config),
Name = ?config(tdengine_name, Config),
- TDConfig = ?config(tdengine_config, Config),
+ TDConfig0 = ?config(tdengine_config, Config),
+ TDConfig = emqx_map_lib:deep_merge(TDConfig0, Overrides),
emqx_bridge:create(BridgeType, Name, TDConfig).
delete_bridge(Config) ->
@@ -224,6 +235,27 @@ query_resource(Config, Request) ->
ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
emqx_resource:query(ResourceID, Request, #{timeout => 1_000}).
+query_resource_async(Config, Request) ->
+ Name = ?config(tdengine_name, Config),
+ BridgeType = ?config(tdengine_bridge_type, Config),
+ Ref = alias([reply]),
+ AsyncReplyFun = fun(Result) -> Ref ! {result, Ref, Result} end,
+ ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
+ Return = emqx_resource:query(ResourceID, Request, #{
+ timeout => 500, async_reply_fun => {AsyncReplyFun, []}
+ }),
+ {Return, Ref}.
+
+receive_result(Ref, Timeout) ->
+ receive
+ {result, Ref, Result} ->
+ {ok, Result};
+ {Ref, Result} ->
+ {ok, Result}
+ after Timeout ->
+ timeout
+ end.
+
connect_direct_tdengine(Config) ->
Opts = [
{host, to_bin(?config(td_host, Config))},
@@ -273,12 +305,14 @@ t_setup_via_config_and_publish(Config) ->
SentData = #{payload => ?PAYLOAD, timestamp => 1668602148000},
?check_trace(
begin
- ?wait_async_action(
- ?assertMatch(
- {ok, #{<<"code">> := 0, <<"rows">> := 1}}, send_message(Config, SentData)
+ {_, {ok, #{result := Result}}} =
+ ?wait_async_action(
+ send_message(Config, SentData),
+ #{?snk_kind := buffer_worker_flush_ack},
+ 2_000
),
- #{?snk_kind := tdengine_connector_query_return},
- 10_000
+ ?assertMatch(
+ {ok, #{<<"code">> := 0, <<"rows">> := 1}}, Result
),
?assertMatch(
?PAYLOAD,
@@ -297,24 +331,32 @@ t_setup_via_config_and_publish(Config) ->
t_setup_via_http_api_and_publish(Config) ->
BridgeType = ?config(tdengine_bridge_type, Config),
Name = ?config(tdengine_name, Config),
- PgsqlConfig0 = ?config(tdengine_config, Config),
- PgsqlConfig = PgsqlConfig0#{
+ QueryMode = ?config(query_mode, Config),
+ TDengineConfig0 = ?config(tdengine_config, Config),
+ TDengineConfig = TDengineConfig0#{
<<"name">> => Name,
<<"type">> => BridgeType
},
?assertMatch(
{ok, _},
- create_bridge_http(PgsqlConfig)
+ create_bridge_http(TDengineConfig)
),
SentData = #{payload => ?PAYLOAD, timestamp => 1668602148000},
?check_trace(
begin
- ?wait_async_action(
- ?assertMatch(
- {ok, #{<<"code">> := 0, <<"rows">> := 1}}, send_message(Config, SentData)
- ),
- #{?snk_kind := tdengine_connector_query_return},
- 10_000
+ Request = {send_message, SentData},
+ Res0 =
+ case QueryMode of
+ sync ->
+ query_resource(Config, Request);
+ async ->
+ {_, Ref} = query_resource_async(Config, Request),
+ {ok, Res} = receive_result(Ref, 2_000),
+ Res
+ end,
+
+ ?assertMatch(
+ {ok, #{<<"code">> := 0, <<"rows">> := 1}}, Res0
),
?assertMatch(
?PAYLOAD,
@@ -359,7 +401,14 @@ t_write_failure(Config) ->
{ok, _} = create_bridge(Config),
SentData = #{payload => ?PAYLOAD, timestamp => 1668602148000},
emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
- ?assertMatch({error, econnrefused}, send_message(Config, SentData))
+ {_, {ok, #{result := Result}}} =
+ ?wait_async_action(
+ send_message(Config, SentData),
+ #{?snk_kind := buffer_worker_flush_ack},
+ 2_000
+ ),
+ ?assertMatch({error, econnrefused}, Result),
+ ok
end),
ok.
@@ -369,24 +418,50 @@ t_write_timeout(Config) ->
ProxyName = ?config(proxy_name, Config),
ProxyPort = ?config(proxy_port, Config),
ProxyHost = ?config(proxy_host, Config),
- {ok, _} = create_bridge(Config),
+ QueryMode = ?config(query_mode, Config),
+ {ok, _} = create_bridge(
+ Config,
+ #{
+ <<"resource_opts">> => #{
+ <<"request_timeout">> => 500,
+ <<"resume_interval">> => 100,
+ <<"health_check_interval">> => 100
+ }
+ }
+ ),
SentData = #{payload => ?PAYLOAD, timestamp => 1668602148000},
- emqx_common_test_helpers:with_failure(timeout, ProxyName, ProxyHost, ProxyPort, fun() ->
- ?assertMatch(
- {error, {resource_error, #{reason := timeout}}},
- query_resource(Config, {send_message, SentData})
- )
- end),
+ %% FIXME: the TDengine connector hangs indefinitely in
+ %% `call_query' while the connection is unresponsive. Should we add
+ %% a timeout to `APPLY_RESOURCE' in the buffer worker?
+ case QueryMode of
+ sync ->
+ emqx_common_test_helpers:with_failure(
+ timeout, ProxyName, ProxyHost, ProxyPort, fun() ->
+ ?assertMatch(
+ {error, {resource_error, #{reason := timeout}}},
+ query_resource(Config, {send_message, SentData})
+ )
+ end
+ );
+ async ->
+ ct:comment("tdengine connector hangs the buffer worker forever")
+ end,
ok.
t_simple_sql_query(Config) ->
+ EnableBatch = ?config(enable_batch, Config),
?assertMatch(
{ok, _},
create_bridge(Config)
),
Request = {query, <<"SELECT count(1) AS T">>},
- Result = query_resource(Config, Request),
- case ?config(enable_batch, Config) of
+ {_, {ok, #{result := Result}}} =
+ ?wait_async_action(
+ query_resource(Config, Request),
+ #{?snk_kind := buffer_worker_flush_ack},
+ 2_000
+ ),
+ case EnableBatch of
true ->
?assertEqual({error, {unrecoverable_error, batch_prepare_not_implemented}}, Result);
false ->
@@ -399,7 +474,12 @@ t_missing_data(Config) ->
{ok, _},
create_bridge(Config)
),
- Result = send_message(Config, #{}),
+ {_, {ok, #{result := Result}}} =
+ ?wait_async_action(
+ send_message(Config, #{}),
+ #{?snk_kind := buffer_worker_flush_ack},
+ 2_000
+ ),
?assertMatch(
{error, #{
<<"code">> := 534,
@@ -410,13 +490,19 @@ t_missing_data(Config) ->
ok.
t_bad_sql_parameter(Config) ->
+ EnableBatch = ?config(enable_batch, Config),
?assertMatch(
{ok, _},
create_bridge(Config)
),
Request = {sql, <<"">>, [bad_parameter]},
- Result = query_resource(Config, Request),
- case ?config(enable_batch, Config) of
+ {_, {ok, #{result := Result}}} =
+ ?wait_async_action(
+ query_resource(Config, Request),
+ #{?snk_kind := buffer_worker_flush_ack},
+ 2_000
+ ),
+ case EnableBatch of
true ->
?assertEqual({error, {unrecoverable_error, invalid_request}}, Result);
false ->
@@ -443,9 +529,15 @@ t_nasty_sql_string(Config) ->
% [1]: https://github.com/taosdata/TDengine/blob/066cb34a/source/libs/parser/src/parUtil.c#L279-L301
Payload = list_to_binary(lists:seq(1, 127)),
Message = #{payload => Payload, timestamp => erlang:system_time(millisecond)},
+ {_, {ok, #{result := Result}}} =
+ ?wait_async_action(
+ send_message(Config, Message),
+ #{?snk_kind := buffer_worker_flush_ack},
+ 2_000
+ ),
?assertMatch(
{ok, #{<<"code">> := 0, <<"rows">> := 1}},
- send_message(Config, Message)
+ Result
),
?assertEqual(
Payload,
diff --git a/lib-ee/emqx_ee_connector/rebar.config b/lib-ee/emqx_ee_connector/rebar.config
index d49ce59c0..e754bd573 100644
--- a/lib-ee/emqx_ee_connector/rebar.config
+++ b/lib-ee/emqx_ee_connector/rebar.config
@@ -2,7 +2,7 @@
{deps, [
{hstreamdb_erl, {git, "https://github.com/hstreamdb/hstreamdb_erl.git", {tag, "0.2.5"}}},
{influxdb, {git, "https://github.com/emqx/influxdb-client-erl", {tag, "1.1.9"}}},
- {tdengine, {git, "https://github.com/emqx/tdengine-client-erl", {tag, "0.1.5"}}},
+ {tdengine, {git, "https://github.com/emqx/tdengine-client-erl", {tag, "0.1.6"}}},
{clickhouse, {git, "https://github.com/emqx/clickhouse-client-erl", {tag, "0.3"}}},
{erlcloud, {git, "https://github.com/emqx/erlcloud.git", {tag,"3.5.16-emqx-1"}}},
{rocketmq, {git, "https://github.com/emqx/rocketmq-client-erl.git", {tag, "v0.5.1"}}},
diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_mongodb.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_mongodb.erl
index 8df77fbe0..aa03863b0 100644
--- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_mongodb.erl
+++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_mongodb.erl
@@ -60,7 +60,9 @@ on_query(InstanceId, {send_message, Message0}, State) ->
collection => emqx_plugin_libs_rule:proc_tmpl(CollectionTemplate, Message0)
},
Message = render_message(PayloadTemplate, Message0),
- emqx_connector_mongo:on_query(InstanceId, {send_message, Message}, NewConnectorState);
+ Res = emqx_connector_mongo:on_query(InstanceId, {send_message, Message}, NewConnectorState),
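+ %% Trace point consumed by the test suites (via `?wait_async_action') to
+ %% synchronize on query completion.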
+ ?tp(mongo_ee_connector_on_query_return, #{result => Res}),
+ Res;
on_query(InstanceId, Request, _State = #{connector_state := ConnectorState}) ->
emqx_connector_mongo:on_query(InstanceId, Request, ConnectorState).
diff --git a/mix.exs b/mix.exs
index 981755b01..483815c3a 100644
--- a/mix.exs
+++ b/mix.exs
@@ -93,7 +93,7 @@ defmodule EMQXUmbrella.MixProject do
github: "ninenines/ranch", ref: "a692f44567034dacf5efcaa24a24183788594eb7", override: true},
# in conflict by grpc and eetcd
{:gpb, "4.19.5", override: true, runtime: false},
- {:hackney, github: "benoitc/hackney", tag: "1.18.1", override: true}
+ {:hackney, github: "emqx/hackney", tag: "1.18.1-1", override: true}
] ++
emqx_apps(profile_info, version) ++
enterprise_deps(profile_info) ++ bcrypt_dep() ++ jq_dep() ++ quicer_dep()
diff --git a/rebar.config b/rebar.config
index b641077ea..50a8124be 100644
--- a/rebar.config
+++ b/rebar.config
@@ -80,7 +80,7 @@
, {esasl, {git, "https://github.com/emqx/esasl", {tag, "0.2.0"}}}
, {jose, {git, "https://github.com/potatosalad/erlang-jose", {tag, "1.11.2"}}}
, {telemetry, "1.1.0"}
- , {hackney, {git, "https://github.com/benoitc/hackney", {tag, "1.18.1"}}}
+ , {hackney, {git, "https://github.com/emqx/hackney.git", {tag, "1.18.1-1"}}}
]}.
{xref_ignores,
diff --git a/apps/emqx_authn/i18n/emqx_authn_api_i18n.conf b/rel/i18n/emqx_authn_api.hocon
similarity index 100%
rename from apps/emqx_authn/i18n/emqx_authn_api_i18n.conf
rename to rel/i18n/emqx_authn_api.hocon
diff --git a/apps/emqx_authn/i18n/emqx_authn_http_i18n.conf b/rel/i18n/emqx_authn_http.hocon
similarity index 100%
rename from apps/emqx_authn/i18n/emqx_authn_http_i18n.conf
rename to rel/i18n/emqx_authn_http.hocon
diff --git a/apps/emqx_authn/i18n/emqx_authn_jwt_i18n.conf b/rel/i18n/emqx_authn_jwt.hocon
similarity index 100%
rename from apps/emqx_authn/i18n/emqx_authn_jwt_i18n.conf
rename to rel/i18n/emqx_authn_jwt.hocon
diff --git a/apps/emqx_authn/i18n/emqx_authn_mnesia_i18n.conf b/rel/i18n/emqx_authn_mnesia.hocon
similarity index 100%
rename from apps/emqx_authn/i18n/emqx_authn_mnesia_i18n.conf
rename to rel/i18n/emqx_authn_mnesia.hocon
diff --git a/apps/emqx_authn/i18n/emqx_authn_mongodb_i18n.conf b/rel/i18n/emqx_authn_mongodb.hocon
similarity index 100%
rename from apps/emqx_authn/i18n/emqx_authn_mongodb_i18n.conf
rename to rel/i18n/emqx_authn_mongodb.hocon
diff --git a/apps/emqx_authn/i18n/emqx_authn_mysql_i18n.conf b/rel/i18n/emqx_authn_mysql.hocon
similarity index 100%
rename from apps/emqx_authn/i18n/emqx_authn_mysql_i18n.conf
rename to rel/i18n/emqx_authn_mysql.hocon
diff --git a/apps/emqx_authn/i18n/emqx_authn_pgsql_i18n.conf b/rel/i18n/emqx_authn_pgsql.hocon
similarity index 100%
rename from apps/emqx_authn/i18n/emqx_authn_pgsql_i18n.conf
rename to rel/i18n/emqx_authn_pgsql.hocon
diff --git a/apps/emqx_authn/i18n/emqx_authn_redis_i18n.conf b/rel/i18n/emqx_authn_redis.hocon
similarity index 100%
rename from apps/emqx_authn/i18n/emqx_authn_redis_i18n.conf
rename to rel/i18n/emqx_authn_redis.hocon
diff --git a/apps/emqx_authn/i18n/emqx_authn_schema_i18n.conf b/rel/i18n/emqx_authn_schema.hocon
similarity index 100%
rename from apps/emqx_authn/i18n/emqx_authn_schema_i18n.conf
rename to rel/i18n/emqx_authn_schema.hocon
diff --git a/apps/emqx_authn/i18n/emqx_authn_user_import_api_i18n.conf b/rel/i18n/emqx_authn_user_import_api.hocon
similarity index 100%
rename from apps/emqx_authn/i18n/emqx_authn_user_import_api_i18n.conf
rename to rel/i18n/emqx_authn_user_import_api.hocon
diff --git a/apps/emqx_authz/i18n/emqx_authz_api_cache_i18n.conf b/rel/i18n/emqx_authz_api_cache.hocon
similarity index 100%
rename from apps/emqx_authz/i18n/emqx_authz_api_cache_i18n.conf
rename to rel/i18n/emqx_authz_api_cache.hocon
diff --git a/apps/emqx_authz/i18n/emqx_authz_api_mnesia_i18n.conf b/rel/i18n/emqx_authz_api_mnesia.hocon
similarity index 100%
rename from apps/emqx_authz/i18n/emqx_authz_api_mnesia_i18n.conf
rename to rel/i18n/emqx_authz_api_mnesia.hocon
diff --git a/apps/emqx_authz/i18n/emqx_authz_api_schema_i18n.conf b/rel/i18n/emqx_authz_api_schema.hocon
similarity index 100%
rename from apps/emqx_authz/i18n/emqx_authz_api_schema_i18n.conf
rename to rel/i18n/emqx_authz_api_schema.hocon
diff --git a/apps/emqx_authz/i18n/emqx_authz_api_settings_i18n.conf b/rel/i18n/emqx_authz_api_settings.hocon
similarity index 100%
rename from apps/emqx_authz/i18n/emqx_authz_api_settings_i18n.conf
rename to rel/i18n/emqx_authz_api_settings.hocon
diff --git a/apps/emqx_authz/i18n/emqx_authz_api_sources_i18n.conf b/rel/i18n/emqx_authz_api_sources.hocon
similarity index 100%
rename from apps/emqx_authz/i18n/emqx_authz_api_sources_i18n.conf
rename to rel/i18n/emqx_authz_api_sources.hocon
diff --git a/apps/emqx_authz/i18n/emqx_authz_schema_i18n.conf b/rel/i18n/emqx_authz_schema.hocon
similarity index 100%
rename from apps/emqx_authz/i18n/emqx_authz_schema_i18n.conf
rename to rel/i18n/emqx_authz_schema.hocon
diff --git a/apps/emqx_auto_subscribe/i18n/emqx_auto_subscribe_api_i18n.conf b/rel/i18n/emqx_auto_subscribe_api.hocon
similarity index 100%
rename from apps/emqx_auto_subscribe/i18n/emqx_auto_subscribe_api_i18n.conf
rename to rel/i18n/emqx_auto_subscribe_api.hocon
diff --git a/apps/emqx_auto_subscribe/i18n/emqx_auto_subscribe_i18n.conf b/rel/i18n/emqx_auto_subscribe_schema.hocon
similarity index 100%
rename from apps/emqx_auto_subscribe/i18n/emqx_auto_subscribe_i18n.conf
rename to rel/i18n/emqx_auto_subscribe_schema.hocon
diff --git a/apps/emqx_bridge/i18n/emqx_bridge_api.conf b/rel/i18n/emqx_bridge_api.hocon
similarity index 100%
rename from apps/emqx_bridge/i18n/emqx_bridge_api.conf
rename to rel/i18n/emqx_bridge_api.hocon
diff --git a/apps/emqx_bridge/i18n/emqx_bridge_mqtt_schema.conf b/rel/i18n/emqx_bridge_mqtt_schema.hocon
similarity index 100%
rename from apps/emqx_bridge/i18n/emqx_bridge_mqtt_schema.conf
rename to rel/i18n/emqx_bridge_mqtt_schema.hocon
diff --git a/apps/emqx_bridge/i18n/emqx_bridge_schema.conf b/rel/i18n/emqx_bridge_schema.hocon
similarity index 100%
rename from apps/emqx_bridge/i18n/emqx_bridge_schema.conf
rename to rel/i18n/emqx_bridge_schema.hocon
diff --git a/apps/emqx_bridge/i18n/emqx_bridge_webhook_schema.conf b/rel/i18n/emqx_bridge_webhook_schema.hocon
similarity index 100%
rename from apps/emqx_bridge/i18n/emqx_bridge_webhook_schema.conf
rename to rel/i18n/emqx_bridge_webhook_schema.hocon
diff --git a/apps/emqx_coap/i18n/emqx_coap_api_i18n.conf b/rel/i18n/emqx_coap_api.hocon
similarity index 100%
rename from apps/emqx_coap/i18n/emqx_coap_api_i18n.conf
rename to rel/i18n/emqx_coap_api.hocon
diff --git a/apps/emqx_coap/i18n/emqx_coap_schema.conf b/rel/i18n/emqx_coap_schema.hocon
similarity index 100%
rename from apps/emqx_coap/i18n/emqx_coap_schema.conf
rename to rel/i18n/emqx_coap_schema.hocon
diff --git a/apps/emqx_conf/i18n/emqx_conf_schema.conf b/rel/i18n/emqx_conf_schema.hocon
similarity index 100%
rename from apps/emqx_conf/i18n/emqx_conf_schema.conf
rename to rel/i18n/emqx_conf_schema.hocon
diff --git a/apps/emqx_connector/i18n/emqx_connector_api.conf b/rel/i18n/emqx_connector_api.hocon
similarity index 100%
rename from apps/emqx_connector/i18n/emqx_connector_api.conf
rename to rel/i18n/emqx_connector_api.hocon
diff --git a/apps/emqx_connector/i18n/emqx_connector_http.conf b/rel/i18n/emqx_connector_http.hocon
similarity index 100%
rename from apps/emqx_connector/i18n/emqx_connector_http.conf
rename to rel/i18n/emqx_connector_http.hocon
diff --git a/apps/emqx_connector/i18n/emqx_connector_ldap.conf b/rel/i18n/emqx_connector_ldap.hocon
similarity index 100%
rename from apps/emqx_connector/i18n/emqx_connector_ldap.conf
rename to rel/i18n/emqx_connector_ldap.hocon
diff --git a/apps/emqx_connector/i18n/emqx_connector_mongo.conf b/rel/i18n/emqx_connector_mongo.hocon
similarity index 100%
rename from apps/emqx_connector/i18n/emqx_connector_mongo.conf
rename to rel/i18n/emqx_connector_mongo.hocon
diff --git a/apps/emqx_connector/i18n/emqx_connector_mqtt.conf b/rel/i18n/emqx_connector_mqtt.hocon
similarity index 100%
rename from apps/emqx_connector/i18n/emqx_connector_mqtt.conf
rename to rel/i18n/emqx_connector_mqtt.hocon
diff --git a/apps/emqx_connector/i18n/emqx_connector_mqtt_schema.conf b/rel/i18n/emqx_connector_mqtt_schema.hocon
similarity index 100%
rename from apps/emqx_connector/i18n/emqx_connector_mqtt_schema.conf
rename to rel/i18n/emqx_connector_mqtt_schema.hocon
diff --git a/apps/emqx_connector/i18n/emqx_connector_mysql.conf b/rel/i18n/emqx_connector_mysql.hocon
similarity index 100%
rename from apps/emqx_connector/i18n/emqx_connector_mysql.conf
rename to rel/i18n/emqx_connector_mysql.hocon
diff --git a/apps/emqx_connector/i18n/emqx_connector_pgsql.conf b/rel/i18n/emqx_connector_pgsql.hocon
similarity index 100%
rename from apps/emqx_connector/i18n/emqx_connector_pgsql.conf
rename to rel/i18n/emqx_connector_pgsql.hocon
diff --git a/apps/emqx_connector/i18n/emqx_connector_redis.conf b/rel/i18n/emqx_connector_redis.hocon
similarity index 100%
rename from apps/emqx_connector/i18n/emqx_connector_redis.conf
rename to rel/i18n/emqx_connector_redis.hocon
diff --git a/apps/emqx_connector/i18n/emqx_connector_schema_lib.conf b/rel/i18n/emqx_connector_schema_lib.hocon
similarity index 100%
rename from apps/emqx_connector/i18n/emqx_connector_schema_lib.conf
rename to rel/i18n/emqx_connector_schema_lib.hocon
diff --git a/apps/emqx_dashboard/i18n/emqx_dashboard_api_i18n.conf b/rel/i18n/emqx_dashboard_api.hocon
similarity index 100%
rename from apps/emqx_dashboard/i18n/emqx_dashboard_api_i18n.conf
rename to rel/i18n/emqx_dashboard_api.hocon
diff --git a/apps/emqx_dashboard/i18n/emqx_dashboard_i18n.conf b/rel/i18n/emqx_dashboard_schema.hocon
similarity index 100%
rename from apps/emqx_dashboard/i18n/emqx_dashboard_i18n.conf
rename to rel/i18n/emqx_dashboard_schema.hocon
diff --git a/apps/emqx_modules/i18n/emqx_delayed_api_i18n.conf b/rel/i18n/emqx_delayed_api.hocon
similarity index 100%
rename from apps/emqx_modules/i18n/emqx_delayed_api_i18n.conf
rename to rel/i18n/emqx_delayed_api.hocon
diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_cassa.conf b/rel/i18n/emqx_ee_bridge_cassa.hocon
similarity index 100%
rename from lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_cassa.conf
rename to rel/i18n/emqx_ee_bridge_cassa.hocon
diff --git a/rel/i18n/emqx_ee_bridge_clickhouse.hocon b/rel/i18n/emqx_ee_bridge_clickhouse.hocon
new file mode 100644
index 000000000..b54f4dc70
--- /dev/null
+++ b/rel/i18n/emqx_ee_bridge_clickhouse.hocon
@@ -0,0 +1,81 @@
+emqx_ee_bridge_clickhouse {
+
+ local_topic {
+ desc {
+ en: """The MQTT topic filter to be forwarded to Clickhouse. All MQTT 'PUBLISH' messages with the topic
+matching the local_topic will be forwarded.
+NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is
+configured, then both the data got from the rule and the MQTT messages that match local_topic
+will be forwarded."""
+ zh: """发送到 'local_topic' 的消息都会转发到 Clickhouse。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。"""
+ }
+ label {
+ en: "Local Topic"
+ zh: "本地 Topic"
+ }
+ }
+ sql_template {
+ desc {
+ en: """The template string can contain ${field} placeholders for message metadata and payload field. Make sure that the inserted values are formatted and escaped correctly. [Prepared Statement](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridges.html#Prepared-Statement) is not supported."""
+ zh: """可以使用 ${field} 占位符来引用消息与客户端上下文中的变量,请确保对应字段存在且数据格式符合预期。此处不支持 [SQL 预处理](https://docs.emqx.com/zh/enterprise/v5.0/data-integration/data-bridges.html#sql-预处理)。"""
+ }
+ label {
+ en: "SQL Template"
+ zh: "SQL 模板"
+ }
+ }
+ batch_value_separator {
+ desc {
+ en: """The default value ',' works for the VALUES format. You can also use other separator if other format is specified. See [INSERT INTO Statement](https://clickhouse.com/docs/en/sql-reference/statements/insert-into)."""
+ zh: """默认为逗号 ',',适用于 VALUE 格式。您也可以使用其他分隔符, 请参考 [INSERT INTO 语句](https://clickhouse.com/docs/en/sql-reference/statements/insert-into)。"""
+ }
+ label {
+ en: "Batch Value Separator"
+ zh: "分隔符"
+ }
+ }
+ config_enable {
+ desc {
+ en: """Enable or disable this bridge"""
+ zh: """启用/禁用桥接"""
+ }
+ label {
+ en: "Enable Or Disable Bridge"
+ zh: "启用/禁用桥接"
+ }
+ }
+
+ desc_config {
+ desc {
+ en: """Configuration for a Clickhouse bridge."""
+ zh: """Clickhouse 桥接配置"""
+ }
+ label: {
+ en: "Clickhouse Bridge Configuration"
+ zh: "Clickhouse 桥接配置"
+ }
+ }
+
+ desc_type {
+ desc {
+ en: """The Bridge Type"""
+ zh: """Bridge 类型"""
+ }
+ label {
+ en: "Bridge Type"
+ zh: "桥接类型"
+ }
+ }
+
+ desc_name {
+ desc {
+ en: """Bridge name."""
+ zh: """桥接名字"""
+ }
+ label {
+ en: "Bridge Name"
+ zh: "桥接名字"
+ }
+ }
+}
diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_dynamo.conf b/rel/i18n/emqx_ee_bridge_dynamo.hocon
similarity index 100%
rename from lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_dynamo.conf
rename to rel/i18n/emqx_ee_bridge_dynamo.hocon
diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_gcp_pubsub.conf b/rel/i18n/emqx_ee_bridge_gcp_pubsub.hocon
similarity index 100%
rename from lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_gcp_pubsub.conf
rename to rel/i18n/emqx_ee_bridge_gcp_pubsub.hocon
diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_hstreamdb.conf b/rel/i18n/emqx_ee_bridge_hstreamdb.hocon
similarity index 100%
rename from lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_hstreamdb.conf
rename to rel/i18n/emqx_ee_bridge_hstreamdb.hocon
diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_influxdb.conf b/rel/i18n/emqx_ee_bridge_influxdb.hocon
similarity index 100%
rename from lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_influxdb.conf
rename to rel/i18n/emqx_ee_bridge_influxdb.hocon
diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_kafka.conf b/rel/i18n/emqx_ee_bridge_kafka.hocon
similarity index 99%
rename from lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_kafka.conf
rename to rel/i18n/emqx_ee_bridge_kafka.hocon
index df32c1cae..d1a017416 100644
--- a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_kafka.conf
+++ b/rel/i18n/emqx_ee_bridge_kafka.hocon
@@ -547,7 +547,7 @@ emqx_ee_bridge_kafka {
"ts
: message timestamp.\n"
"ts_type
: message timestamp type, which is one of"
" create
, append
or undefined
.\n"
- "value
: Kafka message value (uses the chosen value encoding).\n"
+ "value
: Kafka message value (uses the chosen value encoding)."
zh: "用于转换收到的 Kafka 消息的模板。 "
"默认情况下,它将使用 JSON 格式来序列化来自 Kafka 的所有字段。 "
"这些字段包括:"
@@ -558,7 +558,7 @@ emqx_ee_bridge_kafka {
"ts
: 消息的时间戳。\n"
"ts_type
:消息的时间戳类型,值可能是:"
" create
, append
或 undefined
。\n"
- "value
: Kafka 消息值(使用选择的编码方式编码)。\n"
+ "value
: Kafka 消息值(使用选择的编码方式编码)。"
}
label {
diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_mongodb.conf b/rel/i18n/emqx_ee_bridge_mongodb.hocon
similarity index 100%
rename from lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_mongodb.conf
rename to rel/i18n/emqx_ee_bridge_mongodb.hocon
diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_mysql.conf b/rel/i18n/emqx_ee_bridge_mysql.hocon
similarity index 100%
rename from lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_mysql.conf
rename to rel/i18n/emqx_ee_bridge_mysql.hocon
diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_pgsql.conf b/rel/i18n/emqx_ee_bridge_pgsql.hocon
similarity index 100%
rename from lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_pgsql.conf
rename to rel/i18n/emqx_ee_bridge_pgsql.hocon
diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_redis.conf b/rel/i18n/emqx_ee_bridge_redis.hocon
similarity index 100%
rename from lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_redis.conf
rename to rel/i18n/emqx_ee_bridge_redis.hocon
diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_rocketmq.conf b/rel/i18n/emqx_ee_bridge_rocketmq.hocon
similarity index 100%
rename from lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_rocketmq.conf
rename to rel/i18n/emqx_ee_bridge_rocketmq.hocon
diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_tdengine.conf b/rel/i18n/emqx_ee_bridge_tdengine.hocon
similarity index 96%
rename from lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_tdengine.conf
rename to rel/i18n/emqx_ee_bridge_tdengine.hocon
index 2d5af9f16..21fc013df 100644
--- a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_tdengine.conf
+++ b/rel/i18n/emqx_ee_bridge_tdengine.hocon
@@ -6,11 +6,9 @@ emqx_ee_bridge_tdengine {
matching the local_topic will be forwarded.
NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is
configured, then both the data got from the rule and the MQTT messages that match local_topic
-will be forwarded.
-"""
+will be forwarded."""
zh: """发送到 'local_topic' 的消息都会转发到 TDengine。
-注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。
-"""
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。"""
}
label {
en: "Local Topic"
diff --git a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_cassa.conf b/rel/i18n/emqx_ee_connector_cassa.hocon
similarity index 100%
rename from lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_cassa.conf
rename to rel/i18n/emqx_ee_connector_cassa.hocon
diff --git a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_clickhouse.conf b/rel/i18n/emqx_ee_connector_clickhouse.hocon
similarity index 86%
rename from lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_clickhouse.conf
rename to rel/i18n/emqx_ee_connector_clickhouse.hocon
index 533d100bf..4d30e1715 100644
--- a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_clickhouse.conf
+++ b/rel/i18n/emqx_ee_connector_clickhouse.hocon
@@ -1,4 +1,3 @@
-
emqx_ee_connector_clickhouse {
base_url {
@@ -7,8 +6,8 @@ emqx_ee_connector_clickhouse {
zh: """你想连接到的Clickhouse服务器的HTTP URL(例如http://myhostname:8123)。"""
}
label: {
- en: "URL to clickhouse server"
- zh: "到clickhouse服务器的URL"
+ en: "Server URL"
+ zh: "服务器 URL"
}
}
diff --git a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_dynamo.conf b/rel/i18n/emqx_ee_connector_dynamo.hocon
similarity index 63%
rename from lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_dynamo.conf
rename to rel/i18n/emqx_ee_connector_dynamo.hocon
index e1fc11e03..295929a72 100644
--- a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_dynamo.conf
+++ b/rel/i18n/emqx_ee_connector_dynamo.hocon
@@ -2,8 +2,8 @@ emqx_ee_connector_dynamo {
url {
desc {
- en: """The url of DynamoDB endpoint.
"""
- zh: """DynamoDB 的地址。
"""
+ en: """The url of DynamoDB endpoint."""
+ zh: """DynamoDB 的地址。"""
}
label: {
en: "DynamoDB Endpoint"
diff --git a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_hstreamdb.conf b/rel/i18n/emqx_ee_connector_hstreamdb.hocon
similarity index 100%
rename from lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_hstreamdb.conf
rename to rel/i18n/emqx_ee_connector_hstreamdb.hocon
diff --git a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_influxdb.conf b/rel/i18n/emqx_ee_connector_influxdb.hocon
similarity index 100%
rename from lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_influxdb.conf
rename to rel/i18n/emqx_ee_connector_influxdb.hocon
diff --git a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_rocketmq.conf b/rel/i18n/emqx_ee_connector_rocketmq.hocon
similarity index 89%
rename from lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_rocketmq.conf
rename to rel/i18n/emqx_ee_connector_rocketmq.hocon
index d4a610212..44dda7931 100644
--- a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_rocketmq.conf
+++ b/rel/i18n/emqx_ee_connector_rocketmq.hocon
@@ -2,16 +2,12 @@ emqx_ee_connector_rocketmq {
server {
desc {
- en: """
-The IPv4 or IPv6 address or the hostname to connect to.
+ en: """The IPv4 or IPv6 address or the hostname to connect to.
A host entry has the following form: `Host[:Port]`.
-The RocketMQ default port 9876 is used if `[:Port]` is not specified.
-"""
- zh: """
-将要连接的 IPv4 或 IPv6 地址,或者主机名。
+The RocketMQ default port 9876 is used if `[:Port]` is not specified."""
+ zh: """将要连接的 IPv4 或 IPv6 地址,或者主机名。
主机名具有以下形式:`Host[:Port]`。
-如果未指定 `[:Port]`,则使用 RocketMQ 默认端口 9876。
-"""
+如果未指定 `[:Port]`,则使用 RocketMQ 默认端口 9876。"""
}
label: {
en: "Server Host"
diff --git a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_tdengine.conf b/rel/i18n/emqx_ee_connector_tdengine.hocon
similarity index 69%
rename from lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_tdengine.conf
rename to rel/i18n/emqx_ee_connector_tdengine.hocon
index c6c58d82d..02254124c 100644
--- a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_tdengine.conf
+++ b/rel/i18n/emqx_ee_connector_tdengine.hocon
@@ -2,16 +2,12 @@ emqx_ee_connector_tdengine {
server {
desc {
- en: """
-The IPv4 or IPv6 address or the hostname to connect to.
+ en: """The IPv4 or IPv6 address or the hostname to connect to.
A host entry has the following form: `Host[:Port]`.
-The TDengine default port 6041 is used if `[:Port]` is not specified.
-"""
- zh: """
-将要连接的 IPv4 或 IPv6 地址,或者主机名。
+The TDengine default port 6041 is used if `[:Port]` is not specified."""
+ zh: """将要连接的 IPv4 或 IPv6 地址,或者主机名。
主机名具有以下形式:`Host[:Port]`。
-如果未指定 `[:Port]`,则使用 TDengine 默认端口 6041。
-"""
+如果未指定 `[:Port]`,则使用 TDengine 默认端口 6041。"""
}
label: {
en: "Server Host"
diff --git a/apps/emqx_exhook/i18n/emqx_exhook_api_i18n.conf b/rel/i18n/emqx_exhook_api.hocon
similarity index 100%
rename from apps/emqx_exhook/i18n/emqx_exhook_api_i18n.conf
rename to rel/i18n/emqx_exhook_api.hocon
diff --git a/apps/emqx_exhook/i18n/emqx_exhook_i18n.conf b/rel/i18n/emqx_exhook_schema.hocon
similarity index 100%
rename from apps/emqx_exhook/i18n/emqx_exhook_i18n.conf
rename to rel/i18n/emqx_exhook_schema.hocon
diff --git a/apps/emqx_exproto/i18n/emqx_exproto_schema.conf b/rel/i18n/emqx_exproto_schema.hocon
similarity index 100%
rename from apps/emqx_exproto/i18n/emqx_exproto_schema.conf
rename to rel/i18n/emqx_exproto_schema.hocon
diff --git a/apps/emqx_gateway/i18n/emqx_gateway_api_i18n.conf b/rel/i18n/emqx_gateway_api.hocon
similarity index 100%
rename from apps/emqx_gateway/i18n/emqx_gateway_api_i18n.conf
rename to rel/i18n/emqx_gateway_api.hocon
diff --git a/apps/emqx_gateway/i18n/emqx_gateway_api_authn_i18n.conf b/rel/i18n/emqx_gateway_api_authn.hocon
similarity index 100%
rename from apps/emqx_gateway/i18n/emqx_gateway_api_authn_i18n.conf
rename to rel/i18n/emqx_gateway_api_authn.hocon
diff --git a/apps/emqx_gateway/i18n/emqx_gateway_api_clients_i18n.conf b/rel/i18n/emqx_gateway_api_clients.hocon
similarity index 100%
rename from apps/emqx_gateway/i18n/emqx_gateway_api_clients_i18n.conf
rename to rel/i18n/emqx_gateway_api_clients.hocon
diff --git a/apps/emqx_gateway/i18n/emqx_gateway_api_listeners_i18n.conf b/rel/i18n/emqx_gateway_api_listeners.hocon
similarity index 100%
rename from apps/emqx_gateway/i18n/emqx_gateway_api_listeners_i18n.conf
rename to rel/i18n/emqx_gateway_api_listeners.hocon
diff --git a/apps/emqx_gateway/i18n/emqx_gateway_schema_i18n.conf b/rel/i18n/emqx_gateway_schema.hocon
similarity index 94%
rename from apps/emqx_gateway/i18n/emqx_gateway_schema_i18n.conf
rename to rel/i18n/emqx_gateway_schema.hocon
index 1ffc5c6c1..fc34ef0a8 100644
--- a/apps/emqx_gateway/i18n/emqx_gateway_schema_i18n.conf
+++ b/rel/i18n/emqx_gateway_schema.hocon
@@ -25,13 +25,6 @@ emqx_gateway_schema {
}
}
- gateway_common_mountpoint {
- desc {
- en: """"""
- zh: """"""
- }
- }
-
gateway_common_clientinfo_override {
desc {
en: """ClientInfo override."""
@@ -86,10 +79,10 @@ emqx_gateway_schema {
}
}
- tcp_listener {
+ listener_name_to_settings_map{
desc {
- en: """"""
- zh: """"""
+ en: """A map from listener names to listener settings."""
+ zh: """从监听器名称到配置参数的映射。"""
}
}
@@ -123,13 +116,6 @@ EMQX will close the TCP connection if proxy protocol packet is not received with
}
}
- ssl_listener {
- desc {
- en: """"""
- zh: """"""
- }
- }
-
ssl_listener_options {
desc {
en: """SSL Socket options."""
@@ -137,13 +123,6 @@ EMQX will close the TCP connection if proxy protocol packet is not received with
}
}
- udp_listener {
- desc {
- en: """"""
- zh: """"""
- }
- }
-
udp_listener_udp_opts {
desc {
en: """Settings for the UDP sockets."""
@@ -188,13 +167,6 @@ See: https://erlang.org/doc/man/inet.html#setopts-2"""
}
}
- dtls_listener {
- desc {
- en: """"""
- zh: """"""
- }
- }
-
dtls_listener_acceptors {
desc {
en: """Size of the acceptor pool."""
@@ -247,7 +219,7 @@ When set to false, clients will be allowed to connect without authen
}
}
- gateway_common_listener_mountpoint {
+ gateway_mountpoint {
desc {
en: """When publishing or subscribing, prefix all topics with a mountpoint string.
The prefixed string will be removed from the topic name when the message is delivered to the subscriber.
diff --git a/lib-ee/emqx_license/i18n/emqx_license_http_api.conf b/rel/i18n/emqx_license_http_api.hocon
similarity index 100%
rename from lib-ee/emqx_license/i18n/emqx_license_http_api.conf
rename to rel/i18n/emqx_license_http_api.hocon
diff --git a/lib-ee/emqx_license/i18n/emqx_license_schema_i18n.conf b/rel/i18n/emqx_license_schema.hocon
similarity index 100%
rename from lib-ee/emqx_license/i18n/emqx_license_schema_i18n.conf
rename to rel/i18n/emqx_license_schema.hocon
diff --git a/apps/emqx/i18n/emqx_limiter_i18n.conf b/rel/i18n/emqx_limiter_schema.hocon
similarity index 100%
rename from apps/emqx/i18n/emqx_limiter_i18n.conf
rename to rel/i18n/emqx_limiter_schema.hocon
diff --git a/apps/emqx_gateway/i18n/emqx_lwm2m_api_i18n.conf b/rel/i18n/emqx_lwm2m_api.hocon
similarity index 100%
rename from apps/emqx_gateway/i18n/emqx_lwm2m_api_i18n.conf
rename to rel/i18n/emqx_lwm2m_api.hocon
diff --git a/apps/emqx_lwm2m/i18n/emqx_lwm2m_schema.conf b/rel/i18n/emqx_lwm2m_schema.hocon
similarity index 100%
rename from apps/emqx_lwm2m/i18n/emqx_lwm2m_schema.conf
rename to rel/i18n/emqx_lwm2m_schema.hocon
diff --git a/apps/emqx_management/i18n/emqx_mgmt_api_alarms_i18n.conf b/rel/i18n/emqx_mgmt_api_alarms.hocon
similarity index 100%
rename from apps/emqx_management/i18n/emqx_mgmt_api_alarms_i18n.conf
rename to rel/i18n/emqx_mgmt_api_alarms.hocon
diff --git a/apps/emqx_management/i18n/emqx_mgmt_api_banned_i18n.conf b/rel/i18n/emqx_mgmt_api_banned.hocon
similarity index 100%
rename from apps/emqx_management/i18n/emqx_mgmt_api_banned_i18n.conf
rename to rel/i18n/emqx_mgmt_api_banned.hocon
diff --git a/apps/emqx_management/i18n/emqx_mgmt_api_key_i18n.conf b/rel/i18n/emqx_mgmt_api_key_schema.hocon
similarity index 100%
rename from apps/emqx_management/i18n/emqx_mgmt_api_key_i18n.conf
rename to rel/i18n/emqx_mgmt_api_key_schema.hocon
diff --git a/apps/emqx_management/i18n/emqx_mgmt_api_publish_i18n.conf b/rel/i18n/emqx_mgmt_api_publish.hocon
similarity index 99%
rename from apps/emqx_management/i18n/emqx_mgmt_api_publish_i18n.conf
rename to rel/i18n/emqx_mgmt_api_publish.hocon
index f91115df5..a09732cfc 100644
--- a/apps/emqx_management/i18n/emqx_mgmt_api_publish_i18n.conf
+++ b/rel/i18n/emqx_mgmt_api_publish.hocon
@@ -1,4 +1,3 @@
-
emqx_mgmt_api_publish {
publish_api {
desc {
diff --git a/apps/emqx_management/i18n/emqx_mgmt_api_status_i18n.conf b/rel/i18n/emqx_mgmt_api_status.hocon
similarity index 100%
rename from apps/emqx_management/i18n/emqx_mgmt_api_status_i18n.conf
rename to rel/i18n/emqx_mgmt_api_status.hocon
diff --git a/apps/emqx_modules/i18n/emqx_modules_schema_i18n.conf b/rel/i18n/emqx_modules_schema.hocon
similarity index 100%
rename from apps/emqx_modules/i18n/emqx_modules_schema_i18n.conf
rename to rel/i18n/emqx_modules_schema.hocon
diff --git a/apps/emqx_mqttsn/i18n/emqx_mqttsn_schema.conf b/rel/i18n/emqx_mqttsn_schema.hocon
similarity index 100%
rename from apps/emqx_mqttsn/i18n/emqx_mqttsn_schema.conf
rename to rel/i18n/emqx_mqttsn_schema.hocon
diff --git a/apps/emqx_plugins/i18n/emqx_plugins_schema.conf b/rel/i18n/emqx_plugins_schema.hocon
similarity index 100%
rename from apps/emqx_plugins/i18n/emqx_plugins_schema.conf
rename to rel/i18n/emqx_plugins_schema.hocon
diff --git a/apps/emqx_prometheus/i18n/emqx_prometheus_schema_i18n.conf b/rel/i18n/emqx_prometheus_schema.hocon
similarity index 100%
rename from apps/emqx_prometheus/i18n/emqx_prometheus_schema_i18n.conf
rename to rel/i18n/emqx_prometheus_schema.hocon
diff --git a/apps/emqx_psk/i18n/emqx_psk_i18n.conf b/rel/i18n/emqx_psk_schema.hocon
similarity index 100%
rename from apps/emqx_psk/i18n/emqx_psk_i18n.conf
rename to rel/i18n/emqx_psk_schema.hocon
diff --git a/apps/emqx_resource/i18n/emqx_resource_schema_i18n.conf b/rel/i18n/emqx_resource_schema.hocon
similarity index 92%
rename from apps/emqx_resource/i18n/emqx_resource_schema_i18n.conf
rename to rel/i18n/emqx_resource_schema.hocon
index 341ba2463..c73f8b1aa 100644
--- a/apps/emqx_resource/i18n/emqx_resource_schema_i18n.conf
+++ b/rel/i18n/emqx_resource_schema.hocon
@@ -100,17 +100,6 @@ For bridges only have ingress direction data flow, it can be set to 0 otherwise
}
}
- query_mode_sync_only {
- desc {
- en: """Query mode. Only support 'sync'."""
- zh: """请求模式。目前只支持同步模式。"""
- }
- label {
- en: """Query mode"""
- zh: """请求模式"""
- }
- }
-
request_timeout {
desc {
en: """Starting from the moment when the request enters the buffer, if the request remains in the buffer for the specified time or is sent but does not receive a response or acknowledgement in time, the request is considered expired."""
@@ -160,11 +149,11 @@ When disabled the messages are buffered in RAM only."""
batch_size {
desc {
en: """Maximum batch count. If equal to 1, there's effectively no batching."""
- zh: """批量请求大小。如果设为1,则无批处理。"""
+ zh: """最大批量请求大小。如果设为1,则无批处理。"""
}
label {
- en: """Batch size"""
- zh: """批量请求大小"""
+ en: """Max batch size"""
+ zh: """最大批量请求大小"""
}
}
@@ -174,7 +163,7 @@ When disabled the messages are buffered in RAM only."""
zh: """在较低消息率情况下尝试累积批量输出时的最大等待间隔,以提高资源的利用率。"""
}
label {
- en: """Max Batch Wait Time"""
+ en: """Max batch wait time"""
zh: """批量等待最大间隔"""
}
}
diff --git a/apps/emqx_retainer/i18n/emqx_retainer_api_i18n.conf b/rel/i18n/emqx_retainer_api.hocon
similarity index 100%
rename from apps/emqx_retainer/i18n/emqx_retainer_api_i18n.conf
rename to rel/i18n/emqx_retainer_api.hocon
diff --git a/apps/emqx_retainer/i18n/emqx_retainer_i18n.conf b/rel/i18n/emqx_retainer_schema.hocon
similarity index 100%
rename from apps/emqx_retainer/i18n/emqx_retainer_i18n.conf
rename to rel/i18n/emqx_retainer_schema.hocon
diff --git a/apps/emqx_modules/i18n/emqx_rewrite_api_i18n.conf b/rel/i18n/emqx_rewrite_api.hocon
similarity index 100%
rename from apps/emqx_modules/i18n/emqx_rewrite_api_i18n.conf
rename to rel/i18n/emqx_rewrite_api.hocon
diff --git a/apps/emqx_rule_engine/i18n/emqx_rule_api_schema.conf b/rel/i18n/emqx_rule_api_schema.hocon
similarity index 99%
rename from apps/emqx_rule_engine/i18n/emqx_rule_api_schema.conf
rename to rel/i18n/emqx_rule_api_schema.hocon
index e4c2314de..f9b344666 100644
--- a/apps/emqx_rule_engine/i18n/emqx_rule_api_schema.conf
+++ b/rel/i18n/emqx_rule_api_schema.hocon
@@ -35,8 +35,8 @@ emqx_rule_api_schema {
event_username {
desc {
- en: "The User Name"
- zh: ""
+ en: "Username"
+ zh: "用户名"
}
label: {
en: "Username"
diff --git a/apps/emqx_rule_engine/i18n/emqx_rule_engine_api.conf b/rel/i18n/emqx_rule_engine_api.hocon
similarity index 100%
rename from apps/emqx_rule_engine/i18n/emqx_rule_engine_api.conf
rename to rel/i18n/emqx_rule_engine_api.hocon
diff --git a/apps/emqx_rule_engine/i18n/emqx_rule_engine_schema.conf b/rel/i18n/emqx_rule_engine_schema.hocon
similarity index 100%
rename from apps/emqx_rule_engine/i18n/emqx_rule_engine_schema.conf
rename to rel/i18n/emqx_rule_engine_schema.hocon
diff --git a/apps/emqx/i18n/emqx_schema_i18n.conf b/rel/i18n/emqx_schema.hocon
similarity index 94%
rename from apps/emqx/i18n/emqx_schema_i18n.conf
rename to rel/i18n/emqx_schema.hocon
index 71a008ca3..d36809c3b 100644
--- a/apps/emqx/i18n/emqx_schema_i18n.conf
+++ b/rel/i18n/emqx_schema.hocon
@@ -666,15 +666,15 @@ mqtt 下所有的配置作为全局的默认值存在,它可以被 zone
mqtt_idle_timeout {
desc {
- en: """After the TCP connection is established, if the MQTT CONNECT packet from the client is
-not received within the time specified by idle_timeout, the connection will be disconnected.
-After the CONNECT packet has been accepted by EMQX, if the connection idles for this long time,
-then the Erlang process is put to hibernation to save OS resources. Note: long idle_timeout
-interval may impose risk at the system if large number of malicious clients only establish connections
-but do not send any data."""
-    zh: """TCP 连接建立后,如果在 idle_timeout 指定的时间内未收到客户端的 MQTT CONNECT 报文,则连接将被断开。
-如果连接在 CONNECT 报文被 EMQX 接受之后空闲超过该时长,那么服务这个连接的 Erlang 进程会进入休眠以节省系统资源。
-注意,该配置值如果设置过大的情况下,如果大量恶意客户端只连接,但不发任何数据,可能会导致系统资源被恶意消耗。"""
+ en: """Configure the duration of time that a connection can remain idle (i.e., without any data transfer) before being:
+ - Automatically disconnected if no CONNECT packet is received from the client yet.
+ - Put into hibernation mode to save resources if some CONNECT packets are already received.
+Note: Please set the parameter with caution as long idle time will lead to resource waste."""
+ zh: """设置连接被断开或进入休眠状态前的等待时间,空闲超时后,
+ - 如暂未收到客户端的 CONNECT 报文,连接将断开;
+ - 如已收到客户端的 CONNECT 报文,连接将进入休眠模式以节省系统资源。
+
+注意:请合理设置该参数值,如等待时间设置过长,可能造成系统资源的浪费。"""
}
label: {
en: """Idle Timeout"""
@@ -783,8 +783,8 @@ but do not send any data."""
mqtt_ignore_loop_deliver {
desc {
- en: """Ignore loop delivery of messages for MQTT v3.1.1/v3.1.0, similar to No Local
subscription option in MQTT 5.0."""
- zh: """是否为 MQTT v3.1.1/v3.1.0 客户端忽略投递自己发布的消息,类似于 MQTT 5.0 中的 No Local
订阅选项。"""
+ en: """Whether the messages sent by the MQTT v3.1.1/v3.1.0 client will be looped back to the publisher itself, similar to No Local
in MQTT 5.0."""
+ zh: """设置由 MQTT v3.1.1/v3.1.0 客户端发布的消息是否将转发给其本身;类似 MQTT 5.0 协议中的 No Local
选项。"""
}
label: {
en: """Ignore Loop Deliver"""
@@ -794,10 +794,10 @@ but do not send any data."""
mqtt_strict_mode {
desc {
- en: """Parse MQTT messages in strict mode.
-When set to true, invalid utf8 strings in for example client ID, topic name, etc. will cause the client to be disconnected"""
+ en: """Whether to parse MQTT messages in strict mode.
+In strict mode, invalid utf8 strings in for example client ID, topic name, etc. will cause the client to be disconnected."""
zh: """是否以严格模式解析 MQTT 消息。
-当设置为 true 时,例如客户端 ID、主题名称等中的无效 utf8 字符串将导致客户端断开连接。"""
+严格模式下,如客户端 ID、主题名称等中包含无效 utf8 字符串,连接将被断开。"""
}
label: {
en: """Strict Mode"""
@@ -807,8 +807,10 @@ When set to true, invalid utf8 strings in for example client ID, topic name, etc
mqtt_response_information {
desc {
- en: """Specify the response information returned to the client. This feature is disabled if is set to \"\". Applies only to clients using MQTT 5.0."""
- zh: """指定返回给客户端的响应信息。如果设置为 \"\",则禁用此功能。仅适用于使用 MQTT 5.0 协议的客户端。"""
+ en: """UTF-8 string, for creating the response topic, for example, if set to reqrsp/
, the publisher/subscriber will communicate using the topic prefix reqrsp/
.
+To disable this feature, input \"\"
in the text box below. Only applicable to MQTT 5.0 clients."""
+ zh: """UTF-8 字符串,用于指定返回给客户端的响应主题,如 reqrsp/
,此时请求和应答客户端都需要使用 reqrsp/
前缀的主题来完成通讯。
+如希望禁用此功能,请在下方的文字框中输入\"\"
;仅适用于 MQTT 5.0 客户端。"""
}
label: {
en: """Response Information"""
@@ -818,23 +820,23 @@ When set to true, invalid utf8 strings in for example client ID, topic name, etc
mqtt_server_keepalive {
desc {
- en: """The keep alive that EMQX requires the client to use. If configured as disabled
, it means that the keep alive specified by the client will be used. Requires Server Keep Alive
in MQTT 5.0, so it is only applicable to clients using MQTT 5.0 protocol."""
- zh: """EMQX 要求客户端使用的保活时间,配置为 disabled
表示将使用客户端指定的保活时间。需要用到 MQTT 5.0 中的 Server Keep Alive
,因此仅适用于使用 MQTT 5.0 协议的客户端。"""
+ en: """The keep alive duration required by EMQX. To use the setting from the client side, choose disabled from the drop-down list. Only applicable to MQTT 5.0 clients."""
+ zh: """EMQX 要求的保活时间,如设为 disabled,则将使用客户端指定的保持连接时间;仅适用于 MQTT 5.0 客户端。"""
}
label: {
en: """Server Keep Alive"""
- zh: """服务端保持连接"""
+ zh: """服务端保活时间"""
}
}
mqtt_keepalive_backoff {
desc {
- en: """The backoff multiplier used by the broker to determine the client keep alive timeout. If EMQX doesn't receive any packet in Keep Alive * Backoff * 2
seconds, EMQX will close the current connection."""
- zh: """Broker 判定客户端保活超时使用的退避乘数。如果 EMQX 在 Keep Alive * Backoff * 2
秒内未收到任何报文,EMQX 将关闭当前连接。"""
+ en: """The coefficient EMQX uses to confirm whether the keep alive duration of the client expires. Formula: Keep Alive * Backoff * 2"""
+ zh: """EMQX 判定客户端保活超时使用的阈值系数。计算公式为:Keep Alive * Backoff * 2"""
}
label: {
en: """Keep Alive Backoff"""
- zh: """保持连接退避乘数"""
+ zh: """保活超时阈值系数"""
}
}
@@ -978,14 +980,14 @@ To configure \"topic/1\" > \"topic/2\":
mqtt_use_username_as_clientid {
desc {
- en: """Whether to user Client ID as Username.
-This setting takes effect later than Use Peer Certificate as Username (peer_cert_as_username) and Use peer certificate as Client ID (peer_cert_as_clientid)."""
+    en: """Whether to use Username as Client ID.
+This setting takes effect later than Use Peer Certificate as Username and Use peer certificate as Client ID."""
zh: """是否使用用户名作为客户端 ID。
-此设置的作用时间晚于 使用对端证书作为用户名 (peer_cert_as_username) 和 使用对端证书作为客户端 ID (peer_cert_as_clientid)。"""
+此设置的作用时间晚于 对端证书作为用户名 和 对端证书作为客户端 ID。"""
}
label: {
en: """Use Username as Client ID"""
- zh: """使用用户名作为客户端 ID"""
+ zh: """用户名作为客户端 ID"""
}
}
@@ -993,22 +995,22 @@ This setting takes effect later than Use Peer Certificate as Username
-- cn: Take the CN field of the certificate as Username
-- dn: Take the DN field of the certificate as Username
-- crt: Take the content of the DER or PEM certificate as Username
-- pem: Convert DER certificate content to PEM format as Username
-- md5: Take the MD5 value of the content of the DER or PEM certificate as Username"""
- zh: """使用对端证书中的 CN、DN 字段或整个证书内容来作为用户名。仅适用于 TLS 连接。
-目前支持配置为以下内容:
-- cn: 取证书的 CN 字段作为 Username
-- dn: 取证书的 DN 字段作为 Username
-- crt: 取 DER 或 PEM 证书的内容作为 Username
-- pem: 将 DER 证书内容转换为 PEM 格式后作为 Username
-- md5: 取 DER 或 PEM 证书的内容的 MD5 值作为 Username"""
+- cn: CN field of the certificate
+- dn: DN field of the certificate
+- crt: Content of the DER or PEM certificate
+- pem: Convert DER certificate content to PEM format and use as Username
+- md5: MD5 value of the DER or PEM certificate"""
+ zh: """使用对端证书中的 CN、DN 字段或整个证书内容来作为用户名;仅适用于 TLS 连接。
+目前支持:
+- cn: 取证书的 CN 字段
+- dn: 取证书的 DN 字段
+- crt: 取 DER 或 PEM 的证书内容
+- pem: 将 DER 证书转换为 PEM 格式作为用户名
+- md5: 取 DER 或 PEM 证书内容的 MD5 值"""
}
label: {
en: """Use Peer Certificate as Username"""
- zh: """使用对端证书作为用户名"""
+ zh: """对端证书作为用户名"""
}
}
@@ -1016,22 +1018,22 @@ Supported configurations are the following:
desc {
en: """Use the CN, DN field in the peer certificate or the entire certificate content as Client ID. Only works for the TLS connection.
Supported configurations are the following:
-- cn: Take the CN field of the certificate as Client ID
-- dn: Take the DN field of the certificate as Client ID
-- crt: Take the content of the DER or PEM certificate as Client ID
-- pem: Convert DER certificate content to PEM format as Client ID
-- md5: Take the MD5 value of the content of the DER or PEM certificate as Client ID"""
- zh: """使用对端证书中的 CN、DN 字段或整个证书内容来作为客户端 ID。仅适用于 TLS 连接。
-目前支持配置为以下内容:
-- cn: 取证书的 CN 字段作为 Client ID
-- dn: 取证书的 DN 字段作为 Client ID
-- crt: 取 DER 或 PEM 证书的内容作为 Client ID
-- pem: 将 DER 证书内容转换为 PEM 格式后作为 Client ID
-- md5: 取 DER 或 PEM 证书的内容的 MD5 值作为 Client ID"""
+- cn: CN field of the certificate
+- dn: DN field of the certificate
+- crt: DER or PEM certificate
+- pem: Convert DER certificate content to PEM format and use as Client ID
+- md5: MD5 value of the DER or PEM certificate"""
+ zh: """使用对端证书中的 CN、DN 字段或整个证书内容来作为客户端 ID。仅适用于 TLS 连接;
+目前支持:
+- cn: 取证书的 CN 字段
+- dn: 取证书的 DN 字段
+- crt: 取 DER 或 PEM 证书的内容
+- pem: 将 DER 证书内容转换为 PEM 格式作为客户端 ID
+- md5: 取 DER 或 PEM 证书内容的 MD5 值"""
}
label: {
en: """Use Peer Certificate as Client ID"""
- zh: """使用对端证书作为客户端 ID"""
+ zh: """对端证书作为客户端 ID"""
}
}
@@ -1503,8 +1505,8 @@ In case PSK cipher suites are intended, make sure to configure
common_ssl_opts_schema_hibernate_after {
desc {
- en: """ Hibernate the SSL process after idling for amount of time reducing its memory footprint. """
- zh: """ 在闲置一定时间后休眠 SSL 进程,减少其内存占用。"""
+ en: """Hibernate the SSL process after idling for amount of time reducing its memory footprint."""
+ zh: """在闲置一定时间后休眠 SSL 进程,减少其内存占用。"""
}
label: {
en: "hibernate after"
diff --git a/apps/emqx_slow_subs/i18n/emqx_slow_subs_api_i18n.conf b/rel/i18n/emqx_slow_subs_api.hocon
similarity index 100%
rename from apps/emqx_slow_subs/i18n/emqx_slow_subs_api_i18n.conf
rename to rel/i18n/emqx_slow_subs_api.hocon
diff --git a/apps/emqx_slow_subs/i18n/emqx_slow_subs_i18n.conf b/rel/i18n/emqx_slow_subs_schema.hocon
similarity index 100%
rename from apps/emqx_slow_subs/i18n/emqx_slow_subs_i18n.conf
rename to rel/i18n/emqx_slow_subs_schema.hocon
diff --git a/apps/emqx_statsd/i18n/emqx_statsd_api_i18n.conf b/rel/i18n/emqx_statsd_api.hocon
similarity index 100%
rename from apps/emqx_statsd/i18n/emqx_statsd_api_i18n.conf
rename to rel/i18n/emqx_statsd_api.hocon
diff --git a/apps/emqx_statsd/i18n/emqx_statsd_schema_i18n.conf b/rel/i18n/emqx_statsd_schema.hocon
similarity index 100%
rename from apps/emqx_statsd/i18n/emqx_statsd_schema_i18n.conf
rename to rel/i18n/emqx_statsd_schema.hocon
diff --git a/apps/emqx_stomp/i18n/emqx_stomp_schema.conf b/rel/i18n/emqx_stomp_schema.hocon
similarity index 100%
rename from apps/emqx_stomp/i18n/emqx_stomp_schema.conf
rename to rel/i18n/emqx_stomp_schema.hocon
diff --git a/apps/emqx_modules/i18n/emqx_telemetry_api_i18n.conf b/rel/i18n/emqx_telemetry_api.hocon
similarity index 100%
rename from apps/emqx_modules/i18n/emqx_telemetry_api_i18n.conf
rename to rel/i18n/emqx_telemetry_api.hocon
diff --git a/apps/emqx_modules/i18n/emqx_topic_metrics_api_i18n.conf b/rel/i18n/emqx_topic_metrics_api.hocon
similarity index 100%
rename from apps/emqx_modules/i18n/emqx_topic_metrics_api_i18n.conf
rename to rel/i18n/emqx_topic_metrics_api.hocon
diff --git a/scripts/buildx.sh b/scripts/buildx.sh
index 4f12e0abc..5c3a65369 100755
--- a/scripts/buildx.sh
+++ b/scripts/buildx.sh
@@ -27,6 +27,13 @@ help() {
echo " E.g. ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-debian11"
}
+die() {
+ msg="$1"
+ echo "$msg" >&2
+ help
+ exit 1
+}
+
while [ "$#" -gt 0 ]; do
case $1 in
-h|--help)
@@ -81,13 +88,23 @@ while [ "$#" -gt 0 ]; do
esac
done
-if [ -z "${PROFILE:-}" ] ||
- [ -z "${PKGTYPE:-}" ] ||
- [ -z "${BUILDER:-}" ] ||
- [ -z "${ARCH:-}" ]; then
- help
- exit 1
+## we have a different naming for them
+if [[ $(uname -m) == "x86_64" ]]; then
+ NATIVE_ARCH='amd64'
+elif [[ $(uname -m) == "aarch64" ]]; then
+ NATIVE_ARCH='arm64'
+elif [[ $(uname -m) == "arm64" ]]; then
+ NATIVE_ARCH='arm64'
+elif [[ $(uname -m) == "armv7l" ]]; then
+ # CHECKME: really ?
+ NATIVE_ARCH='arm64'
fi
+ARCH="${ARCH:-${NATIVE_ARCH:-}}"
+
+[ -z "${PROFILE:-}" ] && die "missing --prifile"
+[ -z "${PKGTYPE:-}" ] && die "missing --pkgtyp"
+[ -z "${BUILDER:-}" ] && die "missing --builder"
+[ -z "${ARCH:-}" ] && die "missing --arch"
# ensure dir
cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")/.."
@@ -128,13 +145,7 @@ if [[ "$HOST_SYSTEM" = "$BUILDER_SYSTEM" ]]; then
fi
IS_NATIVE_ARCH='no'
-if [[ $(uname -m) == "x86_64" && "$ARCH" == "amd64" ]]; then
- IS_NATIVE_ARCH='yes'
-elif [[ $(uname -m) == "aarch64" && "$ARCH" == "arm64" ]]; then
- IS_NATIVE_ARCH='yes'
-elif [[ $(uname -m) == "arm64" && "$ARCH" == "arm64" ]]; then
- IS_NATIVE_ARCH='yes'
-elif [[ $(uname -m) == "armv7l" && "$ARCH" == "arm64" ]]; then
+if [[ "$NATIVE_ARCH" == "$ARCH" ]]; then
IS_NATIVE_ARCH='yes'
fi
@@ -151,7 +162,7 @@ elif docker info; then
--platform="linux/$ARCH" \
--env ACLOCAL_PATH="/usr/share/aclocal:/usr/local/share/aclocal" \
"$BUILDER" \
- bash -euc "$CMD_RUN"
+ bash -euc "git config --global --add safe.directory /emqx && $CMD_RUN"
else
echo "Error: Docker not available on unsupported platform"
exit 1;
diff --git a/scripts/check-i18n-style.escript b/scripts/check-i18n-style.escript
index 6ad6c1770..cbe79c82e 100755
--- a/scripts/check-i18n-style.escript
+++ b/scripts/check-i18n-style.escript
@@ -1,12 +1,15 @@
#!/usr/bin/env escript
+%% called from check-i18n-style.sh
+
-mode(compile).
--define(YELLOW, "\e[33m").
+% -define(YELLOW, "\e[33m"). % not used
-define(RED, "\e[31m").
-define(RESET, "\e[39m").
main([Files0]) ->
+ io:format(user, "checking i18n file styles", []),
_ = put(errors, 0),
Files = string:tokens(Files0, "\n"),
ok = load_hocon(),
@@ -46,7 +49,7 @@ logerr(Fmt, Args) ->
check(File) ->
- io:format(user, "checking: ~s~n", [File]),
+ io:format(user, ".", []),
{ok, C} = hocon:load(File),
maps:foreach(fun check_one_field/2, C),
ok.
@@ -84,7 +87,7 @@ do_check_desc(Name, _) ->
die("~s: missing 'zh' or 'en'~n", [Name]).
check_desc_string(Name, Tr, <<>>) ->
- io:format(standard_error, ?YELLOW ++ "WARNING: ~s.~s: empty string~n" ++ ?RESET, [Name, Tr]);
+ logerr("~s.~s: empty string~n", [Name, Tr]);
check_desc_string(Name, Tr, BinStr) ->
Str = unicode:characters_to_list(BinStr, utf8),
Err = fun(Reason) ->
diff --git a/scripts/check-i18n-style.sh b/scripts/check-i18n-style.sh
index 0be565f30..d21f43a72 100755
--- a/scripts/check-i18n-style.sh
+++ b/scripts/check-i18n-style.sh
@@ -3,6 +3,6 @@ set -euo pipefail
cd -P -- "$(dirname -- "$0")/.."
-all_files="$(git ls-files '*i18n*.conf')"
+all_files="$(git ls-files 'rel/i18n/*.hocon')"
./scripts/check-i18n-style.escript "$all_files"
diff --git a/scripts/merge-i18n.escript b/scripts/merge-i18n.escript
index 816cbe182..b2501d10a 100755
--- a/scripts/merge-i18n.escript
+++ b/scripts/merge-i18n.escript
@@ -4,12 +4,8 @@
main(_) ->
BaseConf = <<"">>,
- Cfgs0 = get_all_cfgs("apps/"),
- Cfgs1 = get_all_cfgs("lib-ee/"),
- Conf0 = merge(BaseConf, Cfgs0),
- Conf = [merge(Conf0, Cfgs1),
- io_lib:nl()
- ],
+ Cfgs0 = get_all_files(),
+ Conf = merge(BaseConf, Cfgs0),
OutputFile = "apps/emqx_dashboard/priv/i18n.conf",
ok = filelib:ensure_dir(OutputFile),
ok = file:write_file(OutputFile, Conf).
@@ -25,39 +21,7 @@ merge(BaseConf, Cfgs) ->
end
end, BaseConf, Cfgs).
-get_all_cfgs(Root) ->
- Apps = filelib:wildcard("*", Root) -- ["emqx_machine"],
- Dirs = [filename:join([Root, App]) || App <- Apps],
- lists:foldl(fun get_cfgs/2, [], Dirs).
-
-get_all_cfgs(Dir, Cfgs) ->
- Fun = fun(E, Acc) ->
- Path = filename:join([Dir, E]),
- get_cfgs(Path, Acc)
- end,
- lists:foldl(Fun, Cfgs, filelib:wildcard("*", Dir)).
-
-get_cfgs(Dir, Cfgs) ->
- case filelib:is_dir(Dir) of
- false ->
- Cfgs;
- _ ->
- Files = filelib:wildcard("*", Dir),
- case lists:member("i18n", Files) of
- false ->
- try_enter_child(Dir, Files, Cfgs);
- true ->
- EtcDir = filename:join([Dir, "i18n"]),
- Confs = filelib:wildcard("*.conf", EtcDir),
- NewCfgs = [filename:join([EtcDir, Name]) || Name <- Confs],
- try_enter_child(Dir, Files, NewCfgs ++ Cfgs)
- end
- end.
-
-try_enter_child(Dir, Files, Cfgs) ->
- case lists:member("src", Files) of
- false ->
- Cfgs;
- true ->
- get_all_cfgs(filename:join([Dir, "src"]), Cfgs)
- end.
+get_all_files() ->
+ Dir = filename:join(["rel","i18n"]),
+ Files = filelib:wildcard("*.hocon", Dir),
+ lists:map(fun(Name) -> filename:join([Dir, Name]) end, Files).