From d6e9bbb95c5406d88f67cb25819f226b943b1e92 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Mon, 13 Nov 2023 15:56:46 -0300 Subject: [PATCH 001/101] fix(connector): validate connector name before converting ssl certs Fixes https://emqx.atlassian.net/browse/EMQX-11336 See also: https://github.com/emqx/emqx/pull/11540 --- apps/emqx_bridge/src/emqx_bridge.erl | 54 ++++++++++---- apps/emqx_bridge/src/emqx_bridge_app.erl | 18 ++++- apps/emqx_bridge/test/emqx_bridge_SUITE.erl | 30 +++++++- .../test/emqx_bridge_api_SUITE.erl | 8 ++- ...qx_bridge_v1_compatibility_layer_SUITE.erl | 31 +++++++- .../emqx_connector/src/emqx_connector.app.src | 2 +- apps/emqx_connector/src/emqx_connector.erl | 70 +++++++++++++++++-- .../test/emqx_connector_SUITE.erl | 65 +++++++++++++++++ .../test/emqx_connector_api_SUITE.erl | 22 ++++++ apps/emqx_resource/src/emqx_resource.erl | 28 +++----- 10 files changed, 283 insertions(+), 45 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index 51bdfb084..64bec3a4e 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -55,7 +55,6 @@ ]). -export([config_key_path/0]). --export([validate_bridge_name/1]). %% exported for `emqx_telemetry' -export([get_basic_usage_info/0]). @@ -268,7 +267,12 @@ config_key_path() -> pre_config_update([?ROOT_KEY], RawConf, RawConf) -> {ok, RawConf}; pre_config_update([?ROOT_KEY], NewConf, _RawConf) -> - {ok, convert_certs(NewConf)}. + case multi_validate_bridge_names(NewConf) of + ok -> + {ok, convert_certs(NewConf)}; + Error -> + Error + end. post_config_update([?ROOT_KEY], _Req, NewConf, OldConf, _AppEnv) -> #{added := Added, removed := Removed, changed := Updated} = @@ -657,17 +661,13 @@ get_basic_usage_info() -> InitialAcc end. -validate_bridge_name(BridgeName0) -> - BridgeName = to_bin(BridgeName0), - case re:run(BridgeName, ?MAP_KEY_RE, [{capture, none}]) of - match -> - ok; - nomatch -> - {error, #{ - kind => validation_error, - reason => bad_bridge_name, - value => BridgeName - }} +validate_bridge_name(BridgeName) -> + try + _ = emqx_resource:validate_name(to_bin(BridgeName)), + ok + catch + throw:Error -> + {error, Error} end. to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8); @@ -675,3 +675,31 @@ to_bin(B) when is_binary(B) -> B. upgrade_type(Type) -> emqx_bridge_lib:upgrade_type(Type). + +multi_validate_bridge_names(Conf) -> + BridgeTypeAndNames = + [ + {Type, Name} + || {Type, NameToConf} <- maps:to_list(Conf), + {Name, _Conf} <- maps:to_list(NameToConf) + ], + BadBridges = + lists:filtermap( + fun({Type, Name}) -> + case validate_bridge_name(Name) of + ok -> false; + _Error -> {true, #{type => Type, name => Name}} + end + end, + BridgeTypeAndNames + ), + case BadBridges of + [] -> + ok; + [_ | _] -> + {error, #{ + kind => validation_error, + reason => bad_bridge_names, + bad_bridges => BadBridges + }} + end. 
diff --git a/apps/emqx_bridge/src/emqx_bridge_app.erl b/apps/emqx_bridge/src/emqx_bridge_app.erl index cd54d31e7..321f59f28 100644 --- a/apps/emqx_bridge/src/emqx_bridge_app.erl +++ b/apps/emqx_bridge/src/emqx_bridge_app.erl @@ -63,7 +63,7 @@ pre_config_update(_, {Oper, _Type, _Name}, OldConfig) -> %% to save the 'enable' to the config files {ok, OldConfig#{<<"enable">> => operation_to_enable(Oper)}}; pre_config_update(Path, Conf, _OldConfig) when is_map(Conf) -> - case validate_bridge_name(Path) of + case validate_bridge_name_in_config(Path) of ok -> case emqx_connector_ssl:convert_certs(filename:join(Path), Conf) of {error, Reason} -> @@ -104,11 +104,23 @@ post_config_update([bridges, BridgeType, BridgeName], _Req, NewConf, OldConf, _A operation_to_enable(disable) -> false; operation_to_enable(enable) -> true. -validate_bridge_name(Path) -> +validate_bridge_name_in_config(Path) -> [RootKey] = emqx_bridge:config_key_path(), case Path of [RootKey, _BridgeType, BridgeName] -> - emqx_bridge:validate_bridge_name(BridgeName); + validate_bridge_name(BridgeName); _ -> ok end. + +to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8); +to_bin(B) when is_binary(B) -> B. + +validate_bridge_name(BridgeName) -> + try + _ = emqx_resource:validate_name(to_bin(BridgeName)), + ok + catch + throw:Error -> + {error, Error} + end. diff --git a/apps/emqx_bridge/test/emqx_bridge_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_SUITE.erl index 96c3c29ca..b29ba154e 100644 --- a/apps/emqx_bridge/test/emqx_bridge_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_SUITE.erl @@ -199,13 +199,41 @@ t_create_with_bad_name(_Config) -> ?assertMatch( {error, {pre_config_update, emqx_bridge_app, #{ - reason := bad_bridge_name, + reason := <<"only 0-9a-zA-Z_- is allowed in resource name", _/binary>>, kind := validation_error }}}, emqx:update_config(Path, Conf) ), ok. +t_create_with_bad_name_root(_Config) -> + BadBridgeName = <<"test_哈哈">>, + BridgeConf = #{ + <<"bridge_mode">> => false, + <<"clean_start">> => true, + <<"keepalive">> => <<"60s">>, + <<"proto_ver">> => <<"v4">>, + <<"server">> => <<"127.0.0.1:1883">>, + <<"ssl">> => + #{ + %% needed to trigger pre_config_update + <<"certfile">> => cert_file("certfile"), + <<"enable">> => true + } + }, + Conf = #{<<"mqtt">> => #{BadBridgeName => BridgeConf}}, + Path = [bridges], + ?assertMatch( + {error, + {pre_config_update, _ConfigHandlerMod, #{ + kind := validation_error, + reason := bad_bridge_names, + bad_bridges := [#{type := <<"mqtt">>, name := BadBridgeName}] + }}}, + emqx:update_config(Path, Conf) + ), + ok. + data_file(Name) -> Dir = code:lib_dir(emqx_bridge, test), {ok, Bin} = file:read_file(filename:join([Dir, "data", Name])), diff --git a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl index c0339660e..99a2bc8cd 100644 --- a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl @@ -1362,7 +1362,13 @@ t_create_with_bad_name(Config) -> Config ), Msg = emqx_utils_json:decode(Msg0, [return_maps]), - ?assertMatch(#{<<"reason">> := <<"bad_bridge_name">>}, Msg), + ?assertMatch( + #{ + <<"kind">> := <<"validation_error">>, + <<"reason">> := <<"only 0-9a-zA-Z_- is allowed in resource name", _/binary>> + }, + Msg + ), ok. 
validate_resource_request_ttl(single, Timeout, Name) -> diff --git a/apps/emqx_bridge/test/emqx_bridge_v1_compatibility_layer_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_v1_compatibility_layer_SUITE.erl index f3b7fb685..c714b858a 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v1_compatibility_layer_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v1_compatibility_layer_SUITE.erl @@ -150,7 +150,8 @@ con_schema() -> fields("connector") -> [ {enable, hoconsc:mk(any(), #{})}, - {resource_opts, hoconsc:mk(map(), #{})} + {resource_opts, hoconsc:mk(map(), #{})}, + {ssl, hoconsc:ref(ssl)} ]; fields("api_post") -> [ @@ -159,7 +160,9 @@ fields("api_post") -> {type, hoconsc:mk(bridge_type(), #{})}, {send_to, hoconsc:mk(atom(), #{})} | fields("connector") - ]. + ]; +fields(ssl) -> + emqx_schema:client_ssl_opts_schema(#{required => false}). con_config() -> #{ @@ -806,3 +809,27 @@ t_scenario_2(Config) -> ?assert(is_rule_enabled(RuleId2)), ok. + +t_create_with_bad_name(_Config) -> + BadBridgeName = <<"test_哈哈">>, + %% Note: must contain SSL options to trigger bug. + Cacertfile = emqx_common_test_helpers:app_path( + emqx, + filename:join(["etc", "certs", "cacert.pem"]) + ), + Opts = #{ + name => BadBridgeName, + overrides => #{ + <<"ssl">> => + #{<<"cacertfile">> => Cacertfile} + } + }, + {error, + {{_, 400, _}, _, #{ + <<"code">> := <<"BAD_REQUEST">>, + <<"message">> := #{ + <<"kind">> := <<"validation_error">>, + <<"reason">> := <<"only 0-9a-zA-Z_- is allowed in resource name", _/binary>> + } + }}} = create_bridge_http_api_v1(Opts), + ok. diff --git a/apps/emqx_connector/src/emqx_connector.app.src b/apps/emqx_connector/src/emqx_connector.app.src index 7ecabb0ff..d23a36e49 100644 --- a/apps/emqx_connector/src/emqx_connector.app.src +++ b/apps/emqx_connector/src/emqx_connector.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_connector, [ {description, "EMQX Data Integration Connectors"}, - {vsn, "0.1.33"}, + {vsn, "0.1.34"}, {registered, []}, {mod, {emqx_connector_app, []}}, {applications, [ diff --git a/apps/emqx_connector/src/emqx_connector.erl b/apps/emqx_connector/src/emqx_connector.erl index f07e038d2..30654bb13 100644 --- a/apps/emqx_connector/src/emqx_connector.erl +++ b/apps/emqx_connector/src/emqx_connector.erl @@ -108,18 +108,28 @@ config_key_path() -> pre_config_update([?ROOT_KEY], RawConf, RawConf) -> {ok, RawConf}; pre_config_update([?ROOT_KEY], NewConf, _RawConf) -> - {ok, convert_certs(NewConf)}; + case multi_validate_connector_names(NewConf) of + ok -> + {ok, convert_certs(NewConf)}; + Error -> + Error + end; pre_config_update(_, {_Oper, _, _}, undefined) -> {error, connector_not_found}; pre_config_update(_, {Oper, _Type, _Name}, OldConfig) -> %% to save the 'enable' to the config files {ok, OldConfig#{<<"enable">> => operation_to_enable(Oper)}}; pre_config_update(Path, Conf, _OldConfig) when is_map(Conf) -> - case emqx_connector_ssl:convert_certs(filename:join(Path), Conf) of - {error, Reason} -> - {error, Reason}; - {ok, ConfNew} -> - {ok, ConfNew} + case validate_connector_name_in_config(Path) of + ok -> + case emqx_connector_ssl:convert_certs(filename:join(Path), Conf) of + {error, Reason} -> + {error, Reason}; + {ok, ConfNew} -> + {ok, ConfNew} + end; + Error -> + Error end. operation_to_enable(disable) -> false; @@ -458,3 +468,51 @@ ensure_no_channels(Configs) -> {error, Reason, _State} -> {error, Reason} end. + +to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8); +to_bin(B) when is_binary(B) -> B. 
+ +validate_connector_name(ConnectorName) -> + try + _ = emqx_resource:validate_name(to_bin(ConnectorName)), + ok + catch + throw:Error -> + {error, Error} + end. + +validate_connector_name_in_config(Path) -> + case Path of + [?ROOT_KEY, _ConnectorType, ConnectorName] -> + validate_connector_name(ConnectorName); + _ -> + ok + end. + +multi_validate_connector_names(Conf) -> + ConnectorTypeAndNames = + [ + {Type, Name} + || {Type, NameToConf} <- maps:to_list(Conf), + {Name, _Conf} <- maps:to_list(NameToConf) + ], + BadConnectors = + lists:filtermap( + fun({Type, Name}) -> + case validate_connector_name(Name) of + ok -> false; + _Error -> {true, #{type => Type, name => Name}} + end + end, + ConnectorTypeAndNames + ), + case BadConnectors of + [] -> + ok; + [_ | _] -> + {error, #{ + kind => validation_error, + reason => bad_connector_names, + bad_connectors => BadConnectors + }} + end. diff --git a/apps/emqx_connector/test/emqx_connector_SUITE.erl b/apps/emqx_connector/test/emqx_connector_SUITE.erl index a62b5ed95..ee7e29741 100644 --- a/apps/emqx_connector/test/emqx_connector_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_SUITE.erl @@ -204,6 +204,71 @@ t_remove_fail(_Config) -> ), ok. +t_create_with_bad_name_direct_path({init, Config}) -> + meck:new(emqx_connector_ee_schema, [passthrough]), + meck:expect(emqx_connector_ee_schema, resource_type, 1, ?CONNECTOR), + meck:new(?CONNECTOR, [non_strict]), + meck:expect(?CONNECTOR, callback_mode, 0, async_if_possible), + meck:expect(?CONNECTOR, on_start, 2, {ok, connector_state}), + meck:expect(?CONNECTOR, on_stop, 2, ok), + meck:expect(?CONNECTOR, on_get_status, 2, connected), + Config; +t_create_with_bad_name_direct_path({'end', _Config}) -> + meck:unload(), + ok; +t_create_with_bad_name_direct_path(_Config) -> + Path = [connectors, kafka_producer, 'test_哈哈'], + ConnConfig0 = connector_config(), + %% Note: must contain SSL options to trigger original bug. + Cacertfile = emqx_common_test_helpers:app_path( + emqx, + filename:join(["etc", "certs", "cacert.pem"]) + ), + ConnConfig = ConnConfig0#{<<"ssl">> => #{<<"cacertfile">> => Cacertfile}}, + ?assertMatch( + {error, + {pre_config_update, _ConfigHandlerMod, #{ + kind := validation_error, + reason := <<"only 0-9a-zA-Z_- is allowed in resource name", _/binary>> + }}}, + emqx:update_config(Path, ConnConfig) + ), + ok. + +t_create_with_bad_name_root_path({init, Config}) -> + meck:new(emqx_connector_ee_schema, [passthrough]), + meck:expect(emqx_connector_ee_schema, resource_type, 1, ?CONNECTOR), + meck:new(?CONNECTOR, [non_strict]), + meck:expect(?CONNECTOR, callback_mode, 0, async_if_possible), + meck:expect(?CONNECTOR, on_start, 2, {ok, connector_state}), + meck:expect(?CONNECTOR, on_stop, 2, ok), + meck:expect(?CONNECTOR, on_get_status, 2, connected), + Config; +t_create_with_bad_name_root_path({'end', _Config}) -> + meck:unload(), + ok; +t_create_with_bad_name_root_path(_Config) -> + Path = [connectors], + BadConnectorName = <<"test_哈哈">>, + ConnConfig0 = connector_config(), + %% Note: must contain SSL options to trigger original bug. 
+ Cacertfile = emqx_common_test_helpers:app_path( + emqx, + filename:join(["etc", "certs", "cacert.pem"]) + ), + ConnConfig = ConnConfig0#{<<"ssl">> => #{<<"cacertfile">> => Cacertfile}}, + Conf = #{<<"kafka_producer">> => #{BadConnectorName => ConnConfig}}, + ?assertMatch( + {error, + {pre_config_update, _ConfigHandlerMod, #{ + kind := validation_error, + reason := bad_connector_names, + bad_connectors := [#{type := <<"kafka_producer">>, name := BadConnectorName}] + }}}, + emqx:update_config(Path, Conf) + ), + ok. + %% helpers connector_config() -> diff --git a/apps/emqx_connector/test/emqx_connector_api_SUITE.erl b/apps/emqx_connector/test/emqx_connector_api_SUITE.erl index becbc8791..f6609808f 100644 --- a/apps/emqx_connector/test/emqx_connector_api_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_api_SUITE.erl @@ -652,6 +652,28 @@ t_connectors_probe(Config) -> ), ok. +t_create_with_bad_name(Config) -> + ConnectorName = <<"test_哈哈">>, + Conf0 = ?KAFKA_CONNECTOR(ConnectorName), + %% Note: must contain SSL options to trigger original bug. + Cacertfile = emqx_common_test_helpers:app_path( + emqx, + filename:join(["etc", "certs", "cacert.pem"]) + ), + Conf = Conf0#{<<"ssl">> => #{<<"cacertfile">> => Cacertfile}}, + {ok, 400, #{ + <<"code">> := <<"BAD_REQUEST">>, + <<"message">> := Msg0 + }} = request_json( + post, + uri(["connectors"]), + Conf, + Config + ), + Msg = emqx_utils_json:decode(Msg0, [return_maps]), + ?assertMatch(#{<<"kind">> := <<"validation_error">>}, Msg), + ok. + %%% helpers listen_on_random_port() -> SockOpts = [binary, {active, false}, {packet, raw}, {reuseaddr, true}, {backlog, 1000}], diff --git a/apps/emqx_resource/src/emqx_resource.erl b/apps/emqx_resource/src/emqx_resource.erl index f5bf65c0f..90df229e4 100644 --- a/apps/emqx_resource/src/emqx_resource.erl +++ b/apps/emqx_resource/src/emqx_resource.erl @@ -815,29 +815,21 @@ validate_name(<<>>, _Opts) -> invalid_data("name cannot be empty string"); validate_name(Name, _Opts) when size(Name) >= 255 -> invalid_data("name length must be less than 255"); -validate_name(Name0, Opts) -> - Name = unicode:characters_to_list(Name0, utf8), - case lists:all(fun is_id_char/1, Name) of - true -> +validate_name(Name, Opts) -> + case re:run(Name, <<"^[-0-9a-zA-Z_]+$">>, [{capture, none}]) of + match -> case maps:get(atom_name, Opts, true) of - % NOTE - % Rule may be created before bridge, thus not `list_to_existing_atom/1`, - % also it is infrequent user input anyway. - true -> list_to_atom(Name); - false -> Name0 + %% NOTE + %% Rule may be created before bridge, thus not `list_to_existing_atom/1`, + %% also it is infrequent user input anyway. + true -> binary_to_atom(Name, utf8); + false -> Name end; - false -> + nomatch -> invalid_data( - <<"only 0-9a-zA-Z_- is allowed in resource name, got: ", Name0/binary>> + <<"only 0-9a-zA-Z_- is allowed in resource name, got: ", Name/binary>> ) end. -spec invalid_data(binary()) -> no_return(). invalid_data(Reason) -> throw(#{kind => validation_error, reason => Reason}). - -is_id_char(C) when C >= $0 andalso C =< $9 -> true; -is_id_char(C) when C >= $a andalso C =< $z -> true; -is_id_char(C) when C >= $A andalso C =< $Z -> true; -is_id_char($_) -> true; -is_id_char($-) -> true; -is_id_char(_) -> false. From 2f1d88d4140abc40ff6e7c1b127dc6075974985a Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Tue, 14 Nov 2023 13:56:50 -0300 Subject: [PATCH 002/101] fix(bridges_v1): avoid merging action examples for non-v1 bridges Since some new bridges might not have a V1 equivalent (i.e. 
they are not registered in `emqx_bridge_enterprise`), we should avoid displaying their examples in the V1 API spec. --- apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl index 9456575d4..ca5ad74b6 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl @@ -82,9 +82,7 @@ schema_modules() -> ]. examples(Method) -> - ActionExamples = emqx_bridge_v2_schema:examples(Method), - RegisteredExamples = registered_examples(Method), - maps:merge(ActionExamples, RegisteredExamples). + registered_examples(Method). registered_examples(Method) -> MergeFun = From 98f947f4f3eb92cff8b0cf8604a992e51c8d194a Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Wed, 15 Nov 2023 22:28:52 +0700 Subject: [PATCH 003/101] ci(router): fix flaky testcase --- apps/emqx/test/emqx_routing_SUITE.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/emqx/test/emqx_routing_SUITE.erl b/apps/emqx/test/emqx_routing_SUITE.erl index 6966ac56a..a54e1b4dd 100644 --- a/apps/emqx/test/emqx_routing_SUITE.erl +++ b/apps/emqx/test/emqx_routing_SUITE.erl @@ -100,7 +100,7 @@ mk_config_listeners(N) -> t_cluster_routing(Config) -> Cluster = ?config(cluster, Config), - Clients = [C1, C2, C3] = [start_client(N) || N <- Cluster], + Clients = [C1, C2, C3] = lists:sort([start_client(N) || N <- Cluster]), Commands = [ {fun publish/3, [C1, <<"a/b/c">>, <<"wontsee">>]}, {fun publish/3, [C2, <<"a/b/d">>, <<"wontsee">>]}, From 90571b7d8eb05b7badcec810cce2d56d569d1c08 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Tue, 14 Nov 2023 13:33:07 -0300 Subject: [PATCH 004/101] test: fix noise about undefined unofficial callbacks --- apps/emqx/test/emqx_cth_suite.erl | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/apps/emqx/test/emqx_cth_suite.erl b/apps/emqx/test/emqx_cth_suite.erl index 5a59238de..401d4f59d 100644 --- a/apps/emqx/test/emqx_cth_suite.erl +++ b/apps/emqx/test/emqx_cth_suite.erl @@ -74,6 +74,9 @@ -export([merge_appspec/2]). +%% "Unofficial" `emqx_config_handler' and `emqx_conf' APIs +-export([schema_module/0, upgrade_raw_conf/1]). + -export_type([appspec/0]). -export_type([appspec_opts/0]). @@ -477,3 +480,18 @@ render_config(Config = #{}) -> unicode:characters_to_binary(hocon_pp:do(Config, #{})); render_config(Config) -> unicode:characters_to_binary(Config). + +%% + +%% "Unofficial" `emqx_config_handler' API +schema_module() -> + ?MODULE. + +%% "Unofficial" `emqx_conf' API +upgrade_raw_conf(Conf) -> + case emqx_release:edition() of + ee -> + emqx_enterprise_schema:upgrade_raw_conf(Conf); + ce -> + emqx_conf_schema:upgrade_raw_conf(Conf) + end. From 36b5d58957050790ebbc9d3d3068c8e5d76c71be Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 16 Nov 2023 09:17:54 -0300 Subject: [PATCH 005/101] test: reorganize test suite a bit --- .../emqx_bridge_v2_kafka_producer_SUITE.erl | 220 +++++++++--------- 1 file changed, 116 insertions(+), 104 deletions(-) diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl index 6adb66357..fba72a1d7 100644 --- a/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl +++ b/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl @@ -25,6 +25,10 @@ -define(TYPE, kafka_producer). 
+%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + all() -> emqx_common_test_helpers:all(?MODULE). @@ -51,6 +55,118 @@ end_per_suite(Config) -> emqx_cth_suite:stop(Apps), ok. +%%------------------------------------------------------------------------------------- +%% Helper fns +%%------------------------------------------------------------------------------------- + +check_send_message_with_bridge(BridgeName) -> + %% ###################################### + %% Create Kafka message + %% ###################################### + Time = erlang:unique_integer(), + BinTime = integer_to_binary(Time), + Payload = list_to_binary("payload" ++ integer_to_list(Time)), + Msg = #{ + clientid => BinTime, + payload => Payload, + timestamp => Time + }, + Offset = resolve_kafka_offset(), + %% ###################################### + %% Send message + %% ###################################### + emqx_bridge_v2:send_message(?TYPE, BridgeName, Msg, #{}), + %% ###################################### + %% Check if message is sent to Kafka + %% ###################################### + check_kafka_message_payload(Offset, Payload). + +resolve_kafka_offset() -> + KafkaTopic = emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition(), + Partition = 0, + Hosts = emqx_bridge_kafka_impl_producer_SUITE:kafka_hosts(), + {ok, Offset0} = emqx_bridge_kafka_impl_producer_SUITE:resolve_kafka_offset( + Hosts, KafkaTopic, Partition + ), + Offset0. + +check_kafka_message_payload(Offset, ExpectedPayload) -> + KafkaTopic = emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition(), + Partition = 0, + Hosts = emqx_bridge_kafka_impl_producer_SUITE:kafka_hosts(), + {ok, {_, [KafkaMsg0]}} = brod:fetch(Hosts, KafkaTopic, Partition, Offset), + ?assertMatch(#kafka_message{value = ExpectedPayload}, KafkaMsg0). + +bridge_v2_config(ConnectorName) -> + #{ + <<"connector">> => ConnectorName, + <<"enable">> => true, + <<"kafka">> => #{ + <<"buffer">> => #{ + <<"memory_overload_protection">> => false, + <<"mode">> => <<"memory">>, + <<"per_partition_limit">> => <<"2GB">>, + <<"segment_bytes">> => <<"100MB">> + }, + <<"compression">> => <<"no_compression">>, + <<"kafka_header_value_encode_mode">> => <<"none">>, + <<"max_batch_bytes">> => <<"896KB">>, + <<"max_inflight">> => 10, + <<"message">> => #{ + <<"key">> => <<"${.clientid}">>, + <<"timestamp">> => <<"${.timestamp}">>, + <<"value">> => <<"${.payload}">> + }, + <<"partition_count_refresh_interval">> => <<"60s">>, + <<"partition_strategy">> => <<"random">>, + <<"query_mode">> => <<"sync">>, + <<"required_acks">> => <<"all_isr">>, + <<"sync_query_timeout">> => <<"5s">>, + <<"topic">> => emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition() + }, + <<"local_topic">> => <<"kafka_t/#">>, + <<"resource_opts">> => #{ + <<"health_check_interval">> => <<"15s">> + } + }. 
+ +connector_config() -> + #{ + <<"authentication">> => <<"none">>, + <<"bootstrap_hosts">> => iolist_to_binary(kafka_hosts_string()), + <<"connect_timeout">> => <<"5s">>, + <<"enable">> => true, + <<"metadata_request_timeout">> => <<"5s">>, + <<"min_metadata_refresh_interval">> => <<"3s">>, + <<"socket_opts">> => + #{ + <<"recbuf">> => <<"1024KB">>, + <<"sndbuf">> => <<"1024KB">>, + <<"tcp_keepalive">> => <<"none">> + }, + <<"ssl">> => + #{ + <<"ciphers">> => [], + <<"depth">> => 10, + <<"enable">> => false, + <<"hibernate_after">> => <<"5s">>, + <<"log_level">> => <<"notice">>, + <<"reuse_sessions">> => true, + <<"secure_renegotiate">> => true, + <<"verify">> => <<"verify_peer">>, + <<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>] + } + }. + +kafka_hosts_string() -> + KafkaHost = os:getenv("KAFKA_PLAIN_HOST", "kafka-1.emqx.net"), + KafkaPort = os:getenv("KAFKA_PLAIN_PORT", "9092"), + KafkaHost ++ ":" ++ KafkaPort. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + t_create_remove_list(_) -> [] = emqx_bridge_v2:list(), ConnectorConfig = connector_config(), @@ -186,107 +302,3 @@ t_unknown_topic(_Config) -> emqx_bridge_v2_testlib:get_bridge_api(?TYPE, BridgeName) ), ok. - -check_send_message_with_bridge(BridgeName) -> - %% ###################################### - %% Create Kafka message - %% ###################################### - Time = erlang:unique_integer(), - BinTime = integer_to_binary(Time), - Payload = list_to_binary("payload" ++ integer_to_list(Time)), - Msg = #{ - clientid => BinTime, - payload => Payload, - timestamp => Time - }, - Offset = resolve_kafka_offset(), - %% ###################################### - %% Send message - %% ###################################### - emqx_bridge_v2:send_message(?TYPE, BridgeName, Msg, #{}), - %% ###################################### - %% Check if message is sent to Kafka - %% ###################################### - check_kafka_message_payload(Offset, Payload). - -resolve_kafka_offset() -> - KafkaTopic = emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition(), - Partition = 0, - Hosts = emqx_bridge_kafka_impl_producer_SUITE:kafka_hosts(), - {ok, Offset0} = emqx_bridge_kafka_impl_producer_SUITE:resolve_kafka_offset( - Hosts, KafkaTopic, Partition - ), - Offset0. - -check_kafka_message_payload(Offset, ExpectedPayload) -> - KafkaTopic = emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition(), - Partition = 0, - Hosts = emqx_bridge_kafka_impl_producer_SUITE:kafka_hosts(), - {ok, {_, [KafkaMsg0]}} = brod:fetch(Hosts, KafkaTopic, Partition, Offset), - ?assertMatch(#kafka_message{value = ExpectedPayload}, KafkaMsg0). 
- -bridge_v2_config(ConnectorName) -> - #{ - <<"connector">> => ConnectorName, - <<"enable">> => true, - <<"kafka">> => #{ - <<"buffer">> => #{ - <<"memory_overload_protection">> => false, - <<"mode">> => <<"memory">>, - <<"per_partition_limit">> => <<"2GB">>, - <<"segment_bytes">> => <<"100MB">> - }, - <<"compression">> => <<"no_compression">>, - <<"kafka_header_value_encode_mode">> => <<"none">>, - <<"max_batch_bytes">> => <<"896KB">>, - <<"max_inflight">> => 10, - <<"message">> => #{ - <<"key">> => <<"${.clientid}">>, - <<"timestamp">> => <<"${.timestamp}">>, - <<"value">> => <<"${.payload}">> - }, - <<"partition_count_refresh_interval">> => <<"60s">>, - <<"partition_strategy">> => <<"random">>, - <<"query_mode">> => <<"sync">>, - <<"required_acks">> => <<"all_isr">>, - <<"sync_query_timeout">> => <<"5s">>, - <<"topic">> => emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition() - }, - <<"local_topic">> => <<"kafka_t/#">>, - <<"resource_opts">> => #{ - <<"health_check_interval">> => <<"15s">> - } - }. - -connector_config() -> - #{ - <<"authentication">> => <<"none">>, - <<"bootstrap_hosts">> => iolist_to_binary(kafka_hosts_string()), - <<"connect_timeout">> => <<"5s">>, - <<"enable">> => true, - <<"metadata_request_timeout">> => <<"5s">>, - <<"min_metadata_refresh_interval">> => <<"3s">>, - <<"socket_opts">> => - #{ - <<"recbuf">> => <<"1024KB">>, - <<"sndbuf">> => <<"1024KB">>, - <<"tcp_keepalive">> => <<"none">> - }, - <<"ssl">> => - #{ - <<"ciphers">> => [], - <<"depth">> => 10, - <<"enable">> => false, - <<"hibernate_after">> => <<"5s">>, - <<"log_level">> => <<"notice">>, - <<"reuse_sessions">> => true, - <<"secure_renegotiate">> => true, - <<"verify">> => <<"verify_peer">>, - <<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>] - } - }. - -kafka_hosts_string() -> - KafkaHost = os:getenv("KAFKA_PLAIN_HOST", "kafka-1.emqx.net"), - KafkaPort = os:getenv("KAFKA_PLAIN_PORT", "9092"), - KafkaHost ++ ":" ++ KafkaPort. From 4c54efd8b1935c13db82b31c82edb1350c857152 Mon Sep 17 00:00:00 2001 From: Ilya Averyanov Date: Wed, 15 Nov 2023 23:18:52 +0300 Subject: [PATCH 006/101] fix(auth): fix deadlock while stopping mongodb resource --- apps/emqx_mongodb/rebar.config | 2 +- changes/ce/fix-11955.en.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changes/ce/fix-11955.en.md diff --git a/apps/emqx_mongodb/rebar.config b/apps/emqx_mongodb/rebar.config index 577dee8b8..5be42ef17 100644 --- a/apps/emqx_mongodb/rebar.config +++ b/apps/emqx_mongodb/rebar.config @@ -3,5 +3,5 @@ {erl_opts, [debug_info]}. {deps, [ {emqx_connector, {path, "../../apps/emqx_connector"}} , {emqx_resource, {path, "../../apps/emqx_resource"}} - , {mongodb, {git, "https://github.com/emqx/mongodb-erlang", {tag, "v3.0.21"}}} + , {mongodb, {git, "https://github.com/emqx/mongodb-erlang", {tag, "v3.0.22"}}} ]}. diff --git a/changes/ce/fix-11955.en.md b/changes/ce/fix-11955.en.md new file mode 100644 index 000000000..aae3f0602 --- /dev/null +++ b/changes/ce/fix-11955.en.md @@ -0,0 +1 @@ +Fix EMQX graceful stop when there is an unavailable MongoDB resource present. From b92821188b43466f449f73572613ce1d9865dc1f Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 16 Nov 2023 10:08:41 -0300 Subject: [PATCH 007/101] fix(kafka_producer): make status `connecting` while the client fails to connect Fixes https://emqx.atlassian.net/browse/EMQX-11408 To make it consistent with the previous bridge behavior. Also, introduces macros for resource status to avoid problems with typos. 
--- apps/emqx_bridge/src/emqx_bridge_v2.erl | 25 ++++--- .../emqx_bridge/test/emqx_bridge_v2_SUITE.erl | 68 ++++++++++++++++--- .../test/emqx_bridge_v2_api_SUITE.erl | 4 +- .../test/emqx_bridge_v2_test_connector.erl | 4 +- .../src/emqx_bridge_kafka_impl_producer.erl | 10 +-- .../emqx_bridge_v2_kafka_producer_SUITE.erl | 40 +++++++++++ apps/emqx_resource/include/emqx_resource.hrl | 18 ++++- 7 files changed, 137 insertions(+), 32 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_bridge_v2.erl b/apps/emqx_bridge/src/emqx_bridge_v2.erl index 70e248e56..7ce266922 100644 --- a/apps/emqx_bridge/src/emqx_bridge_v2.erl +++ b/apps/emqx_bridge/src/emqx_bridge_v2.erl @@ -202,33 +202,36 @@ lookup(Type, Name) -> %% The connector should always exist %% ... but, in theory, there might be no channels associated to it when we try %% to delete the connector, and then this reference will become dangling... - InstanceData = + ConnectorData = case emqx_resource:get_instance(ConnectorId) of {ok, _, Data} -> Data; {error, not_found} -> #{} end, - %% Find the Bridge V2 status from the InstanceData - Channels = maps:get(added_channels, InstanceData, #{}), + %% Find the Bridge V2 status from the ConnectorData + ConnectorStatus = maps:get(status, ConnectorData, undefined), + Channels = maps:get(added_channels, ConnectorData, #{}), BridgeV2Id = id(Type, Name, BridgeConnector), ChannelStatus = maps:get(BridgeV2Id, Channels, undefined), {DisplayBridgeV2Status, ErrorMsg} = - case ChannelStatus of - #{status := connected} -> - {connected, <<"">>}; - #{status := Status, error := undefined} -> + case {ChannelStatus, ConnectorStatus} of + {#{status := ?status_connected}, _} -> + {?status_connected, <<"">>}; + {#{error := resource_not_operational}, ?status_connecting} -> + {?status_connecting, <<"Not installed">>}; + {#{status := Status, error := undefined}, _} -> {Status, <<"Unknown reason">>}; - #{status := Status, error := Error} -> + {#{status := Status, error := Error}, _} -> {Status, emqx_utils:readable_error_msg(Error)}; - undefined -> - {disconnected, <<"Pending installation">>} + {undefined, _} -> + {?status_disconnected, <<"Not installed">>} end, {ok, #{ type => bin(Type), name => bin(Name), raw_config => RawConf, - resource_data => InstanceData, + resource_data => ConnectorData, status => DisplayBridgeV2Status, error => ErrorMsg }} diff --git a/apps/emqx_bridge/test/emqx_bridge_v2_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_v2_SUITE.erl index 2766088a1..791997fc3 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v2_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v2_SUITE.erl @@ -20,6 +20,7 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). -import(emqx_common_test_helpers, [on_exit/1]). @@ -43,7 +44,7 @@ con_schema() -> { con_type(), hoconsc:mk( - hoconsc:map(name, typerefl:map()), + hoconsc:map(name, hoconsc:ref(?MODULE, connector_config)), #{ desc => <<"Test Connector Config">>, required => false @@ -52,6 +53,15 @@ con_schema() -> } ]. +fields(connector_config) -> + [ + {enable, hoconsc:mk(typerefl:boolean(), #{})}, + {resource_opts, hoconsc:mk(typerefl:map(), #{})}, + {on_start_fun, hoconsc:mk(typerefl:binary(), #{})}, + {on_get_status_fun, hoconsc:mk(typerefl:binary(), #{})}, + {on_add_channel_fun, hoconsc:mk(typerefl:binary(), #{})} + ]. 
+ con_config() -> #{ <<"enable">> => true, @@ -112,6 +122,7 @@ setup_mocks() -> catch meck:new(emqx_connector_schema, MeckOpts), meck:expect(emqx_connector_schema, fields, 1, con_schema()), + meck:expect(emqx_connector_schema, connector_type_to_bridge_types, 1, [con_type()]), catch meck:new(emqx_connector_resource, MeckOpts), meck:expect(emqx_connector_resource, connector_to_resource_type, 1, con_mod()), @@ -159,15 +170,7 @@ init_per_testcase(_TestCase, Config) -> ets:new(fun_table_name(), [named_table, public]), %% Create a fake connector {ok, _} = emqx_connector:create(con_type(), con_name(), con_config()), - [ - {mocked_mods, [ - emqx_connector_schema, - emqx_connector_resource, - - emqx_bridge_v2 - ]} - | Config - ]. + Config. end_per_testcase(_TestCase, _Config) -> ets:delete(fun_table_name()), @@ -846,6 +849,51 @@ t_start_operation_when_on_add_channel_gives_error(_Config) -> ), ok. +t_lookup_status_when_connecting(_Config) -> + ResponseETS = ets:new(response_ets, [public]), + ets:insert(ResponseETS, {on_get_status_value, ?status_connecting}), + OnGetStatusFun = wrap_fun(fun() -> + ets:lookup_element(ResponseETS, on_get_status_value, 2) + end), + + ConnectorConfig = emqx_utils_maps:deep_merge(con_config(), #{ + <<"on_get_status_fun">> => OnGetStatusFun, + <<"resource_opts">> => #{<<"start_timeout">> => 100} + }), + ConnectorName = ?FUNCTION_NAME, + ct:pal("connector config:\n ~p", [ConnectorConfig]), + {ok, _} = emqx_connector:create(con_type(), ConnectorName, ConnectorConfig), + + ActionName = my_test_action, + ChanStatusFun = wrap_fun(fun() -> ?status_disconnected end), + ActionConfig = (bridge_config())#{ + <<"on_get_channel_status_fun">> => ChanStatusFun, + <<"connector">> => atom_to_binary(ConnectorName) + }, + ct:pal("action config:\n ~p", [ActionConfig]), + {ok, _} = emqx_bridge_v2:create(bridge_type(), ActionName, ActionConfig), + + %% Top-level status is connecting if the connector status is connecting, but the + %% channel is not yet installed. `resource_data.added_channels.$channel_id.status' + %% contains true internal status. + {ok, Res} = emqx_bridge_v2:lookup(bridge_type(), ActionName), + ?assertMatch( + #{ + %% This is the action's public status + status := ?status_connecting, + resource_data := + #{ + %% This is the connector's status + status := ?status_connecting + } + }, + Res + ), + #{resource_data := #{added_channels := Channels}} = Res, + [{_Id, ChannelData}] = maps:to_list(Channels), + ?assertMatch(#{status := ?status_disconnected}, ChannelData), + ok. 
+ %% Helper Functions wait_until(Fun) -> diff --git a/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl index 059f9ac9f..b99a462b4 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl @@ -587,7 +587,7 @@ t_broken_bridge_config(Config) -> <<"type">> := ?BRIDGE_TYPE, <<"connector">> := <<"does_not_exist">>, <<"status">> := <<"disconnected">>, - <<"error">> := <<"Pending installation">> + <<"error">> := <<"Not installed">> } ]}, request_json(get, uri([?ROOT]), Config) @@ -640,7 +640,7 @@ t_fix_broken_bridge_config(Config) -> <<"type">> := ?BRIDGE_TYPE, <<"connector">> := <<"does_not_exist">>, <<"status">> := <<"disconnected">>, - <<"error">> := <<"Pending installation">> + <<"error">> := <<"Not installed">> } ]}, request_json(get, uri([?ROOT]), Config) diff --git a/apps/emqx_bridge/test/emqx_bridge_v2_test_connector.erl b/apps/emqx_bridge/test/emqx_bridge_v2_test_connector.erl index 0138832a0..3c5204ea1 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v2_test_connector.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v2_test_connector.erl @@ -43,8 +43,8 @@ on_start( ) -> Fun = emqx_bridge_v2_SUITE:unwrap_fun(FunRef), Fun(Conf); -on_start(_InstId, _Config) -> - {ok, #{}}. +on_start(_InstId, Config) -> + {ok, Config}. on_add_channel( _InstId, diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl index 4422d8dd5..84401aaa6 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl @@ -481,11 +481,11 @@ on_get_status( case wolff_client_sup:find_client(ClientId) of {ok, Pid} -> case wolff_client:check_connectivity(Pid) of - ok -> connected; - {error, Error} -> {connecting, State, Error} + ok -> ?status_connected; + {error, Error} -> {?status_connecting, State, Error} end; {error, _Reason} -> - connecting + ?status_connecting end. on_get_channel_status( @@ -499,10 +499,10 @@ on_get_channel_status( #{kafka_topic := KafkaTopic} = maps:get(ChannelId, Channels), try ok = check_topic_and_leader_connections(ClientId, KafkaTopic), - connected + ?status_connected catch throw:#{reason := restarting} -> - conneting + ?status_connecting end. check_topic_and_leader_connections(ClientId, KafkaTopic) -> diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl index fba72a1d7..6c48146cd 100644 --- a/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl +++ b/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl @@ -23,6 +23,8 @@ -include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include_lib("brod/include/brod.hrl"). +-import(emqx_common_test_helpers, [on_exit/1]). + -define(TYPE, kafka_producer). %%------------------------------------------------------------------------------ @@ -55,6 +57,13 @@ end_per_suite(Config) -> emqx_cth_suite:stop(Apps), ok. +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(_TestCase, _Config) -> + emqx_common_test_helpers:call_janitor(60_000), + ok. + %%------------------------------------------------------------------------------------- %% Helper fns %%------------------------------------------------------------------------------------- @@ -163,6 +172,16 @@ kafka_hosts_string() -> KafkaPort = os:getenv("KAFKA_PLAIN_PORT", "9092"), KafkaHost ++ ":" ++ KafkaPort. 
+create_connector(Name, Config) -> + Res = emqx_connector:create(?TYPE, Name, Config), + on_exit(fun() -> emqx_connector:remove(?TYPE, Name) end), + Res. + +create_action(Name, Config) -> + Res = emqx_bridge_v2:create(?TYPE, Name, Config), + on_exit(fun() -> emqx_bridge_v2:remove(?TYPE, Name) end), + Res. + %%------------------------------------------------------------------------------ %% Testcases %%------------------------------------------------------------------------------ @@ -302,3 +321,24 @@ t_unknown_topic(_Config) -> emqx_bridge_v2_testlib:get_bridge_api(?TYPE, BridgeName) ), ok. + +t_bad_url(_Config) -> + ConnectorName = <<"test_connector">>, + ActionName = <<"test_action">>, + ActionConfig = bridge_v2_config(<<"test_connector">>), + ConnectorConfig0 = connector_config(), + ConnectorConfig = ConnectorConfig0#{<<"bootstrap_hosts">> := <<"bad_host:9092">>}, + ?assertMatch({ok, _}, create_connector(ConnectorName, ConnectorConfig)), + ?assertMatch({ok, _}, create_action(ActionName, ActionConfig)), + ?assertMatch( + {ok, #{ + resource_data := + #{ + status := connecting, + error := [#{reason := unresolvable_hostname}] + } + }}, + emqx_connector:lookup(?TYPE, ConnectorName) + ), + ?assertMatch({ok, #{status := connecting}}, emqx_bridge_v2:lookup(?TYPE, ActionName)), + ok. diff --git a/apps/emqx_resource/include/emqx_resource.hrl b/apps/emqx_resource/include/emqx_resource.hrl index fa86e68c9..b34da9a63 100644 --- a/apps/emqx_resource/include/emqx_resource.hrl +++ b/apps/emqx_resource/include/emqx_resource.hrl @@ -13,6 +13,16 @@ %% See the License for the specific language governing permissions and %% limitations under the License. %%-------------------------------------------------------------------- + +%% bridge/connector/action status +-define(status_connected, connected). +-define(status_connecting, connecting). +-define(status_disconnected, disconnected). +%% Note: the `stopped' status can only be emitted by `emqx_resource_manager'... Modules +%% implementing `emqx_resource' behavior should not return it. The `rm_' prefix is to +%% remind us of that. +-define(rm_status_stopped, stopped). + -type resource_type() :: module(). -type resource_id() :: binary(). -type channel_id() :: binary(). @@ -21,8 +31,12 @@ -type resource_config() :: term(). -type resource_spec() :: map(). -type resource_state() :: term(). --type resource_status() :: connected | disconnected | connecting | stopped. --type channel_status() :: connected | connecting | disconnected. +%% Note: the `stopped' status can only be emitted by `emqx_resource_manager'... Modules +%% implementing `emqx_resource' behavior should not return it. +-type resource_status() :: + ?status_connected | ?status_disconnected | ?status_connecting | ?rm_status_stopped. +-type health_check_status() :: ?status_connected | ?status_disconnected | ?status_connecting. +-type channel_status() :: ?status_connected | ?status_connecting | ?status_disconnected. -type callback_mode() :: always_sync | async_if_possible. 
-type query_mode() :: simple_sync From 58437cd35ac8f4eae945ea2e54bee44088e23a59 Mon Sep 17 00:00:00 2001 From: Ilya Averyanov Date: Fri, 17 Nov 2023 12:34:36 +0300 Subject: [PATCH 008/101] fix(mongodb): fix deadlock while stopping mongodb resource --- apps/emqx_mongodb/rebar.config | 2 +- changes/ce/fix-11955.en.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changes/ce/fix-11955.en.md diff --git a/apps/emqx_mongodb/rebar.config b/apps/emqx_mongodb/rebar.config index 577dee8b8..5be42ef17 100644 --- a/apps/emqx_mongodb/rebar.config +++ b/apps/emqx_mongodb/rebar.config @@ -3,5 +3,5 @@ {erl_opts, [debug_info]}. {deps, [ {emqx_connector, {path, "../../apps/emqx_connector"}} , {emqx_resource, {path, "../../apps/emqx_resource"}} - , {mongodb, {git, "https://github.com/emqx/mongodb-erlang", {tag, "v3.0.21"}}} + , {mongodb, {git, "https://github.com/emqx/mongodb-erlang", {tag, "v3.0.22"}}} ]}. diff --git a/changes/ce/fix-11955.en.md b/changes/ce/fix-11955.en.md new file mode 100644 index 000000000..aae3f0602 --- /dev/null +++ b/changes/ce/fix-11955.en.md @@ -0,0 +1 @@ +Fix EMQX graceful stop when there is an unavailable MongoDB resource present. From 9feba802e9add26a0f0d7542dc3eee9d5f7a7531 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Fri, 17 Nov 2023 17:53:30 +0100 Subject: [PATCH 009/101] chore: add convenience function for creating action schemas --- .../src/schema/emqx_bridge_v2_schema.erl | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl b/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl index ede783e97..9016ea97c 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl @@ -40,6 +40,8 @@ -export([types/0, types_sc/0]). +-export([make_action_schema/1]). + -export_type([action_type/0]). %% Should we explicitly list them here so dialyzer may be more helpful? @@ -116,7 +118,9 @@ roots() -> end. fields(actions) -> - registered_schema_fields(). + registered_schema_fields(); +fields(resource_opts) -> + emqx_resource_schema:create_opts(_Overrides = []). registered_schema_fields() -> [ @@ -150,6 +154,24 @@ examples(Method) -> SchemaModules = [Mod || {_, Mod} <- emqx_action_info:registered_schema_modules()], lists:foldl(Fun, #{}, SchemaModules). +%%====================================================================================== +%% Helper functions for making HOCON Schema +%%====================================================================================== + +make_action_schema(ActionParametersRef) -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {connector, + mk(binary(), #{ + desc => ?DESC(emqx_connector_schema, "connector_field"), required => true + })}, + {description, emqx_schema:description_schema()}, + {local_topic, mk(binary(), #{required => false, desc => ?DESC(mqtt_topic)})}, + {parameters, ActionParametersRef}, + {resource_opts, + mk(ref(?MODULE, resource_opts), #{default => #{}, desc => ?DESC(resource_opts)})} + ]. + -ifdef(TEST). -include_lib("hocon/include/hocon_types.hrl"). 
schema_homogeneous_test() -> From 86c126ffcd2c21365f198aee43ca3aaf72810d76 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Fri, 17 Nov 2023 17:54:55 +0100 Subject: [PATCH 010/101] feat: callbacks for fixup after automatic Bridge V1 upgrade/downgrade This commit adds callbacks to the emqx_action_info module for doing fixes (such as changing a field name) after the automatic split of a Bridge V1 config or the merge of connector and action configs for the compatibility layer. --- apps/emqx_bridge/src/emqx_action_info.erl | 44 +++++++++++++++++-- apps/emqx_bridge/src/emqx_bridge_api.erl | 18 +------- apps/emqx_bridge/src/emqx_bridge_v2.erl | 3 +- ...mqx_bridge_azure_event_hub_action_info.erl | 20 ++++++++- .../src/emqx_bridge_kafka.erl | 2 +- .../src/emqx_bridge_kafka_action_info.erl | 20 ++++++++- .../src/schema/emqx_connector_schema.erl | 12 ++++- 7 files changed, 94 insertions(+), 25 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index e1932af44..f47aa8af8 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -25,15 +25,25 @@ action_type_to_bridge_v1_type/1, bridge_v1_type_to_action_type/1, is_action_type/1, - registered_schema_modules/0 + registered_schema_modules/0, + action_to_bridge_v1_fixup/2, + bridge_v1_to_action_fixup/2 ]). -callback bridge_v1_type_name() -> atom(). -callback action_type_name() -> atom(). -callback connector_type_name() -> atom(). -callback schema_module() -> atom(). +%% Define this if the automatic config downgrade is not enough for the bridge. +-callback action_to_bridge_v1_fixup(map()) -> term(). +%% Define this if the automatic config upgrade is not enough for the bridge. +-callback bridge_v1_to_action_fixup(map()) -> term(). --optional_callbacks([bridge_v1_type_name/0]). +-optional_callbacks([ + bridge_v1_type_name/0, + action_to_bridge_v1_fixup/1, + bridge_v1_to_action_fixup/1 +]). %% ==================================================================== %% Hadcoded list of info modules for actions @@ -111,10 +121,33 @@ registered_schema_modules() -> Schemas = maps:get(action_type_to_schema_module, InfoMap), maps:to_list(Schemas). +action_to_bridge_v1_fixup(ActionOrBridgeType, Config) -> + Module = get_action_info_module(ActionOrBridgeType), + case erlang:function_exported(Module, action_to_bridge_v1_fixup, 1) of + true -> + Module:action_to_bridge_v1_fixup(Config); + false -> + Config + end. + +bridge_v1_to_action_fixup(ActionOrBridgeType, Config) -> + Module = get_action_info_module(ActionOrBridgeType), + case erlang:function_exported(Module, bridge_v1_to_action_fixup, 1) of + true -> + Module:bridge_v1_to_action_fixup(Config); + false -> + Config + end. + %% ==================================================================== %% Internal functions for building the info map and accessing it %% ==================================================================== +get_action_info_module(ActionOrBridgeType) -> + InfoMap = info_map(), + ActionInfoModuleMap = maps:get(action_type_to_info_module, InfoMap), + maps:get(ActionOrBridgeType, ActionInfoModuleMap). + internal_emqx_action_persistent_term_info_key() -> ?FUNCTION_NAME. @@ -162,7 +195,8 @@ initial_info_map() -> bridge_v1_type_to_action_type => #{}, action_type_to_bridge_v1_type => #{}, action_type_to_connector_type => #{}, - action_type_to_schema_module => #{} + action_type_to_schema_module => #{}, + action_type_to_info_module => #{} }. 
get_info_map(Module) -> @@ -196,5 +230,9 @@ get_info_map(Module) -> }, action_type_to_schema_module => #{ ActionType => Module:schema_module() + }, + action_type_to_info_module => #{ + ActionType => Module, + BridgeV1Type => Module } }. diff --git a/apps/emqx_bridge/src/emqx_bridge_api.erl b/apps/emqx_bridge/src/emqx_bridge_api.erl index d263817bf..188f26ab5 100644 --- a/apps/emqx_bridge/src/emqx_bridge_api.erl +++ b/apps/emqx_bridge/src/emqx_bridge_api.erl @@ -900,7 +900,7 @@ format_resource( case emqx_bridge_v2:is_bridge_v2_type(Type) of true -> %% The defaults are already filled in - downgrade_raw_conf(Type, RawConf); + RawConf; false -> fill_defaults(Type, RawConf) end, @@ -1164,19 +1164,3 @@ upgrade_type(Type) -> downgrade_type(Type) -> emqx_bridge_lib:downgrade_type(Type). - -%% TODO: move it to callback -downgrade_raw_conf(kafka_producer, RawConf) -> - rename(<<"parameters">>, <<"kafka">>, RawConf); -downgrade_raw_conf(azure_event_hub_producer, RawConf) -> - rename(<<"parameters">>, <<"kafka">>, RawConf); -downgrade_raw_conf(_Type, RawConf) -> - RawConf. - -rename(OldKey, NewKey, Map) -> - case maps:find(OldKey, Map) of - {ok, Value} -> - maps:remove(OldKey, maps:put(NewKey, Value, Map)); - error -> - Map - end. diff --git a/apps/emqx_bridge/src/emqx_bridge_v2.erl b/apps/emqx_bridge/src/emqx_bridge_v2.erl index 70e248e56..f8939df8c 100644 --- a/apps/emqx_bridge/src/emqx_bridge_v2.erl +++ b/apps/emqx_bridge/src/emqx_bridge_v2.erl @@ -1104,7 +1104,8 @@ bridge_v1_lookup_and_transform_helper( ), BridgeV1Config1 = maps:remove(<<"connector">>, BridgeV2RawConfig2), BridgeV1Config2 = maps:merge(BridgeV1Config1, ConnectorRawConfig2), - BridgeV1Tmp = maps:put(raw_config, BridgeV1Config2, BridgeV2), + BridgeV1Config3 = emqx_action_info:action_to_bridge_v1_fixup(BridgeV2Type, BridgeV1Config2), + BridgeV1Tmp = maps:put(raw_config, BridgeV1Config3, BridgeV2), BridgeV1 = maps:remove(status, BridgeV1Tmp), BridgeV2Status = maps:get(status, BridgeV2, undefined), BridgeV2Error = maps:get(error, BridgeV2, undefined), diff --git a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl index 8ebdb2435..b18adcada 100644 --- a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl +++ b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl @@ -10,7 +10,9 @@ bridge_v1_type_name/0, action_type_name/0, connector_type_name/0, - schema_module/0 + schema_module/0, + action_to_bridge_v1_fixup/1, + bridge_v1_to_action_fixup/1 ]). bridge_v1_type_name() -> azure_event_hub_producer. @@ -20,3 +22,19 @@ action_type_name() -> azure_event_hub_producer. connector_type_name() -> azure_event_hub_producer. schema_module() -> emqx_bridge_azure_event_hub. + +action_to_bridge_v1_fixup(Config) -> + rename(<<"parameters">>, <<"kafka">>, Config). + +rename(OldKey, NewKey, Map) -> + case maps:find(OldKey, Map) of + {ok, Value} -> + maps:remove(OldKey, maps:put(NewKey, Value, Map)); + error -> + Map + end. + +bridge_v1_to_action_fixup(Config) -> + KafkaField = emqx_utils_maps:deep_get([<<"parameters">>, <<"kafka">>], Config, #{}), + Config1 = emqx_utils_maps:deep_remove([<<"parameters">>, <<"kafka">>], Config), + emqx_utils_maps:deep_merge(Config1, #{<<"parameters">> => KafkaField}). 
diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl index d193738bb..9709bb174 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl @@ -610,7 +610,7 @@ producer_opts() -> ]. %% Since e5.3.1, we want to rename the field 'kafka' to 'parameters' -%% Hoever we need to keep it backward compatible for generated schema json (version 0.1.0) +%% However we need to keep it backward compatible for generated schema json (version 0.1.0) %% since schema is data for the 'schemas' API. parameters_field() -> {Name, Alias} = diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl index 50d4f0c63..d0b14cf2c 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl @@ -10,7 +10,9 @@ bridge_v1_type_name/0, action_type_name/0, connector_type_name/0, - schema_module/0 + schema_module/0, + action_to_bridge_v1_fixup/1, + bridge_v1_to_action_fixup/1 ]). bridge_v1_type_name() -> kafka. @@ -20,3 +22,19 @@ action_type_name() -> kafka_producer. connector_type_name() -> kafka_producer. schema_module() -> emqx_bridge_kafka. + +action_to_bridge_v1_fixup(Config) -> + rename(<<"parameters">>, <<"kafka">>, Config). + +rename(OldKey, NewKey, Map) -> + case maps:find(OldKey, Map) of + {ok, Value} -> + maps:remove(OldKey, maps:put(NewKey, Value, Map)); + error -> + Map + end. + +bridge_v1_to_action_fixup(Config) -> + KafkaField = emqx_utils_maps:deep_get([<<"parameters">>, <<"kafka">>], Config, #{}), + Config1 = emqx_utils_maps:deep_remove([<<"parameters">>, <<"kafka">>], Config), + emqx_utils_maps:deep_merge(Config1, #{<<"parameters">> => KafkaField}). 
diff --git a/apps/emqx_connector/src/schema/emqx_connector_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_schema.erl index e4308ac54..082e190ee 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_schema.erl @@ -203,11 +203,21 @@ transform_old_style_bridges_to_connector_and_actions_of_type( [<<"bridges">>, to_bin(BridgeType), BridgeName], RawConfigSoFar1 ), + %% Take fields that should be in the top level of the action map + TopLevelFields = [<<"resource_opts">>, <<"enable">>, <<"connector">>], + TopLevelActionFields = maps:with(TopLevelFields, ActionMap), + ParametersActionFields = maps:without(TopLevelFields, ActionMap), + %% Action map should be wrapped under parameters key + WrappedParameters = #{<<"parameters">> => ParametersActionFields}, + FinalActionMap = maps:merge(TopLevelActionFields, WrappedParameters), + FixedActionMap = emqx_action_info:bridge_v1_to_action_fixup( + BridgeType, FinalActionMap + ), %% Add action RawConfigSoFar3 = emqx_utils_maps:deep_put( [actions_config_name(), to_bin(maybe_rename(BridgeType)), BridgeName], RawConfigSoFar2, - ActionMap + FixedActionMap ), RawConfigSoFar3 end, From eb3f54184ed8169d3368a4b135c02ce18d3d4f52 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Fri, 17 Nov 2023 16:02:16 -0300 Subject: [PATCH 011/101] refactor: address review comments and avoid transformations without schema knowledge --- apps/emqx_bridge/src/emqx_action_info.erl | 35 +++++++++++++--- .../src/schema/emqx_bridge_v2_schema.erl | 11 +++-- ...qx_bridge_v1_compatibility_layer_SUITE.erl | 10 +---- ...mqx_bridge_azure_event_hub_action_info.erl | 14 +------ .../src/emqx_bridge_kafka_action_info.erl | 29 ++++++++------ .../src/schema/emqx_connector_schema.erl | 40 ++++--------------- apps/emqx_utils/src/emqx_utils_maps.erl | 11 ++++- 7 files changed, 74 insertions(+), 76 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index f47aa8af8..b5d88c4d8 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -35,9 +35,9 @@ -callback connector_type_name() -> atom(). -callback schema_module() -> atom(). %% Define this if the automatic config downgrade is not enough for the bridge. --callback action_to_bridge_v1_fixup(map()) -> term(). +-callback action_to_bridge_v1_fixup(map()) -> map(). %% Define this if the automatic config upgrade is not enough for the bridge. --callback bridge_v1_to_action_fixup(map()) -> term(). +-callback bridge_v1_to_action_fixup(map()) -> map(). -optional_callbacks([ bridge_v1_type_name/0, @@ -130,15 +130,38 @@ action_to_bridge_v1_fixup(ActionOrBridgeType, Config) -> Config end. -bridge_v1_to_action_fixup(ActionOrBridgeType, Config) -> +bridge_v1_to_action_fixup(ActionOrBridgeType, Config0) -> Module = get_action_info_module(ActionOrBridgeType), case erlang:function_exported(Module, bridge_v1_to_action_fixup, 1) of true -> - Module:bridge_v1_to_action_fixup(Config); + Config1 = Module:bridge_v1_to_action_fixup(Config0), + common_bridge_v1_to_action_adapter(Config1); false -> - Config + common_bridge_v1_to_action_adapter(Config0) end. 
+%% ==================================================================== +%% Helper fns +%% ==================================================================== + +common_bridge_v1_to_action_adapter(RawConfig) -> + TopKeys = [ + <<"enable">>, + <<"connector">>, + <<"local_topic">>, + <<"resource_opts">>, + <<"description">>, + <<"parameters">> + ], + TopMap = maps:with(TopKeys, RawConfig), + RestMap = maps:without(TopKeys, RawConfig), + %% Other parameters should be stuffed into `parameters' + emqx_utils_maps:update_if_present( + <<"parameters">>, + fun(Old) -> emqx_utils_maps:deep_merge(Old, RestMap) end, + TopMap + ). + %% ==================================================================== %% Internal functions for building the info map and accessing it %% ==================================================================== @@ -146,7 +169,7 @@ bridge_v1_to_action_fixup(ActionOrBridgeType, Config) -> get_action_info_module(ActionOrBridgeType) -> InfoMap = info_map(), ActionInfoModuleMap = maps:get(action_type_to_info_module, InfoMap), - maps:get(ActionOrBridgeType, ActionInfoModuleMap). + maps:get(ActionOrBridgeType, ActionInfoModuleMap, undefined). internal_emqx_action_persistent_term_info_key() -> ?FUNCTION_NAME. diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl b/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl index 9016ea97c..d2fa85f92 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl @@ -40,7 +40,7 @@ -export([types/0, types_sc/0]). --export([make_action_schema/1]). +-export([make_producer_action_schema/1, make_consumer_action_schema/1]). -export_type([action_type/0]). @@ -158,7 +158,13 @@ examples(Method) -> %% Helper functions for making HOCON Schema %%====================================================================================== -make_action_schema(ActionParametersRef) -> +make_producer_action_schema(ActionParametersRef) -> + [ + {local_topic, mk(binary(), #{required => false, desc => ?DESC(mqtt_topic)})} + | make_consumer_action_schema(ActionParametersRef) + ]. + +make_consumer_action_schema(ActionParametersRef) -> [ {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, {connector, @@ -166,7 +172,6 @@ make_action_schema(ActionParametersRef) -> desc => ?DESC(emqx_connector_schema, "connector_field"), required => true })}, {description, emqx_schema:description_schema()}, - {local_topic, mk(binary(), #{required => false, desc => ?DESC(mqtt_topic)})}, {parameters, ActionParametersRef}, {resource_opts, mk(ref(?MODULE, resource_opts), #{default => #{}, desc => ?DESC(resource_opts)})} diff --git a/apps/emqx_bridge/test/emqx_bridge_v1_compatibility_layer_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_v1_compatibility_layer_SUITE.erl index f3b7fb685..dbced42f4 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v1_compatibility_layer_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v1_compatibility_layer_SUITE.erl @@ -60,15 +60,7 @@ init_per_testcase(_TestCase, Config) -> ets:new(fun_table_name(), [named_table, public]), %% Create a fake connector {ok, _} = emqx_connector:create(con_type(), con_name(), con_config()), - [ - {mocked_mods, [ - emqx_connector_schema, - emqx_connector_resource, - - emqx_bridge_v2 - ]} - | Config - ]. + Config. 
end_per_testcase(_TestCase, _Config) -> ets:delete(fun_table_name()), diff --git a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl index b18adcada..fd1f4f4ff 100644 --- a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl +++ b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl @@ -24,17 +24,7 @@ connector_type_name() -> azure_event_hub_producer. schema_module() -> emqx_bridge_azure_event_hub. action_to_bridge_v1_fixup(Config) -> - rename(<<"parameters">>, <<"kafka">>, Config). - -rename(OldKey, NewKey, Map) -> - case maps:find(OldKey, Map) of - {ok, Value} -> - maps:remove(OldKey, maps:put(NewKey, Value, Map)); - error -> - Map - end. + emqx_bridge_kafka_action_info:action_to_bridge_v1_fixup(Config). bridge_v1_to_action_fixup(Config) -> - KafkaField = emqx_utils_maps:deep_get([<<"parameters">>, <<"kafka">>], Config, #{}), - Config1 = emqx_utils_maps:deep_remove([<<"parameters">>, <<"kafka">>], Config), - emqx_utils_maps:deep_merge(Config1, #{<<"parameters">> => KafkaField}). + emqx_bridge_kafka_action_info:bridge_v1_to_action_fixup(Config). diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl index d0b14cf2c..66ea2bbd7 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl @@ -24,17 +24,22 @@ connector_type_name() -> kafka_producer. schema_module() -> emqx_bridge_kafka. action_to_bridge_v1_fixup(Config) -> - rename(<<"parameters">>, <<"kafka">>, Config). + emqx_utils_maps:rename(<<"parameters">>, <<"kafka">>, Config). -rename(OldKey, NewKey, Map) -> - case maps:find(OldKey, Map) of - {ok, Value} -> - maps:remove(OldKey, maps:put(NewKey, Value, Map)); - error -> - Map - end. +bridge_v1_to_action_fixup(Config0) -> + Config = emqx_utils_maps:rename(<<"kafka">>, <<"parameters">>, Config0), + maps:with(producer_action_field_keys(), Config). -bridge_v1_to_action_fixup(Config) -> - KafkaField = emqx_utils_maps:deep_get([<<"parameters">>, <<"kafka">>], Config, #{}), - Config1 = emqx_utils_maps:deep_remove([<<"parameters">>, <<"kafka">>], Config), - emqx_utils_maps:deep_merge(Config1, #{<<"parameters">> => KafkaField}). +%%------------------------------------------------------------------------------------------ +%% Internal helper fns +%%------------------------------------------------------------------------------------------ + +producer_action_field_keys() -> + [ + to_bin(K) + || {K, _} <- emqx_bridge_kafka:fields(kafka_producer_action) + ]. + +to_bin(B) when is_binary(B) -> B; +to_bin(L) when is_list(L) -> list_to_binary(L); +to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8). diff --git a/apps/emqx_connector/src/schema/emqx_connector_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_schema.erl index 082e190ee..f2b764fdc 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_schema.erl @@ -98,16 +98,16 @@ bridge_configs_to_transform( end. 
split_bridge_to_connector_and_action( - {ConnectorsMap, {BridgeType, BridgeName, BridgeConf, ConnectorFields, PreviousRawConfig}} + {ConnectorsMap, {BridgeType, BridgeName, ActionConf, ConnectorFields, PreviousRawConfig}} ) -> %% Get connector fields from bridge config ConnectorMap = lists:foldl( fun({ConnectorFieldName, _Spec}, ToTransformSoFar) -> - case maps:is_key(to_bin(ConnectorFieldName), BridgeConf) of + case maps:is_key(to_bin(ConnectorFieldName), ActionConf) of true -> NewToTransform = maps:put( to_bin(ConnectorFieldName), - maps:get(to_bin(ConnectorFieldName), BridgeConf), + maps:get(to_bin(ConnectorFieldName), ActionConf), ToTransformSoFar ), NewToTransform; @@ -118,23 +118,6 @@ split_bridge_to_connector_and_action( #{}, ConnectorFields ), - %% Remove connector fields from bridge config to create Action - ActionMap0 = lists:foldl( - fun - ({enable, _Spec}, ToTransformSoFar) -> - %% Enable filed is used in both - ToTransformSoFar; - ({ConnectorFieldName, _Spec}, ToTransformSoFar) -> - case maps:is_key(to_bin(ConnectorFieldName), BridgeConf) of - true -> - maps:remove(to_bin(ConnectorFieldName), ToTransformSoFar); - false -> - ToTransformSoFar - end - end, - BridgeConf, - ConnectorFields - ), %% Generate a connector name, if needed. Avoid doing so if there was a previous config. ConnectorName = case PreviousRawConfig of @@ -142,7 +125,7 @@ split_bridge_to_connector_and_action( _ -> generate_connector_name(ConnectorsMap, BridgeName, 0) end, %% Add connector field to action map - ActionMap = maps:put(<<"connector">>, ConnectorName, ActionMap0), + ActionMap = maps:put(<<"connector">>, ConnectorName, ActionConf), {BridgeType, BridgeName, ActionMap, ConnectorName, ConnectorMap}. generate_connector_name(ConnectorsMap, BridgeName, Attempt) -> @@ -191,7 +174,7 @@ transform_old_style_bridges_to_connector_and_actions_of_type( ), %% Add connectors and actions and remove bridges lists:foldl( - fun({BridgeType, BridgeName, ActionMap, ConnectorName, ConnectorMap}, RawConfigSoFar) -> + fun({BridgeType, BridgeName, ActionMap0, ConnectorName, ConnectorMap}, RawConfigSoFar) -> %% Add connector RawConfigSoFar1 = emqx_utils_maps:deep_put( [<<"connectors">>, to_bin(ConnectorType), ConnectorName], @@ -203,21 +186,12 @@ transform_old_style_bridges_to_connector_and_actions_of_type( [<<"bridges">>, to_bin(BridgeType), BridgeName], RawConfigSoFar1 ), - %% Take fields that should be in the top level of the action map - TopLevelFields = [<<"resource_opts">>, <<"enable">>, <<"connector">>], - TopLevelActionFields = maps:with(TopLevelFields, ActionMap), - ParametersActionFields = maps:without(TopLevelFields, ActionMap), - %% Action map should be wrapped under parameters key - WrappedParameters = #{<<"parameters">> => ParametersActionFields}, - FinalActionMap = maps:merge(TopLevelActionFields, WrappedParameters), - FixedActionMap = emqx_action_info:bridge_v1_to_action_fixup( - BridgeType, FinalActionMap - ), + ActionMap = emqx_action_info:bridge_v1_to_action_fixup(BridgeType, ActionMap0), %% Add action RawConfigSoFar3 = emqx_utils_maps:deep_put( [actions_config_name(), to_bin(maybe_rename(BridgeType)), BridgeName], RawConfigSoFar2, - FixedActionMap + ActionMap ), RawConfigSoFar3 end, diff --git a/apps/emqx_utils/src/emqx_utils_maps.erl b/apps/emqx_utils/src/emqx_utils_maps.erl index 3945b7201..a3b6961f0 100644 --- a/apps/emqx_utils/src/emqx_utils_maps.erl +++ b/apps/emqx_utils/src/emqx_utils_maps.erl @@ -34,7 +34,8 @@ best_effort_recursive_sum/3, if_only_to_toggle_enable/2, update_if_present/3, - put_if/4 + 
put_if/4, + rename/3 ]). -export_type([config_key/0, config_key_path/0]). @@ -309,3 +310,11 @@ put_if(Acc, K, V, true) -> Acc#{K => V}; put_if(Acc, _K, _V, false) -> Acc. + +rename(OldKey, NewKey, Map) -> + case maps:find(OldKey, Map) of + {ok, Value} -> + maps:put(NewKey, Value, maps:remove(OldKey, Map)); + error -> + Map + end. From e93e9ed1081737874b8ec899187341261184ba88 Mon Sep 17 00:00:00 2001 From: Ilya Averyanov Date: Fri, 17 Nov 2023 12:32:16 +0300 Subject: [PATCH 012/101] feat(rebalance): improve rebalance usability * make availability API endpoint public * allow connections during wait_health_check interval * make availability status calculation more consistent and lightweight * refactor test to get rid of some mocks and to use cth --- apps/emqx/priv/bpapi.versions | 1 + .../src/emqx_eviction_agent.app.src | 2 +- .../src/emqx_eviction_agent.erl | 64 +++++-- .../test/emqx_eviction_agent_SUITE.erl | 72 ++++++- .../test/emqx_eviction_agent_api_SUITE.erl | 19 +- .../emqx_eviction_agent_channel_SUITE.erl | 18 +- .../test/emqx_eviction_agent_cli_SUITE.erl | 16 +- .../test/emqx_eviction_agent_test_helpers.erl | 80 ++++---- .../src/emqx_node_rebalance.app.src | 5 +- .../src/emqx_node_rebalance.erl | 16 +- .../src/emqx_node_rebalance_agent.erl | 175 +++++++++++------- .../src/emqx_node_rebalance_api.erl | 9 +- .../src/emqx_node_rebalance_evacuation.erl | 49 +++-- .../src/emqx_node_rebalance_purge.erl | 2 +- .../src/emqx_node_rebalance_status.erl | 8 + .../proto/emqx_node_rebalance_proto_v3.erl | 96 ++++++++++ .../test/emqx_node_rebalance_SUITE.erl | 79 ++++++-- .../test/emqx_node_rebalance_agent_SUITE.erl | 38 ++-- .../test/emqx_node_rebalance_api_SUITE.erl | 30 +-- .../test/emqx_node_rebalance_cli_SUITE.erl | 114 ++++++------ .../emqx_node_rebalance_evacuation_SUITE.erl | 53 +++--- .../test/emqx_node_rebalance_purge_SUITE.erl | 36 ++-- .../test/emqx_node_rebalance_status_SUITE.erl | 1 + changes/ee/feat-11971.en.md | 4 + 24 files changed, 657 insertions(+), 330 deletions(-) create mode 100644 apps/emqx_node_rebalance/src/proto/emqx_node_rebalance_proto_v3.erl create mode 100644 changes/ee/feat-11971.en.md diff --git a/apps/emqx/priv/bpapi.versions b/apps/emqx/priv/bpapi.versions index 7042f5186..ea4ce159d 100644 --- a/apps/emqx/priv/bpapi.versions +++ b/apps/emqx/priv/bpapi.versions @@ -43,6 +43,7 @@ {emqx_mgmt_trace,2}. {emqx_node_rebalance,1}. {emqx_node_rebalance,2}. +{emqx_node_rebalance,3}. {emqx_node_rebalance_api,1}. {emqx_node_rebalance_api,2}. {emqx_node_rebalance_evacuation,1}. diff --git a/apps/emqx_eviction_agent/src/emqx_eviction_agent.app.src b/apps/emqx_eviction_agent/src/emqx_eviction_agent.app.src index 4f4cf5722..cc415d495 100644 --- a/apps/emqx_eviction_agent/src/emqx_eviction_agent.app.src +++ b/apps/emqx_eviction_agent/src/emqx_eviction_agent.app.src @@ -1,6 +1,6 @@ {application, emqx_eviction_agent, [ {description, "EMQX Eviction Agent"}, - {vsn, "5.1.4"}, + {vsn, "5.1.5"}, {registered, [ emqx_eviction_agent_sup, emqx_eviction_agent, diff --git a/apps/emqx_eviction_agent/src/emqx_eviction_agent.erl b/apps/emqx_eviction_agent/src/emqx_eviction_agent.erl index 42cffcb3d..9f1352b7c 100644 --- a/apps/emqx_eviction_agent/src/emqx_eviction_agent.erl +++ b/apps/emqx_eviction_agent/src/emqx_eviction_agent.erl @@ -15,8 +15,11 @@ -export([ start_link/0, enable/2, + enable/3, + default_options/0, disable/1, status/0, + enable_status/0, connection_count/0, all_channels_count/0, session_count/0, @@ -51,7 +54,7 @@ unhook/0 ]). --export_type([server_reference/0]). 
+-export_type([server_reference/0, kind/0, options/0]). -define(CONN_MODULES, [ emqx_connection, emqx_ws_connection, emqx_quic_connection, emqx_eviction_agent_channel @@ -67,15 +70,31 @@ connections := non_neg_integer(), sessions := non_neg_integer() }. --type kind() :: atom(). + +%% kind() is any() because it was not exported previously +%% and bpapi checker remembered it as any() +-type kind() :: any(). +-type options() :: #{ + allow_connections => boolean() +}. -spec start_link() -> startlink_ret(). start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). +-spec default_options() -> options(). +default_options() -> + #{ + allow_connections => false + }. + -spec enable(kind(), server_reference()) -> ok_or_error(eviction_agent_busy). enable(Kind, ServerReference) -> - gen_server:call(?MODULE, {enable, Kind, ServerReference}). + gen_server:call(?MODULE, {enable, Kind, ServerReference, default_options()}). + +-spec enable(kind(), server_reference(), options()) -> ok_or_error(eviction_agent_busy). +enable(Kind, ServerReference, #{} = Options) -> + gen_server:call(?MODULE, {enable, Kind, ServerReference, Options}). -spec disable(kind()) -> ok. disable(Kind) -> @@ -84,16 +103,20 @@ disable(Kind) -> -spec status() -> status(). status() -> case enable_status() of - {enabled, _Kind, _ServerReference} -> + {enabled, _Kind, _ServerReference, _Options} -> {enabled, stats()}; disabled -> disabled end. +-spec enable_status() -> disabled | {enabled, kind(), server_reference(), options()}. +enable_status() -> + persistent_term:get(?MODULE, disabled). + -spec evict_connections(pos_integer()) -> ok_or_error(disabled). evict_connections(N) -> case enable_status() of - {enabled, _Kind, ServerReference} -> + {enabled, _Kind, ServerReference, _Options} -> ok = do_evict_connections(N, ServerReference); disabled -> {error, disabled} @@ -112,15 +135,16 @@ evict_sessions(N, Nodes, ConnState) when is_list(Nodes) andalso length(Nodes) > 0 -> case enable_status() of - {enabled, _Kind, _ServerReference} -> + {enabled, _Kind, _ServerReference, _Options} -> ok = do_evict_sessions(N, Nodes, ConnState); disabled -> {error, disabled} end. +-spec purge_sessions(non_neg_integer()) -> ok_or_error(disabled). purge_sessions(N) -> case enable_status() of - {enabled, _Kind, _ServerReference} -> + {enabled, _Kind, _ServerReference, _Options} -> ok = do_purge_sessions(N); disabled -> {error, disabled} @@ -135,14 +159,14 @@ init([]) -> {ok, #{}}. 
%% enable -handle_call({enable, Kind, ServerReference}, _From, St) -> +handle_call({enable, Kind, ServerReference, Options}, _From, St) -> Reply = case enable_status() of disabled -> - ok = persistent_term:put(?MODULE, {enabled, Kind, ServerReference}); - {enabled, Kind, _ServerReference} -> - ok = persistent_term:put(?MODULE, {enabled, Kind, ServerReference}); - {enabled, _OtherKind, _ServerReference} -> + ok = persistent_term:put(?MODULE, {enabled, Kind, ServerReference, Options}); + {enabled, Kind, _ServerReference, _Options} -> + ok = persistent_term:put(?MODULE, {enabled, Kind, ServerReference, Options}); + {enabled, _OtherKind, _ServerReference, _Options} -> {error, eviction_agent_busy} end, {reply, Reply, St}; @@ -152,10 +176,10 @@ handle_call({disable, Kind}, _From, St) -> case enable_status() of disabled -> {error, disabled}; - {enabled, Kind, _ServerReference} -> + {enabled, Kind, _ServerReference, _Options} -> _ = persistent_term:erase(?MODULE), ok; - {enabled, _OtherKind, _ServerReference} -> + {enabled, _OtherKind, _ServerReference, _Options} -> {error, eviction_agent_busy} end, {reply, Reply, St}; @@ -180,8 +204,10 @@ code_change(_Vsn, State, _Extra) -> on_connect(_ConnInfo, _Props) -> case enable_status() of - {enabled, _Kind, _ServerReference} -> + {enabled, _Kind, _ServerReference, #{allow_connections := false}} -> {stop, {error, ?RC_USE_ANOTHER_SERVER}}; + {enabled, _Kind, _ServerReference, _Options} -> + ignore; disabled -> ignore end. @@ -192,7 +218,7 @@ on_connack( Props ) -> case enable_status() of - {enabled, _Kind, ServerReference} -> + {enabled, _Kind, ServerReference, _Options} -> {ok, Props#{'Server-Reference' => ServerReference}}; disabled -> {ok, Props} @@ -214,10 +240,10 @@ unhook() -> ok = emqx_hooks:del('client.connect', {?MODULE, on_connect}), ok = emqx_hooks:del('client.connack', {?MODULE, on_connack}). -enable_status() -> - persistent_term:get(?MODULE, disabled). +%%-------------------------------------------------------------------- +%% Internal funcs +%%-------------------------------------------------------------------- -% connection management stats() -> #{ connections => connection_count(), diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_SUITE.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_SUITE.erl index bc6f626d2..bf2865a78 100644 --- a/apps/emqx_eviction_agent/test/emqx_eviction_agent_SUITE.erl +++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_SUITE.erl @@ -15,7 +15,11 @@ -import( emqx_eviction_agent_test_helpers, - [emqtt_connect/0, emqtt_connect/1, emqtt_connect/2, emqtt_connect_for_publish/1] + [ + emqtt_connect/0, emqtt_connect/1, emqtt_connect/2, + emqtt_connect_for_publish/1, + case_specific_node_name/1 + ] ). -define(assertPrinted(Printed, Code), @@ -29,11 +33,19 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - emqx_common_test_helpers:start_apps([emqx_eviction_agent]), - Config. + Apps = emqx_cth_suite:start( + [ + emqx, + emqx_eviction_agent + ], + #{ + work_dir => emqx_cth_suite:work_dir(Config) + } + ), + [{apps, Apps} | Config]. -end_per_suite(_Config) -> - emqx_common_test_helpers:stop_apps([emqx_eviction_agent]). +end_per_suite(Config) -> + ok = emqx_cth_suite:stop(?config(apps, Config)). init_per_testcase(Case, Config) -> _ = emqx_eviction_agent:disable(test_eviction), @@ -41,10 +53,17 @@ init_per_testcase(Case, Config) -> start_slave(Case, Config). 
start_slave(t_explicit_session_takeover, Config) -> + NodeNames = + [ + t_explicit_session_takeover_donor, + t_explicit_session_takeover_recipient + ], ClusterNodes = emqx_eviction_agent_test_helpers:start_cluster( - [{evacuate_test1, 2883}, {evacuate_test2, 3883}], - [emqx_eviction_agent] + Config, + NodeNames, + [emqx_conf, emqx, emqx_eviction_agent] ), + ok = snabbkaffe:start_trace(), [{evacuate_nodes, ClusterNodes} | Config]; start_slave(_Case, Config) -> Config. @@ -56,8 +75,7 @@ end_per_testcase(TestCase, Config) -> stop_slave(t_explicit_session_takeover, Config) -> emqx_eviction_agent_test_helpers:stop_cluster( - ?config(evacuate_nodes, Config), - [emqx_eviction_agent] + ?config(evacuate_nodes, Config) ); stop_slave(_Case, _Config) -> ok. @@ -77,13 +95,16 @@ t_enable_disable(_Config) -> {ok, C0} = emqtt_connect(), ok = emqtt:disconnect(C0), + %% Enable ok = emqx_eviction_agent:enable(test_eviction, undefined), + %% Can't enable with different kind ?assertMatch( {error, eviction_agent_busy}, emqx_eviction_agent:enable(bar, undefined) ), + %% Enable with the same kind but different server ref ?assertMatch( ok, emqx_eviction_agent:enable(test_eviction, <<"srv">>) @@ -99,6 +120,39 @@ t_enable_disable(_Config) -> emqtt_connect() ), + %% Enable with the same kind and server ref and explicit options + ?assertMatch( + ok, + emqx_eviction_agent:enable(test_eviction, <<"srv">>, #{allow_connections => false}) + ), + + ?assertMatch( + {enabled, #{}}, + emqx_eviction_agent:status() + ), + + ?assertMatch( + {error, {use_another_server, #{}}}, + emqtt_connect() + ), + + %% Enable with the same kind and server ref and permissive options + ?assertMatch( + ok, + emqx_eviction_agent:enable(test_eviction, <<"srv">>, #{allow_connections => true}) + ), + + ?assertMatch( + {enabled, #{}}, + emqx_eviction_agent:status() + ), + + ?assertMatch( + {ok, _}, + emqtt_connect() + ), + + %% Can't enable using different kind ?assertMatch( {error, eviction_agent_busy}, emqx_eviction_agent:disable(bar) diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_api_SUITE.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_api_SUITE.erl index 3fe15e53a..341f543a7 100644 --- a/apps/emqx_eviction_agent/test/emqx_eviction_agent_api_SUITE.erl +++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_api_SUITE.erl @@ -22,12 +22,23 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - emqx_mgmt_api_test_util:init_suite([emqx_eviction_agent]), - Config. + Apps = emqx_cth_suite:start( + [ + emqx, + emqx_eviction_agent, + emqx_management, + {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"} + ], + #{ + work_dir => emqx_cth_suite:work_dir(Config) + } + ), + _ = emqx_common_test_http:create_default_app(), + [{apps, Apps} | Config]. end_per_suite(Config) -> - emqx_mgmt_api_test_util:end_suite([emqx_eviction_agent]), - Config. + emqx_common_test_http:delete_default_app(), + emqx_cth_suite:stop(?config(apps, Config)). %%-------------------------------------------------------------------- %% Tests diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl index b4d7ceb08..d87429339 100644 --- a/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl +++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl @@ -22,12 +22,20 @@ all() -> emqx_common_test_helpers:all(?MODULE). 
init_per_suite(Config) -> - emqx_common_test_helpers:start_apps([emqx_conf, emqx_eviction_agent]), - {ok, _} = emqx:update_config([rpc, port_discovery], manual), - Config. + Apps = emqx_cth_suite:start( + [ + emqx_conf, + emqx, + emqx_eviction_agent + ], + #{ + work_dir => emqx_cth_suite:work_dir(Config) + } + ), + [{apps, Apps} | Config]. -end_per_suite(_Config) -> - emqx_common_test_helpers:stop_apps([emqx_eviction_agent, emqx_conf]). +end_per_suite(Config) -> + ok = emqx_cth_suite:stop(?config(apps, Config)). %%-------------------------------------------------------------------- %% Tests diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_cli_SUITE.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_cli_SUITE.erl index 4cfb2fff5..70abd076f 100644 --- a/apps/emqx_eviction_agent/test/emqx_eviction_agent_cli_SUITE.erl +++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_cli_SUITE.erl @@ -14,13 +14,21 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - emqx_common_test_helpers:start_apps([emqx_eviction_agent]), - Config. + Apps = emqx_cth_suite:start( + [ + emqx, + emqx_eviction_agent + ], + #{ + work_dir => emqx_cth_suite:work_dir(Config) + } + ), + [{apps, Apps} | Config]. end_per_suite(Config) -> _ = emqx_eviction_agent:disable(foo), - emqx_common_test_helpers:stop_apps([emqx_eviction_agent]), - Config. + + emqx_cth_suite:stop(?config(apps, Config)). %%-------------------------------------------------------------------- %% Tests diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_test_helpers.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_test_helpers.erl index b3b3e8767..052f37952 100644 --- a/apps/emqx_eviction_agent/test/emqx_eviction_agent_test_helpers.erl +++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_test_helpers.erl @@ -15,13 +15,15 @@ emqtt_try_connect/1, - start_cluster/2, start_cluster/3, - stop_cluster/2, + stop_cluster/1, case_specific_node_name/2, case_specific_node_name/3, - concat_atoms/1 + concat_atoms/1, + + get_mqtt_port/2, + nodes_with_mqtt_tcp_ports/1 ]). emqtt_connect() -> @@ -83,52 +85,24 @@ emqtt_try_connect(Opts) -> Error end. -start_cluster(NamesWithPorts, Apps) -> - start_cluster(NamesWithPorts, Apps, []). - -start_cluster(NamesWithPorts, Apps, Env) -> - Specs = lists:map( - fun({ShortName, Port}) -> - {core, ShortName, #{listener_ports => [{tcp, Port}]}} - end, - NamesWithPorts +start_cluster(Config, NodeNames = [Node1 | _], Apps) -> + Spec = #{ + role => core, + join_to => emqx_cth_cluster:node_name(Node1), + listeners => true, + apps => Apps + }, + Cluster = [{NodeName, Spec} || NodeName <- NodeNames], + ClusterNodes = emqx_cth_cluster:start( + Cluster, + %% Use Node1 to scope the work dirs for all the nodes + #{work_dir => emqx_cth_suite:work_dir(Node1, Config)} ), - Opts0 = [ - {env, Env}, - {apps, Apps}, - {conf, - [{[listeners, Proto, default, enable], false} || Proto <- [ssl, ws, wss]] ++ - [{[rpc, mode], async}]} - ], - Cluster = emqx_common_test_helpers:emqx_cluster( - Specs, - Opts0 - ), - NodesWithPorts = [ - { - emqx_common_test_helpers:start_slave(Name, Opts), - proplists:get_value(Name, NamesWithPorts) - } - || {Name, Opts} <- Cluster - ], - NodesWithPorts. + nodes_with_mqtt_tcp_ports(ClusterNodes). 
-stop_cluster(NodesWithPorts, Apps) -> - lists:foreach( - fun({Node, _Port}) -> - lists:foreach( - fun(App) -> - rpc:call(Node, application, stop, [App]) - end, - Apps - ), - %% This sleep is just to make logs cleaner - ct:sleep(100), - _ = rpc:call(Node, emqx_common_test_helpers, stop_apps, []), - emqx_common_test_helpers:stop_slave(Node) - end, - NodesWithPorts - ). +stop_cluster(NamesWithPorts) -> + {Nodes, _Ports} = lists:unzip(NamesWithPorts), + ok = emqx_cth_cluster:stop(Nodes). case_specific_node_name(Module, Case) -> concat_atoms([Module, '__', Case]). @@ -145,3 +119,15 @@ concat_atoms(Atoms) -> ) ) ). + +get_mqtt_port(Node, Type) -> + {_IP, Port} = erpc:call(Node, emqx_config, get, [[listeners, Type, default, bind]]), + Port. + +nodes_with_mqtt_tcp_ports(Nodes) -> + lists:map( + fun(Node) -> + {Node, get_mqtt_port(Node, tcp)} + end, + Nodes + ). diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance.app.src b/apps/emqx_node_rebalance/src/emqx_node_rebalance.app.src index c66ec9f23..beb5f2abb 100644 --- a/apps/emqx_node_rebalance/src/emqx_node_rebalance.app.src +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance.app.src @@ -1,11 +1,12 @@ {application, emqx_node_rebalance, [ {description, "EMQX Node Rebalance"}, - {vsn, "5.0.6"}, + {vsn, "5.0.7"}, {registered, [ emqx_node_rebalance_sup, emqx_node_rebalance, emqx_node_rebalance_agent, - emqx_node_rebalance_evacuation + emqx_node_rebalance_evacuation, + emqx_node_rebalance_purge ]}, {applications, [ kernel, diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance.erl b/apps/emqx_node_rebalance/src/emqx_node_rebalance.erl index b2044c5fa..f9f9fc70e 100644 --- a/apps/emqx_node_rebalance/src/emqx_node_rebalance.erl +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance.erl @@ -41,6 +41,8 @@ start_error/0 ]). +-define(ENABLE_KIND, ?MODULE). + %%-------------------------------------------------------------------- %% APIs %%-------------------------------------------------------------------- @@ -143,9 +145,13 @@ handle_event( state_timeout, evict_conns, wait_health_check, - Data + #{donors := DonorNodes} = Data ) -> ?SLOG(warning, #{msg => "node_rebalance_wait_health_check_over"}), + _ = multicall(DonorNodes, enable_rebalance_agent, [ + self(), ?ENABLE_KIND, #{allow_connections => false} + ]), + ?tp(debug, node_rebalance_enable_started_prohibiting, #{}), {next_state, evicting_conns, Data, [{state_timeout, 0, evict_conns}]}; handle_event( state_timeout, @@ -232,7 +238,9 @@ enable_rebalance(#{opts := Opts} = Data) -> false -> {error, nothing_to_balance}; true -> - _ = multicall(DonorNodes, enable_rebalance_agent, [self()]), + _ = multicall(DonorNodes, enable_rebalance_agent, [ + self(), ?ENABLE_KIND, #{allow_connections => true} + ]), {ok, Data#{ donors => DonorNodes, recipients => RecipientNodes, @@ -242,7 +250,7 @@ enable_rebalance(#{opts := Opts} = Data) -> end. disable_rebalance(#{donors := DonorNodes}) -> - _ = multicall(DonorNodes, disable_rebalance_agent, [self()]), + _ = multicall(DonorNodes, disable_rebalance_agent, [self(), ?ENABLE_KIND]), ok. evict_conns(#{donors := DonorNodes, recipients := RecipientNodes, opts := Opts} = Data) -> @@ -370,7 +378,7 @@ avg(List) when length(List) >= 1 -> lists:sum(List) / length(List). 
multicall(Nodes, F, A) -> - case apply(emqx_node_rebalance_proto_v2, F, [Nodes | A]) of + case apply(emqx_node_rebalance_proto_v3, F, [Nodes | A]) of {Results, []} -> case lists:partition(fun is_ok/1, lists:zip(Nodes, Results)) of {OkResults, []} -> diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance_agent.erl b/apps/emqx_node_rebalance/src/emqx_node_rebalance_agent.erl index 250d03d9c..088d27b6b 100644 --- a/apps/emqx_node_rebalance/src/emqx_node_rebalance_agent.erl +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance_agent.erl @@ -11,10 +11,13 @@ -include_lib("stdlib/include/qlc.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-behaviour(gen_statem). + -export([ start_link/0, enable/1, enable/2, + enable/3, disable/1, disable/2, status/0 @@ -22,13 +25,13 @@ -export([ init/1, - handle_call/3, - handle_info/2, - handle_cast/2, - code_change/3 + callback_mode/0, + handle_event/4, + code_change/4 ]). -define(ENABLE_KIND, emqx_node_rebalance). +-define(SERVER_REFERENCE, undefined). %%-------------------------------------------------------------------- %% APIs @@ -38,16 +41,21 @@ -spec start_link() -> startlink_ret(). start_link() -> - gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + gen_statem:start_link({local, ?MODULE}, ?MODULE, [], []). -spec enable(pid()) -> ok_or_error(already_enabled | eviction_agent_busy). enable(CoordinatorPid) -> enable(CoordinatorPid, ?ENABLE_KIND). -spec enable(pid(), emqx_eviction_agent:kind()) -> - ok_or_error(already_enabled | eviction_agent_busy). + ok_or_error(invalid_coordinator | eviction_agent_busy). enable(CoordinatorPid, Kind) -> - gen_server:call(?MODULE, {enable, CoordinatorPid, Kind}). + enable(CoordinatorPid, Kind, emqx_eviction_agent:default_options()). + +-spec enable(pid(), emqx_eviction_agent:kind(), emqx_eviction_agent:options()) -> + ok_or_error(invalid_coordinator | eviction_agent_busy). +enable(CoordinatorPid, Kind, Options) -> + gen_statem:call(?MODULE, {enable, CoordinatorPid, Kind, Options}). -spec disable(pid()) -> ok_or_error(already_disabled | invalid_coordinator). disable(CoordinatorPid) -> @@ -56,88 +64,113 @@ disable(CoordinatorPid) -> -spec disable(pid(), emqx_eviction_agent:kind()) -> ok_or_error(already_disabled | invalid_coordinator). disable(CoordinatorPid, Kind) -> - gen_server:call(?MODULE, {disable, CoordinatorPid, Kind}). + gen_statem:call(?MODULE, {disable, CoordinatorPid, Kind}). -spec status() -> status(). status() -> - gen_server:call(?MODULE, status). + gen_statem:call(?MODULE, status). %%-------------------------------------------------------------------- -%% gen_server callbacks +%% gen_statem callbacks %%-------------------------------------------------------------------- +-define(disabled, disabled). +-define(enabled(ST), {enabled, ST}). + +callback_mode() -> + handle_event_function. + init([]) -> - {ok, #{}}. + {ok, ?disabled, #{}}. 
-handle_call({enable, CoordinatorPid, Kind}, _From, St) -> - case St of - #{coordinator_pid := _Pid} -> - {reply, {error, already_enabled}, St}; - _ -> - true = link(CoordinatorPid), - EvictionAgentPid = whereis(emqx_eviction_agent), - true = link(EvictionAgentPid), - case emqx_eviction_agent:enable(Kind, undefined) of - ok -> - {reply, ok, #{ - coordinator_pid => CoordinatorPid, - eviction_agent_pid => EvictionAgentPid - }}; - {error, eviction_agent_busy} -> - true = unlink(EvictionAgentPid), - true = unlink(CoordinatorPid), - {reply, {error, eviction_agent_busy}, St} - end - end; -handle_call({disable, CoordinatorPid, Kind}, _From, St) -> - case St of - #{ - coordinator_pid := CoordinatorPid, - eviction_agent_pid := EvictionAgentPid - } -> - _ = emqx_eviction_agent:disable(Kind), +%% disabled status + +%% disabled status, enable command +handle_event({call, From}, {enable, CoordinatorPid, Kind, Options}, ?disabled, Data) -> + true = link(CoordinatorPid), + EvictionAgentPid = whereis(emqx_eviction_agent), + true = link(EvictionAgentPid), + case emqx_eviction_agent:enable(Kind, ?SERVER_REFERENCE, Options) of + ok -> + {next_state, + ?enabled(#{ + coordinator_pid => CoordinatorPid, + eviction_agent_pid => EvictionAgentPid, + kind => Kind + }), Data, {reply, From, ok}}; + {error, eviction_agent_busy} -> true = unlink(EvictionAgentPid), true = unlink(CoordinatorPid), - NewSt = maps:without( - [coordinator_pid, eviction_agent_pid], - St - ), - {reply, ok, NewSt}; - #{coordinator_pid := _CoordinatorPid} -> - {reply, {error, invalid_coordinator}, St}; - #{} -> - {reply, {error, already_disabled}, St} + {keep_state_and_data, {reply, From, {error, eviction_agent_busy}}} end; -handle_call(status, _From, St) -> - case St of - #{coordinator_pid := Pid} -> - {reply, {enabled, Pid}, St}; - _ -> - {reply, disabled, St} - end; -handle_call(Msg, _From, St) -> +%% disabled status, disable command +handle_event({call, From}, {disable, _CoordinatorPid, _Kind}, ?disabled, _Data) -> + {keep_state_and_data, {reply, From, {error, already_disabled}}}; +%% disabled status, status command +handle_event({call, From}, status, ?disabled, _Data) -> + {keep_state_and_data, {reply, From, disabled}}; +%% enabled status + +%% enabled status, enable command +handle_event( + {call, From}, + {enable, CoordinatorPid, Kind, Options}, + ?enabled(#{ + coordinator_pid := CoordinatorPid, + kind := Kind + }), + _Data +) -> + %% just updating options + ok = emqx_eviction_agent:enable(Kind, ?SERVER_REFERENCE, Options), + {keep_state_and_data, {reply, From, ok}}; +handle_event({call, From}, {enable, _CoordinatorPid, _Kind, _Options}, ?enabled(_St), _Data) -> + {keep_state_and_data, {reply, From, {error, invalid_coordinator}}}; +%% enabled status, disable command +handle_event( + {call, From}, + {disable, CoordinatorPid, Kind}, + ?enabled(#{ + coordinator_pid := CoordinatorPid, + eviction_agent_pid := EvictionAgentPid + }), + Data +) -> + _ = emqx_eviction_agent:disable(Kind), + true = unlink(EvictionAgentPid), + true = unlink(CoordinatorPid), + {next_state, ?disabled, Data, {reply, From, ok}}; +handle_event({call, From}, {disable, _CoordinatorPid, _Kind}, ?enabled(_St), _Data) -> + {keep_state_and_data, {reply, From, {error, invalid_coordinator}}}; +%% enabled status, status command +handle_event({call, From}, status, ?enabled(#{coordinator_pid := CoordinatorPid}), _Data) -> + {keep_state_and_data, {reply, From, {enabled, CoordinatorPid}}}; +%% fallbacks + +handle_event({call, From}, Msg, State, Data) -> ?SLOG(warning, #{ msg => 
"unknown_call", call => Msg, - state => St + state => State, + data => Data }), - {reply, ignored, St}. - -handle_info(Msg, St) -> - ?SLOG(warning, #{ - msg => "unknown_info", - info => Msg, - state => St - }), - {noreply, St}. - -handle_cast(Msg, St) -> + {keep_state_and_data, {reply, From, ignored}}; +handle_event(cast, Msg, State, Data) -> ?SLOG(warning, #{ msg => "unknown_cast", cast => Msg, - state => St + state => State, + data => Data }), - {noreply, St}. + keep_state_and_data; +handle_event(info, Msg, State, Data) -> + ?SLOG(warning, #{ + msg => "unknown_info", + info => Msg, + state => State, + data => Data + }), + keep_state_and_data. -code_change(_Vsn, State, _Extra) -> - {ok, State}. +code_change(_Vsn, State, Data, _Extra) -> + {ok, State, Data}. diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance_api.erl b/apps/emqx_node_rebalance/src/emqx_node_rebalance_api.erl index 44ac0c291..a8f788abc 100644 --- a/apps/emqx_node_rebalance/src/emqx_node_rebalance_api.erl +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance_api.erl @@ -109,7 +109,8 @@ schema("/load_rebalance/availability_check") -> responses => #{ 200 => response_schema(), 503 => error_codes([?NODE_EVACUATING], <<"Node Evacuating">>) - } + }, + security => [] } }; schema("/load_rebalance/:node/start") -> @@ -248,10 +249,10 @@ schema("/load_rebalance/:node/evacuation/stop") -> }}. '/load_rebalance/availability_check'(get, #{}) -> - case emqx_node_rebalance_status:local_status() of - disabled -> + case emqx_node_rebalance_status:availability_status() of + available -> {200, #{}}; - _ -> + unavailable -> error_response(503, ?NODE_EVACUATING, <<"Node Evacuating">>) end. diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance_evacuation.erl b/apps/emqx_node_rebalance/src/emqx_node_rebalance_evacuation.erl index 6b6aa0675..11c0df3fa 100644 --- a/apps/emqx_node_rebalance/src/emqx_node_rebalance_evacuation.erl +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance_evacuation.erl @@ -57,7 +57,7 @@ migrate_to => migrate_to(), wait_health_check => number() }. --type start_error() :: already_started. +-type start_error() :: already_started | eviction_agent_busy. -type stats() :: #{ initial_conns := non_neg_integer(), initial_sessions := non_neg_integer(), @@ -102,9 +102,9 @@ callback_mode() -> handle_event_function. 
init([]) -> case emqx_node_rebalance_evacuation_persist:read(default_opts()) of - {ok, #{server_reference := ServerReference} = Opts} -> + {ok, Opts} -> ?SLOG(warning, #{msg => "restoring_evacuation_state", opts => Opts}), - case emqx_eviction_agent:enable(?MODULE, ServerReference) of + case enable_eviction_agent(Opts, _AllowConnections = false) of ok -> Data = init_data(#{}, Opts), ok = warn_enabled(), @@ -122,18 +122,26 @@ handle_event( {call, From}, {start, #{wait_health_check := WaitHealthCheck} = Opts}, disabled, - #{} = Data + Data ) -> - ?SLOG(warning, #{ - msg => "node_evacuation_started", - opts => Opts - }), - NewData = init_data(Data, Opts), - ok = emqx_node_rebalance_evacuation_persist:save(Opts), - {next_state, waiting_health_check, NewData, [ - {state_timeout, seconds(WaitHealthCheck), start_eviction}, - {reply, From, ok} - ]}; + case enable_eviction_agent(Opts, _AllowConnections = true) of + ok -> + ?SLOG(warning, #{ + msg => "node_evacuation_started", + opts => Opts + }), + NewData = init_data(Data, Opts), + ok = emqx_node_rebalance_evacuation_persist:save(Opts), + {next_state, waiting_health_check, NewData, [ + {state_timeout, seconds(WaitHealthCheck), start_eviction}, + {reply, From, ok} + ]}; + {error, eviction_agent_busy} -> + ?tp(warning, eviction_agent_busy, #{ + data => Data + }), + {keep_state_and_data, [{reply, From, {error, eviction_agent_busy}}]} + end; handle_event({call, From}, {start, _Opts}, _State, #{}) -> {keep_state_and_data, [{reply, From, {error, already_started}}]}; %% stop @@ -168,9 +176,9 @@ handle_event( state_timeout, start_eviction, waiting_health_check, - #{server_reference := ServerReference} = Data + Data ) -> - case emqx_eviction_agent:enable(?MODULE, ServerReference) of + case enable_eviction_agent(Data, _AllowConnections = false) of ok -> ?tp(debug, eviction_agent_started, #{ data => Data @@ -178,10 +186,8 @@ handle_event( {next_state, evicting_conns, Data, [ {state_timeout, 0, evict_conns} ]}; + %% This should never happen {error, eviction_agent_busy} -> - ?tp(warning, eviction_agent_busy, #{ - data => Data - }), {next_state, disabled, deinit(Data)} end; %% conn eviction @@ -212,7 +218,7 @@ handle_event( NewData = Data#{current_conns => 0}, ?SLOG(warning, #{msg => "node_evacuation_evict_conns_done"}), {next_state, waiting_takeover, NewData, [ - {state_timeout, timer:seconds(WaitTakeover), evict_sessions} + {state_timeout, seconds(WaitTakeover), evict_sessions} ]} end; handle_event( @@ -308,6 +314,9 @@ deinit(Data) -> maps:keys(default_opts()), maps:without(Keys, Data). +enable_eviction_agent(#{server_reference := ServerReference} = _Opts, AllowConnections) -> + emqx_eviction_agent:enable(?MODULE, ServerReference, #{allow_connections => AllowConnections}). + warn_enabled() -> ?SLOG(warning, #{msg => "node_evacuation_enabled"}), io:format( diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance_purge.erl b/apps/emqx_node_rebalance/src/emqx_node_rebalance_purge.erl index 81f1bfe03..17f4bd574 100644 --- a/apps/emqx_node_rebalance/src/emqx_node_rebalance_purge.erl +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance_purge.erl @@ -199,7 +199,7 @@ deinit(Data) -> maps:without(Keys, Data). 
multicall(Nodes, F, A) -> - case apply(emqx_node_rebalance_proto_v2, F, [Nodes | A]) of + case apply(emqx_node_rebalance_proto_v3, F, [Nodes | A]) of {Results, []} -> case lists:partition(fun is_ok/1, lists:zip(Nodes, Results)) of {_OkResults, []} -> diff --git a/apps/emqx_node_rebalance/src/emqx_node_rebalance_status.erl b/apps/emqx_node_rebalance/src/emqx_node_rebalance_status.erl index dbeb4d97f..d2cf02ef9 100644 --- a/apps/emqx_node_rebalance/src/emqx_node_rebalance_status.erl +++ b/apps/emqx_node_rebalance/src/emqx_node_rebalance_status.erl @@ -5,6 +5,7 @@ -module(emqx_node_rebalance_status). -export([ + availability_status/0, local_status/0, local_status/1, global_status/0, @@ -23,6 +24,13 @@ %% APIs %%-------------------------------------------------------------------- +-spec availability_status() -> available | unavailable. +availability_status() -> + case emqx_eviction_agent:enable_status() of + {enabled, _Kind, _ServerReference, _Options} -> unavailable; + disabled -> available + end. + -spec local_status() -> disabled | {evacuation, map()} | {purge, map()} | {rebalance, map()}. local_status() -> Checks = [ diff --git a/apps/emqx_node_rebalance/src/proto/emqx_node_rebalance_proto_v3.erl b/apps/emqx_node_rebalance/src/proto/emqx_node_rebalance_proto_v3.erl new file mode 100644 index 000000000..ab7943a6f --- /dev/null +++ b/apps/emqx_node_rebalance/src/proto/emqx_node_rebalance_proto_v3.erl @@ -0,0 +1,96 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_node_rebalance_proto_v3). + +-behaviour(emqx_bpapi). + +-export([ + introduced_in/0, + + available_nodes/1, + evict_connections/2, + evict_sessions/4, + connection_counts/1, + session_counts/1, + enable_rebalance_agent/2, + disable_rebalance_agent/2, + disconnected_session_counts/1, + + %% Introduced in v2: + enable_rebalance_agent/3, + disable_rebalance_agent/3, + purge_sessions/2, + + %% Introduced in v3: + enable_rebalance_agent/4 +]). + +-include_lib("emqx/include/bpapi.hrl"). +-include_lib("emqx/include/types.hrl"). + +introduced_in() -> + "5.4.0". + +-spec available_nodes([node()]) -> emqx_rpc:multicall_result(node()). +available_nodes(Nodes) -> + rpc:multicall(Nodes, emqx_node_rebalance, is_node_available, []). + +-spec evict_connections([node()], non_neg_integer()) -> + emqx_rpc:multicall_result(ok_or_error(disabled)). +evict_connections(Nodes, Count) -> + rpc:multicall(Nodes, emqx_eviction_agent, evict_connections, [Count]). + +-spec evict_sessions([node()], non_neg_integer(), [node()], emqx_channel:conn_state()) -> + emqx_rpc:multicall_result(ok_or_error(disabled)). +evict_sessions(Nodes, Count, RecipientNodes, ConnState) -> + rpc:multicall(Nodes, emqx_eviction_agent, evict_sessions, [Count, RecipientNodes, ConnState]). + +-spec connection_counts([node()]) -> emqx_rpc:multicall_result({ok, non_neg_integer()}). +connection_counts(Nodes) -> + rpc:multicall(Nodes, emqx_node_rebalance, connection_count, []). + +-spec session_counts([node()]) -> emqx_rpc:multicall_result({ok, non_neg_integer()}). +session_counts(Nodes) -> + rpc:multicall(Nodes, emqx_node_rebalance, session_count, []). + +-spec enable_rebalance_agent([node()], pid()) -> + emqx_rpc:multicall_result(ok_or_error(already_enabled | eviction_agent_busy)). +enable_rebalance_agent(Nodes, OwnerPid) -> + rpc:multicall(Nodes, emqx_node_rebalance_agent, enable, [OwnerPid]). 
+ +-spec disable_rebalance_agent([node()], pid()) -> + emqx_rpc:multicall_result(ok_or_error(already_disabled | invalid_coordinator)). +disable_rebalance_agent(Nodes, OwnerPid) -> + rpc:multicall(Nodes, emqx_node_rebalance_agent, disable, [OwnerPid]). + +-spec disconnected_session_counts([node()]) -> emqx_rpc:multicall_result({ok, non_neg_integer()}). +disconnected_session_counts(Nodes) -> + rpc:multicall(Nodes, emqx_node_rebalance, disconnected_session_count, []). + +%% Introduced in v2: + +-spec enable_rebalance_agent([node()], pid(), emqx_eviction_agent:kind()) -> + emqx_rpc:multicall_result(ok_or_error(already_enabled | eviction_agent_busy)). +enable_rebalance_agent(Nodes, OwnerPid, Kind) -> + rpc:multicall(Nodes, emqx_node_rebalance_agent, enable, [OwnerPid, Kind]). + +-spec disable_rebalance_agent([node()], pid(), emqx_eviction_agent:kind()) -> + emqx_rpc:multicall_result(ok_or_error(already_disabled | invalid_coordinator)). +disable_rebalance_agent(Nodes, OwnerPid, Kind) -> + rpc:multicall(Nodes, emqx_node_rebalance_agent, disable, [OwnerPid, Kind]). + +-spec purge_sessions([node()], non_neg_integer()) -> + emqx_rpc:multicall_result(ok_or_error(disabled)). +purge_sessions(Nodes, Count) -> + rpc:multicall(Nodes, emqx_eviction_agent, purge_sessions, [Count]). + +%% Introduced in v3: + +-spec enable_rebalance_agent( + [node()], pid(), emqx_eviction_agent:kind(), emqx_eviction_agent:options() +) -> + emqx_rpc:multicall_result(ok_or_error(eviction_agent_busy | invalid_coordinator)). +enable_rebalance_agent(Nodes, OwnerPid, Kind, Options) -> + rpc:multicall(Nodes, emqx_node_rebalance_agent, enable, [OwnerPid, Kind, Options]). diff --git a/apps/emqx_node_rebalance/test/emqx_node_rebalance_SUITE.erl b/apps/emqx_node_rebalance/test/emqx_node_rebalance_SUITE.erl index a818145a2..d996719fb 100644 --- a/apps/emqx_node_rebalance/test/emqx_node_rebalance_SUITE.erl +++ b/apps/emqx_node_rebalance/test/emqx_node_rebalance_SUITE.erl @@ -16,39 +16,46 @@ -import( emqx_eviction_agent_test_helpers, - [emqtt_connect_many/1, emqtt_connect_many/2, stop_many/1, case_specific_node_name/3] + [ + emqtt_connect_many/1, + emqtt_connect_many/2, + emqtt_try_connect/1, + stop_many/1, + case_specific_node_name/3, + start_cluster/3, + stop_cluster/1 + ] ). --define(START_APPS, [emqx_eviction_agent, emqx_node_rebalance]). - all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - ok = emqx_common_test_helpers:start_apps([]), - Config. + Apps = emqx_cth_suite:start([emqx], #{ + work_dir => ?config(priv_dir, Config) + }), + [{apps, Apps} | Config]. -end_per_suite(_Config) -> - ok = emqx_common_test_helpers:stop_apps([]), - ok. +end_per_suite(Config) -> + emqx_cth_suite:stop(?config(apps, Config)). init_per_testcase(Case, Config) -> - ClusterNodes = emqx_eviction_agent_test_helpers:start_cluster( + NodeNames = [ - {case_specific_node_name(?MODULE, Case, '_donor'), 2883}, - {case_specific_node_name(?MODULE, Case, '_recipient'), 3883} + case_specific_node_name(?MODULE, Case, '_donor'), + case_specific_node_name(?MODULE, Case, '_recipient') ], - ?START_APPS + ClusterNodes = start_cluster( + Config, + NodeNames, + [emqx, emqx_eviction_agent, emqx_node_rebalance] ), ok = snabbkaffe:start_trace(), [{cluster_nodes, ClusterNodes} | Config]. end_per_testcase(_Case, Config) -> ok = snabbkaffe:stop(), - ok = emqx_eviction_agent_test_helpers:stop_cluster( - ?config(cluster_nodes, Config), - ?START_APPS - ). + stop_cluster(?config(cluster_nodes, Config)). 
%%-------------------------------------------------------------------- %% Tests @@ -227,3 +234,43 @@ t_available_nodes(Config) -> [[DonorNode, RecipientNode]] ) ). + +t_before_health_check_over(Config) -> + process_flag(trap_exit, true), + + [{DonorNode, DonorPort}, {RecipientNode, _RecipientPort}] = ?config(cluster_nodes, Config), + + Nodes = [DonorNode, RecipientNode], + + Conns = emqtt_connect_many(DonorPort, 50), + + Opts = #{ + conn_evict_rate => 1, + sess_evict_rate => 1, + evict_interval => 1000, + abs_conn_threshold => 1, + abs_sess_threshold => 1, + rel_conn_threshold => 1.0, + rel_sess_threshold => 1.0, + wait_health_check => 2, + wait_takeover => 100, + nodes => Nodes + }, + + ?assertWaitEvent( + begin + ok = rpc:call(DonorNode, emqx_node_rebalance, start, [Opts]), + ?assertMatch( + ok, + emqtt_try_connect([{port, DonorPort}]) + ) + end, + #{?snk_kind := node_rebalance_enable_started_prohibiting}, + 5000 + ), + ?assertMatch( + {error, {use_another_server, #{}}}, + emqtt_try_connect([{port, DonorPort}]) + ), + + stop_many(Conns). diff --git a/apps/emqx_node_rebalance/test/emqx_node_rebalance_agent_SUITE.erl b/apps/emqx_node_rebalance/test/emqx_node_rebalance_agent_SUITE.erl index 8b21f9433..9b36fe616 100644 --- a/apps/emqx_node_rebalance/test/emqx_node_rebalance_agent_SUITE.erl +++ b/apps/emqx_node_rebalance/test/emqx_node_rebalance_agent_SUITE.erl @@ -38,12 +38,13 @@ groups() -> ]. init_per_suite(Config) -> - ok = emqx_common_test_helpers:start_apps([emqx_eviction_agent, emqx_node_rebalance]), - Config. + Apps = emqx_cth_suite:start([emqx, emqx_eviction_agent, emqx_node_rebalance], #{ + work_dir => ?config(priv_dir, Config) + }), + [{apps, Apps} | Config]. -end_per_suite(_Config) -> - ok = emqx_common_test_helpers:stop_apps([emqx_eviction_agent, emqx_node_rebalance]), - ok. +end_per_suite(Config) -> + emqx_cth_suite:stop(?config(apps, Config)). init_per_group(local, Config) -> [{cluster, false} | Config]; @@ -56,9 +57,13 @@ end_per_group(_Group, _Config) -> init_per_testcase(Case, Config) -> case ?config(cluster, Config) of true -> - ClusterNodes = emqx_eviction_agent_test_helpers:start_cluster( - [{case_specific_node_name(?MODULE, Case), 2883}], - [emqx_eviction_agent, emqx_node_rebalance] + ClusterNodes = emqx_cth_cluster:start( + [ + {case_specific_node_name(?MODULE, Case), #{ + apps => [emqx, emqx_eviction_agent, emqx_node_rebalance] + }} + ], + #{work_dir => emqx_cth_suite:work_dir(Case, Config)} ), [{cluster_nodes, ClusterNodes} | Config]; false -> @@ -68,10 +73,7 @@ init_per_testcase(Case, Config) -> end_per_testcase(_Case, Config) -> case ?config(cluster, Config) of true -> - emqx_eviction_agent_test_helpers:stop_cluster( - ?config(cluster_nodes, Config), - [emqx_eviction_agent, emqx_node_rebalance] - ); + emqx_cth_cluster:stop(?config(cluster_nodes, Config)); false -> ok end. 
@@ -94,7 +96,13 @@ t_enable_disable(_Config) -> ), ?assertEqual( - {error, already_enabled}, + {error, invalid_coordinator}, + emqx_node_rebalance_agent:enable(self(), other_rebalance) + ), + + %% Options update + ?assertEqual( + ok, emqx_node_rebalance_agent:enable(self()) ), @@ -150,7 +158,7 @@ t_unknown_messages(_Config) -> t_rebalance_agent_coordinator_fail(Config) -> process_flag(trap_exit, true), - [{Node, _}] = ?config(cluster_nodes, Config), + [Node] = ?config(cluster_nodes, Config), CoordinatorPid = spawn_link( fun() -> @@ -189,7 +197,7 @@ t_rebalance_agent_coordinator_fail(Config) -> t_rebalance_agent_fail(Config) -> process_flag(trap_exit, true), - [{Node, _}] = ?config(cluster_nodes, Config), + [Node] = ?config(cluster_nodes, Config), CoordinatorPid = spawn_link( fun() -> diff --git a/apps/emqx_node_rebalance/test/emqx_node_rebalance_api_SUITE.erl b/apps/emqx_node_rebalance/test/emqx_node_rebalance_api_SUITE.erl index 017e85971..8b8dc7e42 100644 --- a/apps/emqx_node_rebalance/test/emqx_node_rebalance_api_SUITE.erl +++ b/apps/emqx_node_rebalance/test/emqx_node_rebalance_api_SUITE.erl @@ -13,6 +13,7 @@ -import( emqx_mgmt_api_test_util, [ + request_api/3, request/2, request/3, uri/1 @@ -24,18 +25,17 @@ [emqtt_connect_many/2, stop_many/1, case_specific_node_name/3] ). --define(START_APPS, [emqx_eviction_agent, emqx_node_rebalance]). - all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - ok = emqx_common_test_helpers:start_apps(?START_APPS), - Config. + Apps = emqx_cth_suite:start([emqx, emqx_eviction_agent, emqx_node_rebalance], #{ + work_dir => ?config(priv_dir, Config) + }), + [{apps, Apps} | Config]. -end_per_suite(_Config) -> - ok = emqx_common_test_helpers:stop_apps(?START_APPS), - ok. +end_per_suite(Config) -> + emqx_cth_suite:stop(?config(apps, Config)). init_per_testcase(Case, Config) -> DonorNode = case_specific_node_name(?MODULE, Case, '_donor'), @@ -57,7 +57,6 @@ init_per_testcase(Case, Config) -> [{cluster_nodes, ClusterNodes} | Config]. end_per_testcase(_Case, Config) -> Nodes = ?config(cluster_nodes, Config), - erpc:multicall(Nodes, meck, unload, []), _ = emqx_cth_cluster:stop(Nodes), ok. @@ -473,28 +472,31 @@ t_start_stop_rebalance(Config) -> t_availability_check(Config) -> [DonorNode | _] = ?config(cluster_nodes, Config), ?assertMatch( - {ok, 200, #{}}, - api_get(["load_rebalance", "availability_check"]) + {ok, _}, + api_get_noauth(["load_rebalance", "availability_check"]) ), ok = rpc:call(DonorNode, emqx_node_rebalance_evacuation, start, [#{}]), ?assertMatch( - {ok, 503, _}, - api_get(["load_rebalance", "availability_check"]) + {error, {_, 503, _}}, + api_get_noauth(["load_rebalance", "availability_check"]) ), ok = rpc:call(DonorNode, emqx_node_rebalance_evacuation, stop, []), ?assertMatch( - {ok, 200, #{}}, - api_get(["load_rebalance", "availability_check"]) + {ok, _}, + api_get_noauth(["load_rebalance", "availability_check"]) ). %%-------------------------------------------------------------------- %% Helpers %%-------------------------------------------------------------------- +api_get_noauth(Path) -> + request_api(get, uri(Path), emqx_common_test_http:auth_header("invalid", "password")). 
+ api_get(Path) -> case request(get, uri(Path)) of {ok, Code, ResponseBody} -> diff --git a/apps/emqx_node_rebalance/test/emqx_node_rebalance_cli_SUITE.erl b/apps/emqx_node_rebalance/test/emqx_node_rebalance_cli_SUITE.erl index 7d0cab0ce..484a3efe5 100644 --- a/apps/emqx_node_rebalance/test/emqx_node_rebalance_cli_SUITE.erl +++ b/apps/emqx_node_rebalance/test/emqx_node_rebalance_cli_SUITE.erl @@ -15,27 +15,38 @@ [emqtt_connect_many/2, stop_many/1, case_specific_node_name/3] ). --define(START_APPS, [emqx_eviction_agent, emqx_node_rebalance]). +-define(START_APPS, [emqx, emqx_eviction_agent, emqx_node_rebalance]). all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - emqx_common_test_helpers:start_apps(?START_APPS), - Config. + Apps = emqx_cth_suite:start(?START_APPS, #{ + work_dir => ?config(priv_dir, Config) + }), + [{apps, Apps} | Config]. end_per_suite(Config) -> - emqx_common_test_helpers:stop_apps(lists:reverse(?START_APPS)), - Config. + emqx_cth_suite:stop(?config(apps, Config)). init_per_testcase(Case = t_rebalance, Config) -> _ = emqx_node_rebalance_evacuation:stop(), - ClusterNodes = emqx_eviction_agent_test_helpers:start_cluster( + Nodes = + [Node1 | _] = [ - {case_specific_node_name(?MODULE, Case, '_donor'), 2883}, - {case_specific_node_name(?MODULE, Case, '_recipient'), 3883} + case_specific_node_name(?MODULE, Case, '_1'), + case_specific_node_name(?MODULE, Case, '_2') ], - ?START_APPS + Spec = #{ + role => core, + join_to => emqx_cth_cluster:node_name(Node1), + listeners => true, + apps => ?START_APPS + }, + Cluster = [{Node, Spec} || Node <- Nodes], + ClusterNodes = emqx_cth_cluster:start( + Cluster, + #{work_dir => emqx_cth_suite:work_dir(Case, Config)} ), [{cluster_nodes, ClusterNodes} | Config]; init_per_testcase(_Case, Config) -> @@ -46,10 +57,7 @@ init_per_testcase(_Case, Config) -> end_per_testcase(t_rebalance, Config) -> _ = emqx_node_rebalance_evacuation:stop(), _ = emqx_node_rebalance:stop(), - _ = emqx_eviction_agent_test_helpers:stop_cluster( - ?config(cluster_nodes, Config), - ?START_APPS - ); + _ = emqx_cth_cluster:stop(?config(cluster_nodes, Config)); end_per_testcase(_Case, _Config) -> _ = emqx_node_rebalance_evacuation:stop(), _ = emqx_node_rebalance:stop(). @@ -157,6 +165,8 @@ t_evacuation(_Config) -> ). 
t_purge(_Config) -> + process_flag(trap_exit, true), + %% start with invalid args ?assertNot( emqx_node_rebalance_cli:cli(["start", "--purge", "--foo-bar"]) @@ -187,40 +197,44 @@ t_purge(_Config) -> atom_to_list(node()) ]) ), - with_some_sessions(fun() -> - ?assert( - emqx_node_rebalance_cli:cli([ - "start", - "--purge", - "--purge-rate", - "10" - ]) - ), - %% status - ok = emqx_node_rebalance_cli:cli(["status"]), - ok = emqx_node_rebalance_cli:cli(["node-status"]), - ok = emqx_node_rebalance_cli:cli(["node-status", atom_to_list(node())]), + Conns = emqtt_connect_many(get_mqtt_port(node(), tcp), 100), - ?assertMatch( - {enabled, #{}}, - emqx_node_rebalance_purge:status() - ), + ?assert( + emqx_node_rebalance_cli:cli([ + "start", + "--purge", + "--purge-rate", + "10" + ]) + ), + + %% status + ok = emqx_node_rebalance_cli:cli(["status"]), + ok = emqx_node_rebalance_cli:cli(["node-status"]), + ok = emqx_node_rebalance_cli:cli(["node-status", atom_to_list(node())]), + + ?assertMatch( + {enabled, #{}}, + emqx_node_rebalance_purge:status() + ), + + %% already enabled + ?assertNot( + emqx_node_rebalance_cli:cli([ + "start", + "--purge", + "--purge-rate", + "10" + ]) + ), - %% already enabled - ?assertNot( - emqx_node_rebalance_cli:cli([ - "start", - "--purge", - "--purge-rate", - "10" - ]) - ), - true = emqx_node_rebalance_cli:cli(["stop"]), - ok - end), %% stop + true = emqx_node_rebalance_cli:cli(["stop"]), + + %% stop when not started + false = emqx_node_rebalance_cli:cli(["stop"]), ?assertEqual( @@ -228,12 +242,13 @@ t_purge(_Config) -> emqx_node_rebalance_purge:status() ), - ok. + ok = stop_many(Conns). t_rebalance(Config) -> process_flag(trap_exit, true), - [{DonorNode, DonorPort}, {RecipientNode, _}] = ?config(cluster_nodes, Config), + [DonorNode, RecipientNode] = ?config(cluster_nodes, Config), + DonorPort = get_mqtt_port(DonorNode, tcp), %% start with invalid args ?assertNot( @@ -364,11 +379,6 @@ emqx_node_rebalance_cli(Node, Args) -> Result end. -%% to avoid it finishing too fast -with_some_sessions(Fn) -> - emqx_common_test_helpers:with_mock( - emqx_eviction_agent, - all_channels_count, - fun() -> 100 end, - Fn - ). +get_mqtt_port(Node, Type) -> + {_IP, Port} = erpc:call(Node, emqx_config, get, [[listeners, Type, default, bind]]), + Port. diff --git a/apps/emqx_node_rebalance/test/emqx_node_rebalance_evacuation_SUITE.erl b/apps/emqx_node_rebalance/test/emqx_node_rebalance_evacuation_SUITE.erl index b7f1ebb63..945b1566d 100644 --- a/apps/emqx_node_rebalance/test/emqx_node_rebalance_evacuation_SUITE.erl +++ b/apps/emqx_node_rebalance/test/emqx_node_rebalance_evacuation_SUITE.erl @@ -15,7 +15,13 @@ -import( emqx_eviction_agent_test_helpers, - [emqtt_connect/1, emqtt_try_connect/1, case_specific_node_name/3] + [ + emqtt_connect/1, + emqtt_try_connect/1, + case_specific_node_name/3, + start_cluster/3, + stop_cluster/1 + ] ). all() -> [{group, one_node}, {group, two_node}]. @@ -37,12 +43,13 @@ one_node_cases() -> emqx_common_test_helpers:all(?MODULE) -- two_node_cases(). init_per_suite(Config) -> - ok = emqx_common_test_helpers:start_apps([]), - Config. + Apps = emqx_cth_suite:start([emqx], #{ + work_dir => ?config(priv_dir, Config) + }), + [{apps, Apps} | Config]. -end_per_suite(_Config) -> - ok = emqx_common_test_helpers:stop_apps([]), - ok. +end_per_suite(Config) -> + emqx_cth_suite:stop(?config(apps, Config)). init_per_group(one_node, Config) -> [{cluster_type, one_node} | Config]; @@ -53,30 +60,23 @@ end_per_group(_Group, _Config) -> ok. 
init_per_testcase(Case, Config) -> - NodesWithPorts = + NodeNames = case ?config(cluster_type, Config) of one_node -> - [{case_specific_node_name(?MODULE, Case, '_evacuated'), 2883}]; + [case_specific_node_name(?MODULE, Case, '_evacuated')]; two_node -> [ - {case_specific_node_name(?MODULE, Case, '_evacuated'), 2883}, - {case_specific_node_name(?MODULE, Case, '_recipient'), 3883} + case_specific_node_name(?MODULE, Case, '_evacuated'), + case_specific_node_name(?MODULE, Case, '_recipient') ] end, - ClusterNodes = emqx_eviction_agent_test_helpers:start_cluster( - NodesWithPorts, - [emqx_eviction_agent, emqx_node_rebalance], - [{emqx, data_dir, case_specific_data_dir(Case, Config)}] - ), + ClusterNodes = start_cluster(Config, NodeNames, [emqx, emqx_eviction_agent, emqx_node_rebalance]), ok = snabbkaffe:start_trace(), [{cluster_nodes, ClusterNodes} | Config]. end_per_testcase(_Case, Config) -> ok = snabbkaffe:stop(), - ok = emqx_eviction_agent_test_helpers:stop_cluster( - ?config(cluster_nodes, Config), - [emqx_eviction_agent, emqx_node_rebalance] - ). + stop_cluster(?config(cluster_nodes, Config)). %%-------------------------------------------------------------------- %% Tests @@ -89,10 +89,9 @@ t_agent_busy(Config) -> ok = rpc:call(DonorNode, emqx_eviction_agent, enable, [other_rebalance, undefined]), - ?assertWaitEvent( - rpc:call(DonorNode, emqx_node_rebalance_evacuation, start, [opts(Config)]), - #{?snk_kind := eviction_agent_busy}, - 5000 + ?assertEqual( + {error, eviction_agent_busy}, + rpc:call(DonorNode, emqx_node_rebalance_evacuation, start, [opts(Config)]) ). t_already_started(Config) -> @@ -118,7 +117,13 @@ t_start(Config) -> [{DonorNode, DonorPort}] = ?config(cluster_nodes, Config), ?assertWaitEvent( - rpc:call(DonorNode, emqx_node_rebalance_evacuation, start, [opts(Config)]), + begin + rpc:call(DonorNode, emqx_node_rebalance_evacuation, start, [opts(Config)]), + ?assertMatch( + ok, + emqtt_try_connect([{port, DonorPort}]) + ) + end, #{?snk_kind := eviction_agent_started}, 5000 ), diff --git a/apps/emqx_node_rebalance/test/emqx_node_rebalance_purge_SUITE.erl b/apps/emqx_node_rebalance/test/emqx_node_rebalance_purge_SUITE.erl index 7cdcc4d71..f74da13f6 100644 --- a/apps/emqx_node_rebalance/test/emqx_node_rebalance_purge_SUITE.erl +++ b/apps/emqx_node_rebalance/test/emqx_node_rebalance_purge_SUITE.erl @@ -18,7 +18,9 @@ [ emqtt_connect/1, emqtt_try_connect/1, - case_specific_node_name/3 + case_specific_node_name/3, + stop_many/1, + get_mqtt_port/2 ] ). @@ -41,11 +43,13 @@ one_node_cases() -> emqx_common_test_helpers:all(?MODULE) -- two_nodes_cases(). init_per_suite(Config) -> - ok = emqx_common_test_helpers:start_apps([]), - Config. + Apps = emqx_cth_suite:start([emqx], #{ + work_dir => ?config(priv_dir, Config) + }), + [{apps, Apps} | Config]. -end_per_suite(_Config) -> - ok = emqx_common_test_helpers:stop_apps([]), +end_per_suite(Config) -> + ok = emqx_cth_suite:stop(?config(apps, Config)), ok. init_per_group(one_node, Config) -> @@ -78,7 +82,7 @@ init_per_testcase(TestCase, Config) -> Cluster = [{Node, Spec} || Node <- Nodes], ClusterNodes = emqx_cth_cluster:start( Cluster, - #{work_dir => ?config(priv_dir, Config)} + #{work_dir => emqx_cth_suite:work_dir(TestCase, Config)} ), ok = snabbkaffe:start_trace(), [{cluster_nodes, ClusterNodes} | Config]. @@ -128,20 +132,12 @@ case_specific_data_dir(Case, Config) -> PrivDir -> filename:join(PrivDir, atom_to_list(Case)) end. 
-get_mqtt_port(Node, Type) ->
-    {_IP, Port} = erpc:call(Node, emqx_config, get, [[listeners, Type, default, bind]]),
-    Port.
-
 %% to avoid it finishing too fast
 with_some_sessions(Node, Fn) ->
-    erpc:call(Node, fun() ->
-        emqx_common_test_helpers:with_mock(
-            emqx_eviction_agent,
-            all_channels_count,
-            fun() -> 100 end,
-            Fn
-        )
-    end).
+    Port = get_mqtt_port(Node, tcp),
+    Conns = emqtt_connect_many(Port, 100),
+    _ = erpc:call(Node, Fn),
+    ok = stop_many(Conns).
 
 drain_exits([ClientPid | Rest]) ->
     receive
@@ -189,6 +185,7 @@ t_agent_busy(Config) ->
     ok.
 
 t_already_started(Config) ->
+    process_flag(trap_exit, true),
     [Node] = ?config(cluster_nodes, Config),
     with_some_sessions(Node, fun() ->
         ok = emqx_node_rebalance_purge:start(opts(Config)),
@@ -216,6 +213,7 @@ t_not_started(Config) ->
     ).
 
 t_start(Config) ->
+    process_flag(trap_exit, true),
     [Node] = ?config(cluster_nodes, Config),
     Port = get_mqtt_port(Node, tcp),
 
@@ -233,6 +231,7 @@ t_start(Config) ->
     ok.
 
 t_non_persistence(Config) ->
+    process_flag(trap_exit, true),
     [Node] = ?config(cluster_nodes, Config),
     Port = get_mqtt_port(Node, tcp),
 
@@ -284,6 +283,7 @@ t_unknown_messages(Config) ->
 %%--------------------------------------------------------------------
 
 t_already_started_two(Config) ->
+    process_flag(trap_exit, true),
     [Node1, _Node2] = ?config(cluster_nodes, Config),
     with_some_sessions(Node1, fun() ->
         ok = emqx_node_rebalance_purge:start(opts(Config)),
diff --git a/apps/emqx_node_rebalance/test/emqx_node_rebalance_status_SUITE.erl b/apps/emqx_node_rebalance/test/emqx_node_rebalance_status_SUITE.erl
index f9c50b761..9351e065e 100644
--- a/apps/emqx_node_rebalance/test/emqx_node_rebalance_status_SUITE.erl
+++ b/apps/emqx_node_rebalance/test/emqx_node_rebalance_status_SUITE.erl
@@ -32,6 +32,7 @@ init_per_suite(Config) ->
     Apps = [
         emqx_conf,
         emqx,
+        emqx_eviction_agent,
         emqx_node_rebalance
     ],
     Cluster = [
diff --git a/changes/ee/feat-11971.en.md b/changes/ee/feat-11971.en.md
new file mode 100644
index 000000000..edf99cae2
--- /dev/null
+++ b/changes/ee/feat-11971.en.md
@@ -0,0 +1,4 @@
+Made `/api/v5/load_rebalance/availability_check` public, i.e. not requiring authentication. This simplifies load balancer setup.
+
+Made rebalance/evacuation more graceful during the wait health check phase. Connections to nodes marked for eviction are no longer prohibited during this phase.
+During this phase it is unknown whether the load balancer has already marked all of these nodes as unhealthy, so prohibiting connections to them may cause multiple unsuccessful reconnection attempts.

From e73bf716ae113008ffb936b30b27816ce775ce1d Mon Sep 17 00:00:00 2001
From: "Zaiming (Stone) Shi"
Date: Sun, 19 Nov 2023 22:08:42 +0100
Subject: [PATCH 013/101] fix(emqx_channel): do not log stale sock_close event as error

In some cases, EMQX may decide to close the socket and mark the connection
as 'disconnected', for example, when a DISCONNECT packet is received, or
when it fails to write data to the socket.
However, by the time EMQX decided to close the socket, the socket might
have already been closed by the peer, and the `tcp_closed` event is already
delivered to the process mailbox -- causing EMQX to handle the sock_close
event in the 'disconnected' state.
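
Roughly, the race and its (now silent) handling look like the sketch below.
This is only an illustration of the clauses touched by this patch in
`emqx_channel` and `emqx_stomp_channel`; see the diff for the actual change:

    %% The peer closed the socket first, so 'tcp_closed'/'ssl_closed' was
    %% already queued in the connection process mailbox by the time EMQX
    %% moved the channel to conn_state = disconnected. Handling the stale
    %% sock_closed event is harmless, so it is no longer logged as an error.
    handle_info({sock_closed, _Reason}, Channel = #channel{conn_state = disconnected}) ->
        {ok, Channel};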
--- apps/emqx/src/emqx_channel.erl | 6 ++++-- apps/emqx_gateway_stomp/src/emqx_stomp_channel.erl | 9 ++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/apps/emqx/src/emqx_channel.erl b/apps/emqx/src/emqx_channel.erl index 4f6d5ac6f..c2f62c840 100644 --- a/apps/emqx/src/emqx_channel.erl +++ b/apps/emqx/src/emqx_channel.erl @@ -1246,8 +1246,10 @@ handle_info( {ok, Channel3} -> {ok, ?REPLY_EVENT(disconnected), Channel3}; Shutdown -> Shutdown end; -handle_info({sock_closed, Reason}, Channel = #channel{conn_state = disconnected}) -> - ?SLOG(error, #{msg => "unexpected_sock_close", reason => Reason}), +handle_info({sock_closed, _Reason}, Channel = #channel{conn_state = disconnected}) -> + %% This can happen as a race: + %% EMQX closes socket and marks 'disconnected' but 'tcp_closed' or 'ssl_closed' + %% is already in process mailbox {ok, Channel}; handle_info(clean_authz_cache, Channel) -> ok = emqx_authz_cache:empty_authz_cache(), diff --git a/apps/emqx_gateway_stomp/src/emqx_stomp_channel.erl b/apps/emqx_gateway_stomp/src/emqx_stomp_channel.erl index 453fa9fd2..10d081e57 100644 --- a/apps/emqx_gateway_stomp/src/emqx_stomp_channel.erl +++ b/apps/emqx_gateway_stomp/src/emqx_stomp_channel.erl @@ -963,13 +963,12 @@ handle_info( NChannel = ensure_disconnected(Reason, Channel), shutdown(Reason, NChannel); handle_info( - {sock_closed, Reason}, + {sock_closed, _Reason}, Channel = #channel{conn_state = disconnected} ) -> - ?SLOG(error, #{ - msg => "unexpected_sock_closed", - reason => Reason - }), + %% This can happen as a race: + %% EMQX closes socket and marks 'disconnected' but 'tcp_closed' or 'ssl_closed' + %% is already in process mailbox {ok, Channel}; handle_info(clean_authz_cache, Channel) -> ok = emqx_authz_cache:empty_authz_cache(), From 648b6ac63ef4792c223be5eed72eee2f8c4a8bf6 Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Thu, 16 Nov 2023 16:35:46 +0700 Subject: [PATCH 014/101] chore(sessds): rename iterators -> subscriptions Also try to make clearer the difference between 2 flavors of topic filter representation in use. --- apps/emqx/src/emqx_persistent_session_ds.erl | 72 ++++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/apps/emqx/src/emqx_persistent_session_ds.erl b/apps/emqx/src/emqx_persistent_session_ds.erl index 6c0fc2dcc..ca3fc3514 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.erl +++ b/apps/emqx/src/emqx_persistent_session_ds.erl @@ -83,11 +83,12 @@ %% Currently, this is the clientid. We avoid `emqx_types:clientid()' because that can be %% an atom, in theory (?). -type id() :: binary(). --type topic_filter() :: emqx_ds:topic_filter(). +-type topic_filter() :: emqx_types:topic(). +-type topic_filter_words() :: emqx_ds:topic_filter(). -type subscription_id() :: {id(), topic_filter()}. -type subscription() :: #{ start_time := emqx_ds:time(), - propts := map(), + props := map(), extra := map() }. -type session() :: #{ @@ -98,7 +99,7 @@ %% When the session should expire expires_at := timestamp() | never, %% Client’s Subscriptions. - iterators := #{topic() => subscription()}, + subscriptions := #{topic_filter() => subscription()}, %% Inflight messages inflight := emqx_persistent_message_ds_replayer:inflight(), %% Receive maximum @@ -108,7 +109,6 @@ }. -type timestamp() :: emqx_utils_calendar:epoch_millisecond(). --type topic() :: emqx_types:topic(). -type clientinfo() :: emqx_types:clientinfo(). -type conninfo() :: emqx_session:conninfo(). -type replies() :: emqx_session:replies(). 
@@ -195,9 +195,9 @@ info(created_at, #{created_at := CreatedAt}) -> CreatedAt; info(is_persistent, #{}) -> true; -info(subscriptions, #{iterators := Iters}) -> +info(subscriptions, #{subscriptions := Iters}) -> maps:map(fun(_, #{props := SubOpts}) -> SubOpts end, Iters); -info(subscriptions_cnt, #{iterators := Iters}) -> +info(subscriptions_cnt, #{subscriptions := Iters}) -> maps:size(Iters); info(subscriptions_max, #{props := Conf}) -> maps:get(max_subscriptions, Conf); @@ -239,47 +239,47 @@ stats(Session) -> %% Client -> Broker: SUBSCRIBE / UNSUBSCRIBE %%-------------------------------------------------------------------- --spec subscribe(topic(), emqx_types:subopts(), session()) -> +-spec subscribe(topic_filter(), emqx_types:subopts(), session()) -> {ok, session()} | {error, emqx_types:reason_code()}. subscribe( TopicFilter, SubOpts, - Session = #{id := ID, iterators := Iters} -) when is_map_key(TopicFilter, Iters) -> - Iterator = maps:get(TopicFilter, Iters), - NIterator = update_subscription(TopicFilter, Iterator, SubOpts, ID), - {ok, Session#{iterators := Iters#{TopicFilter => NIterator}}}; + Session = #{id := ID, subscriptions := Subs} +) when is_map_key(TopicFilter, Subs) -> + Subscription = maps:get(TopicFilter, Subs), + NSubscription = update_subscription(TopicFilter, Subscription, SubOpts, ID), + {ok, Session#{subscriptions := Subs#{TopicFilter => NSubscription}}}; subscribe( TopicFilter, SubOpts, - Session = #{id := ID, iterators := Iters} + Session = #{id := ID, subscriptions := Subs} ) -> % TODO: max_subscriptions - Iterator = add_subscription(TopicFilter, SubOpts, ID), - {ok, Session#{iterators := Iters#{TopicFilter => Iterator}}}. + Subscription = add_subscription(TopicFilter, SubOpts, ID), + {ok, Session#{subscriptions := Subs#{TopicFilter => Subscription}}}. --spec unsubscribe(topic(), session()) -> +-spec unsubscribe(topic_filter(), session()) -> {ok, session(), emqx_types:subopts()} | {error, emqx_types:reason_code()}. unsubscribe( TopicFilter, - Session = #{id := ID, iterators := Iters} -) when is_map_key(TopicFilter, Iters) -> - Iterator = maps:get(TopicFilter, Iters), - SubOpts = maps:get(props, Iterator), + Session = #{id := ID, subscriptions := Subs} +) when is_map_key(TopicFilter, Subs) -> + Subscription = maps:get(TopicFilter, Subs), + SubOpts = maps:get(props, Subscription), ok = del_subscription(TopicFilter, ID), - {ok, Session#{iterators := maps:remove(TopicFilter, Iters)}, SubOpts}; + {ok, Session#{subscriptions := maps:remove(TopicFilter, Subs)}, SubOpts}; unsubscribe( _TopicFilter, _Session = #{} ) -> {error, ?RC_NO_SUBSCRIPTION_EXISTED}. --spec get_subscription(topic(), session()) -> +-spec get_subscription(topic_filter(), session()) -> emqx_types:subopts() | undefined. -get_subscription(TopicFilter, #{iterators := Iters}) -> - case maps:get(TopicFilter, Iters, undefined) of - Iterator = #{} -> - maps:get(props, Iterator); +get_subscription(TopicFilter, #{subscriptions := Subs}) -> + case maps:get(TopicFilter, Subs, undefined) of + Subscription = #{} -> + maps:get(props, Subscription); undefined -> undefined end. @@ -292,7 +292,7 @@ get_subscription(TopicFilter, #{iterators := Iters}) -> {ok, emqx_types:publish_result(), replies(), session()} | {error, emqx_types:reason_code()}. publish(_PacketId, Msg, Session) -> - %% TODO: + %% TODO: QoS2 Result = emqx_broker:publish(Msg), {ok, Result, [], Session}. 
@@ -397,7 +397,7 @@ terminate(_Reason, _Session = #{}) -> %%-------------------------------------------------------------------- --spec add_subscription(topic(), emqx_types:subopts(), id()) -> +-spec add_subscription(topic_filter(), emqx_types:subopts(), id()) -> subscription(). add_subscription(TopicFilterBin, SubOpts, DSSessionID) -> %% N.B.: we chose to update the router before adding the subscription to the @@ -427,7 +427,7 @@ add_subscription(TopicFilterBin, SubOpts, DSSessionID) -> %% we'll list streams and open iterators when implementing message replay. DSSubExt. --spec update_subscription(topic(), subscription(), emqx_types:subopts(), id()) -> +-spec update_subscription(topic_filter(), subscription(), emqx_types:subopts(), id()) -> subscription(). update_subscription(TopicFilterBin, DSSubExt, SubOpts, DSSessionID) -> TopicFilter = emqx_topic:words(TopicFilterBin), @@ -437,7 +437,7 @@ update_subscription(TopicFilterBin, DSSubExt, SubOpts, DSSessionID) -> ok = ?tp(persistent_session_ds_iterator_updated, #{sub => DSSubExt}), NDSSubExt. --spec del_subscription(topic(), id()) -> +-spec del_subscription(topic_filter(), id()) -> ok. del_subscription(TopicFilterBin, DSSessionId) -> TopicFilter = emqx_topic:words(TopicFilterBin), @@ -522,7 +522,7 @@ storage() -> %% Note: session API doesn't handle session takeovers, it's the job of %% the broker. -spec session_open(id()) -> - {ok, session(), #{topic() => subscription()}} | false. + {ok, session(), #{topic_filter_words() => subscription()}} | false. session_open(SessionId) -> transaction(fun() -> case mnesia:read(?SESSION_TAB, SessionId, write) of @@ -537,7 +537,7 @@ session_open(SessionId) -> end). -spec session_ensure_new(id(), _Props :: map()) -> - {ok, session(), #{topic() => subscription()}}. + {ok, session(), #{topic_filter_words() => subscription()}}. session_ensure_new(SessionId, Props) -> transaction(fun() -> ok = session_drop_subscriptions(SessionId), @@ -581,7 +581,7 @@ session_drop_subscriptions(DSSessionId) -> ). %% @doc Called when a client subscribes to a topic. Idempotent. --spec session_add_subscription(id(), topic_filter(), _Props :: map()) -> +-spec session_add_subscription(id(), topic_filter_words(), _Props :: map()) -> {ok, subscription(), _IsNew :: boolean()}. session_add_subscription(DSSessionId, TopicFilter, Props) -> DSSubId = {DSSessionId, TopicFilter}, @@ -606,7 +606,7 @@ session_add_subscription(DSSessionId, TopicFilter, Props) -> end end). --spec session_insert_subscription(id(), topic_filter(), map()) -> ds_sub(). +-spec session_insert_subscription(id(), topic_filter_words(), map()) -> ds_sub(). session_insert_subscription(DSSessionId, TopicFilter, Props) -> {DSSubId, StartMS} = new_subscription_id(DSSessionId, TopicFilter), DSSub = #ds_sub{ @@ -641,7 +641,7 @@ session_read_subscriptions(DSSessionId) -> ), mnesia:select(?SESSION_SUBSCRIPTIONS_TAB, MS, read). --spec new_subscription_id(id(), topic_filter()) -> {subscription_id(), integer()}. +-spec new_subscription_id(id(), topic_filter_words()) -> {subscription_id(), integer()}. new_subscription_id(DSSessionId, TopicFilter) -> %% Note: here we use _milliseconds_ to match with the timestamp %% field of `#message' record. @@ -688,7 +688,7 @@ renew_streams(DSSessionId) -> Subscriptions ). --spec renew_streams(id(), [ds_stream()], emqx_ds:topic_filter(), emqx_ds:time()) -> ok. +-spec renew_streams(id(), [ds_stream()], topic_filter_words(), emqx_ds:time()) -> ok. 
renew_streams(DSSessionId, ExistingStreams, TopicFilter, StartTime) -> AllStreams = emqx_ds:get_streams(?PERSISTENT_MESSAGE_DB, TopicFilter, StartTime), transaction( From 5b40304d1fbe938e36234f05f4bfb749cc7c6dbc Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Mon, 20 Nov 2023 13:25:24 +0700 Subject: [PATCH 015/101] chore(sessds): simplify subscriptions handling There's currently no point in storing parsed topic filters in the subscriptions table. --- .../emqx_persistent_session_ds_SUITE.erl | 17 +---- apps/emqx/src/emqx_persistent_session_ds.erl | 69 +++++++------------ 2 files changed, 28 insertions(+), 58 deletions(-) diff --git a/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl b/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl index f22a4f97e..72775228c 100644 --- a/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl +++ b/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl @@ -11,12 +11,6 @@ -include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include_lib("emqx/include/emqx_mqtt.hrl"). --include_lib("emqx/src/emqx_persistent_session_ds.hrl"). - --define(DEFAULT_KEYSPACE, default). --define(DS_SHARD_ID, <<"local">>). --define(DS_SHARD, {?DEFAULT_KEYSPACE, ?DS_SHARD_ID}). - -import(emqx_common_test_helpers, [on_exit/1]). %%------------------------------------------------------------------------------ @@ -92,12 +86,6 @@ get_mqtt_port(Node, Type) -> {_IP, Port} = erpc:call(Node, emqx_config, get, [[listeners, Type, default, bind]]), Port. -get_all_iterator_ids(Node) -> - Fn = fun(K, _V, Acc) -> [K | Acc] end, - erpc:call(Node, fun() -> - emqx_ds_storage_layer:foldl_iterator_prefix(?DS_SHARD, <<>>, Fn, []) - end). - wait_nodeup(Node) -> ?retry( _Sleep0 = 500, @@ -233,9 +221,8 @@ t_session_subscription_idempotency(Config) -> end, fun(Trace) -> ct:pal("trace:\n ~p", [Trace]), - SubTopicFilterWords = emqx_topic:words(SubTopicFilter), ?assertMatch( - {ok, #{}, #{SubTopicFilterWords := #{}}}, + #{subscriptions := #{SubTopicFilter := #{}}}, erpc:call(Node1, emqx_persistent_session_ds, session_open, [ClientId]) ) end @@ -308,7 +295,7 @@ t_session_unsubscription_idempotency(Config) -> fun(Trace) -> ct:pal("trace:\n ~p", [Trace]), ?assertMatch( - {ok, #{}, Subs = #{}} when map_size(Subs) =:= 0, + #{subscriptions := Subs = #{}} when map_size(Subs) =:= 0, erpc:call(Node1, emqx_persistent_session_ds, session_open, [ClientId]) ), ok diff --git a/apps/emqx/src/emqx_persistent_session_ds.erl b/apps/emqx/src/emqx_persistent_session_ds.erl index ca3fc3514..3a7232747 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.erl +++ b/apps/emqx/src/emqx_persistent_session_ds.erl @@ -142,7 +142,7 @@ open(#{clientid := ClientID} = _ClientInfo, ConnInfo) -> %% somehow isolate those idling not-yet-expired sessions into a separate process %% space, and move this call back into `emqx_cm` where it belongs. ok = emqx_cm:discard_session(ClientID), - case open_session(ClientID) of + case session_open(ClientID) of Session0 = #{} -> ensure_timers(), ReceiveMaximum = receive_maximum(ConnInfo), @@ -153,24 +153,9 @@ open(#{clientid := ClientID} = _ClientInfo, ConnInfo) -> end. ensure_session(ClientID, ConnInfo, Conf) -> - {ok, Session, #{}} = session_ensure_new(ClientID, Conf), + Session = session_ensure_new(ClientID, Conf), ReceiveMaximum = receive_maximum(ConnInfo), - Session#{iterators => #{}, receive_maximum => ReceiveMaximum}. 
- -open_session(ClientID) -> - case session_open(ClientID) of - {ok, Session, Subscriptions} -> - Session#{iterators => prep_subscriptions(Subscriptions)}; - false -> - false - end. - -prep_subscriptions(Subscriptions) -> - maps:fold( - fun(Topic, Subscription, Acc) -> Acc#{emqx_topic:join(Topic) => Subscription} end, - #{}, - Subscriptions - ). + Session#{subscriptions => #{}, receive_maximum => ReceiveMaximum}. -spec destroy(session() | clientinfo()) -> ok. destroy(#{id := ClientID}) -> @@ -392,14 +377,13 @@ disconnect(Session = #{}) -> -spec terminate(Reason :: term(), session()) -> ok. terminate(_Reason, _Session = #{}) -> - % TODO: close iterators ok. %%-------------------------------------------------------------------- -spec add_subscription(topic_filter(), emqx_types:subopts(), id()) -> subscription(). -add_subscription(TopicFilterBin, SubOpts, DSSessionID) -> +add_subscription(TopicFilter, SubOpts, DSSessionID) -> %% N.B.: we chose to update the router before adding the subscription to the %% session/iterator table. The reasoning for this is as follows: %% @@ -418,8 +402,7 @@ add_subscription(TopicFilterBin, SubOpts, DSSessionID) -> %% since it is guarded by a transaction context: we consider a subscription %% operation to be successful if it ended up changing this table. Both router %% and iterator information can be reconstructed from this table, if needed. - ok = emqx_persistent_session_ds_router:do_add_route(TopicFilterBin, DSSessionID), - TopicFilter = emqx_topic:words(TopicFilterBin), + ok = emqx_persistent_session_ds_router:do_add_route(TopicFilter, DSSessionID), {ok, DSSubExt, IsNew} = session_add_subscription( DSSessionID, TopicFilter, SubOpts ), @@ -429,8 +412,7 @@ add_subscription(TopicFilterBin, SubOpts, DSSessionID) -> -spec update_subscription(topic_filter(), subscription(), emqx_types:subopts(), id()) -> subscription(). -update_subscription(TopicFilterBin, DSSubExt, SubOpts, DSSessionID) -> - TopicFilter = emqx_topic:words(TopicFilterBin), +update_subscription(TopicFilter, DSSubExt, SubOpts, DSSessionID) -> {ok, NDSSubExt, false} = session_add_subscription( DSSessionID, TopicFilter, SubOpts ), @@ -439,8 +421,8 @@ update_subscription(TopicFilterBin, DSSubExt, SubOpts, DSSessionID) -> -spec del_subscription(topic_filter(), id()) -> ok. -del_subscription(TopicFilterBin, DSSessionId) -> - TopicFilter = emqx_topic:words(TopicFilterBin), +del_subscription(TopicFilter, DSSessionId) -> + %% TODO: transaction? ?tp_span( persistent_session_ds_subscription_delete, #{session_id => DSSessionId}, @@ -449,7 +431,7 @@ del_subscription(TopicFilterBin, DSSessionId) -> ?tp_span( persistent_session_ds_subscription_route_delete, #{session_id => DSSessionId}, - ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilterBin, DSSessionId) + ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, DSSessionId) ). %%-------------------------------------------------------------------- @@ -522,27 +504,33 @@ storage() -> %% Note: session API doesn't handle session takeovers, it's the job of %% the broker. -spec session_open(id()) -> - {ok, session(), #{topic_filter_words() => subscription()}} | false. + session() | false. 
session_open(SessionId) -> - transaction(fun() -> + ro_transaction(fun() -> case mnesia:read(?SESSION_TAB, SessionId, write) of [Record = #session{}] -> Session = export_session(Record), DSSubs = session_read_subscriptions(SessionId), Subscriptions = export_subscriptions(DSSubs), - {ok, Session, Subscriptions}; + Session#{ + subscriptions => Subscriptions, + inflight => emqx_persistent_message_ds_replayer:new() + }; [] -> false end end). -spec session_ensure_new(id(), _Props :: map()) -> - {ok, session(), #{topic_filter_words() => subscription()}}. + session(). session_ensure_new(SessionId, Props) -> transaction(fun() -> ok = session_drop_subscriptions(SessionId), Session = export_session(session_create(SessionId, Props)), - {ok, Session, #{}} + Session#{ + subscriptions => #{}, + inflight => emqx_persistent_message_ds_replayer:new() + } end). session_create(SessionId, Props) -> @@ -550,8 +538,7 @@ session_create(SessionId, Props) -> id = SessionId, created_at = erlang:system_time(millisecond), expires_at = never, - props = Props, - inflight = emqx_persistent_message_ds_replayer:new() + props = Props }, ok = mnesia:write(?SESSION_TAB, Session, write), Session. @@ -573,15 +560,14 @@ session_drop_subscriptions(DSSessionId) -> lists:foreach( fun(#ds_sub{id = DSSubId} = DSSub) -> TopicFilter = subscription_id_to_topic_filter(DSSubId), - TopicFilterBin = emqx_topic:join(TopicFilter), - ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilterBin, DSSessionId), + ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, DSSessionId), ok = session_del_subscription(DSSub) end, Subscriptions ). %% @doc Called when a client subscribes to a topic. Idempotent. --spec session_add_subscription(id(), topic_filter_words(), _Props :: map()) -> +-spec session_add_subscription(id(), topic_filter(), _Props :: map()) -> {ok, subscription(), _IsNew :: boolean()}. session_add_subscription(DSSessionId, TopicFilter, Props) -> DSSubId = {DSSessionId, TopicFilter}, @@ -606,7 +592,7 @@ session_add_subscription(DSSessionId, TopicFilter, Props) -> end end). --spec session_insert_subscription(id(), topic_filter_words(), map()) -> ds_sub(). +-spec session_insert_subscription(id(), topic_filter(), map()) -> ds_sub(). session_insert_subscription(DSSessionId, TopicFilter, Props) -> {DSSubId, StartMS} = new_subscription_id(DSSessionId, TopicFilter), DSSub = #ds_sub{ @@ -641,7 +627,7 @@ session_read_subscriptions(DSSessionId) -> ), mnesia:select(?SESSION_SUBSCRIPTIONS_TAB, MS, read). --spec new_subscription_id(id(), topic_filter_words()) -> {subscription_id(), integer()}. +-spec new_subscription_id(id(), topic_filter()) -> {subscription_id(), integer()}. new_subscription_id(DSSessionId, TopicFilter) -> %% Note: here we use _milliseconds_ to match with the timestamp %% field of `#message' record. @@ -808,10 +794,7 @@ receive_maximum(ConnInfo) -> list_all_sessions() -> DSSessionIds = mnesia:dirty_all_keys(?SESSION_TAB), Sessions = lists:map( - fun(SessionID) -> - {ok, Session, Subscriptions} = session_open(SessionID), - {SessionID, #{session => Session, subscriptions => Subscriptions}} - end, + fun(SessionID) -> {SessionID, session_open(SessionID)} end, DSSessionIds ), maps:from_list(Sessions). 
From 1246d714c51e206b5d62cb830fb04ba7314d3d4a Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Mon, 20 Nov 2023 13:28:20 +0700 Subject: [PATCH 016/101] feat(sessds): preserve acks / ranges in mnesia for replays --- .../emqx_persistent_session_ds_SUITE.erl | 10 +- .../emqx_persistent_message_ds_replayer.erl | 437 +++++++++++++----- apps/emqx/src/emqx_persistent_session_ds.erl | 170 +++---- apps/emqx/src/emqx_persistent_session_ds.hrl | 23 +- 4 files changed, 418 insertions(+), 222 deletions(-) diff --git a/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl b/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl index 72775228c..6c5fdc56e 100644 --- a/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl +++ b/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl @@ -357,18 +357,12 @@ do_t_session_discard(Params) -> _Attempts0 = 50, true = map_size(emqx_persistent_session_ds:list_all_streams()) > 0 ), - ?retry( - _Sleep0 = 100, - _Attempts0 = 50, - true = map_size(emqx_persistent_session_ds:list_all_iterators()) > 0 - ), ok = emqtt:stop(Client0), ?tp(notice, "disconnected", #{}), ?tp(notice, "reconnecting", #{}), - %% we still have iterators and streams + %% we still have streams ?assert(map_size(emqx_persistent_session_ds:list_all_streams()) > 0), - ?assert(map_size(emqx_persistent_session_ds:list_all_iterators()) > 0), Client1 = start_client(ReconnectOpts), {ok, _} = emqtt:connect(Client1), ?assertEqual([], emqtt:subscriptions(Client1)), @@ -381,7 +375,7 @@ do_t_session_discard(Params) -> ?assertEqual(#{}, emqx_persistent_session_ds:list_all_subscriptions()), ?assertEqual([], emqx_persistent_session_ds_router:topics()), ?assertEqual(#{}, emqx_persistent_session_ds:list_all_streams()), - ?assertEqual(#{}, emqx_persistent_session_ds:list_all_iterators()), + ?assertEqual(#{}, emqx_persistent_session_ds:list_all_pubranges()), ok = emqtt:stop(Client1), ?tp(notice, "disconnected", #{}), diff --git a/apps/emqx/src/emqx_persistent_message_ds_replayer.erl b/apps/emqx/src/emqx_persistent_message_ds_replayer.erl index 69b6675d8..a95e1c152 100644 --- a/apps/emqx/src/emqx_persistent_message_ds_replayer.erl +++ b/apps/emqx/src/emqx_persistent_message_ds_replayer.erl @@ -19,12 +19,12 @@ -module(emqx_persistent_message_ds_replayer). %% API: --export([new/0, next_packet_id/1, replay/2, commit_offset/3, poll/3, n_inflight/1]). +-export([new/0, open/1, next_packet_id/1, replay/1, commit_offset/3, poll/3, n_inflight/1]). %% internal exports: -export([]). --export_type([inflight/0]). +-export_type([inflight/0, seqno/0]). -include_lib("emqx/include/logger.hrl"). -include("emqx_persistent_session_ds.hrl"). @@ -42,17 +42,28 @@ -type seqno() :: non_neg_integer(). -record(range, { - stream :: emqx_ds:stream(), + stream :: _StreamRef, first :: seqno(), - last :: seqno(), - iterator_next :: emqx_ds:iterator() | undefined + until :: seqno(), + %% Type of a range: + %% * Inflight range is a range of yet unacked messages from this stream. + %% * Checkpoint range was already acked, its purpose is to keep track of the + %% very last iterator for this stream. + type :: inflight | checkpoint, + %% Meaning of this depends on the type of the range: + %% * For inflight range, this is the iterator pointing to the first message in + %% the range. + %% * For checkpoint range, this is the iterator pointing right past the last + %% message in the range. + iterator :: emqx_ds:iterator() }). -type range() :: #range{}. 
-record(inflight, { - next_seqno = 0 :: seqno(), - acked_seqno = 0 :: seqno(), + next_seqno = 1 :: seqno(), + acked_until = 1 :: seqno(), + %% Ranges are sorted in ascending order of their sequence numbers. offset_ranges = [] :: [range()] }). @@ -66,34 +77,37 @@ new() -> #inflight{}. +-spec open(emqx_persistent_session_ds:id()) -> inflight(). +open(SessionId) -> + Ranges = ro_transaction(fun() -> get_ranges(SessionId) end), + {AckedUntil, NextSeqno} = compute_inflight_range(Ranges), + #inflight{ + acked_until = AckedUntil, + next_seqno = NextSeqno, + offset_ranges = Ranges + }. + -spec next_packet_id(inflight()) -> {emqx_types:packet_id(), inflight()}. -next_packet_id(Inflight0 = #inflight{next_seqno = LastSeqNo}) -> - Inflight = Inflight0#inflight{next_seqno = LastSeqNo + 1}, - case LastSeqNo rem 16#10000 of - 0 -> - %% We skip sequence numbers that lead to PacketId = 0 to - %% simplify math. Note: it leads to occasional gaps in the - %% sequence numbers. - next_packet_id(Inflight); - PacketId -> - {PacketId, Inflight} - end. +next_packet_id(Inflight0 = #inflight{next_seqno = LastSeqno}) -> + Inflight = Inflight0#inflight{next_seqno = next_seqno(LastSeqno)}, + {seqno_to_packet_id(LastSeqno), Inflight}. -spec n_inflight(inflight()) -> non_neg_integer(). -n_inflight(#inflight{next_seqno = NextSeqNo, acked_seqno = AckedSeqno}) -> - %% NOTE: this function assumes that gaps in the sequence ID occur - %% _only_ when the packet ID wraps: - case AckedSeqno >= ((NextSeqNo bsr 16) bsl 16) of - true -> - NextSeqNo - AckedSeqno; - false -> - NextSeqNo - AckedSeqno - 1 - end. +n_inflight(#inflight{next_seqno = NextSeqno, acked_until = AckedUntil}) -> + range_size(AckedUntil, NextSeqno). --spec replay(emqx_persistent_session_ds:id(), inflight()) -> - emqx_session:replies(). -replay(_SessionId, _Inflight = #inflight{offset_ranges = _Ranges}) -> - []. +-spec replay(inflight()) -> + {emqx_session:replies(), inflight()}. +replay(Inflight0 = #inflight{acked_until = AckedUntil, offset_ranges = Ranges0}) -> + {Ranges, Replies} = lists:mapfoldr( + fun(Range, Acc) -> + replay_range(Range, AckedUntil, Acc) + end, + [], + Ranges0 + ), + Inflight = Inflight0#inflight{offset_ranges = Ranges}, + {Replies, Inflight}. -spec commit_offset(emqx_persistent_session_ds:id(), emqx_types:packet_id(), inflight()) -> {_IsValidOffset :: boolean(), inflight()}. @@ -101,47 +115,34 @@ commit_offset( SessionId, PacketId, Inflight0 = #inflight{ - acked_seqno = AckedSeqno0, next_seqno = NextSeqNo, offset_ranges = Ranges0 + acked_until = AckedUntil, next_seqno = NextSeqno } ) -> - AckedSeqno = - case packet_id_to_seqno(NextSeqNo, PacketId) of - N when N > AckedSeqno0; AckedSeqno0 =:= 0 -> - N; - OutOfRange -> - ?SLOG(warning, #{ - msg => "out-of-order_ack", - prev_seqno => AckedSeqno0, - acked_seqno => OutOfRange, - next_seqno => NextSeqNo, - packet_id => PacketId - }), - AckedSeqno0 - end, - Ranges = lists:filter( - fun(#range{stream = Stream, last = LastSeqno, iterator_next = ItNext}) -> - case LastSeqno =< AckedSeqno of - true -> - %% This range has been fully - %% acked. Remove it and replace saved - %% iterator with the trailing iterator. - update_iterator(SessionId, Stream, ItNext), - false; - false -> - %% This range still has unacked - %% messages: - true - end - end, - Ranges0 - ), - Inflight = Inflight0#inflight{acked_seqno = AckedSeqno, offset_ranges = Ranges}, - {true, Inflight}. 
+ case packet_id_to_seqno(NextSeqno, PacketId) of + Seqno when Seqno >= AckedUntil andalso Seqno < NextSeqno -> + %% TODO + %% We do not preserve `acked_until` in the database. Instead, we discard + %% fully acked ranges from the database. In effect, this means that the + %% most recent `acked_until` the client has sent may be lost in case of a + %% crash or client loss. + Inflight1 = Inflight0#inflight{acked_until = next_seqno(Seqno)}, + Inflight = discard_acked(SessionId, Inflight1), + {true, Inflight}; + OutOfRange -> + ?SLOG(warning, #{ + msg => "out-of-order_ack", + acked_until => AckedUntil, + acked_seqno => OutOfRange, + next_seqno => NextSeqno, + packet_id => PacketId + }), + {false, Inflight0} + end. -spec poll(emqx_persistent_session_ds:id(), inflight(), pos_integer()) -> {emqx_session:replies(), inflight()}. poll(SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < 16#7fff -> - #inflight{next_seqno = NextSeqNo0, acked_seqno = AckedSeqno} = + #inflight{next_seqno = NextSeqNo0, acked_until = AckedSeqno} = Inflight0, FetchThreshold = max(1, WindowSize div 2), FreeSpace = AckedSeqno + WindowSize - NextSeqNo0, @@ -153,6 +154,7 @@ poll(SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < 16#7fff %% client get stuck even? {[], Inflight0}; true -> + %% TODO: Wrap this in `mria:async_dirty/2`? Streams = shuffle(get_streams(SessionId)), fetch(SessionId, Inflight0, Streams, FreeSpace, []) end. @@ -165,75 +167,206 @@ poll(SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < 16#7fff %% Internal functions %%================================================================================ -fetch(_SessionId, Inflight, _Streams = [], _N, Acc) -> - {lists:reverse(Acc), Inflight}; -fetch(_SessionId, Inflight, _Streams, 0, Acc) -> - {lists:reverse(Acc), Inflight}; -fetch(SessionId, Inflight0, [Stream | Streams], N, Publishes0) -> - #inflight{next_seqno = FirstSeqNo, offset_ranges = Ranges0} = Inflight0, - ItBegin = get_last_iterator(SessionId, Stream, Ranges0), +compute_inflight_range([]) -> + {1, 1}; +compute_inflight_range(Ranges) -> + _RangeLast = #range{until = LastSeqno} = lists:last(Ranges), + RangesUnacked = lists:dropwhile(fun(#range{type = T}) -> T == checkpoint end, Ranges), + case RangesUnacked of + [#range{first = AckedUntil} | _] -> + {AckedUntil, LastSeqno}; + [] -> + {LastSeqno, LastSeqno} + end. + +get_ranges(SessionId) -> + DSRanges = mnesia:match_object( + ?SESSION_PUBRANGE_TAB, + #ds_pubrange{id = {SessionId, '_'}, _ = '_'}, + read + ), + lists:map(fun export_range/1, DSRanges). + +export_range(#ds_pubrange{ + type = Type, id = {_, First}, until = Until, stream = StreamRef, iterator = It +}) -> + #range{type = Type, stream = StreamRef, first = First, until = Until, iterator = It}. 
+ +fetch(SessionId, Inflight0, [DSStream | Streams], N, Acc) when N > 0 -> + #inflight{next_seqno = FirstSeqno, offset_ranges = Ranges0} = Inflight0, + ItBegin = get_last_iterator(DSStream, Ranges0), {ok, ItEnd, Messages} = emqx_ds:next(?PERSISTENT_MESSAGE_DB, ItBegin, N), - {NMessages, Publishes, Inflight1} = - lists:foldl( - fun(Msg, {N0, PubAcc0, InflightAcc0}) -> - {PacketId, InflightAcc} = next_packet_id(InflightAcc0), - PubAcc = [{PacketId, Msg} | PubAcc0], - {N0 + 1, PubAcc, InflightAcc} - end, - {0, Publishes0, Inflight0}, - Messages - ), - #inflight{next_seqno = LastSeqNo} = Inflight1, - case NMessages > 0 of - true -> - Range = #range{ - first = FirstSeqNo, - last = LastSeqNo - 1, - stream = Stream, - iterator_next = ItEnd + {Publishes, UntilSeqno} = publish(FirstSeqno, Messages), + case range_size(FirstSeqno, UntilSeqno) of + Size when Size > 0 -> + Range0 = #range{ + type = inflight, + first = FirstSeqno, + until = UntilSeqno, + stream = DSStream#ds_stream.ref, + iterator = ItBegin }, - Inflight = Inflight1#inflight{offset_ranges = Ranges0 ++ [Range]}, - fetch(SessionId, Inflight, Streams, N - NMessages, Publishes); - false -> - fetch(SessionId, Inflight1, Streams, N, Publishes) - end. + %% We need to preserve the iterator pointing to the beginning of the + %% range, so that we can replay it if needed. + ok = preserve_range(SessionId, Range0), + %% ...Yet we need to keep the iterator pointing past the end of the + %% range, so that we can pick up where we left off: it will become + %% `ItBegin` of the next range for this stream. + Range = Range0#range{iterator = ItEnd}, + Ranges = Ranges0 ++ [Range#range{iterator = ItEnd}], + Inflight = Inflight0#inflight{ + next_seqno = UntilSeqno, + offset_ranges = Ranges + }, + fetch(SessionId, Inflight, Streams, N - Size, [Publishes | Acc]); + 0 -> + fetch(SessionId, Inflight0, Streams, N, Acc) + end; +fetch(_SessionId, Inflight, _Streams, _N, Acc) -> + Publishes = lists:append(lists:reverse(Acc)), + {Publishes, Inflight}. --spec update_iterator(emqx_persistent_session_ds:id(), emqx_ds:stream(), emqx_ds:iterator()) -> ok. -update_iterator(DSSessionId, Stream, Iterator) -> - %% Workaround: we convert `Stream' to a binary before attempting to store it in - %% mnesia(rocksdb) because of a bug in `mnesia_rocksdb' when trying to do - %% `mnesia:dirty_all_keys' later. - StreamBin = term_to_binary(Stream), - mria:dirty_write(?SESSION_ITER_TAB, #ds_iter{id = {DSSessionId, StreamBin}, iter = Iterator}). +discard_acked( + SessionId, + Inflight0 = #inflight{acked_until = AckedUntil, offset_ranges = Ranges0} +) -> + %% TODO: This could be kept and incrementally updated in the inflight state. + Checkpoints = find_checkpoints(Ranges0), + %% TODO: Wrap this in `mria:async_dirty/2`? + Ranges = discard_acked_ranges(SessionId, AckedUntil, Checkpoints, Ranges0), + Inflight0#inflight{offset_ranges = Ranges}. -get_last_iterator(SessionId, Stream, Ranges) -> - case lists:keyfind(Stream, #range.stream, lists:reverse(Ranges)) of - false -> - get_iterator(SessionId, Stream); - #range{iterator_next = Next} -> - Next - end. - --spec get_iterator(emqx_persistent_session_ds:id(), emqx_ds:stream()) -> emqx_ds:iterator(). -get_iterator(DSSessionId, Stream) -> - %% See comment in `update_iterator'. - StreamBin = term_to_binary(Stream), - Id = {DSSessionId, StreamBin}, - [#ds_iter{iter = It}] = mnesia:dirty_read(?SESSION_ITER_TAB, Id), - It. - --spec get_streams(emqx_persistent_session_ds:id()) -> [emqx_ds:stream()]. 
-get_streams(SessionId) -> - lists:map( - fun(#ds_stream{stream = Stream}) -> - Stream +find_checkpoints(Ranges) -> + lists:foldl( + fun(#range{stream = StreamRef, until = Until}, Acc) -> + %% For each stream, remember the last range over this stream. + Acc#{StreamRef => Until} end, - mnesia:dirty_read(?SESSION_STREAM_TAB, SessionId) + #{}, + Ranges ). +discard_acked_ranges( + SessionId, + AckedUntil, + Checkpoints, + [Range = #range{until = Until, stream = StreamRef} | Rest] +) when Until =< AckedUntil -> + %% This range has been fully acked. + %% Either discard it completely, or preserve the iterator for the next range + %% over this stream (i.e. a checkpoint). + RangeKept = + case maps:get(StreamRef, Checkpoints) of + CP when CP > Until -> + discard_range(SessionId, Range), + []; + Until -> + checkpoint_range(SessionId, Range), + [Range#range{type = checkpoint}] + end, + %% Since we're (intentionally) not using transactions here, it's important to + %% issue database writes in the same order in which ranges are stored: from + %% the oldest to the newest. This is also why we need to compute which ranges + %% should become checkpoints before we start writing anything. + RangeKept ++ discard_acked_ranges(SessionId, AckedUntil, Checkpoints, Rest); +discard_acked_ranges(_SessionId, _AckedUntil, _Checkpoints, Ranges) -> + %% The rest of ranges (if any) still have unacked messages. + Ranges. + +replay_range( + Range0 = #range{type = inflight, first = First, until = Until, iterator = It}, + AckedUntil, + Acc +) -> + Size = range_size(First, Until), + FirstUnacked = max(First, AckedUntil), + {ok, ItNext, Messages} = emqx_ds:next(?PERSISTENT_MESSAGE_DB, It, Size), + MessagesUnacked = + case FirstUnacked of + First -> + Messages; + _ -> + lists:nthtail(range_size(First, FirstUnacked), Messages) + end, + %% Asserting that range is consistent with the message storage state. + {Replies, Until} = publish(FirstUnacked, MessagesUnacked), + Range = Range0#range{iterator = ItNext}, + {Range, Replies ++ Acc}; +replay_range(Range0 = #range{type = checkpoint}, _AckedUntil, Acc) -> + {Range0, Acc}. + +publish(FirstSeqno, Messages) -> + lists:mapfoldl( + fun(Message, Seqno) -> + PacketId = seqno_to_packet_id(Seqno), + {{PacketId, Message}, next_seqno(Seqno)} + end, + FirstSeqno, + Messages + ). + +-spec preserve_range(emqx_persistent_session_ds:id(), range()) -> ok. +preserve_range( + SessionId, + #range{first = First, until = Until, stream = StreamRef, iterator = It} +) -> + DSRange = #ds_pubrange{ + id = {SessionId, First}, + until = Until, + stream = StreamRef, + type = inflight, + iterator = It + }, + mria:dirty_write(?SESSION_PUBRANGE_TAB, DSRange). + +-spec discard_range(emqx_persistent_session_ds:id(), range()) -> ok. +discard_range(SessionId, #range{first = First}) -> + mria:dirty_delete(?SESSION_PUBRANGE_TAB, {SessionId, First}). + +-spec checkpoint_range(emqx_persistent_session_ds:id(), range()) -> ok. +checkpoint_range( + SessionId, + #range{type = inflight, first = First, until = Until, stream = StreamRef, iterator = ItNext} +) -> + DSRange = #ds_pubrange{ + id = {SessionId, First}, + until = Until, + stream = StreamRef, + type = checkpoint, + iterator = ItNext + }, + mria:dirty_write(?SESSION_PUBRANGE_TAB, DSRange); +checkpoint_range(_SessionId, #range{type = checkpoint}) -> + %% This range should have been checkpointed already. + ok. 
+ +get_last_iterator(DSStream = #ds_stream{ref = StreamRef}, Ranges) -> + case lists:keyfind(StreamRef, #range.stream, lists:reverse(Ranges)) of + false -> + DSStream#ds_stream.beginning; + #range{iterator = ItNext} -> + ItNext + end. + +-spec get_streams(emqx_persistent_session_ds:id()) -> [ds_stream()]. +get_streams(SessionId) -> + mnesia:dirty_read(?SESSION_STREAM_TAB, SessionId). + +next_seqno(Seqno) -> + NextSeqno = Seqno + 1, + case seqno_to_packet_id(NextSeqno) of + 0 -> + %% We skip sequence numbers that lead to PacketId = 0 to + %% simplify math. Note: it leads to occasional gaps in the + %% sequence numbers. + NextSeqno + 1; + _ -> + NextSeqno + end. + %% Reconstruct session counter by adding most significant bits from %% the current counter to the packet id. --spec packet_id_to_seqno(non_neg_integer(), emqx_types:packet_id()) -> non_neg_integer(). +-spec packet_id_to_seqno(_Next :: seqno(), emqx_types:packet_id()) -> seqno(). packet_id_to_seqno(NextSeqNo, PacketId) -> Epoch = NextSeqNo bsr 16, case packet_id_to_seqno_(Epoch, PacketId) of @@ -243,10 +376,20 @@ packet_id_to_seqno(NextSeqNo, PacketId) -> packet_id_to_seqno_(Epoch - 1, PacketId) end. --spec packet_id_to_seqno_(non_neg_integer(), emqx_types:packet_id()) -> non_neg_integer(). +-spec packet_id_to_seqno_(non_neg_integer(), emqx_types:packet_id()) -> seqno(). packet_id_to_seqno_(Epoch, PacketId) -> (Epoch bsl 16) + PacketId. +-spec seqno_to_packet_id(seqno()) -> emqx_types:packet_id(). +seqno_to_packet_id(Seqno) -> + Seqno rem 16#10000. + +range_size(FirstSeqno, UntilSeqno) -> + %% This function assumes that gaps in the sequence ID occur _only_ when the + %% packet ID wraps. + Size = UntilSeqno - FirstSeqno, + Size + (FirstSeqno bsr 16) - (UntilSeqno bsr 16). + -spec shuffle([A]) -> [A]. shuffle(L0) -> L1 = lists:map( @@ -259,6 +402,10 @@ shuffle(L0) -> {_, L} = lists:unzip(L2), L. +ro_transaction(Fun) -> + {atomic, Res} = mria:ro_transaction(?DS_MRIA_SHARD, Fun), + Res. + -ifdef(TEST). %% This test only tests boundary conditions (to make sure property-based test didn't skip them): @@ -311,4 +458,40 @@ seqno_gen(NextSeqNo) -> Max = max(0, NextSeqNo - 1), range(Min, Max). +range_size_test_() -> + [ + ?_assertEqual(0, range_size(42, 42)), + ?_assertEqual(1, range_size(42, 43)), + ?_assertEqual(1, range_size(16#ffff, 16#10001)), + ?_assertEqual(16#ffff - 456 + 123, range_size(16#1f0000 + 456, 16#200000 + 123)) + ]. + +compute_inflight_range_test_() -> + [ + ?_assertEqual( + {1, 1}, + compute_inflight_range([]) + ), + ?_assertEqual( + {12, 42}, + compute_inflight_range([ + #range{first = 1, until = 2, type = checkpoint}, + #range{first = 4, until = 8, type = checkpoint}, + #range{first = 11, until = 12, type = checkpoint}, + #range{first = 12, until = 13, type = inflight}, + #range{first = 13, until = 20, type = inflight}, + #range{first = 20, until = 42, type = inflight} + ]) + ), + ?_assertEqual( + {13, 13}, + compute_inflight_range([ + #range{first = 1, until = 2, type = checkpoint}, + #range{first = 4, until = 8, type = checkpoint}, + #range{first = 11, until = 12, type = checkpoint}, + #range{first = 12, until = 13, type = checkpoint} + ]) + ) + ]. + -endif. 
diff --git a/apps/emqx/src/emqx_persistent_session_ds.erl b/apps/emqx/src/emqx_persistent_session_ds.erl index 3a7232747..7ba5aa527 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.erl +++ b/apps/emqx/src/emqx_persistent_session_ds.erl @@ -76,7 +76,7 @@ list_all_sessions/0, list_all_subscriptions/0, list_all_streams/0, - list_all_iterators/0 + list_all_pubranges/0 ]). -endif. @@ -359,15 +359,16 @@ handle_timeout( end, ensure_timer(pull, Timeout), {ok, Publishes, Session#{inflight => Inflight}}; -handle_timeout(_ClientInfo, get_streams, Session = #{id := Id}) -> - renew_streams(Id), +handle_timeout(_ClientInfo, get_streams, Session) -> + renew_streams(Session), ensure_timer(get_streams), {ok, [], Session}. -spec replay(clientinfo(), [], session()) -> {ok, replies(), session()}. -replay(_ClientInfo, [], Session = #{}) -> - {ok, [], Session}. +replay(_ClientInfo, [], Session = #{inflight := Inflight0}) -> + {Replies, Inflight} = emqx_persistent_message_ds_replayer:replay(Inflight0), + {ok, Replies, Session#{inflight := Inflight}}. %%-------------------------------------------------------------------- @@ -474,17 +475,20 @@ create_tables() -> ] ), ok = mria:create_table( - ?SESSION_ITER_TAB, + ?SESSION_PUBRANGE_TAB, [ {rlog_shard, ?DS_MRIA_SHARD}, - {type, set}, + {type, ordered_set}, {storage, storage()}, - {record_name, ds_iter}, - {attributes, record_info(fields, ds_iter)} + {record_name, ds_pubrange}, + {attributes, record_info(fields, ds_pubrange)} ] ), ok = mria:wait_for_tables([ - ?SESSION_TAB, ?SESSION_SUBSCRIPTIONS_TAB, ?SESSION_STREAM_TAB, ?SESSION_ITER_TAB + ?SESSION_TAB, + ?SESSION_SUBSCRIPTIONS_TAB, + ?SESSION_STREAM_TAB, + ?SESSION_PUBRANGE_TAB ]), ok. @@ -512,9 +516,10 @@ session_open(SessionId) -> Session = export_session(Record), DSSubs = session_read_subscriptions(SessionId), Subscriptions = export_subscriptions(DSSubs), + Inflight = emqx_persistent_message_ds_replayer:open(SessionId), Session#{ subscriptions => Subscriptions, - inflight => emqx_persistent_message_ds_replayer:new() + inflight => Inflight }; [] -> false @@ -549,7 +554,7 @@ session_create(SessionId, Props) -> session_drop(DSSessionId) -> transaction(fun() -> ok = session_drop_subscriptions(DSSessionId), - ok = session_drop_iterators(DSSessionId), + ok = session_drop_pubranges(DSSessionId), ok = session_drop_streams(DSSessionId), ok = mnesia:delete(?SESSION_TAB, DSSessionId, write) end). @@ -663,77 +668,82 @@ do_ensure_all_iterators_closed(_DSSessionID) -> %% Reading batches %%-------------------------------------------------------------------- --spec renew_streams(id()) -> ok. -renew_streams(DSSessionId) -> - Subscriptions = ro_transaction(fun() -> session_read_subscriptions(DSSessionId) end), - ExistingStreams = ro_transaction(fun() -> mnesia:read(?SESSION_STREAM_TAB, DSSessionId) end), - lists:foreach( - fun(#ds_sub{id = {_, TopicFilter}, start_time = StartTime}) -> - renew_streams(DSSessionId, ExistingStreams, TopicFilter, StartTime) +-spec renew_streams(session()) -> ok. +renew_streams(#{id := SessionId, subscriptions := Subscriptions}) -> + transaction(fun() -> + ExistingStreams = mnesia:read(?SESSION_STREAM_TAB, SessionId, write), + maps:fold( + fun(TopicFilter, #{start_time := StartTime}, Streams) -> + TopicFilterWords = emqx_topic:words(TopicFilter), + renew_topic_streams(SessionId, TopicFilterWords, StartTime, Streams) + end, + ExistingStreams, + Subscriptions + ) + end), + ok. + +-spec renew_topic_streams(id(), topic_filter_words(), emqx_ds:time(), _Acc :: [ds_stream()]) -> ok. 
+renew_topic_streams(DSSessionId, TopicFilter, StartTime, ExistingStreams) -> + TopicStreams = emqx_ds:get_streams(?PERSISTENT_MESSAGE_DB, TopicFilter, StartTime), + lists:foldl( + fun({Rank, Stream}, Streams) -> + case lists:keymember(Stream, #ds_stream.stream, Streams) of + true -> + Streams; + false -> + StreamRef = length(Streams) + 1, + DSStream = session_store_stream( + DSSessionId, + StreamRef, + Stream, + Rank, + TopicFilter, + StartTime + ), + [DSStream | Streams] + end end, - Subscriptions + ExistingStreams, + TopicStreams ). --spec renew_streams(id(), [ds_stream()], topic_filter_words(), emqx_ds:time()) -> ok. -renew_streams(DSSessionId, ExistingStreams, TopicFilter, StartTime) -> - AllStreams = emqx_ds:get_streams(?PERSISTENT_MESSAGE_DB, TopicFilter, StartTime), - transaction( - fun() -> - lists:foreach( - fun({Rank, Stream}) -> - Rec = #ds_stream{ - session = DSSessionId, - topic_filter = TopicFilter, - stream = Stream, - rank = Rank - }, - case lists:member(Rec, ExistingStreams) of - true -> - ok; - false -> - mnesia:write(?SESSION_STREAM_TAB, Rec, write), - {ok, Iterator} = emqx_ds:make_iterator( - ?PERSISTENT_MESSAGE_DB, Stream, TopicFilter, StartTime - ), - %% Workaround: we convert `Stream' to a binary before - %% attempting to store it in mnesia(rocksdb) because of a bug - %% in `mnesia_rocksdb' when trying to do - %% `mnesia:dirty_all_keys' later. - StreamBin = term_to_binary(Stream), - IterRec = #ds_iter{id = {DSSessionId, StreamBin}, iter = Iterator}, - mnesia:write(?SESSION_ITER_TAB, IterRec, write) - end - end, - AllStreams - ) - end - ). +session_store_stream(DSSessionId, StreamRef, Stream, Rank, TopicFilter, StartTime) -> + {ok, ItBegin} = emqx_ds:make_iterator( + ?PERSISTENT_MESSAGE_DB, + Stream, + TopicFilter, + StartTime + ), + DSStream = #ds_stream{ + session = DSSessionId, + ref = StreamRef, + stream = Stream, + rank = Rank, + beginning = ItBegin + }, + mnesia:write(?SESSION_STREAM_TAB, DSStream, write), + DSStream. %% must be called inside a transaction -spec session_drop_streams(id()) -> ok. session_drop_streams(DSSessionId) -> - MS = ets:fun2ms( - fun(#ds_stream{session = DSSessionId0}) when DSSessionId0 =:= DSSessionId -> - DSSessionId0 - end - ), - StreamIDs = mnesia:select(?SESSION_STREAM_TAB, MS, write), - lists:foreach(fun(Key) -> mnesia:delete(?SESSION_STREAM_TAB, Key, write) end, StreamIDs). + mnesia:delete(?SESSION_STREAM_TAB, DSSessionId, write). %% must be called inside a transaction --spec session_drop_iterators(id()) -> ok. -session_drop_iterators(DSSessionId) -> +-spec session_drop_pubranges(id()) -> ok. +session_drop_pubranges(DSSessionId) -> MS = ets:fun2ms( - fun(#ds_iter{id = {DSSessionId0, StreamBin}}) when DSSessionId0 =:= DSSessionId -> - StreamBin + fun(#ds_pubrange{id = {DSSessionId0, First}}) when DSSessionId0 =:= DSSessionId -> + {DSSessionId, First} end ), - StreamBins = mnesia:select(?SESSION_ITER_TAB, MS, write), + RangeIds = mnesia:select(?SESSION_PUBRANGE_TAB, MS, write), lists:foreach( - fun(StreamBin) -> - mnesia:delete(?SESSION_ITER_TAB, {DSSessionId, StreamBin}, write) + fun(RangeId) -> + mnesia:delete(?SESSION_PUBRANGE_TAB, RangeId, write) end, - StreamBins + RangeIds ). %%-------------------------------------------------------------------------------- @@ -758,7 +768,7 @@ export_subscriptions(DSSubs) -> ). export_session(#session{} = Record) -> - export_record(Record, #session.id, [id, created_at, expires_at, inflight, props], #{}). + export_record(Record, #session.id, [id, created_at, expires_at, props], #{}). 
export_subscription(#ds_sub{} = Record) -> export_record(Record, #ds_sub.start_time, [start_time, props, extra], #{}). @@ -833,16 +843,18 @@ list_all_streams() -> ), maps:from_list(DSStreams). -list_all_iterators() -> - DSIterIds = mnesia:dirty_all_keys(?SESSION_ITER_TAB), - DSIters = lists:map( - fun(DSIterId) -> - [Record] = mnesia:dirty_read(?SESSION_ITER_TAB, DSIterId), - {DSIterId, export_record(Record, #ds_iter.id, [id, iter], #{})} +list_all_pubranges() -> + DSPubranges = mnesia:dirty_match_object(?SESSION_PUBRANGE_TAB, #ds_pubrange{_ = '_'}), + lists:foldl( + fun(Record = #ds_pubrange{id = {SessionId, First}}, Acc) -> + Range = export_record( + Record, #ds_pubrange.until, [until, stream, type, iterator], #{first => First} + ), + maps:put(SessionId, maps:get(SessionId, Acc, []) ++ [Range], Acc) end, - DSIterIds - ), - maps:from_list(DSIters). + #{}, + DSPubranges + ). %% ifdef(TEST) -endif. diff --git a/apps/emqx/src/emqx_persistent_session_ds.hrl b/apps/emqx/src/emqx_persistent_session_ds.hrl index cc995ce66..a3ea5a662 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.hrl +++ b/apps/emqx/src/emqx_persistent_session_ds.hrl @@ -21,7 +21,7 @@ -define(SESSION_TAB, emqx_ds_session). -define(SESSION_SUBSCRIPTIONS_TAB, emqx_ds_session_subscriptions). -define(SESSION_STREAM_TAB, emqx_ds_stream_tab). --define(SESSION_ITER_TAB, emqx_ds_iter_tab). +-define(SESSION_PUBRANGE_TAB, emqx_ds_pubrange_tab). -define(DS_MRIA_SHARD, emqx_ds_session_shard). -record(ds_sub, { @@ -34,17 +34,24 @@ -record(ds_stream, { session :: emqx_persistent_session_ds:id(), - topic_filter :: emqx_ds:topic_filter(), + ref :: _StreamRef, stream :: emqx_ds:stream(), - rank :: emqx_ds:stream_rank() + rank :: emqx_ds:stream_rank(), + beginning :: emqx_ds:iterator() }). -type ds_stream() :: #ds_stream{}. --type ds_stream_bin() :: binary(). --record(ds_iter, { - id :: {emqx_persistent_session_ds:id(), ds_stream_bin()}, - iter :: emqx_ds:iterator() +-record(ds_pubrange, { + id :: { + _Session :: emqx_persistent_session_ds:id(), + _First :: emqx_persistent_message_ds_replayer:seqno() + }, + until :: emqx_persistent_message_ds_replayer:seqno(), + stream :: _StreamRef, + type :: inflight | checkpoint, + iterator :: emqx_ds:iterator() }). +-type ds_pubrange() :: #ds_pubrange{}. -record(session, { %% same as clientid @@ -52,7 +59,7 @@ %% creation time created_at :: _Millisecond :: non_neg_integer(), expires_at = never :: _Millisecond :: non_neg_integer() | never, - inflight :: emqx_persistent_message_ds_replayer:inflight(), + % last_ack = 0 :: emqx_persistent_message_ds_replayer:seqno(), %% for future usage props = #{} :: map() }). From ba2133abb3e1e16d7485a4ba018bf802907e1c37 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Mon, 20 Nov 2023 09:12:38 +0100 Subject: [PATCH 017/101] chore: remove github copilot placeholder from pr template https://githubnext.com/copilot-for-prs-sunset --- .github/pull_request_template.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 24024f68a..d8c90965b 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,9 +1,6 @@ Fixes - - -## Summary -copilot:summary + ## PR Checklist Please convert it to a draft if any of the following conditions are not met. 
Reviewers may skip over until all the items are checked: From a5ff4144fea77658c0994b5247c4cfcd87cacc46 Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Fri, 17 Nov 2023 20:21:53 +0700 Subject: [PATCH 018/101] test(sessds): add complex testcase for session replay --- .../test/emqx_persistent_session_SUITE.erl | 188 +++++++++++++++--- 1 file changed, 164 insertions(+), 24 deletions(-) diff --git a/apps/emqx/test/emqx_persistent_session_SUITE.erl b/apps/emqx/test/emqx_persistent_session_SUITE.erl index bd7ca1c46..3f4cbcd28 100644 --- a/apps/emqx/test/emqx_persistent_session_SUITE.erl +++ b/apps/emqx/test/emqx_persistent_session_SUITE.erl @@ -181,18 +181,23 @@ client_info(Key, Client) -> maps:get(Key, maps:from_list(emqtt:info(Client)), undefined). receive_messages(Count) -> - receive_messages(Count, []). + receive_messages(Count, 15000). -receive_messages(0, Msgs) -> - Msgs; -receive_messages(Count, Msgs) -> +receive_messages(Count, Timeout) -> + Deadline = erlang:monotonic_time(millisecond) + Timeout, + receive_message_loop(Count, Deadline). + +receive_message_loop(0, _Deadline) -> + []; +receive_message_loop(Count, Deadline) -> + Timeout = max(0, Deadline - erlang:monotonic_time(millisecond)), receive {publish, Msg} -> - receive_messages(Count - 1, [Msg | Msgs]); + [Msg | receive_message_loop(Count - 1, Deadline)]; _Other -> - receive_messages(Count, Msgs) - after 15000 -> - Msgs + receive_message_loop(Count, Deadline) + after Timeout -> + [] end. maybe_kill_connection_process(ClientId, Config) -> @@ -229,16 +234,28 @@ wait_for_cm_unregister(ClientId, N) -> wait_for_cm_unregister(ClientId, N - 1) end. -publish(Topic, Payloads) -> - publish(Topic, Payloads, false, 2). +messages(Topic, Payloads) -> + messages(Topic, Payloads, ?QOS_2). -publish(Topic, Payloads, WaitForUnregister, QoS) -> - Fun = fun(Client, Payload) -> - {ok, _} = emqtt:publish(Client, Topic, Payload, QoS) +messages(Topic, Payloads, QoS) -> + [#mqtt_msg{topic = Topic, payload = P, qos = QoS} || P <- Payloads]. + +publish(Topic, Payload) -> + publish(Topic, Payload, ?QOS_2). + +publish(Topic, Payload, QoS) -> + publish_many(messages(Topic, [Payload], QoS)). + +publish_many(Messages) -> + publish_many(Messages, false). + +publish_many(Messages, WaitForUnregister) -> + Fun = fun(Client, Message) -> + {ok, _} = emqtt:publish(Client, Message) end, - do_publish(Payloads, Fun, WaitForUnregister). + do_publish(Messages, Fun, WaitForUnregister). -do_publish(Payloads = [_ | _], PublishFun, WaitForUnregister) -> +do_publish(Messages = [_ | _], PublishFun, WaitForUnregister) -> %% Publish from another process to avoid connection confusion. {Pid, Ref} = spawn_monitor( @@ -252,7 +269,7 @@ do_publish(Payloads = [_ | _], PublishFun, WaitForUnregister) -> {port, 1883} ]), {ok, _} = emqtt:connect(Client), - lists:foreach(fun(Payload) -> PublishFun(Client, Payload) end, Payloads), + lists:foreach(fun(Message) -> PublishFun(Client, Message) end, Messages), ok = emqtt:disconnect(Client), %% Snabbkaffe sometimes fails unless all processes are gone. case WaitForUnregister of @@ -277,9 +294,7 @@ do_publish(Payloads = [_ | _], PublishFun, WaitForUnregister) -> receive {'DOWN', Ref, process, Pid, normal} -> ok; {'DOWN', Ref, process, Pid, What} -> error({failed_publish, What}) - end; -do_publish(Payload, PublishFun, WaitForUnregister) -> - do_publish([Payload], PublishFun, WaitForUnregister). + end. 
%%-------------------------------------------------------------------- %% Test Cases @@ -494,7 +509,7 @@ t_process_dies_session_expires(Config) -> maybe_kill_connection_process(ClientId, Config), - ok = publish(Topic, [Payload]), + ok = publish(Topic, Payload), timer:sleep(1100), @@ -535,7 +550,7 @@ t_publish_while_client_is_gone_qos1(Config) -> ok = emqtt:disconnect(Client1), maybe_kill_connection_process(ClientId, Config), - ok = publish(Topic, [Payload1, Payload2], false, 1), + ok = publish_many(messages(Topic, [Payload1, Payload2], ?QOS_1)), {ok, Client2} = emqtt:start_link([ {proto_ver, v5}, @@ -547,7 +562,7 @@ t_publish_while_client_is_gone_qos1(Config) -> {ok, _} = emqtt:ConnFun(Client2), Msgs = receive_messages(2), ?assertMatch([_, _], Msgs), - [Msg2, Msg1] = Msgs, + [Msg1, Msg2] = Msgs, ?assertEqual({ok, iolist_to_binary(Payload1)}, maps:find(payload, Msg1)), ?assertEqual({ok, 1}, maps:find(qos, Msg1)), ?assertEqual({ok, iolist_to_binary(Payload2)}, maps:find(payload, Msg2)), @@ -555,6 +570,131 @@ t_publish_while_client_is_gone_qos1(Config) -> ok = emqtt:disconnect(Client2). +t_publish_many_while_client_is_gone_qos1(Config) -> + %% A persistent session should receive all of the still unacked messages + %% for its subscriptions after the client dies or reconnects, in addition + %% to new messages that were published while the client was gone. The order + %% of the messages should be consistent across reconnects. + ClientId = ?config(client_id, Config), + ConnFun = ?config(conn_fun, Config), + {ok, Client1} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, true}, + {auto_ack, false} + | Config + ]), + {ok, _} = emqtt:ConnFun(Client1), + + STopics = [ + <<"t/+/foo">>, + <<"msg/feed/#">>, + <<"loc/+/+/+">> + ], + [{ok, _, [?QOS_1]} = emqtt:subscribe(Client1, ST, ?QOS_1) || ST <- STopics], + + Pubs1 = [ + #mqtt_msg{topic = <<"t/42/foo">>, payload = <<"M1">>, qos = 1}, + #mqtt_msg{topic = <<"t/42/foo">>, payload = <<"M2">>, qos = 1}, + #mqtt_msg{topic = <<"msg/feed/me">>, payload = <<"M3">>, qos = 1}, + #mqtt_msg{topic = <<"loc/1/2/42">>, payload = <<"M4">>, qos = 1}, + #mqtt_msg{topic = <<"t/42/foo">>, payload = <<"M5">>, qos = 1}, + #mqtt_msg{topic = <<"loc/3/4/5">>, payload = <<"M6">>, qos = 1}, + #mqtt_msg{topic = <<"msg/feed/me">>, payload = <<"M7">>, qos = 1} + ], + ok = publish_many(Pubs1), + NPubs1 = length(Pubs1), + + Msgs1 = receive_messages(NPubs1), + NMsgs1 = length(Msgs1), + ?assertEqual(NPubs1, NMsgs1), + + ct:pal("Msgs1 = ~p", [Msgs1]), + + %% TODO + %% This assertion doesn't currently hold because `emqx_ds` doesn't enforce + %% strict ordering reflecting client publishing order. Instead, per-topic + %% ordering is guaranteed per each client. In fact, this violates the MQTT + %% specification, but we deemed it acceptable for now. + %% ?assertMatch([ + %% #{payload := <<"M1">>}, + %% #{payload := <<"M2">>}, + %% #{payload := <<"M3">>}, + %% #{payload := <<"M4">>}, + %% #{payload := <<"M5">>}, + %% #{payload := <<"M6">>}, + %% #{payload := <<"M7">>} + %% ], Msgs1), + + ?assertEqual( + get_topicwise_order(Pubs1), + get_topicwise_order(Msgs1), + Msgs1 + ), + + NAcked = 4, + [ok = emqtt:puback(Client1, PktId) || #{packet_id := PktId} <- lists:sublist(Msgs1, NAcked)], + + %% Ensure that PUBACKs are propagated to the channel. 
+ pong = emqtt:ping(Client1), + + ok = emqtt:disconnect(Client1), + maybe_kill_connection_process(ClientId, Config), + + Pubs2 = [ + #mqtt_msg{topic = <<"loc/3/4/5">>, payload = <<"M8">>, qos = 1}, + #mqtt_msg{topic = <<"t/100/foo">>, payload = <<"M9">>, qos = 1}, + #mqtt_msg{topic = <<"t/100/foo">>, payload = <<"M10">>, qos = 1}, + #mqtt_msg{topic = <<"msg/feed/friend">>, payload = <<"M11">>, qos = 1}, + #mqtt_msg{topic = <<"msg/feed/me">>, payload = <<"M12">>, qos = 1} + ], + ok = publish_many(Pubs2), + NPubs2 = length(Pubs2), + + {ok, Client2} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, false}, + {auto_ack, false} + | Config + ]), + {ok, _} = emqtt:ConnFun(Client2), + + %% Try to receive _at most_ `NPubs` messages. + %% There shouldn't be that much unacked messages in the replay anyway, + %% but it's an easy number to pick. + NPubs = NPubs1 + NPubs2, + Msgs2 = receive_messages(NPubs, _Timeout = 2000), + NMsgs2 = length(Msgs2), + + ct:pal("Msgs2 = ~p", [Msgs2]), + + ?assert(NMsgs2 < NPubs, Msgs2), + ?assert(NMsgs2 > NPubs2, Msgs2), + ?assert(NMsgs2 >= NPubs - NAcked, Msgs2), + NSame = NMsgs2 - NPubs2, + ?assertEqual( + [maps:with([packet_id, topic, payload], M) || M <- lists:nthtail(NMsgs1 - NSame, Msgs1)], + [maps:with([packet_id, topic, payload], M) || M <- lists:sublist(Msgs2, NSame)] + ), + + ok = emqtt:disconnect(Client2). + +get_topicwise_order(Msgs) -> + maps:groups_from_list(fun get_msgpub_topic/1, fun get_msgpub_payload/1, Msgs). + +get_msgpub_topic(#mqtt_msg{topic = Topic}) -> + Topic; +get_msgpub_topic(#{topic := Topic}) -> + Topic. + +get_msgpub_payload(#mqtt_msg{payload = Payload}) -> + Payload; +get_msgpub_payload(#{payload := Payload}) -> + Payload. + t_publish_while_client_is_gone(init, Config) -> skip_ds_tc(Config); t_publish_while_client_is_gone('end', _Config) -> ok. t_publish_while_client_is_gone(Config) -> @@ -579,7 +719,7 @@ t_publish_while_client_is_gone(Config) -> ok = emqtt:disconnect(Client1), maybe_kill_connection_process(ClientId, Config), - ok = publish(Topic, [Payload1, Payload2]), + ok = publish_many(messages(Topic, [Payload1, Payload2])), {ok, Client2} = emqtt:start_link([ {proto_ver, v5}, @@ -591,7 +731,7 @@ t_publish_while_client_is_gone(Config) -> {ok, _} = emqtt:ConnFun(Client2), Msgs = receive_messages(2), ?assertMatch([_, _], Msgs), - [Msg2, Msg1] = Msgs, + [Msg1, Msg2] = Msgs, ?assertEqual({ok, iolist_to_binary(Payload1)}, maps:find(payload, Msg1)), ?assertEqual({ok, 2}, maps:find(qos, Msg1)), ?assertEqual({ok, iolist_to_binary(Payload2)}, maps:find(payload, Msg2)), From 7081f1951f709def7216ff88e144194b29ee30ec Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Mon, 20 Nov 2023 14:10:53 +0700 Subject: [PATCH 019/101] refactor(sessds): use `ds_pubrange` record as is Instead of converting it into almost similar runtime representation. --- .../emqx_persistent_message_ds_replayer.erl | 152 +++++++----------- apps/emqx/src/emqx_persistent_session_ds.hrl | 13 ++ 2 files changed, 70 insertions(+), 95 deletions(-) diff --git a/apps/emqx/src/emqx_persistent_message_ds_replayer.erl b/apps/emqx/src/emqx_persistent_message_ds_replayer.erl index a95e1c152..2f5348938 100644 --- a/apps/emqx/src/emqx_persistent_message_ds_replayer.erl +++ b/apps/emqx/src/emqx_persistent_message_ds_replayer.erl @@ -41,30 +41,11 @@ %% Note: sequence numbers are monotonic; they don't wrap around: -type seqno() :: non_neg_integer(). 
--record(range, { - stream :: _StreamRef, - first :: seqno(), - until :: seqno(), - %% Type of a range: - %% * Inflight range is a range of yet unacked messages from this stream. - %% * Checkpoint range was already acked, its purpose is to keep track of the - %% very last iterator for this stream. - type :: inflight | checkpoint, - %% Meaning of this depends on the type of the range: - %% * For inflight range, this is the iterator pointing to the first message in - %% the range. - %% * For checkpoint range, this is the iterator pointing right past the last - %% message in the range. - iterator :: emqx_ds:iterator() -}). - --type range() :: #range{}. - -record(inflight, { next_seqno = 1 :: seqno(), acked_until = 1 :: seqno(), %% Ranges are sorted in ascending order of their sequence numbers. - offset_ranges = [] :: [range()] + offset_ranges = [] :: [ds_pubrange()] }). -opaque inflight() :: #inflight{}. @@ -170,53 +151,51 @@ poll(SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < 16#7fff compute_inflight_range([]) -> {1, 1}; compute_inflight_range(Ranges) -> - _RangeLast = #range{until = LastSeqno} = lists:last(Ranges), - RangesUnacked = lists:dropwhile(fun(#range{type = T}) -> T == checkpoint end, Ranges), + _RangeLast = #ds_pubrange{until = LastSeqno} = lists:last(Ranges), + RangesUnacked = lists:dropwhile( + fun(#ds_pubrange{type = T}) -> T == checkpoint end, + Ranges + ), case RangesUnacked of - [#range{first = AckedUntil} | _] -> + [#ds_pubrange{id = {_, AckedUntil}} | _] -> {AckedUntil, LastSeqno}; [] -> {LastSeqno, LastSeqno} end. +-spec get_ranges(emqx_persistent_session_ds:id()) -> [ds_pubrange()]. get_ranges(SessionId) -> - DSRanges = mnesia:match_object( - ?SESSION_PUBRANGE_TAB, - #ds_pubrange{id = {SessionId, '_'}, _ = '_'}, - read + Pat = erlang:make_tuple( + record_info(size, ds_pubrange), + '_', + [{1, ds_pubrange}, {#ds_pubrange.id, {SessionId, '_'}}] ), - lists:map(fun export_range/1, DSRanges). - -export_range(#ds_pubrange{ - type = Type, id = {_, First}, until = Until, stream = StreamRef, iterator = It -}) -> - #range{type = Type, stream = StreamRef, first = First, until = Until, iterator = It}. + mnesia:match_object(?SESSION_PUBRANGE_TAB, Pat, read). fetch(SessionId, Inflight0, [DSStream | Streams], N, Acc) when N > 0 -> - #inflight{next_seqno = FirstSeqno, offset_ranges = Ranges0} = Inflight0, - ItBegin = get_last_iterator(DSStream, Ranges0), + #inflight{next_seqno = FirstSeqno, offset_ranges = Ranges} = Inflight0, + ItBegin = get_last_iterator(DSStream, Ranges), {ok, ItEnd, Messages} = emqx_ds:next(?PERSISTENT_MESSAGE_DB, ItBegin, N), {Publishes, UntilSeqno} = publish(FirstSeqno, Messages), case range_size(FirstSeqno, UntilSeqno) of Size when Size > 0 -> - Range0 = #range{ + %% We need to preserve the iterator pointing to the beginning of the + %% range, so that we can replay it if needed. + Range0 = #ds_pubrange{ + id = {SessionId, FirstSeqno}, type = inflight, - first = FirstSeqno, until = UntilSeqno, stream = DSStream#ds_stream.ref, iterator = ItBegin }, - %% We need to preserve the iterator pointing to the beginning of the - %% range, so that we can replay it if needed. - ok = preserve_range(SessionId, Range0), + ok = preserve_range(Range0), %% ...Yet we need to keep the iterator pointing past the end of the %% range, so that we can pick up where we left off: it will become %% `ItBegin` of the next range for this stream. 
- Range = Range0#range{iterator = ItEnd}, - Ranges = Ranges0 ++ [Range#range{iterator = ItEnd}], + Range = Range0#ds_pubrange{iterator = ItEnd}, Inflight = Inflight0#inflight{ next_seqno = UntilSeqno, - offset_ranges = Ranges + offset_ranges = Ranges ++ [Range] }, fetch(SessionId, Inflight, Streams, N - Size, [Publishes | Acc]); 0 -> @@ -238,7 +217,7 @@ discard_acked( find_checkpoints(Ranges) -> lists:foldl( - fun(#range{stream = StreamRef, until = Until}, Acc) -> + fun(#ds_pubrange{stream = StreamRef, until = Until}, Acc) -> %% For each stream, remember the last range over this stream. Acc#{StreamRef => Until} end, @@ -250,7 +229,7 @@ discard_acked_ranges( SessionId, AckedUntil, Checkpoints, - [Range = #range{until = Until, stream = StreamRef} | Rest] + [Range = #ds_pubrange{until = Until, stream = StreamRef} | Rest] ) when Until =< AckedUntil -> %% This range has been fully acked. %% Either discard it completely, or preserve the iterator for the next range @@ -258,11 +237,10 @@ discard_acked_ranges( RangeKept = case maps:get(StreamRef, Checkpoints) of CP when CP > Until -> - discard_range(SessionId, Range), + discard_range(Range), []; Until -> - checkpoint_range(SessionId, Range), - [Range#range{type = checkpoint}] + [checkpoint_range(Range)] end, %% Since we're (intentionally) not using transactions here, it's important to %% issue database writes in the same order in which ranges are stored: from @@ -274,7 +252,7 @@ discard_acked_ranges(_SessionId, _AckedUntil, _Checkpoints, Ranges) -> Ranges. replay_range( - Range0 = #range{type = inflight, first = First, until = Until, iterator = It}, + Range0 = #ds_pubrange{type = inflight, id = {_, First}, until = Until, iterator = It}, AckedUntil, Acc ) -> @@ -290,9 +268,11 @@ replay_range( end, %% Asserting that range is consistent with the message storage state. {Replies, Until} = publish(FirstUnacked, MessagesUnacked), - Range = Range0#range{iterator = ItNext}, + %% Again, we need to keep the iterator pointing past the end of the + %% range, so that we can pick up where we left off. + Range = Range0#ds_pubrange{iterator = ItNext}, {Range, Replies ++ Acc}; -replay_range(Range0 = #range{type = checkpoint}, _AckedUntil, Acc) -> +replay_range(Range0 = #ds_pubrange{type = checkpoint}, _AckedUntil, Acc) -> {Range0, Acc}. publish(FirstSeqno, Messages) -> @@ -305,46 +285,28 @@ publish(FirstSeqno, Messages) -> Messages ). --spec preserve_range(emqx_persistent_session_ds:id(), range()) -> ok. -preserve_range( - SessionId, - #range{first = First, until = Until, stream = StreamRef, iterator = It} -) -> - DSRange = #ds_pubrange{ - id = {SessionId, First}, - until = Until, - stream = StreamRef, - type = inflight, - iterator = It - }, - mria:dirty_write(?SESSION_PUBRANGE_TAB, DSRange). +-spec preserve_range(ds_pubrange()) -> ok. +preserve_range(Range = #ds_pubrange{type = inflight}) -> + mria:dirty_write(?SESSION_PUBRANGE_TAB, Range). --spec discard_range(emqx_persistent_session_ds:id(), range()) -> ok. -discard_range(SessionId, #range{first = First}) -> - mria:dirty_delete(?SESSION_PUBRANGE_TAB, {SessionId, First}). +-spec discard_range(ds_pubrange()) -> ok. +discard_range(#ds_pubrange{id = RangeId}) -> + mria:dirty_delete(?SESSION_PUBRANGE_TAB, RangeId). --spec checkpoint_range(emqx_persistent_session_ds:id(), range()) -> ok. 
-checkpoint_range( - SessionId, - #range{type = inflight, first = First, until = Until, stream = StreamRef, iterator = ItNext} -) -> - DSRange = #ds_pubrange{ - id = {SessionId, First}, - until = Until, - stream = StreamRef, - type = checkpoint, - iterator = ItNext - }, - mria:dirty_write(?SESSION_PUBRANGE_TAB, DSRange); -checkpoint_range(_SessionId, #range{type = checkpoint}) -> +-spec checkpoint_range(ds_pubrange()) -> ds_pubrange(). +checkpoint_range(Range0 = #ds_pubrange{type = inflight}) -> + Range = Range0#ds_pubrange{type = checkpoint}, + ok = mria:dirty_write(?SESSION_PUBRANGE_TAB, Range), + Range; +checkpoint_range(Range = #ds_pubrange{type = checkpoint}) -> %% This range should have been checkpointed already. - ok. + Range. get_last_iterator(DSStream = #ds_stream{ref = StreamRef}, Ranges) -> - case lists:keyfind(StreamRef, #range.stream, lists:reverse(Ranges)) of + case lists:keyfind(StreamRef, #ds_pubrange.stream, lists:reverse(Ranges)) of false -> DSStream#ds_stream.beginning; - #range{iterator = ItNext} -> + #ds_pubrange{iterator = ItNext} -> ItNext end. @@ -380,7 +342,7 @@ packet_id_to_seqno(NextSeqNo, PacketId) -> packet_id_to_seqno_(Epoch, PacketId) -> (Epoch bsl 16) + PacketId. --spec seqno_to_packet_id(seqno()) -> emqx_types:packet_id(). +-spec seqno_to_packet_id(seqno()) -> emqx_types:packet_id() | 0. seqno_to_packet_id(Seqno) -> Seqno rem 16#10000. @@ -475,21 +437,21 @@ compute_inflight_range_test_() -> ?_assertEqual( {12, 42}, compute_inflight_range([ - #range{first = 1, until = 2, type = checkpoint}, - #range{first = 4, until = 8, type = checkpoint}, - #range{first = 11, until = 12, type = checkpoint}, - #range{first = 12, until = 13, type = inflight}, - #range{first = 13, until = 20, type = inflight}, - #range{first = 20, until = 42, type = inflight} + #ds_pubrange{id = {<<>>, 1}, until = 2, type = checkpoint}, + #ds_pubrange{id = {<<>>, 4}, until = 8, type = checkpoint}, + #ds_pubrange{id = {<<>>, 11}, until = 12, type = checkpoint}, + #ds_pubrange{id = {<<>>, 12}, until = 13, type = inflight}, + #ds_pubrange{id = {<<>>, 13}, until = 20, type = inflight}, + #ds_pubrange{id = {<<>>, 20}, until = 42, type = inflight} ]) ), ?_assertEqual( {13, 13}, compute_inflight_range([ - #range{first = 1, until = 2, type = checkpoint}, - #range{first = 4, until = 8, type = checkpoint}, - #range{first = 11, until = 12, type = checkpoint}, - #range{first = 12, until = 13, type = checkpoint} + #ds_pubrange{id = {<<>>, 1}, until = 2, type = checkpoint}, + #ds_pubrange{id = {<<>>, 4}, until = 8, type = checkpoint}, + #ds_pubrange{id = {<<>>, 11}, until = 12, type = checkpoint}, + #ds_pubrange{id = {<<>>, 12}, until = 13, type = checkpoint} ]) ) ]. diff --git a/apps/emqx/src/emqx_persistent_session_ds.hrl b/apps/emqx/src/emqx_persistent_session_ds.hrl index a3ea5a662..f84519901 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.hrl +++ b/apps/emqx/src/emqx_persistent_session_ds.hrl @@ -43,12 +43,25 @@ -record(ds_pubrange, { id :: { + %% What session this range belongs to. _Session :: emqx_persistent_session_ds:id(), + %% Where this range starts. _First :: emqx_persistent_message_ds_replayer:seqno() }, + %% Where this range ends: the first seqno that is not included in the range. until :: emqx_persistent_message_ds_replayer:seqno(), + %% Which stream this range is over. stream :: _StreamRef, + %% Type of a range: + %% * Inflight range is a range of yet unacked messages from this stream. 
+ %% * Checkpoint range was already acked, its purpose is to keep track of the + %% very last iterator for this stream. type :: inflight | checkpoint, + %% Meaning of this depends on the type of the range: + %% * For inflight range, this is the iterator pointing to the first message in + %% the range. + %% * For checkpoint range, this is the iterator pointing right past the last + %% message in the range. iterator :: emqx_ds:iterator() }). -type ds_pubrange() :: #ds_pubrange{}. From 84ff7b0b3849d139e068be36e3bdde507b4174bc Mon Sep 17 00:00:00 2001 From: Stefan Strigler Date: Fri, 17 Nov 2023 11:23:53 +0100 Subject: [PATCH 020/101] feat(emqx_bridge): action_info with dynamic lookup This allows a n:1 relation between v1 bridge_types to action/connector types as it's the case with mongodb for instance, where we had `mongodb_single` `mongodb_sharded` etc and the new implementation will just have `mongodb`. --- apps/emqx_bridge/src/emqx_action_info.erl | 89 +++++++++++++------ apps/emqx_bridge/src/emqx_bridge_api.erl | 6 +- apps/emqx_bridge/src/emqx_bridge_lib.erl | 22 +++-- apps/emqx_bridge/src/emqx_bridge_v2.erl | 6 +- .../src/emqx_bridge_kafka_impl_producer.erl | 7 +- .../src/emqx_rule_engine_api.erl | 24 ++++- 6 files changed, 108 insertions(+), 46 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index b5d88c4d8..02ed2fda8 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -22,7 +22,7 @@ -export([ action_type_to_connector_type/1, - action_type_to_bridge_v1_type/1, + action_type_to_bridge_v1_type/2, bridge_v1_type_to_action_type/1, is_action_type/1, registered_schema_modules/0, @@ -30,7 +30,12 @@ bridge_v1_to_action_fixup/2 ]). --callback bridge_v1_type_name() -> atom(). +-callback bridge_v1_type_name() -> + atom() + | { + fun(({ActionConfig :: map(), ConnectorConfig :: map()}) -> Type :: atom()), + TypeList :: [atom()] + }. -callback action_type_name() -> atom(). -callback connector_type_name() -> atom(). -callback schema_module() -> atom(). @@ -93,16 +98,22 @@ bridge_v1_type_to_action_type(Type) -> ActionType -> ActionType end. -action_type_to_bridge_v1_type(Bin) when is_binary(Bin) -> - action_type_to_bridge_v1_type(binary_to_existing_atom(Bin)); -action_type_to_bridge_v1_type(Type) -> +action_type_to_bridge_v1_type(Bin, Conf) when is_binary(Bin) -> + action_type_to_bridge_v1_type(binary_to_existing_atom(Bin), Conf); +action_type_to_bridge_v1_type(ActionType, Conf) -> ActionInfoMap = info_map(), ActionTypeToBridgeV1Type = maps:get(action_type_to_bridge_v1_type, ActionInfoMap), - case maps:get(Type, ActionTypeToBridgeV1Type, undefined) of - undefined -> Type; + case maps:get(ActionType, ActionTypeToBridgeV1Type, undefined) of + undefined -> ActionType; + BridgeV1TypeFun when is_function(BridgeV1TypeFun) -> BridgeV1TypeFun(get_confs(Conf)); BridgeV1Type -> BridgeV1Type end. +get_confs(#{connector := ConnectorName, type := ActionType} = ActionConfig) -> + ConnectorType = action_type_to_connector_type(ActionType), + ConnectorConfig = emqx_conf:get_raw([connectors, ConnectorType, ConnectorName]), + {ActionConfig, ConnectorConfig}. + %% This function should return true for all inputs that are bridge V1 types for %% bridges that have been refactored to bridge V2s, and for all all bridge V2 %% types. For everything else the function should return false. 
@@ -226,36 +237,56 @@ get_info_map(Module) -> %% Force the module to get loaded _ = code:ensure_loaded(Module), ActionType = Module:action_type_name(), - BridgeV1Type = + {BridgeV1TypeOrFun, BridgeV1Types} = case erlang:function_exported(Module, bridge_v1_type_name, 0) of true -> - Module:bridge_v1_type_name(); + case Module:bridge_v1_type_name() of + {_BridgeV1TypeFun, _BridgeV1Types} = BridgeV1TypeTuple -> + BridgeV1TypeTuple; + BridgeV1Type0 -> + {BridgeV1Type0, [BridgeV1Type0]} + end; false -> - Module:action_type_name() + {ActionType, [ActionType]} end, #{ - action_type_names => #{ - ActionType => true, - BridgeV1Type => true - }, - bridge_v1_type_to_action_type => #{ - BridgeV1Type => ActionType, - %% Alias the bridge V1 type to the action type - ActionType => ActionType - }, + action_type_names => + lists:foldl( + fun(BridgeV1Type, M) -> + M#{BridgeV1Type => true} + end, + #{ActionType => true}, + BridgeV1Types + ), + bridge_v1_type_to_action_type => + lists:foldl( + fun(BridgeV1Type, M) -> + %% Alias the bridge V1 type to the action type + M#{BridgeV1Type => ActionType} + end, + #{ActionType => ActionType}, + BridgeV1Types + ), action_type_to_bridge_v1_type => #{ - ActionType => BridgeV1Type - }, - action_type_to_connector_type => #{ - ActionType => Module:connector_type_name(), - %% Alias the bridge V1 type to the action type - BridgeV1Type => Module:connector_type_name() + ActionType => BridgeV1TypeOrFun }, + action_type_to_connector_type => + lists:foldl( + fun(BridgeV1Type, M) -> + M#{BridgeV1Type => Module:connector_type_name()} + end, + #{ActionType => Module:connector_type_name()}, + BridgeV1Types + ), action_type_to_schema_module => #{ ActionType => Module:schema_module() }, - action_type_to_info_module => #{ - ActionType => Module, - BridgeV1Type => Module - } + action_type_to_info_module => + lists:foldl( + fun(BridgeV1Type, M) -> + M#{BridgeV1Type => Module} + end, + #{ActionType => Module}, + BridgeV1Types + ) }. diff --git a/apps/emqx_bridge/src/emqx_bridge_api.erl b/apps/emqx_bridge/src/emqx_bridge_api.erl index 188f26ab5..a3c058abb 100644 --- a/apps/emqx_bridge/src/emqx_bridge_api.erl +++ b/apps/emqx_bridge/src/emqx_bridge_api.erl @@ -907,7 +907,7 @@ format_resource( redact( maps:merge( RawConfFull#{ - type => downgrade_type(Type), + type => downgrade_type(Type, RawConf), name => maps:get(<<"name">>, RawConf, BridgeName), node => Node }, @@ -1162,5 +1162,5 @@ non_compat_bridge_msg() -> upgrade_type(Type) -> emqx_bridge_lib:upgrade_type(Type). -downgrade_type(Type) -> - emqx_bridge_lib:downgrade_type(Type). +downgrade_type(Type, Conf) -> + emqx_bridge_lib:downgrade_type(Type, Conf). diff --git a/apps/emqx_bridge/src/emqx_bridge_lib.erl b/apps/emqx_bridge/src/emqx_bridge_lib.erl index 4be605745..04b3378ce 100644 --- a/apps/emqx_bridge/src/emqx_bridge_lib.erl +++ b/apps/emqx_bridge/src/emqx_bridge_lib.erl @@ -18,7 +18,7 @@ -export([ maybe_withdraw_rule_action/3, upgrade_type/1, - downgrade_type/1 + downgrade_type/2 ]). %% @doc A bridge can be used as a rule action. @@ -61,17 +61,17 @@ upgrade_type(Type) when is_list(Type) -> atom_to_list(emqx_bridge_v2:bridge_v1_type_to_bridge_v2_type(list_to_binary(Type))). 
%% @doc Kafka producer bridge type renamed from 'kafka' to 'kafka_bridge' since 5.3.1 -downgrade_type(Type) when is_atom(Type) -> - emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(Type); -downgrade_type(Type) when is_binary(Type) -> - atom_to_binary(emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(Type)); -downgrade_type(Type) when is_list(Type) -> - atom_to_list(emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(list_to_binary(Type))). +downgrade_type(Type, Conf) when is_atom(Type) -> + emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(Type, Conf); +downgrade_type(Type, Conf) when is_binary(Type) -> + atom_to_binary(emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(Type, Conf)); +downgrade_type(Type, Conf) when is_list(Type) -> + atom_to_list(emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(list_to_binary(Type), Conf)). %% A rule might be referencing an old version bridge type name %% i.e. 'kafka' instead of 'kafka_producer' so we need to try both external_ids(Type, Name) -> - case downgrade_type(Type) of + case downgrade_type(Type, get_conf(Type, Name)) of Type -> [external_id(Type, Name)]; Type0 -> @@ -87,3 +87,9 @@ external_id(BridgeType, BridgeName) -> bin(Bin) when is_binary(Bin) -> Bin; bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8). + +get_conf(BridgeType, BridgeName) -> + case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of + true -> emqx_conf:get_raw([actions, BridgeType, BridgeName]); + false -> emqx_conf:get_raw([bridges, BridgeType, BridgeName]) + end. diff --git a/apps/emqx_bridge/src/emqx_bridge_v2.erl b/apps/emqx_bridge/src/emqx_bridge_v2.erl index f8939df8c..aa96be19b 100644 --- a/apps/emqx_bridge/src/emqx_bridge_v2.erl +++ b/apps/emqx_bridge/src/emqx_bridge_v2.erl @@ -111,7 +111,7 @@ bridge_v1_create_dry_run/2, bridge_v1_type_to_bridge_v2_type/1, %% Exception from the naming convention: - bridge_v2_type_to_bridge_v1_type/1, + bridge_v2_type_to_bridge_v1_type/2, bridge_v1_id_to_connector_resource_id/1, bridge_v1_enable_disable/3, bridge_v1_restart/2, @@ -1050,8 +1050,8 @@ bridge_v1_is_valid(BridgeV1Type, BridgeName) -> bridge_v1_type_to_bridge_v2_type(Type) -> emqx_action_info:bridge_v1_type_to_action_type(Type). -bridge_v2_type_to_bridge_v1_type(Type) -> - emqx_action_info:action_type_to_bridge_v1_type(Type). +bridge_v2_type_to_bridge_v1_type(Type, Conf) -> + emqx_action_info:action_type_to_bridge_v1_type(Type, Conf). is_bridge_v2_type(Type) -> emqx_action_info:is_action_type(Type). diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl index 4422d8dd5..e5821e6c7 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl @@ -621,8 +621,13 @@ partitioner(random) -> random; partitioner(key_dispatch) -> first_key_dispatch. replayq_dir(BridgeType, BridgeName) -> + RawConf = emqx_conf:get_raw([actions, BridgeType, BridgeName]), DirName = iolist_to_binary([ - emqx_bridge_lib:downgrade_type(BridgeType), ":", BridgeName, ":", atom_to_list(node()) + emqx_bridge_lib:downgrade_type(BridgeType, RawConf), + ":", + BridgeName, + ":", + atom_to_list(node()) ]), filename:join([emqx:data_dir(), "kafka", DirName]). 
diff --git a/apps/emqx_rule_engine/src/emqx_rule_engine_api.erl b/apps/emqx_rule_engine/src/emqx_rule_engine_api.erl index 1e978828b..b24662e53 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_engine_api.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_engine_api.erl @@ -521,8 +521,9 @@ format_action(Actions) -> do_format_action({bridge, BridgeType, BridgeName, _ResId}) -> emqx_bridge_resource:bridge_id(BridgeType, BridgeName); -do_format_action({bridge_v2, BridgeType, BridgeName}) -> - emqx_bridge_resource:bridge_id(emqx_bridge_lib:downgrade_type(BridgeType), BridgeName); +do_format_action({bridge_v2, BridgeType0, BridgeName}) -> + BridgeType = try_downgrade(BridgeType0, BridgeName), + emqx_bridge_resource:bridge_id(BridgeType, BridgeName); do_format_action(#{mod := Mod, func := Func, args := Args}) -> #{ function => printable_function_name(Mod, Func), @@ -533,6 +534,25 @@ do_format_action(#{mod := Mod, func := Func}) -> function => printable_function_name(Mod, Func) }. +try_downgrade(BridgeType, BridgeName) -> + Conf = try_get_conf(BridgeType, BridgeName), + try emqx_bridge_lib:downgrade_type(BridgeType, Conf) of + DowngradedBridgeType -> + DowngradedBridgeType + catch + error:{config_not_found, _} -> + BridgeType + end. + +try_get_conf(BridgeType, BridgeName) -> + try emqx_conf:get_raw([actions, BridgeType, BridgeName]) of + RawConf -> + RawConf + catch + error:{config_not_found, _} -> + #{} + end. + printable_function_name(emqx_rule_actions, Func) -> Func; printable_function_name(Mod, Func) -> From 0939b66af5512f582de7811017466923a93c69d4 Mon Sep 17 00:00:00 2001 From: Kinplemelon Date: Mon, 20 Nov 2023 17:28:01 +0800 Subject: [PATCH 021/101] chore: upgrade dashboard to e1.3.2-beta.1 for ee --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 0112776bb..7c9638dd5 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ endif # Dashboard version # from https://github.com/emqx/emqx-dashboard5 export EMQX_DASHBOARD_VERSION ?= v1.5.1 -export EMQX_EE_DASHBOARD_VERSION ?= e1.3.1 +export EMQX_EE_DASHBOARD_VERSION ?= e1.3.2-beta.1 PROFILE ?= emqx REL_PROFILES := emqx emqx-enterprise From 79a764f1176b4c06331b7b3ff61ccb82c6449d3a Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Mon, 20 Nov 2023 12:06:21 +0100 Subject: [PATCH 022/101] fix: bridge to action upgrade fix up hook should run after upgrade This commit changes how the `emqx_action_info` callback `bridge_v1_to_action_fixup/1` works. It is now called after the automatic upgrade instead of before. Since the full Bridge V1 config might be needed to do the fixup, it is provided in a special field `<<"__bridge_v1_conf__">>`. The `<<"__bridge_v1_conf__">>` field is removed after the callback is called and can thus be ignored if it is not needed. --- .../src/emqx_bridge_kafka_action_info.erl | 6 +- .../src/schema/emqx_connector_schema.erl | 55 +++++++++++++++++-- 2 files changed, 53 insertions(+), 8 deletions(-) diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl index 66ea2bbd7..8730c1541 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl @@ -27,8 +27,10 @@ action_to_bridge_v1_fixup(Config) -> emqx_utils_maps:rename(<<"parameters">>, <<"kafka">>, Config). bridge_v1_to_action_fixup(Config0) -> - Config = emqx_utils_maps:rename(<<"kafka">>, <<"parameters">>, Config0), - maps:with(producer_action_field_keys(), Config). 
+    KafkaMap = emqx_utils_maps:deep_get([<<"parameters">>, <<"kafka">>], Config0),
+    Config1 = emqx_utils_maps:deep_remove([<<"parameters">>, <<"kafka">>], Config0),
+    Config2 = maps:put(<<"parameters">>, KafkaMap, Config1),
+    maps:with(producer_action_field_keys(), Config2).
 
 %%------------------------------------------------------------------------------------------
 %% Internal helper fns
diff --git a/apps/emqx_connector/src/schema/emqx_connector_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_schema.erl
index f2b764fdc..a2a79712a 100644
--- a/apps/emqx_connector/src/schema/emqx_connector_schema.erl
+++ b/apps/emqx_connector/src/schema/emqx_connector_schema.erl
@@ -98,16 +98,16 @@ bridge_configs_to_transform(
     end.
 
 split_bridge_to_connector_and_action(
-    {ConnectorsMap, {BridgeType, BridgeName, ActionConf, ConnectorFields, PreviousRawConfig}}
+    {ConnectorsMap, {BridgeType, BridgeName, BridgeV1Conf, ConnectorFields, PreviousRawConfig}}
 ) ->
     %% Get connector fields from bridge config
     ConnectorMap = lists:foldl(
         fun({ConnectorFieldName, _Spec}, ToTransformSoFar) ->
-            case maps:is_key(to_bin(ConnectorFieldName), ActionConf) of
+            case maps:is_key(to_bin(ConnectorFieldName), BridgeV1Conf) of
                 true ->
                     NewToTransform = maps:put(
                         to_bin(ConnectorFieldName),
-                        maps:get(to_bin(ConnectorFieldName), ActionConf),
+                        maps:get(to_bin(ConnectorFieldName), BridgeV1Conf),
                         ToTransformSoFar
                     ),
                     NewToTransform;
@@ -125,9 +125,53 @@ split_bridge_to_connector_and_action(
         _ -> generate_connector_name(ConnectorsMap, BridgeName, 0)
     end,
     %% Add connector field to action map
-    ActionMap = maps:put(<<"connector">>, ConnectorName, ActionConf),
+    ActionMap = transform_bridge_v1_to_action(
+        BridgeType, BridgeV1Conf, ConnectorName, ConnectorFields
+    ),
     {BridgeType, BridgeName, ActionMap, ConnectorName, ConnectorMap}.
 
+transform_bridge_v1_to_action(BridgeType, BridgeV1Conf, ConnectorName, ConnectorFields) ->
+    BridgeV1ConfKey = <<"__bridge_v1_conf__">>,
+    TopKeys = [
+        <<"enable">>,
+        <<"connector">>,
+        <<"local_topic">>,
+        <<"resource_opts">>,
+        <<"description">>,
+        <<"parameters">>,
+        BridgeV1ConfKey
+    ],
+    %% Remove connector fields
+    ActionMap0 = lists:foldl(
+        fun
+            ({enable, _Spec}, ToTransformSoFar) ->
+                %% The enable field is used in both the connector and the action
+                ToTransformSoFar;
+            ({ConnectorFieldName, _Spec}, ToTransformSoFar) ->
+                case maps:is_key(to_bin(ConnectorFieldName), BridgeV1Conf) of
+                    true ->
+                        maps:remove(to_bin(ConnectorFieldName), ToTransformSoFar);
+                    false ->
+                        ToTransformSoFar
+                end
+        end,
+        BridgeV1Conf,
+        ConnectorFields
+    ),
+    %% Add special key as the whole original bridge config might be needed by
+    %% the fixup function
+    ActionMap1 = emqx_utils_maps:deep_put([BridgeV1ConfKey], ActionMap0, BridgeV1Conf),
+    %% Add the connector field
+    ActionMap2 = maps:put(<<"connector">>, ConnectorName, ActionMap1),
+    TopMap = maps:with(TopKeys, ActionMap2),
+    RestMap = maps:without(TopKeys, ActionMap2),
+    %% Other parameters should be stuffed into `parameters'
+    ActionMap = emqx_utils_maps:deep_merge(TopMap, #{<<"parameters">> => RestMap}),
+    %% Run the fixup callback if it is defined
+    FixedActionMap = emqx_action_info:bridge_v1_to_action_fixup(BridgeType, ActionMap),
+    %% remove the special key as it is not needed anymore
+    maps:without([BridgeV1ConfKey], FixedActionMap). 
+ generate_connector_name(ConnectorsMap, BridgeName, Attempt) -> ConnectorNameList = case Attempt of @@ -174,7 +218,7 @@ transform_old_style_bridges_to_connector_and_actions_of_type( ), %% Add connectors and actions and remove bridges lists:foldl( - fun({BridgeType, BridgeName, ActionMap0, ConnectorName, ConnectorMap}, RawConfigSoFar) -> + fun({BridgeType, BridgeName, ActionMap, ConnectorName, ConnectorMap}, RawConfigSoFar) -> %% Add connector RawConfigSoFar1 = emqx_utils_maps:deep_put( [<<"connectors">>, to_bin(ConnectorType), ConnectorName], @@ -186,7 +230,6 @@ transform_old_style_bridges_to_connector_and_actions_of_type( [<<"bridges">>, to_bin(BridgeType), BridgeName], RawConfigSoFar1 ), - ActionMap = emqx_action_info:bridge_v1_to_action_fixup(BridgeType, ActionMap0), %% Add action RawConfigSoFar3 = emqx_utils_maps:deep_put( [actions_config_name(), to_bin(maybe_rename(BridgeType)), BridgeName], From ef7cfd02028aa1b1f81d42bc33c53d9e67def5d5 Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Mon, 20 Nov 2023 19:56:55 +0700 Subject: [PATCH 023/101] feat(sessds): add field to `ds_pubrange` for forward-compat --- apps/emqx/src/emqx_persistent_session_ds.hrl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/apps/emqx/src/emqx_persistent_session_ds.hrl b/apps/emqx/src/emqx_persistent_session_ds.hrl index f84519901..653ac444a 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.hrl +++ b/apps/emqx/src/emqx_persistent_session_ds.hrl @@ -62,7 +62,9 @@ %% the range. %% * For checkpoint range, this is the iterator pointing right past the last %% message in the range. - iterator :: emqx_ds:iterator() + iterator :: emqx_ds:iterator(), + %% Reserved for future use. + misc = #{} :: map() }). -type ds_pubrange() :: #ds_pubrange{}. @@ -72,7 +74,6 @@ %% creation time created_at :: _Millisecond :: non_neg_integer(), expires_at = never :: _Millisecond :: non_neg_integer() | never, - % last_ack = 0 :: emqx_persistent_message_ds_replayer:seqno(), %% for future usage props = #{} :: map() }). From 1f1d9e58c6bd3da70c95c82da282715fca027e0f Mon Sep 17 00:00:00 2001 From: Stefan Strigler Date: Mon, 20 Nov 2023 16:23:46 +0100 Subject: [PATCH 024/101] fix(emqx_connector): don't crash in API on delete with active channels --- .../emqx_connector/src/emqx_connector_api.erl | 2 +- .../test/emqx_connector_api_SUITE.erl | 143 ++++++++++++++++-- 2 files changed, 130 insertions(+), 15 deletions(-) diff --git a/apps/emqx_connector/src/emqx_connector_api.erl b/apps/emqx_connector/src/emqx_connector_api.erl index b2267539b..f6e0c0f95 100644 --- a/apps/emqx_connector/src/emqx_connector_api.erl +++ b/apps/emqx_connector/src/emqx_connector_api.erl @@ -372,7 +372,7 @@ schema("/connectors_probe") -> case emqx_connector:remove(ConnectorType, ConnectorName) of ok -> ?NO_CONTENT; - {error, {active_channels, Channels}} -> + {error, {post_config_update, _HandlerMod, {active_channels, Channels}}} -> ?BAD_REQUEST( {<<"Cannot delete connector while there are active channels defined for this connector">>, Channels} diff --git a/apps/emqx_connector/test/emqx_connector_api_SUITE.erl b/apps/emqx_connector/test/emqx_connector_api_SUITE.erl index f6609808f..bd8aa9ddf 100644 --- a/apps/emqx_connector/test/emqx_connector_api_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_api_SUITE.erl @@ -25,7 +25,7 @@ -include_lib("snabbkaffe/include/test_macros.hrl"). -define(CONNECTOR_NAME, (atom_to_binary(?FUNCTION_NAME))). 
--define(CONNECTOR(NAME, TYPE), #{
+-define(RESOURCE(NAME, TYPE), #{
     %<<"ssl">> => #{<<"enable">> => false},
     <<"type">> => TYPE,
     <<"name">> => NAME
@@ -52,12 +52,57 @@
 -define(KAFKA_CONNECTOR_BASE, ?KAFKA_CONNECTOR_BASE(?KAFKA_BOOTSTRAP_HOST)).
 -define(KAFKA_CONNECTOR(Name, BootstrapHosts),
     maps:merge(
-        ?CONNECTOR(Name, ?CONNECTOR_TYPE),
+        ?RESOURCE(Name, ?CONNECTOR_TYPE),
         ?KAFKA_CONNECTOR_BASE(BootstrapHosts)
     )
 ).
 -define(KAFKA_CONNECTOR(Name), ?KAFKA_CONNECTOR(Name, ?KAFKA_BOOTSTRAP_HOST)).
 
+-define(BRIDGE_NAME, (atom_to_binary(?FUNCTION_NAME))).
+-define(BRIDGE_TYPE_STR, "kafka_producer").
+-define(BRIDGE_TYPE, <<?BRIDGE_TYPE_STR>>).
+-define(KAFKA_BRIDGE(Name, Connector), ?RESOURCE(Name, ?BRIDGE_TYPE)#{
+    <<"enable">> => true,
+    <<"connector">> => Connector,
+    <<"kafka">> => #{
+        <<"buffer">> => #{
+            <<"memory_overload_protection">> => true,
+            <<"mode">> => <<"hybrid">>,
+            <<"per_partition_limit">> => <<"2GB">>,
+            <<"segment_bytes">> => <<"100MB">>
+        },
+        <<"compression">> => <<"no_compression">>,
+        <<"kafka_ext_headers">> => [
+            #{
+                <<"kafka_ext_header_key">> => <<"clientid">>,
+                <<"kafka_ext_header_value">> => <<"${clientid}">>
+            },
+            #{
+                <<"kafka_ext_header_key">> => <<"topic">>,
+                <<"kafka_ext_header_value">> => <<"${topic}">>
+            }
+        ],
+        <<"kafka_header_value_encode_mode">> => <<"none">>,
+        <<"kafka_headers">> => <<"${pub_props}">>,
+        <<"max_batch_bytes">> => <<"896KB">>,
+        <<"max_inflight">> => 10,
+        <<"message">> => #{
+            <<"key">> => <<"${.clientid}">>,
+            <<"timestamp">> => <<"${.timestamp}">>,
+            <<"value">> => <<"${.}">>
+        },
+        <<"partition_count_refresh_interval">> => <<"60s">>,
+        <<"partition_strategy">> => <<"random">>,
+        <<"required_acks">> => <<"all_isr">>,
+        <<"topic">> => <<"kafka-topic">>
+    },
+    <<"local_topic">> => <<"mqtt/local/topic">>,
+    <<"resource_opts">> => #{
+        <<"health_check_interval">> => <<"32s">>
+    }
+}).
+-define(KAFKA_BRIDGE(Name), ?KAFKA_BRIDGE(Name, ?CONNECTOR_NAME)).
+
 %% -define(CONNECTOR_TYPE_MQTT, <<"mqtt">>).
 %% -define(MQTT_CONNECTOR(SERVER, NAME), ?CONNECTOR(NAME, ?CONNECTOR_TYPE_MQTT)#{
 %% <<"server">> => SERVER,
@@ -105,7 +150,8 @@
     emqx,
     emqx_auth,
     emqx_management,
-    {emqx_connector, "connectors {}"}
+    {emqx_connector, "connectors {}"},
+    {emqx_bridge, "actions {}"}
 ]).
 
 -define(APPSPEC_DASHBOARD,
@@ -128,7 +174,8 @@ all() ->
 groups() ->
     AllTCs = emqx_common_test_helpers:all(?MODULE),
     SingleOnlyTests = [
-        t_connectors_probe
+        t_connectors_probe,
+        t_fail_delete_with_action
     ],
     ClusterLaterJoinOnlyTCs = [
        % t_cluster_later_join_metrics
    ],
@@ -187,29 +234,38 @@ end_per_group(_, Config) ->
     emqx_cth_suite:stop(?config(group_apps, Config)),
     ok.
 
-init_per_testcase(_TestCase, Config) ->
+init_per_testcase(TestCase, Config) ->
     case ?config(cluster_nodes, Config) of
         undefined ->
-            init_mocks();
+            init_mocks(TestCase);
         Nodes ->
-            [erpc:call(Node, ?MODULE, init_mocks, []) || Node <- Nodes]
+            [erpc:call(Node, ?MODULE, init_mocks, [TestCase]) || Node <- Nodes]
     end,
     Config.
 
-end_per_testcase(_TestCase, Config) ->
+end_per_testcase(TestCase, Config) ->
+    Node = ?config(node, Config),
+    ok = erpc:call(Node, ?MODULE, clear_resources, [TestCase]),
     case ?config(cluster_nodes, Config) of
         undefined ->
             meck:unload();
         Nodes ->
-            [erpc:call(Node, meck, unload, []) || Node <- Nodes]
+            [erpc:call(N, meck, unload, []) || N <- Nodes]
     end,
-    Node = ?config(node, Config),
     ok = emqx_common_test_helpers:call_janitor(),
-    ok = erpc:call(Node, fun clear_resources/0),
     ok.
 
 -define(CONNECTOR_IMPL, dummy_connector_impl).
-init_mocks() -> +init_mocks(t_fail_delete_with_action) -> + init_mocks(common), + meck:expect(?CONNECTOR_IMPL, on_add_channel, 4, {ok, connector_state}), + meck:expect(?CONNECTOR_IMPL, on_remove_channel, 3, {ok, connector_state}), + meck:expect(?CONNECTOR_IMPL, on_get_channel_status, 3, connected), + ok = meck:expect(?CONNECTOR_IMPL, on_get_channels, fun(ResId) -> + emqx_bridge_v2:get_channels_for_connector(ResId) + end), + ok; +init_mocks(_TestCase) -> meck:new(emqx_connector_ee_schema, [passthrough, no_link]), meck:expect(emqx_connector_ee_schema, resource_type, 1, ?CONNECTOR_IMPL), meck:new(?CONNECTOR_IMPL, [non_strict, no_link]), @@ -235,7 +291,15 @@ init_mocks() -> ), [?CONNECTOR_IMPL, emqx_connector_ee_schema]. -clear_resources() -> +clear_resources(t_fail_delete_with_action) -> + lists:foreach( + fun(#{type := Type, name := Name}) -> + ok = emqx_bridge_v2:remove(Type, Name) + end, + emqx_bridge_v2:list() + ), + clear_resources(common); +clear_resources(_) -> lists:foreach( fun(#{type := Type, name := Name}) -> ok = emqx_connector:remove(Type, Name) @@ -646,7 +710,7 @@ t_connectors_probe(Config) -> request_json( post, uri(["connectors_probe"]), - ?CONNECTOR(<<"broken_connector">>, <<"unknown_type">>), + ?RESOURCE(<<"broken_connector">>, <<"unknown_type">>), Config ) ), @@ -674,6 +738,57 @@ t_create_with_bad_name(Config) -> ?assertMatch(#{<<"kind">> := <<"validation_error">>}, Msg), ok. +t_fail_delete_with_action(Config) -> + Name = ?CONNECTOR_NAME, + ?assertMatch( + {ok, 201, #{ + <<"type">> := ?CONNECTOR_TYPE, + <<"name">> := Name, + <<"enable">> := true, + <<"status">> := <<"connected">>, + <<"node_status">> := [_ | _] + }}, + request_json( + post, + uri(["connectors"]), + ?KAFKA_CONNECTOR(Name), + Config + ) + ), + ConnectorID = emqx_connector_resource:connector_id(?CONNECTOR_TYPE, Name), + BridgeName = ?BRIDGE_NAME, + ?assertMatch( + {ok, 201, #{ + <<"type">> := ?BRIDGE_TYPE, + <<"name">> := BridgeName, + <<"enable">> := true, + <<"status">> := <<"connected">>, + <<"node_status">> := [_ | _], + <<"connector">> := Name, + <<"kafka">> := #{}, + <<"local_topic">> := _, + <<"resource_opts">> := _ + }}, + request_json( + post, + uri(["actions"]), + ?KAFKA_BRIDGE(?BRIDGE_NAME), + Config + ) + ), + + %% delete the connector + ?assertMatch( + {ok, 400, #{ + <<"code">> := <<"BAD_REQUEST">>, + <<"message">> := + <<"{<<\"Cannot delete connector while there are active channels", + " defined for this connector\">>,", _/binary>> + }}, + request_json(delete, uri(["connectors", ConnectorID]), Config) + ), + ok. + %%% helpers listen_on_random_port() -> SockOpts = [binary, {active, false}, {packet, raw}, {reuseaddr, true}, {backlog, 1000}], From f40f6bc5ddfb14b7d3a08c7a0cd3662da56fe4e4 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Mon, 20 Nov 2023 12:37:36 -0300 Subject: [PATCH 025/101] refactor: split `resource_opts` fields between connector and actions --- .../src/schema/emqx_bridge_v2_schema.erl | 26 ++++++++++++ .../emqx_bridge/test/emqx_bridge_v2_tests.erl | 41 +++++++++++++++++++ .../src/emqx_bridge_kafka.erl | 2 +- .../src/schema/emqx_connector_schema.erl | 20 +++++++++ 4 files changed, 88 insertions(+), 1 deletion(-) create mode 100644 apps/emqx_bridge/test/emqx_bridge_v2_tests.erl diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl b/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl index ede783e97..b0ac870e7 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl @@ -39,6 +39,7 @@ ]). 
-export([types/0, types_sc/0]). +-export([resource_opts_fields/0, resource_opts_fields/1]). -export_type([action_type/0]). @@ -137,6 +138,31 @@ types() -> types_sc() -> hoconsc:enum(types()). +resource_opts_fields() -> + resource_opts_fields(_Overrides = []). + +resource_opts_fields(Overrides) -> + ActionROFields = [ + batch_size, + batch_time, + buffer_mode, + buffer_seg_bytes, + health_check_interval, + inflight_window, + max_buffer_bytes, + metrics_flush_interval, + query_mode, + request_ttl, + resume_interval, + start_after_created, + start_timeout, + worker_pool_size + ], + lists:filter( + fun({Key, _Sc}) -> lists:member(Key, ActionROFields) end, + emqx_resource_schema:create_opts(Overrides) + ). + examples(Method) -> MergeFun = fun(Example, Examples) -> diff --git a/apps/emqx_bridge/test/emqx_bridge_v2_tests.erl b/apps/emqx_bridge/test/emqx_bridge_v2_tests.erl new file mode 100644 index 000000000..4e28f3d88 --- /dev/null +++ b/apps/emqx_bridge/test/emqx_bridge_v2_tests.erl @@ -0,0 +1,41 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_bridge_v2_tests). + +-include_lib("eunit/include/eunit.hrl"). + +resource_opts_union_connector_actions_test() -> + %% The purpose of this test is to ensure we have split `resource_opts' fields + %% consciouly between connector and actions, in particular when/if we introduce new + %% fields there. + AllROFields = non_deprecated_fields(emqx_resource_schema:create_opts([])), + ActionROFields = non_deprecated_fields(emqx_bridge_v2_schema:resource_opts_fields()), + ConnectorROFields = non_deprecated_fields(emqx_connector_schema:resource_opts_fields()), + UnionROFields = lists:usort(ConnectorROFields ++ ActionROFields), + ?assertEqual( + lists:usort(AllROFields), + UnionROFields, + #{ + missing_fields => AllROFields -- UnionROFields, + unexpected_fields => UnionROFields -- AllROFields, + action_fields => ActionROFields, + connector_fields => ConnectorROFields + } + ), + ok. + +non_deprecated_fields(Fields) -> + [K || {K, Schema} <- Fields, not hocon_schema:is_deprecated(Schema)]. 
diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl index 0eb015cd3..b3934c7bb 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl @@ -525,7 +525,7 @@ fields(consumer_kafka_opts) -> ]; fields(resource_opts) -> SupportedFields = [health_check_interval], - CreationOpts = emqx_resource_schema:create_opts(_Overrides = []), + CreationOpts = emqx_bridge_v2_schema:resource_opts_fields(), lists:filter(fun({Field, _}) -> lists:member(Field, SupportedFields) end, CreationOpts); fields(action_field) -> {kafka_producer, diff --git a/apps/emqx_connector/src/schema/emqx_connector_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_schema.erl index 22eb523be..070c1a165 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_schema.erl @@ -30,6 +30,8 @@ -export([connector_type_to_bridge_types/1]). +-export([resource_opts_fields/0, resource_opts_fields/1]). + -if(?EMQX_RELEASE_EDITION == ee). enterprise_api_schemas(Method) -> %% We *must* do this to ensure the module is really loaded, especially when we use @@ -296,6 +298,24 @@ desc(connectors) -> desc(_) -> undefined. +resource_opts_fields() -> + resource_opts_fields(_Overrides = []). + +resource_opts_fields(Overrides) -> + %% Note: these don't include buffer-related configurations because buffer workers are + %% tied to the action. + ConnectorROFields = [ + health_check_interval, + query_mode, + request_ttl, + start_after_created, + start_timeout + ], + lists:filter( + fun({Key, _Sc}) -> lists:member(Key, ConnectorROFields) end, + emqx_resource_schema:create_opts(Overrides) + ). + %%====================================================================================== %% Helper Functions %%====================================================================================== From 3aa8044475c846c13207aa09ab42608cb82522d4 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Mon, 20 Nov 2023 16:40:27 +0100 Subject: [PATCH 026/101] fix(action): upgrade and downgrade strategy This commit fixes the upgrade and downgrade strategy when upgrading from a bridge V1 to connector and action or the other way around so that the custom callbacks get the complete unchanged input instead of the result of the automatic translation. The automatic translation is used if the callback is not defined. --- apps/emqx_bridge/src/emqx_action_info.erl | 73 +++++++++---------- apps/emqx_bridge/src/emqx_bridge_v2.erl | 21 +++++- ...mqx_bridge_azure_event_hub_action_info.erl | 14 ++-- .../src/emqx_bridge_kafka_action_info.erl | 18 +++-- .../src/schema/emqx_connector_schema.erl | 58 +++++++++------ 5 files changed, 107 insertions(+), 77 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index b5d88c4d8..c48c29129 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -26,8 +26,11 @@ bridge_v1_type_to_action_type/1, is_action_type/1, registered_schema_modules/0, - action_to_bridge_v1_fixup/2, - bridge_v1_to_action_fixup/2 + connector_action_config_to_bridge_v1_config/3, + has_custom_connector_action_config_to_bridge_v1_config/1, + bridge_v1_config_to_action_config/3, + has_custom_bridge_v1_config_to_action_config/1, + transform_bridge_v1_config_to_action_config/4 ]). -callback bridge_v1_type_name() -> atom(). @@ -35,14 +38,20 @@ -callback connector_type_name() -> atom(). 
-callback schema_module() -> atom(). %% Define this if the automatic config downgrade is not enough for the bridge. --callback action_to_bridge_v1_fixup(map()) -> map(). +-callback connector_action_config_to_bridge_v1_config( + ConnectorConfig :: map(), ActionConfig :: map() +) -> map(). %% Define this if the automatic config upgrade is not enough for the bridge. --callback bridge_v1_to_action_fixup(map()) -> map(). +%% If you want to make use of the automatic config upgrade, you can call +%% emqx_action_info:transform_bridge_v1_config_to_action_config/4 in your +%% implementation and do some adjustments on the result. +-callback bridge_v1_config_to_action_config(BridgeV1Config :: map(), ConnectorName :: binary()) -> + map(). -optional_callbacks([ bridge_v1_type_name/0, - action_to_bridge_v1_fixup/1, - bridge_v1_to_action_fixup/1 + connector_action_config_to_bridge_v1_config/2, + bridge_v1_config_to_action_config/2 ]). %% ==================================================================== @@ -121,45 +130,29 @@ registered_schema_modules() -> Schemas = maps:get(action_type_to_schema_module, InfoMap), maps:to_list(Schemas). -action_to_bridge_v1_fixup(ActionOrBridgeType, Config) -> +has_custom_connector_action_config_to_bridge_v1_config(ActionOrBridgeType) -> Module = get_action_info_module(ActionOrBridgeType), - case erlang:function_exported(Module, action_to_bridge_v1_fixup, 1) of - true -> - Module:action_to_bridge_v1_fixup(Config); - false -> - Config - end. + erlang:function_exported(Module, connector_action_config_to_bridge_v1_config, 2). -bridge_v1_to_action_fixup(ActionOrBridgeType, Config0) -> +connector_action_config_to_bridge_v1_config(ActionOrBridgeType, ConnectorConfig, ActionConfig) -> Module = get_action_info_module(ActionOrBridgeType), - case erlang:function_exported(Module, bridge_v1_to_action_fixup, 1) of - true -> - Config1 = Module:bridge_v1_to_action_fixup(Config0), - common_bridge_v1_to_action_adapter(Config1); - false -> - common_bridge_v1_to_action_adapter(Config0) - end. + %% should only be called if defined + Module:connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig). -%% ==================================================================== -%% Helper fns -%% ==================================================================== +has_custom_bridge_v1_config_to_action_config(ActionOrBridgeType) -> + Module = get_action_info_module(ActionOrBridgeType), + erlang:function_exported(Module, bridge_v1_config_to_action_config, 2). -common_bridge_v1_to_action_adapter(RawConfig) -> - TopKeys = [ - <<"enable">>, - <<"connector">>, - <<"local_topic">>, - <<"resource_opts">>, - <<"description">>, - <<"parameters">> - ], - TopMap = maps:with(TopKeys, RawConfig), - RestMap = maps:without(TopKeys, RawConfig), - %% Other parameters should be stuffed into `parameters' - emqx_utils_maps:update_if_present( - <<"parameters">>, - fun(Old) -> emqx_utils_maps:deep_merge(Old, RestMap) end, - TopMap +bridge_v1_config_to_action_config(ActionOrBridgeType, BridgeV1Config, ConnectorName) -> + Module = get_action_info_module(ActionOrBridgeType), + %% should only be called if defined + Module:bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName). + +transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, ConnectorConfSchemaMod, ConnectorConfSchemaName +) -> + emqx_connector_schema:transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, ConnectorConfSchemaMod, ConnectorConfSchemaName ). 
%% ==================================================================== diff --git a/apps/emqx_bridge/src/emqx_bridge_v2.erl b/apps/emqx_bridge/src/emqx_bridge_v2.erl index f8939df8c..b419e2049 100644 --- a/apps/emqx_bridge/src/emqx_bridge_v2.erl +++ b/apps/emqx_bridge/src/emqx_bridge_v2.erl @@ -1102,10 +1102,23 @@ bridge_v1_lookup_and_transform_helper( <<"actions">>, emqx_bridge_v2_schema ), - BridgeV1Config1 = maps:remove(<<"connector">>, BridgeV2RawConfig2), - BridgeV1Config2 = maps:merge(BridgeV1Config1, ConnectorRawConfig2), - BridgeV1Config3 = emqx_action_info:action_to_bridge_v1_fixup(BridgeV2Type, BridgeV1Config2), - BridgeV1Tmp = maps:put(raw_config, BridgeV1Config3, BridgeV2), + BridgeV1ConfigFinal = + case + emqx_action_info:has_custom_connector_action_config_to_bridge_v1_config(BridgeV1Type) + of + false -> + BridgeV1Config1 = maps:remove(<<"connector">>, BridgeV2RawConfig2), + %% Move parameters to the top level + ParametersMap = maps:get(<<"parameters">>, BridgeV1Config1, #{}), + BridgeV1Config2 = maps:remove(<<"parameters">>, BridgeV1Config1), + BridgeV1Config3 = emqx_utils_maps:deep_merge(BridgeV1Config2, ParametersMap), + emqx_utils_maps:deep_merge(ConnectorRawConfig2, BridgeV1Config3); + true -> + emqx_action_info:connector_action_config_to_bridge_v1_config( + BridgeV1Type, ConnectorRawConfig2, BridgeV2RawConfig2 + ) + end, + BridgeV1Tmp = maps:put(raw_config, BridgeV1ConfigFinal, BridgeV2), BridgeV1 = maps:remove(status, BridgeV1Tmp), BridgeV2Status = maps:get(status, BridgeV2, undefined), BridgeV2Error = maps:get(error, BridgeV2, undefined), diff --git a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl index fd1f4f4ff..cd35a7dda 100644 --- a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl +++ b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl @@ -11,8 +11,8 @@ action_type_name/0, connector_type_name/0, schema_module/0, - action_to_bridge_v1_fixup/1, - bridge_v1_to_action_fixup/1 + connector_action_config_to_bridge_v1_config/2, + bridge_v1_config_to_action_config/2 ]). bridge_v1_type_name() -> azure_event_hub_producer. @@ -23,8 +23,10 @@ connector_type_name() -> azure_event_hub_producer. schema_module() -> emqx_bridge_azure_event_hub. -action_to_bridge_v1_fixup(Config) -> - emqx_bridge_kafka_action_info:action_to_bridge_v1_fixup(Config). +connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) -> + emqx_bridge_kafka_action_info:connector_action_config_to_bridge_v1_config( + ConnectorConfig, ActionConfig + ). -bridge_v1_to_action_fixup(Config) -> - emqx_bridge_kafka_action_info:bridge_v1_to_action_fixup(Config). +bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName) -> + bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName). diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl index 8730c1541..154371807 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl @@ -11,8 +11,8 @@ action_type_name/0, connector_type_name/0, schema_module/0, - action_to_bridge_v1_fixup/1, - bridge_v1_to_action_fixup/1 + connector_action_config_to_bridge_v1_config/2, + bridge_v1_config_to_action_config/2 ]). bridge_v1_type_name() -> kafka. @@ -23,17 +23,23 @@ connector_type_name() -> kafka_producer. 
schema_module() -> emqx_bridge_kafka. -action_to_bridge_v1_fixup(Config) -> - emqx_utils_maps:rename(<<"parameters">>, <<"kafka">>, Config). +connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) -> + BridgeV1Config1 = maps:remove(<<"connector">>, ActionConfig), + BridgeV1Config2 = emqx_utils_maps:deep_merge(ConnectorConfig, BridgeV1Config1), + emqx_utils_maps:rename(<<"parameters">>, <<"kafka">>, BridgeV1Config2). -bridge_v1_to_action_fixup(Config0) -> +bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName) -> + BridgeV1Conf, + Config0 = emqx_action_info:transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, schema_module(), kafka_producer + ), KafkaMap = emqx_utils_maps:deep_get([<<"parameters">>, <<"kafka">>], Config0), Config1 = emqx_utils_maps:deep_remove([<<"parameters">>, <<"kafka">>], Config0), Config2 = maps:put(<<"parameters">>, KafkaMap, Config1), maps:with(producer_action_field_keys(), Config2). %%------------------------------------------------------------------------------------------ -%% Internal helper fns +%% Internal helper functions %%------------------------------------------------------------------------------------------ producer_action_field_keys() -> diff --git a/apps/emqx_connector/src/schema/emqx_connector_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_schema.erl index a2a79712a..712c4938e 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_schema.erl @@ -22,7 +22,10 @@ -import(hoconsc, [mk/2, ref/2]). --export([transform_bridges_v1_to_connectors_and_bridges_v2/1]). +-export([ + transform_bridges_v1_to_connectors_and_bridges_v2/1, + transform_bridge_v1_config_to_action_config/4 +]). -export([roots/0, fields/1, desc/1, namespace/0, tags/0]). @@ -124,23 +127,39 @@ split_bridge_to_connector_and_action( #{<<"connector">> := ConnectorName0} -> ConnectorName0; _ -> generate_connector_name(ConnectorsMap, BridgeName, 0) end, - %% Add connector field to action map - ActionMap = transform_bridge_v1_to_action( - BridgeType, BridgeV1Conf, ConnectorName, ConnectorFields - ), + ActionMap = + case emqx_action_info:has_custom_bridge_v1_config_to_action_config(BridgeType) of + true -> + emqx_action_info:bridge_v1_config_to_action_config( + BridgeType, BridgeV1Conf, ConnectorName + ); + false -> + transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, ConnectorFields + ) + end, {BridgeType, BridgeName, ActionMap, ConnectorName, ConnectorMap}. -transform_bridge_v1_to_action(BridgeType, BridgeV1Conf, ConnectorName, ConnectorFields) -> - BridgeV1ConfKey = <<"__bridge_v1_conf__">>, +transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, ConnectorConfSchemaMod, ConnectorConfSchemaName +) -> + ConnectorFields = ConnectorConfSchemaMod:fields(ConnectorConfSchemaName), + transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, ConnectorFields + ). 
+ +transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, ConnectorFields +) -> TopKeys = [ <<"enable">>, <<"connector">>, <<"local_topic">>, <<"resource_opts">>, <<"description">>, - <<"parameters">>, - BridgeV1ConfKey + <<"parameters">> ], + TopKeysMap = maps:from_keys(TopKeys, true), %% Remove connector fields ActionMap0 = lists:foldl( fun @@ -148,7 +167,11 @@ transform_bridge_v1_to_action(BridgeType, BridgeV1Conf, ConnectorName, Connector %% Enable filed is used in both ToTransformSoFar; ({ConnectorFieldName, _Spec}, ToTransformSoFar) -> - case maps:is_key(to_bin(ConnectorFieldName), BridgeV1Conf) of + ConnectorFieldNameBin = to_bin(ConnectorFieldName), + case + maps:is_key(ConnectorFieldNameBin, BridgeV1Conf) andalso + (not maps:is_key(ConnectorFieldNameBin, TopKeysMap)) + of true -> maps:remove(to_bin(ConnectorFieldName), ToTransformSoFar); false -> @@ -158,19 +181,12 @@ transform_bridge_v1_to_action(BridgeType, BridgeV1Conf, ConnectorName, Connector BridgeV1Conf, ConnectorFields ), - %% Add special key as the whole original bridge config might be needed by - %% the fixup function - ActionMap1 = emqx_utils_maps:deep_put([BridgeV1ConfKey], ActionMap0, BridgeV1Conf), %% Add the connector field - ActionMap2 = maps:put(<<"connector">>, ConnectorName, ActionMap1), - TopMap = maps:with(TopKeys, ActionMap2), - RestMap = maps:without(TopKeys, ActionMap2), + ActionMap1 = maps:put(<<"connector">>, ConnectorName, ActionMap0), + TopMap = maps:with(TopKeys, ActionMap1), + RestMap = maps:without(TopKeys, ActionMap1), %% Other parameters should be stuffed into `parameters' - ActionMap = emqx_utils_maps:deep_merge(TopMap, #{<<"parameters">> => RestMap}), - %% Run the fixup callback if it is defined - FixedActionMap = emqx_action_info:bridge_v1_to_action_fixup(BridgeType, ActionMap), - %% remove the special key as it is not needed anymore - maps:without([BridgeV1ConfKey], FixedActionMap). + emqx_utils_maps:deep_merge(TopMap, #{<<"parameters">> => RestMap}). 
generate_connector_name(ConnectorsMap, BridgeName, Attempt) -> ConnectorNameList = From 8ec3b1db5df25dc5d024944eed15acc8d87af8ee Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Mon, 20 Nov 2023 16:51:39 +0100 Subject: [PATCH 027/101] fix(emqx_connection): handle socket activation error return --- apps/emqx/src/emqx_connection.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/emqx/src/emqx_connection.erl b/apps/emqx/src/emqx_connection.erl index db36fbea9..31281b8c2 100644 --- a/apps/emqx/src/emqx_connection.erl +++ b/apps/emqx/src/emqx_connection.erl @@ -555,7 +555,7 @@ handle_msg({quic, Data, _Stream, #{len := Len}}, State) when is_binary(Data) -> handle_msg(check_cache, #state{limiter_buffer = Cache} = State) -> case queue:peek(Cache) of empty -> - activate_socket(State); + handle_info(activate_socket, State); {value, #pending_req{need = Needs, data = Data, next = Next}} -> State2 = State#state{limiter_buffer = queue:drop(Cache)}, check_limiter(Needs, Data, Next, [check_cache], State2) From 9684e79ee0d0aea29cf480cb870d163097d1f24d Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Mon, 20 Nov 2023 22:54:40 +0700 Subject: [PATCH 028/101] fix(sessds): ensure dup flag is on for replayed messages --- apps/emqx/src/emqx_persistent_message_ds_replayer.erl | 3 ++- apps/emqx/test/emqx_persistent_session_SUITE.erl | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/apps/emqx/src/emqx_persistent_message_ds_replayer.erl b/apps/emqx/src/emqx_persistent_message_ds_replayer.erl index 2f5348938..64b9cabb4 100644 --- a/apps/emqx/src/emqx_persistent_message_ds_replayer.erl +++ b/apps/emqx/src/emqx_persistent_message_ds_replayer.erl @@ -266,8 +266,9 @@ replay_range( _ -> lists:nthtail(range_size(First, FirstUnacked), Messages) end, + MessagesReplay = [emqx_message:set_flag(dup, true, Msg) || Msg <- MessagesUnacked], %% Asserting that range is consistent with the message storage state. - {Replies, Until} = publish(FirstUnacked, MessagesUnacked), + {Replies, Until} = publish(FirstUnacked, MessagesReplay), %% Again, we need to keep the iterator pointing past the end of the %% range, so that we can pick up where we left off. 
Range = Range0#ds_pubrange{iterator = ItNext}, diff --git a/apps/emqx/test/emqx_persistent_session_SUITE.erl b/apps/emqx/test/emqx_persistent_session_SUITE.erl index 3f4cbcd28..77b625f05 100644 --- a/apps/emqx/test/emqx_persistent_session_SUITE.erl +++ b/apps/emqx/test/emqx_persistent_session_SUITE.erl @@ -675,6 +675,12 @@ t_publish_many_while_client_is_gone_qos1(Config) -> ?assert(NMsgs2 > NPubs2, Msgs2), ?assert(NMsgs2 >= NPubs - NAcked, Msgs2), NSame = NMsgs2 - NPubs2, + ?assert( + lists:all(fun(#{dup := Dup}) -> Dup end, lists:sublist(Msgs2, NSame)) + ), + ?assertNot( + lists:all(fun(#{dup := Dup}) -> Dup end, lists:nthtail(NSame, Msgs2)) + ), ?assertEqual( [maps:with([packet_id, topic, payload], M) || M <- lists:nthtail(NMsgs1 - NSame, Msgs1)], [maps:with([packet_id, topic, payload], M) || M <- lists:sublist(Msgs2, NSame)] From ec19247271aac074d67f8a9e81b0e9542a4ff0be Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Mon, 20 Nov 2023 16:55:26 +0100 Subject: [PATCH 029/101] refactor: rename limiter buffer related messages and var names --- apps/emqx/src/emqx_connection.erl | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/apps/emqx/src/emqx_connection.erl b/apps/emqx/src/emqx_connection.erl index 31281b8c2..11d42f9dd 100644 --- a/apps/emqx/src/emqx_connection.erl +++ b/apps/emqx/src/emqx_connection.erl @@ -552,13 +552,13 @@ handle_msg({quic, Data, _Stream, #{len := Len}}, State) when is_binary(Data) -> inc_counter(incoming_bytes, Len), ok = emqx_metrics:inc('bytes.received', Len), when_bytes_in(Len, Data, State); -handle_msg(check_cache, #state{limiter_buffer = Cache} = State) -> - case queue:peek(Cache) of +handle_msg(check_limiter_buffer, #state{limiter_buffer = Buffer} = State) -> + case queue:peek(Buffer) of empty -> handle_info(activate_socket, State); {value, #pending_req{need = Needs, data = Data, next = Next}} -> - State2 = State#state{limiter_buffer = queue:drop(Cache)}, - check_limiter(Needs, Data, Next, [check_cache], State2) + State2 = State#state{limiter_buffer = queue:drop(Buffer)}, + check_limiter(Needs, Data, Next, [check_limiter_buffer], State2) end; handle_msg( {incoming, Packet = ?CONNECT_PACKET(ConnPkt)}, @@ -1036,13 +1036,13 @@ check_limiter( Data, WhenOk, _Msgs, - #state{limiter_buffer = Cache} = State + #state{limiter_buffer = Buffer} = State ) -> %% if there has a retry timer, - %% cache the operation and execute it after the retry is over - %% the maximum length of the cache queue is equal to the active_n + %% Buffer the operation and execute it after the retry is over + %% the maximum length of the buffer queue is equal to the active_n New = #pending_req{need = Needs, data = Data, next = WhenOk}, - {ok, State#state{limiter_buffer = queue:in(New, Cache)}}. + {ok, State#state{limiter_buffer = queue:in(New, Buffer)}}. %% try to perform a retry -spec retry_limiter(state()) -> _. 
@@ -1053,7 +1053,7 @@ retry_limiter(#state{limiter = Limiter} = State) -> {ok, Limiter2} -> Next( Data, - [check_cache], + [check_limiter_buffer], State#state{ limiter = Limiter2, limiter_timer = undefined From edbfe090c9eb2ad0def6094cc93841185060766b Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Mon, 20 Nov 2023 17:23:51 +0100 Subject: [PATCH 030/101] feat: add custom callback for creating connector from bridge V1 config --- apps/emqx_bridge/src/emqx_action_info.erl | 14 ++++++ .../src/schema/emqx_connector_schema.erl | 44 ++++++++++++------- 2 files changed, 41 insertions(+), 17 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index c48c29129..a45b9f138 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -28,6 +28,8 @@ registered_schema_modules/0, connector_action_config_to_bridge_v1_config/3, has_custom_connector_action_config_to_bridge_v1_config/1, + bridge_v1_config_to_connector_config/2, + has_custom_bridge_v1_config_to_connector_config/1, bridge_v1_config_to_action_config/3, has_custom_bridge_v1_config_to_action_config/1, transform_bridge_v1_config_to_action_config/4 @@ -41,6 +43,8 @@ -callback connector_action_config_to_bridge_v1_config( ConnectorConfig :: map(), ActionConfig :: map() ) -> map(). +%% Define this if the automatic config upgrade is not enough for the connector. +-callback bridge_v1_config_to_connector_config(BridgeV1Config :: map()) -> map(). %% Define this if the automatic config upgrade is not enough for the bridge. %% If you want to make use of the automatic config upgrade, you can call %% emqx_action_info:transform_bridge_v1_config_to_action_config/4 in your @@ -51,6 +55,7 @@ -optional_callbacks([ bridge_v1_type_name/0, connector_action_config_to_bridge_v1_config/2, + bridge_v1_config_to_connector_config/1, bridge_v1_config_to_action_config/2 ]). @@ -139,6 +144,15 @@ connector_action_config_to_bridge_v1_config(ActionOrBridgeType, ConnectorConfig, %% should only be called if defined Module:connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig). +has_custom_bridge_v1_config_to_connector_config(ActionOrBridgeType) -> + Module = get_action_info_module(ActionOrBridgeType), + erlang:function_exported(Module, bridge_v1_config_to_connector_config, 1). + +bridge_v1_config_to_connector_config(ActionOrBridgeType, BridgeV1Config) -> + Module = get_action_info_module(ActionOrBridgeType), + %% should only be called if defined + Module:bridge_v1_config_to_connector_config(BridgeV1Config). + has_custom_bridge_v1_config_to_action_config(ActionOrBridgeType) -> Module = get_action_info_module(ActionOrBridgeType), erlang:function_exported(Module, bridge_v1_config_to_action_config, 2). 
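To make the new hooks concrete, a hypothetical action info module for an
imaginary `foobar' bridge could opt into the custom upgrade/downgrade path as
sketched below. The module, type and field names are invented for
illustration only; the callback names, arities and the generic
transform_bridge_v1_config_to_action_config/4 helper are the ones added in
this patch set.

    -module(emqx_bridge_foobar_action_info).

    -behaviour(emqx_action_info).

    -export([
        bridge_v1_type_name/0,
        action_type_name/0,
        connector_type_name/0,
        schema_module/0,
        bridge_v1_config_to_connector_config/1,
        bridge_v1_config_to_action_config/2
    ]).

    bridge_v1_type_name() -> foobar.
    action_type_name() -> foobar.
    connector_type_name() -> foobar.
    schema_module() -> emqx_bridge_foobar.

    %% Called instead of the automatic field split when upgrading a bridge V1
    %% config into a connector config (the field names kept here are assumptions).
    bridge_v1_config_to_connector_config(BridgeV1Config) ->
        maps:with([<<"enable">>, <<"server">>, <<"ssl">>], BridgeV1Config).

    %% Reuse the generic upgrade helper; a real implementation would typically
    %% adjust the resulting map afterwards, as the Kafka module does.
    %% `emqx_bridge_foobar' and `config_connector' stand in for the connector
    %% schema module and struct name and are assumptions.
    bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName) ->
        emqx_action_info:transform_bridge_v1_config_to_action_config(
            BridgeV1Config, ConnectorName, emqx_bridge_foobar, config_connector
        ).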
diff --git a/apps/emqx_connector/src/schema/emqx_connector_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_schema.erl index 712c4938e..10bf583c3 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_schema.erl @@ -103,24 +103,34 @@ bridge_configs_to_transform( split_bridge_to_connector_and_action( {ConnectorsMap, {BridgeType, BridgeName, BridgeV1Conf, ConnectorFields, PreviousRawConfig}} ) -> - %% Get connector fields from bridge config - ConnectorMap = lists:foldl( - fun({ConnectorFieldName, _Spec}, ToTransformSoFar) -> - case maps:is_key(to_bin(ConnectorFieldName), BridgeV1Conf) of - true -> - NewToTransform = maps:put( - to_bin(ConnectorFieldName), - maps:get(to_bin(ConnectorFieldName), BridgeV1Conf), - ToTransformSoFar - ), - NewToTransform; - false -> - ToTransformSoFar - end + ConnectorMap = + case emqx_action_info:has_custom_bridge_v1_config_to_connector_config(BridgeType) of + true -> + emqx_action_info:bridge_v1_config_to_connector_config( + BridgeType, BridgeV1Conf + ); + false -> + %% We do an automatic transfomation to get the connector config + %% if the callback is not defined. + %% Get connector fields from bridge config + lists:foldl( + fun({ConnectorFieldName, _Spec}, ToTransformSoFar) -> + case maps:is_key(to_bin(ConnectorFieldName), BridgeV1Conf) of + true -> + NewToTransform = maps:put( + to_bin(ConnectorFieldName), + maps:get(to_bin(ConnectorFieldName), BridgeV1Conf), + ToTransformSoFar + ), + NewToTransform; + false -> + ToTransformSoFar + end + end, + #{}, + ConnectorFields + ) end, - #{}, - ConnectorFields - ), %% Generate a connector name, if needed. Avoid doing so if there was a previous config. ConnectorName = case PreviousRawConfig of From 7f078295c1d3a60803c885bd4b38af63702cf646 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Mon, 20 Nov 2023 17:44:39 +0100 Subject: [PATCH 031/101] docs: add changelog for PR 11987 --- changes/ce/fix-11987.en.md | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 changes/ce/fix-11987.en.md diff --git a/changes/ce/fix-11987.en.md b/changes/ce/fix-11987.en.md new file mode 100644 index 000000000..4d85cff41 --- /dev/null +++ b/changes/ce/fix-11987.en.md @@ -0,0 +1,3 @@ +Fix connection crash when trying to set TCP/SSL socket `active_n` option. + +Prior to this fix, if a socket is already closed when connection process tries to set `active_n` option, it causes a `case_clause` crash. 
From b02711af79d6b914beb55f6aefaa18bf60860c75 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Mon, 20 Nov 2023 17:47:20 +0100 Subject: [PATCH 032/101] refactor(emqx_ws_connection): rename cache to buffer for limiter --- apps/emqx/src/emqx_ws_connection.erl | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/apps/emqx/src/emqx_ws_connection.erl b/apps/emqx/src/emqx_ws_connection.erl index 37ce72d74..07329721a 100644 --- a/apps/emqx/src/emqx_ws_connection.erl +++ b/apps/emqx/src/emqx_ws_connection.erl @@ -94,7 +94,7 @@ limiter :: container(), %% cache operation when overload - limiter_cache :: queue:queue(cache()), + limiter_buffer :: queue:queue(cache()), %% limiter timers limiter_timer :: undefined | reference() @@ -326,7 +326,7 @@ websocket_init([Req, Opts]) -> zone = Zone, listener = {Type, Listener}, limiter_timer = undefined, - limiter_cache = queue:new() + limiter_buffer = queue:new() }, hibernate}; {denny, Reason} -> @@ -462,13 +462,13 @@ websocket_info( State ) -> return(retry_limiter(State)); -websocket_info(check_cache, #state{limiter_cache = Cache} = State) -> - case queue:peek(Cache) of +websocket_info(check_limiter_buffer, #state{limiter_buffer = Buffer} = State) -> + case queue:peek(Buffer) of empty -> return(enqueue({active, true}, State#state{sockstate = running})); {value, #cache{need = Needs, data = Data, next = Next}} -> - State2 = State#state{limiter_cache = queue:drop(Cache)}, - return(check_limiter(Needs, Data, Next, [check_cache], State2)) + State2 = State#state{limiter_buffer = queue:drop(Buffer)}, + return(check_limiter(Needs, Data, Next, [check_limiter_buffer], State2)) end; websocket_info({timeout, TRef, Msg}, State) when is_reference(TRef) -> handle_timeout(TRef, Msg, State); @@ -630,10 +630,10 @@ check_limiter( Data, WhenOk, _Msgs, - #state{limiter_cache = Cache} = State + #state{limiter_buffer = Buffer} = State ) -> New = #cache{need = Needs, data = Data, next = WhenOk}, - State#state{limiter_cache = queue:in(New, Cache)}. + State#state{limiter_buffer = queue:in(New, Buffer)}. -spec retry_limiter(state()) -> state(). retry_limiter(#state{limiter = Limiter} = State) -> @@ -644,7 +644,7 @@ retry_limiter(#state{limiter = Limiter} = State) -> {ok, Limiter2} -> Next( Data, - [check_cache], + [check_limiter_buffer], State#state{ limiter = Limiter2, limiter_timer = undefined From 110a5a4896567423a23f67fb3c3a8c5e531994c9 Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Mon, 20 Nov 2023 23:43:45 +0700 Subject: [PATCH 033/101] feat(schema): introduce separate root for new session persistence With some knobs to choose a storage backend. Support only builtin RocksDB-based backend with minimal configuration for now. 
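For illustration, a minimal sketch of an application spec that enables the new
root, in the style used by the updated test suites below. The nested
storage/builtin keys mirror the new schema fields; since the builtin backend
is the default, spelling it out is optional.

    app_specs() ->
        [
            emqx_durable_storage,
            {emqx, #{
                config => #{
                    session_persistence => #{
                        enable => true,
                        %% optional: the builtin RocksDB-backed storage is
                        %% already the default backend
                        storage => #{builtin => #{enable => true}}
                    }
                }
            }}
        ].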
--- .../emqx_persistent_session_ds_SUITE.erl | 2 +- apps/emqx/src/emqx_persistent_message.erl | 27 +++--- apps/emqx/src/emqx_schema.erl | 83 +++++++++++++++++-- apps/emqx/src/emqx_session.erl | 4 +- .../test/emqx_persistent_messages_SUITE.erl | 2 +- .../test/emqx_persistent_session_SUITE.erl | 24 +++--- ...mqx_persistent_session_ds_router_SUITE.erl | 2 +- apps/emqx_conf/src/emqx_conf_cli.erl | 3 +- .../src/emqx_mgmt_data_backup.erl | 1 + rel/i18n/emqx_schema.hocon | 13 +++ 10 files changed, 124 insertions(+), 37 deletions(-) diff --git a/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl b/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl index 6c5fdc56e..56246e743 100644 --- a/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl +++ b/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl @@ -79,7 +79,7 @@ cluster(#{n := N}) -> app_specs() -> [ emqx_durable_storage, - {emqx, "persistent_session_store = {ds = true}"} + {emqx, "session_persistence = {enable = true}"} ]. get_mqtt_port(Node, Type) -> diff --git a/apps/emqx/src/emqx_persistent_message.erl b/apps/emqx/src/emqx_persistent_message.erl index 632ff2a27..82a345eef 100644 --- a/apps/emqx/src/emqx_persistent_message.erl +++ b/apps/emqx/src/emqx_persistent_message.erl @@ -19,7 +19,7 @@ -include("emqx.hrl"). -export([init/0]). --export([is_store_enabled/0]). +-export([is_persistence_enabled/0]). %% Message persistence -export([ @@ -28,9 +28,8 @@ -define(PERSISTENT_MESSAGE_DB, emqx_persistent_message). -%% FIXME -define(WHEN_ENABLED(DO), - case is_store_enabled() of + case is_persistence_enabled() of true -> DO; false -> {skipped, disabled} end @@ -40,18 +39,26 @@ init() -> ?WHEN_ENABLED(begin - ok = emqx_ds:open_db(?PERSISTENT_MESSAGE_DB, #{ - backend => builtin, - storage => {emqx_ds_storage_bitfield_lts, #{}} - }), + Backend = storage_backend(), + ok = emqx_ds:open_db(?PERSISTENT_MESSAGE_DB, Backend), ok = emqx_persistent_session_ds_router:init_tables(), ok = emqx_persistent_session_ds:create_tables(), ok end). --spec is_store_enabled() -> boolean(). -is_store_enabled() -> - emqx_config:get([persistent_session_store, ds]). +-spec is_persistence_enabled() -> boolean(). +is_persistence_enabled() -> + emqx_config:get([session_persistence, enable]). + +-spec storage_backend() -> emqx_ds:create_db_opts(). +storage_backend() -> + storage_backend(emqx_config:get([session_persistence, storage])). + +storage_backend(#{builtin := #{enable := true}}) -> + #{ + backend => builtin, + storage => {emqx_ds_storage_bitfield_lts, #{}} + }. %%-------------------------------------------------------------------- diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index 3ad03c4d4..ef34d3e8f 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -294,7 +294,19 @@ roots(low) -> {"persistent_session_store", sc( ref("persistent_session_store"), - #{importance => ?IMPORTANCE_HIDDEN} + #{ + %% NOTE + %% Due to some quirks in interaction between `emqx_config` and + %% `hocon_tconf`, schema roots cannot currently be deprecated. + importance => ?IMPORTANCE_HIDDEN + } + )}, + {"session_persistence", + sc( + ref("session_persistence"), + #{ + importance => ?IMPORTANCE_HIDDEN + } )}, {"trace", sc( @@ -309,11 +321,12 @@ roots(low) -> ]. 
fields("persistent_session_store") -> + Deprecated = #{deprecated => {since, "5.4.0"}}, [ {"enabled", sc( boolean(), - #{ + Deprecated#{ default => false, %% TODO(5.2): change field name to 'enable' and keep 'enabled' as an alias aliases => [enable], @@ -323,7 +336,7 @@ fields("persistent_session_store") -> {"ds", sc( boolean(), - #{ + Deprecated#{ default => false, importance => ?IMPORTANCE_HIDDEN } @@ -331,7 +344,7 @@ fields("persistent_session_store") -> {"on_disc", sc( boolean(), - #{ + Deprecated#{ default => true, desc => ?DESC(persistent_store_on_disc) } @@ -339,7 +352,7 @@ fields("persistent_session_store") -> {"ram_cache", sc( boolean(), - #{ + Deprecated#{ default => false, desc => ?DESC(persistent_store_ram_cache) } @@ -347,7 +360,7 @@ fields("persistent_session_store") -> {"backend", sc( hoconsc:union([ref("persistent_session_builtin")]), - #{ + Deprecated#{ default => #{ <<"type">> => <<"builtin">>, <<"session">> => @@ -363,7 +376,7 @@ fields("persistent_session_store") -> {"max_retain_undelivered", sc( duration(), - #{ + Deprecated#{ default => <<"1h">>, desc => ?DESC(persistent_session_store_max_retain_undelivered) } @@ -371,7 +384,7 @@ fields("persistent_session_store") -> {"message_gc_interval", sc( duration(), - #{ + Deprecated#{ default => <<"1h">>, desc => ?DESC(persistent_session_store_message_gc_interval) } @@ -379,7 +392,7 @@ fields("persistent_session_store") -> {"session_message_gc_interval", sc( duration(), - #{ + Deprecated#{ default => <<"1m">>, desc => ?DESC(persistent_session_store_session_message_gc_interval) } @@ -1740,6 +1753,45 @@ fields("trace") -> importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(fields_trace_payload_encode) })} + ]; +fields("session_persistence") -> + [ + {"enable", + sc( + boolean(), #{ + desc => ?DESC(session_persistence_enable), + default => false + } + )}, + {"storage", + sc( + ref("session_storage_backend"), #{ + desc => ?DESC(session_persistence_storage), + validator => fun validate_backend_enabled/1, + default => #{ + <<"builtin">> => #{} + } + } + )} + ]; +fields("session_storage_backend") -> + [ + {"builtin", + sc(ref("session_storage_backend_builtin"), #{ + desc => ?DESC(session_storage_backend_builtin), + required => {false, recursively} + })} + ]; +fields("session_storage_backend_builtin") -> + [ + {"enable", + sc( + boolean(), + #{ + desc => ?DESC(session_storage_backend_enable), + default => true + } + )} ]. mqtt_listener(Bind) -> @@ -1992,6 +2044,8 @@ desc("ocsp") -> "Per listener OCSP Stapling configuration."; desc("crl_cache") -> "Global CRL cache options."; +desc("session_persistence") -> + "Settings governing durable sessions persistence."; desc(_) -> undefined. @@ -2014,6 +2068,17 @@ ensure_list(V) -> filter(Opts) -> [{K, V} || {K, V} <- Opts, V =/= undefined]. +validate_backend_enabled(Config) -> + Enabled = maps:filter(fun(_, #{<<"enable">> := E}) -> E end, Config), + case maps:to_list(Enabled) of + [{_Type, _BackendConfig}] -> + ok; + _Conflicts = [_ | _] -> + {error, multiple_enabled_backends}; + _None = [] -> + {error, no_enabled_backend} + end. + %% @private This function defines the SSL opts which are commonly used by %% SSL listener and client. -spec common_ssl_opts_schema(map(), server | client) -> hocon_schema:field_schema(). diff --git a/apps/emqx/src/emqx_session.erl b/apps/emqx/src/emqx_session.erl index 4bae4ce03..ba49d3f85 100644 --- a/apps/emqx/src/emqx_session.erl +++ b/apps/emqx/src/emqx_session.erl @@ -617,11 +617,11 @@ maybe_mock_impl_mod(_) -> -spec choose_impl_mod(conninfo()) -> module(). 
choose_impl_mod(#{expiry_interval := EI}) -> - hd(choose_impl_candidates(EI, emqx_persistent_message:is_store_enabled())). + hd(choose_impl_candidates(EI, emqx_persistent_message:is_persistence_enabled())). -spec choose_impl_candidates(conninfo()) -> [module()]. choose_impl_candidates(#{expiry_interval := EI}) -> - choose_impl_candidates(EI, emqx_persistent_message:is_store_enabled()). + choose_impl_candidates(EI, emqx_persistent_message:is_persistence_enabled()). choose_impl_candidates(_, _IsPSStoreEnabled = false) -> [emqx_session_mem]; diff --git a/apps/emqx/test/emqx_persistent_messages_SUITE.erl b/apps/emqx/test/emqx_persistent_messages_SUITE.erl index 45cf85a05..922d7248f 100644 --- a/apps/emqx/test/emqx_persistent_messages_SUITE.erl +++ b/apps/emqx/test/emqx_persistent_messages_SUITE.erl @@ -291,7 +291,7 @@ publish(Node, Message) -> app_specs() -> [ emqx_durable_storage, - {emqx, "persistent_session_store {ds = true}"} + {emqx, "session_persistence {enable = true}"} ]. cluster() -> diff --git a/apps/emqx/test/emqx_persistent_session_SUITE.erl b/apps/emqx/test/emqx_persistent_session_SUITE.erl index 77b625f05..f3af45fe0 100644 --- a/apps/emqx/test/emqx_persistent_session_SUITE.erl +++ b/apps/emqx/test/emqx_persistent_session_SUITE.erl @@ -35,8 +35,8 @@ all() -> % NOTE % Tests are disabled while existing session persistence impl is being % phased out. - {group, persistent_store_disabled}, - {group, persistent_store_ds} + {group, persistence_disabled}, + {group, persistence_enabled} ]. %% A persistent session can be resumed in two ways: @@ -54,24 +54,24 @@ groups() -> TCs = emqx_common_test_helpers:all(?MODULE), TCsNonGeneric = [t_choose_impl], [ - {persistent_store_disabled, [{group, no_kill_connection_process}]}, - {persistent_store_ds, [{group, no_kill_connection_process}]}, + {persistence_disabled, [{group, no_kill_connection_process}]}, + {persistence_enabled, [{group, no_kill_connection_process}]}, {no_kill_connection_process, [], [{group, tcp}, {group, quic}, {group, ws}]}, {tcp, [], TCs}, {quic, [], TCs -- TCsNonGeneric}, {ws, [], TCs -- TCsNonGeneric} ]. -init_per_group(persistent_store_disabled, Config) -> +init_per_group(persistence_disabled, Config) -> [ - {emqx_config, "persistent_session_store { enabled = false }"}, - {persistent_store, false} + {emqx_config, "session_persistence { enable = false }"}, + {persistence, false} | Config ]; -init_per_group(persistent_store_ds, Config) -> +init_per_group(persistence_enabled, Config) -> [ - {emqx_config, "persistent_session_store { ds = true }"}, - {persistent_store, ds} + {emqx_config, "session_persistence { enable = true }"}, + {persistence, ds} | Config ]; init_per_group(Group, Config) when Group == tcp -> @@ -312,7 +312,7 @@ t_choose_impl(Config) -> {ok, _} = emqtt:ConnFun(Client), [ChanPid] = emqx_cm:lookup_channels(ClientId), ?assertEqual( - case ?config(persistent_store, Config) of + case ?config(persistence, Config) of false -> emqx_session_mem; ds -> emqx_persistent_session_ds end, @@ -878,7 +878,7 @@ t_multiple_subscription_matches(Config) -> ok = emqtt:disconnect(Client2). 
skip_ds_tc(Config) -> - case ?config(persistent_store, Config) of + case ?config(persistence, Config) of ds -> {skip, "Testcase not yet supported under 'emqx_persistent_session_ds' implementation"}; _ -> diff --git a/apps/emqx/test/emqx_persistent_session_ds_router_SUITE.erl b/apps/emqx/test/emqx_persistent_session_ds_router_SUITE.erl index 3e48173c3..cc50d66ee 100644 --- a/apps/emqx/test/emqx_persistent_session_ds_router_SUITE.erl +++ b/apps/emqx/test/emqx_persistent_session_ds_router_SUITE.erl @@ -38,7 +38,7 @@ init_per_suite(Config) -> AppSpecs = [ emqx_durable_storage, {emqx, #{ - config => #{persistent_session_store => #{ds => true}}, + config => #{session_persistence => #{enable => true}}, override_env => [{boot_modules, [broker]}] }} ], diff --git a/apps/emqx_conf/src/emqx_conf_cli.erl b/apps/emqx_conf/src/emqx_conf_cli.erl index fc00c7dc9..7e55ada4f 100644 --- a/apps/emqx_conf/src/emqx_conf_cli.erl +++ b/apps/emqx_conf/src/emqx_conf_cli.erl @@ -194,7 +194,7 @@ keys() -> emqx_config:get_root_names() -- hidden_roots(). drop_hidden_roots(Conf) -> - lists:foldl(fun(K, Acc) -> maps:remove(K, Acc) end, Conf, hidden_roots()). + maps:without(hidden_roots(), Conf). hidden_roots() -> [ @@ -202,6 +202,7 @@ hidden_roots() -> <<"stats">>, <<"broker">>, <<"persistent_session_store">>, + <<"session_persistence">>, <<"plugins">>, <<"zones">> ]. diff --git a/apps/emqx_management/src/emqx_mgmt_data_backup.erl b/apps/emqx_management/src/emqx_mgmt_data_backup.erl index e75e5f935..9825b26cf 100644 --- a/apps/emqx_management/src/emqx_mgmt_data_backup.erl +++ b/apps/emqx_management/src/emqx_mgmt_data_backup.erl @@ -52,6 +52,7 @@ <<"limiter">>, <<"log">>, <<"persistent_session_store">>, + <<"session_persistence">>, <<"prometheus">>, <<"crl_cache">>, <<"conn_congestion">>, diff --git a/rel/i18n/emqx_schema.hocon b/rel/i18n/emqx_schema.hocon index 3eb816f3b..d12f6a2d1 100644 --- a/rel/i18n/emqx_schema.hocon +++ b/rel/i18n/emqx_schema.hocon @@ -1555,4 +1555,17 @@ description.label: description.desc: """Descriptive text.""" +session_persistence_enable.desc: +"""Use durable storage for client sessions persistence. +If enabled, sessions configured to outlive client connections, along with their corresponding messages, will be durably stored and survive broker downtime.""" + +session_persistence_storage.desc: +"""Durable storage backend to use for session persistence.""" + +session_storage_backend_enable.desc: +"""Enable this backend.""" + +session_storage_backend_builtin.desc: +"""Builtin session storage backend utilizing embedded RocksDB key-value store.""" + } From 3909e0cc089ec270395b9fbf54c0eb9af72c1d95 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Mon, 20 Nov 2023 18:06:27 +0100 Subject: [PATCH 034/101] docs: add changelog for PR 11975 --- changes/ce/fix-11975.en.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 changes/ce/fix-11975.en.md diff --git a/changes/ce/fix-11975.en.md b/changes/ce/fix-11975.en.md new file mode 100644 index 000000000..cba1c3a17 --- /dev/null +++ b/changes/ce/fix-11975.en.md @@ -0,0 +1,5 @@ +Resolve redundant error logging on socket closure + +Addressed a race condition causing duplicate error logs when a socket is closed by both a peer and the server. +Dual socket close events from the OS and EMQX previously led to excessive error logging. +The fix improves event handling to avoid redundant error-level logging. 
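As a companion note to the session persistence schema change above: the new
storage-backend validator accepts exactly one enabled backend. The lines below
are a rough sketch of its expected results on raw, binary-keyed backend maps;
validate_backend_enabled/1 is private to emqx_schema, so this is illustrative
rather than runnable from a shell, and the second backend key is invented.

    %% exactly one enabled backend passes validation
    ok = validate_backend_enabled(#{<<"builtin">> => #{<<"enable">> => true}}).

    %% no enabled backend is rejected
    {error, no_enabled_backend} =
        validate_backend_enabled(#{<<"builtin">> => #{<<"enable">> => false}}).

    %% more than one enabled backend is rejected
    %% (<<"hypothetical">> is an invented key; only `builtin' exists in this patch)
    {error, multiple_enabled_backends} =
        validate_backend_enabled(#{
            <<"builtin">> => #{<<"enable">> => true},
            <<"hypothetical">> => #{<<"enable">> => true}
        }).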
From d214ae8772b0d1e0fb86ecd7c5614454bfbd0faf Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Mon, 20 Nov 2023 18:59:57 +0100 Subject: [PATCH 035/101] fix: problems found by @thalesmg in code review Co-authored-by: Thales Macedo Garitezi --- .../src/emqx_bridge_azure_event_hub_action_info.erl | 2 +- apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl | 1 - apps/emqx_connector/src/schema/emqx_connector_schema.erl | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl index cd35a7dda..c4f395041 100644 --- a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl +++ b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl @@ -29,4 +29,4 @@ connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) -> ). bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName) -> - bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName). + emqx_bridge_kafka_action_info:bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName). diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl index 154371807..38b57a2b2 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl @@ -29,7 +29,6 @@ connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) -> emqx_utils_maps:rename(<<"parameters">>, <<"kafka">>, BridgeV1Config2). bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName) -> - BridgeV1Conf, Config0 = emqx_action_info:transform_bridge_v1_config_to_action_config( BridgeV1Conf, ConnectorName, schema_module(), kafka_producer ), diff --git a/apps/emqx_connector/src/schema/emqx_connector_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_schema.erl index 10bf583c3..7c826085c 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_schema.erl @@ -183,7 +183,7 @@ transform_bridge_v1_config_to_action_config( (not maps:is_key(ConnectorFieldNameBin, TopKeysMap)) of true -> - maps:remove(to_bin(ConnectorFieldName), ToTransformSoFar); + maps:remove(ConnectorFieldNameBin, ToTransformSoFar); false -> ToTransformSoFar end From 7fb5ade8321082c18ab65be91f250a9e161ea5d2 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Mon, 20 Nov 2023 19:18:22 +0100 Subject: [PATCH 036/101] fix(kafka_producer): hocon renames kafka field --- apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl index 38b57a2b2..7b6a946d0 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl @@ -32,9 +32,9 @@ bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName) -> Config0 = emqx_action_info:transform_bridge_v1_config_to_action_config( BridgeV1Conf, ConnectorName, schema_module(), kafka_producer ), - KafkaMap = emqx_utils_maps:deep_get([<<"parameters">>, <<"kafka">>], Config0), + KafkaMap = emqx_utils_maps:deep_get([<<"parameters">>, <<"kafka">>], Config0, #{}), Config1 = emqx_utils_maps:deep_remove([<<"parameters">>, <<"kafka">>], Config0), - 
Config2 = maps:put(<<"parameters">>, KafkaMap, Config1), + Config2 = emqx_utils_maps:deep_merge(Config1, #{<<"parameters">> => KafkaMap}), maps:with(producer_action_field_keys(), Config2). %%------------------------------------------------------------------------------------------ From 6030bf6fa53edd27c1dcb25205563c294dbda07e Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Mon, 20 Nov 2023 12:06:21 +0100 Subject: [PATCH 037/101] fix(action): upgrade and downgrade strategy This commit adds upgrade and downgrade hooks that are called when upgrading from a bridge V1 to connector and action or the other way around. The automatic translation is used if the callback is not defined. NOTE: Backported from master --- apps/emqx_bridge/src/emqx_action_info.erl | 67 ++++++++- apps/emqx_bridge/src/emqx_bridge_v2.erl | 20 ++- ...mqx_bridge_azure_event_hub_action_info.erl | 12 +- .../src/emqx_bridge_kafka_action_info.erl | 32 ++++- .../src/schema/emqx_connector_schema.erl | 129 ++++++++++++------ 5 files changed, 215 insertions(+), 45 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index 8e8d51aff..1cdf61dfd 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -25,15 +25,39 @@ action_type_to_bridge_v1_type/1, bridge_v1_type_to_action_type/1, is_action_type/1, - registered_schema_modules/0 + registered_schema_modules/0, + connector_action_config_to_bridge_v1_config/3, + has_custom_connector_action_config_to_bridge_v1_config/1, + bridge_v1_config_to_connector_config/2, + has_custom_bridge_v1_config_to_connector_config/1, + bridge_v1_config_to_action_config/3, + has_custom_bridge_v1_config_to_action_config/1, + transform_bridge_v1_config_to_action_config/4 ]). -callback bridge_v1_type_name() -> atom(). -callback action_type_name() -> atom(). -callback connector_type_name() -> atom(). -callback schema_module() -> atom(). +%% Define this if the automatic config downgrade is not enough for the bridge. +-callback connector_action_config_to_bridge_v1_config( + ConnectorConfig :: map(), ActionConfig :: map() +) -> map(). +%% Define this if the automatic config upgrade is not enough for the connector. +-callback bridge_v1_config_to_connector_config(BridgeV1Config :: map()) -> map(). +%% Define this if the automatic config upgrade is not enough for the bridge. +%% If you want to make use of the automatic config upgrade, you can call +%% emqx_action_info:transform_bridge_v1_config_to_action_config/4 in your +%% implementation and do some adjustments on the result. +-callback bridge_v1_config_to_action_config(BridgeV1Config :: map(), ConnectorName :: binary()) -> + map(). --optional_callbacks([bridge_v1_type_name/0]). +-optional_callbacks([ + bridge_v1_type_name/0, + connector_action_config_to_bridge_v1_config/2, + bridge_v1_config_to_connector_config/1, + bridge_v1_config_to_action_config/2 +]). %% ==================================================================== %% Hadcoded list of info modules for actions @@ -110,10 +134,49 @@ registered_schema_modules() -> Schemas = maps:get(action_type_to_schema_module, InfoMap), maps:to_list(Schemas). +has_custom_connector_action_config_to_bridge_v1_config(ActionOrBridgeType) -> + Module = get_action_info_module(ActionOrBridgeType), + erlang:function_exported(Module, connector_action_config_to_bridge_v1_config, 2). 
+ +connector_action_config_to_bridge_v1_config(ActionOrBridgeType, ConnectorConfig, ActionConfig) -> + Module = get_action_info_module(ActionOrBridgeType), + %% should only be called if defined + Module:connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig). + +has_custom_bridge_v1_config_to_connector_config(ActionOrBridgeType) -> + Module = get_action_info_module(ActionOrBridgeType), + erlang:function_exported(Module, bridge_v1_config_to_connector_config, 1). + +bridge_v1_config_to_connector_config(ActionOrBridgeType, BridgeV1Config) -> + Module = get_action_info_module(ActionOrBridgeType), + %% should only be called if defined + Module:bridge_v1_config_to_connector_config(BridgeV1Config). + +has_custom_bridge_v1_config_to_action_config(ActionOrBridgeType) -> + Module = get_action_info_module(ActionOrBridgeType), + erlang:function_exported(Module, bridge_v1_config_to_action_config, 2). + +bridge_v1_config_to_action_config(ActionOrBridgeType, BridgeV1Config, ConnectorName) -> + Module = get_action_info_module(ActionOrBridgeType), + %% should only be called if defined + Module:bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName). + +transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, ConnectorConfSchemaMod, ConnectorConfSchemaName +) -> + emqx_connector_schema:transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, ConnectorConfSchemaMod, ConnectorConfSchemaName + ). + %% ==================================================================== %% Internal functions for building the info map and accessing it %% ==================================================================== +get_action_info_module(ActionOrBridgeType) -> + InfoMap = info_map(), + ActionInfoModuleMap = maps:get(action_type_to_info_module, InfoMap), + maps:get(ActionOrBridgeType, ActionInfoModuleMap, undefined). + internal_emqx_action_persistent_term_info_key() -> ?FUNCTION_NAME. 
diff --git a/apps/emqx_bridge/src/emqx_bridge_v2.erl b/apps/emqx_bridge/src/emqx_bridge_v2.erl index 7ce266922..706849965 100644 --- a/apps/emqx_bridge/src/emqx_bridge_v2.erl +++ b/apps/emqx_bridge/src/emqx_bridge_v2.erl @@ -1105,9 +1105,23 @@ bridge_v1_lookup_and_transform_helper( <<"actions">>, emqx_bridge_v2_schema ), - BridgeV1Config1 = maps:remove(<<"connector">>, BridgeV2RawConfig2), - BridgeV1Config2 = maps:merge(BridgeV1Config1, ConnectorRawConfig2), - BridgeV1Tmp = maps:put(raw_config, BridgeV1Config2, BridgeV2), + BridgeV1ConfigFinal = + case + emqx_action_info:has_custom_connector_action_config_to_bridge_v1_config(BridgeV1Type) + of + false -> + BridgeV1Config1 = maps:remove(<<"connector">>, BridgeV2RawConfig2), + %% Move parameters to the top level + ParametersMap = maps:get(<<"parameters">>, BridgeV1Config1, #{}), + BridgeV1Config2 = maps:remove(<<"parameters">>, BridgeV1Config1), + BridgeV1Config3 = emqx_utils_maps:deep_merge(BridgeV1Config2, ParametersMap), + emqx_utils_maps:deep_merge(ConnectorRawConfig2, BridgeV1Config3); + true -> + emqx_action_info:connector_action_config_to_bridge_v1_config( + BridgeV1Type, ConnectorRawConfig2, BridgeV2RawConfig2 + ) + end, + BridgeV1Tmp = maps:put(raw_config, BridgeV1ConfigFinal, BridgeV2), BridgeV1 = maps:remove(status, BridgeV1Tmp), BridgeV2Status = maps:get(status, BridgeV2, undefined), BridgeV2Error = maps:get(error, BridgeV2, undefined), diff --git a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl index 8ebdb2435..c4f395041 100644 --- a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl +++ b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub_action_info.erl @@ -10,7 +10,9 @@ bridge_v1_type_name/0, action_type_name/0, connector_type_name/0, - schema_module/0 + schema_module/0, + connector_action_config_to_bridge_v1_config/2, + bridge_v1_config_to_action_config/2 ]). bridge_v1_type_name() -> azure_event_hub_producer. @@ -20,3 +22,11 @@ action_type_name() -> azure_event_hub_producer. connector_type_name() -> azure_event_hub_producer. schema_module() -> emqx_bridge_azure_event_hub. + +connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) -> + emqx_bridge_kafka_action_info:connector_action_config_to_bridge_v1_config( + ConnectorConfig, ActionConfig + ). + +bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName) -> + emqx_bridge_kafka_action_info:bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName). diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl index 50d4f0c63..7b6a946d0 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl @@ -10,7 +10,9 @@ bridge_v1_type_name/0, action_type_name/0, connector_type_name/0, - schema_module/0 + schema_module/0, + connector_action_config_to_bridge_v1_config/2, + bridge_v1_config_to_action_config/2 ]). bridge_v1_type_name() -> kafka. @@ -20,3 +22,31 @@ action_type_name() -> kafka_producer. connector_type_name() -> kafka_producer. schema_module() -> emqx_bridge_kafka. 
+ +connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) -> + BridgeV1Config1 = maps:remove(<<"connector">>, ActionConfig), + BridgeV1Config2 = emqx_utils_maps:deep_merge(ConnectorConfig, BridgeV1Config1), + emqx_utils_maps:rename(<<"parameters">>, <<"kafka">>, BridgeV1Config2). + +bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName) -> + Config0 = emqx_action_info:transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, schema_module(), kafka_producer + ), + KafkaMap = emqx_utils_maps:deep_get([<<"parameters">>, <<"kafka">>], Config0, #{}), + Config1 = emqx_utils_maps:deep_remove([<<"parameters">>, <<"kafka">>], Config0), + Config2 = emqx_utils_maps:deep_merge(Config1, #{<<"parameters">> => KafkaMap}), + maps:with(producer_action_field_keys(), Config2). + +%%------------------------------------------------------------------------------------------ +%% Internal helper functions +%%------------------------------------------------------------------------------------------ + +producer_action_field_keys() -> + [ + to_bin(K) + || {K, _} <- emqx_bridge_kafka:fields(kafka_producer_action) + ]. + +to_bin(B) when is_binary(B) -> B; +to_bin(L) when is_list(L) -> list_to_binary(L); +to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8). diff --git a/apps/emqx_connector/src/schema/emqx_connector_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_schema.erl index 22eb523be..9cb0bc931 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_schema.erl @@ -22,7 +22,10 @@ -import(hoconsc, [mk/2, ref/2]). --export([transform_bridges_v1_to_connectors_and_bridges_v2/1]). +-export([ + transform_bridges_v1_to_connectors_and_bridges_v2/1, + transform_bridge_v1_config_to_action_config/4 +]). -export([roots/0, fields/1, desc/1, namespace/0, tags/0]). @@ -96,53 +99,103 @@ bridge_configs_to_transform( end. split_bridge_to_connector_and_action( - {ConnectorsMap, {BridgeType, BridgeName, BridgeConf, ConnectorFields, PreviousRawConfig}} + {ConnectorsMap, {BridgeType, BridgeName, BridgeV1Conf, ConnectorFields, PreviousRawConfig}} ) -> - %% Get connector fields from bridge config - ConnectorMap = lists:foldl( - fun({ConnectorFieldName, _Spec}, ToTransformSoFar) -> - case maps:is_key(to_bin(ConnectorFieldName), BridgeConf) of - true -> - NewToTransform = maps:put( - to_bin(ConnectorFieldName), - maps:get(to_bin(ConnectorFieldName), BridgeConf), - ToTransformSoFar - ), - NewToTransform; - false -> - ToTransformSoFar - end + ConnectorMap = + case emqx_action_info:has_custom_bridge_v1_config_to_connector_config(BridgeType) of + true -> + emqx_action_info:bridge_v1_config_to_connector_config( + BridgeType, BridgeV1Conf + ); + false -> + %% We do an automatic transfomation to get the connector config + %% if the callback is not defined. 
+ %% Get connector fields from bridge config + lists:foldl( + fun({ConnectorFieldName, _Spec}, ToTransformSoFar) -> + case maps:is_key(to_bin(ConnectorFieldName), BridgeV1Conf) of + true -> + NewToTransform = maps:put( + to_bin(ConnectorFieldName), + maps:get(to_bin(ConnectorFieldName), BridgeV1Conf), + ToTransformSoFar + ), + NewToTransform; + false -> + ToTransformSoFar + end + end, + #{}, + ConnectorFields + ) end, - #{}, - ConnectorFields - ), - %% Remove connector fields from bridge config to create Action - ActionMap0 = lists:foldl( - fun - ({enable, _Spec}, ToTransformSoFar) -> - %% Enable filed is used in both - ToTransformSoFar; - ({ConnectorFieldName, _Spec}, ToTransformSoFar) -> - case maps:is_key(to_bin(ConnectorFieldName), BridgeConf) of - true -> - maps:remove(to_bin(ConnectorFieldName), ToTransformSoFar); - false -> - ToTransformSoFar - end - end, - BridgeConf, - ConnectorFields - ), %% Generate a connector name, if needed. Avoid doing so if there was a previous config. ConnectorName = case PreviousRawConfig of #{<<"connector">> := ConnectorName0} -> ConnectorName0; _ -> generate_connector_name(ConnectorsMap, BridgeName, 0) end, - %% Add connector field to action map - ActionMap = maps:put(<<"connector">>, ConnectorName, ActionMap0), + ActionMap = + case emqx_action_info:has_custom_bridge_v1_config_to_action_config(BridgeType) of + true -> + emqx_action_info:bridge_v1_config_to_action_config( + BridgeType, BridgeV1Conf, ConnectorName + ); + false -> + transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, ConnectorFields + ) + end, {BridgeType, BridgeName, ActionMap, ConnectorName, ConnectorMap}. +transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, ConnectorConfSchemaMod, ConnectorConfSchemaName +) -> + ConnectorFields = ConnectorConfSchemaMod:fields(ConnectorConfSchemaName), + transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, ConnectorFields + ). + +transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, ConnectorFields +) -> + TopKeys = [ + <<"enable">>, + <<"connector">>, + <<"local_topic">>, + <<"resource_opts">>, + <<"description">>, + <<"parameters">> + ], + TopKeysMap = maps:from_keys(TopKeys, true), + %% Remove connector fields + ActionMap0 = lists:foldl( + fun + ({enable, _Spec}, ToTransformSoFar) -> + %% Enable filed is used in both + ToTransformSoFar; + ({ConnectorFieldName, _Spec}, ToTransformSoFar) -> + ConnectorFieldNameBin = to_bin(ConnectorFieldName), + case + maps:is_key(ConnectorFieldNameBin, BridgeV1Conf) andalso + (not maps:is_key(ConnectorFieldNameBin, TopKeysMap)) + of + true -> + maps:remove(ConnectorFieldNameBin, ToTransformSoFar); + false -> + ToTransformSoFar + end + end, + BridgeV1Conf, + ConnectorFields + ), + %% Add the connector field + ActionMap1 = maps:put(<<"connector">>, ConnectorName, ActionMap0), + TopMap = maps:with(TopKeys, ActionMap1), + RestMap = maps:without(TopKeys, ActionMap1), + %% Other parameters should be stuffed into `parameters' + emqx_utils_maps:deep_merge(TopMap, #{<<"parameters">> => RestMap}). 
+ generate_connector_name(ConnectorsMap, BridgeName, Attempt) -> ConnectorNameList = case Attempt of From 75ee3ced6857d6fe102df0a0e7137dc091b8a361 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Mon, 20 Nov 2023 16:21:37 -0300 Subject: [PATCH 038/101] feat: add confluent connector/action Fixes https://emqx.atlassian.net/browse/EMQX-11044 See also: https://emqx.atlassian.net/wiki/spaces/EMQX/pages/712179934/Confluent+Integration --- apps/emqx_bridge/src/emqx_action_info.erl | 3 +- apps/emqx_bridge_confluent/BSL.txt | 94 ++++ apps/emqx_bridge_confluent/README.md | 27 ++ apps/emqx_bridge_confluent/docker-ct | 2 + apps/emqx_bridge_confluent/rebar.config | 15 + .../src/emqx_bridge_confluent.app.src | 15 + .../src/emqx_bridge_confluent_producer.erl | 406 ++++++++++++++++++ ..._bridge_confluent_producer_action_info.erl | 19 + .../emqx_bridge_confluent_producer_SUITE.erl | 343 +++++++++++++++ .../test/emqx_bridge_confluent_tests.erl | 179 ++++++++ .../src/schema/emqx_connector_ee_schema.erl | 40 +- .../src/schema/emqx_connector_schema.erl | 3 +- apps/emqx_machine/priv/reboot_lists.eterm | 3 +- mix.exs | 1 + rebar.config.erl | 1 + rel/i18n/emqx_bridge_confluent_producer.hocon | 342 +++++++++++++++ 16 files changed, 1478 insertions(+), 15 deletions(-) create mode 100644 apps/emqx_bridge_confluent/BSL.txt create mode 100644 apps/emqx_bridge_confluent/README.md create mode 100644 apps/emqx_bridge_confluent/docker-ct create mode 100644 apps/emqx_bridge_confluent/rebar.config create mode 100644 apps/emqx_bridge_confluent/src/emqx_bridge_confluent.app.src create mode 100644 apps/emqx_bridge_confluent/src/emqx_bridge_confluent_producer.erl create mode 100644 apps/emqx_bridge_confluent/src/emqx_bridge_confluent_producer_action_info.erl create mode 100644 apps/emqx_bridge_confluent/test/emqx_bridge_confluent_producer_SUITE.erl create mode 100644 apps/emqx_bridge_confluent/test/emqx_bridge_confluent_tests.erl create mode 100644 rel/i18n/emqx_bridge_confluent_producer.hocon diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index 589ebece9..57ede7c2f 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -73,8 +73,9 @@ -if(?EMQX_RELEASE_EDITION == ee). hard_coded_action_info_modules_ee() -> [ - emqx_bridge_kafka_action_info, emqx_bridge_azure_event_hub_action_info, + emqx_bridge_confluent_producer_action_info, + emqx_bridge_kafka_action_info, emqx_bridge_syskeeper_action_info ]. -else. diff --git a/apps/emqx_bridge_confluent/BSL.txt b/apps/emqx_bridge_confluent/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_confluent/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. 
+ +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. 
diff --git a/apps/emqx_bridge_confluent/README.md b/apps/emqx_bridge_confluent/README.md new file mode 100644 index 000000000..be297a14b --- /dev/null +++ b/apps/emqx_bridge_confluent/README.md @@ -0,0 +1,27 @@ +# Confluent Data Integration Bridge + +This application houses the Confluent Producer data integration bridge for EMQX Enterprise +Edition. It provides the means to connect to Confluent Producer and publish messages to +it via the Kafka protocol. + +Currently, our Kafka Producer library (`wolff`) has its own `replayq` buffering +implementation, so this bridge does not require buffer workers from `emqx_resource`. It +implements the connection management and interaction without need for a separate connector +app, since it's not used by authentication and authorization applications. + +# Documentation links + +For more information about Kafka interface for Confluent, please see [the official +docs](https://docs.confluent.io/cloud/current/overview.html). + +# Configurations + +Please see [Ingest Data into Confluent](https://docs.emqx.com/en/enterprise/v5.3/data-integration/data-bridge-confluent.html) for more detailed info. + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_confluent/docker-ct b/apps/emqx_bridge_confluent/docker-ct new file mode 100644 index 000000000..5288ee246 --- /dev/null +++ b/apps/emqx_bridge_confluent/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +kafka diff --git a/apps/emqx_bridge_confluent/rebar.config b/apps/emqx_bridge_confluent/rebar.config new file mode 100644 index 000000000..38173e74c --- /dev/null +++ b/apps/emqx_bridge_confluent/rebar.config @@ -0,0 +1,15 @@ +%% -*- mode: erlang; -*- +{erl_opts, [debug_info]}. +{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.8.0"}}} + , {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.3"}}} + , {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0"}}} + , {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}} + , {snappyer, "1.2.9"} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + {apps, [emqx_bridge_confluent]} +]}. diff --git a/apps/emqx_bridge_confluent/src/emqx_bridge_confluent.app.src b/apps/emqx_bridge_confluent/src/emqx_bridge_confluent.app.src new file mode 100644 index 000000000..3c096ad14 --- /dev/null +++ b/apps/emqx_bridge_confluent/src/emqx_bridge_confluent.app.src @@ -0,0 +1,15 @@ +{application, emqx_bridge_confluent, [ + {description, "EMQX Enterprise Confluent Connector and Action"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + emqx_resource, + telemetry, + wolff + ]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_confluent/src/emqx_bridge_confluent_producer.erl b/apps/emqx_bridge_confluent/src/emqx_bridge_confluent_producer.erl new file mode 100644 index 000000000..7714b0b2e --- /dev/null +++ b/apps/emqx_bridge_confluent/src/emqx_bridge_confluent_producer.erl @@ -0,0 +1,406 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_confluent_producer). 
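+
+%% This schema module is derived from the Kafka producer bridge schema,
+%% `emqx_bridge_kafka': most `fields/1' clauses below take the corresponding
+%% Kafka field list and override individual fields for Confluent Cloud
+%% (required `bootstrap_hosts', SASL/PLAIN-only authentication, and TLS
+%% enabled by default).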
+ +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-behaviour(hocon_schema). +-behaviour(emqx_connector_resource). + +%% `hocon_schema' API +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%% emqx_bridge_enterprise "unofficial" API +-export([ + bridge_v2_examples/1, + connector_examples/1 +]). + +%% emqx_connector_resource behaviour callbacks +-export([connector_config/1]). + +-export([host_opts/0]). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-define(CONFLUENT_CONNECTOR_TYPE, confluent_producer). +-define(CONFLUENT_CONNECTOR_TYPE_BIN, <<"confluent_producer">>). + +%%------------------------------------------------------------------------------------------------- +%% `hocon_schema' API +%%------------------------------------------------------------------------------------------------- + +namespace() -> "confluent". + +roots() -> ["config_producer"]. + +fields("put_connector") -> + Fields = override( + emqx_bridge_kafka:fields("put_connector"), + connector_overrides() + ), + override_documentations(Fields); +fields("get_connector") -> + emqx_bridge_schema:status_fields() ++ + fields("post_connector"); +fields("post_connector") -> + Fields = override( + emqx_bridge_kafka:fields("post_connector"), + connector_overrides() + ), + override_documentations(Fields); +fields("put_bridge_v2") -> + Fields = override( + emqx_bridge_kafka:fields("put_bridge_v2"), + bridge_v2_overrides() + ), + override_documentations(Fields); +fields("get_bridge_v2") -> + emqx_bridge_schema:status_fields() ++ + fields("post_bridge_v2"); +fields("post_bridge_v2") -> + Fields = override( + emqx_bridge_kafka:fields("post_bridge_v2"), + bridge_v2_overrides() + ), + override_documentations(Fields); +fields("config_bridge_v2") -> + fields(actions); +fields("config_connector") -> + Fields = override( + emqx_bridge_kafka:fields("config_connector"), + connector_overrides() + ), + override_documentations(Fields); +fields(auth_username_password) -> + Fields = override( + emqx_bridge_kafka:fields(auth_username_password), + auth_overrides() + ), + override_documentations(Fields); +fields(ssl_client_opts) -> + Fields = override( + emqx_bridge_kafka:ssl_client_opts_fields(), + ssl_overrides() + ), + override_documentations(Fields); +fields(producer_kafka_opts) -> + Fields = override( + emqx_bridge_kafka:fields(producer_kafka_opts), + kafka_producer_overrides() + ), + override_documentations(Fields); +fields(kafka_message) -> + Fields0 = emqx_bridge_kafka:fields(kafka_message), + Fields = proplists:delete(timestamp, Fields0), + override_documentations(Fields); +fields(action) -> + {confluent_producer, + mk( + hoconsc:map(name, ref(emqx_bridge_confluent_producer, actions)), + #{ + desc => <<"Confluent Actions Config">>, + required => false + } + )}; +fields(actions) -> + Fields = + override( + emqx_bridge_kafka:producer_opts(), + bridge_v2_overrides() + ) ++ + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {connector, + mk(binary(), #{ + desc => ?DESC(emqx_connector_schema, "connector_field"), required => true + })}, + {description, emqx_schema:description_schema()} + ], + override_documentations(Fields); +fields(Method) -> + Fields = emqx_bridge_kafka:fields(Method), + override_documentations(Fields). 
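+
+%% Illustration of the override mechanism used above: `override/2' (defined
+%% further below) patches fields by name, e.g.
+%%   override([{ssl, SslSc}], #{ssl => #{required => true}})
+%% returns [{ssl, hocon_schema:override(SslSc, #{required => true})}], i.e. the
+%% same field list with only the `ssl' field schema changed.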
+ +desc("config") -> + ?DESC("desc_config"); +desc("config_connector") -> + ?DESC("desc_config"); +desc("get_" ++ Type) when Type == "connector"; Type == "bridge_v2" -> + ["Configuration for Confluent using `GET` method."]; +desc("put_" ++ Type) when Type == "connector"; Type == "bridge_v2" -> + ["Configuration for Confluent using `PUT` method."]; +desc("post_" ++ Type) when Type == "connector"; Type == "bridge_v2" -> + ["Configuration for Confluent using `POST` method."]; +desc(Name) -> + lists:member(Name, struct_names()) orelse throw({missing_desc, Name}), + ?DESC(Name). + +struct_names() -> + [ + auth_username_password, + kafka_message, + producer_kafka_opts, + actions, + ssl_client_opts + ]. + +bridge_v2_examples(Method) -> + [ + #{ + ?CONFLUENT_CONNECTOR_TYPE_BIN => #{ + summary => <<"Confluent Action">>, + value => values({Method, bridge_v2}) + } + } + ]. + +connector_examples(Method) -> + [ + #{ + ?CONFLUENT_CONNECTOR_TYPE_BIN => #{ + summary => <<"Confluent Connector">>, + value => values({Method, connector}) + } + } + ]. + +values({get, ConfluentType}) -> + maps:merge( + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + }, + values({post, ConfluentType}) + ); +values({post, bridge_v2}) -> + maps:merge( + values(action), + #{ + enable => true, + connector => <<"my_confluent_producer_connector">>, + name => <<"my_confluent_producer_action">>, + type => ?CONFLUENT_CONNECTOR_TYPE_BIN + } + ); +values({post, connector}) -> + maps:merge( + values(common_config), + #{ + name => <<"my_confluent_producer_connector">>, + type => ?CONFLUENT_CONNECTOR_TYPE_BIN, + ssl => #{ + enable => true, + server_name_indication => <<"auto">>, + verify => <<"verify_none">>, + versions => [<<"tlsv1.3">>, <<"tlsv1.2">>] + } + } + ); +values({put, connector}) -> + values(common_config); +values({put, bridge_v2}) -> + maps:merge( + values(action), + #{ + enable => true, + connector => <<"my_confluent_producer_connector">> + } + ); +values(common_config) -> + #{ + authentication => #{ + password => <<"******">> + }, + bootstrap_hosts => <<"xyz.sa-east1.gcp.confluent.cloud:9092">>, + connect_timeout => <<"5s">>, + enable => true, + metadata_request_timeout => <<"4s">>, + min_metadata_refresh_interval => <<"3s">>, + socket_opts => #{ + sndbuf => <<"1024KB">>, + recbuf => <<"1024KB">>, + nodelay => true, + tcp_keepalive => <<"none">> + } + }; +values(action) -> + #{ + parameters => #{ + topic => <<"topic">>, + message => #{ + key => <<"${.clientid}">>, + value => <<"${.}">> + }, + max_batch_bytes => <<"896KB">>, + partition_strategy => <<"random">>, + required_acks => <<"all_isr">>, + partition_count_refresh_interval => <<"60s">>, + kafka_headers => <<"${.pub_props}">>, + kafka_ext_headers => [ + #{ + kafka_ext_header_key => <<"clientid">>, + kafka_ext_header_value => <<"${clientid}">> + }, + #{ + kafka_ext_header_key => <<"topic">>, + kafka_ext_header_value => <<"${topic}">> + } + ], + kafka_header_value_encode_mode => none, + max_inflight => 10, + buffer => #{ + mode => <<"hybrid">>, + per_partition_limit => <<"2GB">>, + segment_bytes => <<"100MB">>, + memory_overload_protection => true + } + }, + local_topic => <<"mqtt/local/topic">> + }. 
+ +%%------------------------------------------------------------------------------------------------- +%% `emqx_connector_resource' API +%%------------------------------------------------------------------------------------------------- + +connector_config(Config) -> + %% Default port for Confluent is 9092 + BootstrapHosts0 = maps:get(bootstrap_hosts, Config), + BootstrapHosts = emqx_schema:parse_servers( + BootstrapHosts0, + ?MODULE:host_opts() + ), + Config#{bootstrap_hosts := BootstrapHosts}. + +%%------------------------------------------------------------------------------------------------- +%% Internal fns +%%------------------------------------------------------------------------------------------------- + +ref(Name) -> + hoconsc:ref(?MODULE, Name). + +connector_overrides() -> + #{ + authentication => + mk( + ref(auth_username_password), + #{ + default => #{}, + required => true, + desc => ?DESC("authentication") + } + ), + bootstrap_hosts => + mk( + binary(), + #{ + required => true, + validator => emqx_schema:servers_validator( + host_opts(), _Required = true + ) + } + ), + ssl => mk( + ref(ssl_client_opts), + #{ + required => true, + default => #{<<"enable">> => true} + } + ), + type => mk( + ?CONFLUENT_CONNECTOR_TYPE, + #{ + required => true, + desc => ?DESC("connector_type") + } + ) + }. + +bridge_v2_overrides() -> + #{ + parameters => + mk(ref(producer_kafka_opts), #{ + required => true, + validator => fun emqx_bridge_kafka:producer_strategy_key_validator/1 + }), + ssl => mk(ref(ssl_client_opts), #{ + default => #{ + <<"enable">> => true, + <<"verify">> => <<"verify_none">> + } + }), + type => mk( + ?CONFLUENT_CONNECTOR_TYPE, + #{ + required => true, + desc => ?DESC("bridge_v2_type") + } + ) + }. +auth_overrides() -> + #{ + mechanism => + mk(plain, #{ + required => true, + default => plain, + importance => ?IMPORTANCE_HIDDEN + }), + username => mk(binary(), #{required => true}), + password => emqx_connector_schema_lib:password_field(#{required => true}) + }. + +%% Kafka has SSL disabled by default +%% Confluent must use SSL +ssl_overrides() -> + #{ + "enable" => mk(true, #{default => true, importance => ?IMPORTANCE_HIDDEN}), + "verify" => mk(verify_none, #{default => verify_none, importance => ?IMPORTANCE_HIDDEN}) + }. + +kafka_producer_overrides() -> + #{ + message => mk(ref(kafka_message), #{}) + }. + +override_documentations(Fields) -> + lists:map( + fun({Name, Sc}) -> + case hocon_schema:field_schema(Sc, desc) of + ?DESC(emqx_bridge_kafka, Key) -> + %% to please dialyzer... + Override = #{type => hocon_schema:field_schema(Sc, type), desc => ?DESC(Key)}, + {Name, hocon_schema:override(Sc, Override)}; + _ -> + {Name, Sc} + end + end, + Fields + ). + +override(Fields, Overrides) -> + lists:map( + fun({Name, Sc}) -> + case maps:find(Name, Overrides) of + {ok, Override} -> + {Name, hocon_schema:override(Sc, Override)}; + error -> + {Name, Sc} + end + end, + Fields + ). + +host_opts() -> + #{default_port => 9092}. diff --git a/apps/emqx_bridge_confluent/src/emqx_bridge_confluent_producer_action_info.erl b/apps/emqx_bridge_confluent/src/emqx_bridge_confluent_producer_action_info.erl new file mode 100644 index 000000000..f19920075 --- /dev/null +++ b/apps/emqx_bridge_confluent/src/emqx_bridge_confluent_producer_action_info.erl @@ -0,0 +1,19 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%-------------------------------------------------------------------- + +-module(emqx_bridge_confluent_producer_action_info). + +-behaviour(emqx_action_info). + +-export([ + action_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +action_type_name() -> confluent_producer. + +connector_type_name() -> confluent_producer. + +schema_module() -> emqx_bridge_confluent_producer. diff --git a/apps/emqx_bridge_confluent/test/emqx_bridge_confluent_producer_SUITE.erl b/apps/emqx_bridge_confluent/test/emqx_bridge_confluent_producer_SUITE.erl new file mode 100644 index 000000000..2977f72cf --- /dev/null +++ b/apps/emqx_bridge_confluent/test/emqx_bridge_confluent_producer_SUITE.erl @@ -0,0 +1,343 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_confluent_producer_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-define(BRIDGE_TYPE, confluent_producer). +-define(BRIDGE_TYPE_BIN, <<"confluent_producer">>). +-define(CONNECTOR_TYPE, confluent_producer). +-define(CONNECTOR_TYPE_BIN, <<"confluent_producer">>). +-define(KAFKA_BRIDGE_TYPE, kafka_producer). + +-import(emqx_common_test_helpers, [on_exit/1]). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + KafkaHost = os:getenv("KAFKA_SASL_SSL_HOST", "toxiproxy.emqx.net"), + KafkaPort = list_to_integer(os:getenv("KAFKA_SASL_SSL_PORT", "9295")), + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + ProxyName = "kafka_sasl_ssl", + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + case emqx_common_test_helpers:is_tcp_server_available(KafkaHost, KafkaPort) of + true -> + Apps = emqx_cth_suite:start( + [ + emqx_conf, + emqx, + emqx_management, + emqx_resource, + emqx_bridge_confluent, + emqx_bridge, + emqx_rule_engine, + {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"} + ], + #{work_dir => ?config(priv_dir, Config)} + ), + {ok, Api} = emqx_common_test_http:create_default_app(), + [ + {tc_apps, Apps}, + {api, Api}, + {proxy_name, ProxyName}, + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {kafka_host, KafkaHost}, + {kafka_port, KafkaPort} + | Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_kafka); + _ -> + {skip, no_kafka} + end + end. + +end_per_suite(Config) -> + Apps = ?config(tc_apps, Config), + emqx_cth_suite:stop(Apps), + ok. + +init_per_testcase(TestCase, Config) -> + common_init_per_testcase(TestCase, Config). 
+ +common_init_per_testcase(TestCase, Config) -> + ct:timetrap(timer:seconds(60)), + emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(), + emqx_config:delete_override_conf_files(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + Name = iolist_to_binary([atom_to_binary(TestCase), UniqueNum]), + KafkaHost = ?config(kafka_host, Config), + KafkaPort = ?config(kafka_port, Config), + KafkaTopic = Name, + ConnectorConfig = connector_config(Name, KafkaHost, KafkaPort), + {BridgeConfig, ExtraConfig} = bridge_config(Name, Name, KafkaTopic), + ensure_topic(Config, KafkaTopic, _Opts = #{}), + ok = snabbkaffe:start_trace(), + ExtraConfig ++ + [ + {connector_type, ?CONNECTOR_TYPE}, + {connector_name, Name}, + {connector_config, ConnectorConfig}, + {bridge_type, ?BRIDGE_TYPE}, + {bridge_name, Name}, + {bridge_config, BridgeConfig} + | Config + ]. + +end_per_testcase(_Testcase, Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(), + emqx_common_test_helpers:call_janitor(60_000), + ok = snabbkaffe:stop(), + ok + end. + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +connector_config(Name, KafkaHost, KafkaPort) -> + InnerConfigMap0 = + #{ + <<"enable">> => true, + <<"bootstrap_hosts">> => iolist_to_binary([KafkaHost, ":", integer_to_binary(KafkaPort)]), + <<"authentication">> => + #{ + <<"mechanism">> => <<"plain">>, + <<"username">> => <<"emqxuser">>, + <<"password">> => <<"password">> + }, + <<"connect_timeout">> => <<"5s">>, + <<"socket_opts">> => + #{ + <<"nodelay">> => true, + <<"recbuf">> => <<"1024KB">>, + <<"sndbuf">> => <<"1024KB">>, + <<"tcp_keepalive">> => <<"none">> + }, + <<"ssl">> => + #{ + <<"cacertfile">> => shared_secret(client_cacertfile), + <<"certfile">> => shared_secret(client_certfile), + <<"keyfile">> => shared_secret(client_keyfile), + <<"ciphers">> => [], + <<"depth">> => 10, + <<"enable">> => true, + <<"hibernate_after">> => <<"5s">>, + <<"log_level">> => <<"notice">>, + <<"reuse_sessions">> => true, + <<"secure_renegotiate">> => true, + <<"server_name_indication">> => <<"disable">>, + %% currently, it seems our CI kafka certs fail peer verification + <<"verify">> => <<"verify_none">>, + <<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>] + } + }, + InnerConfigMap = serde_roundtrip(InnerConfigMap0), + parse_and_check_connector_config(InnerConfigMap, Name). + +parse_and_check_connector_config(InnerConfigMap, Name) -> + TypeBin = ?CONNECTOR_TYPE_BIN, + RawConf = #{<<"connectors">> => #{TypeBin => #{Name => InnerConfigMap}}}, + #{<<"connectors">> := #{TypeBin := #{Name := Config}}} = + hocon_tconf:check_plain(emqx_connector_schema, RawConf, #{ + required => false, atom_key => false + }), + ct:pal("parsed config: ~p", [Config]), + InnerConfigMap. 
+ +bridge_config(Name, ConnectorId, KafkaTopic) -> + InnerConfigMap0 = + #{ + <<"enable">> => true, + <<"connector">> => ConnectorId, + <<"parameters">> => + #{ + <<"buffer">> => + #{ + <<"memory_overload_protection">> => true, + <<"mode">> => <<"memory">>, + <<"per_partition_limit">> => <<"2GB">>, + <<"segment_bytes">> => <<"100MB">> + }, + <<"compression">> => <<"no_compression">>, + <<"kafka_header_value_encode_mode">> => <<"none">>, + <<"max_batch_bytes">> => <<"896KB">>, + <<"max_inflight">> => <<"10">>, + <<"message">> => + #{ + <<"key">> => <<"${.clientid}">>, + <<"value">> => <<"${.}">> + }, + <<"partition_count_refresh_interval">> => <<"60s">>, + <<"partition_strategy">> => <<"random">>, + <<"query_mode">> => <<"async">>, + <<"required_acks">> => <<"all_isr">>, + <<"sync_query_timeout">> => <<"5s">>, + <<"topic">> => KafkaTopic + }, + <<"local_topic">> => <<"t/confluent">> + %%, + }, + InnerConfigMap = serde_roundtrip(InnerConfigMap0), + ExtraConfig = + [{kafka_topic, KafkaTopic}], + {parse_and_check_bridge_config(InnerConfigMap, Name), ExtraConfig}. + +%% check it serializes correctly +serde_roundtrip(InnerConfigMap0) -> + IOList = hocon_pp:do(InnerConfigMap0, #{}), + {ok, InnerConfigMap} = hocon:binary(IOList), + InnerConfigMap. + +parse_and_check_bridge_config(InnerConfigMap, Name) -> + TypeBin = ?BRIDGE_TYPE_BIN, + RawConf = #{<<"bridges">> => #{TypeBin => #{Name => InnerConfigMap}}}, + hocon_tconf:check_plain(emqx_bridge_v2_schema, RawConf, #{required => false, atom_key => false}), + InnerConfigMap. + +shared_secret_path() -> + os:getenv("CI_SHARED_SECRET_PATH", "/var/lib/secret"). + +shared_secret(client_keyfile) -> + filename:join([shared_secret_path(), "client.key"]); +shared_secret(client_certfile) -> + filename:join([shared_secret_path(), "client.crt"]); +shared_secret(client_cacertfile) -> + filename:join([shared_secret_path(), "ca.crt"]); +shared_secret(rig_keytab) -> + filename:join([shared_secret_path(), "rig.keytab"]). + +ensure_topic(Config, KafkaTopic, Opts) -> + KafkaHost = ?config(kafka_host, Config), + KafkaPort = ?config(kafka_port, Config), + NumPartitions = maps:get(num_partitions, Opts, 3), + Endpoints = [{KafkaHost, KafkaPort}], + TopicConfigs = [ + #{ + name => KafkaTopic, + num_partitions => NumPartitions, + replication_factor => 1, + assignments => [], + configs => [] + } + ], + RequestConfig = #{timeout => 5_000}, + ConnConfig = + #{ + ssl => emqx_tls_lib:to_client_opts( + #{ + keyfile => shared_secret(client_keyfile), + certfile => shared_secret(client_certfile), + cacertfile => shared_secret(client_cacertfile), + verify => verify_none, + enable => true + } + ), + sasl => {plain, <<"emqxuser">>, <<"password">>} + }, + case brod:create_topics(Endpoints, TopicConfigs, RequestConfig, ConnConfig) of + ok -> ok; + {error, topic_already_exists} -> ok + end. + +make_message() -> + Time = erlang:unique_integer(), + BinTime = integer_to_binary(Time), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + #{ + clientid => BinTime, + payload => Payload, + timestamp => Time + }. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_start_stop(Config) -> + emqx_bridge_v2_testlib:t_start_stop(Config, kafka_producer_stopped), + ok. + +t_create_via_http(Config) -> + emqx_bridge_v2_testlib:t_create_via_http(Config), + ok. + +t_on_get_status(Config) -> + emqx_bridge_v2_testlib:t_on_get_status(Config, #{failure_status => connecting}), + ok. 
+ +t_sync_query(Config) -> + ok = emqx_bridge_v2_testlib:t_sync_query( + Config, + fun make_message/0, + fun(Res) -> ?assertEqual(ok, Res) end, + emqx_bridge_kafka_impl_producer_sync_query + ), + ok. + +t_same_name_confluent_kafka_bridges(Config) -> + BridgeName = ?config(bridge_name, Config), + TracePoint = emqx_bridge_kafka_impl_producer_sync_query, + %% creates the AEH bridge and check it's working + ok = emqx_bridge_v2_testlib:t_sync_query( + Config, + fun make_message/0, + fun(Res) -> ?assertEqual(ok, Res) end, + TracePoint + ), + + %% then create a Kafka bridge with same name and delete it after creation + ConfigKafka0 = lists:keyreplace(bridge_type, 1, Config, {bridge_type, ?KAFKA_BRIDGE_TYPE}), + ConfigKafka = lists:keyreplace( + connector_type, 1, ConfigKafka0, {connector_type, ?KAFKA_BRIDGE_TYPE} + ), + ok = emqx_bridge_v2_testlib:t_create_via_http(ConfigKafka), + + AehResourceId = emqx_bridge_v2_testlib:resource_id(Config), + KafkaResourceId = emqx_bridge_v2_testlib:resource_id(ConfigKafka), + %% check that both bridges are healthy + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(AehResourceId)), + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(KafkaResourceId)), + ?assertMatch( + {{ok, _}, {ok, _}}, + ?wait_async_action( + emqx_connector:disable_enable(disable, ?KAFKA_BRIDGE_TYPE, BridgeName), + #{?snk_kind := kafka_producer_stopped}, + 5_000 + ) + ), + % check that AEH bridge is still working + ?check_trace( + begin + BridgeId = emqx_bridge_v2_testlib:bridge_id(Config), + Message = {BridgeId, make_message()}, + ?assertEqual(ok, emqx_resource:simple_sync_query(AehResourceId, Message)), + ok + end, + fun(Trace) -> + ?assertMatch([#{instance_id := AehResourceId}], ?of_kind(TracePoint, Trace)) + end + ), + ok. diff --git a/apps/emqx_bridge_confluent/test/emqx_bridge_confluent_tests.erl b/apps/emqx_bridge_confluent/test/emqx_bridge_confluent_tests.erl new file mode 100644 index 000000000..16e6e11fe --- /dev/null +++ b/apps/emqx_bridge_confluent/test/emqx_bridge_confluent_tests.erl @@ -0,0 +1,179 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_confluent_tests). + +-include_lib("eunit/include/eunit.hrl"). + +%%=========================================================================== +%% Data Section +%%=========================================================================== + +%% erlfmt-ignore +confluent_producer_action_hocon() -> +""" +actions.confluent_producer.my_producer { + enable = true + connector = my_connector + parameters { + buffer { + memory_overload_protection = false + mode = memory + per_partition_limit = 2GB + segment_bytes = 100MB + } + compression = no_compression + kafka_header_value_encode_mode = none + max_batch_bytes = 896KB + max_inflight = 10 + message { + key = \"${.clientid}\" + value = \"${.}\" + } + partition_count_refresh_interval = 60s + partition_strategy = random + query_mode = async + required_acks = all_isr + sync_query_timeout = 5s + topic = test + } + local_topic = \"t/confluent\" +} +""". 
+ +confluent_producer_connector_hocon() -> + "" + "\n" + "connectors.confluent_producer.my_producer {\n" + " enable = true\n" + " authentication {\n" + " username = \"user\"\n" + " password = \"xxx\"\n" + " }\n" + " bootstrap_hosts = \"xyz.sa-east1.gcp.confluent.cloud:9092\"\n" + " connect_timeout = 5s\n" + " metadata_request_timeout = 5s\n" + " min_metadata_refresh_interval = 3s\n" + " socket_opts {\n" + " recbuf = 1024KB\n" + " sndbuf = 1024KB\n" + " tcp_keepalive = none\n" + " }\n" + "}\n" + "". + +%%=========================================================================== +%% Helper functions +%%=========================================================================== + +parse(Hocon) -> + {ok, Conf} = hocon:binary(Hocon), + Conf. + +check(SchemaMod, Conf) when is_map(Conf) -> + hocon_tconf:check_plain(SchemaMod, Conf). + +check_action(Conf) when is_map(Conf) -> + check(emqx_bridge_v2_schema, Conf). + +check_connector(Conf) when is_map(Conf) -> + check(emqx_connector_schema, Conf). + +-define(validation_error(SchemaMod, Reason, Value), + {SchemaMod, [ + #{ + kind := validation_error, + reason := Reason, + value := Value + } + ]} +). +-define(action_validation_error(Reason, Value), + ?validation_error(emqx_bridge_v2_schema, Reason, Value) +). +-define(connector_validation_error(Reason, Value), + ?validation_error(emqx_connector_schema, Reason, Value) +). + +-define(ok_config(RootKey, Cfg), #{ + RootKey := + #{ + <<"confluent_producer">> := + #{ + <<"my_producer">> := + Cfg + } + } +}). +-define(ok_connector_config(Cfg), ?ok_config(<<"connectors">>, Cfg)). +-define(ok_action_config(Cfg), ?ok_config(<<"actions">>, Cfg)). + +%%=========================================================================== +%% Test cases +%%=========================================================================== + +confluent_producer_connector_test_() -> + %% ensure this module is loaded when testing only this file + _ = emqx_bridge_enterprise:module_info(), + BaseConf = parse(confluent_producer_connector_hocon()), + Override = fun(Cfg) -> + emqx_utils_maps:deep_merge( + BaseConf, + #{ + <<"connectors">> => + #{ + <<"confluent_producer">> => + #{<<"my_producer">> => Cfg} + } + } + ) + end, + [ + {"base config", + ?_assertMatch( + ?ok_connector_config( + #{ + <<"authentication">> := #{ + <<"mechanism">> := plain + }, + <<"ssl">> := #{ + <<"enable">> := true, + <<"verify">> := verify_none + } + } + ), + check_connector(BaseConf) + )}, + {"ssl disabled", + ?_assertThrow( + ?connector_validation_error(#{expected := "true"}, "false"), + check_connector(Override(#{<<"ssl">> => #{<<"enable">> => <<"false">>}})) + )}, + {"bad authn mechanism: scram sha256", + ?_assertThrow( + ?connector_validation_error(#{expected := "plain"}, "scram_sha_256"), + check_connector( + Override(#{<<"authentication">> => #{<<"mechanism">> => <<"scram_sha_256">>}}) + ) + )}, + {"bad authn mechanism: scram sha512", + ?_assertThrow( + ?connector_validation_error(#{expected := "plain"}, "scram_sha_512"), + check_connector( + Override(#{<<"authentication">> => #{<<"mechanism">> => <<"scram_sha_512">>}}) + ) + )} + ]. + +confluent_producer_action_test_() -> + %% ensure this module is loaded when testing only this file + _ = emqx_bridge_enterprise:module_info(), + BaseConf = parse(confluent_producer_action_hocon()), + [ + {"base config", + ?_assertMatch( + ?ok_action_config(_), + check_action(BaseConf) + )} + ]. 
diff --git a/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl index 19b9fa244..1be7cc6ed 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl @@ -20,11 +20,13 @@ resource_type(Type) when is_binary(Type) -> resource_type(binary_to_atom(Type, utf8)); -resource_type(kafka_producer) -> - emqx_bridge_kafka_impl_producer; %% We use AEH's Kafka interface. resource_type(azure_event_hub_producer) -> emqx_bridge_kafka_impl_producer; +resource_type(confluent_producer) -> + emqx_bridge_kafka_impl_producer; +resource_type(kafka_producer) -> + emqx_bridge_kafka_impl_producer; resource_type(syskeeper_forwarder) -> emqx_bridge_syskeeper_connector; resource_type(syskeeper_proxy) -> @@ -37,6 +39,8 @@ connector_impl_module(ConnectorType) when is_binary(ConnectorType) -> connector_impl_module(binary_to_atom(ConnectorType, utf8)); connector_impl_module(azure_event_hub_producer) -> emqx_bridge_azure_event_hub; +connector_impl_module(confluent_producer) -> + emqx_bridge_confluent_producer; connector_impl_module(_ConnectorType) -> undefined. @@ -45,14 +49,6 @@ fields(connectors) -> connector_structs() -> [ - {kafka_producer, - mk( - hoconsc:map(name, ref(emqx_bridge_kafka, "config_connector")), - #{ - desc => <<"Kafka Connector Config">>, - required => false - } - )}, {azure_event_hub_producer, mk( hoconsc:map(name, ref(emqx_bridge_azure_event_hub, "config_connector")), @@ -61,6 +57,22 @@ connector_structs() -> required => false } )}, + {confluent_producer, + mk( + hoconsc:map(name, ref(emqx_bridge_confluent_producer, "config_connector")), + #{ + desc => <<"Confluent Connector Config">>, + required => false + } + )}, + {kafka_producer, + mk( + hoconsc:map(name, ref(emqx_bridge_kafka, "config_connector")), + #{ + desc => <<"Kafka Connector Config">>, + required => false + } + )}, {syskeeper_forwarder, mk( hoconsc:map(name, ref(emqx_bridge_syskeeper_connector, config)), @@ -93,8 +105,9 @@ examples(Method) -> schema_modules() -> [ - emqx_bridge_kafka, emqx_bridge_azure_event_hub, + emqx_bridge_confluent_producer, + emqx_bridge_kafka, emqx_bridge_syskeeper_connector, emqx_bridge_syskeeper_proxy ]. @@ -103,10 +116,13 @@ api_schemas(Method) -> [ %% We need to map the `type' field of a request (binary) to a %% connector schema module. - api_ref(emqx_bridge_kafka, <<"kafka_producer">>, Method ++ "_connector"), api_ref( emqx_bridge_azure_event_hub, <<"azure_event_hub_producer">>, Method ++ "_connector" ), + api_ref( + emqx_bridge_confluent_producer, <<"confluent_producer">>, Method ++ "_connector" + ), + api_ref(emqx_bridge_kafka, <<"kafka_producer">>, Method ++ "_connector"), api_ref(emqx_bridge_syskeeper_connector, <<"syskeeper_forwarder">>, Method), api_ref(emqx_bridge_syskeeper_proxy, <<"syskeeper_proxy">>, Method) ]. diff --git a/apps/emqx_connector/src/schema/emqx_connector_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_schema.erl index 7c826085c..8397f1bba 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_schema.erl @@ -62,8 +62,9 @@ enterprise_fields_connectors() -> []. -endif. 
-connector_type_to_bridge_types(kafka_producer) -> [kafka, kafka_producer]; connector_type_to_bridge_types(azure_event_hub_producer) -> [azure_event_hub_producer]; +connector_type_to_bridge_types(confluent_producer) -> [confluent_producer]; +connector_type_to_bridge_types(kafka_producer) -> [kafka, kafka_producer]; connector_type_to_bridge_types(syskeeper_forwarder) -> [syskeeper_forwarder]; connector_type_to_bridge_types(syskeeper_proxy) -> []. diff --git a/apps/emqx_machine/priv/reboot_lists.eterm b/apps/emqx_machine/priv/reboot_lists.eterm index ccb61d762..27f984f51 100644 --- a/apps/emqx_machine/priv/reboot_lists.eterm +++ b/apps/emqx_machine/priv/reboot_lists.eterm @@ -129,7 +129,8 @@ emqx_gateway_gbt32960, emqx_gateway_ocpp, emqx_gateway_jt808, - emqx_bridge_syskeeper + emqx_bridge_syskeeper, + emqx_bridge_confluent ], %% must always be of type `load' ce_business_apps => diff --git a/mix.exs b/mix.exs index d931b799d..3c8487b6a 100644 --- a/mix.exs +++ b/mix.exs @@ -183,6 +183,7 @@ defmodule EMQXUmbrella.MixProject do defp enterprise_umbrella_apps() do MapSet.new([ :emqx_bridge_kafka, + :emqx_bridge_confluent, :emqx_bridge_gcp_pubsub, :emqx_bridge_cassandra, :emqx_bridge_opents, diff --git a/rebar.config.erl b/rebar.config.erl index 6bb2fb985..98e29f32a 100644 --- a/rebar.config.erl +++ b/rebar.config.erl @@ -79,6 +79,7 @@ is_enterprise(ce) -> false; is_enterprise(ee) -> true. is_community_umbrella_app("apps/emqx_bridge_kafka") -> false; +is_community_umbrella_app("apps/emqx_bridge_confluent") -> false; is_community_umbrella_app("apps/emqx_bridge_gcp_pubsub") -> false; is_community_umbrella_app("apps/emqx_bridge_cassandra") -> false; is_community_umbrella_app("apps/emqx_bridge_opents") -> false; diff --git a/rel/i18n/emqx_bridge_confluent_producer.hocon b/rel/i18n/emqx_bridge_confluent_producer.hocon new file mode 100644 index 000000000..730f0e371 --- /dev/null +++ b/rel/i18n/emqx_bridge_confluent_producer.hocon @@ -0,0 +1,342 @@ +emqx_bridge_confluent_producer { + +connect_timeout.desc: +"""Maximum wait time for TCP connection establishment (including authentication time if enabled).""" + +connect_timeout.label: +"""Connect Timeout""" + +producer_opts.desc: +"""Local MQTT data source and Confluent bridge configs.""" + +producer_opts.label: +"""MQTT to Confluent""" + +min_metadata_refresh_interval.desc: +"""Minimum time interval the client has to wait before refreshing Confluent Kafka broker and topic metadata. Setting too small value may add extra load on Confluent.""" + +min_metadata_refresh_interval.label: +"""Min Metadata Refresh Interval""" + +kafka_producer.desc: +"""Confluent Producer configuration.""" + +kafka_producer.label: +"""Confluent Producer""" + +producer_buffer.desc: +"""Configure producer message buffer. + +Tell Confluent producer how to buffer messages when EMQX has more messages to send than Confluent can keep up, or when Confluent is down.""" + +producer_buffer.label: +"""Message Buffer""" + +socket_send_buffer.desc: +"""Fine tune the socket send buffer. The default value is tuned for high throughput.""" + +socket_send_buffer.label: +"""Socket Send Buffer Size""" + +socket_receive_buffer.desc: +"""Fine tune the socket receive buffer. The default value is tuned for high throughput.""" + +socket_receive_buffer.label: +"""Socket Receive Buffer Size""" + +socket_tcp_keepalive.desc: +"""Enable TCP keepalive for Confluent bridge connections. 
+The value is three comma-separated numbers in the format of 'Idle,Interval,Probes'
+ - Idle: The number of seconds a connection needs to be idle before the server begins to send out keep-alive probes (Linux default 7200).
+ - Interval: The number of seconds between TCP keep-alive probes (Linux default 75).
+ - Probes: The maximum number of TCP keep-alive probes to send before giving up and killing the connection if no response is obtained from the other end (Linux default 9).
+For example "240,30,5" means: TCP keepalive probes are sent after the connection is idle for 240 seconds, and the probes are sent every 30 seconds until a response is received; if 5 consecutive probes receive no response, the connection is closed.
+Default: 'none'"""
+
+socket_tcp_keepalive.label:
+"""TCP keepalive options"""
+
+desc_name.desc:
+"""Action name, used as a human-readable description of the action."""
+
+desc_name.label:
+"""Action Name"""
+
+producer_kafka_opts.desc:
+"""Confluent producer configs."""
+
+producer_kafka_opts.label:
+"""Confluent Producer"""
+
+kafka_topic.desc:
+"""Kafka topic name."""
+
+kafka_topic.label:
+"""Kafka Topic Name"""
+
+kafka_message_timestamp.desc:
+"""Which timestamp to use. The timestamp is expected to be a millisecond-precision Unix epoch which can be in string format, e.g. 1661326462115 or '1661326462115'. When the desired data field for this template is not found, or if the found data is not a valid integer, the current system timestamp will be used."""
+
+kafka_message_timestamp.label:
+"""Message Timestamp"""
+
+buffer_mode.desc:
+"""Message buffer mode.
+
+memory: Buffer all messages in memory. The messages will be lost in case of EMQX node restart.
+disk: Buffer all messages on disk. The messages on disk are able to survive EMQX node restart.
+hybrid: Buffer messages in memory first; when the buffer grows beyond a certain limit (see the segment_bytes config for more information), messages start being offloaded to disk. Like memory mode, the messages will be lost in case of EMQX node restart."""
+
+buffer_mode.label:
+"""Buffer Mode"""
+
+socket_opts.desc:
+"""Extra socket options."""
+
+socket_opts.label:
+"""Socket Options"""
+
+partition_count_refresh_interval.desc:
+"""The time interval for the Confluent producer to discover an increased number of partitions.
+After the number of partitions is increased in Confluent, EMQX will start taking the
+discovered partitions into account when dispatching messages per partition_strategy."""
+
+partition_count_refresh_interval.label:
+"""Partition Count Refresh Interval"""
+
+max_batch_bytes.desc:
+"""Maximum bytes to collect in a Confluent message batch. Most Kafka brokers default to a limit of 1 MB batch size. EMQX's default value is less than 1 MB in order to compensate for Kafka message encoding overheads (especially when each individual message is very small). When a single message is over the limit, it is still sent (as a single-element batch)."""
+
+max_batch_bytes.label:
+"""Max Batch Bytes"""
+
+required_acks.desc:
+"""The acknowledgements that the Confluent partition leader waits for from its followers before sending the acknowledgement back to the EMQX Confluent producer.
+
+all_isr: Require all in-sync replicas to acknowledge.
+leader_only: Require only the partition-leader's acknowledgement."""
+
+required_acks.label:
+"""Required Acks"""
+
+kafka_headers.desc:
+"""Please provide a placeholder to be used as Confluent Headers
+e.g. ${pub_props}
+Notice that the value of the placeholder must either be an object: +{\"foo\": \"bar\"} +or an array of key-value pairs: +[{\"key\": \"foo\", \"value\": \"bar\"}]""" + +kafka_headers.label: +"""Confluent Headers""" + +producer_kafka_ext_headers.desc: +"""Please provide more key-value pairs for Confluent headers
+The key-value pairs here will be combined with the
+value of the kafka_headers field before sending to Confluent."""
+
+producer_kafka_ext_headers.label:
+"""Extra Confluent headers"""
+
+producer_kafka_ext_header_key.desc:
+"""Key of the Confluent header. Placeholders in the format of ${var} are supported."""
+
+producer_kafka_ext_header_key.label:
+"""Confluent extra header key."""
+
+producer_kafka_ext_header_value.desc:
+"""Value of the Confluent header. Placeholders in the format of ${var} are supported."""
+
+producer_kafka_ext_header_value.label:
+"""Value"""
+
+kafka_header_value_encode_mode.desc:
+"""Confluent headers value encode mode
+ - NONE: only add binary values to Confluent headers;
+ - JSON: only add JSON values to Confluent headers, +and encode it to JSON strings before sending.""" + +kafka_header_value_encode_mode.label: +"""Confluent headers value encode mode""" + +metadata_request_timeout.desc: +"""Maximum wait time when fetching metadata from Confluent.""" + +metadata_request_timeout.label: +"""Metadata Request Timeout""" + +desc_type.desc: +"""The Action Type""" + +desc_type.label: +"""Action Type""" + +socket_nodelay.desc: +"""When set to 'true', TCP buffer is sent as soon as possible. Otherwise, the OS kernel may buffer small TCP packets for a while (40 ms by default).""" + +socket_nodelay.label: +"""No Delay""" + +authentication.desc: +"""Authentication configs.""" + +authentication.label: +"""Authentication""" + +connector_type.label: +"""Connector Type""" + +connector_type.desc: +"""The type of the connector.""" + +bridge_v2_type.label: +"""Action Type""" + +bridge_v2_type.desc: +"""The type of the action.""" + +actions.label: +"""Action Config""" +actions.desc: +"""The configuration for an action.""" + +buffer_memory_overload_protection.desc: +"""Applicable when buffer mode is set to memory +EMQX will drop old buffered messages under high memory pressure. The high memory threshold is defined in config sysmon.os.sysmem_high_watermark. NOTE: This config only works on Linux.""" + +buffer_memory_overload_protection.label: +"""Memory Overload Protection""" + +auth_sasl_mechanism.desc: +"""SASL authentication mechanism.""" + +auth_sasl_mechanism.label: +"""Mechanism""" + +config_enable.desc: +"""Enable (true) or disable (false) this action.""" + +config_enable.label: +"""Enable or Disable""" + +desc_config.desc: +"""Configuration for a Confluent action.""" + +desc_config.label: +"""Confluent Action Configuration""" + +buffer_per_partition_limit.desc: +"""Number of bytes allowed to buffer for each Confluent partition. When this limit is exceeded, old messages will be dropped in a trade for credits for new messages to be buffered.""" + +buffer_per_partition_limit.label: +"""Per-partition Buffer Limit""" + +bootstrap_hosts.desc: +"""A comma separated list of Confluent Kafka host[:port] namespace endpoints to bootstrap the client. Default port number is 9092.""" + +bootstrap_hosts.label: +"""Bootstrap Server""" + +kafka_message_key.desc: +"""Template to render Confluent message key. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then Confluent's NULL (but not empty string) is used.""" + +kafka_message_key.label: +"""Message Key""" + +kafka_message.desc: +"""Template to render a Confluent message.""" + +kafka_message.label: +"""Confluent Message Template""" + +mqtt_topic.desc: +"""MQTT topic or topic filter as data source (action input). If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in Confluent.""" + +mqtt_topic.label: +"""Source MQTT Topic""" + +kafka_message_value.desc: +"""Template to render Confluent message value. If the template is rendered into a NULL value (i.e. there is no such data field in Rule Engine context) then Confluent's NULL (but not empty string) is used.""" + +kafka_message_value.label: +"""Message Value""" + +partition_strategy.desc: +"""Partition strategy is to tell the producer how to dispatch messages to Confluent partitions. 
+ +random: Randomly pick a partition for each message +key_dispatch: Hash Confluent message key to a partition number""" + +partition_strategy.label: +"""Partition Strategy""" + +buffer_segment_bytes.desc: +"""Applicable when buffer mode is set to disk or hybrid. +This value is to specify the size of each on-disk buffer file.""" + +buffer_segment_bytes.label: +"""Segment File Bytes""" + +max_inflight.desc: +"""Maximum number of batches allowed for Confluent producer (per-partition) to send before receiving acknowledgement from Confluent. Greater value typically means better throughput. However, there can be a risk of message reordering when this value is greater than 1.""" + +max_inflight.label: +"""Max Inflight""" + +compression.desc: +"""Compression method.""" + +compression.label: +"""Compression""" + +query_mode.desc: +"""Query mode. Optional 'sync/async', default 'async'.""" + +query_mode.label: +"""Query mode""" + +sync_query_timeout.desc: +"""This parameter defines the timeout limit for synchronous queries. It applies only when the action query mode is configured to 'sync'.""" + +sync_query_timeout.label: +"""Synchronous Query Timeout""" + +auth_username_password.desc: +"""Username/password based authentication.""" + +auth_username_password.label: +"""Username/password Auth""" + +auth_sasl_username.desc: +"""Confluent Key.""" + +auth_sasl_username.label: +"""Key""" + +auth_sasl_password.desc: +"""Confluent Secret.""" + +auth_sasl_password.label: +"""Secret""" + +producer_kafka_opts.desc: +"""Confluent producer configs.""" + +producer_kafka_opts.label: +"""Confluent Producer""" + +ssl_client_opts.desc: +"""TLS/SSL options for Confluent client.""" +ssl_client_opts.label: +"""TLS/SSL options""" + +server_name_indication.desc: +"""Server Name Indication (SNI) setting for TLS handshake.
+- auto: The client will use "servicebus.windows.net" as SNI.
+- disable: The client will not send the SNI.
+- Other string values it will be sent as-is.""" + +server_name_indication.label: +"""SNI""" + +} From cd72dc11dda97e49576b4c8b35f9f28465691255 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Tue, 21 Nov 2023 13:19:58 +0100 Subject: [PATCH 039/101] fix: missing emqx_action_info module mapping --- apps/emqx_bridge/src/emqx_action_info.erl | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index 1cdf61dfd..b236558e1 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -224,7 +224,8 @@ initial_info_map() -> bridge_v1_type_to_action_type => #{}, action_type_to_bridge_v1_type => #{}, action_type_to_connector_type => #{}, - action_type_to_schema_module => #{} + action_type_to_schema_module => #{}, + action_type_to_info_module => #{} }. get_info_map(Module) -> @@ -258,5 +259,10 @@ get_info_map(Module) -> }, action_type_to_schema_module => #{ ActionType => Module:schema_module() + }, + action_type_to_info_module => #{ + ActionType => Module, + %% Alias the bridge V1 type to the action type + BridgeV1Type => Module } }. From fa7151f255d928d78b05058a87b9625198caa331 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Tue, 21 Nov 2023 15:18:22 +0100 Subject: [PATCH 040/101] fix: port emqx_utils_maps:rename function from master The emqx_utils_maps:rename function is needed by action upgrade/downgrade hoos. --- apps/emqx_utils/src/emqx_utils_maps.erl | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/apps/emqx_utils/src/emqx_utils_maps.erl b/apps/emqx_utils/src/emqx_utils_maps.erl index 3945b7201..a3b6961f0 100644 --- a/apps/emqx_utils/src/emqx_utils_maps.erl +++ b/apps/emqx_utils/src/emqx_utils_maps.erl @@ -34,7 +34,8 @@ best_effort_recursive_sum/3, if_only_to_toggle_enable/2, update_if_present/3, - put_if/4 + put_if/4, + rename/3 ]). -export_type([config_key/0, config_key_path/0]). @@ -309,3 +310,11 @@ put_if(Acc, K, V, true) -> Acc#{K => V}; put_if(Acc, _K, _V, false) -> Acc. + +rename(OldKey, NewKey, Map) -> + case maps:find(OldKey, Map) of + {ok, Value} -> + maps:put(NewKey, Value, maps:remove(OldKey, Map)); + error -> + Map + end. 
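A minimal sketch of how the ported helper behaves (illustrative Erlang shell
session; exact map printing may vary by OTP version):

    1> emqx_utils_maps:rename(<<"old">>, <<"new">>, #{<<"old">> => 1, <<"other">> => 2}).
    #{<<"new">> => 1, <<"other">> => 2}
    2> emqx_utils_maps:rename(<<"missing">>, <<"new">>, #{<<"other">> => 2}).
    #{<<"other">> => 2}

When the old key is absent, the map is returned unchanged, so the upgrade/downgrade
hooks can call it unconditionally.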
From 9e1796ec4f43012059ebc374208ce1289f791451 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 16 Nov 2023 17:57:54 -0300 Subject: [PATCH 041/101] feat(gcp_pubsub_producer): migrate GCP PubSub producer to actions Fixes https://emqx.atlassian.net/browse/EMQX-11157 --- apps/emqx_bridge/src/emqx_action_info.erl | 1 + .../src/schema/emqx_bridge_v2_schema.erl | 30 ++- .../src/emqx_bridge_azure_event_hub.erl | 2 +- .../src/emqx_bridge_gcp_pubsub_client.erl | 2 +- .../emqx_bridge_gcp_pubsub_impl_producer.erl | 195 +++++++++------ ...bridge_gcp_pubsub_producer_action_info.erl | 46 ++++ ...emqx_bridge_gcp_pubsub_producer_schema.erl | 223 ++++++++++++++++++ .../emqx_bridge_gcp_pubsub_producer_SUITE.erl | 189 ++++++++++----- .../src/schema/emqx_connector_ee_schema.erl | 10 + .../src/schema/emqx_connector_schema.erl | 24 +- apps/emqx_resource/include/emqx_resource.hrl | 4 + ...qx_bridge_gcp_pubsub_producer_schema.hocon | 18 ++ rel/i18n/emqx_bridge_v2_schema.hocon | 10 + rel/i18n/emqx_connector_schema.hocon | 8 +- 14 files changed, 619 insertions(+), 143 deletions(-) create mode 100644 apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_action_info.erl create mode 100644 apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_schema.erl create mode 100644 rel/i18n/emqx_bridge_gcp_pubsub_producer_schema.hocon diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index 57ede7c2f..34d624af4 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -75,6 +75,7 @@ hard_coded_action_info_modules_ee() -> [ emqx_bridge_azure_event_hub_action_info, emqx_bridge_confluent_producer_action_info, + emqx_bridge_gcp_pubsub_producer_action_info, emqx_bridge_kafka_action_info, emqx_bridge_syskeeper_action_info ]. diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl b/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl index d2fa85f92..7346ae6c7 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl @@ -40,7 +40,11 @@ -export([types/0, types_sc/0]). --export([make_producer_action_schema/1, make_consumer_action_schema/1]). +-export([ + make_producer_action_schema/1, + make_consumer_action_schema/1, + top_level_common_action_keys/0 +]). -export_type([action_type/0]). @@ -130,6 +134,8 @@ registered_schema_fields() -> desc(actions) -> ?DESC("desc_bridges_v2"); +desc(resource_opts) -> + ?DESC(emqx_resource_schema, "resource_opts"); desc(_) -> undefined. @@ -154,6 +160,16 @@ examples(Method) -> SchemaModules = [Mod || {_, Mod} <- emqx_action_info:registered_schema_modules()], lists:foldl(Fun, #{}, SchemaModules). +top_level_common_action_keys() -> + [ + <<"connector">>, + <<"description">>, + <<"enable">>, + <<"local_topic">>, + <<"parameters">>, + <<"resource_opts">> + ]. + %%====================================================================================== %% Helper functions for making HOCON Schema %%====================================================================================== @@ -174,7 +190,10 @@ make_consumer_action_schema(ActionParametersRef) -> {description, emqx_schema:description_schema()}, {parameters, ActionParametersRef}, {resource_opts, - mk(ref(?MODULE, resource_opts), #{default => #{}, desc => ?DESC(resource_opts)})} + mk(ref(?MODULE, resource_opts), #{ + default => #{}, + desc => ?DESC(emqx_resource_schema, "resource_opts") + })} ]. -ifdef(TEST). 
@@ -196,7 +215,7 @@ schema_homogeneous_test() -> is_bad_schema(#{type := ?MAP(_, ?R_REF(Module, TypeName))}) -> Fields = Module:fields(TypeName), - ExpectedFieldNames = common_field_names(), + ExpectedFieldNames = lists:map(fun binary_to_atom/1, top_level_common_action_keys()), MissingFileds = lists:filter( fun(Name) -> lists:keyfind(Name, 1, Fields) =:= false end, ExpectedFieldNames ), @@ -211,9 +230,4 @@ is_bad_schema(#{type := ?MAP(_, ?R_REF(Module, TypeName))}) -> }} end. -common_field_names() -> - [ - enable, description, local_topic, connector, resource_opts, parameters - ]. - -endif. diff --git a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.erl b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.erl index eb364bdff..e196aac30 100644 --- a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.erl +++ b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.erl @@ -17,7 +17,7 @@ desc/1 ]). -%% emqx_bridge_enterprise "unofficial" API +%% `emqx_bridge_v2_schema' "unofficial" API -export([ bridge_v2_examples/1, conn_bridge_examples/1, diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_client.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_client.erl index eeceb0c43..454c0d7ea 100644 --- a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_client.erl +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_client.erl @@ -134,7 +134,7 @@ start( -spec stop(resource_id()) -> ok | {error, term()}. stop(ResourceId) -> - ?tp(gcp_pubsub_stop, #{resource_id => ResourceId}), + ?tp(gcp_pubsub_stop, #{instance_id => ResourceId, resource_id => ResourceId}), ?SLOG(info, #{ msg => "stopping_gcp_pubsub_bridge", connector => ResourceId diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_impl_producer.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_impl_producer.erl index cd7568001..487118b3e 100644 --- a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_impl_producer.erl +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_impl_producer.erl @@ -8,23 +8,30 @@ -include_lib("emqx_resource/include/emqx_resource.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). --type config() :: #{ - attributes_template := [#{key := binary(), value := binary()}], +-type connector_config() :: #{ connect_timeout := emqx_schema:duration_ms(), max_retries := non_neg_integer(), - ordering_key_template := binary(), - payload_template := binary(), - pubsub_topic := binary(), resource_opts := #{request_ttl := infinity | emqx_schema:duration_ms(), any() => term()}, - service_account_json := emqx_bridge_gcp_pubsub_client:service_account_json(), - any() => term() + service_account_json := emqx_bridge_gcp_pubsub_client:service_account_json() }. --type state() :: #{ - attributes_template := #{emqx_placeholder:tmpl_token() => emqx_placeholder:tmpl_token()}, +-type action_config() :: #{ + parameters := #{ + attributes_template := [#{key := binary(), value := binary()}], + ordering_key_template := binary(), + payload_template := binary(), + pubsub_topic := binary() + }, + resource_opts := #{request_ttl := infinity | emqx_schema:duration_ms(), any() => term()} +}. +-type connector_state() :: #{ client := emqx_bridge_gcp_pubsub_client:state(), + installed_actions := #{action_resource_id() => action_state()}, + project_id := emqx_bridge_gcp_pubsub_client:project_id() +}. 
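+
+%% Each action (channel) installed on this connector has its own
+%% `action_state()' holding the pre-processed templates; `on_add_channel/4'
+%% below stores it in `installed_actions', keyed by the action resource id.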
+-type action_state() :: #{ + attributes_template := #{emqx_placeholder:tmpl_token() => emqx_placeholder:tmpl_token()}, ordering_key_template := emqx_placeholder:tmpl_token(), payload_template := emqx_placeholder:tmpl_token(), - project_id := emqx_bridge_gcp_pubsub_client:project_id(), pubsub_topic := binary() }. -type headers() :: emqx_bridge_gcp_pubsub_client:headers(). @@ -41,7 +48,11 @@ on_query_async/4, on_batch_query/3, on_batch_query_async/4, - on_get_status/2 + on_get_status/2, + on_add_channel/4, + on_remove_channel/3, + on_get_channels/1, + on_get_channel_status/3 ]). -export([reply_delegator/2]). @@ -54,53 +65,45 @@ callback_mode() -> async_if_possible. query_mode(_Config) -> async. --spec on_start(resource_id(), config()) -> {ok, state()} | {error, term()}. +-spec on_start(connector_resource_id(), connector_config()) -> + {ok, connector_state()} | {error, term()}. on_start(InstanceId, Config0) -> ?SLOG(info, #{ msg => "starting_gcp_pubsub_bridge", config => Config0 }), Config = maps:update_with(service_account_json, fun emqx_utils_maps:binary_key_map/1, Config0), - #{ - attributes_template := AttributesTemplate, - ordering_key_template := OrderingKeyTemplate, - payload_template := PayloadTemplate, - pubsub_topic := PubSubTopic, - service_account_json := #{<<"project_id">> := ProjectId} - } = Config, + #{service_account_json := #{<<"project_id">> := ProjectId}} = Config, case emqx_bridge_gcp_pubsub_client:start(InstanceId, Config) of {ok, Client} -> State = #{ client => Client, - attributes_template => preproc_attributes(AttributesTemplate), - ordering_key_template => emqx_placeholder:preproc_tmpl(OrderingKeyTemplate), - payload_template => emqx_placeholder:preproc_tmpl(PayloadTemplate), - project_id => ProjectId, - pubsub_topic => PubSubTopic + installed_actions => #{}, + project_id => ProjectId }, {ok, State}; Error -> Error end. --spec on_stop(resource_id(), state()) -> ok | {error, term()}. +-spec on_stop(connector_resource_id(), connector_state()) -> ok | {error, term()}. on_stop(InstanceId, _State) -> emqx_bridge_gcp_pubsub_client:stop(InstanceId). --spec on_get_status(resource_id(), state()) -> connected | disconnected. +-spec on_get_status(connector_resource_id(), connector_state()) -> connected | disconnected. on_get_status(_InstanceId, #{client := Client} = _State) -> emqx_bridge_gcp_pubsub_client:get_status(Client). -spec on_query( - resource_id(), - {send_message, map()}, - state() + connector_resource_id(), + {message_tag(), map()}, + connector_state() ) -> {ok, map()} | {error, {recoverable_error, term()}} | {error, term()}. -on_query(ResourceId, {send_message, Selected}, State) -> - Requests = [{send_message, Selected}], +on_query(ResourceId, {MessageTag, Selected}, State) -> + Requests = [{MessageTag, Selected}], ?TRACE( "QUERY_SYNC", "gcp_pubsub_received", @@ -109,24 +112,25 @@ on_query(ResourceId, {send_message, Selected}, State) -> do_send_requests_sync(State, Requests, ResourceId). -spec on_query_async( - resource_id(), - {send_message, map()}, + connector_resource_id(), + {message_tag(), map()}, {ReplyFun :: function(), Args :: list()}, - state() + connector_state() ) -> {ok, pid()} | {error, no_pool_worker_available}. 
-on_query_async(ResourceId, {send_message, Selected}, ReplyFunAndArgs, State) -> - Requests = [{send_message, Selected}], +on_query_async(ResourceId, {MessageTag, Selected}, ReplyFunAndArgs, State) -> + Requests = [{MessageTag, Selected}], ?TRACE( "QUERY_ASYNC", "gcp_pubsub_received", #{requests => Requests, connector => ResourceId, state => State} ), + ?tp(gcp_pubsub_producer_async, #{instance_id => ResourceId, requests => Requests}), do_send_requests_async(State, Requests, ReplyFunAndArgs). -spec on_batch_query( - resource_id(), - [{send_message, map()}], - state() + connector_resource_id(), + [{message_tag(), map()}], + connector_state() ) -> {ok, map()} | {error, {recoverable_error, term()}} @@ -140,10 +144,10 @@ on_batch_query(ResourceId, Requests, State) -> do_send_requests_sync(State, Requests, ResourceId). -spec on_batch_query_async( - resource_id(), - [{send_message, map()}], + connector_resource_id(), + [{message_tag(), map()}], {ReplyFun :: function(), Args :: list()}, - state() + connector_state() ) -> {ok, pid()} | {error, no_pool_worker_available}. on_batch_query_async(ResourceId, Requests, ReplyFunAndArgs, State) -> ?TRACE( @@ -151,32 +155,92 @@ on_batch_query_async(ResourceId, Requests, ReplyFunAndArgs, State) -> "gcp_pubsub_received", #{requests => Requests, connector => ResourceId, state => State} ), + ?tp(gcp_pubsub_producer_async, #{instance_id => ResourceId, requests => Requests}), do_send_requests_async(State, Requests, ReplyFunAndArgs). +-spec on_add_channel( + connector_resource_id(), + connector_state(), + action_resource_id(), + action_config() +) -> + {ok, connector_state()}. +on_add_channel(_ConnectorResId, ConnectorState0, ActionId, ActionConfig) -> + #{installed_actions := InstalledActions0} = ConnectorState0, + ChannelState = install_channel(ActionConfig), + InstalledActions = InstalledActions0#{ActionId => ChannelState}, + ConnectorState = ConnectorState0#{installed_actions := InstalledActions}, + {ok, ConnectorState}. + +-spec on_remove_channel( + connector_resource_id(), + connector_state(), + action_resource_id() +) -> + {ok, connector_state()}. +on_remove_channel(_ConnectorResId, ConnectorState0, ActionId) -> + #{installed_actions := InstalledActions0} = ConnectorState0, + InstalledActions = maps:remove(ActionId, InstalledActions0), + ConnectorState = ConnectorState0#{installed_actions := InstalledActions}, + {ok, ConnectorState}. + +-spec on_get_channels(connector_resource_id()) -> + [{action_resource_id(), action_config()}]. +on_get_channels(ConnectorResId) -> + emqx_bridge_v2:get_channels_for_connector(ConnectorResId). + +-spec on_get_channel_status(connector_resource_id(), action_resource_id(), connector_state()) -> + health_check_status(). +on_get_channel_status(_ConnectorResId, _ChannelId, _ConnectorState) -> + %% Should we check the underlying client? Same as on_get_status? + ?status_connected. 
+ %%------------------------------------------------------------------------------------------------- %% Helper fns %%------------------------------------------------------------------------------------------------- +%% TODO: check if topic exists ("unhealthy target") +install_channel(ActionConfig) -> + #{ + parameters := #{ + attributes_template := AttributesTemplate, + ordering_key_template := OrderingKeyTemplate, + payload_template := PayloadTemplate, + pubsub_topic := PubSubTopic + } + } = ActionConfig, + #{ + attributes_template => preproc_attributes(AttributesTemplate), + ordering_key_template => emqx_placeholder:preproc_tmpl(OrderingKeyTemplate), + payload_template => emqx_placeholder:preproc_tmpl(PayloadTemplate), + pubsub_topic => PubSubTopic + }. + -spec do_send_requests_sync( - state(), - [{send_message, map()}], + connector_state(), + [{message_tag(), map()}], resource_id() ) -> {ok, status_code(), headers()} | {ok, status_code(), headers(), body()} | {error, {recoverable_error, term()}} | {error, term()}. -do_send_requests_sync(State, Requests, InstanceId) -> - #{client := Client} = State, +do_send_requests_sync(ConnectorState, Requests, InstanceId) -> + ?tp(gcp_pubsub_producer_sync, #{instance_id => InstanceId, requests => Requests}), + #{client := Client} = ConnectorState, + %% is it safe to assume the tag is the same??? And not empty??? + [{MessageTag, _} | _] = Requests, + #{installed_actions := InstalledActions} = ConnectorState, + ChannelState = maps:get(MessageTag, InstalledActions), Payloads = lists:map( - fun({send_message, Selected}) -> - encode_payload(State, Selected) + fun({_MessageTag, Selected}) -> + encode_payload(ChannelState, Selected) end, Requests ), Body = to_pubsub_request(Payloads), - Path = publish_path(State), + Path = publish_path(ConnectorState, ChannelState), Method = post, Request = {prepared_request, {Method, Path, Body}}, Result = emqx_bridge_gcp_pubsub_client:query_sync(Request, Client), @@ -184,21 +248,25 @@ do_send_requests_sync(State, Requests, InstanceId) -> handle_result(Result, Request, QueryMode, InstanceId). -spec do_send_requests_async( - state(), - [{send_message, map()}], + connector_state(), + [{message_tag(), map()}], {ReplyFun :: function(), Args :: list()} ) -> {ok, pid()} | {error, no_pool_worker_available}. -do_send_requests_async(State, Requests, ReplyFunAndArgs0) -> - #{client := Client} = State, +do_send_requests_async(ConnectorState, Requests, ReplyFunAndArgs0) -> + #{client := Client} = ConnectorState, + %% is it safe to assume the tag is the same??? And not empty??? + [{MessageTag, _} | _] = Requests, + #{installed_actions := InstalledActions} = ConnectorState, + ChannelState = maps:get(MessageTag, InstalledActions), Payloads = lists:map( - fun({send_message, Selected}) -> - encode_payload(State, Selected) + fun({_MessageTag, Selected}) -> + encode_payload(ChannelState, Selected) end, Requests ), Body = to_pubsub_request(Payloads), - Path = publish_path(State), + Path = publish_path(ConnectorState, ChannelState), Method = post, Request = {prepared_request, {Method, Path, Body}}, ReplyFunAndArgs = {fun ?MODULE:reply_delegator/2, [ReplyFunAndArgs0]}, @@ -206,18 +274,18 @@ do_send_requests_async(State, Requests, ReplyFunAndArgs0) -> Request, ReplyFunAndArgs, Client ). --spec encode_payload(state(), Selected :: map()) -> +-spec encode_payload(action_state(), Selected :: map()) -> #{ data := binary(), attributes => #{binary() => binary()}, 'orderingKey' => binary() }. 
-encode_payload(State, Selected) -> +encode_payload(ActionState, Selected) -> #{ attributes_template := AttributesTemplate, ordering_key_template := OrderingKeyTemplate, payload_template := PayloadTemplate - } = State, + } = ActionState, Data = render_payload(PayloadTemplate, Selected), OrderingKey = render_key(OrderingKeyTemplate, Selected), Attributes = proc_attributes(AttributesTemplate, Selected), @@ -307,13 +375,8 @@ proc_attributes(AttributesTemplate, Selected) -> to_pubsub_request(Payloads) -> emqx_utils_json:encode(#{messages => Payloads}). --spec publish_path(state()) -> binary(). -publish_path( - _State = #{ - project_id := ProjectId, - pubsub_topic := PubSubTopic - } -) -> +-spec publish_path(connector_state(), action_state()) -> binary(). +publish_path(#{project_id := ProjectId}, #{pubsub_topic := PubSubTopic}) -> <<"/v1/projects/", ProjectId/binary, "/topics/", PubSubTopic/binary, ":publish">>. handle_result({error, Reason}, _Request, QueryMode, ResourceId) when diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_action_info.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_action_info.erl new file mode 100644 index 000000000..6b5391b09 --- /dev/null +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_action_info.erl @@ -0,0 +1,46 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_gcp_pubsub_producer_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0, + bridge_v1_config_to_action_config/2 +]). + +bridge_v1_type_name() -> gcp_pubsub. + +action_type_name() -> gcp_pubsub_producer. + +connector_type_name() -> gcp_pubsub_producer. + +schema_module() -> emqx_bridge_gcp_pubsub_producer_schema. + +bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName) -> + CommonActionKeys = emqx_bridge_v2_schema:top_level_common_action_keys(), + ParamsKeys = producer_action_parameters_field_keys(), + Config1 = maps:with(CommonActionKeys, BridgeV1Config), + Params = maps:with(ParamsKeys, BridgeV1Config), + Config1#{ + <<"connector">> => ConnectorName, + <<"parameters">> => Params + }. + +%%------------------------------------------------------------------------------------------ +%% Internal helper fns +%%------------------------------------------------------------------------------------------ + +producer_action_parameters_field_keys() -> + [ + to_bin(K) + || {K, _} <- emqx_bridge_gcp_pubsub_producer_schema:fields(action_parameters) + ]. + +to_bin(L) when is_list(L) -> list_to_binary(L); +to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8). diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_schema.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_schema.erl new file mode 100644 index 000000000..11ca16e0b --- /dev/null +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_schema.erl @@ -0,0 +1,223 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_gcp_pubsub_producer_schema). + +-import(hoconsc, [mk/2, ref/2]). + +-include_lib("typerefl/include/types.hrl"). 
+-include_lib("hocon/include/hoconsc.hrl"). + +%% `hocon_schema' API +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%% `emqx_bridge_v2_schema' "unofficial" API +-export([ + bridge_v2_examples/1, + conn_bridge_examples/1, + connector_examples/1 +]). + +%%------------------------------------------------------------------------------------------------- +%% `hocon_schema' API +%%------------------------------------------------------------------------------------------------- + +namespace() -> + "gcp_pubsub_producer". + +roots() -> + []. + +%%========================================= +%% Action fields +%%========================================= +fields(action) -> + {gcp_pubsub_producer, + mk( + hoconsc:map(name, ref(?MODULE, producer_action)), + #{ + desc => <<"GCP PubSub Producer Action Config">>, + required => false + } + )}; +fields(producer_action) -> + emqx_bridge_v2_schema:make_producer_action_schema( + mk( + ref(?MODULE, action_parameters), + #{ + required => true, + desc => ?DESC(producer_action) + } + ) + ); +fields(action_parameters) -> + UnsupportedFields = [local_topic], + lists:filter( + fun({Key, _Schema}) -> not lists:member(Key, UnsupportedFields) end, + emqx_bridge_gcp_pubsub:fields(producer) + ); +%%========================================= +%% Connector fields +%%========================================= +fields("config_connector") -> + %% FIXME + emqx_connector_schema:common_fields() ++ + emqx_bridge_gcp_pubsub:fields(connector_config) ++ + emqx_resource_schema:fields("resource_opts"); +%%========================================= +%% HTTP API fields +%%========================================= +fields("get_bridge_v2") -> + emqx_bridge_schema:status_fields() ++ fields("post_bridge_v2"); +fields("post_bridge_v2") -> + [type_field(), name_field() | fields("put_bridge_v2")]; +fields("put_bridge_v2") -> + fields(producer_action). + +desc("config_connector") -> + ?DESC("config_connector"); +desc(action_parameters) -> + ?DESC(action_parameters); +desc(producer_action) -> + ?DESC(producer_action); +desc(_Name) -> + undefined. + +type_field() -> + {type, mk(gcp_pubsub_producer, #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. + +%%------------------------------------------------------------------------------------------------- +%% `emqx_bridge_v2_schema' "unofficial" API +%%------------------------------------------------------------------------------------------------- + +bridge_v2_examples(Method) -> + [ + #{ + <<"gcp_pubsub_producer">> => #{ + summary => <<"GCP PubSub Producer Action">>, + value => action_example(Method) + } + } + ]. + +connector_examples(Method) -> + [ + #{ + <<"gcp_pubsub_producer">> => #{ + summary => <<"GCP PubSub Producer Connector">>, + value => connector_example(Method) + } + } + ]. + +conn_bridge_examples(Method) -> + emqx_bridge_gcp_pubsub:conn_bridge_examples(Method). 
+ +action_example(post) -> + maps:merge( + action_example(put), + #{ + type => <<"gcp_pubsub_producer">>, + name => <<"my_action">> + } + ); +action_example(get) -> + maps:merge( + action_example(put), + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + } + ); +action_example(put) -> + #{ + enable => true, + connector => <<"my_connector_name">>, + description => <<"My action">>, + local_topic => <<"local/topic">>, + resource_opts => + #{batch_size => 5}, + parameters => + #{ + pubsub_topic => <<"mytopic">>, + ordering_key_template => <<"${payload.ok}">>, + payload_template => <<"${payload}">>, + attributes_template => + [ + #{ + key => <<"${payload.attrs.k}">>, + value => <<"${payload.attrs.v}">> + } + ] + } + }. + +connector_example(get) -> + maps:merge( + connector_example(put), + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + } + ); +connector_example(post) -> + maps:merge( + connector_example(put), + #{ + type => <<"gcp_pubsub_producer">>, + name => <<"my_connector">> + } + ); +connector_example(put) -> + #{ + enable => true, + connect_timeout => <<"10s">>, + pool_size => 8, + pipelining => 100, + max_retries => 2, + resource_opts => #{request_ttl => <<"60s">>}, + service_account_json => + #{ + auth_provider_x509_cert_url => + <<"https://www.googleapis.com/oauth2/v1/certs">>, + auth_uri => + <<"https://accounts.google.com/o/oauth2/auth">>, + client_email => + <<"test@myproject.iam.gserviceaccount.com">>, + client_id => <<"123812831923812319190">>, + client_x509_cert_url => + << + "https://www.googleapis.com/robot/v1/" + "metadata/x509/test%40myproject.iam.gserviceaccount.com" + >>, + private_key => + << + "-----BEGIN PRIVATE KEY-----\n" + "MIIEvQI..." + >>, + private_key_id => <<"kid">>, + project_id => <<"myproject">>, + token_uri => + <<"https://oauth2.googleapis.com/token">>, + type => <<"service_account">> + } + }. diff --git a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_producer_SUITE.erl b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_producer_SUITE.erl index acfe3df8b..f65b80f90 100644 --- a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_producer_SUITE.erl +++ b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_producer_SUITE.erl @@ -13,8 +13,12 @@ -include_lib("jose/include/jose_jwt.hrl"). -include_lib("jose/include/jose_jws.hrl"). --define(BRIDGE_TYPE, gcp_pubsub). --define(BRIDGE_TYPE_BIN, <<"gcp_pubsub">>). +-define(ACTION_TYPE, gcp_pubsub_producer). +-define(ACTION_TYPE_BIN, <<"gcp_pubsub_producer">>). +-define(CONNECTOR_TYPE, gcp_pubsub_producer). +-define(CONNECTOR_TYPE_BIN, <<"gcp_pubsub_producer">>). +-define(BRIDGE_V1_TYPE, gcp_pubsub). +-define(BRIDGE_V1_TYPE_BIN, <<"gcp_pubsub">>). -import(emqx_common_test_helpers, [on_exit/1]). @@ -141,19 +145,24 @@ end_per_testcase(_TestCase, _Config) -> generate_config(Config0) -> #{ - name := Name, + name := ActionName, config_string := ConfigString, pubsub_config := PubSubConfig, service_account_json := ServiceAccountJSON } = gcp_pubsub_config(Config0), - ResourceId = emqx_bridge_resource:resource_id(?BRIDGE_TYPE_BIN, Name), - BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, Name), + %% FIXME + %% `emqx_bridge_resource:resource_id' requires an existing connector in the config..... 
+ ConnectorName = <<"connector_", ActionName/binary>>, + ConnectorResourceId = <<"connector:", ?CONNECTOR_TYPE_BIN/binary, ":", ConnectorName/binary>>, + ActionResourceId = emqx_bridge_v2:id(?ACTION_TYPE_BIN, ActionName, ConnectorName), + BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_V1_TYPE_BIN, ActionName), [ - {gcp_pubsub_name, Name}, + {gcp_pubsub_name, ActionName}, {gcp_pubsub_config, PubSubConfig}, {gcp_pubsub_config_string, ConfigString}, {service_account_json, ServiceAccountJSON}, - {resource_id, ResourceId}, + {connector_resource_id, ConnectorResourceId}, + {action_resource_id, ActionResourceId}, {bridge_id, BridgeId} | Config0 ]. @@ -168,7 +177,7 @@ delete_all_bridges() -> ). delete_bridge(Config) -> - Type = ?BRIDGE_TYPE, + Type = ?BRIDGE_V1_TYPE, Name = ?config(gcp_pubsub_name, Config), ct:pal("deleting bridge ~p", [{Type, Name}]), emqx_bridge:remove(Type, Name). @@ -177,7 +186,7 @@ create_bridge(Config) -> create_bridge(Config, _GCPPubSubConfigOverrides = #{}). create_bridge(Config, GCPPubSubConfigOverrides) -> - TypeBin = ?BRIDGE_TYPE_BIN, + TypeBin = ?BRIDGE_V1_TYPE_BIN, Name = ?config(gcp_pubsub_name, Config), GCPPubSubConfig0 = ?config(gcp_pubsub_config, Config), GCPPubSubConfig = emqx_utils_maps:deep_merge(GCPPubSubConfig0, GCPPubSubConfigOverrides), @@ -190,7 +199,7 @@ create_bridge_http(Config) -> create_bridge_http(Config, _GCPPubSubConfigOverrides = #{}). create_bridge_http(Config, GCPPubSubConfigOverrides) -> - TypeBin = ?BRIDGE_TYPE_BIN, + TypeBin = ?BRIDGE_V1_TYPE_BIN, Name = ?config(gcp_pubsub_name, Config), GCPPubSubConfig0 = ?config(gcp_pubsub_config, Config), GCPPubSubConfig = emqx_utils_maps:deep_merge(GCPPubSubConfig0, GCPPubSubConfigOverrides), @@ -225,7 +234,7 @@ create_bridge_http(Config, GCPPubSubConfigOverrides) -> create_rule_and_action_http(Config) -> GCPPubSubName = ?config(gcp_pubsub_name, Config), - BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, GCPPubSubName), + BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_V1_TYPE_BIN, GCPPubSubName), Params = #{ enable => true, sql => <<"SELECT * FROM \"t/topic\"">>, @@ -382,9 +391,14 @@ assert_metrics(ExpectedMetrics, ResourceId) -> CurrentMetrics = current_metrics(ResourceId), TelemetryTable = get(telemetry_table), RecordedEvents = ets:tab2list(TelemetryTable), - ?assertEqual(ExpectedMetrics, Metrics, #{ - current_metrics => CurrentMetrics, recorded_events => RecordedEvents - }), + ?retry( + _Sleep0 = 300, + _Attempts = 20, + ?assertEqual(ExpectedMetrics, Metrics, #{ + current_metrics => CurrentMetrics, + recorded_events => RecordedEvents + }) + ), ok. assert_empty_metrics(ResourceId) -> @@ -535,8 +549,30 @@ install_telemetry_handler(TestCase) -> end), Tid. +mk_res_id_filter(ResourceId) -> + fun(Event) -> + case Event of + #{metadata := #{resource_id := ResId}} when ResId =:= ResourceId -> + true; + _ -> + false + end + end. + wait_until_gauge_is(GaugeName, ExpectedValue, Timeout) -> - Events = receive_all_events(GaugeName, Timeout), + wait_until_gauge_is(#{ + gauge_name => GaugeName, + expected => ExpectedValue, + timeout => Timeout + }). 
+ +wait_until_gauge_is(#{} = Opts) -> + GaugeName = maps:get(gauge_name, Opts), + ExpectedValue = maps:get(expected, Opts), + Timeout = maps:get(timeout, Opts), + MaxEvents = maps:get(max_events, Opts, 10), + FilterFn = maps:get(filter_fn, Opts, fun(_Event) -> true end), + Events = receive_all_events(GaugeName, Timeout, MaxEvents, FilterFn), case length(Events) > 0 andalso lists:last(Events) of #{measurements := #{gauge_set := ExpectedValue}} -> ok; @@ -550,15 +586,36 @@ wait_until_gauge_is(GaugeName, ExpectedValue, Timeout) -> ct:pal("no ~p gauge events received!", [GaugeName]) end. -receive_all_events(EventName, Timeout) -> - receive_all_events(EventName, Timeout, _MaxEvents = 10, _Count = 0, _Acc = []). +receive_all_events(EventName, Timeout, MaxEvents, FilterFn) -> + receive_all_events(EventName, Timeout, MaxEvents, FilterFn, _Count = 0, _Acc = []). -receive_all_events(_EventName, _Timeout, MaxEvents, Count, Acc) when Count >= MaxEvents -> +receive_all_events(_EventName, _Timeout, MaxEvents, _FilterFn, Count, Acc) when + Count >= MaxEvents +-> lists:reverse(Acc); -receive_all_events(EventName, Timeout, MaxEvents, Count, Acc) -> +receive_all_events(EventName, Timeout, MaxEvents, FilterFn, Count, Acc) -> receive {telemetry, #{name := [_, _, EventName]} = Event} -> - receive_all_events(EventName, Timeout, MaxEvents, Count + 1, [Event | Acc]) + case FilterFn(Event) of + true -> + receive_all_events( + EventName, + Timeout, + MaxEvents, + FilterFn, + Count + 1, + [Event | Acc] + ); + false -> + receive_all_events( + EventName, + Timeout, + MaxEvents, + FilterFn, + Count, + Acc + ) + end after Timeout -> lists:reverse(Acc) end. @@ -597,14 +654,14 @@ wait_n_events(TelemetryTable, ResourceId, NEvents, Timeout, EventName) -> %%------------------------------------------------------------------------------ t_publish_success(Config) -> - ResourceId = ?config(resource_id, Config), + ActionResourceId = ?config(action_resource_id, Config), ServiceAccountJSON = ?config(service_account_json, Config), TelemetryTable = ?config(telemetry_table, Config), Topic = <<"t/topic">>, ?assertMatch({ok, _}, create_bridge(Config)), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - assert_empty_metrics(ResourceId), + assert_empty_metrics(ActionResourceId), Payload = <<"payload">>, Message = emqx_message:make(Topic, Payload), emqx:publish(Message), @@ -620,7 +677,7 @@ t_publish_success(Config) -> DecodedMessages ), %% to avoid test flakiness - wait_telemetry_event(TelemetryTable, success, ResourceId), + wait_telemetry_event(TelemetryTable, success, ActionResourceId), wait_until_gauge_is(queuing, 0, 500), wait_until_gauge_is(inflight, 0, 500), assert_metrics( @@ -633,7 +690,7 @@ t_publish_success(Config) -> retried => 0, success => 1 }, - ResourceId + ActionResourceId ), ok. @@ -662,12 +719,12 @@ t_publish_success_infinity_timeout(Config) -> ok. 
t_publish_success_local_topic(Config) -> - ResourceId = ?config(resource_id, Config), + ActionResourceId = ?config(action_resource_id, Config), ServiceAccountJSON = ?config(service_account_json, Config), TelemetryTable = ?config(telemetry_table, Config), LocalTopic = <<"local/topic">>, {ok, _} = create_bridge(Config, #{<<"local_topic">> => LocalTopic}), - assert_empty_metrics(ResourceId), + assert_empty_metrics(ActionResourceId), Payload = <<"payload">>, Message = emqx_message:make(LocalTopic, Payload), emqx:publish(Message), @@ -682,7 +739,7 @@ t_publish_success_local_topic(Config) -> DecodedMessages ), %% to avoid test flakiness - wait_telemetry_event(TelemetryTable, success, ResourceId), + wait_telemetry_event(TelemetryTable, success, ActionResourceId), wait_until_gauge_is(queuing, 0, 500), wait_until_gauge_is(inflight, 0, 500), assert_metrics( @@ -695,7 +752,7 @@ t_publish_success_local_topic(Config) -> retried => 0, success => 1 }, - ResourceId + ActionResourceId ), ok. @@ -704,7 +761,7 @@ t_create_via_http(Config) -> ok. t_publish_templated(Config) -> - ResourceId = ?config(resource_id, Config), + ActionResourceId = ?config(action_resource_id, Config), ServiceAccountJSON = ?config(service_account_json, Config), TelemetryTable = ?config(telemetry_table, Config), Topic = <<"t/topic">>, @@ -721,7 +778,7 @@ t_publish_templated(Config) -> ), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - assert_empty_metrics(ResourceId), + assert_empty_metrics(ActionResourceId), Payload = <<"payload">>, Message = emqx_message:set_header( @@ -747,7 +804,7 @@ t_publish_templated(Config) -> DecodedMessages ), %% to avoid test flakiness - wait_telemetry_event(TelemetryTable, success, ResourceId), + wait_telemetry_event(TelemetryTable, success, ActionResourceId), wait_until_gauge_is(queuing, 0, 500), wait_until_gauge_is(inflight, 0, 500), assert_metrics( @@ -760,7 +817,7 @@ t_publish_templated(Config) -> retried => 0, success => 1 }, - ResourceId + ActionResourceId ), ok. @@ -774,7 +831,7 @@ t_publish_success_batch(Config) -> end. test_publish_success_batch(Config) -> - ResourceId = ?config(resource_id, Config), + ActionResourceId = ?config(action_resource_id, Config), ServiceAccountJSON = ?config(service_account_json, Config), TelemetryTable = ?config(telemetry_table, Config), Topic = <<"t/topic">>, @@ -796,7 +853,7 @@ test_publish_success_batch(Config) -> ), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - assert_empty_metrics(ResourceId), + assert_empty_metrics(ActionResourceId), NumMessages = BatchSize * 2, Messages = [emqx_message:make(Topic, integer_to_binary(N)) || N <- lists:seq(1, NumMessages)], %% publish in parallel to avoid each client blocking and then @@ -822,7 +879,7 @@ test_publish_success_batch(Config) -> wait_telemetry_event( TelemetryTable, success, - ResourceId, + ActionResourceId, #{timeout => 15_000, n_events => NumMessages} ), wait_until_gauge_is(queuing, 0, _Timeout = 400), @@ -837,7 +894,7 @@ test_publish_success_batch(Config) -> retried => 0, success => NumMessages }, - ResourceId + ActionResourceId ), ok. 
@@ -1045,7 +1102,7 @@ t_jose_other_error(Config) -> fun(Res, Trace) -> ?assertMatch({ok, _}, Res), ?assertMatch( - [#{error := {invalid_private_key, {unknown, error}}}], + [#{error := {invalid_private_key, {unknown, error}}} | _], ?of_kind(gcp_pubsub_connector_startup_error, Trace) ), ok @@ -1054,7 +1111,7 @@ t_jose_other_error(Config) -> ok. t_publish_econnrefused(Config) -> - ResourceId = ?config(resource_id, Config), + ResourceId = ?config(connector_resource_id, Config), %% set pipelining to 1 so that one of the 2 requests is `pending' %% in ehttpc. {ok, _} = create_bridge( @@ -1071,7 +1128,7 @@ t_publish_econnrefused(Config) -> do_econnrefused_or_timeout_test(Config, econnrefused). t_publish_timeout(Config) -> - ResourceId = ?config(resource_id, Config), + ActionResourceId = ?config(action_resource_id, Config), %% set pipelining to 1 so that one of the 2 requests is `pending' %% in ehttpc. also, we set the batch size to 1 to also ensure the %% requests are done separately. @@ -1079,12 +1136,13 @@ t_publish_timeout(Config) -> <<"pipelining">> => 1, <<"resource_opts">> => #{ <<"batch_size">> => 1, - <<"resume_interval">> => <<"1s">> + <<"resume_interval">> => <<"1s">>, + <<"metrics_flush_interval">> => <<"700ms">> } }), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - assert_empty_metrics(ResourceId), + assert_empty_metrics(ActionResourceId), TestPid = self(), TimeoutHandler = fun(Req0, State) -> @@ -1107,7 +1165,8 @@ t_publish_timeout(Config) -> do_econnrefused_or_timeout_test(Config, timeout). do_econnrefused_or_timeout_test(Config, Error) -> - ResourceId = ?config(resource_id, Config), + ActionResourceId = ?config(action_resource_id, Config), + ConnectorResourceId = ?config(connector_resource_id, Config), TelemetryTable = ?config(telemetry_table, Config), Topic = <<"t/topic">>, Payload = <<"payload">>, @@ -1156,9 +1215,9 @@ do_econnrefused_or_timeout_test(Config, Error) -> case Error of econnrefused -> case ?of_kind(gcp_pubsub_request_failed, Trace) of - [#{reason := Error, connector := ResourceId} | _] -> + [#{reason := Error, connector := ConnectorResourceId} | _] -> ok; - [#{reason := {closed, _Msg}, connector := ResourceId} | _] -> + [#{reason := {closed, _Msg}, connector := ConnectorResourceId} | _] -> %% _Msg = "The connection was lost." ok; Trace0 -> @@ -1182,7 +1241,7 @@ do_econnrefused_or_timeout_test(Config, Error) -> %% even waiting, hard to avoid flakiness... simpler to just sleep %% a bit until stabilization. ct:sleep(200), - CurrentMetrics = current_metrics(ResourceId), + CurrentMetrics = current_metrics(ActionResourceId), RecordedEvents = ets:tab2list(TelemetryTable), ct:pal("telemetry events: ~p", [RecordedEvents]), ?assertMatch( @@ -1198,7 +1257,19 @@ do_econnrefused_or_timeout_test(Config, Error) -> CurrentMetrics ); timeout -> - wait_until_gauge_is(inflight, 0, _Timeout = 1_000), + wait_telemetry_event( + TelemetryTable, + late_reply, + ActionResourceId, + #{timeout => 5_000, n_events => 2} + ), + wait_until_gauge_is(#{ + gauge_name => inflight, + expected => 0, + filter_fn => mk_res_id_filter(ActionResourceId), + timeout => 1_000, + max_events => 20 + }), wait_until_gauge_is(queuing, 0, _Timeout = 1_000), assert_metrics( #{ @@ -1211,7 +1282,7 @@ do_econnrefused_or_timeout_test(Config, Error) -> success => 0, late_reply => 2 }, - ResourceId + ActionResourceId ) end, @@ -1334,7 +1405,8 @@ t_failure_no_body(Config) -> ok. 
t_unrecoverable_error(Config) -> - ResourceId = ?config(resource_id, Config), + ActionResourceId = ?config(action_resource_id, Config), + TelemetryTable = ?config(telemetry_table, Config), TestPid = self(), FailureNoBodyHandler = fun(Req0, State) -> @@ -1358,7 +1430,7 @@ t_unrecoverable_error(Config) -> ok = emqx_bridge_http_connector_test_server:set_handler(FailureNoBodyHandler), Topic = <<"t/topic">>, {ok, _} = create_bridge(Config), - assert_empty_metrics(ResourceId), + assert_empty_metrics(ActionResourceId), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), Payload = <<"payload">>, @@ -1386,6 +1458,7 @@ t_unrecoverable_error(Config) -> %% removed, this inflight should be 1, because we retry if %% the worker is killed. wait_until_gauge_is(inflight, 0, _Timeout = 400), + wait_telemetry_event(TelemetryTable, failed, ActionResourceId), assert_metrics( #{ dropped => 0, @@ -1398,7 +1471,7 @@ t_unrecoverable_error(Config) -> retried => 0, success => 0 }, - ResourceId + ActionResourceId ), ok. @@ -1407,7 +1480,7 @@ t_stop(Config) -> {ok, _} = create_bridge(Config), ?check_trace( ?wait_async_action( - emqx_bridge_resource:stop(?BRIDGE_TYPE, Name), + emqx_bridge_resource:stop(?BRIDGE_V1_TYPE, Name), #{?snk_kind := gcp_pubsub_stop}, 5_000 ), @@ -1421,13 +1494,13 @@ t_stop(Config) -> ok. t_get_status_ok(Config) -> - ResourceId = ?config(resource_id, Config), + ResourceId = ?config(connector_resource_id, Config), {ok, _} = create_bridge(Config), ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)), ok. t_get_status_no_worker(Config) -> - ResourceId = ?config(resource_id, Config), + ResourceId = ?config(connector_resource_id, Config), {ok, _} = create_bridge(Config), emqx_common_test_helpers:with_mock( ehttpc, @@ -1441,7 +1514,7 @@ t_get_status_no_worker(Config) -> ok. t_get_status_down(Config) -> - ResourceId = ?config(resource_id, Config), + ResourceId = ?config(connector_resource_id, Config), {ok, _} = create_bridge(Config), emqx_common_test_helpers:with_mock( ehttpc, @@ -1457,7 +1530,7 @@ t_get_status_down(Config) -> ok. t_get_status_timeout_calling_workers(Config) -> - ResourceId = ?config(resource_id, Config), + ResourceId = ?config(connector_resource_id, Config), {ok, _} = create_bridge(Config), emqx_common_test_helpers:with_mock( ehttpc, @@ -1520,7 +1593,7 @@ t_on_start_ehttpc_pool_start_failure(Config) -> ), fun(Trace) -> ?assertMatch( - [#{reason := some_error}], + [#{reason := some_error} | _], ?of_kind(gcp_pubsub_ehttpc_pool_start_failure, Trace) ), ok @@ -1668,7 +1741,7 @@ t_attributes(Config) -> ), %% ensure loading cluster override file doesn't mangle the attribute %% placeholders... 
- #{<<"bridges">> := #{?BRIDGE_TYPE_BIN := #{Name := RawConf}}} = + #{<<"actions">> := #{?ACTION_TYPE_BIN := #{Name := RawConf}}} = emqx_config:read_override_conf(#{override_to => cluster}), ?assertEqual( [ @@ -1689,7 +1762,7 @@ t_attributes(Config) -> <<"value">> => <<"${.payload.value}">> } ], - maps:get(<<"attributes_template">>, RawConf) + emqx_utils_maps:deep_get([<<"parameters">>, <<"attributes_template">>], RawConf) ), ok end, diff --git a/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl index 1be7cc6ed..27b068461 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl @@ -25,6 +25,8 @@ resource_type(azure_event_hub_producer) -> emqx_bridge_kafka_impl_producer; resource_type(confluent_producer) -> emqx_bridge_kafka_impl_producer; +resource_type(gcp_pubsub_producer) -> + emqx_bridge_gcp_pubsub_impl_producer; resource_type(kafka_producer) -> emqx_bridge_kafka_impl_producer; resource_type(syskeeper_forwarder) -> @@ -65,6 +67,14 @@ connector_structs() -> required => false } )}, + {gcp_pubsub_producer, + mk( + hoconsc:map(name, ref(emqx_bridge_gcp_pubsub_producer_schema, "config_connector")), + #{ + desc => <<"GCP PubSub Producer Connector Config">>, + required => false + } + )}, {kafka_producer, mk( hoconsc:map(name, ref(emqx_bridge_kafka, "config_connector")), diff --git a/apps/emqx_connector/src/schema/emqx_connector_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_schema.erl index 8397f1bba..6382d2bcd 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_schema.erl @@ -24,7 +24,8 @@ -export([ transform_bridges_v1_to_connectors_and_bridges_v2/1, - transform_bridge_v1_config_to_action_config/4 + transform_bridge_v1_config_to_action_config/4, + top_level_common_connector_keys/0 ]). -export([roots/0, fields/1, desc/1, namespace/0, tags/0]). @@ -32,6 +33,7 @@ -export([get_response/0, put_request/0, post_request/0]). -export([connector_type_to_bridge_types/1]). +-export([common_fields/0]). -if(?EMQX_RELEASE_EDITION == ee). enterprise_api_schemas(Method) -> @@ -64,6 +66,7 @@ enterprise_fields_connectors() -> []. connector_type_to_bridge_types(azure_event_hub_producer) -> [azure_event_hub_producer]; connector_type_to_bridge_types(confluent_producer) -> [confluent_producer]; +connector_type_to_bridge_types(gcp_pubsub_producer) -> [gcp_pubsub_producer]; connector_type_to_bridge_types(kafka_producer) -> [kafka, kafka_producer]; connector_type_to_bridge_types(syskeeper_forwarder) -> [syskeeper_forwarder]; connector_type_to_bridge_types(syskeeper_proxy) -> []. @@ -159,17 +162,20 @@ transform_bridge_v1_config_to_action_config( BridgeV1Conf, ConnectorName, ConnectorFields ). -transform_bridge_v1_config_to_action_config( - BridgeV1Conf, ConnectorName, ConnectorFields -) -> - TopKeys = [ +top_level_common_connector_keys() -> + [ <<"enable">>, <<"connector">>, <<"local_topic">>, <<"resource_opts">>, <<"description">>, <<"parameters">> - ], + ]. + +transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, ConnectorFields +) -> + TopKeys = top_level_common_connector_keys(), TopKeysMap = maps:from_keys(TopKeys, true), %% Remove connector fields ActionMap0 = lists:foldl( @@ -352,6 +358,12 @@ desc(connectors) -> desc(_) -> undefined. 
+common_fields() -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {description, emqx_schema:description_schema()} + ]. + %%====================================================================================== %% Helper Functions %%====================================================================================== diff --git a/apps/emqx_resource/include/emqx_resource.hrl b/apps/emqx_resource/include/emqx_resource.hrl index b34da9a63..7a8fdedef 100644 --- a/apps/emqx_resource/include/emqx_resource.hrl +++ b/apps/emqx_resource/include/emqx_resource.hrl @@ -111,6 +111,10 @@ | {error, {recoverable_error, term()}} | {error, term()}. +-type action_resource_id() :: resource_id(). +-type connector_resource_id() :: resource_id(). +-type message_tag() :: action_resource_id(). + -define(WORKER_POOL_SIZE, 16). -define(DEFAULT_BUFFER_BYTES, 256 * 1024 * 1024). diff --git a/rel/i18n/emqx_bridge_gcp_pubsub_producer_schema.hocon b/rel/i18n/emqx_bridge_gcp_pubsub_producer_schema.hocon new file mode 100644 index 000000000..a4ac2afaf --- /dev/null +++ b/rel/i18n/emqx_bridge_gcp_pubsub_producer_schema.hocon @@ -0,0 +1,18 @@ +emqx_bridge_gcp_pubsub_producer_schema { + + action_parameters.desc: + """Action specific configs.""" + action_parameters.label: + """Action""" + + producer_action.desc: + """Action configs.""" + producer_action.label: + """Action""" + + config_connector.desc: + """Configuration for a GCP PubSub Producer Client.""" + config_connector.label: + """GCP PubSub Producer Client Configuration""" + +} diff --git a/rel/i18n/emqx_bridge_v2_schema.hocon b/rel/i18n/emqx_bridge_v2_schema.hocon index 4543b8eb6..69f8a9109 100644 --- a/rel/i18n/emqx_bridge_v2_schema.hocon +++ b/rel/i18n/emqx_bridge_v2_schema.hocon @@ -6,4 +6,14 @@ desc_bridges_v2.desc: desc_bridges_v2.label: """Bridge Configuration""" +mqtt_topic.desc: +"""MQTT topic or topic filter as data source (action input). If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in the remote system.""" +mqtt_topic.label: +"""Source MQTT Topic""" + +config_enable.desc: +"""Enable (true) or disable (false) this action.""" +config_enable.label: +"""Enable or Disable""" + } diff --git a/rel/i18n/emqx_connector_schema.hocon b/rel/i18n/emqx_connector_schema.hocon index 8b9b2ccac..d3aa1c82b 100644 --- a/rel/i18n/emqx_connector_schema.hocon +++ b/rel/i18n/emqx_connector_schema.hocon @@ -2,15 +2,17 @@ emqx_connector_schema { desc_connectors.desc: """Connectors that are used to connect to external systems""" - desc_connectors.label: """Connectors""" - connector_field.desc: """Name of connector used to connect to the resource where the action is to be performed.""" - connector_field.label: """Connector""" +config_enable.desc: +"""Enable (true) or disable (false) this connector.""" +config_enable.label: +"""Enable or Disable""" + } From 55d1b07a417e9af2229607aab456b8f5f3452563 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Tue, 21 Nov 2023 19:36:38 +0100 Subject: [PATCH 042/101] ci: remove windows from CI Starting from v5.4.0, we will stop release windows packages. 
The windows build scripts are still around, will be deleted in follow-up PRs --- .github/ISSUE_TEMPLATE/bug-report.yaml | 4 -- .github/workflows/build_packages.yaml | 52 -------------------- .github/workflows/build_packages_cron.yaml | 56 ---------------------- 3 files changed, 112 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yaml b/.github/ISSUE_TEMPLATE/bug-report.yaml index 61f7afb6d..c79a13f21 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yaml +++ b/.github/ISSUE_TEMPLATE/bug-report.yaml @@ -61,10 +61,6 @@ body: # paste output here $ uname -a # paste output here - - # On Windows: - C:\> wmic os get Caption, Version, BuildNumber, OSArchitecture - # paste output here ``` diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml index 7f23bf85e..77be974af 100644 --- a/.github/workflows/build_packages.yaml +++ b/.github/workflows/build_packages.yaml @@ -65,58 +65,6 @@ on: default: '5.2-3' jobs: - windows: - runs-on: windows-2019 - if: inputs.profile == 'emqx' - strategy: - fail-fast: false - matrix: - profile: # for now only CE for windows - - emqx - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.ref }} - fetch-depth: 0 - - - uses: ilammy/msvc-dev-cmd@v1.12.0 - - uses: erlef/setup-beam@v1.16.0 - with: - otp-version: 25.3.2 - - name: build - env: - PYTHON: python - DIAGNOSTIC: 1 - run: | - # ensure crypto app (openssl) - erl -eval "erlang:display(crypto:info_lib())" -s init stop - make ${{ matrix.profile }}-tgz - - name: run emqx - timeout-minutes: 5 - run: | - $ErrorActionPreference = "Stop" - ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx start - Start-Sleep -s 10 - $pingOutput = ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx ping - if ($pingOutput = 'pong') { - echo "EMQX started OK" - } else { - echo "Failed to ping EMQX $pingOutput" - Exit 1 - } - ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx stop - echo "EMQX stopped" - ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx install - echo "EMQX installed" - ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx uninstall - echo "EMQX uninstalled" - - uses: actions/upload-artifact@v3 - if: success() - with: - name: ${{ matrix.profile }} - path: _packages/${{ matrix.profile }}/ - retention-days: 7 - mac: strategy: fail-fast: false diff --git a/.github/workflows/build_packages_cron.yaml b/.github/workflows/build_packages_cron.yaml index 244ffbd72..97fb9536c 100644 --- a/.github/workflows/build_packages_cron.yaml +++ b/.github/workflows/build_packages_cron.yaml @@ -130,59 +130,3 @@ jobs: with: payload: | {"text": "Scheduled build of ${{ matrix.profile }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} - - windows: - if: github.repository_owner == 'emqx' - runs-on: windows-2019 - strategy: - fail-fast: false - matrix: - profile: - - emqx - otp: - - 25.3.2 - steps: - - uses: actions/checkout@v3 - - uses: ilammy/msvc-dev-cmd@v1.12.0 - - uses: erlef/setup-beam@v1.16.0 - with: - otp-version: ${{ matrix.otp }} - - name: build - env: - PYTHON: python - DIAGNOSTIC: 1 - run: | - # ensure crypto app (openssl) - erl -eval "erlang:display(crypto:info_lib())" -s init stop - make ${{ matrix.profile }}-tgz - - name: run emqx - timeout-minutes: 5 - run: | - ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx start - Start-Sleep -s 10 - $pingOutput = ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx ping - if ($pingOutput = 'pong') { - echo "EMQX started OK" - } else { - echo "Failed to ping EMQX $pingOutput" - Exit 1 - 
} - ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx stop - echo "EMQX stopped" - ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx install - echo "EMQX installed" - ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx uninstall - echo "EMQX uninstalled" - - uses: actions/upload-artifact@v3 - with: - name: windows - path: _packages/${{ matrix.profile }}/* - retention-days: 7 - - name: Send notification to Slack - uses: slackapi/slack-github-action@v1.23.0 - if: failure() - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - with: - payload: | - {"text": "Scheduled build of ${{ matrix.profile }} package for Windows failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} From 62542e58442b0d0d902601bfbcd0b36520653f98 Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Sun, 19 Nov 2023 22:09:46 +0100 Subject: [PATCH 043/101] feat(ds): Metadata storage for the replication layer --- apps/emqx/src/emqx_persistent_message.erl | 4 +- apps/emqx/src/emqx_persistent_session_ds.erl | 4 +- apps/emqx_durable_storage/src/emqx_ds.erl | 9 +- apps/emqx_durable_storage/src/emqx_ds_app.erl | 1 + .../src/emqx_ds_replication_layer.erl | 26 ++- .../src/emqx_ds_replication_layer_meta.erl | 217 ++++++++++++++++++ .../src/emqx_ds_storage_layer.erl | 2 +- .../test/emqx_ds_SUITE.erl | 21 +- .../emqx_ds_storage_bitfield_lts_SUITE.erl | 8 +- 9 files changed, 269 insertions(+), 23 deletions(-) create mode 100644 apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl diff --git a/apps/emqx/src/emqx_persistent_message.erl b/apps/emqx/src/emqx_persistent_message.erl index 82a345eef..50e25e0be 100644 --- a/apps/emqx/src/emqx_persistent_message.erl +++ b/apps/emqx/src/emqx_persistent_message.erl @@ -57,7 +57,9 @@ storage_backend() -> storage_backend(#{builtin := #{enable := true}}) -> #{ backend => builtin, - storage => {emqx_ds_storage_bitfield_lts, #{}} + storage => {emqx_ds_storage_bitfield_lts, #{}}, + n_shards => 16, + replication_factor => 3 }. %%-------------------------------------------------------------------- diff --git a/apps/emqx/src/emqx_persistent_session_ds.erl b/apps/emqx/src/emqx_persistent_session_ds.erl index 7ba5aa527..34abba2cc 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.erl +++ b/apps/emqx/src/emqx_persistent_session_ds.erl @@ -442,7 +442,9 @@ del_subscription(TopicFilter, DSSessionId) -> create_tables() -> ok = emqx_ds:open_db(?PERSISTENT_MESSAGE_DB, #{ backend => builtin, - storage => {emqx_ds_storage_bitfield_lts, #{}} + storage => {emqx_ds_storage_bitfield_lts, #{}}, + n_shards => 255, + replication_factor => 3 }), ok = mria:create_table( ?SESSION_TAB, diff --git a/apps/emqx_durable_storage/src/emqx_ds.erl b/apps/emqx_durable_storage/src/emqx_ds.erl index 725d62673..84631f38e 100644 --- a/apps/emqx_durable_storage/src/emqx_ds.erl +++ b/apps/emqx_durable_storage/src/emqx_ds.erl @@ -35,7 +35,6 @@ -export_type([ create_db_opts/0, - builtin_db_opts/0, db/0, time/0, topic_filter/0, @@ -87,14 +86,8 @@ -type message_store_opts() :: #{}. --type builtin_db_opts() :: - #{ - backend := builtin, - storage := emqx_ds_storage_layer:prototype() - }. - -type create_db_opts() :: - builtin_db_opts(). + emqx_ds_replication_layer:builtin_db_opts(). -type message_id() :: emqx_ds_replication_layer:message_id(). 
diff --git a/apps/emqx_durable_storage/src/emqx_ds_app.erl b/apps/emqx_durable_storage/src/emqx_ds_app.erl index 858855b6f..0528a0a2c 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_app.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_app.erl @@ -7,4 +7,5 @@ -export([start/2]). start(_Type, _Args) -> + emqx_ds_replication_layer_meta:init(), emqx_ds_sup:start_link(). diff --git a/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl b/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl index a06af104d..f359846eb 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl @@ -40,7 +40,7 @@ do_next_v1/4 ]). --export_type([shard_id/0, stream/0, iterator/0, message_id/0]). +-export_type([shard_id/0, builtin_db_opts/0, stream/0, iterator/0, message_id/0]). %%================================================================================ %% Type declarations @@ -58,7 +58,15 @@ -define(shard, 2). -define(enc, 3). --type shard_id() :: atom(). +-type shard_id() :: binary(). + +-type builtin_db_opts() :: + #{ + backend := builtin, + storage := emqx_ds_storage_layer:prototype(), + n_shards => pos_integer(), + replication_factor => pos_integer() + }. %% This enapsulates the stream entity from the replication level. %% @@ -89,11 +97,12 @@ -spec list_shards(emqx_ds:db()) -> [shard_id()]. list_shards(_DB) -> %% TODO: milestone 5 - list_nodes(). + lists:map(fun atom_to_binary/1, list_nodes()). --spec open_db(emqx_ds:db(), emqx_ds:create_db_opts()) -> ok | {error, _}. -open_db(DB, Opts) -> +-spec open_db(emqx_ds:db(), builtin_db_opts()) -> ok | {error, _}. +open_db(DB, CreateOpts) -> %% TODO: improve error reporting, don't just crash + Opts = emqx_ds_replication_layer_meta:open_db(DB, CreateOpts), lists:foreach( fun(Shard) -> Node = node_of_shard(DB, Shard), @@ -104,6 +113,7 @@ open_db(DB, Opts) -> -spec drop_db(emqx_ds:db()) -> ok | {error, _}. drop_db(DB) -> + _ = emqx_ds_replication_layer_meta:drop_db(DB), lists:foreach( fun(Shard) -> Node = node_of_shard(DB, Shard), @@ -116,7 +126,7 @@ drop_db(DB) -> emqx_ds:store_batch_result(). store_batch(DB, Batch, Opts) -> %% TODO: Currently we store messages locally. - Shard = node(), + Shard = atom_to_binary(node()), Node = node_of_shard(DB, Shard), emqx_ds_proto_v1:store_batch(Node, DB, Shard, Batch, Opts). @@ -238,8 +248,8 @@ do_next_v1(DB, Shard, Iter, BatchSize) -> %%================================================================================ -spec node_of_shard(emqx_ds:db(), shard_id()) -> node(). -node_of_shard(_DB, Node) -> - Node. +node_of_shard(_DB, Shard) -> + binary_to_atom(Shard). list_nodes() -> mria:running_nodes(). diff --git a/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl b/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl new file mode 100644 index 000000000..0bfa2a3ee --- /dev/null +++ b/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl @@ -0,0 +1,217 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+
+%% @doc Metadata storage for the builtin sharded database.
+%%
+%% Currently metadata is stored in mria; that's not ideal, but
+%% eventually we'll replace it, so it's important not to leak
+%% implementation details from this module.
+-module(emqx_ds_replication_layer_meta).
+
+%% API:
+-export([init/0, shards/1, replica_set/2, sites/0, open_db/2, drop_db/1]).
+
+%% internal exports:
+-export([open_db_trans/2, drop_db_trans/1, claim_site/2]).
+
+-export_type([site/0]).
+
+-include_lib("stdlib/include/qlc.hrl").
+
+%%================================================================================
+%% Type declarations
+%%================================================================================
+
+-define(SHARD, emqx_ds_builtin_metadata_shard).
+%% DS database metadata:
+-define(META_TAB, emqx_ds_builtin_metadata_tab).
+%% Mapping from Site to the actual Erlang node:
+-define(NODE_TAB, emqx_ds_builtin_node_tab).
+%% Shard metadata:
+-define(SHARD_TAB, emqx_ds_builtin_shard_tab).
+
+-record(?META_TAB, {
+    db :: emqx_ds:db(),
+    db_props :: emqx_ds_replication_layer:builtin_db_opts()
+}).
+
+-record(?NODE_TAB, {
+    site :: site(),
+    node :: node(),
+    misc = #{} :: map()
+}).
+
+-record(?SHARD_TAB, {
+    shard :: {emqx_ds:db(), emqx_ds_replication_layer:shard_id()},
+    replica_set :: [site()],
+    leader :: node() | undefined,
+    misc = #{} :: map()
+}).
+
+%% Persistent ID of the node (independent from the IP/FQDN):
+-type site() :: binary().
+
+%%================================================================================
+%% API functions
+%%================================================================================
+
+-spec init() -> ok.
+init() ->
+    ensure_tables(),
+    ensure_site().
+
+-spec shards(emqx_ds:db()) -> [emqx_ds_replication_layer:shard_id()].
+shards(DB) ->
+    eval_qlc(
+        qlc:q([Shard || #?SHARD_TAB{shard = {D, Shard}} <- mnesia:table(?SHARD_TAB), D =:= DB])
+    ).
+
+-spec replica_set(emqx_ds:db(), emqx_ds_replication_layer:shard_id()) ->
+    {ok, [site()]} | {error, _}.
+replica_set(DB, Shard) ->
+    case mnesia:dirty_read(?SHARD_TAB, {DB, Shard}) of
+        [#?SHARD_TAB{replica_set = ReplicaSet}] ->
+            {ok, ReplicaSet};
+        [] ->
+            {error, no_shard}
+    end.
+
+-spec sites() -> [site()].
+sites() ->
+    eval_qlc(qlc:q([Site || #?NODE_TAB{site = Site} <- mnesia:table(?NODE_TAB)])).
+
+-spec open_db(emqx_ds:db(), emqx_ds_replication_layer:builtin_db_opts()) ->
+    emqx_ds_replication_layer:builtin_db_opts().
+open_db(DB, DefaultOpts) ->
+    {atomic, Opts} = mria:transaction(?SHARD, fun ?MODULE:open_db_trans/2, [DB, DefaultOpts]),
+    Opts.
+
+-spec drop_db(emqx_ds:db()) -> ok.
+drop_db(DB) ->
+    _ = mria:transaction(?SHARD, fun ?MODULE:drop_db_trans/1, [DB]),
+    ok.
+
+%%================================================================================
+%% behavior callbacks
+%%================================================================================
+
+%%================================================================================
+%% Internal exports
+%%================================================================================
+
+-spec open_db_trans(emqx_ds:db(), emqx_ds_replication_layer:builtin_db_opts()) ->
+    emqx_ds_replication_layer:builtin_db_opts().
+open_db_trans(DB, CreateOpts) ->
+    case mnesia:wread({?META_TAB, DB}) of
+        [] ->
+            NShards = maps:get(n_shards, CreateOpts),
+            ReplicationFactor = maps:get(replication_factor, CreateOpts),
+            mnesia:write(#?META_TAB{db = DB, db_props = CreateOpts}),
+            create_shards(DB, NShards, ReplicationFactor),
+            CreateOpts;
+        [#?META_TAB{db_props = Opts}] ->
+            Opts
+    end.
+
+-spec drop_db_trans(emqx_ds:db()) -> ok.
+drop_db_trans(DB) ->
+    mnesia:delete({?META_TAB, DB}),
+    [mnesia:delete({?SHARD_TAB, Shard}) || Shard <- shards(DB)],
+    ok.
+
+-spec claim_site(site(), node()) -> ok.
+claim_site(Site, Node) ->
+    mnesia:write(#?NODE_TAB{site = Site, node = Node}).
+
+%%================================================================================
+%% Internal functions
+%%================================================================================
+
+ensure_tables() ->
+    %% TODO: seems like it may introduce flakiness
+    Majority = false,
+    ok = mria:create_table(?META_TAB, [
+        {rlog_shard, ?SHARD},
+        {majority, Majority},
+        {type, ordered_set},
+        {storage, rocksdb_copies},
+        {record_name, ?META_TAB},
+        {attributes, record_info(fields, ?META_TAB)}
+    ]),
+    ok = mria:create_table(?NODE_TAB, [
+        {rlog_shard, ?SHARD},
+        {majority, Majority},
+        {type, ordered_set},
+        {storage, rocksdb_copies},
+        {record_name, ?NODE_TAB},
+        {attributes, record_info(fields, ?NODE_TAB)}
+    ]),
+    ok = mria:create_table(?SHARD_TAB, [
+        {rlog_shard, ?SHARD},
+        {majority, Majority},
+        {type, ordered_set},
+        {storage, ram_copies},
+        {record_name, ?SHARD_TAB},
+        {attributes, record_info(fields, ?SHARD_TAB)}
+    ]),
+    ok = mria:wait_for_tables([?META_TAB, ?NODE_TAB, ?SHARD_TAB]).
+
+ensure_site() ->
+    Filename = filename:join(emqx:data_dir(), "emqx_ds_builtin_site.eterm"),
+    case file:consult(Filename) of
+        {ok, [Site]} ->
+            ok;
+        _ ->
+            Site = crypto:strong_rand_bytes(8),
+            ok = filelib:ensure_dir(Filename),
+            {ok, FD} = file:open(Filename, [write]),
+            io:format(FD, "~p.", [Site]),
+            file:close(FD)
+    end,
+    {atomic, ok} = mria:transaction(?SHARD, fun ?MODULE:claim_site/2, [Site, node()]),
+    ok.
+
+-spec create_shards(emqx_ds:db(), pos_integer(), pos_integer()) -> ok.
+create_shards(DB, NShards, ReplicationFactor) ->
+    Shards = [integer_to_binary(I) || I <- lists:seq(0, NShards - 1)],
+    AllSites = sites(),
+    lists:foreach(
+        fun(Shard) ->
+            Hashes0 = [{hash(Shard, Site), Site} || Site <- AllSites],
+            Hashes = lists:sort(Hashes0),
+            {_, Sites} = lists:unzip(Hashes),
+            ReplicaSet = lists:sublist(Sites, 1, ReplicationFactor),
+            Record = #?SHARD_TAB{
+                shard = {DB, Shard},
+                replica_set = ReplicaSet
+            },
+            mnesia:write(Record)
+        end,
+        Shards
+    ).
+
+-spec hash(emqx_ds_replication_layer:shard_id(), site()) -> any().
+hash(Shard, Site) ->
+    erlang:phash2({Shard, Site}).
+
+eval_qlc(Q) ->
+    case mnesia:is_transaction() of
+        true ->
+            qlc:eval(Q);
+        false ->
+            {atomic, Result} = mria:ro_transaction(?SHARD, fun() -> qlc:eval(Q) end),
+            Result
+    end.
diff --git a/apps/emqx_durable_storage/src/emqx_ds_storage_layer.erl b/apps/emqx_durable_storage/src/emqx_ds_storage_layer.erl index 0fe719dbc..54530f428 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_storage_layer.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_storage_layer.erl @@ -384,7 +384,7 @@ rocksdb_open(Shard, Options) -> -spec db_dir(shard_id()) -> file:filename(). db_dir({DB, ShardId}) -> - filename:join([emqx:data_dir(), atom_to_list(DB), atom_to_list(ShardId)]). + filename:join([emqx:data_dir(), atom_to_list(DB), binary_to_list(ShardId)]). %%-------------------------------------------------------------------------------- %% Schema access diff --git a/apps/emqx_durable_storage/test/emqx_ds_SUITE.erl b/apps/emqx_durable_storage/test/emqx_ds_SUITE.erl index 9b74e3227..e42f576b2 100644 --- a/apps/emqx_durable_storage/test/emqx_ds_SUITE.erl +++ b/apps/emqx_durable_storage/test/emqx_ds_SUITE.erl @@ -23,10 +23,14 @@ -include_lib("stdlib/include/assert.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-define(N_SHARDS, 8). + opts() -> #{ backend => builtin, - storage => {emqx_ds_storage_reference, #{}} + storage => {emqx_ds_storage_reference, #{}}, + n_shards => ?N_SHARDS, + replication_factor => 3 }. %% A simple smoke test that verifies that opening/closing the DB @@ -34,6 +38,17 @@ opts() -> t_00_smoke_open_drop(_Config) -> DB = 'DB', ?assertMatch(ok, emqx_ds:open_db(DB, opts())), + [Site] = emqx_ds_replication_layer_meta:sites(), + Shards = emqx_ds_replication_layer_meta:shards(DB), + ?assertEqual(?N_SHARDS, length(Shards)), + lists:foreach( + fun(Shard) -> + ?assertEqual( + {ok, [Site]}, emqx_ds_replication_layer_meta:replica_set(DB, Shard), {DB, Shard} + ) + end, + Shards + ), ?assertMatch(ok, emqx_ds:open_db(DB, opts())), ?assertMatch(ok, emqx_ds:drop_db(DB)). @@ -143,4 +158,6 @@ init_per_testcase(_TC, Config) -> Config. end_per_testcase(_TC, _Config) -> - ok = application:stop(emqx_durable_storage). + ok = application:stop(emqx_durable_storage), + mnesia:delete_schema([node()]), + mria:stop(). diff --git a/apps/emqx_durable_storage/test/emqx_ds_storage_bitfield_lts_SUITE.erl b/apps/emqx_durable_storage/test/emqx_ds_storage_bitfield_lts_SUITE.erl index 6dc24a269..b6b84bce2 100644 --- a/apps/emqx_durable_storage/test/emqx_ds_storage_bitfield_lts_SUITE.erl +++ b/apps/emqx_durable_storage/test/emqx_ds_storage_bitfield_lts_SUITE.erl @@ -15,7 +15,9 @@ -define(DEFAULT_CONFIG, #{ backend => builtin, - storage => {emqx_ds_storage_bitfield_lts, #{}} + storage => {emqx_ds_storage_bitfield_lts, #{}}, + n_shards => 255, + replication_factor => 3 }). -define(COMPACT_CONFIG, #{ @@ -23,7 +25,9 @@ storage => {emqx_ds_storage_bitfield_lts, #{ bits_per_wildcard_level => 8 - }} + }}, + n_shards => 255, + replication_factor => 3 }). 
%% Smoke test for opening and reopening the database From 2a1f7d946a9552f60acc13d3a5b4fca1bfb0a722 Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Mon, 20 Nov 2023 22:27:10 +0100 Subject: [PATCH 044/101] feat(ds): Shard messages by publisher client ID --- apps/emqx/src/emqx_persistent_session_ds.erl | 2 +- apps/emqx/src/emqx_schema.erl | 16 ++ apps/emqx_durable_storage/src/emqx_ds_app.erl | 1 - .../src/emqx_ds_replication_layer.erl | 85 ++++++----- .../src/emqx_ds_replication_layer_meta.erl | 138 +++++++++++++++++- apps/emqx_durable_storage/src/emqx_ds_sup.erl | 11 +- .../src/proto/emqx_ds_proto_v1.erl | 20 +-- .../test/emqx_ds_SUITE.erl | 26 +++- .../emqx_ds_storage_bitfield_lts_SUITE.erl | 6 +- 9 files changed, 237 insertions(+), 68 deletions(-) diff --git a/apps/emqx/src/emqx_persistent_session_ds.erl b/apps/emqx/src/emqx_persistent_session_ds.erl index 34abba2cc..1e3ac69c8 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.erl +++ b/apps/emqx/src/emqx_persistent_session_ds.erl @@ -443,7 +443,7 @@ create_tables() -> ok = emqx_ds:open_db(?PERSISTENT_MESSAGE_DB, #{ backend => builtin, storage => {emqx_ds_storage_bitfield_lts, #{}}, - n_shards => 255, + n_shards => 16, replication_factor => 3 }), ok = mria:create_table( diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index ef34d3e8f..1e6db805b 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -341,6 +341,22 @@ fields("persistent_session_store") -> importance => ?IMPORTANCE_HIDDEN } )}, + {"n_shards", + sc( + pos_integer(), + #{ + default => 16, + importance => ?IMPORTANCE_HIDDEN + } + )}, + {"replication_factor", + sc( + pos_integer(), + #{ + default => 3, + importance => ?IMPORTANCE_HIDDEN + } + )}, {"on_disc", sc( boolean(), diff --git a/apps/emqx_durable_storage/src/emqx_ds_app.erl b/apps/emqx_durable_storage/src/emqx_ds_app.erl index 0528a0a2c..858855b6f 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_app.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_app.erl @@ -7,5 +7,4 @@ -export([start/2]). start(_Type, _Args) -> - emqx_ds_replication_layer_meta:init(), emqx_ds_sup:start_link(). diff --git a/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl b/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl index f359846eb..50331a378 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl @@ -32,8 +32,7 @@ %% internal exports: -export([ - do_open_shard_v1/3, - do_drop_shard_v1/2, + do_drop_db_v1/1, do_store_batch_v1/4, do_get_streams_v1/4, do_make_iterator_v1/5, @@ -42,6 +41,8 @@ -export_type([shard_id/0, builtin_db_opts/0, stream/0, iterator/0, message_id/0]). +-include_lib("emqx_utils/include/emqx_message.hrl"). + %%================================================================================ %% Type declarations %%================================================================================ @@ -95,40 +96,34 @@ %%================================================================================ -spec list_shards(emqx_ds:db()) -> [shard_id()]. -list_shards(_DB) -> - %% TODO: milestone 5 - lists:map(fun atom_to_binary/1, list_nodes()). +list_shards(DB) -> + emqx_ds_replication_layer_meta:shards(DB). -spec open_db(emqx_ds:db(), builtin_db_opts()) -> ok | {error, _}. 
open_db(DB, CreateOpts) -> - %% TODO: improve error reporting, don't just crash Opts = emqx_ds_replication_layer_meta:open_db(DB, CreateOpts), + MyShards = emqx_ds_replication_layer_meta:my_shards(DB), lists:foreach( fun(Shard) -> - Node = node_of_shard(DB, Shard), - ok = emqx_ds_proto_v1:open_shard(Node, DB, Shard, Opts) + emqx_ds_storage_layer:open_shard({DB, Shard}, Opts), + maybe_set_myself_as_leader(DB, Shard) end, - list_shards(DB) + MyShards ). -spec drop_db(emqx_ds:db()) -> ok | {error, _}. drop_db(DB) -> + Nodes = list_nodes(), + _ = emqx_ds_proto_v1:drop_db(Nodes, DB), _ = emqx_ds_replication_layer_meta:drop_db(DB), - lists:foreach( - fun(Shard) -> - Node = node_of_shard(DB, Shard), - ok = emqx_ds_proto_v1:drop_shard(Node, DB, Shard) - end, - list_shards(DB) - ). + ok. --spec store_batch(emqx_ds:db(), [emqx_types:message()], emqx_ds:message_store_opts()) -> +-spec store_batch(emqx_ds:db(), [emqx_types:message(), ...], emqx_ds:message_store_opts()) -> emqx_ds:store_batch_result(). -store_batch(DB, Batch, Opts) -> - %% TODO: Currently we store messages locally. - Shard = atom_to_binary(node()), +store_batch(DB, Messages, Opts) -> + Shard = shard_of_messages(DB, Messages), Node = node_of_shard(DB, Shard), - emqx_ds_proto_v1:store_batch(Node, DB, Shard, Batch, Opts). + emqx_ds_proto_v1:store_batch(Node, DB, Shard, Messages, Opts). -spec get_streams(emqx_ds:db(), emqx_ds:topic_filter(), emqx_ds:time()) -> [{emqx_ds:stream_rank(), stream()}]. @@ -194,16 +189,15 @@ next(DB, Iter0, BatchSize) -> %% Internal exports (RPC targets) %%================================================================================ --spec do_open_shard_v1( - emqx_ds:db(), emqx_ds_replication_layer:shard_id(), emqx_ds:create_db_opts() -) -> - ok | {error, _}. -do_open_shard_v1(DB, Shard, Opts) -> - emqx_ds_storage_layer:open_shard({DB, Shard}, Opts). - --spec do_drop_shard_v1(emqx_ds:db(), emqx_ds_replication_layer:shard_id()) -> ok | {error, _}. -do_drop_shard_v1(DB, Shard) -> - emqx_ds_storage_layer:drop_shard({DB, Shard}). +-spec do_drop_db_v1(emqx_ds:db()) -> ok | {error, _}. +do_drop_db_v1(DB) -> + MyShards = emqx_ds_replication_layer_meta:my_shards(DB), + lists:foreach( + fun(Shard) -> + emqx_ds_storage_layer:drop_shard({DB, Shard}) + end, + MyShards + ). -spec do_store_batch_v1( emqx_ds:db(), @@ -247,9 +241,34 @@ do_next_v1(DB, Shard, Iter, BatchSize) -> %% Internal functions %%================================================================================ +%% TODO: there's no real leader election right now +-spec maybe_set_myself_as_leader(emqx_ds:db(), shard_id()) -> ok. +maybe_set_myself_as_leader(DB, Shard) -> + Site = emqx_ds_replication_layer_meta:this_site(), + case emqx_ds_replication_layer_meta:in_sync_replicas(DB, Shard) of + [Site | _] -> + %% Currently the first in-sync replica always becomes the + %% leader + ok = emqx_ds_replication_layer_meta:set_leader(DB, Shard, node()); + _Sites -> + ok + end. + -spec node_of_shard(emqx_ds:db(), shard_id()) -> node(). -node_of_shard(_DB, Shard) -> - binary_to_atom(Shard). +node_of_shard(DB, Shard) -> + case emqx_ds_replication_layer_meta:shard_leader(DB, Shard) of + {ok, Leader} -> + Leader; + {error, no_leader_for_shard} -> + %% TODO: use optvar + timer:sleep(500), + node_of_shard(DB, Shard) + end. + +%% Here we assume that all messages in the batch come from the same client +shard_of_messages(DB, [#message{from = From} | _]) -> + N = emqx_ds_replication_layer_meta:n_shards(DB), + integer_to_binary(erlang:phash2(From, N)). 
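%% [Editor's sketch -- illustrative, not part of the patch] store_batch/3 above
%% relies on the whole batch sharing one publisher, since only the first
%% message's `from' field is hashed. A caller holding a mixed batch would have
%% to split it per shard first; a possible (assumed, not existing) helper:

group_messages_by_shard(DB, Messages) ->
    N = emqx_ds_replication_layer_meta:n_shards(DB),
    maps:groups_from_list(
        fun(#message{from = From}) -> integer_to_binary(erlang:phash2(From, N)) end,
        Messages
    ).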
list_nodes() -> mria:running_nodes(). diff --git a/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl b/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl index 0bfa2a3ee..0f250022d 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl @@ -21,11 +21,34 @@ %% implementation details from this module. -module(emqx_ds_replication_layer_meta). +-behaviour(gen_server). + %% API: --export([init/0, shards/1, replica_set/2, sites/0, open_db/2, drop_db/1]). +-export([ + shards/1, + my_shards/1, + replica_set/2, + in_sync_replicas/2, + sites/0, + open_db/2, + drop_db/1, + shard_leader/2, + this_site/0, + set_leader/3 +]). + +%% gen_server +-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]). %% internal exports: --export([open_db_trans/2, drop_db_trans/1, claim_site/2]). +-export([ + open_db_trans/2, + drop_db_trans/1, + claim_site/2, + in_sync_replicas_trans/2, + set_leader_trans/3, + n_shards/1 +]). -export_type([site/0]). @@ -35,6 +58,8 @@ %% Type declarations %%================================================================================ +-define(SERVER, ?MODULE). + -define(SHARD, emqx_ds_builtin_metadata_shard). %% DS database metadata: -define(META_TAB, emqx_ds_builtin_metadata_tab). @@ -56,7 +81,10 @@ -record(?SHARD_TAB, { shard :: {emqx_ds:db(), emqx_ds_replication_layer:shard_id()}, + %% Sites that the replica_set :: [site()], + %% Sites that contain the actual data: + in_sync_replicas :: [site()], leader :: node() | undefined, misc = #{} :: map() }). @@ -64,14 +92,21 @@ %% Persistent ID of the node (independent from the IP/FQDN): -type site() :: binary(). +%% Peristent term key: +-define(emqx_ds_builtin_site, emqx_ds_builtin_site). + %%================================================================================ %% API funcions %%================================================================================ --spec init() -> ok. -init() -> - ensure_tables(), - ensure_site(). +-spec n_shards(emqx_ds:db()) -> pos_integer(). +n_shards(DB) -> + [#?META_TAB{db_props = #{n_shards := NShards}}] = mnesia:dirty_read(?META_TAB, DB), + NShards. + +-spec start_link() -> {ok, pid()}. +start_link() -> + gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). -spec shards(emqx_ds:db()) -> [emqx_ds_replication_layer:shard_id()]. shards(DB) -> @@ -79,6 +114,20 @@ shards(DB) -> qlc:q([Shard || #?SHARD_TAB{shard = {D, Shard}} <- mnesia:table(?SHARD_TAB), D =:= DB]) ). +-spec my_shards(emqx_ds:db()) -> [emqx_ds_replication_layer:shard_id()]. +my_shards(DB) -> + Site = this_site(), + eval_qlc( + qlc:q([ + Shard + || #?SHARD_TAB{shard = {D, Shard}, replica_set = ReplicaSet, in_sync_replicas = InSync} <- mnesia:table( + ?SHARD_TAB + ), + D =:= DB, + lists:member(Site, ReplicaSet) orelse lists:member(Site, InSync) + ]) + ). + -spec replica_set(emqx_ds:db(), emqx_ds_replication_layer:shard_id()) -> {ok, [site()]} | {error, _}. replica_set(DB, Shard) -> @@ -89,10 +138,37 @@ replica_set(DB, Shard) -> {error, no_shard} end. +-spec in_sync_replicas(emqx_ds:db(), emqx_ds_replication_layer:shard_id()) -> + [site()]. +in_sync_replicas(DB, ShardId) -> + {atomic, Result} = mria:transaction(?SHARD, fun ?MODULE:in_sync_replicas_trans/2, [DB, ShardId]), + case Result of + {ok, InSync} -> + InSync; + {error, _} -> + [] + end. + -spec sites() -> [site()]. sites() -> eval_qlc(qlc:q([Site || #?NODE_TAB{site = Site} <- mnesia:table(?NODE_TAB)])). 
+-spec shard_leader(emqx_ds:db(), emqx_ds_replication_layer:shard_id()) -> + {ok, node()} | {error, no_leader_for_shard}. +shard_leader(DB, Shard) -> + case mnesia:dirty_read(?SHARD_TAB, {DB, Shard}) of + [#?SHARD_TAB{leader = Leader}] -> + {ok, Leader}; + [] -> + {error, no_leader_for_shard} + end. + +-spec set_leader(emqx_ds:db(), emqx_ds_replication_layer:shard_id(), node()) -> + ok. +set_leader(DB, Shard, Node) -> + {atomic, _} = mria:transaction(?SHARD, fun ?MODULE:set_leader_trans/3, [DB, Shard, Node]), + ok. + -spec open_db(emqx_ds:db(), emqx_ds_replication_layer:builtin_db_opts()) -> emqx_ds_replication_layer:builtin_db_opts(). open_db(DB, DefaultOpts) -> @@ -108,6 +184,29 @@ drop_db(DB) -> %% behavior callbacks %%================================================================================ +-record(s, {}). + +init([]) -> + process_flag(trap_exit, true), + logger:set_process_metadata(#{domain => [ds, meta]}), + ensure_tables(), + ensure_site(), + S = #s{}, + {ok, S}. + +handle_call(_Call, _From, S) -> + {reply, {error, unknown_call}, S}. + +handle_cast(_Cast, S) -> + {noreply, S}. + +handle_info(_Info, S) -> + {noreply, S}. + +terminate(_Reason, #s{}) -> + persistent_term:erase(?emqx_ds_builtin_site), + ok. + %%================================================================================ %% Internal exports %%================================================================================ @@ -136,6 +235,23 @@ drop_db_trans(DB) -> claim_site(Site, Node) -> mnesia:write(#?NODE_TAB{site = Site, node = Node}). +-spec in_sync_replicas_trans(emqx_ds:db(), emqx_ds_replication_layer:shard_id()) -> + {ok, [site()]} | {error, no_shard}. +in_sync_replicas_trans(DB, Shard) -> + case mnesia:read(?SHARD_TAB, {DB, Shard}) of + [#?SHARD_TAB{in_sync_replicas = InSync}] -> + {ok, InSync}; + [] -> + {error, no_shard} + end. + +-spec set_leader_trans(emqx_ds:ds(), emqx_ds_replication_layer:shard_id(), node()) -> + ok. +set_leader_trans(DB, Shard, Node) -> + [Record0] = mnesia:wread({?SHARD_TAB, {DB, Shard}}), + Record = Record0#?SHARD_TAB{leader = Node}, + mnesia:write(Record). + %%================================================================================ %% Internal functions %%================================================================================ @@ -182,8 +298,13 @@ ensure_site() -> file:close(FD) end, {atomic, ok} = mria:transaction(?SHARD, fun ?MODULE:claim_site/2, [Site, node()]), + persistent_term:put(?emqx_ds_builtin_site, Site), ok. +-spec this_site() -> site(). +this_site() -> + persistent_term:get(?emqx_ds_builtin_site). + -spec create_shards(emqx_ds:db(), pos_integer(), pos_integer()) -> ok. 
create_shards(DB, NShards, ReplicationFactor) -> Shards = [integer_to_binary(I) || I <- lists:seq(0, NShards - 1)], @@ -193,10 +314,11 @@ create_shards(DB, NShards, ReplicationFactor) -> Hashes0 = [{hash(Shard, Site), Site} || Site <- Sites], Hashes = lists:sort(Hashes0), {_, Sites} = lists:unzip(Hashes), - ReplicaSet = lists:sublist(Sites, 1, ReplicationFactor), + [First | _] = ReplicaSet = lists:sublist(Sites, 1, ReplicationFactor), Record = #?SHARD_TAB{ shard = {DB, Shard}, - replica_set = ReplicaSet + replica_set = ReplicaSet, + in_sync_replicas = [First] }, mnesia:write(Record) end, diff --git a/apps/emqx_durable_storage/src/emqx_ds_sup.erl b/apps/emqx_durable_storage/src/emqx_ds_sup.erl index d371a2346..82e2711be 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_sup.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_sup.erl @@ -30,7 +30,7 @@ start_link() -> %%================================================================================ init([]) -> - Children = [storage_layer_sup()], + Children = [meta(), storage_layer_sup()], SupFlags = #{ strategy => one_for_all, intensity => 0, @@ -42,6 +42,15 @@ init([]) -> %% Internal functions %%================================================================================ +meta() -> + #{ + id => emqx_ds_replication_layer_meta, + start => {emqx_ds_replication_layer_meta, start_link, []}, + restart => permanent, + type => worker, + shutdown => 5000 + }. + storage_layer_sup() -> #{ id => local_store_shard_sup, diff --git a/apps/emqx_durable_storage/src/proto/emqx_ds_proto_v1.erl b/apps/emqx_durable_storage/src/proto/emqx_ds_proto_v1.erl index 10d1ed7a5..758b5148b 100644 --- a/apps/emqx_durable_storage/src/proto/emqx_ds_proto_v1.erl +++ b/apps/emqx_durable_storage/src/proto/emqx_ds_proto_v1.erl @@ -19,7 +19,7 @@ -include_lib("emqx_utils/include/bpapi.hrl"). %% API: --export([open_shard/4, drop_shard/3, store_batch/5, get_streams/5, make_iterator/6, next/5]). +-export([drop_db/2, store_batch/5, get_streams/5, make_iterator/6, next/5]). %% behavior callbacks: -export([introduced_in/0]). @@ -28,20 +28,10 @@ %% API funcions %%================================================================================ --spec open_shard( - node(), - emqx_ds:db(), - emqx_ds_replication_layer:shard_id(), - emqx_ds:create_db_opts() -) -> - ok. -open_shard(Node, DB, Shard, Opts) -> - erpc:call(Node, emqx_ds_replication_layer, do_open_shard_v1, [DB, Shard, Opts]). - --spec drop_shard(node(), emqx_ds:db(), emqx_ds_replication_layer:shard_id()) -> - ok. -drop_shard(Node, DB, Shard) -> - erpc:call(Node, emqx_ds_replication_layer, do_drop_shard_v1, [DB, Shard]). +-spec drop_db([node()], emqx_ds:db()) -> + [{ok, ok} | erpc:caught_call_exception()]. +drop_db(Node, DB) -> + erpc:multicall(Node, emqx_ds_replication_layer, do_drop_db_v1, [DB]). -spec get_streams( node(), diff --git a/apps/emqx_durable_storage/test/emqx_ds_SUITE.erl b/apps/emqx_durable_storage/test/emqx_ds_SUITE.erl index e42f576b2..8a46804b0 100644 --- a/apps/emqx_durable_storage/test/emqx_ds_SUITE.erl +++ b/apps/emqx_durable_storage/test/emqx_ds_SUITE.erl @@ -23,7 +23,7 @@ -include_lib("stdlib/include/assert.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). --define(N_SHARDS, 8). +-define(N_SHARDS, 1). 
opts() -> #{ @@ -38,18 +38,32 @@ opts() -> t_00_smoke_open_drop(_Config) -> DB = 'DB', ?assertMatch(ok, emqx_ds:open_db(DB, opts())), + %% Check metadata: + %% We have only one site: [Site] = emqx_ds_replication_layer_meta:sites(), + %% Check all shards: Shards = emqx_ds_replication_layer_meta:shards(DB), + %% Since there is only one site all shards should be allocated + %% to this site: + MyShards = emqx_ds_replication_layer_meta:my_shards(DB), ?assertEqual(?N_SHARDS, length(Shards)), lists:foreach( fun(Shard) -> ?assertEqual( - {ok, [Site]}, emqx_ds_replication_layer_meta:replica_set(DB, Shard), {DB, Shard} - ) + {ok, [Site]}, emqx_ds_replication_layer_meta:replica_set(DB, Shard) + ), + ?assertEqual( + [Site], emqx_ds_replication_layer_meta:in_sync_replicas(DB, Shard) + ), + %% Check that the leader is eleected; + ?assertEqual({ok, node()}, emqx_ds_replication_layer_meta:shard_leader(DB, Shard)) end, Shards ), + ?assertEqual(lists:sort(Shards), lists:sort(MyShards)), + %% Reopen the DB and make sure the operation is idempotent: ?assertMatch(ok, emqx_ds:open_db(DB, opts())), + %% Close the DB: ?assertMatch(ok, emqx_ds:drop_db(DB)). %% A simple smoke test that verifies that storing the messages doesn't @@ -153,11 +167,11 @@ end_per_suite(Config) -> ok. init_per_testcase(_TC, Config) -> - %% snabbkaffe:fix_ct_logging(), application:ensure_all_started(emqx_durable_storage), Config. end_per_testcase(_TC, _Config) -> ok = application:stop(emqx_durable_storage), - mnesia:delete_schema([node()]), - mria:stop(). + mria:stop(), + _ = mnesia:delete_schema([node()]), + ok. diff --git a/apps/emqx_durable_storage/test/emqx_ds_storage_bitfield_lts_SUITE.erl b/apps/emqx_durable_storage/test/emqx_ds_storage_bitfield_lts_SUITE.erl index b6b84bce2..7b733406d 100644 --- a/apps/emqx_durable_storage/test/emqx_ds_storage_bitfield_lts_SUITE.erl +++ b/apps/emqx_durable_storage/test/emqx_ds_storage_bitfield_lts_SUITE.erl @@ -16,7 +16,7 @@ -define(DEFAULT_CONFIG, #{ backend => builtin, storage => {emqx_ds_storage_bitfield_lts, #{}}, - n_shards => 255, + n_shards => 16, replication_factor => 3 }). @@ -26,7 +26,7 @@ {emqx_ds_storage_bitfield_lts, #{ bits_per_wildcard_level => 8 }}, - n_shards => 255, + n_shards => 16, replication_factor => 3 }). @@ -391,7 +391,7 @@ end_per_testcase(TC, _Config) -> ok = emqx_ds_storage_layer_sup:stop_shard(shard(TC)). shard(TC) -> - {?MODULE, TC}. + {?MODULE, atom_to_binary(TC)}. keyspace(TC) -> TC. From f5c71e80680ca833ae5599145f63d52392eb7de9 Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Mon, 20 Nov 2023 23:58:23 +0100 Subject: [PATCH 045/101] refactor(ds): Add a wrapper to the store batch API --- .../src/emqx_ds_replication_layer.erl | 18 +++++++++++++----- .../src/proto/emqx_ds_proto_v1.erl | 2 +- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl b/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl index 50331a378..5128188b8 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl @@ -39,7 +39,7 @@ do_next_v1/4 ]). --export_type([shard_id/0, builtin_db_opts/0, stream/0, iterator/0, message_id/0]). +-export_type([shard_id/0, builtin_db_opts/0, stream/0, iterator/0, message_id/0, batch/0]). -include_lib("emqx_utils/include/emqx_message.hrl"). @@ -91,6 +91,13 @@ -type message_id() :: emqx_ds_storage_layer:message_id(). 
+-record(batch, { + messages :: [emqx_types:message()], + misc = #{} :: map() +}). + +-type batch() :: #batch{}. + %%================================================================================ %% API functions %%================================================================================ @@ -123,7 +130,8 @@ drop_db(DB) -> store_batch(DB, Messages, Opts) -> Shard = shard_of_messages(DB, Messages), Node = node_of_shard(DB, Shard), - emqx_ds_proto_v1:store_batch(Node, DB, Shard, Messages, Opts). + Batch = #batch{messages = Messages}, + emqx_ds_proto_v1:store_batch(Node, DB, Shard, Batch, Opts). -spec get_streams(emqx_ds:db(), emqx_ds:topic_filter(), emqx_ds:time()) -> [{emqx_ds:stream_rank(), stream()}]. @@ -202,12 +210,12 @@ do_drop_db_v1(DB) -> -spec do_store_batch_v1( emqx_ds:db(), emqx_ds_replication_layer:shard_id(), - [emqx_types:message()], + batch(), emqx_ds:message_store_opts() ) -> emqx_ds:store_batch_result(). -do_store_batch_v1(DB, Shard, Batch, Options) -> - emqx_ds_storage_layer:store_batch({DB, Shard}, Batch, Options). +do_store_batch_v1(DB, Shard, #batch{messages = Messages}, Options) -> + emqx_ds_storage_layer:store_batch({DB, Shard}, Messages, Options). -spec do_get_streams_v1( emqx_ds:db(), emqx_ds_replicationi_layer:shard_id(), emqx_ds:topic_filter(), emqx_ds:time() diff --git a/apps/emqx_durable_storage/src/proto/emqx_ds_proto_v1.erl b/apps/emqx_durable_storage/src/proto/emqx_ds_proto_v1.erl index 758b5148b..0d7972466 100644 --- a/apps/emqx_durable_storage/src/proto/emqx_ds_proto_v1.erl +++ b/apps/emqx_durable_storage/src/proto/emqx_ds_proto_v1.erl @@ -75,7 +75,7 @@ next(Node, DB, Shard, Iter, BatchSize) -> node(), emqx_ds:db(), emqx_ds_replication_layer:shard_id(), - [emqx_types:message()], + emqx_ds_replication_layer:batch(), emqx_ds:message_store_opts() ) -> emqx_ds:store_batch_result(). From 4d474907348b45bc1385ba15285719fc6ac015b8 Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Tue, 21 Nov 2023 16:06:17 +0100 Subject: [PATCH 046/101] chore(ds): Rebase configuration --- apps/emqx/src/emqx_persistent_message.erl | 8 +++--- apps/emqx/src/emqx_schema.erl | 32 +++++++++++------------ rel/i18n/emqx_schema.hocon | 3 +++ 3 files changed, 24 insertions(+), 19 deletions(-) diff --git a/apps/emqx/src/emqx_persistent_message.erl b/apps/emqx/src/emqx_persistent_message.erl index 50e25e0be..30ebe7417 100644 --- a/apps/emqx/src/emqx_persistent_message.erl +++ b/apps/emqx/src/emqx_persistent_message.erl @@ -54,12 +54,14 @@ is_persistence_enabled() -> storage_backend() -> storage_backend(emqx_config:get([session_persistence, storage])). -storage_backend(#{builtin := #{enable := true}}) -> +storage_backend(#{ + builtin := #{enable := true, n_shards := NShards, replication_factor := ReplicationFactor} +}) -> #{ backend => builtin, storage => {emqx_ds_storage_bitfield_lts, #{}}, - n_shards => 16, - replication_factor => 3 + n_shards => NShards, + replication_factor => ReplicationFactor }. 
%%-------------------------------------------------------------------- diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index 1e6db805b..8e401a442 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -341,22 +341,6 @@ fields("persistent_session_store") -> importance => ?IMPORTANCE_HIDDEN } )}, - {"n_shards", - sc( - pos_integer(), - #{ - default => 16, - importance => ?IMPORTANCE_HIDDEN - } - )}, - {"replication_factor", - sc( - pos_integer(), - #{ - default => 3, - importance => ?IMPORTANCE_HIDDEN - } - )}, {"on_disc", sc( boolean(), @@ -1807,6 +1791,22 @@ fields("session_storage_backend_builtin") -> desc => ?DESC(session_storage_backend_enable), default => true } + )}, + {"n_shards", + sc( + pos_integer(), + #{ + desc => ?DESC(session_builtin_n_shards), + default => 16 + } + )}, + {"replication_factor", + sc( + pos_integer(), + #{ + default => 3, + importance => ?IMPORTANCE_HIDDEN + } )} ]. diff --git a/rel/i18n/emqx_schema.hocon b/rel/i18n/emqx_schema.hocon index d12f6a2d1..389a1f91b 100644 --- a/rel/i18n/emqx_schema.hocon +++ b/rel/i18n/emqx_schema.hocon @@ -1565,6 +1565,9 @@ session_persistence_storage.desc: session_storage_backend_enable.desc: """Enable this backend.""" +session_builtin_n_shards.desc: +"""Number of shards used for storing the messages.""" + session_storage_backend_builtin.desc: """Builtin session storage backend utilizing embedded RocksDB key-value store.""" From 3d823beb11c8efd30257607bff2f799de31a269e Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Tue, 21 Nov 2023 18:33:20 +0100 Subject: [PATCH 047/101] fix(ds): Apply review remarks --- .../src/emqx_ds_replication_layer.erl | 15 ++++++++------- .../src/emqx_ds_replication_layer_meta.erl | 15 ++++++++------- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl b/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl index 5128188b8..7a26b696d 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl @@ -53,6 +53,7 @@ %% tags: -define(STREAM, 1). -define(IT, 2). +-define(BATCH, 3). %% keys: -define(tag, 1). @@ -91,12 +92,12 @@ -type message_id() :: emqx_ds_storage_layer:message_id(). --record(batch, { - messages :: [emqx_types:message()], - misc = #{} :: map() -}). +-define(batch_messages, 2). --type batch() :: #batch{}. +-type batch() :: #{ + ?tag := ?BATCH, + ?batch_messages := [emqx_types:message()] +}. %%================================================================================ %% API functions @@ -130,7 +131,7 @@ drop_db(DB) -> store_batch(DB, Messages, Opts) -> Shard = shard_of_messages(DB, Messages), Node = node_of_shard(DB, Shard), - Batch = #batch{messages = Messages}, + Batch = #{?tag => ?BATCH, ?batch_messages => Messages}, emqx_ds_proto_v1:store_batch(Node, DB, Shard, Batch, Opts). -spec get_streams(emqx_ds:db(), emqx_ds:topic_filter(), emqx_ds:time()) -> @@ -214,7 +215,7 @@ do_drop_db_v1(DB) -> emqx_ds:message_store_opts() ) -> emqx_ds:store_batch_result(). -do_store_batch_v1(DB, Shard, #batch{messages = Messages}, Options) -> +do_store_batch_v1(DB, Shard, #{?tag := ?BATCH, ?batch_messages := Messages}, Options) -> emqx_ds_storage_layer:store_batch({DB, Shard}, Messages, Options). 
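%% [Editor's note -- illustrative, not part of the patch] The batch now crosses
%% the RPC boundary as a map keyed by small integers (?tag, ?batch_messages)
%% rather than as a record, matching how streams and iterators are already
%% encoded in this module. Presumably this keeps the wire format independent of
%% record definitions across node versions. Wrapping and unwrapping boil down
%% to (helper names are assumptions; the patch inlines both expressions):

wrap_batch(Messages) ->
    #{?tag => ?BATCH, ?batch_messages => Messages}.

unwrap_batch(#{?tag := ?BATCH, ?batch_messages := Messages}) ->
    Messages.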
-spec do_get_streams_v1( diff --git a/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl b/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl index 0f250022d..f7dbc828f 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl @@ -81,7 +81,8 @@ -record(?SHARD_TAB, { shard :: {emqx_ds:db(), emqx_ds_replication_layer:shard_id()}, - %% Sites that the + %% Sites that should contain the data when the cluster is in the + %% stable state (no nodes are being added or removed from it): replica_set :: [site()], %% Sites that contain the actual data: in_sync_replicas :: [site()], @@ -99,6 +100,10 @@ %% API funcions %%================================================================================ +-spec this_site() -> site(). +this_site() -> + persistent_term:get(?emqx_ds_builtin_site). + -spec n_shards(emqx_ds:db()) -> pos_integer(). n_shards(DB) -> [#?META_TAB{db_props = #{n_shards := NShards}}] = mnesia:dirty_read(?META_TAB, DB), @@ -301,17 +306,13 @@ ensure_site() -> persistent_term:put(?emqx_ds_builtin_site, Site), ok. --spec this_site() -> site(). -this_site() -> - persistent_term:get(?emqx_ds_builtin_site). - -spec create_shards(emqx_ds:db(), pos_integer(), pos_integer()) -> ok. create_shards(DB, NShards, ReplicationFactor) -> Shards = [integer_to_binary(I) || I <- lists:seq(0, NShards - 1)], - Sites = sites(), + AllSites = sites(), lists:foreach( fun(Shard) -> - Hashes0 = [{hash(Shard, Site), Site} || Site <- Sites], + Hashes0 = [{hash(Shard, Site), Site} || Site <- AllSites], Hashes = lists:sort(Hashes0), {_, Sites} = lists:unzip(Hashes), [First | _] = ReplicaSet = lists:sublist(Sites, 1, ReplicationFactor), From b3dffa4390eb1cc25a6d1db41fc81889252128b5 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Fri, 17 Nov 2023 10:26:20 -0300 Subject: [PATCH 048/101] fix(kafka): don't return `parameters` from `/bridges` API Fixes https://emqx.atlassian.net/browse/EMQX-11412 --- .../test/emqx_bridge_v2_testlib.erl | 19 +++++++++++++ .../src/emqx_bridge_azure_event_hub.erl | 2 +- .../emqx_bridge_azure_event_hub_v2_SUITE.erl | 27 +++++++++++++++++++ .../src/emqx_bridge_kafka.erl | 21 ++++++++------- .../src/emqx_bridge_kafka_action_info.erl | 5 ++-- .../test/emqx_bridge_kafka_tests.erl | 10 +++---- .../emqx_bridge_v2_kafka_producer_SUITE.erl | 27 +++++++++++++++++++ 7 files changed, 93 insertions(+), 18 deletions(-) diff --git a/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl b/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl index 5a2b6b000..6c48f5663 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl @@ -312,6 +312,25 @@ create_rule_and_action_http(BridgeType, RuleTopic, Config, Opts) -> Error end. +api_spec_schemas(Root) -> + Method = get, + Path = emqx_mgmt_api_test_util:api_path(["schemas", Root]), + Params = [], + AuthHeader = [], + Opts = #{return_all => true}, + case emqx_mgmt_api_test_util:request_api(Method, Path, "", AuthHeader, Params, Opts) of + {ok, {{_, 200, _}, _, Res0}} -> + #{<<"components">> := #{<<"schemas">> := Schemas}} = + emqx_utils_json:decode(Res0, [return_maps]), + Schemas + end. + +bridges_api_spec_schemas() -> + api_spec_schemas("bridges"). + +actions_api_spec_schemas() -> + api_spec_schemas("actions"). 
+ %%------------------------------------------------------------------------------ %% Testcases %%------------------------------------------------------------------------------ diff --git a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.erl b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.erl index eb364bdff..553d77326 100644 --- a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.erl +++ b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.erl @@ -126,7 +126,7 @@ fields(action) -> fields(actions) -> Fields = override( - emqx_bridge_kafka:producer_opts(), + emqx_bridge_kafka:producer_opts(action), bridge_v2_overrides() ) ++ [ diff --git a/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_v2_SUITE.erl b/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_v2_SUITE.erl index 206cc08e0..4d441ea0b 100644 --- a/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_v2_SUITE.erl +++ b/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_v2_SUITE.erl @@ -272,6 +272,22 @@ make_message() -> timestamp => Time }. +bridge_api_spec_props_for_get() -> + #{ + <<"bridge_azure_event_hub.get_producer">> := + #{<<"properties">> := Props} + } = + emqx_bridge_v2_testlib:bridges_api_spec_schemas(), + Props. + +action_api_spec_props_for_get() -> + #{ + <<"bridge_azure_event_hub.get_bridge_v2">> := + #{<<"properties">> := Props} + } = + emqx_bridge_v2_testlib:actions_api_spec_schemas(), + Props. + %%------------------------------------------------------------------------------ %% Testcases %%------------------------------------------------------------------------------ @@ -341,3 +357,14 @@ t_same_name_azure_kafka_bridges(Config) -> end ), ok. + +t_parameters_key_api_spec(_Config) -> + BridgeProps = bridge_api_spec_props_for_get(), + ?assert(is_map_key(<<"kafka">>, BridgeProps), #{bridge_props => BridgeProps}), + ?assertNot(is_map_key(<<"parameters">>, BridgeProps), #{bridge_props => BridgeProps}), + + ActionProps = action_api_spec_props_for_get(), + ?assertNot(is_map_key(<<"kafka">>, ActionProps), #{action_props => ActionProps}), + ?assert(is_map_key(<<"parameters">>, ActionProps), #{action_props => ActionProps}), + + ok. diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl index b3934c7bb..f7205e6ae 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl @@ -29,7 +29,7 @@ desc/1, host_opts/0, ssl_client_opts_fields/0, - producer_opts/0 + producer_opts/1 ]). -export([ @@ -261,7 +261,7 @@ fields("config_producer") -> fields("config_consumer") -> fields(kafka_consumer); fields(kafka_producer) -> - connector_config_fields() ++ producer_opts(); + connector_config_fields() ++ producer_opts(v1); fields(kafka_producer_action) -> [ {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, @@ -270,7 +270,7 @@ fields(kafka_producer_action) -> desc => ?DESC(emqx_connector_schema, "connector_field"), required => true })}, {description, emqx_schema:description_schema()} - ] ++ producer_opts(); + ] ++ producer_opts(action); fields(kafka_consumer) -> connector_config_fields() ++ fields(consumer_opts); fields(ssl_client_opts) -> @@ -601,25 +601,28 @@ connector_config_fields() -> {ssl, mk(ref(ssl_client_opts), #{})} ]. 
-producer_opts() -> +producer_opts(ActionOrBridgeV1) -> [ %% Note: there's an implicit convention in `emqx_bridge' that, %% for egress bridges with this config, the published messages %% will be forwarded to such bridges. {local_topic, mk(binary(), #{required => false, desc => ?DESC(mqtt_topic)})}, - parameters_field(), + parameters_field(ActionOrBridgeV1), {resource_opts, mk(ref(resource_opts), #{default => #{}, desc => ?DESC(resource_opts)})} ]. %% Since e5.3.1, we want to rename the field 'kafka' to 'parameters' %% Hoever we need to keep it backward compatible for generated schema json (version 0.1.0) %% since schema is data for the 'schemas' API. -parameters_field() -> +parameters_field(ActionOrBridgeV1) -> + OverriddenV1 = <<"0.1.0">> =:= get(emqx_bridge_schema_version), {Name, Alias} = - case get(emqx_bridge_schema_version) of - <<"0.1.0">> -> + case {OverriddenV1, ActionOrBridgeV1} of + {true, _} -> {kafka, parameters}; - _ -> + {_, v1} -> + {kafka, parameters}; + {_, action} -> {parameters, kafka} end, {Name, diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl index 7b6a946d0..31efc7c11 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_action_info.erl @@ -32,9 +32,8 @@ bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName) -> Config0 = emqx_action_info:transform_bridge_v1_config_to_action_config( BridgeV1Conf, ConnectorName, schema_module(), kafka_producer ), - KafkaMap = emqx_utils_maps:deep_get([<<"parameters">>, <<"kafka">>], Config0, #{}), - Config1 = emqx_utils_maps:deep_remove([<<"parameters">>, <<"kafka">>], Config0), - Config2 = emqx_utils_maps:deep_merge(Config1, #{<<"parameters">> => KafkaMap}), + KafkaMap = maps:get(<<"kafka">>, BridgeV1Conf, #{}), + Config2 = emqx_utils_maps:deep_merge(Config0, #{<<"parameters">> => KafkaMap}), maps:with(producer_action_field_keys(), Config2). 
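%% [Editor's note -- illustrative, not part of the patch] Sketch of what the
%% conversion above does, with a made-up value:
%%   BridgeV1Conf = #{<<"kafka">> => #{<<"topic">> => <<"t">>}, ...}
%% The Kafka-specific map is read from the <<"kafka">> key of the v1 config and
%% merged into the generated action config under <<"parameters">>:
%%   #{<<"parameters">> => #{<<"topic">> => <<"t">>}, ...}
%% after which maps:with/2 keeps only the producer action fields.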
%%------------------------------------------------------------------------------------------ diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_tests.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_tests.erl index 1d9682b9b..69794f2b9 100644 --- a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_tests.erl +++ b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_tests.erl @@ -25,7 +25,7 @@ kafka_producer_test() -> <<"kafka_producer">> := #{ <<"myproducer">> := - #{<<"parameters">> := #{}} + #{<<"kafka">> := #{}} } } }, @@ -52,7 +52,7 @@ kafka_producer_test() -> #{ <<"myproducer">> := #{ - <<"parameters">> := #{}, + <<"kafka">> := #{}, <<"local_topic">> := <<"mqtt/local">> } } @@ -68,7 +68,7 @@ kafka_producer_test() -> #{ <<"myproducer">> := #{ - <<"parameters">> := #{}, + <<"kafka">> := #{}, <<"local_topic">> := <<"mqtt/local">> } } @@ -166,7 +166,7 @@ message_key_dispatch_validations_test() -> ?assertThrow( {_, [ #{ - path := "bridges.kafka_producer.myproducer.parameters", + path := "bridges.kafka_producer.myproducer.kafka", reason := "Message key cannot be empty when `key_dispatch` strategy is used" } ]}, @@ -175,7 +175,7 @@ message_key_dispatch_validations_test() -> ?assertThrow( {_, [ #{ - path := "bridges.kafka_producer.myproducer.parameters", + path := "bridges.kafka_producer.myproducer.kafka", reason := "Message key cannot be empty when `key_dispatch` strategy is used" } ]}, diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl index 6c48146cd..8ce3b7f6b 100644 --- a/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl +++ b/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl @@ -182,6 +182,22 @@ create_action(Name, Config) -> on_exit(fun() -> emqx_bridge_v2:remove(?TYPE, Name) end), Res. +bridge_api_spec_props_for_get() -> + #{ + <<"bridge_kafka.get_producer">> := + #{<<"properties">> := Props} + } = + emqx_bridge_v2_testlib:bridges_api_spec_schemas(), + Props. + +action_api_spec_props_for_get() -> + #{ + <<"bridge_kafka.get_bridge_v2">> := + #{<<"properties">> := Props} + } = + emqx_bridge_v2_testlib:actions_api_spec_schemas(), + Props. + %%------------------------------------------------------------------------------ %% Testcases %%------------------------------------------------------------------------------ @@ -342,3 +358,14 @@ t_bad_url(_Config) -> ), ?assertMatch({ok, #{status := connecting}}, emqx_bridge_v2:lookup(?TYPE, ActionName)), ok. + +t_parameters_key_api_spec(_Config) -> + BridgeProps = bridge_api_spec_props_for_get(), + ?assert(is_map_key(<<"kafka">>, BridgeProps), #{bridge_props => BridgeProps}), + ?assertNot(is_map_key(<<"parameters">>, BridgeProps), #{bridge_props => BridgeProps}), + + ActionProps = action_api_spec_props_for_get(), + ?assertNot(is_map_key(<<"kafka">>, ActionProps), #{action_props => ActionProps}), + ?assert(is_map_key(<<"parameters">>, ActionProps), #{action_props => ActionProps}), + + ok. 
From 11ec1a30a0a2d33617b008e46d51af2a11afbcb2 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Tue, 21 Nov 2023 16:00:19 -0300 Subject: [PATCH 049/101] test(flaky): fix flaky pulsar test --- apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_tests.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_tests.erl b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_tests.erl index 29299dcc9..5492bb2a8 100644 --- a/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_tests.erl +++ b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_tests.erl @@ -10,8 +10,11 @@ %% Test cases %%=========================================================================== +atoms() -> + [my_producer]. + pulsar_producer_validations_test() -> - Name = list_to_atom("my_producer"), + Name = hd(atoms()), Conf0 = pulsar_producer_hocon(), Conf1 = Conf0 ++ From 3165b4f645b1b110abfc553a8ed6d2491a3e33e7 Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Tue, 21 Nov 2023 18:53:34 +0100 Subject: [PATCH 050/101] fix(ds): Abort application startup when rocksdb is not avialable --- apps/emqx/src/emqx_persistent_session_ds.erl | 6 ------ apps/emqx_durable_storage/src/emqx_ds_sup.erl | 9 ++++++++- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/apps/emqx/src/emqx_persistent_session_ds.erl b/apps/emqx/src/emqx_persistent_session_ds.erl index 1e3ac69c8..928115a52 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.erl +++ b/apps/emqx/src/emqx_persistent_session_ds.erl @@ -440,12 +440,6 @@ del_subscription(TopicFilter, DSSessionId) -> %%-------------------------------------------------------------------- create_tables() -> - ok = emqx_ds:open_db(?PERSISTENT_MESSAGE_DB, #{ - backend => builtin, - storage => {emqx_ds_storage_bitfield_lts, #{}}, - n_shards => 16, - replication_factor => 3 - }), ok = mria:create_table( ?SESSION_TAB, [ diff --git a/apps/emqx_durable_storage/src/emqx_ds_sup.erl b/apps/emqx_durable_storage/src/emqx_ds_sup.erl index 82e2711be..081557a46 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_sup.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_sup.erl @@ -29,8 +29,15 @@ start_link() -> %% behaviour callbacks %%================================================================================ +-dialyzer({nowarn_function, init/1}). init([]) -> - Children = [meta(), storage_layer_sup()], + %% TODO: technically, we don't need rocksDB for the alternative + %% backends. 
But right now we have any: + Children = + case mria:rocksdb_backend_available() of + true -> [meta(), storage_layer_sup()]; + false -> [] + end, SupFlags = #{ strategy => one_for_all, intensity => 0, From 3a8c33280548f63e016f5a88d22bb165ef178be5 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Tue, 21 Nov 2023 16:27:54 -0300 Subject: [PATCH 051/101] fix(actions_api): don't crash on validation errors Fixes https://emqx.atlassian.net/browse/EMQX-11394 --- apps/emqx_bridge/src/emqx_bridge_v2_api.erl | 2 ++ .../test/emqx_bridge_v2_api_SUITE.erl | 22 +++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/apps/emqx_bridge/src/emqx_bridge_v2_api.erl b/apps/emqx_bridge/src/emqx_bridge_v2_api.erl index d5fd09631..cb1f7cc62 100644 --- a/apps/emqx_bridge/src/emqx_bridge_v2_api.erl +++ b/apps/emqx_bridge/src/emqx_bridge_v2_api.erl @@ -791,6 +791,8 @@ do_create_or_update_bridge(BridgeType, BridgeName, Conf, HttpStatusCode) -> PreOrPostConfigUpdate =:= pre_config_update; PreOrPostConfigUpdate =:= post_config_update -> + ?BAD_REQUEST(map_to_json(redact(Reason))); + {error, Reason} when is_map(Reason) -> ?BAD_REQUEST(map_to_json(redact(Reason))) end. diff --git a/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl index b99a462b4..ed6ef9eb9 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl @@ -1021,6 +1021,28 @@ t_action_types(Config) -> ?assert(lists:all(fun is_binary/1, Types), #{types => Types}), ok. +t_bad_name(Config) -> + Name = <<"_bad_name">>, + Res = request_json( + post, + uri([?ROOT]), + ?KAFKA_BRIDGE(Name), + Config + ), + ?assertMatch({ok, 400, #{<<"message">> := _}}, Res), + {ok, 400, #{<<"message">> := Msg0}} = Res, + Msg = emqx_utils_json:decode(Msg0, [return_maps]), + ?assertMatch( + #{ + <<"got">> := [<<"_bad_name">>], + <<"kind">> := <<"validation_error">>, + <<"path">> := <<"actions.kafka_producer">>, + <<"reason">> := <<"invalid_map_key">> + }, + Msg + ), + ok. + %%% helpers listen_on_random_port() -> SockOpts = [binary, {active, false}, {packet, raw}, {reuseaddr, true}, {backlog, 1000}], From 5ebd954b166ba7c2731d17d4944c06a6921b2e80 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Tue, 21 Nov 2023 21:17:57 +0100 Subject: [PATCH 052/101] chore: bump release version to e5.3.2-alpha.1 --- apps/emqx/include/emqx_release.hrl | 4 ++-- deploy/charts/emqx-enterprise/Chart.yaml | 4 ++-- deploy/charts/emqx/Chart.yaml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/apps/emqx/include/emqx_release.hrl b/apps/emqx/include/emqx_release.hrl index 011d52595..2f9254d70 100644 --- a/apps/emqx/include/emqx_release.hrl +++ b/apps/emqx/include/emqx_release.hrl @@ -32,10 +32,10 @@ %% `apps/emqx/src/bpapi/README.md' %% Opensource edition --define(EMQX_RELEASE_CE, "5.3.1"). +-define(EMQX_RELEASE_CE, "5.3.2"). %% Enterprise edition --define(EMQX_RELEASE_EE, "5.3.1"). +-define(EMQX_RELEASE_EE, "5.3.2-alpha.1"). %% The HTTP API version -define(EMQX_API_VERSION, "5.0"). diff --git a/deploy/charts/emqx-enterprise/Chart.yaml b/deploy/charts/emqx-enterprise/Chart.yaml index d9ad72611..aed38cd63 100644 --- a/deploy/charts/emqx-enterprise/Chart.yaml +++ b/deploy/charts/emqx-enterprise/Chart.yaml @@ -14,8 +14,8 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
-version: 5.3.1 +version: 5.3.2-alpha.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. -appVersion: 5.3.1 +appVersion: 5.3.2-alpha.1 diff --git a/deploy/charts/emqx/Chart.yaml b/deploy/charts/emqx/Chart.yaml index 76bcd3aaa..9444fe14c 100644 --- a/deploy/charts/emqx/Chart.yaml +++ b/deploy/charts/emqx/Chart.yaml @@ -14,8 +14,8 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 5.3.1 +version: 5.3.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. -appVersion: 5.3.1 +appVersion: 5.3.2 From 39791511fcab8f7ebaedad9eb54433abbfad4ebc Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Tue, 21 Nov 2023 17:38:13 -0300 Subject: [PATCH 053/101] chore: remove obsolete workaround --- apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl | 9 +++------ apps/emqx_conf/src/emqx_conf.erl | 7 +------ 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl index f7205e6ae..8c90e0896 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl @@ -615,14 +615,11 @@ producer_opts(ActionOrBridgeV1) -> %% Hoever we need to keep it backward compatible for generated schema json (version 0.1.0) %% since schema is data for the 'schemas' API. parameters_field(ActionOrBridgeV1) -> - OverriddenV1 = <<"0.1.0">> =:= get(emqx_bridge_schema_version), {Name, Alias} = - case {OverriddenV1, ActionOrBridgeV1} of - {true, _} -> + case ActionOrBridgeV1 of + v1 -> {kafka, parameters}; - {_, v1} -> - {kafka, parameters}; - {_, action} -> + action -> {parameters, kafka} end, {Name, diff --git a/apps/emqx_conf/src/emqx_conf.erl b/apps/emqx_conf/src/emqx_conf.erl index c986a65ee..7ff06b0ef 100644 --- a/apps/emqx_conf/src/emqx_conf.erl +++ b/apps/emqx_conf/src/emqx_conf.erl @@ -193,12 +193,7 @@ hotconf_schema_json() -> bridge_schema_json() -> Version = <<"0.1.0">>, SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => Version}, - put(emqx_bridge_schema_version, Version), - try - gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo) - after - erase(emqx_bridge_schema_version) - end. + gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo). %% TODO: remove it and also remove hocon_md.erl and friends. 
%% markdown generation from schema is a failure and we are moving to an interactive From f1f62d3af1154696fe485f6f5de60e0236c8551a Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Wed, 22 Nov 2023 09:48:08 +0100 Subject: [PATCH 054/101] ci: stop building packages for macos-11 --- .github/workflows/build_packages.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml index 77be974af..6d642ebef 100644 --- a/.github/workflows/build_packages.yaml +++ b/.github/workflows/build_packages.yaml @@ -74,9 +74,9 @@ jobs: otp: - ${{ inputs.otp_vsn }} os: - - macos-11 - macos-12 - macos-12-arm64 + - macos-13 runs-on: ${{ matrix.os }} steps: - uses: emqx/self-hosted-cleanup-action@v1.0.3 From c074ed43ddc4c211479a9fab95bd7758ae379415 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Wed, 22 Nov 2023 09:58:51 +0100 Subject: [PATCH 055/101] chore: add changelog --- changes/ce/breaking-11994.en.md | 1 + changes/ce/breaking-11998.en.md | 1 + 2 files changed, 2 insertions(+) create mode 100644 changes/ce/breaking-11994.en.md create mode 100644 changes/ce/breaking-11998.en.md diff --git a/changes/ce/breaking-11994.en.md b/changes/ce/breaking-11994.en.md new file mode 100644 index 000000000..45a3fe23c --- /dev/null +++ b/changes/ce/breaking-11994.en.md @@ -0,0 +1 @@ +Stop releasing packages for Windows. diff --git a/changes/ce/breaking-11998.en.md b/changes/ce/breaking-11998.en.md new file mode 100644 index 000000000..aa0e16501 --- /dev/null +++ b/changes/ce/breaking-11998.en.md @@ -0,0 +1 @@ +Stop releasing packages for MacOS 11 (BigSur). From 3261a12140ddbae7564e4f54ddc2cb6435f98563 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Tue, 21 Nov 2023 21:42:55 +0100 Subject: [PATCH 056/101] fix(emqx_resource): do not allow leading _ or - as resource name --- apps/emqx_bridge/test/emqx_bridge_SUITE.erl | 2 +- apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl | 2 +- .../emqx_bridge_v1_compatibility_layer_SUITE.erl | 2 +- apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl | 4 +--- apps/emqx_connector/test/emqx_connector_SUITE.erl | 2 +- apps/emqx_resource/src/emqx_resource.erl | 13 +++++++++---- 6 files changed, 14 insertions(+), 11 deletions(-) diff --git a/apps/emqx_bridge/test/emqx_bridge_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_SUITE.erl index b29ba154e..bc8be5476 100644 --- a/apps/emqx_bridge/test/emqx_bridge_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_SUITE.erl @@ -199,7 +199,7 @@ t_create_with_bad_name(_Config) -> ?assertMatch( {error, {pre_config_update, emqx_bridge_app, #{ - reason := <<"only 0-9a-zA-Z_- is allowed in resource name", _/binary>>, + reason := <<"Invalid name format.", _/binary>>, kind := validation_error }}}, emqx:update_config(Path, Conf) diff --git a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl index 99a2bc8cd..ccc944572 100644 --- a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl @@ -1365,7 +1365,7 @@ t_create_with_bad_name(Config) -> ?assertMatch( #{ <<"kind">> := <<"validation_error">>, - <<"reason">> := <<"only 0-9a-zA-Z_- is allowed in resource name", _/binary>> + <<"reason">> := <<"Invalid name format.", _/binary>> }, Msg ), diff --git a/apps/emqx_bridge/test/emqx_bridge_v1_compatibility_layer_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_v1_compatibility_layer_SUITE.erl index c714b858a..aa564aa9c 100644 --- 
a/apps/emqx_bridge/test/emqx_bridge_v1_compatibility_layer_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v1_compatibility_layer_SUITE.erl @@ -829,7 +829,7 @@ t_create_with_bad_name(_Config) -> <<"code">> := <<"BAD_REQUEST">>, <<"message">> := #{ <<"kind">> := <<"validation_error">>, - <<"reason">> := <<"only 0-9a-zA-Z_- is allowed in resource name", _/binary>> + <<"reason">> := <<"Invalid name format.", _/binary>> } }}} = create_bridge_http_api_v1(Opts), ok. diff --git a/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl index ed6ef9eb9..cf58eefde 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl @@ -1034,10 +1034,8 @@ t_bad_name(Config) -> Msg = emqx_utils_json:decode(Msg0, [return_maps]), ?assertMatch( #{ - <<"got">> := [<<"_bad_name">>], <<"kind">> := <<"validation_error">>, - <<"path">> := <<"actions.kafka_producer">>, - <<"reason">> := <<"invalid_map_key">> + <<"reason">> := <<"Invalid name format.", _/binary>> }, Msg ), diff --git a/apps/emqx_connector/test/emqx_connector_SUITE.erl b/apps/emqx_connector/test/emqx_connector_SUITE.erl index ee7e29741..669d05442 100644 --- a/apps/emqx_connector/test/emqx_connector_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_SUITE.erl @@ -229,7 +229,7 @@ t_create_with_bad_name_direct_path(_Config) -> {error, {pre_config_update, _ConfigHandlerMod, #{ kind := validation_error, - reason := <<"only 0-9a-zA-Z_- is allowed in resource name", _/binary>> + reason := <<"Invalid name format.", _/binary>> }}}, emqx:update_config(Path, ConnConfig) ), diff --git a/apps/emqx_resource/src/emqx_resource.erl b/apps/emqx_resource/src/emqx_resource.erl index 90df229e4..0bc1eb615 100644 --- a/apps/emqx_resource/src/emqx_resource.erl +++ b/apps/emqx_resource/src/emqx_resource.erl @@ -812,11 +812,11 @@ validate_name(Name) -> ok. validate_name(<<>>, _Opts) -> - invalid_data("name cannot be empty string"); + invalid_data("Name cannot be empty string"); validate_name(Name, _Opts) when size(Name) >= 255 -> - invalid_data("name length must be less than 255"); + invalid_data("Name length must be less than 255"); validate_name(Name, Opts) -> - case re:run(Name, <<"^[-0-9a-zA-Z_]+$">>, [{capture, none}]) of + case re:run(Name, <<"^[0-9a-zA-Z][-0-9a-zA-Z_]*$">>, [{capture, none}]) of match -> case maps:get(atom_name, Opts, true) of %% NOTE @@ -827,7 +827,12 @@ validate_name(Name, Opts) -> end; nomatch -> invalid_data( - <<"only 0-9a-zA-Z_- is allowed in resource name, got: ", Name/binary>> + << + "Invalid name format. The name must begin with a letter or number " + "(0-9, a-z, A-Z) and can only include underscores and hyphens as " + "non-initial characters. Got: ", + Name/binary + >> ) end. 
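%% [Editor's sketch -- illustrative, not part of the patch] With the regex
%% above, a resource name must start with an alphanumeric character; '_' and
%% '-' are only allowed after the first character. A standalone predicate with
%% the same rule (the name is_valid_resource_name/1 is assumed):

is_valid_resource_name(Name) when is_binary(Name) ->
    match =:= re:run(Name, <<"^[0-9a-zA-Z][-0-9a-zA-Z_]*$">>, [{capture, none}]).

%% is_valid_resource_name(<<"my_bridge-1">>) returns true, while
%% is_valid_resource_name(<<"_bad_name">>) returns false -- the case exercised
%% by t_bad_name in the bridge v2 API test suite.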
From a1b9a14fa1e04ed3ac5dbcc56016da3db8951caf Mon Sep 17 00:00:00 2001 From: Ilya Averyanov Date: Wed, 22 Nov 2023 14:43:57 +0300 Subject: [PATCH 057/101] feat(ds): allow fdb implementation for durable storage --- apps/emqx/src/emqx_persistent_message.erl | 6 +++++- apps/emqx/src/emqx_schema.erl | 2 +- apps/emqx_durable_storage/src/emqx_ds.erl | 13 ++++++++++--- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/apps/emqx/src/emqx_persistent_message.erl b/apps/emqx/src/emqx_persistent_message.erl index 30ebe7417..01539ba73 100644 --- a/apps/emqx/src/emqx_persistent_message.erl +++ b/apps/emqx/src/emqx_persistent_message.erl @@ -62,7 +62,11 @@ storage_backend(#{ storage => {emqx_ds_storage_bitfield_lts, #{}}, n_shards => NShards, replication_factor => ReplicationFactor - }. + }; +storage_backend(#{ + fdb := #{enable := true} = FDBConfig +}) -> + FDBConfig#{backend => fdb}. %%-------------------------------------------------------------------- diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index 8e401a442..e28347897 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -1781,7 +1781,7 @@ fields("session_storage_backend") -> desc => ?DESC(session_storage_backend_builtin), required => {false, recursively} })} - ]; + ] ++ emqx_schema_hooks:injection_point('session_persistence.storage_backends', []); fields("session_storage_backend_builtin") -> [ {"enable", diff --git a/apps/emqx_durable_storage/src/emqx_ds.erl b/apps/emqx_durable_storage/src/emqx_ds.erl index 84631f38e..f5872ea1e 100644 --- a/apps/emqx_durable_storage/src/emqx_ds.erl +++ b/apps/emqx_durable_storage/src/emqx_ds.erl @@ -86,8 +86,14 @@ -type message_store_opts() :: #{}. +-type generic_db_opts() :: + #{ + backend := atom(), + _ => _ + }. + -type create_db_opts() :: - emqx_ds_replication_layer:builtin_db_opts(). + emqx_ds_replication_layer:builtin_db_opts() | generic_db_opts(). -type message_id() :: emqx_ds_replication_layer:message_id(). @@ -120,10 +126,11 @@ %% @doc Different DBs are completely independent from each other. They %% could represent something like different tenants. -spec open_db(db(), create_db_opts()) -> ok. -open_db(DB, Opts = #{backend := Backend}) when Backend =:= builtin -> +open_db(DB, Opts = #{backend := Backend}) when Backend =:= builtin orelse Backend =:= fdb -> Module = case Backend of - builtin -> emqx_ds_replication_layer + builtin -> emqx_ds_replication_layer; + fdb -> emqx_fdb_ds end, persistent_term:put(?persistent_term(DB), Module), ?module(DB):open_db(DB, Opts). 
From 1b2c0526468792b8984630404977f420a6d935d6 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Thu, 16 Nov 2023 20:36:55 +0100 Subject: [PATCH 058/101] docs: add type namespaces --- .../src/emqx_authz/sources/emqx_authz_file_schema.erl | 3 +++ apps/emqx_conf/src/emqx_conf_schema.erl | 3 +-- apps/emqx_dashboard_sso/src/emqx_dashboard_sso_saml.erl | 3 +++ apps/emqx_dashboard_sso/src/emqx_dashboard_sso_schema.erl | 2 +- apps/emqx_gateway_coap/src/emqx_coap_schema.erl | 4 +++- apps/emqx_gateway_exproto/src/emqx_exproto_schema.erl | 4 +++- apps/emqx_gateway_exproto/src/emqx_gateway_exproto.app.src | 2 +- apps/emqx_gateway_lwm2m/src/emqx_gateway_lwm2m.app.src | 2 +- apps/emqx_gateway_lwm2m/src/emqx_lwm2m_schema.erl | 4 +++- apps/emqx_gateway_mqttsn/src/emqx_gateway_mqttsn.app.src | 2 +- apps/emqx_gateway_mqttsn/src/emqx_mqttsn_schema.erl | 4 +++- apps/emqx_gateway_stomp/src/emqx_stomp_schema.erl | 4 +++- apps/emqx_retainer/src/emqx_retainer_schema.erl | 2 +- apps/emqx_schema_registry/src/emqx_schema_registry.app.src | 2 +- apps/emqx_schema_registry/src/emqx_schema_registry_schema.erl | 3 +++ 15 files changed, 31 insertions(+), 13 deletions(-) diff --git a/apps/emqx_auth/src/emqx_authz/sources/emqx_authz_file_schema.erl b/apps/emqx_auth/src/emqx_authz/sources/emqx_authz_file_schema.erl index cea697d66..ae06147ff 100644 --- a/apps/emqx_auth/src/emqx_authz/sources/emqx_authz_file_schema.erl +++ b/apps/emqx_auth/src/emqx_authz/sources/emqx_authz_file_schema.erl @@ -22,6 +22,7 @@ -behaviour(emqx_authz_schema). -export([ + namespace/0, type/0, fields/1, desc/1, @@ -30,6 +31,8 @@ select_union_member/1 ]). +namespace() -> "authz". + type() -> ?AUTHZ_TYPE. fields(file) -> diff --git a/apps/emqx_conf/src/emqx_conf_schema.erl b/apps/emqx_conf/src/emqx_conf_schema.erl index 3a2b5d972..a872a6a56 100644 --- a/apps/emqx_conf/src/emqx_conf_schema.erl +++ b/apps/emqx_conf/src/emqx_conf_schema.erl @@ -79,8 +79,7 @@ upgrade_raw_conf(RawConf) -> emqx_connector_schema:transform_bridges_v1_to_connectors_and_bridges_v2(RawConf). -%% root config should not have a namespace -namespace() -> undefined. +namespace() -> emqx. tags() -> [<<"EMQX">>]. diff --git a/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_saml.erl b/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_saml.erl index 907d2dcde..42a0e8f74 100644 --- a/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_saml.erl +++ b/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_saml.erl @@ -12,6 +12,7 @@ -behaviour(emqx_dashboard_sso). -export([ + namespace/0, hocon_ref/0, login_ref/0, fields/1, @@ -43,6 +44,8 @@ %% Hocon Schema %%------------------------------------------------------------------------------ +namespace() -> "dashboard". + hocon_ref() -> hoconsc:ref(?MODULE, saml). diff --git a/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_schema.erl b/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_schema.erl index aa032a3cc..a73f13ca8 100644 --- a/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_schema.erl +++ b/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_schema.erl @@ -21,7 +21,7 @@ %%------------------------------------------------------------------------------ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "sso". +namespace() -> dashboard. 
fields(sso) -> lists:map( diff --git a/apps/emqx_gateway_coap/src/emqx_coap_schema.erl b/apps/emqx_gateway_coap/src/emqx_coap_schema.erl index b7ce88451..c4879f553 100644 --- a/apps/emqx_gateway_coap/src/emqx_coap_schema.erl +++ b/apps/emqx_gateway_coap/src/emqx_coap_schema.erl @@ -26,7 +26,9 @@ -reflect_type([duration/0]). %% config schema provides --export([fields/1, desc/1]). +-export([namespace/0, fields/1, desc/1]). + +namespace() -> "gateway". fields(coap) -> [ diff --git a/apps/emqx_gateway_exproto/src/emqx_exproto_schema.erl b/apps/emqx_gateway_exproto/src/emqx_exproto_schema.erl index 10583e41a..7eeceb3cb 100644 --- a/apps/emqx_gateway_exproto/src/emqx_exproto_schema.erl +++ b/apps/emqx_gateway_exproto/src/emqx_exproto_schema.erl @@ -28,7 +28,9 @@ ]). %% config schema provides --export([fields/1, desc/1]). +-export([namespace/0, fields/1, desc/1]). + +namespace() -> "gateway". fields(exproto) -> [ diff --git a/apps/emqx_gateway_exproto/src/emqx_gateway_exproto.app.src b/apps/emqx_gateway_exproto/src/emqx_gateway_exproto.app.src index 09622763b..ffd8fd3d1 100644 --- a/apps/emqx_gateway_exproto/src/emqx_gateway_exproto.app.src +++ b/apps/emqx_gateway_exproto/src/emqx_gateway_exproto.app.src @@ -1,6 +1,6 @@ {application, emqx_gateway_exproto, [ {description, "ExProto Gateway"}, - {vsn, "0.1.4"}, + {vsn, "0.1.5"}, {registered, []}, {applications, [kernel, stdlib, grpc, emqx, emqx_gateway]}, {env, []}, diff --git a/apps/emqx_gateway_lwm2m/src/emqx_gateway_lwm2m.app.src b/apps/emqx_gateway_lwm2m/src/emqx_gateway_lwm2m.app.src index e5afd7871..371f74625 100644 --- a/apps/emqx_gateway_lwm2m/src/emqx_gateway_lwm2m.app.src +++ b/apps/emqx_gateway_lwm2m/src/emqx_gateway_lwm2m.app.src @@ -1,6 +1,6 @@ {application, emqx_gateway_lwm2m, [ {description, "LwM2M Gateway"}, - {vsn, "0.1.3"}, + {vsn, "0.1.4"}, {registered, []}, {applications, [kernel, stdlib, emqx, emqx_gateway, emqx_gateway_coap]}, {env, []}, diff --git a/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_schema.erl b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_schema.erl index b674c3260..41df3b970 100644 --- a/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_schema.erl +++ b/apps/emqx_gateway_lwm2m/src/emqx_lwm2m_schema.erl @@ -28,7 +28,9 @@ -reflect_type([duration/0, duration_s/0]). %% config schema provides --export([fields/1, desc/1]). +-export([namespace/0, fields/1, desc/1]). + +namespace() -> gateway. fields(lwm2m) -> [ diff --git a/apps/emqx_gateway_mqttsn/src/emqx_gateway_mqttsn.app.src b/apps/emqx_gateway_mqttsn/src/emqx_gateway_mqttsn.app.src index c2f6d642b..a7de83b74 100644 --- a/apps/emqx_gateway_mqttsn/src/emqx_gateway_mqttsn.app.src +++ b/apps/emqx_gateway_mqttsn/src/emqx_gateway_mqttsn.app.src @@ -1,6 +1,6 @@ {application, emqx_gateway_mqttsn, [ {description, "MQTT-SN Gateway"}, - {vsn, "0.1.5"}, + {vsn, "0.1.6"}, {registered, []}, {applications, [kernel, stdlib, emqx, emqx_gateway]}, {env, []}, diff --git a/apps/emqx_gateway_mqttsn/src/emqx_mqttsn_schema.erl b/apps/emqx_gateway_mqttsn/src/emqx_mqttsn_schema.erl index 08fb854b4..e028a698b 100644 --- a/apps/emqx_gateway_mqttsn/src/emqx_mqttsn_schema.erl +++ b/apps/emqx_gateway_mqttsn/src/emqx_mqttsn_schema.erl @@ -21,7 +21,9 @@ -include_lib("typerefl/include/types.hrl"). %% config schema provides --export([fields/1, desc/1]). +-export([namespace/0, fields/1, desc/1]). + +namespace() -> "gateway". 
fields(mqttsn) -> [ diff --git a/apps/emqx_gateway_stomp/src/emqx_stomp_schema.erl b/apps/emqx_gateway_stomp/src/emqx_stomp_schema.erl index b1c6a92e2..d4dcd2897 100644 --- a/apps/emqx_gateway_stomp/src/emqx_stomp_schema.erl +++ b/apps/emqx_gateway_stomp/src/emqx_stomp_schema.erl @@ -20,7 +20,9 @@ -include_lib("typerefl/include/types.hrl"). %% config schema provides --export([fields/1, desc/1]). +-export([namespace/0, fields/1, desc/1]). + +namespace() -> "gateway". fields(stomp) -> [ diff --git a/apps/emqx_retainer/src/emqx_retainer_schema.erl b/apps/emqx_retainer/src/emqx_retainer_schema.erl index 983b27601..1c5d8e55f 100644 --- a/apps/emqx_retainer/src/emqx_retainer_schema.erl +++ b/apps/emqx_retainer/src/emqx_retainer_schema.erl @@ -30,7 +30,7 @@ -define(INVALID_SPEC(_REASON_), throw({_REASON_, #{default => ?DEFAULT_INDICES}})). -namespace() -> "retainer". +namespace() -> retainer. roots() -> [ diff --git a/apps/emqx_schema_registry/src/emqx_schema_registry.app.src b/apps/emqx_schema_registry/src/emqx_schema_registry.app.src index e64d104f7..f4089fdc1 100644 --- a/apps/emqx_schema_registry/src/emqx_schema_registry.app.src +++ b/apps/emqx_schema_registry/src/emqx_schema_registry.app.src @@ -1,6 +1,6 @@ {application, emqx_schema_registry, [ {description, "EMQX Schema Registry"}, - {vsn, "0.1.7"}, + {vsn, "0.1.8"}, {registered, [emqx_schema_registry_sup]}, {mod, {emqx_schema_registry_app, []}}, {included_applications, [ diff --git a/apps/emqx_schema_registry/src/emqx_schema_registry_schema.erl b/apps/emqx_schema_registry/src/emqx_schema_registry_schema.erl index d131aa48f..564496629 100644 --- a/apps/emqx_schema_registry/src/emqx_schema_registry_schema.erl +++ b/apps/emqx_schema_registry/src/emqx_schema_registry_schema.erl @@ -10,6 +10,7 @@ %% `hocon_schema' API -export([ + namespace/0, roots/0, fields/1, desc/1, @@ -26,6 +27,8 @@ %% `hocon_schema' APIs %%------------------------------------------------------------------------------ +namespace() -> ?CONF_KEY_ROOT. + roots() -> [{?CONF_KEY_ROOT, mk(ref(?CONF_KEY_ROOT), #{required => false})}]. From db33bc616ae1b942fd73b23c788c98a5d2af6105 Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Wed, 8 Nov 2023 23:31:29 +0100 Subject: [PATCH 059/101] feat(schema): Add v2 scheam JSON dump --- .../emqx_authn_password_hashing.erl | 2 +- .../src/emqx_bridge_http_connector.erl | 2 +- .../src/emqx_bridge_mqtt.app.src | 2 +- .../src/emqx_bridge_mqtt_connector_schema.erl | 2 +- apps/emqx_conf/src/emqx_conf.erl | 271 +++++++++++++++++- apps/emqx_psk/src/emqx_psk.app.src | 2 +- apps/emqx_psk/src/emqx_psk_schema.erl | 2 +- 7 files changed, 266 insertions(+), 17 deletions(-) diff --git a/apps/emqx_auth/src/emqx_authn/emqx_authn_password_hashing.erl b/apps/emqx_auth/src/emqx_authn/emqx_authn_password_hashing.erl index 756f39d06..16af4fd23 100644 --- a/apps/emqx_auth/src/emqx_authn/emqx_authn_password_hashing.erl +++ b/apps/emqx_auth/src/emqx_authn/emqx_authn_password_hashing.erl @@ -67,7 +67,7 @@ -define(SALT_ROUNDS_MIN, 5). -define(SALT_ROUNDS_MAX, 10). -namespace() -> "authn-hash". +namespace() -> "authn_hash". roots() -> [pbkdf2, bcrypt, bcrypt_rw, bcrypt_rw_api, simple]. 
fields(bcrypt_rw) -> diff --git a/apps/emqx_bridge_http/src/emqx_bridge_http_connector.erl b/apps/emqx_bridge_http/src/emqx_bridge_http_connector.erl index 743ab97fe..5a5e790e5 100644 --- a/apps/emqx_bridge_http/src/emqx_bridge_http_connector.erl +++ b/apps/emqx_bridge_http/src/emqx_bridge_http_connector.erl @@ -54,7 +54,7 @@ %%===================================================================== %% Hocon schema -namespace() -> "connector-http". +namespace() -> "connector_http". roots() -> fields(config). diff --git a/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt.app.src b/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt.app.src index e39c4df69..cbef0dda8 100644 --- a/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt.app.src +++ b/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_bridge_mqtt, [ {description, "EMQX MQTT Broker Bridge"}, - {vsn, "0.1.4"}, + {vsn, "0.1.5"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt_connector_schema.erl b/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt_connector_schema.erl index 1dc3ca5f8..f671bec71 100644 --- a/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt_connector_schema.erl +++ b/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt_connector_schema.erl @@ -36,7 +36,7 @@ -define(MQTT_HOST_OPTS, #{default_port => 1883}). -namespace() -> "connector-mqtt". +namespace() -> "connector_mqtt". roots() -> fields("config"). diff --git a/apps/emqx_conf/src/emqx_conf.erl b/apps/emqx_conf/src/emqx_conf.erl index 7ff06b0ef..0925141de 100644 --- a/apps/emqx_conf/src/emqx_conf.erl +++ b/apps/emqx_conf/src/emqx_conf.erl @@ -28,7 +28,7 @@ -export([remove/2, remove/3]). -export([tombstone/2]). -export([reset/2, reset/3]). --export([dump_schema/2]). +-export([dump_schema/2, reformat_schema_dump/1]). -export([schema_module/0]). %% TODO: move to emqx_dashboard when we stop building api schema at build time @@ -180,9 +180,263 @@ gen_schema_json(Dir, SchemaModule, Lang) -> include_importance_up_from => IncludeImportance, desc_resolver => make_desc_resolver(Lang) }, - JsonMap = hocon_schema_json:gen(SchemaModule, Opts), - IoData = emqx_utils_json:encode(JsonMap, [pretty, force_utf8]), - ok = file:write_file(SchemaJsonFile, IoData). + StructsJsonArray = hocon_schema_json:gen(SchemaModule, Opts), + IoData = emqx_utils_json:encode(StructsJsonArray, [pretty, force_utf8]), + ok = file:write_file(SchemaJsonFile, IoData), + ok = gen_preformat_md_json_files(Dir, StructsJsonArray, Lang). + +gen_preformat_md_json_files(Dir, StructsJsonArray, Lang) -> + NestedStruct = reformat_schema_dump(StructsJsonArray), + %% write to files + NestedJsonFile = filename:join([Dir, "schmea-v2-" ++ Lang ++ ".json"]), + io:format(user, "===< Generating: ~s~n", [NestedJsonFile]), + ok = file:write_file( + NestedJsonFile, emqx_utils_json:encode(NestedStruct, [pretty, force_utf8]) + ), + ok. + +%% @doc This function is exported for scripts/schema-dump-reformat.escript +reformat_schema_dump(StructsJsonArray0) -> + %% prepare + StructsJsonArray = deduplicate_by_full_name(StructsJsonArray0), + #{fields := RootFields} = hd(StructsJsonArray), + RootNames0 = lists:map(fun(#{name := RootName}) -> RootName end, RootFields), + RootNames = lists:map(fun to_bin/1, RootNames0), + %% reformat + [Root | FlatStructs0] = lists:map( + fun(Struct) -> gen_flat_doc(RootNames, Struct) end, StructsJsonArray + ), + FlatStructs = [Root#{text => <<"root">>, hash => <<"root">>} | FlatStructs0], + gen_nested_doc(FlatStructs). 
+ +deduplicate_by_full_name(Structs) -> + deduplicate_by_full_name(Structs, #{}, []). + +deduplicate_by_full_name([], _Seen, Acc) -> + lists:reverse(Acc); +deduplicate_by_full_name([#{full_name := FullName} = H | T], Seen, Acc) -> + case maps:get(FullName, Seen, false) of + false -> + deduplicate_by_full_name(T, Seen#{FullName => H}, [H | Acc]); + H -> + %% Name clash, but identical, ignore + deduplicate_by_full_name(T, Seen, Acc); + _Different -> + %% ADD NAMESPACE! + throw({duplicate_full_name, FullName}) + end. + +%% Ggenerate nested docs from root struct. +%% Due to the fact that the same struct can be referenced by multiple fields, +%% we need to generate a unique nested doc for each reference. +%% The unique path to each type and is of the below format: +%% - A a path starts either with 'T-' or 'V-'. T stands for type, V stands for value. +%% - A path is a list of strings delimited by '-'. +%% - The letter S is used to separate struct name from field name. +%% - Field names are however NOT denoted by a leading 'F-'. +%% For example: +%% - T-root: the root struct; +%% - T-foo-S-footype: the struct named "footype" in the foo field of root struct; +%% - V-foo-S-footype-bar: the field named "bar" in the struct named "footype" in the foo field of root struct +gen_nested_doc(Structs) -> + KeyByFullName = lists:foldl( + fun(#{hash := FullName} = Struct, Acc) -> + maps:put(FullName, Struct, Acc) + end, + #{}, + Structs + ), + FindFn = fun(Hash) -> maps:get(Hash, KeyByFullName) end, + gen_nested_doc(hd(Structs), FindFn, []). + +gen_nested_doc(#{fields := Fields} = Struct, FindFn, Path) -> + TypeAnchor = make_type_anchor(Path), + ValueAnchor = fun(FieldName) -> make_value_anchor(Path, FieldName) end, + NewFields = lists:map( + fun(#{text := Name} = Field) -> + NewField = expand_field(Field, FindFn, Path), + NewField#{hash => ValueAnchor(Name)} + end, + Fields + ), + Struct#{ + fields => NewFields, + hash => TypeAnchor + }. + +%% Make anchor for type. +%% Start with "T-" to distinguish from value anchor. +make_type_anchor([]) -> + <<"T-root">>; +make_type_anchor(Path) -> + to_bin(["T-", lists:join("-", lists:reverse(Path))]). + +%% Value anchor is used to link to the field's struct. +%% Start with "V-" to distinguish from type anchor. +make_value_anchor(Path, FieldName) -> + to_bin(["V-", join_path_hash(Path, FieldName)]). + +%% Make a globally unique "hash" (the http anchor) for each struct field. +join_path_hash([], Name) -> + Name; +join_path_hash(Path, Name) -> + to_bin(lists:join("-", lists:reverse([Name | Path]))). + +%% Expand field's struct reference to nested doc. +expand_field(#{text := Name, refs := References} = Field, FindFn, Path) -> + %% Add struct type name in path to make it unique. + NewReferences = lists:map( + fun(#{text := StructName} = Ref) -> + expand_ref(Ref, FindFn, [StructName, "S", Name | Path]) + end, + References + ), + Field#{refs => NewReferences}; +expand_field(Field, _FindFn, _Path) -> + %% No reference, no need to expand. + Field. + +expand_ref(#{hash := FullName}, FindFn, Path) -> + Struct = FindFn(FullName), + gen_nested_doc(Struct, FindFn, Path). + +%% generate flat docs for each struct. +%% using references to link to other structs. 
+gen_flat_doc(RootNames, #{full_name := FullName, fields := Fields} = S) -> + ShortName = short_name(FullName), + case is_missing_namespace(ShortName, to_bin(FullName), RootNames) of + true -> + io:format(standard_error, "WARN: no_namespace_for: ~s~n", [FullName]); + false -> + ok + end, + #{ + text => short_name(FullName), + hash => format_hash(FullName), + doc => maps:get(desc, S, <<"">>), + fields => format_fields(Fields) + }. + +format_fields([]) -> + []; +format_fields([Field | Fields]) -> + [format_field(Field) | format_fields(Fields)]. + +format_field(#{name := Name, aliases := Aliases, type := Type} = F) -> + L = [ + {text, Name}, + {type, format_type(Type)}, + {refs, format_refs(Type)}, + {aliases, + case Aliases of + [] -> undefined; + _ -> Aliases + end}, + {default, maps:get(hocon, maps:get(default, F, #{}), undefined)}, + {doc, maps:get(desc, F, undefined)} + ], + maps:from_list([{K, V} || {K, V} <- L, V =/= undefined]). + +format_refs(Type) -> + References = find_refs(Type), + case lists:map(fun format_ref/1, References) of + [] -> undefined; + L -> L + end. + +format_ref(FullName) -> + #{text => short_name(FullName), hash => format_hash(FullName)}. + +find_refs(Type) -> + lists:reverse(find_refs(Type, [])). + +%% go deep into union, array, and map to find references +find_refs(#{kind := union, members := Members}, Acc) -> + lists:foldl(fun find_refs/2, Acc, Members); +find_refs(#{kind := array, elements := Elements}, Acc) -> + find_refs(Elements, Acc); +find_refs(#{kind := map, values := Values}, Acc) -> + find_refs(Values, Acc); +find_refs(#{kind := struct, name := FullName}, Acc) -> + [FullName | Acc]; +find_refs(_, Acc) -> + Acc. + +format_type(#{kind := primitive, name := Name}) -> + format_primitive_type(Name); +format_type(#{kind := singleton, name := Name}) -> + to_bin(["String(\"", to_bin(Name), "\")"]); +format_type(#{kind := enum, symbols := Symbols}) -> + CommaSep = lists:join(",", lists:map(fun(S) -> to_bin(S) end, Symbols)), + to_bin(["Enum(", CommaSep, ")"]); +format_type(#{kind := array, elements := ElementsType}) -> + to_bin(["Array(", format_type(ElementsType), ")"]); +format_type(#{kind := union, members := MemberTypes} = U) -> + DN = maps:get(display_name, U, undefined), + case DN of + undefined -> + to_bin(["OneOf(", format_union_members(MemberTypes), ")"]); + Name -> + format_primitive_type(Name) + end; +format_type(#{kind := struct, name := FullName}) -> + to_bin(["Struct(", short_name(FullName), ")"]); +format_type(#{kind := map, name := Name, values := ValuesType}) -> + to_bin(["Map($", Name, "->", format_type(ValuesType), ")"]). + +format_union_members(Members) -> + format_union_members(Members, []). + +format_union_members([], Acc) -> + lists:join(",", lists:reverse(Acc)); +format_union_members([Member | Members], Acc) -> + NewAcc = [format_type(Member) | Acc], + format_union_members(Members, NewAcc). + +format_primitive_type(TypeStr) -> + Spec = emqx_conf_schema_types:readable_docgen(?MODULE, TypeStr), + to_bin(maps:get(type, Spec)). + +%% All types should have a namespace to avlid name clashing. +is_missing_namespace(ShortName, FullName, RootNames) -> + case lists:member(ShortName, RootNames) of + true -> + false; + false -> + ShortName =:= FullName + end. + +%% Returns short name from full name, fullname delemited by colon(:). +short_name(FullName) -> + case string:split(FullName, ":") of + [_, Name] -> to_bin(Name); + _ -> to_bin(FullName) + end. + +%% Returns the hash-anchor from full name, fullname delemited by colon(:). 
+format_hash(FullName) -> + case string:split(FullName, ":") of + [Namespace, Name] -> + ok = warn_bad_namespace(Namespace), + iolist_to_binary([Namespace, "__", Name]); + _ -> + iolist_to_binary(FullName) + end. + +%% namespace should only have letters, numbers, and underscores. +warn_bad_namespace(Namespace) -> + case re:run(Namespace, "^[a-zA-Z0-9_]+$", [{capture, none}]) of + nomatch -> + case erlang:get({bad_namespace, Namespace}) of + true -> + ok; + _ -> + erlang:put({bad_namespace, Namespace}, true), + io:format(standard_error, "WARN: bad_namespace: ~s~n", [Namespace]) + end; + _ -> + ok + end. %% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time. hotconf_schema_json() -> @@ -306,12 +560,7 @@ hocon_schema_to_spec(Atom, _LocalModule) when is_atom(Atom) -> typename_to_spec(TypeStr, Module) -> emqx_conf_schema_types:readable_dashboard(Module, TypeStr). -to_bin(List) when is_list(List) -> - case io_lib:printable_list(List) of - true -> unicode:characters_to_binary(List); - false -> List - end; +to_bin(List) when is_list(List) -> iolist_to_binary(List); to_bin(Boolean) when is_boolean(Boolean) -> Boolean; to_bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8); -to_bin(X) -> - X. +to_bin(X) -> X. diff --git a/apps/emqx_psk/src/emqx_psk.app.src b/apps/emqx_psk/src/emqx_psk.app.src index be24112e4..abd862613 100644 --- a/apps/emqx_psk/src/emqx_psk.app.src +++ b/apps/emqx_psk/src/emqx_psk.app.src @@ -2,7 +2,7 @@ {application, emqx_psk, [ {description, "EMQX PSK"}, % strict semver, bump manually! - {vsn, "5.0.4"}, + {vsn, "5.0.5"}, {modules, []}, {registered, [emqx_psk_sup]}, {applications, [kernel, stdlib]}, diff --git a/apps/emqx_psk/src/emqx_psk_schema.erl b/apps/emqx_psk/src/emqx_psk_schema.erl index e6c922c1e..0a6e5d298 100644 --- a/apps/emqx_psk/src/emqx_psk_schema.erl +++ b/apps/emqx_psk/src/emqx_psk_schema.erl @@ -28,7 +28,7 @@ fields/1 ]). -namespace() -> "authn-psk". +namespace() -> "psk". roots() -> ["psk_authentication"]. From b643741920fcad8eb3a87ec9542950c8cc12d43c Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Tue, 21 Nov 2023 20:30:21 +0100 Subject: [PATCH 060/101] feat: add a escript to help re-format older version schema dumps --- scripts/schema-dump-reformat.escript | 132 +++++++++++++++++++++++++++ 1 file changed, 132 insertions(+) create mode 100755 scripts/schema-dump-reformat.escript diff --git a/scripts/schema-dump-reformat.escript b/scripts/schema-dump-reformat.escript new file mode 100755 index 000000000..31cfdd7d9 --- /dev/null +++ b/scripts/schema-dump-reformat.escript @@ -0,0 +1,132 @@ +#!/usr/bin/env escript + +%% This script translates the hocon_schema_json's schema dump to a new format. +%% It is used to convert older version EMQX's schema dumps to the new format +%% after all files are upgraded to the new format, this script can be removed. + +-mode(compile). + +main([Input]) -> + ok = add_libs(), + _ = atoms(), + {ok, Data} = file:read_file(Input), + Json = jsx:decode(Data), + NewJson = reformat(Json), + io:format("~s~n", [jsx:encode(NewJson)]); +main(_) -> + io:format("Usage: schema-dump-reformat.escript ~n"), + halt(1). + +reformat(Json) -> + emqx_conf:reformat_schema_dump(fix(Json)). 
+ +%% fix old type specs to make them compatible with new type specs +fix(#{ + <<"kind">> := <<"union">>, + <<"members">> := [#{<<"name">> := <<"string()">>}, #{<<"name">> := <<"function()">>}] +}) -> + %% s3_exporter.secret_access_key + #{ + kind => primitive, + name => <<"string()">> + }; +fix(#{<<"kind">> := <<"primitive">>, <<"name">> := <<"emqx_conf_schema:log_level()">>}) -> + #{ + kind => enum, + symbols => [emergency, alert, critical, error, warning, notice, info, debug, none, all] + }; +fix(#{<<"kind">> := <<"primitive">>, <<"name">> := <<"emqx_connector_http:pool_type()">>}) -> + #{kind => enum, symbols => [random, hash]}; +fix(#{<<"kind">> := <<"primitive">>, <<"name">> := <<"emqx_bridge_http_connector:pool_type()">>}) -> + #{kind => enum, symbols => [random, hash]}; +fix(Map) when is_map(Map) -> + maps:from_list(fix(maps:to_list(Map))); +fix(List) when is_list(List) -> + lists:map(fun fix/1, List); +fix({<<"kind">>, Kind}) -> + {kind, binary_to_atom(Kind, utf8)}; +fix({<<"name">>, Type}) -> + {name, fix_type(Type)}; +fix({K, V}) -> + {binary_to_atom(K, utf8), fix(V)}; +fix(V) when is_number(V) -> + V; +fix(V) when is_atom(V) -> + V; +fix(V) when is_binary(V) -> + V. + +%% ensure below ebin dirs are added to code path: +%% _build/default/lib/*/ebin +%% _build/emqx/lib/*/ebin +%% _build/emqx-enterprise/lib/*/ebin +add_libs() -> + Profile = os:getenv("PROFILE"), + case Profile of + "emqx" -> + ok; + "emqx-enterprise" -> + ok; + _ -> + io:format("PROFILE is not set~n"), + halt(1) + end, + Dirs = + filelib:wildcard("_build/default/lib/*/ebin") ++ + filelib:wildcard("_build/" ++ Profile ++ "/lib/*/ebin"), + lists:foreach(fun add_lib/1, Dirs). + +add_lib(Dir) -> + code:add_patha(Dir), + Beams = filelib:wildcard(Dir ++ "/*.beam"), + _ = spawn(fun() -> lists:foreach(fun load_beam/1, Beams) end), + ok. + +load_beam(BeamFile) -> + ModuleName = filename:basename(BeamFile, ".beam"), + Module = list_to_atom(ModuleName), + %% load the beams to make sure the atoms are existing + code:ensure_loaded(Module), + ok. + +fix_type(<<"[{string(), string()}]">>) -> + <<"map()">>; +fix_type(<<"[{binary(), binary()}]">>) -> + <<"map()">>; +fix_type(<<"emqx_limiter_schema:rate()">>) -> + <<"string()">>; +fix_type(<<"emqx_limiter_schema:burst_rate()">>) -> + <<"string()">>; +fix_type(<<"emqx_limiter_schema:capacity()">>) -> + <<"string()">>; +fix_type(<<"emqx_limiter_schema:initial()">>) -> + <<"string()">>; +fix_type(<<"emqx_limiter_schema:failure_strategy()">>) -> + <<"string()">>; +fix_type(<<"emqx_conf_schema:file()">>) -> + <<"string()">>; +fix_type(<<"#{term() => binary()}">>) -> + <<"map()">>; +fix_type(<<"[term()]">>) -> + %% jwt claims + <<"map()">>; +fix_type(<<"emqx_ee_bridge_influxdb:write_syntax()">>) -> + <<"string()">>; +fix_type(<<"emqx_bridge_influxdb:write_syntax()">>) -> + <<"string()">>; +fix_type(<<"emqx_schema:mqtt_max_packet_size()">>) -> + <<"non_neg_integer()">>; +fix_type(<<"emqx_s3_schema:secret_access_key()">>) -> + <<"string()">>; +fix_type(Type) -> + Type. + +%% ensure atoms are loaded +%% these atoms are from older version of emqx +atoms() -> + [ + emqx_ee_connector_clickhouse, + emqx_ee_bridge_gcp_pubsub, + emqx_ee_bridge_influxdb, + emqx_connector_http + ]. 
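Taken together, the two patches above let older `hocon_schema_json` dumps be converted into the new nested format: the escript decodes the old JSON, normalizes a handful of legacy type specs, and feeds the result through `emqx_conf:reformat_schema_dump/1`. It would be invoked along the lines of `PROFILE=emqx scripts/schema-dump-reformat.escript old-schema.json > schema-v2.json` (exact paths depend on where the old dump lives). For reference, a sketch of the `namespace__name` anchor scheme the reformatted dump uses for struct hashes (the sample full names below are made up):

```erlang
%% Sketch of the struct-hash rule from emqx_conf:format_hash/1:
%% "namespace:name" becomes <<"namespace__name">>; a full name without a
%% namespace passes through unchanged.
-module(schema_anchor_sketch).
-export([hash/1]).

hash(FullName) ->
    case string:split(FullName, ":") of
        [Namespace, Name] -> iolist_to_binary([Namespace, "__", Name]);
        _ -> iolist_to_binary(FullName)
    end.

%% hash("retainer:retainer") -> <<"retainer__retainer">>
%% hash("root")              -> <<"root">>
```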
From 38d3a1d7d0254b3bc90650def178b22600438bbe Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Wed, 22 Nov 2023 09:25:28 -0300 Subject: [PATCH 061/101] feat(actions): allow multiple action info modules per application --- apps/emqx_bridge/src/emqx_action_info.erl | 6 +++--- .../src/emqx_bridge_azure_event_hub.app.src | 2 +- apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index b236558e1..7c246a797 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -211,9 +211,9 @@ action_info_modules() -> lists:usort(lists:flatten(ActionInfoModules) ++ hard_coded_action_info_modules()). action_info_modules(App) -> - case application:get_env(App, emqx_action_info_module) of - {ok, Module} -> - [Module]; + case application:get_env(App, emqx_action_info_modules) of + {ok, Modules} -> + Modules; _ -> [] end. diff --git a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.app.src b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.app.src index 40ea79334..f1c097d29 100644 --- a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.app.src +++ b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.app.src @@ -9,7 +9,7 @@ telemetry, wolff ]}, - {env, []}, + {env, [{emqx_action_info_modules, [emqx_bridge_azure_event_hub_action_info]}]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src index 00b9d8968..da8df2ddc 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src @@ -12,7 +12,7 @@ brod, brod_gssapi ]}, - {env, [{emqx_action_info_module, emqx_bridge_kafka_action_info}]}, + {env, [{emqx_action_info_modules, [emqx_bridge_kafka_action_info]}]}, {modules, []}, {links, []} From bb549cdf8b1ec3be33ec6fa96227255f9f40f2ae Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Wed, 22 Nov 2023 09:29:42 -0300 Subject: [PATCH 062/101] feat(actions): allow multiple action info modules per application --- apps/emqx_bridge/src/emqx_action_info.erl | 6 +++--- .../src/emqx_bridge_azure_event_hub.app.src | 2 +- .../emqx_bridge_confluent/src/emqx_bridge_confluent.app.src | 2 +- .../src/emqx_bridge_gcp_pubsub.app.src | 2 +- apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index 34d624af4..44f871d53 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -225,9 +225,9 @@ action_info_modules() -> lists:usort(lists:flatten(ActionInfoModules) ++ hard_coded_action_info_modules()). action_info_modules(App) -> - case application:get_env(App, emqx_action_info_module) of - {ok, Module} -> - [Module]; + case application:get_env(App, emqx_action_info_modules) of + {ok, Modules} -> + Modules; _ -> [] end. 
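With the env key now plural, a bridge application can register several action-info modules at once; `emqx_action_info:action_info_modules/1` simply returns the whole list. A sketch of what such an `.app.src` entry could look like (the module names here are hypothetical; the real single-module updates for the Azure Event Hub, Confluent, GCP PubSub and Kafka bridges follow below):

```erlang
%% Hypothetical .app.src excerpt: one app exposing two action-info modules.
{env, [{emqx_action_info_modules, [
    emqx_bridge_foo_action_info,
    emqx_bridge_foo_source_info
]}]}
```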
diff --git a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.app.src b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.app.src index 40ea79334..f1c097d29 100644 --- a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.app.src +++ b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.app.src @@ -9,7 +9,7 @@ telemetry, wolff ]}, - {env, []}, + {env, [{emqx_action_info_modules, [emqx_bridge_azure_event_hub_action_info]}]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_confluent/src/emqx_bridge_confluent.app.src b/apps/emqx_bridge_confluent/src/emqx_bridge_confluent.app.src index 3c096ad14..64d1dec09 100644 --- a/apps/emqx_bridge_confluent/src/emqx_bridge_confluent.app.src +++ b/apps/emqx_bridge_confluent/src/emqx_bridge_confluent.app.src @@ -9,7 +9,7 @@ telemetry, wolff ]}, - {env, []}, + {env, [{emqx_action_info_modules, [emqx_bridge_confluent_producer_action_info]}]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src index d4c16e13c..6e2c93d20 100644 --- a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src @@ -8,7 +8,7 @@ emqx_resource, ehttpc ]}, - {env, []}, + {env, [{emqx_action_info_modules, [emqx_bridge_gcp_pubsub_producer_action_info]}]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src index 00b9d8968..da8df2ddc 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src @@ -12,7 +12,7 @@ brod, brod_gssapi ]}, - {env, [{emqx_action_info_module, emqx_bridge_kafka_action_info}]}, + {env, [{emqx_action_info_modules, [emqx_bridge_kafka_action_info]}]}, {modules, []}, {links, []} From fc849f0c05325c8d289dc53aed128efad248e93e Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Wed, 22 Nov 2023 12:36:10 -0300 Subject: [PATCH 063/101] ci(test): add info to help diagnose flaky test --- .../test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl index be6a306e0..5e1e885db 100644 --- a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl +++ b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl @@ -1494,10 +1494,11 @@ t_pull_worker_death(Config) -> ok. 
t_async_worker_death_mid_pull(Config) -> - ct:timetrap({seconds, 120}), + ct:timetrap({seconds, 122}), [#{pubsub_topic := PubSubTopic}] = ?config(topic_mapping, Config), Payload = emqx_guid:to_hexstr(emqx_guid:gen()), ?check_trace( + #{timetrap => 120_000}, begin start_and_subscribe_mqtt(Config), @@ -1513,18 +1514,22 @@ t_async_worker_death_mid_pull(Config) -> #{?snk_kind := gcp_pubsub_consumer_worker_reply_delegator} ), spawn_link(fun() -> + ct:pal("will kill async worker"), ?tp_span( kill_async_worker, #{}, begin %% produce a message while worker is being killed Messages = [#{<<"data">> => Payload}], + ct:pal("publishing message"), pubsub_publish(Config, PubSubTopic, Messages), + ct:pal("published message"), AsyncWorkerPids = get_async_worker_pids(Config), emqx_utils:pmap( fun(AsyncWorkerPid) -> Ref = monitor(process, AsyncWorkerPid), + ct:pal("killing pid ~p", [AsyncWorkerPid]), sys:terminate(AsyncWorkerPid, die), receive {'DOWN', Ref, process, AsyncWorkerPid, _} -> @@ -1538,7 +1543,8 @@ t_async_worker_death_mid_pull(Config) -> ok end - ) + ), + ct:pal("killed async worker") end), ?assertMatch( From d9f964a44f9bc1e3116584e84d894c83fc242ebc Mon Sep 17 00:00:00 2001 From: "Zaiming (Stone) Shi" Date: Wed, 22 Nov 2023 16:58:05 +0100 Subject: [PATCH 064/101] test: fix test cases after schema type namespace change --- apps/emqx_auth/test/emqx_authn/emqx_authn_schema_SUITE.erl | 2 +- apps/emqx_retainer/test/emqx_retainer_SUITE.erl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/emqx_auth/test/emqx_authn/emqx_authn_schema_SUITE.erl b/apps/emqx_auth/test/emqx_authn/emqx_authn_schema_SUITE.erl index 23532b4af..f2688fff9 100644 --- a/apps/emqx_auth/test/emqx_authn/emqx_authn_schema_SUITE.erl +++ b/apps/emqx_auth/test/emqx_authn/emqx_authn_schema_SUITE.erl @@ -54,7 +54,7 @@ t_check_schema(_Config) -> ?assertThrow( #{ path := "authentication.1.password_hash_algorithm.name", - matched_type := "authn:builtin_db/authn-hash:simple", + matched_type := "authn:builtin_db/authn_hash:simple", reason := unable_to_convert_to_enum_symbol }, Check(ConfigNotOk) diff --git a/apps/emqx_retainer/test/emqx_retainer_SUITE.erl b/apps/emqx_retainer/test/emqx_retainer_SUITE.erl index d75e2ca07..595f37fff 100644 --- a/apps/emqx_retainer/test/emqx_retainer_SUITE.erl +++ b/apps/emqx_retainer/test/emqx_retainer_SUITE.erl @@ -555,7 +555,7 @@ t_page_read(_) -> ok = emqtt:disconnect(C1). t_only_for_coverage(_) -> - ?assertEqual("retainer", emqx_retainer_schema:namespace()), + ?assertEqual(retainer, emqx_retainer_schema:namespace()), ignored = gen_server:call(emqx_retainer, unexpected), ok = gen_server:cast(emqx_retainer, unexpected), unexpected = erlang:send(erlang:whereis(emqx_retainer), unexpected), From db83457d13a50c62f1b2465caa8c57ff4dff75a4 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Wed, 22 Nov 2023 16:02:23 -0300 Subject: [PATCH 065/101] test: fix flaky test The cause was that the call `sys:terminate/2` was timing out... 
`exit/2` doens't always work: ``` 2023-11-22 19:14:40.974 killed async workers Error: -22T19:14:40.974563+00:00 [error] crasher: initial call: gun:proc_lib_hack/5, pid: <0.15908.7>, registered_name: [], exit: {{{owner_gone,killed},[{gun,owner_gone,1,[{file,"gun.erl"},{line,970}]},{gun,proc_lib_hack,5,[{file,"gun.erl"},{line,649}]},{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,240}]}]},[{gun,proc_lib_hack,5,[{file,"gun.erl"},{line,654}]},{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,240}]}]}, ancestors: [gun_sup,<0.15387.7>], message_queue_len: 0, messages: [], links: [<0.15388.7>], dictionary: [], trap_exit: false, status: running, heap_size: 987, stack_size: 28, reductions: 1822; neighbours: Error: -22T19:14:40.998051+00:00 [error] Supervisor: {local,gun_sup}. Context: child_terminated. Reason: {{owner_gone,killed},[{gun,owner_gone,1,[{file,"gun.erl"},{line,970}]},{gun,proc_lib_hack,5,[{file,"gun.erl"},{line,649}]},{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,240}]}]}. Offender: id=gun,pid=<0.15908.7>. 2023-11-22T19:15:41.088752+00:00 [critical] Run stage failed: error:{badmatch,{timeout,#{expected_remaining => 1,mailbox => {messages,[]},msgs_so_far => []}}}, Stacktrace: [{emqx_bridge_gcp_pubsub_consumer_SUITE,'-t_async_worker_death_mid_pull/1-fun-17-',3,[{file,"/emqx/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl"},{line,1576}]},{emqx_bridge_gcp_pubsub_consumer_SUITE,t_async_worker_death_mid_pull,1,[{file,"/emqx/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl"},{line,1505}]}], Trace dump: "/emqx/_build/test/logs/ct_run.test@127.0.0.1.2023-11-22_19.14.27/snabbkaffe/1700680540975786370.log", mfa: undefined Error: -22T19:15:46.095702+00:00 [error] crasher: initial call: gun:proc_lib_hack/5, pid: <0.15934.7>, registered_name: [], exit: {{{owner_gone,killed},[{gun,owner_gone,1,[{file,"gun.erl"},{line,970}]},{gun,proc_lib_hack,5,[{file,"gun.erl"},{line,649}]},{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,240}]}]},[{gun,proc_lib_hack,5,[{file,"gun.erl"},{line,654}]},{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,240}]}]}, ancestors: [gun_sup,<0.15387.7>], message_queue_len: 0, messages: [], links: [<0.15388.7>], dictionary: [], trap_exit: false, status: running, heap_size: 610, stack_size: 28, reductions: 1471; neighbours: Error: -22T19:15:46.095192+00:00 [error] Supervisor: {local,ehttpc_sup}. Context: shutdown_error. Reason: killed. Offender: id={ehttpc_pool_sup,<<98,114,105,100,103,101,58,103,99,112,95,112,117,98,115,117,98,95,99,111,110,115,117,109,101,114,58,116,95,97,115,121,110,99,95,119,111,114,107,101,114,95,100,101,97,116,104,95,109,105,100,95,112,117,108,108,45,53,55,54,52,54,48,55,53,50,51,48,51,52,50,50,55,53,49>>},pid=<0.15903.7>. Error: -22T19:15:46.095470+00:00 [error] Supervisor: {<0.15906.7>,ehttpc_worker_sup}. Context: shutdown_error. Reason: killed. Offender: id={worker,1},pid=<0.15924.7>. Error: -22T19:15:46.096762+00:00 [error] Supervisor: {local,gun_sup}. Context: child_terminated. Reason: {{owner_gone,killed},[{gun,owner_gone,1,[{file,"gun.erl"},{line,970}]},{gun,proc_lib_hack,5,[{file,"gun.erl"},{line,649}]},{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,240}]}]}. Offender: id=gun,pid=<0.15934.7>. 
Warning: 2T19:15:46.098278+00:00 [warning] msg: remove_local_resource_failed, mfa: emqx_resource:remove_local/1(362), error: {error,timeout}, resource_id: <<"bridge:gcp_pubsub_consumer:t_async_worker_death_mid_pull-576460752303422751">> Error: -22T19:15:46.149090+00:00 [error] Generic server <0.15904.7> terminating. Reason: killed. Last message: {'EXIT',<0.15903.7>,killed}. State: {state,<<"bridge:gcp_pubsub_consumer:t_async_worker_death_mid_pull-576460752303422751">>,1,random}. Error: -22T19:15:46.149525+00:00 [error] crasher: initial call: ehttpc_pool:init/1, pid: <0.15904.7>, registered_name: [], exit: {killed,[{gen_server,decode_msg,9,[{file,"gen_server.erl"},{line,909}]},{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,240}]}]}, ancestors: [<0.15903.7>,ehttpc_sup,<0.15731.7>], message_queue_len: 0, messages: [], links: [], dictionary: [], trap_exit: true, status: running, heap_size: 376, stack_size: 28, reductions: 3428; neighbours: ``` --- .../emqx_bridge_gcp_pubsub_consumer_SUITE.erl | 30 +++++++++++-------- 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl index 5e1e885db..b0e4e4ac8 100644 --- a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl +++ b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl @@ -34,16 +34,22 @@ init_per_suite(Config) -> emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), case emqx_common_test_helpers:is_tcp_server_available(GCPEmulatorHost, GCPEmulatorPort) of true -> - ok = emqx_common_test_helpers:start_apps([emqx_conf]), - ok = emqx_connector_test_helpers:start_apps([ - emqx_resource, emqx_bridge, emqx_rule_engine - ]), - {ok, _} = application:ensure_all_started(emqx_connector), + Apps = emqx_cth_suite:start( + [ + emqx, + emqx_conf, + emqx_bridge_gcp_pubsub, + emqx_bridge, + emqx_rule_engine + ], + #{work_dir => emqx_cth_suite:work_dir(Config)} + ), emqx_mgmt_api_test_util:init_suite(), HostPort = GCPEmulatorHost ++ ":" ++ GCPEmulatorPortStr, true = os:putenv("PUBSUB_EMULATOR_HOST", HostPort), Client = start_control_client(), [ + {apps, Apps}, {proxy_name, ProxyName}, {proxy_host, ProxyHost}, {proxy_port, ProxyPort}, @@ -62,12 +68,11 @@ init_per_suite(Config) -> end. end_per_suite(Config) -> + Apps = ?config(apps, Config), Client = ?config(client, Config), stop_control_client(Client), emqx_mgmt_api_test_util:end_suite(), - ok = emqx_common_test_helpers:stop_apps([emqx_conf]), - ok = emqx_connector_test_helpers:stop_apps([emqx_bridge, emqx_resource, emqx_rule_engine]), - _ = application:stop(emqx_connector), + emqx_cth_suite:stop(Apps), os:unsetenv("PUBSUB_EMULATOR_HOST"), ok. 
@@ -1514,7 +1519,7 @@ t_async_worker_death_mid_pull(Config) -> #{?snk_kind := gcp_pubsub_consumer_worker_reply_delegator} ), spawn_link(fun() -> - ct:pal("will kill async worker"), + ct:pal("will kill async workers"), ?tp_span( kill_async_worker, #{}, @@ -1530,11 +1535,12 @@ t_async_worker_death_mid_pull(Config) -> fun(AsyncWorkerPid) -> Ref = monitor(process, AsyncWorkerPid), ct:pal("killing pid ~p", [AsyncWorkerPid]), - sys:terminate(AsyncWorkerPid, die), + sys:terminate(AsyncWorkerPid, die, 20_000), receive {'DOWN', Ref, process, AsyncWorkerPid, _} -> + ct:pal("killed pid ~p", [AsyncWorkerPid]), ok - after 500 -> ct:fail("async worker didn't die") + after 500 -> ct:fail("async worker ~p didn't die", [AsyncWorkerPid]) end, ok end, @@ -1544,7 +1550,7 @@ t_async_worker_death_mid_pull(Config) -> ok end ), - ct:pal("killed async worker") + ct:pal("killed async workers") end), ?assertMatch( From f3693e5dbc7afd1c46e35f8deb99c96ad69f7939 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Wed, 22 Nov 2023 16:23:28 -0300 Subject: [PATCH 066/101] fix(gcp_pubsub_producer): add missing references to api specs --- .../src/emqx_bridge_gcp_pubsub_producer_schema.erl | 13 +++++++++++-- .../src/schema/emqx_connector_ee_schema.erl | 6 ++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_schema.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_schema.erl index 11ca16e0b..0ee625824 100644 --- a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_schema.erl +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_schema.erl @@ -71,14 +71,23 @@ fields("config_connector") -> emqx_bridge_gcp_pubsub:fields(connector_config) ++ emqx_resource_schema:fields("resource_opts"); %%========================================= -%% HTTP API fields +%% HTTP API fields: action %%========================================= fields("get_bridge_v2") -> emqx_bridge_schema:status_fields() ++ fields("post_bridge_v2"); fields("post_bridge_v2") -> [type_field(), name_field() | fields("put_bridge_v2")]; fields("put_bridge_v2") -> - fields(producer_action). + fields(producer_action); +%%========================================= +%% HTTP API fields: connector +%%========================================= +fields("get_connector") -> + emqx_bridge_schema:status_fields() ++ fields("post_connector"); +fields("post_connector") -> + [type_field(), name_field() | fields("put_connector")]; +fields("put_connector") -> + fields("config_connector"). 
desc("config_connector") -> ?DESC("config_connector"); diff --git a/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl index 27b068461..6c303dd7e 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl @@ -117,6 +117,7 @@ schema_modules() -> [ emqx_bridge_azure_event_hub, emqx_bridge_confluent_producer, + emqx_bridge_gcp_pubsub_producer_schema, emqx_bridge_kafka, emqx_bridge_syskeeper_connector, emqx_bridge_syskeeper_proxy @@ -133,6 +134,11 @@ api_schemas(Method) -> emqx_bridge_confluent_producer, <<"confluent_producer">>, Method ++ "_connector" ), api_ref(emqx_bridge_kafka, <<"kafka_producer">>, Method ++ "_connector"), + api_ref( + emqx_bridge_gcp_pubsub_producer_schema, + <<"gcp_pubsub_producer">>, + Method ++ "_connector" + ), api_ref(emqx_bridge_syskeeper_connector, <<"syskeeper_forwarder">>, Method), api_ref(emqx_bridge_syskeeper_proxy, <<"syskeeper_proxy">>, Method) ]. From c89ec0b1f7a9897170bef95dfb998f6cadf4394b Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 23 Nov 2023 10:25:36 -0300 Subject: [PATCH 067/101] fix(bridge_api): don't mangle configs, use correct type as argument Fixes https://emqx.atlassian.net/browse/EMQX-11412 - The wrong type was being used in a list lookup function, resulting in the automatic transformation being called erroneously and mangling the config. - There was a left-over workaround still around which could still mangle the config. --- apps/emqx_bridge/src/emqx_bridge.erl | 1 - apps/emqx_bridge/src/emqx_bridge_api.erl | 18 +---------- apps/emqx_bridge/src/emqx_bridge_v2.erl | 30 +++++++++---------- apps/emqx_bridge/test/emqx_bridge_testlib.erl | 16 ++++++++++ .../test/emqx_bridge_v2_testlib.erl | 1 + .../emqx_bridge_azure_event_hub_v2_SUITE.erl | 9 ++++++ .../emqx_bridge_v2_kafka_producer_SUITE.erl | 14 +++++++++ 7 files changed, 56 insertions(+), 33 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index 64bec3a4e..569c1e75a 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -313,7 +313,6 @@ list() -> BridgeV2Bridges = emqx_bridge_v2:bridge_v1_list_and_transform(), BridgeV1Bridges ++ BridgeV2Bridges. -%%BridgeV2Bridges = emqx_bridge_v2:list(). lookup(Id) -> {Type, Name} = emqx_bridge_resource:parse_bridge_id(Id), diff --git a/apps/emqx_bridge/src/emqx_bridge_api.erl b/apps/emqx_bridge/src/emqx_bridge_api.erl index d263817bf..188f26ab5 100644 --- a/apps/emqx_bridge/src/emqx_bridge_api.erl +++ b/apps/emqx_bridge/src/emqx_bridge_api.erl @@ -900,7 +900,7 @@ format_resource( case emqx_bridge_v2:is_bridge_v2_type(Type) of true -> %% The defaults are already filled in - downgrade_raw_conf(Type, RawConf); + RawConf; false -> fill_defaults(Type, RawConf) end, @@ -1164,19 +1164,3 @@ upgrade_type(Type) -> downgrade_type(Type) -> emqx_bridge_lib:downgrade_type(Type). - -%% TODO: move it to callback -downgrade_raw_conf(kafka_producer, RawConf) -> - rename(<<"parameters">>, <<"kafka">>, RawConf); -downgrade_raw_conf(azure_event_hub_producer, RawConf) -> - rename(<<"parameters">>, <<"kafka">>, RawConf); -downgrade_raw_conf(_Type, RawConf) -> - RawConf. - -rename(OldKey, NewKey, Map) -> - case maps:find(OldKey, Map) of - {ok, Value} -> - maps:remove(OldKey, maps:put(NewKey, Value, Map)); - error -> - Map - end. 
diff --git a/apps/emqx_bridge/src/emqx_bridge_v2.erl b/apps/emqx_bridge/src/emqx_bridge_v2.erl index 706849965..d9ca1acce 100644 --- a/apps/emqx_bridge/src/emqx_bridge_v2.erl +++ b/apps/emqx_bridge/src/emqx_bridge_v2.erl @@ -1063,17 +1063,17 @@ bridge_v1_list_and_transform() -> Bridges = list_with_lookup_fun(fun bridge_v1_lookup_and_transform/2), [B || B <- Bridges, B =/= not_bridge_v1_compatible_error()]. -bridge_v1_lookup_and_transform(BridgeV1Type, Name) -> +bridge_v1_lookup_and_transform(ActionType, Name) -> + BridgeV1Type = ?MODULE:bridge_v2_type_to_bridge_v1_type(ActionType), case ?MODULE:bridge_v1_is_valid(BridgeV1Type, Name) of true -> - Type = ?MODULE:bridge_v1_type_to_bridge_v2_type(BridgeV1Type), - case lookup(Type, Name) of + case lookup(ActionType, Name) of {ok, #{raw_config := #{<<"connector">> := ConnectorName}} = BridgeV2} -> - ConnectorType = connector_type(Type), + ConnectorType = connector_type(ActionType), case emqx_connector:lookup(ConnectorType, ConnectorName) of {ok, Connector} -> bridge_v1_lookup_and_transform_helper( - BridgeV1Type, Name, Type, BridgeV2, ConnectorType, Connector + BridgeV1Type, Name, ActionType, BridgeV2, ConnectorType, Connector ); Error -> Error @@ -1089,7 +1089,7 @@ not_bridge_v1_compatible_error() -> {error, not_bridge_v1_compatible}. bridge_v1_lookup_and_transform_helper( - BridgeV1Type, BridgeName, BridgeV2Type, BridgeV2, ConnectorType, Connector + BridgeV1Type, BridgeName, ActionType, Action, ConnectorType, Connector ) -> ConnectorRawConfig1 = maps:get(raw_config, Connector), ConnectorRawConfig2 = fill_defaults( @@ -1098,10 +1098,10 @@ bridge_v1_lookup_and_transform_helper( <<"connectors">>, emqx_connector_schema ), - BridgeV2RawConfig1 = maps:get(raw_config, BridgeV2), - BridgeV2RawConfig2 = fill_defaults( - BridgeV2Type, - BridgeV2RawConfig1, + ActionRawConfig1 = maps:get(raw_config, Action), + ActionRawConfig2 = fill_defaults( + ActionType, + ActionRawConfig1, <<"actions">>, emqx_bridge_v2_schema ), @@ -1110,7 +1110,7 @@ bridge_v1_lookup_and_transform_helper( emqx_action_info:has_custom_connector_action_config_to_bridge_v1_config(BridgeV1Type) of false -> - BridgeV1Config1 = maps:remove(<<"connector">>, BridgeV2RawConfig2), + BridgeV1Config1 = maps:remove(<<"connector">>, ActionRawConfig2), %% Move parameters to the top level ParametersMap = maps:get(<<"parameters">>, BridgeV1Config1, #{}), BridgeV1Config2 = maps:remove(<<"parameters">>, BridgeV1Config1), @@ -1118,13 +1118,13 @@ bridge_v1_lookup_and_transform_helper( emqx_utils_maps:deep_merge(ConnectorRawConfig2, BridgeV1Config3); true -> emqx_action_info:connector_action_config_to_bridge_v1_config( - BridgeV1Type, ConnectorRawConfig2, BridgeV2RawConfig2 + BridgeV1Type, ConnectorRawConfig2, ActionRawConfig2 ) end, - BridgeV1Tmp = maps:put(raw_config, BridgeV1ConfigFinal, BridgeV2), + BridgeV1Tmp = maps:put(raw_config, BridgeV1ConfigFinal, Action), BridgeV1 = maps:remove(status, BridgeV1Tmp), - BridgeV2Status = maps:get(status, BridgeV2, undefined), - BridgeV2Error = maps:get(error, BridgeV2, undefined), + BridgeV2Status = maps:get(status, Action, undefined), + BridgeV2Error = maps:get(error, Action, undefined), ResourceData1 = maps:get(resource_data, BridgeV1, #{}), %% Replace id in resouce data BridgeV1Id = <<"bridge:", (bin(BridgeV1Type))/binary, ":", (bin(BridgeName))/binary>>, diff --git a/apps/emqx_bridge/test/emqx_bridge_testlib.erl b/apps/emqx_bridge/test/emqx_bridge_testlib.erl index df404d9b0..f486e5d64 100644 --- a/apps/emqx_bridge/test/emqx_bridge_testlib.erl +++ 
b/apps/emqx_bridge/test/emqx_bridge_testlib.erl @@ -120,6 +120,22 @@ create_bridge(Config, Overrides) -> ct:pal("creating bridge with config: ~p", [BridgeConfig]), emqx_bridge:create(BridgeType, BridgeName, BridgeConfig). +list_bridges_api() -> + Params = [], + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("listing bridges (via http)"), + Res = + case emqx_mgmt_api_test_util:request_api(get, Path, "", AuthHeader, Params, Opts) of + {ok, {Status, Headers, Body0}} -> + {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}}; + Error -> + Error + end, + ct:pal("list bridge result: ~p", [Res]), + Res. + create_bridge_api(Config) -> create_bridge_api(Config, _Overrides = #{}). diff --git a/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl b/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl index 6c48f5663..5cb9b043f 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl @@ -139,6 +139,7 @@ create_bridge(Config, Overrides) -> ConnectorName = ?config(connector_name, Config), ConnectorType = ?config(connector_type, Config), ConnectorConfig = ?config(connector_config, Config), + ct:pal("creating connector with config: ~p", [ConnectorConfig]), {ok, _} = emqx_connector:create(ConnectorType, ConnectorName, ConnectorConfig), diff --git a/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_v2_SUITE.erl b/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_v2_SUITE.erl index 4d441ea0b..9661004d0 100644 --- a/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_v2_SUITE.erl +++ b/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_v2_SUITE.erl @@ -368,3 +368,12 @@ t_parameters_key_api_spec(_Config) -> ?assert(is_map_key(<<"parameters">>, ActionProps), #{action_props => ActionProps}), ok. + +t_http_api_get(Config) -> + ?assertMatch({ok, _}, emqx_bridge_v2_testlib:create_bridge(Config)), + %% v1 api; no mangling of configs; has `kafka' top level config key + ?assertMatch( + {ok, {{_, 200, _}, _, [#{<<"kafka">> := _}]}}, + emqx_bridge_testlib:list_bridges_api() + ), + ok. diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl index 8ce3b7f6b..2ad0504b4 100644 --- a/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl +++ b/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl @@ -369,3 +369,17 @@ t_parameters_key_api_spec(_Config) -> ?assert(is_map_key(<<"parameters">>, ActionProps), #{action_props => ActionProps}), ok. + +t_http_api_get(_Config) -> + ConnectorName = <<"test_connector">>, + ActionName = <<"test_action">>, + ActionConfig = bridge_v2_config(<<"test_connector">>), + ConnectorConfig = connector_config(), + ?assertMatch({ok, _}, create_connector(ConnectorName, ConnectorConfig)), + ?assertMatch({ok, _}, create_action(ActionName, ActionConfig)), + %% v1 api; no mangling of configs; has `kafka' top level config key + ?assertMatch( + {ok, {{_, 200, _}, _, [#{<<"kafka">> := _}]}}, + emqx_bridge_testlib:list_bridges_api() + ), + ok. 
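The net effect, mirrored by the two new `t_http_api_get` cases, is that the V1 list endpoint now returns the action's raw config untouched. A rough sketch of the check from a test context (the bound values are placeholders):

```erlang
%% Sketch: after creating a kafka/azure_event_hub producer action and its
%% connector, the V1 list API still exposes the legacy top-level `kafka' key
%% rather than a mangled config.
{ok, {{_, 200, _}, _Headers, [Bridge | _]}} =
    emqx_bridge_testlib:list_bridges_api(),
#{<<"kafka">> := _KafkaOpts} = Bridge.
```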
From 29d42506d504554cfd5ef98e84af877598d4b237 Mon Sep 17 00:00:00 2001 From: Ilya Averyanov Date: Thu, 23 Nov 2023 17:16:11 +0300 Subject: [PATCH 068/101] fix(ds): impose more flexible types for ds implementations --- apps/emqx_durable_storage/src/emqx_ds.erl | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/apps/emqx_durable_storage/src/emqx_ds.erl b/apps/emqx_durable_storage/src/emqx_ds.erl index f5872ea1e..649341eb5 100644 --- a/apps/emqx_durable_storage/src/emqx_ds.erl +++ b/apps/emqx_durable_storage/src/emqx_ds.erl @@ -46,7 +46,10 @@ next_result/1, next_result/0, store_batch_result/0, make_iterator_result/1, make_iterator_result/0, - get_iterator_result/1 + get_iterator_result/1, + + ds_specific_stream/0, + ds_specific_iterator/0 ]). %%================================================================================ @@ -63,9 +66,13 @@ -type stream_rank() :: {term(), integer()}. --opaque stream() :: emqx_ds_replication_layer:stream(). +-opaque iterator() :: ds_specific_iterator(). --opaque iterator() :: emqx_ds_replication_layer:iterator(). +-opaque stream() :: ds_specific_stream(). + +-type ds_specific_iterator() :: term(). + +-type ds_specific_stream() :: term(). -type store_batch_result() :: ok | {error, _}. @@ -113,9 +120,10 @@ -callback store_batch(db(), [emqx_types:message()], message_store_opts()) -> store_batch_result(). --callback get_streams(db(), topic_filter(), time()) -> [{stream_rank(), stream()}]. +-callback get_streams(db(), topic_filter(), time()) -> [{stream_rank(), ds_specific_stream()}]. --callback make_iterator(db(), _Stream, topic_filter(), time()) -> make_iterator_result(_Iterator). +-callback make_iterator(db(), ds_specific_stream(), topic_filter(), time()) -> + make_iterator_result(ds_specific_iterator()). -callback next(db(), Iterator, pos_integer()) -> next_result(Iterator). From fdfa3213ccf8dc6a65990dd64b541d6e56fccba7 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 23 Nov 2023 10:25:36 -0300 Subject: [PATCH 069/101] fix(bridge_api): don't mangle configs, use correct type as argument Fixes https://emqx.atlassian.net/browse/EMQX-11412 - The wrong type was being used in a list lookup function, resulting in the automatic transformation being called erroneously and mangling the config. - There was a left-over workaround still around which could still mangle the config. 
--- apps/emqx_bridge/src/emqx_action_info.erl | 11 +++-- apps/emqx_bridge/src/emqx_bridge.erl | 1 - apps/emqx_bridge/src/emqx_bridge_v2.erl | 49 ++++++++++--------- apps/emqx_bridge/test/emqx_bridge_testlib.erl | 16 ++++++ .../test/emqx_bridge_v2_testlib.erl | 1 + .../emqx_bridge_azure_event_hub_v2_SUITE.erl | 9 ++++ .../emqx_bridge_v2_kafka_producer_SUITE.erl | 14 ++++++ 7 files changed, 74 insertions(+), 27 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index 44f871d53..12988b163 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -120,12 +120,15 @@ action_type_to_bridge_v1_type(ActionType, Conf) -> ActionInfoMap = info_map(), ActionTypeToBridgeV1Type = maps:get(action_type_to_bridge_v1_type, ActionInfoMap), case maps:get(ActionType, ActionTypeToBridgeV1Type, undefined) of - undefined -> ActionType; - BridgeV1TypeFun when is_function(BridgeV1TypeFun) -> BridgeV1TypeFun(get_confs(Conf)); - BridgeV1Type -> BridgeV1Type + undefined -> + ActionType; + BridgeV1TypeFun when is_function(BridgeV1TypeFun) -> + BridgeV1TypeFun(get_confs(ActionType, Conf)); + BridgeV1Type -> + BridgeV1Type end. -get_confs(#{connector := ConnectorName, type := ActionType} = ActionConfig) -> +get_confs(ActionType, #{<<"connector">> := ConnectorName} = ActionConfig) -> ConnectorType = action_type_to_connector_type(ActionType), ConnectorConfig = emqx_conf:get_raw([connectors, ConnectorType, ConnectorName]), {ActionConfig, ConnectorConfig}. diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index 44945e22f..0e116589b 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -314,7 +314,6 @@ list() -> BridgeV2Bridges = emqx_bridge_v2:bridge_v1_list_and_transform(), BridgeV1Bridges ++ BridgeV2Bridges. -%%BridgeV2Bridges = emqx_bridge_v2:list(). lookup(Id) -> {Type, Name} = emqx_bridge_resource:parse_bridge_id(Id), diff --git a/apps/emqx_bridge/src/emqx_bridge_v2.erl b/apps/emqx_bridge/src/emqx_bridge_v2.erl index e02783de4..6f296c63c 100644 --- a/apps/emqx_bridge/src/emqx_bridge_v2.erl +++ b/apps/emqx_bridge/src/emqx_bridge_v2.erl @@ -1063,33 +1063,38 @@ bridge_v1_list_and_transform() -> Bridges = list_with_lookup_fun(fun bridge_v1_lookup_and_transform/2), [B || B <- Bridges, B =/= not_bridge_v1_compatible_error()]. -bridge_v1_lookup_and_transform(BridgeV1Type, Name) -> - case ?MODULE:bridge_v1_is_valid(BridgeV1Type, Name) of - true -> - Type = ?MODULE:bridge_v1_type_to_bridge_v2_type(BridgeV1Type), - case lookup(Type, Name) of - {ok, #{raw_config := #{<<"connector">> := ConnectorName}} = BridgeV2} -> - ConnectorType = connector_type(Type), +bridge_v1_lookup_and_transform(ActionType, Name) -> + case lookup(ActionType, Name) of + {ok, #{raw_config := #{<<"connector">> := ConnectorName}} = ActionConfig} -> + BridgeV1Type = ?MODULE:bridge_v2_type_to_bridge_v1_type(ActionType, ActionConfig), + case ?MODULE:bridge_v1_is_valid(BridgeV1Type, Name) of + true -> + ConnectorType = connector_type(ActionType), case emqx_connector:lookup(ConnectorType, ConnectorName) of {ok, Connector} -> bridge_v1_lookup_and_transform_helper( - BridgeV1Type, Name, Type, BridgeV2, ConnectorType, Connector + BridgeV1Type, + Name, + ActionType, + ActionConfig, + ConnectorType, + Connector ); Error -> Error end; - Error -> - Error + false -> + not_bridge_v1_compatible_error() end; - false -> - not_bridge_v1_compatible_error() + Error -> + Error end. 
not_bridge_v1_compatible_error() -> {error, not_bridge_v1_compatible}. bridge_v1_lookup_and_transform_helper( - BridgeV1Type, BridgeName, BridgeV2Type, BridgeV2, ConnectorType, Connector + BridgeV1Type, BridgeName, ActionType, Action, ConnectorType, Connector ) -> ConnectorRawConfig1 = maps:get(raw_config, Connector), ConnectorRawConfig2 = fill_defaults( @@ -1098,10 +1103,10 @@ bridge_v1_lookup_and_transform_helper( <<"connectors">>, emqx_connector_schema ), - BridgeV2RawConfig1 = maps:get(raw_config, BridgeV2), - BridgeV2RawConfig2 = fill_defaults( - BridgeV2Type, - BridgeV2RawConfig1, + ActionRawConfig1 = maps:get(raw_config, Action), + ActionRawConfig2 = fill_defaults( + ActionType, + ActionRawConfig1, <<"actions">>, emqx_bridge_v2_schema ), @@ -1110,7 +1115,7 @@ bridge_v1_lookup_and_transform_helper( emqx_action_info:has_custom_connector_action_config_to_bridge_v1_config(BridgeV1Type) of false -> - BridgeV1Config1 = maps:remove(<<"connector">>, BridgeV2RawConfig2), + BridgeV1Config1 = maps:remove(<<"connector">>, ActionRawConfig2), %% Move parameters to the top level ParametersMap = maps:get(<<"parameters">>, BridgeV1Config1, #{}), BridgeV1Config2 = maps:remove(<<"parameters">>, BridgeV1Config1), @@ -1118,13 +1123,13 @@ bridge_v1_lookup_and_transform_helper( emqx_utils_maps:deep_merge(ConnectorRawConfig2, BridgeV1Config3); true -> emqx_action_info:connector_action_config_to_bridge_v1_config( - BridgeV1Type, ConnectorRawConfig2, BridgeV2RawConfig2 + BridgeV1Type, ConnectorRawConfig2, ActionRawConfig2 ) end, - BridgeV1Tmp = maps:put(raw_config, BridgeV1ConfigFinal, BridgeV2), + BridgeV1Tmp = maps:put(raw_config, BridgeV1ConfigFinal, Action), BridgeV1 = maps:remove(status, BridgeV1Tmp), - BridgeV2Status = maps:get(status, BridgeV2, undefined), - BridgeV2Error = maps:get(error, BridgeV2, undefined), + BridgeV2Status = maps:get(status, Action, undefined), + BridgeV2Error = maps:get(error, Action, undefined), ResourceData1 = maps:get(resource_data, BridgeV1, #{}), %% Replace id in resouce data BridgeV1Id = <<"bridge:", (bin(BridgeV1Type))/binary, ":", (bin(BridgeName))/binary>>, diff --git a/apps/emqx_bridge/test/emqx_bridge_testlib.erl b/apps/emqx_bridge/test/emqx_bridge_testlib.erl index df404d9b0..f486e5d64 100644 --- a/apps/emqx_bridge/test/emqx_bridge_testlib.erl +++ b/apps/emqx_bridge/test/emqx_bridge_testlib.erl @@ -120,6 +120,22 @@ create_bridge(Config, Overrides) -> ct:pal("creating bridge with config: ~p", [BridgeConfig]), emqx_bridge:create(BridgeType, BridgeName, BridgeConfig). +list_bridges_api() -> + Params = [], + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("listing bridges (via http)"), + Res = + case emqx_mgmt_api_test_util:request_api(get, Path, "", AuthHeader, Params, Opts) of + {ok, {Status, Headers, Body0}} -> + {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}}; + Error -> + Error + end, + ct:pal("list bridge result: ~p", [Res]), + Res. + create_bridge_api(Config) -> create_bridge_api(Config, _Overrides = #{}). 
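The new list_bridges_api/0 helper is what the suites further down use to check that listing v1-style bridges over HTTP returns unmangled configs. A possible assertion inside a test case (sketch; BridgeName is assumed to be bound by the test setup):

    {ok, {{_, 200, _}, _, Bridges}} = emqx_bridge_testlib:list_bridges_api(),
    ?assert(lists:any(fun(B) -> maps:get(<<"name">>, B, undefined) =:= BridgeName end, Bridges)),
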
diff --git a/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl b/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl index 6c48f5663..5cb9b043f 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl @@ -139,6 +139,7 @@ create_bridge(Config, Overrides) -> ConnectorName = ?config(connector_name, Config), ConnectorType = ?config(connector_type, Config), ConnectorConfig = ?config(connector_config, Config), + ct:pal("creating connector with config: ~p", [ConnectorConfig]), {ok, _} = emqx_connector:create(ConnectorType, ConnectorName, ConnectorConfig), diff --git a/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_v2_SUITE.erl b/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_v2_SUITE.erl index 4d441ea0b..9661004d0 100644 --- a/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_v2_SUITE.erl +++ b/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_v2_SUITE.erl @@ -368,3 +368,12 @@ t_parameters_key_api_spec(_Config) -> ?assert(is_map_key(<<"parameters">>, ActionProps), #{action_props => ActionProps}), ok. + +t_http_api_get(Config) -> + ?assertMatch({ok, _}, emqx_bridge_v2_testlib:create_bridge(Config)), + %% v1 api; no mangling of configs; has `kafka' top level config key + ?assertMatch( + {ok, {{_, 200, _}, _, [#{<<"kafka">> := _}]}}, + emqx_bridge_testlib:list_bridges_api() + ), + ok. diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl index 8ce3b7f6b..2ad0504b4 100644 --- a/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl +++ b/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl @@ -369,3 +369,17 @@ t_parameters_key_api_spec(_Config) -> ?assert(is_map_key(<<"parameters">>, ActionProps), #{action_props => ActionProps}), ok. + +t_http_api_get(_Config) -> + ConnectorName = <<"test_connector">>, + ActionName = <<"test_action">>, + ActionConfig = bridge_v2_config(<<"test_connector">>), + ConnectorConfig = connector_config(), + ?assertMatch({ok, _}, create_connector(ConnectorName, ConnectorConfig)), + ?assertMatch({ok, _}, create_action(ActionName, ActionConfig)), + %% v1 api; no mangling of configs; has `kafka' top level config key + ?assertMatch( + {ok, {{_, 200, _}, _, [#{<<"kafka">> := _}]}}, + emqx_bridge_testlib:list_bridges_api() + ), + ok. From 8ba116d3782205494536ec529ef52a522bcfa67b Mon Sep 17 00:00:00 2001 From: Stefan Strigler Date: Thu, 23 Nov 2023 14:54:55 +0100 Subject: [PATCH 070/101] fix(emqx_auth): check authenticator exists in /authenticator/:id/users --- apps/emqx_auth/src/emqx_authn/emqx_authn_api.erl | 15 +++++++++++---- .../test/emqx_authn/emqx_authn_api_SUITE.erl | 13 +++++++++++++ 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/apps/emqx_auth/src/emqx_authn/emqx_authn_api.erl b/apps/emqx_auth/src/emqx_authn/emqx_authn_api.erl index f30f7f473..1b299fa64 100644 --- a/apps/emqx_auth/src/emqx_authn/emqx_authn_api.erl +++ b/apps/emqx_auth/src/emqx_authn/emqx_authn_api.erl @@ -1111,10 +1111,7 @@ list_users(ChainName, AuthenticatorID, QueryString) -> {error, page_limit_invalid} -> {400, #{code => <<"INVALID_PARAMETER">>, message => <<"page_limit_invalid">>}}; {error, Reason} -> - {400, #{ - code => <<"INVALID_PARAMETER">>, - message => list_to_binary(io_lib:format("Reason ~p", [Reason])) - }}; + serialize_error({user_error, Reason}); Result -> {200, Result} end. 
@@ -1176,6 +1173,16 @@ serialize_error({user_error, not_found}) -> code => <<"NOT_FOUND">>, message => binfmt("User not found", []) }}; +serialize_error({user_error, {not_found, {chain, ?GLOBAL}}}) -> + {404, #{ + code => <<"NOT_FOUND">>, + message => <<"Authenticator not found in the 'global' scope">> + }}; +serialize_error({user_error, {not_found, {chain, Name}}}) -> + {400, #{ + code => <<"BAD_REQUEST">>, + message => binfmt("No authentication has been created for listener ~p", [Name]) + }}; serialize_error({user_error, already_exist}) -> {409, #{ code => <<"ALREADY_EXISTS">>, diff --git a/apps/emqx_auth/test/emqx_authn/emqx_authn_api_SUITE.erl b/apps/emqx_auth/test/emqx_authn/emqx_authn_api_SUITE.erl index 45a605e6e..cceab0d54 100644 --- a/apps/emqx_auth/test/emqx_authn/emqx_authn_api_SUITE.erl +++ b/apps/emqx_auth/test/emqx_authn/emqx_authn_api_SUITE.erl @@ -435,6 +435,19 @@ test_authenticator_position(PathPrefix) -> PathPrefix ++ [?CONF_NS] ). +t_authenticator_users_not_found(_) -> + GlobalUser = #{user_id => <<"global_user">>, password => <<"p1">>}, + {ok, 404, _} = request( + get, + uri([?CONF_NS, "password_based:built_in_database", "users"]) + ), + {ok, 404, _} = request( + post, + uri([?CONF_NS, "password_based:built_in_database", "users"]), + GlobalUser + ), + ok. + %% listener authn api is not supported since 5.1.0 %% Don't support listener switch to global chain. ignore_switch_to_global_chain(_) -> From 82e74d02015fce9ade5ed5c87ed0d29070af0041 Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Tue, 21 Nov 2023 20:06:27 +0100 Subject: [PATCH 071/101] feat(ds): Add a flag that forces all sessions to become durable --- apps/emqx/src/emqx_persistent_message.erl | 8 +++++++- apps/emqx/src/emqx_session.erl | 18 ++++++++++++------ 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/apps/emqx/src/emqx_persistent_message.erl b/apps/emqx/src/emqx_persistent_message.erl index 30ebe7417..2a852627d 100644 --- a/apps/emqx/src/emqx_persistent_message.erl +++ b/apps/emqx/src/emqx_persistent_message.erl @@ -19,7 +19,7 @@ -include("emqx.hrl"). -export([init/0]). --export([is_persistence_enabled/0]). +-export([is_persistence_enabled/0, force_ds/0]). %% Message persistence -export([ @@ -54,6 +54,12 @@ is_persistence_enabled() -> storage_backend() -> storage_backend(emqx_config:get([session_persistence, storage])). +%% Dev-only option: force all messages to go through +%% `emqx_persistent_session_ds': +-spec force_ds() -> boolean(). +force_ds() -> + emqx_config:get([session_persistence, force_ds]). + storage_backend(#{ builtin := #{enable := true, n_shards := NShards, replication_factor := ReplicationFactor} }) -> diff --git a/apps/emqx/src/emqx_session.erl b/apps/emqx/src/emqx_session.erl index ba49d3f85..64ef2e30d 100644 --- a/apps/emqx/src/emqx_session.erl +++ b/apps/emqx/src/emqx_session.erl @@ -626,12 +626,18 @@ choose_impl_candidates(#{expiry_interval := EI}) -> choose_impl_candidates(_, _IsPSStoreEnabled = false) -> [emqx_session_mem]; choose_impl_candidates(0, _IsPSStoreEnabled = true) -> - %% NOTE - %% If ExpiryInterval is 0, the natural choice is `emqx_session_mem`. Yet we still - %% need to look the existing session up in the `emqx_persistent_session_ds` store - %% first, because previous connection may have set ExpiryInterval to a non-zero - %% value. - [emqx_session_mem, emqx_persistent_session_ds]; + case emqx_persistent_message:force_ds() of + false -> + %% NOTE + %% If ExpiryInterval is 0, the natural choice is + %% `emqx_session_mem'. 
Yet we still need to look the + %% existing session up in the `emqx_persistent_session_ds' + %% store first, because previous connection may have set + %% ExpiryInterval to a non-zero value. + [emqx_session_mem, emqx_persistent_session_ds]; + true -> + [emqx_persistent_session_ds] + end; choose_impl_candidates(EI, _IsPSStoreEnabled = true) when EI > 0 -> [emqx_persistent_session_ds]. From 1ced8786fdea13c51ad47016d9dd6eec0367ab74 Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Wed, 22 Nov 2023 01:55:58 +0100 Subject: [PATCH 072/101] feat(ds): Make session poll interval configurable --- apps/emqx/src/emqx_persistent_message.erl | 2 +- apps/emqx/src/emqx_persistent_session_ds.erl | 4 ++-- apps/emqx/src/emqx_schema.erl | 17 +++++++++++++++++ 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/apps/emqx/src/emqx_persistent_message.erl b/apps/emqx/src/emqx_persistent_message.erl index 2a852627d..c8ad490f8 100644 --- a/apps/emqx/src/emqx_persistent_message.erl +++ b/apps/emqx/src/emqx_persistent_message.erl @@ -58,7 +58,7 @@ storage_backend() -> %% `emqx_persistent_session_ds': -spec force_ds() -> boolean(). force_ds() -> - emqx_config:get([session_persistence, force_ds]). + emqx_config:get([session_persistence, force_persistence]). storage_backend(#{ builtin := #{enable := true, n_shards := NShards, replication_factor := ReplicationFactor} diff --git a/apps/emqx/src/emqx_persistent_session_ds.erl b/apps/emqx/src/emqx_persistent_session_ds.erl index 928115a52..420defb18 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.erl +++ b/apps/emqx/src/emqx_persistent_session_ds.erl @@ -349,11 +349,11 @@ handle_timeout( Session = #{id := Id, inflight := Inflight0, receive_maximum := ReceiveMaximum} ) -> {Publishes, Inflight} = emqx_persistent_message_ds_replayer:poll(Id, Inflight0, ReceiveMaximum), - %% TODO: make these values configurable: + IdlePollInterval = emqx_config:get([session_persistence, idle_poll_interval]), Timeout = case Publishes of [] -> - 100; + IdlePollInterval; [_ | _] -> 0 end, diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index 8e401a442..2986950e7 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -1772,6 +1772,23 @@ fields("session_persistence") -> <<"builtin">> => #{} } } + )}, + {"idle_poll_interval", + sc( + duration(), + #{ + default => 100, + desc => ?DESC(session_ds_idle_poll_interval) + } + )}, + {"force_persistence", + sc( + boolean(), + #{ + default => false, + %% Only for testing, shall remain hidden + importance => ?IMPORTANCE_HIDDEN + } )} ]; fields("session_storage_backend") -> From c69b82455ed5abeaab427430510ae66e123d180b Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Thu, 23 Nov 2023 17:50:43 +0100 Subject: [PATCH 073/101] feat(ds): Don't store #message record in the DB --- .../src/emqx_ds_storage_bitfield_lts.erl | 69 ++++++++++++++++++- 1 file changed, 67 insertions(+), 2 deletions(-) diff --git a/apps/emqx_durable_storage/src/emqx_ds_storage_bitfield_lts.erl b/apps/emqx_durable_storage/src/emqx_ds_storage_bitfield_lts.erl index 2d4949919..6a69a20f3 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_storage_bitfield_lts.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_storage_bitfield_lts.erl @@ -99,8 +99,27 @@ %% Limit on the number of wildcard levels in the learned topic trie: -define(WILDCARD_LIMIT, 10). +%% Persistent (durable) term representing `#message{}' record. Must +%% not change. 
+-type value_v1() :: + { + _Id :: binary(), + _Qos :: 0..2, + _From :: atom() | binary(), + _Flags :: emqx_types:flags(), + _Headsers :: emqx_types:headers(), + _Topic :: emqx_types:topic(), + _Payload :: emqx_types:payload(), + _Timestamp :: integer(), + _Extra :: term() + }. + -include("emqx_ds_bitmask.hrl"). +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). +-endif. + %%================================================================================ %% API funcions %%================================================================================ @@ -389,11 +408,39 @@ hash_topic_level(TopicLevel) -> <> = erlang:md5(TopicLevel), Int. +-spec message_to_value_v1(emqx_types:message()) -> value_v1(). +message_to_value_v1(#message{ + id = Id, + qos = Qos, + from = From, + flags = Flags, + headers = Headers, + topic = Topic, + payload = Payload, + timestamp = Timestamp, + extra = Extra +}) -> + {Id, Qos, From, Flags, Headers, Topic, Payload, Timestamp, Extra}. + +-spec value_v1_to_message(value_v1()) -> emqx_types:message(). +value_v1_to_message({Id, Qos, From, Flags, Headers, Topic, Payload, Timestamp, Extra}) -> + #message{ + id = Id, + qos = Qos, + from = From, + flags = Flags, + headers = Headers, + topic = Topic, + payload = Payload, + timestamp = Timestamp, + extra = Extra + }. + serialize(Msg) -> - term_to_binary(Msg). + term_to_binary(message_to_value_v1(Msg)). deserialize(Blob) -> - binary_to_term(Blob). + value_v1_to_message(binary_to_term(Blob)). -define(BYTE_SIZE, 8). @@ -452,3 +499,21 @@ data_cf(GenId) -> -spec trie_cf(emqx_ds_storage_layer:gen_id()) -> [char()]. trie_cf(GenId) -> "emqx_ds_storage_bitfield_lts_trie" ++ integer_to_list(GenId). + +-ifdef(TEST). + +serialize_deserialize_test() -> + Msg = #message{ + id = <<"message_id_val">>, + qos = 2, + from = <<"from_val">>, + flags = #{sys => true, dup => true}, + headers = #{foo => bar}, + topic = <<"topic/value">>, + payload = [<<"foo">>, <<"bar">>], + timestamp = 42424242, + extra = "extra_val" + }, + ?assertEqual(Msg, deserialize(serialize(Msg))). + +-endif. From c5bb86db67a06c8b7c4415ddd5a86ea8c4703f2d Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Wed, 15 Nov 2023 13:24:49 +0100 Subject: [PATCH 074/101] feat(ds): Support QoS 0 --- apps/emqx/src/emqx_message.erl | 4 +- .../emqx_persistent_message_ds_replayer.erl | 37 +++++++++++-------- apps/emqx/src/emqx_persistent_session_ds.erl | 2 +- apps/emqx/src/emqx_schema.erl | 4 +- .../test/emqx_persistent_messages_SUITE.erl | 29 ++++++++++++++- .../src/emqx_ds_replication_layer_meta.erl | 33 ++++++++++++++++- 6 files changed, 87 insertions(+), 22 deletions(-) diff --git a/apps/emqx/src/emqx_message.erl b/apps/emqx/src/emqx_message.erl index 4ff36504d..b65c8360f 100644 --- a/apps/emqx/src/emqx_message.erl +++ b/apps/emqx/src/emqx_message.erl @@ -301,7 +301,9 @@ update_expiry(Msg) -> Msg. %% @doc Message to PUBLISH Packet. --spec to_packet(emqx_types:packet_id(), emqx_types:message()) -> +%% +%% When QoS=0 then packet id must be `undefined' +-spec to_packet(emqx_types:packet_id() | undefined, emqx_types:message()) -> emqx_types:packet(). to_packet( PacketId, diff --git a/apps/emqx/src/emqx_persistent_message_ds_replayer.erl b/apps/emqx/src/emqx_persistent_message_ds_replayer.erl index 64b9cabb4..d622444e9 100644 --- a/apps/emqx/src/emqx_persistent_message_ds_replayer.erl +++ b/apps/emqx/src/emqx_persistent_message_ds_replayer.erl @@ -27,6 +27,7 @@ -export_type([inflight/0, seqno/0]). -include_lib("emqx/include/logger.hrl"). 
+-include_lib("emqx_utils/include/emqx_message.hrl"). -include("emqx_persistent_session_ds.hrl"). -ifdef(TEST). @@ -176,9 +177,12 @@ fetch(SessionId, Inflight0, [DSStream | Streams], N, Acc) when N > 0 -> #inflight{next_seqno = FirstSeqno, offset_ranges = Ranges} = Inflight0, ItBegin = get_last_iterator(DSStream, Ranges), {ok, ItEnd, Messages} = emqx_ds:next(?PERSISTENT_MESSAGE_DB, ItBegin, N), - {Publishes, UntilSeqno} = publish(FirstSeqno, Messages), - case range_size(FirstSeqno, UntilSeqno) of - Size when Size > 0 -> + case Messages of + [] -> + fetch(SessionId, Inflight0, Streams, N, Acc); + _ -> + {Publishes, UntilSeqno} = publish(FirstSeqno, Messages, _PreserveQoS0 = true), + Size = range_size(FirstSeqno, UntilSeqno), %% We need to preserve the iterator pointing to the beginning of the %% range, so that we can replay it if needed. Range0 = #ds_pubrange{ @@ -197,9 +201,7 @@ fetch(SessionId, Inflight0, [DSStream | Streams], N, Acc) when N > 0 -> next_seqno = UntilSeqno, offset_ranges = Ranges ++ [Range] }, - fetch(SessionId, Inflight, Streams, N - Size, [Publishes | Acc]); - 0 -> - fetch(SessionId, Inflight0, Streams, N, Acc) + fetch(SessionId, Inflight, Streams, N - Size, [Publishes | Acc]) end; fetch(_SessionId, Inflight, _Streams, _N, Acc) -> Publishes = lists:append(lists:reverse(Acc)), @@ -268,7 +270,7 @@ replay_range( end, MessagesReplay = [emqx_message:set_flag(dup, true, Msg) || Msg <- MessagesUnacked], %% Asserting that range is consistent with the message storage state. - {Replies, Until} = publish(FirstUnacked, MessagesReplay), + {Replies, Until} = publish(FirstUnacked, MessagesReplay, _PreserveQoS0 = false), %% Again, we need to keep the iterator pointing past the end of the %% range, so that we can pick up where we left off. Range = Range0#ds_pubrange{iterator = ItNext}, @@ -276,15 +278,18 @@ replay_range( replay_range(Range0 = #ds_pubrange{type = checkpoint}, _AckedUntil, Acc) -> {Range0, Acc}. -publish(FirstSeqno, Messages) -> - lists:mapfoldl( - fun(Message, Seqno) -> - PacketId = seqno_to_packet_id(Seqno), - {{PacketId, Message}, next_seqno(Seqno)} - end, - FirstSeqno, - Messages - ). +publish(FirstSeqNo, Messages, PreserveQos0) -> + do_publish(FirstSeqNo, Messages, PreserveQos0, []). + +do_publish(SeqNo, [], _, Acc) -> + {lists:reverse(Acc), SeqNo}; +do_publish(SeqNo, [#message{qos = 0} | Messages], false, Acc) -> + do_publish(SeqNo, Messages, false, Acc); +do_publish(SeqNo, [#message{qos = 0} = Message | Messages], true, Acc) -> + do_publish(SeqNo, Messages, true, [{undefined, Message} | Acc]); +do_publish(SeqNo, [Message | Messages], PreserveQos0, Acc) -> + PacketId = seqno_to_packet_id(SeqNo), + do_publish(next_seqno(SeqNo), Messages, PreserveQos0, [{PacketId, Message} | Acc]). -spec preserve_range(ds_pubrange()) -> ok. preserve_range(Range = #ds_pubrange{type = inflight}) -> diff --git a/apps/emqx/src/emqx_persistent_session_ds.erl b/apps/emqx/src/emqx_persistent_session_ds.erl index 420defb18..6df8de892 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.erl +++ b/apps/emqx/src/emqx_persistent_session_ds.erl @@ -338,7 +338,7 @@ pubcomp(_ClientInfo, _PacketId, _Session = #{}) -> -spec deliver(clientinfo(), [emqx_types:deliver()], session()) -> {ok, replies(), session()}. deliver(_ClientInfo, _Delivers, Session) -> - %% TODO: QoS0 and system messages end up here. + %% TODO: system messages end up here. {ok, [], Session}. 
-spec handle_timeout(clientinfo(), _Timeout, session()) -> diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index 2986950e7..045a9acb3 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -1775,9 +1775,9 @@ fields("session_persistence") -> )}, {"idle_poll_interval", sc( - duration(), + timeout_duration(), #{ - default => 100, + default => <<"100ms">>, desc => ?DESC(session_ds_idle_poll_interval) } )}, diff --git a/apps/emqx/test/emqx_persistent_messages_SUITE.erl b/apps/emqx/test/emqx_persistent_messages_SUITE.erl index 922d7248f..e750b1251 100644 --- a/apps/emqx/test/emqx_persistent_messages_SUITE.erl +++ b/apps/emqx/test/emqx_persistent_messages_SUITE.erl @@ -233,6 +233,31 @@ t_session_subscription_iterators(Config) -> ), ok. +t_qos0(Config) -> + Sub = connect(<>, true, 30), + Pub = connect(<>, true, 0), + try + {ok, _, [1]} = emqtt:subscribe(Sub, <<"t/#">>, qos1), + + Messages = [ + {<<"t/1">>, <<"1">>, 0}, + {<<"t/1">>, <<"2">>, 1}, + {<<"t/1">>, <<"3">>, 0} + ], + [emqtt:publish(Pub, Topic, Payload, Qos) || {Topic, Payload, Qos} <- Messages], + ?assertMatch( + [ + #{qos := 0, topic := <<"t/1">>, payload := <<"1">>}, + #{qos := 1, topic := <<"t/1">>, payload := <<"2">>}, + #{qos := 0, topic := <<"t/1">>, payload := <<"3">>} + ], + receive_messages(3) + ) + after + emqtt:stop(Sub), + emqtt:stop(Pub) + end. + %% connect(ClientId, CleanStart, EI) -> @@ -273,7 +298,7 @@ consume(It) -> end. receive_messages(Count) -> - receive_messages(Count, []). + lists:reverse(receive_messages(Count, [])). receive_messages(0, Msgs) -> Msgs; @@ -307,4 +332,6 @@ get_mqtt_port(Node, Type) -> clear_db() -> ok = emqx_ds:drop_db(?PERSISTENT_MESSAGE_DB), + mria:stop(), + ok = mnesia:delete_schema([node()]), ok. diff --git a/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl b/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl index f7dbc828f..077df28d0 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl @@ -34,7 +34,8 @@ drop_db/1, shard_leader/2, this_site/0, - set_leader/3 + set_leader/3, + print_status/0 ]). %% gen_server @@ -100,6 +101,35 @@ %% API funcions %%================================================================================ +-spec print_status() -> ok. +print_status() -> + io:format("THIS SITE:~n~s~n", [base64:encode(this_site())]), + io:format("~nSITES:~n", []), + Nodes = [node() | nodes()], + lists:foreach( + fun(#?NODE_TAB{site = Site, node = Node}) -> + Status = + case lists:member(Node, Nodes) of + true -> up; + false -> down + end, + io:format("~s ~p ~p~n", [base64:encode(Site), Node, Status]) + end, + eval_qlc(mnesia:table(?NODE_TAB)) + ), + io:format("~nSHARDS~n", []), + lists:foreach( + fun(#?SHARD_TAB{shard = {DB, Shard}, leader = Leader}) -> + Status = + case lists:member(Leader, Nodes) of + true -> up; + false -> down + end, + io:format("~p/~s ~p ~p~n", [DB, Shard, Leader, Status]) + end, + eval_qlc(mnesia:table(?SHARD_TAB)) + ). + -spec this_site() -> site(). this_site() -> persistent_term:get(?emqx_ds_builtin_site). 
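The new print_status/0 is a small operator convenience: it dumps the local site ID, every known site with its node and up/down state, and the shard leaders, exactly as laid out by the io:format/2 calls above. It is meant to be run manually, for example from a remote console:

    %% on a running node (remote_console); returns `ok' after printing the report
    ok = emqx_ds_replication_layer_meta:print_status().
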
@@ -297,6 +327,7 @@ ensure_site() -> ok; _ -> Site = crypto:strong_rand_bytes(8), + logger:notice("Creating a new site with ID=~s", [base64:encode(Site)]), ok = filelib:ensure_dir(Filename), {ok, FD} = file:open(Filename, [write]), io:format(FD, "~p.", [Site]), From a158f25a403fe077aec4c224b8219363e9248c9f Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Thu, 23 Nov 2023 20:08:55 +0100 Subject: [PATCH 075/101] fix(ds): Fix return type of emqx_persistent_session_ds:publish --- apps/emqx/src/emqx_persistent_session_ds.erl | 4 +-- .../test/emqx_persistent_messages_SUITE.erl | 25 +++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/apps/emqx/src/emqx_persistent_session_ds.erl b/apps/emqx/src/emqx_persistent_session_ds.erl index 6df8de892..eb4eb0b1b 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.erl +++ b/apps/emqx/src/emqx_persistent_session_ds.erl @@ -274,12 +274,12 @@ get_subscription(TopicFilter, #{subscriptions := Subs}) -> %%-------------------------------------------------------------------- -spec publish(emqx_types:packet_id(), emqx_types:message(), session()) -> - {ok, emqx_types:publish_result(), replies(), session()} + {ok, emqx_types:publish_result(), session()} | {error, emqx_types:reason_code()}. publish(_PacketId, Msg, Session) -> %% TODO: QoS2 Result = emqx_broker:publish(Msg), - {ok, Result, [], Session}. + {ok, Result, Session}. %%-------------------------------------------------------------------- %% Client -> Broker: PUBACK diff --git a/apps/emqx/test/emqx_persistent_messages_SUITE.erl b/apps/emqx/test/emqx_persistent_messages_SUITE.erl index e750b1251..f8f7baaf1 100644 --- a/apps/emqx/test/emqx_persistent_messages_SUITE.erl +++ b/apps/emqx/test/emqx_persistent_messages_SUITE.erl @@ -258,6 +258,31 @@ t_qos0(Config) -> emqtt:stop(Pub) end. +t_publish_as_persistent(Config) -> + Sub = connect(<>, true, 30), + Pub = connect(<>, true, 30), + try + {ok, _, [1]} = emqtt:subscribe(Sub, <<"t/#">>, qos1), + Messages = [ + {<<"t/1">>, <<"1">>, 0}, + {<<"t/1">>, <<"2">>, 1}, + {<<"t/1">>, <<"3">>, 2} + ], + [emqtt:publish(Pub, Topic, Payload, Qos) || {Topic, Payload, Qos} <- Messages], + ?assertMatch( + [ + #{qos := 0, topic := <<"t/1">>, payload := <<"1">>}, + #{qos := 1, topic := <<"t/1">>, payload := <<"2">>} + %% TODO: QoS 2 + %% #{qos := 2, topic := <<"t/1">>, payload := <<"3">>} + ], + receive_messages(3) + ) + after + emqtt:stop(Sub), + emqtt:stop(Pub) + end. + %% connect(ClientId, CleanStart, EI) -> From 449bafc27ef1029f32ed403df1cb1df1a1045bf0 Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Thu, 23 Nov 2023 20:29:13 +0100 Subject: [PATCH 076/101] fix(ds): LTS trie handles empty topic levels --- apps/emqx_durable_storage/src/emqx_ds_lts.erl | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/apps/emqx_durable_storage/src/emqx_ds_lts.erl b/apps/emqx_durable_storage/src/emqx_ds_lts.erl index d06854fd0..d148e8cbc 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_lts.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_lts.erl @@ -119,7 +119,7 @@ trie_restore(Options, Dump) -> Trie. %% @doc Lookup the topic key. Create a new one, if not found. --spec topic_key(trie(), threshold_fun(), [binary()]) -> msg_storage_key(). +-spec topic_key(trie(), threshold_fun(), [binary() | '']) -> msg_storage_key(). topic_key(Trie, ThresholdFun, Tokens) -> do_topic_key(Trie, ThresholdFun, 0, ?PREFIX, Tokens, []). 
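The '' token exists because MQTT permits empty topic levels (for example a//b), and emqx_topic:words/1 represents an empty level as the atom '' rather than a binary, so the trie has to treat it as an ordinary non-wildcard token. A small sketch of the expectation (Trie and ThresholdFun assumed to be set up as in the tests below):

    [<<"a">>, '', <<"b">>] = emqx_topic:words(<<"a//b">>),
    {_StaticKey, _Varying} = emqx_ds_lts:topic_key(Trie, ThresholdFun, [<<"a">>, '', <<"b">>]).
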
@@ -363,12 +363,12 @@ emanating(#trie{trie = Tab}, State, ?EOT) -> [#trans{next = Next}] -> [{?EOT, Next}]; [] -> [] end; -emanating(#trie{trie = Tab}, State, Bin) when is_binary(Bin) -> +emanating(#trie{trie = Tab}, State, Token) when is_binary(Token); Token =:= '' -> [ {Edge, Next} || #trans{key = {_, Edge}, next = Next} <- ets:lookup(Tab, {State, ?PLUS}) ++ - ets:lookup(Tab, {State, Bin}) + ets:lookup(Tab, {State, Token}) ]. %%================================================================================ @@ -533,6 +533,7 @@ topic_match_test() -> {S11, []} = test_key(T, ThresholdFun, [1, 1]), {S12, []} = test_key(T, ThresholdFun, [1, 2]), {S111, []} = test_key(T, ThresholdFun, [1, 1, 1]), + {S11e, []} = test_key(T, ThresholdFun, [1, 1, '']), %% Match concrete topics: assert_match_topics(T, [1], [{S1, []}]), assert_match_topics(T, [1, 1], [{S11, []}]), @@ -540,14 +541,16 @@ topic_match_test() -> %% Match topics with +: assert_match_topics(T, [1, '+'], [{S11, []}, {S12, []}]), assert_match_topics(T, [1, '+', 1], [{S111, []}]), + assert_match_topics(T, [1, '+', ''], [{S11e, []}]), %% Match topics with #: assert_match_topics(T, [1, '#'], [{S1, []}, {S11, []}, {S12, []}, - {S111, []}]), + {S111, []}, {S11e, []}]), assert_match_topics(T, [1, 1, '#'], [{S11, []}, - {S111, []}]), + {S111, []}, + {S11e, []}]), %% Now add learned wildcards: {S21, []} = test_key(T, ThresholdFun, [2, 1]), {S22, []} = test_key(T, ThresholdFun, [2, 2]), @@ -587,7 +590,10 @@ assert_match_topics(Trie, Filter0, Expected) -> %% erlfmt-ignore test_key(Trie, Threshold, Topic0) -> - Topic = [integer_to_binary(I) || I <- Topic0], + Topic = lists:map(fun('') -> ''; + (I) -> integer_to_binary(I) + end, + Topic0), Ret = topic_key(Trie, Threshold, Topic), %% Test idempotency: Ret1 = topic_key(Trie, Threshold, Topic), From 8dfcb69e52806206108b1c89dac432c9d8f8ac5d Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Fri, 24 Nov 2023 04:17:28 +0100 Subject: [PATCH 077/101] feat(ds): Discard session when client connects with CleanStart=1 --- apps/emqx/src/emqx_persistent_session_ds.erl | 18 ++++++++++++------ apps/emqx/src/emqx_shared_sub.erl | 4 +++- .../test/emqx_persistent_session_SUITE.erl | 14 ++++++-------- 3 files changed, 21 insertions(+), 15 deletions(-) diff --git a/apps/emqx/src/emqx_persistent_session_ds.erl b/apps/emqx/src/emqx_persistent_session_ds.erl index eb4eb0b1b..514cc5bcf 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.erl +++ b/apps/emqx/src/emqx_persistent_session_ds.erl @@ -142,13 +142,19 @@ open(#{clientid := ClientID} = _ClientInfo, ConnInfo) -> %% somehow isolate those idling not-yet-expired sessions into a separate process %% space, and move this call back into `emqx_cm` where it belongs. ok = emqx_cm:discard_session(ClientID), - case session_open(ClientID) of - Session0 = #{} -> - ensure_timers(), - ReceiveMaximum = receive_maximum(ConnInfo), - Session = Session0#{receive_maximum => ReceiveMaximum}, - {true, Session, []}; + case maps:get(clean_start, ConnInfo, false) of false -> + case session_open(ClientID) of + Session0 = #{} -> + ensure_timers(), + ReceiveMaximum = receive_maximum(ConnInfo), + Session = Session0#{receive_maximum => ReceiveMaximum}, + {true, Session, []}; + false -> + false + end; + true -> + session_drop(ClientID), false end. 
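The practical effect of the clause above: a client that reconnects with clean_start=true always gets a fresh session, since any existing durable session is dropped before the new one is created. Sketch with emqtt, using the same options as the suite below (expiry value made up):

    {ok, C} = emqtt:start_link([
        {clientid, ClientId},
        {proto_ver, v5},
        {clean_start, true},
        {properties, #{'Session-Expiry-Interval' => 30}}
    ]),
    {ok, _} = emqtt:connect(C),
    %% at this point any previous durable session for ClientId has been dropped
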
diff --git a/apps/emqx/src/emqx_shared_sub.erl b/apps/emqx/src/emqx_shared_sub.erl index 89a785590..0a6538282 100644 --- a/apps/emqx/src/emqx_shared_sub.erl +++ b/apps/emqx/src/emqx_shared_sub.erl @@ -242,7 +242,9 @@ with_redispatch_to(Msg, Group, Topic) -> is_redispatch_needed(#message{qos = ?QOS_0}) -> false; is_redispatch_needed(#message{headers = #{redispatch_to := ?REDISPATCH_TO(_, _)}}) -> - true. + true; +is_redispatch_needed(#message{}) -> + false. %% @doc Redispatch shared deliveries to other members in the group. redispatch(Messages0) -> diff --git a/apps/emqx/test/emqx_persistent_session_SUITE.erl b/apps/emqx/test/emqx_persistent_session_SUITE.erl index f3af45fe0..1be929c7f 100644 --- a/apps/emqx/test/emqx_persistent_session_SUITE.erl +++ b/apps/emqx/test/emqx_persistent_session_SUITE.erl @@ -745,9 +745,6 @@ t_publish_while_client_is_gone(Config) -> ok = emqtt:disconnect(Client2). -%% TODO: don't skip after QoS2 support is added to DS. -t_clean_start_drops_subscriptions(init, Config) -> skip_ds_tc(Config); -t_clean_start_drops_subscriptions('end', _Config) -> ok. t_clean_start_drops_subscriptions(Config) -> %% 1. A persistent session is started and disconnected. %% 2. While disconnected, a message is published and persisted. @@ -773,13 +770,13 @@ t_clean_start_drops_subscriptions(Config) -> | Config ]), {ok, _} = emqtt:ConnFun(Client1), - {ok, _, [2]} = emqtt:subscribe(Client1, STopic, qos2), + {ok, _, [1]} = emqtt:subscribe(Client1, STopic, qos1), ok = emqtt:disconnect(Client1), maybe_kill_connection_process(ClientId, Config), %% 2. - ok = publish(Topic, Payload1), + ok = publish(Topic, Payload1, ?QOS_1), %% 3. {ok, Client2} = emqtt:start_link([ @@ -791,9 +788,10 @@ t_clean_start_drops_subscriptions(Config) -> ]), {ok, _} = emqtt:ConnFun(Client2), ?assertEqual(0, client_info(session_present, Client2)), - {ok, _, [2]} = emqtt:subscribe(Client2, STopic, qos2), + {ok, _, [1]} = emqtt:subscribe(Client2, STopic, qos1), - ok = publish(Topic, Payload2), + timer:sleep(100), + ok = publish(Topic, Payload2, ?QOS_1), [Msg1] = receive_messages(1), ?assertEqual({ok, iolist_to_binary(Payload2)}, maps:find(payload, Msg1)), @@ -810,7 +808,7 @@ t_clean_start_drops_subscriptions(Config) -> ]), {ok, _} = emqtt:ConnFun(Client3), - ok = publish(Topic, Payload3), + ok = publish(Topic, Payload3, ?QOS_1), [Msg2] = receive_messages(1), ?assertEqual({ok, iolist_to_binary(Payload3)}, maps:find(payload, Msg2)), From e616e0746a300df1759910c348789e8bcf3e4fee Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Fri, 24 Nov 2023 04:18:29 +0100 Subject: [PATCH 078/101] feat(ds): Implement a function for dumping persistent session state --- apps/emqx/src/emqx_persistent_session_ds.erl | 48 ++++++++++++++++---- 1 file changed, 39 insertions(+), 9 deletions(-) diff --git a/apps/emqx/src/emqx_persistent_session_ds.erl b/apps/emqx/src/emqx_persistent_session_ds.erl index 514cc5bcf..76b54e34a 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.erl +++ b/apps/emqx/src/emqx_persistent_session_ds.erl @@ -70,6 +70,8 @@ do_ensure_all_iterators_closed/1 ]). +-export([print_session/1]). + -ifdef(TEST). -export([ session_open/1, @@ -226,6 +228,25 @@ info(await_rel_timeout, #{props := Conf}) -> stats(Session) -> info(?STATS_KEYS, Session). +%% Debug/troubleshooting +-spec print_session(emqx_types:client_id()) -> map() | undefined. 
+print_session(ClientId) -> + catch ro_transaction( + fun() -> + case mnesia:read(?SESSION_TAB, ClientId) of + [Session] -> + #{ + session => Session, + streams => mnesia:read(?SESSION_STREAM_TAB, ClientId), + pubranges => session_read_pubranges(ClientId), + subscriptions => session_read_subscriptions(ClientId) + }; + [] -> + undefined + end + end + ). + %%-------------------------------------------------------------------- %% Client -> Broker: SUBSCRIBE / UNSUBSCRIBE %%-------------------------------------------------------------------- @@ -563,7 +584,7 @@ session_drop(DSSessionId) -> -spec session_drop_subscriptions(id()) -> ok. session_drop_subscriptions(DSSessionId) -> - Subscriptions = session_read_subscriptions(DSSessionId), + Subscriptions = session_read_subscriptions(DSSessionId, write), lists:foreach( fun(#ds_sub{id = DSSubId} = DSSub) -> TopicFilter = subscription_id_to_topic_filter(DSSubId), @@ -626,13 +647,27 @@ session_del_subscription(DSSessionId, TopicFilter) -> session_del_subscription(#ds_sub{id = DSSubId}) -> mnesia:delete(?SESSION_SUBSCRIPTIONS_TAB, DSSubId, write). -session_read_subscriptions(DSSessionId) -> +session_read_subscriptions(DSSessionID) -> + session_read_subscriptions(DSSessionID, read). + +session_read_subscriptions(DSSessionId, LockKind) -> MS = ets:fun2ms( fun(Sub = #ds_sub{id = {Sess, _}}) when Sess =:= DSSessionId -> Sub end ), - mnesia:select(?SESSION_SUBSCRIPTIONS_TAB, MS, read). + mnesia:select(?SESSION_SUBSCRIPTIONS_TAB, MS, LockKind). + +session_read_pubranges(DSSessionID) -> + session_read_pubranges(DSSessionID, read). + +session_read_pubranges(DSSessionId, LockKind) -> + MS = ets:fun2ms( + fun(#ds_pubrange{id = {Sess, First}}) when Sess =:= DSSessionId -> + {DSSessionId, First} + end + ), + mnesia:select(?SESSION_PUBRANGE_TAB, MS, LockKind). -spec new_subscription_id(id(), topic_filter()) -> {subscription_id(), integer()}. new_subscription_id(DSSessionId, TopicFilter) -> @@ -735,12 +770,7 @@ session_drop_streams(DSSessionId) -> %% must be called inside a transaction -spec session_drop_pubranges(id()) -> ok. 
session_drop_pubranges(DSSessionId) -> - MS = ets:fun2ms( - fun(#ds_pubrange{id = {DSSessionId0, First}}) when DSSessionId0 =:= DSSessionId -> - {DSSessionId, First} - end - ), - RangeIds = mnesia:select(?SESSION_PUBRANGE_TAB, MS, write), + RangeIds = session_read_pubranges(DSSessionId, write), lists:foreach( fun(RangeId) -> mnesia:delete(?SESSION_PUBRANGE_TAB, RangeId, write) From d2901afd1b4f1734dfdc9479c7b3a6f682e66ad9 Mon Sep 17 00:00:00 2001 From: Stefan Strigler Date: Thu, 16 Nov 2023 09:41:56 +0100 Subject: [PATCH 079/101] fix(emqx_bridge_kafka): match example in api schema --- .../src/emqx_bridge_kafka.erl | 85 ++++++++++--------- 1 file changed, 43 insertions(+), 42 deletions(-) diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl index 5b3e3ca01..93515b5db 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl @@ -112,16 +112,15 @@ values({put, connector}) -> values({put, KafkaType}) -> maps:merge(values(common_config), values(KafkaType)); values(bridge_v2_producer) -> - maps:merge( - #{ - enable => true, - connector => <<"my_kafka_producer_connector">>, - resource_opts => #{ - health_check_interval => "32s" - } - }, - values(producer) - ); + #{ + enable => true, + connector => <<"my_kafka_producer_connector">>, + parameters => values(producer_values), + local_topic => <<"mqtt/local/topic">>, + resource_opts => #{ + health_check_interval => "32s" + } + }; values(common_config) -> #{ authentication => #{ @@ -143,40 +142,42 @@ values(common_config) -> }; values(producer) -> #{ - kafka => #{ - topic => <<"kafka-topic">>, - message => #{ - key => <<"${.clientid}">>, - value => <<"${.}">>, - timestamp => <<"${.timestamp}">> - }, - max_batch_bytes => <<"896KB">>, - compression => <<"no_compression">>, - partition_strategy => <<"random">>, - required_acks => <<"all_isr">>, - partition_count_refresh_interval => <<"60s">>, - kafka_headers => <<"${pub_props}">>, - kafka_ext_headers => [ - #{ - kafka_ext_header_key => <<"clientid">>, - kafka_ext_header_value => <<"${clientid}">> - }, - #{ - kafka_ext_header_key => <<"topic">>, - kafka_ext_header_value => <<"${topic}">> - } - ], - kafka_header_value_encode_mode => none, - max_inflight => 10, - buffer => #{ - mode => <<"hybrid">>, - per_partition_limit => <<"2GB">>, - segment_bytes => <<"100MB">>, - memory_overload_protection => true - } - }, + kafka => values(producer_values), local_topic => <<"mqtt/local/topic">> }; +values(producer_values) -> + #{ + topic => <<"kafka-topic">>, + message => #{ + key => <<"${.clientid}">>, + value => <<"${.}">>, + timestamp => <<"${.timestamp}">> + }, + max_batch_bytes => <<"896KB">>, + compression => <<"no_compression">>, + partition_strategy => <<"random">>, + required_acks => <<"all_isr">>, + partition_count_refresh_interval => <<"60s">>, + kafka_headers => <<"${pub_props}">>, + kafka_ext_headers => [ + #{ + kafka_ext_header_key => <<"clientid">>, + kafka_ext_header_value => <<"${clientid}">> + }, + #{ + kafka_ext_header_key => <<"topic">>, + kafka_ext_header_value => <<"${topic}">> + } + ], + kafka_header_value_encode_mode => none, + max_inflight => 10, + buffer => #{ + mode => <<"hybrid">>, + per_partition_limit => <<"2GB">>, + segment_bytes => <<"100MB">>, + memory_overload_protection => true + } + }; values(consumer) -> #{ kafka => #{ From 9ebbc9bbea763db9021904630ee2bcca88311f7c Mon Sep 17 00:00:00 2001 From: Stefan Strigler Date: Thu, 16 Nov 2023 13:57:51 +0100 Subject: [PATCH 
080/101] refactor(emqx_bridge): use more simplistic function to validate connector --- apps/emqx_bridge/src/emqx_bridge_v2.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_bridge_v2.erl b/apps/emqx_bridge/src/emqx_bridge_v2.erl index 6f296c63c..ba557d0b1 100644 --- a/apps/emqx_bridge/src/emqx_bridge_v2.erl +++ b/apps/emqx_bridge/src/emqx_bridge_v2.erl @@ -410,10 +410,10 @@ uninstall_bridge_v2( CreationOpts = emqx_resource:fetch_creation_opts(Config), ok = emqx_resource_buffer_worker_sup:stop_workers(BridgeV2Id, CreationOpts), ok = emqx_resource:clear_metrics(BridgeV2Id), - case combine_connector_and_bridge_v2_config(BridgeV2Type, BridgeName, Config) of + case validate_referenced_connectors(BridgeV2Type, ConnectorName, BridgeName) of {error, _} -> ok; - _CombinedConfig -> + ok -> %% Deinstall from connector ConnectorId = emqx_connector_resource:resource_id( connector_type(BridgeV2Type), ConnectorName From e95ec5b15052d17262a3b122e097b1fcdd07f51f Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Fri, 24 Nov 2023 09:24:21 -0300 Subject: [PATCH 081/101] test: fix another flaky test --- .../test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl index b0e4e4ac8..d82a61fee 100644 --- a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl +++ b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl @@ -1477,7 +1477,7 @@ t_pull_worker_death(Config) -> [PullWorkerPid | _] = get_pull_worker_pids(Config), Ref = monitor(process, PullWorkerPid), - sys:terminate(PullWorkerPid, die), + sys:terminate(PullWorkerPid, die, 20_000), receive {'DOWN', Ref, process, PullWorkerPid, _} -> ok From 4e077c951b10bf363bc6934386b2e91f100ebbc3 Mon Sep 17 00:00:00 2001 From: Stefan Strigler Date: Wed, 15 Nov 2023 15:49:56 +0100 Subject: [PATCH 082/101] feat(emqx_bridge_mongodb): port mongodb to shared connector and actions --- apps/emqx_bridge/src/emqx_action_info.erl | 12 +- apps/emqx_bridge/src/emqx_bridge.erl | 18 +- apps/emqx_bridge/src/emqx_bridge_lib.erl | 14 +- apps/emqx_bridge/src/emqx_bridge_v2.erl | 10 +- .../test/emqx_bridge_v2_testlib.erl | 34 +-- .../src/emqx_bridge_mongodb.app.src | 2 +- .../src/emqx_bridge_mongodb.erl | 207 +++++++++++++--- .../src/emqx_bridge_mongodb_action_info.erl | 95 +++++++ .../src/emqx_bridge_mongodb_connector.erl | 117 ++++++--- .../test/emqx_bridge_mongodb_SUITE.erl | 42 +++- .../test/emqx_bridge_v2_mongodb_SUITE.erl | 232 ++++++++++++++++++ .../src/schema/emqx_connector_ee_schema.erl | 16 +- .../src/schema/emqx_connector_schema.erl | 12 +- apps/emqx_mongodb/src/emqx_mongodb.erl | 82 ++++--- rel/i18n/emqx_bridge_mongodb.hocon | 21 ++ 15 files changed, 763 insertions(+), 151 deletions(-) create mode 100644 apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb_action_info.erl create mode 100644 apps/emqx_bridge_mongodb/test/emqx_bridge_v2_mongodb_SUITE.erl diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index 12988b163..129142f24 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -77,6 +77,7 @@ hard_coded_action_info_modules_ee() -> emqx_bridge_confluent_producer_action_info, emqx_bridge_gcp_pubsub_producer_action_info, emqx_bridge_kafka_action_info, + 
emqx_bridge_mongodb_action_info, emqx_bridge_syskeeper_action_info ]. -else. @@ -116,14 +117,17 @@ bridge_v1_type_to_action_type(Type) -> action_type_to_bridge_v1_type(Bin, Conf) when is_binary(Bin) -> action_type_to_bridge_v1_type(binary_to_existing_atom(Bin), Conf); -action_type_to_bridge_v1_type(ActionType, Conf) -> +action_type_to_bridge_v1_type(ActionType, ActionConf) -> ActionInfoMap = info_map(), ActionTypeToBridgeV1Type = maps:get(action_type_to_bridge_v1_type, ActionInfoMap), case maps:get(ActionType, ActionTypeToBridgeV1Type, undefined) of undefined -> ActionType; BridgeV1TypeFun when is_function(BridgeV1TypeFun) -> - BridgeV1TypeFun(get_confs(ActionType, Conf)); + case get_confs(ActionType, ActionConf) of + {ConnectorConfig, ActionConfig} -> BridgeV1TypeFun({ConnectorConfig, ActionConfig}); + undefined -> ActionType + end; BridgeV1Type -> BridgeV1Type end. @@ -131,7 +135,9 @@ action_type_to_bridge_v1_type(ActionType, Conf) -> get_confs(ActionType, #{<<"connector">> := ConnectorName} = ActionConfig) -> ConnectorType = action_type_to_connector_type(ActionType), ConnectorConfig = emqx_conf:get_raw([connectors, ConnectorType, ConnectorName]), - {ActionConfig, ConnectorConfig}. + {ConnectorConfig, ActionConfig}; +get_confs(_, _) -> + undefined. %% This function should return true for all inputs that are bridge V1 types for %% bridges that have been refactored to bridge V2s, and for all all bridge V2 diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index 0e116589b..f557210ed 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -237,9 +237,15 @@ send_to_matched_egress_bridges_loop(Topic, Msg, [Id | Ids]) -> send_to_matched_egress_bridges_loop(Topic, Msg, Ids). send_message(BridgeId, Message) -> - {BridgeType, BridgeName} = emqx_bridge_resource:parse_bridge_id(BridgeId), - ResId = emqx_bridge_resource:resource_id(BridgeType, BridgeName), - send_message(BridgeType, BridgeName, ResId, Message, #{}). + {BridgeV1Type, BridgeName} = emqx_bridge_resource:parse_bridge_id(BridgeId), + case emqx_bridge_v2:is_bridge_v2_type(BridgeV1Type) of + true -> + BridgeV2Type = emqx_bridge_v2:bridge_v1_type_to_bridge_v2_type(BridgeV1Type), + emqx_bridge_v2:send_message(BridgeV2Type, BridgeName, Message, #{}); + false -> + ResId = emqx_bridge_resource:resource_id(BridgeV1Type, BridgeName), + send_message(BridgeV1Type, BridgeName, ResId, Message, #{}) + end. send_message(BridgeType, BridgeName, ResId, Message, QueryOpts0) -> case emqx:get_config([?ROOT_KEY, BridgeType, BridgeName], not_found) of @@ -377,8 +383,8 @@ disable_enable(Action, BridgeType0, BridgeName) when ) end. 
-create(BridgeType0, BridgeName, RawConf) -> - BridgeType = upgrade_type(BridgeType0), +create(BridgeV1Type, BridgeName, RawConf) -> + BridgeType = upgrade_type(BridgeV1Type), ?SLOG(debug, #{ bridge_action => create, bridge_type => BridgeType, @@ -387,7 +393,7 @@ create(BridgeType0, BridgeName, RawConf) -> }), case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of true -> - emqx_bridge_v2:bridge_v1_split_config_and_create(BridgeType, BridgeName, RawConf); + emqx_bridge_v2:bridge_v1_split_config_and_create(BridgeV1Type, BridgeName, RawConf); false -> emqx_conf:update( emqx_bridge:config_key_path() ++ [BridgeType, BridgeName], diff --git a/apps/emqx_bridge/src/emqx_bridge_lib.erl b/apps/emqx_bridge/src/emqx_bridge_lib.erl index 04b3378ce..ed8e918fa 100644 --- a/apps/emqx_bridge/src/emqx_bridge_lib.erl +++ b/apps/emqx_bridge/src/emqx_bridge_lib.erl @@ -78,6 +78,14 @@ external_ids(Type, Name) -> [external_id(Type0, Name), external_id(Type, Name)] end. +get_conf(BridgeType, BridgeName) -> + case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of + true -> + emqx_conf:get_raw([actions, BridgeType, BridgeName]); + false -> + undefined + end. + %% Creates the external id for the bridge_v2 that is used by the rule actions %% to refer to the bridge_v2 external_id(BridgeType, BridgeName) -> @@ -87,9 +95,3 @@ external_id(BridgeType, BridgeName) -> bin(Bin) when is_binary(Bin) -> Bin; bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8). - -get_conf(BridgeType, BridgeName) -> - case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of - true -> emqx_conf:get_raw([actions, BridgeType, BridgeName]); - false -> emqx_conf:get_raw([bridges, BridgeType, BridgeName]) - end. diff --git a/apps/emqx_bridge/src/emqx_bridge_v2.erl b/apps/emqx_bridge/src/emqx_bridge_v2.erl index ba557d0b1..54ccf1b24 100644 --- a/apps/emqx_bridge/src/emqx_bridge_v2.erl +++ b/apps/emqx_bridge/src/emqx_bridge_v2.erl @@ -1053,8 +1053,8 @@ bridge_v1_is_valid(BridgeV1Type, BridgeName) -> bridge_v1_type_to_bridge_v2_type(Type) -> emqx_action_info:bridge_v1_type_to_action_type(Type). -bridge_v2_type_to_bridge_v1_type(Type, Conf) -> - emqx_action_info:action_type_to_bridge_v1_type(Type, Conf). +bridge_v2_type_to_bridge_v1_type(ActionType, ActionConf) -> + emqx_action_info:action_type_to_bridge_v1_type(ActionType, ActionConf). is_bridge_v2_type(Type) -> emqx_action_info:is_action_type(Type). 
@@ -1065,8 +1065,8 @@ bridge_v1_list_and_transform() -> bridge_v1_lookup_and_transform(ActionType, Name) -> case lookup(ActionType, Name) of - {ok, #{raw_config := #{<<"connector">> := ConnectorName}} = ActionConfig} -> - BridgeV1Type = ?MODULE:bridge_v2_type_to_bridge_v1_type(ActionType, ActionConfig), + {ok, #{raw_config := #{<<"connector">> := ConnectorName} = RawConfig} = ActionConfig} -> + BridgeV1Type = ?MODULE:bridge_v2_type_to_bridge_v1_type(ActionType, RawConfig), case ?MODULE:bridge_v1_is_valid(BridgeV1Type, Name) of true -> ConnectorType = connector_type(ActionType), @@ -1244,6 +1244,8 @@ split_and_validate_bridge_v1_config(BridgeV1Type, BridgeName, RawConf, PreviousR #{bin(BridgeV2Type) => #{bin(BridgeName) => PreviousRawConf}}, PreviousRawConf =/= undefined ), + %% [FIXME] this will loop through all connector types, instead pass the + %% connector type and just do it for that one Output = emqx_connector_schema:transform_bridges_v1_to_connectors_and_bridges_v2( FakeGlobalConfig ), diff --git a/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl b/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl index 5cb9b043f..1ed0eb31b 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl @@ -552,18 +552,24 @@ t_on_get_status(Config, Opts) -> _Attempts = 20, ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) ), - emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> - ct:sleep(500), - ?retry( - _Interval0 = 200, - _Attempts0 = 10, - ?assertEqual({ok, FailureStatus}, emqx_resource_manager:health_check(ResourceId)) - ) - end), - %% Check that it recovers itself. - ?retry( - _Sleep = 1_000, - _Attempts = 20, - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) - ), + case ProxyHost of + undefined -> + ok; + _ -> + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?retry( + _Interval0 = 100, + _Attempts0 = 20, + ?assertEqual( + {ok, FailureStatus}, emqx_resource_manager:health_check(ResourceId) + ) + ) + end), + %% Check that it recovers itself. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ) + end, ok. diff --git a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src index 5545ac967..f361d5276 100644 --- a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src +++ b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src @@ -9,7 +9,7 @@ emqx_resource, emqx_mongodb ]}, - {env, []}, + {env, [{emqx_action_info_modules, [emqx_bridge_mongodb_action_info]}]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.erl b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.erl index b108f654f..ac7aa6280 100644 --- a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.erl +++ b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.erl @@ -12,7 +12,9 @@ %% emqx_bridge_enterprise "callbacks" -export([ - conn_bridge_examples/1 + bridge_v2_examples/1, + conn_bridge_examples/1, + connector_examples/1 ]). %% hocon_schema callbacks @@ -27,10 +29,13 @@ %% hocon_schema API %%================================================================================================= +%% [TODO] Namespace should be different depending on whether this is used for a +%% connector, an action or a legacy bridge type. namespace() -> "bridge_mongodb". roots() -> + %% ??? []. 
fields("config") -> @@ -44,6 +49,18 @@ fields("config") -> #{required => true, desc => ?DESC(emqx_resource_schema, "creation_opts")} )} ]; +fields("config_connector") -> + emqx_connector_schema:common_fields() ++ + [ + {parameters, + mk( + hoconsc:union([ + ref(emqx_mongodb, "connector_" ++ T) + || T <- ["single", "sharded", "rs"] + ]), + #{required => true, desc => ?DESC("mongodb_parameters")} + )} + ] ++ emqx_mongodb:fields(mongodb); fields("creation_opts") -> %% so far, mongodb connector does not support batching %% but we cannot delete this field due to compatibility reasons @@ -55,12 +72,47 @@ fields("creation_opts") -> desc => ?DESC("batch_size") }} ]); +fields(action) -> + {mongodb, + mk( + hoconsc:map(name, ref(?MODULE, mongodb_action)), + #{desc => <<"MongoDB Action Config">>, required => false} + )}; +fields(mongodb_action) -> + emqx_bridge_v2_schema:make_producer_action_schema( + mk(ref(?MODULE, action_parameters), #{ + required => true, desc => ?DESC(action_parameters) + }) + ); +fields(action_parameters) -> + [ + {collection, mk(binary(), #{desc => ?DESC("collection"), default => <<"mqtt">>})}, + {payload_template, mk(binary(), #{required => false, desc => ?DESC("payload_template")})} + ]; +fields(resource_opts) -> + fields("creation_opts"); fields(mongodb_rs) -> emqx_mongodb:fields(rs) ++ fields("config"); fields(mongodb_sharded) -> emqx_mongodb:fields(sharded) ++ fields("config"); fields(mongodb_single) -> emqx_mongodb:fields(single) ++ fields("config"); +fields("post_connector") -> + type_and_name_fields(mongodb) ++ + fields("config_connector"); +fields("put_connector") -> + fields("config_connector"); +fields("get_connector") -> + emqx_bridge_schema:status_fields() ++ + fields("post_connector"); +fields("get_bridge_v2") -> + emqx_bridge_schema:status_fields() ++ + fields("post_bridge_v2"); +fields("post_bridge_v2") -> + type_and_name_fields(mongodb) ++ + fields(mongodb_action); +fields("put_bridge_v2") -> + fields(mongodb_action); fields("post_rs") -> fields(mongodb_rs) ++ type_and_name_fields(mongodb_rs); fields("post_sharded") -> @@ -86,6 +138,16 @@ fields("get_single") -> fields(mongodb_single) ++ type_and_name_fields(mongodb_single). +bridge_v2_examples(Method) -> + [ + #{ + <<"mongodb">> => #{ + summary => <<"MongoDB Action">>, + value => action_values(Method) + } + } + ]. + conn_bridge_examples(Method) -> [ #{ @@ -108,16 +170,46 @@ conn_bridge_examples(Method) -> } ]. +connector_examples(Method) -> + [ + #{ + <<"mongodb_rs">> => #{ + summary => <<"MongoDB Replica Set Connector">>, + value => connector_values(mongodb_rs, Method) + } + }, + #{ + <<"mongodb_sharded">> => #{ + summary => <<"MongoDB Sharded Connector">>, + value => connector_values(mongodb_sharded, Method) + } + }, + #{ + <<"mongodb_single">> => #{ + summary => <<"MongoDB Standalone Connector">>, + value => connector_values(mongodb_single, Method) + } + } + ]. 
+ +desc("config_connector") -> + ?DESC("desc_config"); desc("config") -> ?DESC("desc_config"); desc("creation_opts") -> ?DESC(emqx_resource_schema, "creation_opts"); +desc(resource_opts) -> + ?DESC(emqx_resource_schema, "resource_opts"); desc(mongodb_rs) -> ?DESC(mongodb_rs_conf); desc(mongodb_sharded) -> ?DESC(mongodb_sharded_conf); desc(mongodb_single) -> ?DESC(mongodb_single_conf); +desc(mongodb_action) -> + ?DESC(mongodb_action); +desc(action_parameters) -> + ?DESC(action_parameters); desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> ["Configuration for MongoDB using `", string:to_upper(Method), "` method."]; desc(_) -> @@ -133,49 +225,102 @@ type_and_name_fields(MongoType) -> {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})} ]. -values(mongodb_rs = MongoType, Method) -> - TypeOpts = #{ +connector_values(Type, Method) -> + lists:foldl( + fun(M1, M2) -> + maps:merge(M1, M2) + end, + #{ + description => <<"My example connector">>, + parameters => mongo_type_opts(Type) + }, + [ + common_values(), + method_values(mongodb, Method) + ] + ). + +action_values(Method) -> + maps:merge( + method_values(mongodb, Method), + #{ + description => <<"My example action">>, + enable => true, + connector => <<"my_mongodb_connector">>, + parameters => #{ + collection => <<"mycol">> + } + } + ). + +values(MongoType, Method) -> + maps:merge( + mongo_type_opts(MongoType), + bridge_values(MongoType, Method) + ). + +mongo_type_opts(mongodb_rs) -> + #{ + mongo_type => <<"rs">>, servers => <<"localhost:27017, localhost:27018">>, w_mode => <<"safe">>, r_mode => <<"safe">>, replica_set_name => <<"rs">> - }, - values(common, MongoType, Method, TypeOpts); -values(mongodb_sharded = MongoType, Method) -> - TypeOpts = #{ + }; +mongo_type_opts(mongodb_sharded) -> + #{ + mongo_type => <<"sharded">>, servers => <<"localhost:27017, localhost:27018">>, w_mode => <<"safe">> - }, - values(common, MongoType, Method, TypeOpts); -values(mongodb_single = MongoType, Method) -> - TypeOpts = #{ + }; +mongo_type_opts(mongodb_single) -> + #{ + mongo_type => <<"single">>, server => <<"localhost:27017">>, w_mode => <<"safe">> - }, - values(common, MongoType, Method, TypeOpts). + }. -values(common, MongoType, Method, TypeOpts) -> - MongoTypeBin = atom_to_binary(MongoType), - Common = #{ - name => <>, - type => MongoTypeBin, +bridge_values(Type, _Method) -> + %% [FIXME] _Method makes a difference since PUT doesn't allow name and type + %% for connectors. + TypeBin = atom_to_binary(Type), + maps:merge( + #{ + name => <>, + type => TypeBin, + collection => <<"mycol">> + }, + common_values() + ). + +common_values() -> + #{ enable => true, - collection => <<"mycol">>, database => <<"mqtt">>, srv_record => false, pool_size => 8, username => <<"myuser">>, password => <<"******">> - }, - MethodVals = method_values(MongoType, Method), - Vals0 = maps:merge(MethodVals, Common), - maps:merge(Vals0, TypeOpts). + }. -method_values(MongoType, _) -> - ConnectorType = - case MongoType of - mongodb_rs -> <<"rs">>; - mongodb_sharded -> <<"sharded">>; - mongodb_single -> <<"single">> - end, - #{mongo_type => ConnectorType}. +method_values(Type, post) -> + TypeBin = atom_to_binary(Type), + #{ + name => <>, + type => TypeBin + }; +method_values(Type, get) -> + maps:merge( + method_values(Type, post), + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + } + ); +method_values(_Type, put) -> + #{}. 
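With the schema split above, a MongoDB action only holds the per-message parameters plus a reference to the connector it runs on. A rough sketch of creating one programmatically (names and template are made up; emqx_bridge_v2:create/3 is assumed to be the create entry point, as in the v2 test helpers):

    ActionConf = #{
        <<"enable">> => true,
        <<"connector">> => <<"my_mongodb_connector">>,
        <<"parameters">> => #{
            <<"collection">> => <<"mycol">>,
            <<"payload_template">> => <<"${payload}">>
        }
    },
    {ok, _} = emqx_bridge_v2:create(mongodb, my_mongodb_action, ActionConf).
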
diff --git a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb_action_info.erl b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb_action_info.erl new file mode 100644 index 000000000..8bbe5ff3a --- /dev/null +++ b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb_action_info.erl @@ -0,0 +1,95 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_mongodb_action_info). + +-behaviour(emqx_action_info). + +%% behaviour callbacks +-export([ + bridge_v1_config_to_action_config/2, + bridge_v1_config_to_connector_config/1, + connector_action_config_to_bridge_v1_config/2, + action_type_name/0, + bridge_v1_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +%% dynamic callback +-export([ + bridge_v1_type_name_fun/1 +]). + +-import(emqx_utils_conv, [bin/1]). + +-define(SCHEMA_MODULE, emqx_bridge_mongodb). + +connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) -> + fix_v1_type( + maps:merge( + maps:without( + [<<"connector">>], + map_unindent(<<"parameters">>, ActionConfig) + ), + map_unindent(<<"parameters">>, ConnectorConfig) + ) + ). + +fix_v1_type(#{<<"mongo_type">> := MongoType} = Conf) -> + Conf#{<<"type">> => v1_type(MongoType)}. + +bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName) -> + ActionTopLevelKeys = schema_keys(mongodb_action), + ActionParametersKeys = schema_keys(action_parameters), + ActionKeys = ActionTopLevelKeys ++ ActionParametersKeys, + ActionConfig = make_config_map(ActionKeys, ActionParametersKeys, BridgeV1Config), + ActionConfig#{<<"connector">> => ConnectorName}. + +bridge_v1_config_to_connector_config(BridgeV1Config) -> + ActionTopLevelKeys = schema_keys(mongodb_action), + ActionParametersKeys = schema_keys(action_parameters), + ActionKeys = ActionTopLevelKeys ++ ActionParametersKeys, + ConnectorTopLevelKeys = schema_keys("config_connector"), + ConnectorKeys = maps:keys(BridgeV1Config) -- (ActionKeys -- ConnectorTopLevelKeys), + ConnectorParametersKeys = ConnectorKeys -- ConnectorTopLevelKeys, + make_config_map(ConnectorKeys, ConnectorParametersKeys, BridgeV1Config). + +make_config_map(PickKeys, IndentKeys, Config) -> + Conf0 = maps:with(PickKeys, Config), + map_indent(<<"parameters">>, IndentKeys, Conf0). + +bridge_v1_type_name() -> + {fun ?MODULE:bridge_v1_type_name_fun/1, bridge_v1_type_names()}. + +action_type_name() -> mongodb. + +connector_type_name() -> mongodb. + +schema_module() -> ?SCHEMA_MODULE. + +bridge_v1_type_names() -> [mongodb_rs, mongodb_sharded, mongodb_single]. + +bridge_v1_type_name_fun({#{<<"parameters">> := #{<<"mongo_type">> := MongoType}}, _}) -> + v1_type(MongoType). + +v1_type(<<"rs">>) -> mongodb_rs; +v1_type(<<"sharded">>) -> mongodb_sharded; +v1_type(<<"single">>) -> mongodb_single. + +map_unindent(Key, Map) -> + maps:merge( + maps:get(Key, Map), + maps:remove(Key, Map) + ). + +map_indent(IndentKey, PickKeys, Map) -> + maps:put( + IndentKey, + maps:with(PickKeys, Map), + maps:without(PickKeys, Map) + ). + +schema_keys(Name) -> + [bin(Key) || Key <- proplists:get_keys(?SCHEMA_MODULE:fields(Name))]. 
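To make the parameter re-nesting helpers above concrete, here is roughly what they do; the keys in this sketch are examples and not part of the module:

    %% map_indent/3 gathers the picked keys under the indent key:
    %%   map_indent(<<"parameters">>, [<<"w_mode">>],
    %%              #{<<"w_mode">> => <<"safe">>, <<"database">> => <<"mqtt">>})
    %%   => #{<<"parameters">> => #{<<"w_mode">> => <<"safe">>},
    %%        <<"database">> => <<"mqtt">>}
    %% map_unindent/2 flattens them back to the top level:
    %%   map_unindent(<<"parameters">>,
    %%                #{<<"parameters">> => #{<<"w_mode">> => <<"safe">>},
    %%                  <<"database">> => <<"mqtt">>})
    %%   => #{<<"w_mode">> => <<"safe">>, <<"database">> => <<"mqtt">>}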
diff --git a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb_connector.erl b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb_connector.erl index 741db9550..d0ea93ebc 100644 --- a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb_connector.erl +++ b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb_connector.erl @@ -6,16 +6,19 @@ -behaviour(emqx_resource). --include_lib("emqx/include/logger.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). %% `emqx_resource' API -export([ + on_remove_channel/3, callback_mode/0, - on_start/2, - on_stop/2, + on_add_channel/4, + on_get_channel_status/3, + on_get_channels/1, + on_get_status/2, on_query/3, - on_get_status/2 + on_start/2, + on_stop/2 ]). %%======================================================================================== @@ -24,44 +27,94 @@ callback_mode() -> emqx_mongodb:callback_mode(). -on_start(InstanceId, Config) -> +on_add_channel( + _InstanceId, + #{channels := Channels} = OldState, + ChannelId, + #{parameters := Parameters} = ChannelConfig0 +) -> + PayloadTemplate0 = maps:get(payload_template, Parameters, undefined), + PayloadTemplate = preprocess_template(PayloadTemplate0), + CollectionTemplateSource = maps:get(collection, Parameters), + CollectionTemplate = preprocess_template(CollectionTemplateSource), + ChannelConfig = maps:merge( + Parameters, + ChannelConfig0#{ + payload_template => PayloadTemplate, + collection_template => CollectionTemplate + } + ), + NewState = OldState#{channels => maps:put(ChannelId, ChannelConfig, Channels)}, + {ok, NewState}. + +on_get_channel_status(InstanceId, _ChannelId, State) -> + case on_get_status(InstanceId, State) of + connected -> + connected; + _ -> + connecting + end. + +on_get_channels(InstanceId) -> + emqx_bridge_v2:get_channels_for_connector(InstanceId). + +on_get_status(InstanceId, _State = #{connector_state := ConnectorState}) -> + emqx_mongodb:on_get_status(InstanceId, ConnectorState). + +on_query(InstanceId, {Channel, Message0}, #{channels := Channels, connector_state := ConnectorState}) -> + #{ + payload_template := PayloadTemplate, + collection_template := CollectionTemplate + } = ChannelState0 = maps:get(Channel, Channels), + ChannelState = ChannelState0#{ + collection => emqx_placeholder:proc_tmpl(CollectionTemplate, Message0) + }, + Message = render_message(PayloadTemplate, Message0), + Res = emqx_mongodb:on_query( + InstanceId, + {Channel, Message}, + maps:merge(ConnectorState, ChannelState) + ), + ?tp(mongo_bridge_connector_on_query_return, #{instance_id => InstanceId, result => Res}), + Res; +on_query(InstanceId, Request, _State = #{connector_state := ConnectorState}) -> + emqx_mongodb:on_query(InstanceId, Request, ConnectorState). + +on_remove_channel(_InstanceId, #{channels := Channels} = State, ChannelId) -> + NewState = State#{channels => maps:remove(ChannelId, Channels)}, + {ok, NewState}. + +on_start(InstanceId, Config0) -> + Config = config_transform(Config0), case emqx_mongodb:on_start(InstanceId, Config) of {ok, ConnectorState} -> - PayloadTemplate0 = maps:get(payload_template, Config, undefined), - PayloadTemplate = preprocess_template(PayloadTemplate0), - CollectionTemplateSource = maps:get(collection, Config), - CollectionTemplate = preprocess_template(CollectionTemplateSource), State = #{ - payload_template => PayloadTemplate, - collection_template => CollectionTemplate, - connector_state => ConnectorState + connector_state => ConnectorState, + channels => #{} }, {ok, State}; Error -> Error end. 
+config_transform(#{parameters := #{mongo_type := MongoType} = Parameters} = Config) -> + maps:put( + type, + connector_type(MongoType), + maps:merge( + maps:remove(parameters, Config), + Parameters + ) + ). + +connector_type(rs) -> mongodb_rs; +connector_type(sharded) -> mongodb_sharded; +connector_type(single) -> mongodb_single. + on_stop(InstanceId, _State = #{connector_state := ConnectorState}) -> - emqx_mongodb:on_stop(InstanceId, ConnectorState). - -on_query(InstanceId, {send_message, Message0}, State) -> - #{ - payload_template := PayloadTemplate, - collection_template := CollectionTemplate, - connector_state := ConnectorState - } = State, - NewConnectorState = ConnectorState#{ - collection => emqx_placeholder:proc_tmpl(CollectionTemplate, Message0) - }, - Message = render_message(PayloadTemplate, Message0), - Res = emqx_mongodb:on_query(InstanceId, {send_message, Message}, NewConnectorState), - ?tp(mongo_bridge_connector_on_query_return, #{result => Res}), - Res; -on_query(InstanceId, Request, _State = #{connector_state := ConnectorState}) -> - emqx_mongodb:on_query(InstanceId, Request, ConnectorState). - -on_get_status(InstanceId, _State = #{connector_state := ConnectorState}) -> - emqx_mongodb:on_get_status(InstanceId, ConnectorState). + ok = emqx_mongodb:on_stop(InstanceId, ConnectorState), + ?tp(mongodb_stopped, #{instance_id => InstanceId}), + ok. %%======================================================================================== %% Helper fns diff --git a/apps/emqx_bridge_mongodb/test/emqx_bridge_mongodb_SUITE.erl b/apps/emqx_bridge_mongodb/test/emqx_bridge_mongodb_SUITE.erl index cedb19b88..d87e1665f 100644 --- a/apps/emqx_bridge_mongodb/test/emqx_bridge_mongodb_SUITE.erl +++ b/apps/emqx_bridge_mongodb/test/emqx_bridge_mongodb_SUITE.erl @@ -132,7 +132,17 @@ init_per_suite(Config) -> end_per_suite(_Config) -> emqx_mgmt_api_test_util:end_suite(), - ok = emqx_common_test_helpers:stop_apps([emqx_mongodb, emqx_bridge, emqx_rule_engine, emqx_conf]), + ok = emqx_common_test_helpers:stop_apps( + [ + emqx_management, + emqx_bridge_mongodb, + emqx_mongodb, + emqx_bridge, + emqx_connector, + emqx_rule_engine, + emqx_conf + ] + ), ok. init_per_testcase(_Testcase, Config) -> @@ -144,6 +154,7 @@ init_per_testcase(_Testcase, Config) -> end_per_testcase(_Testcase, Config) -> clear_db(Config), delete_bridge(Config), + [] = emqx_connector:list(), snabbkaffe:stop(), ok. @@ -157,9 +168,17 @@ start_apps() -> %% we want to make sure they are loaded before %% ekka start in emqx_common_test_helpers:start_apps/1 emqx_common_test_helpers:render_and_load_app_config(emqx_conf), - ok = emqx_common_test_helpers:start_apps([ - emqx_conf, emqx_rule_engine, emqx_bridge, emqx_mongodb - ]). + ok = emqx_common_test_helpers:start_apps( + [ + emqx_conf, + emqx_rule_engine, + emqx_connector, + emqx_bridge, + emqx_mongodb, + emqx_bridge_mongodb, + emqx_management + ] + ). 
ensure_loaded() -> _ = application:load(emqtt), @@ -198,6 +217,7 @@ mongo_config(MongoHost, MongoPort0, rs = Type, Config) -> "\n w_mode = safe" "\n use_legacy_protocol = auto" "\n database = mqtt" + "\n mongo_type = rs" "\n resource_opts = {" "\n query_mode = ~s" "\n worker_pool_size = 1" @@ -224,6 +244,7 @@ mongo_config(MongoHost, MongoPort0, sharded = Type, Config) -> "\n w_mode = safe" "\n use_legacy_protocol = auto" "\n database = mqtt" + "\n mongo_type = sharded" "\n resource_opts = {" "\n query_mode = ~s" "\n worker_pool_size = 1" @@ -253,6 +274,7 @@ mongo_config(MongoHost, MongoPort0, single = Type, Config) -> "\n auth_source = ~s" "\n username = ~s" "\n password = \"file://~s\"" + "\n mongo_type = single" "\n resource_opts = {" "\n query_mode = ~s" "\n worker_pool_size = 1" @@ -290,13 +312,17 @@ create_bridge(Config, Overrides) -> delete_bridge(Config) -> Type = mongo_type_bin(?config(mongo_type, Config)), Name = ?config(mongo_name, Config), - emqx_bridge:remove(Type, Name). + emqx_bridge:check_deps_and_remove(Type, Name, [connector, rule_actions]). create_bridge_http(Params) -> Path = emqx_mgmt_api_test_util:api_path(["bridges"]), AuthHeader = emqx_mgmt_api_test_util:auth_header_(), - case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of - {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + case + emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, #{ + return_all => true + }) + of + {ok, {{_, 201, _}, _, Body}} -> {ok, emqx_utils_json:decode(Body, [return_maps])}; Error -> Error end. @@ -564,8 +590,8 @@ t_get_status_server_selection_too_short(Config) -> ok. t_use_legacy_protocol_option(Config) -> - ResourceID = resource_id(Config), {ok, _} = create_bridge(Config, #{<<"use_legacy_protocol">> => <<"true">>}), + ResourceID = resource_id(Config), ?retry( _Interval0 = 200, _NAttempts0 = 20, diff --git a/apps/emqx_bridge_mongodb/test/emqx_bridge_v2_mongodb_SUITE.erl b/apps/emqx_bridge_mongodb/test/emqx_bridge_v2_mongodb_SUITE.erl new file mode 100644 index 000000000..9fd13c50b --- /dev/null +++ b/apps/emqx_bridge_mongodb/test/emqx_bridge_v2_mongodb_SUITE.erl @@ -0,0 +1,232 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_bridge_v2_mongodb_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). + +-define(BRIDGE_TYPE, mongodb). +-define(BRIDGE_TYPE_BIN, <<"mongodb">>). +-define(CONNECTOR_TYPE, mongodb). +-define(CONNECTOR_TYPE_BIN, <<"mongodb">>). + +-import(emqx_common_test_helpers, [on_exit/1]). +-import(emqx_utils_conv, [bin/1]). 
+ +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + MongoHost = os:getenv("MONGO_SINGLE_HOST", "mongo"), + MongoPort = list_to_integer(os:getenv("MONGO_SINGLE_PORT", "27017")), + case emqx_common_test_helpers:is_tcp_server_available(MongoHost, MongoPort) of + true -> + Apps = emqx_cth_suite:start( + [ + emqx, + emqx_conf, + emqx_connector, + emqx_bridge, + emqx_bridge_mongodb, + emqx_rule_engine, + emqx_management, + {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"} + ], + #{work_dir => emqx_cth_suite:work_dir(Config)} + ), + {ok, Api} = emqx_common_test_http:create_default_app(), + [ + {apps, Apps}, + {api, Api}, + {mongo_host, MongoHost}, + {mongo_port, MongoPort} + | Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_mongo); + _ -> + {skip, no_mongo} + end + end. + +end_per_suite(Config) -> + Apps = ?config(apps, Config), + emqx_cth_suite:stop(Apps), + ok. + +init_per_testcase(TestCase, Config) -> + common_init_per_testcase(TestCase, Config). + +common_init_per_testcase(TestCase, Config) -> + ct:timetrap(timer:seconds(60)), + emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(), + emqx_config:delete_override_conf_files(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + Name = iolist_to_binary([atom_to_binary(TestCase), UniqueNum]), + AuthSource = bin(os:getenv("MONGO_AUTHSOURCE", "admin")), + Username = bin(os:getenv("MONGO_USERNAME", "")), + Password = bin(os:getenv("MONGO_PASSWORD", "")), + Passfile = filename:join(?config(priv_dir, Config), "passfile"), + ok = file:write_file(Passfile, Password), + NConfig = [ + {mongo_authsource, AuthSource}, + {mongo_username, Username}, + {mongo_password, Password}, + {mongo_passfile, Passfile} + | Config + ], + ConnectorConfig = connector_config(Name, NConfig), + BridgeConfig = bridge_config(Name, Name), + ok = snabbkaffe:start_trace(), + [ + {connector_type, ?CONNECTOR_TYPE}, + {connector_name, Name}, + {connector_config, ConnectorConfig}, + {bridge_type, ?BRIDGE_TYPE}, + {bridge_name, Name}, + {bridge_config, BridgeConfig} + | NConfig + ]. + +end_per_testcase(_Testcase, Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(), + emqx_common_test_helpers:call_janitor(60_000), + ok = snabbkaffe:stop(), + ok + end. 
+ +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +connector_config(Name, Config) -> + MongoHost = ?config(mongo_host, Config), + MongoPort = ?config(mongo_port, Config), + AuthSource = ?config(mongo_authsource, Config), + Username = ?config(mongo_username, Config), + PassFile = ?config(mongo_passfile, Config), + InnerConfigMap0 = + #{ + <<"enable">> => true, + <<"database">> => <<"mqtt">>, + <<"parameters">> => + #{ + <<"mongo_type">> => <<"single">>, + <<"server">> => iolist_to_binary([MongoHost, ":", integer_to_binary(MongoPort)]), + <<"w_mode">> => <<"safe">> + }, + <<"pool_size">> => 8, + <<"srv_record">> => false, + <<"username">> => Username, + <<"password">> => iolist_to_binary(["file://", PassFile]), + <<"auth_source">> => AuthSource + }, + InnerConfigMap = serde_roundtrip(InnerConfigMap0), + parse_and_check_connector_config(InnerConfigMap, Name). + +parse_and_check_connector_config(InnerConfigMap, Name) -> + TypeBin = ?CONNECTOR_TYPE_BIN, + RawConf = #{<<"connectors">> => #{TypeBin => #{Name => InnerConfigMap}}}, + #{<<"connectors">> := #{TypeBin := #{Name := Config}}} = + hocon_tconf:check_plain(emqx_connector_schema, RawConf, #{ + required => false, atom_key => false + }), + ct:pal("parsed config: ~p", [Config]), + InnerConfigMap. + +bridge_config(Name, ConnectorId) -> + InnerConfigMap0 = + #{ + <<"enable">> => true, + <<"connector">> => ConnectorId, + <<"parameters">> => + #{}, + <<"local_topic">> => <<"t/aeh">> + %%, + }, + InnerConfigMap = serde_roundtrip(InnerConfigMap0), + parse_and_check_bridge_config(InnerConfigMap, Name). + +%% check it serializes correctly +serde_roundtrip(InnerConfigMap0) -> + IOList = hocon_pp:do(InnerConfigMap0, #{}), + {ok, InnerConfigMap} = hocon:binary(IOList), + InnerConfigMap. + +parse_and_check_bridge_config(InnerConfigMap, Name) -> + TypeBin = ?BRIDGE_TYPE_BIN, + RawConf = #{<<"bridges">> => #{TypeBin => #{Name => InnerConfigMap}}}, + hocon_tconf:check_plain(emqx_bridge_v2_schema, RawConf, #{required => false, atom_key => false}), + InnerConfigMap. + +shared_secret_path() -> + os:getenv("CI_SHARED_SECRET_PATH", "/var/lib/secret"). + +shared_secret(client_keyfile) -> + filename:join([shared_secret_path(), "client.key"]); +shared_secret(client_certfile) -> + filename:join([shared_secret_path(), "client.crt"]); +shared_secret(client_cacertfile) -> + filename:join([shared_secret_path(), "ca.crt"]); +shared_secret(rig_keytab) -> + filename:join([shared_secret_path(), "rig.keytab"]). + +make_message() -> + Time = erlang:unique_integer(), + BinTime = integer_to_binary(Time), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + #{ + clientid => BinTime, + payload => Payload, + timestamp => Time + }. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_start_stop(Config) -> + emqx_bridge_v2_testlib:t_start_stop(Config, mongodb_stopped), + ok. + +t_create_via_http(Config) -> + emqx_bridge_v2_testlib:t_create_via_http(Config), + ok. + +t_on_get_status(Config) -> + emqx_bridge_v2_testlib:t_on_get_status(Config, #{failure_status => connecting}), + ok. + +t_sync_query(Config) -> + ok = emqx_bridge_v2_testlib:t_sync_query( + Config, + fun make_message/0, + fun(Res) -> ?assertEqual(ok, Res) end, + mongo_bridge_connector_on_query_return + ), + ok. 
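The serde_roundtrip/1 helper above only asserts that a config map survives being pretty-printed to HOCON and parsed back. A rough example of the round trip with a toy map; the values are illustrative, and for a flat map like this the result should equal the input:

    Conf0 = #{<<"pool_size">> => 8, <<"srv_record">> => false},
    IOList = hocon_pp:do(Conf0, #{}),   %% HOCON text such as "pool_size = 8\n..."
    {ok, Conf} = hocon:binary(IOList),  %% parsed back into a map with binary keys
    true = (Conf =:= Conf0)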
diff --git a/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl index 6c303dd7e..535917e4e 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl @@ -20,8 +20,8 @@ resource_type(Type) when is_binary(Type) -> resource_type(binary_to_atom(Type, utf8)); -%% We use AEH's Kafka interface. resource_type(azure_event_hub_producer) -> + %% We use AEH's Kafka interface. emqx_bridge_kafka_impl_producer; resource_type(confluent_producer) -> emqx_bridge_kafka_impl_producer; @@ -29,6 +29,8 @@ resource_type(gcp_pubsub_producer) -> emqx_bridge_gcp_pubsub_impl_producer; resource_type(kafka_producer) -> emqx_bridge_kafka_impl_producer; +resource_type(mongodb) -> + emqx_bridge_mongodb_connector; resource_type(syskeeper_forwarder) -> emqx_bridge_syskeeper_connector; resource_type(syskeeper_proxy) -> @@ -83,6 +85,14 @@ connector_structs() -> required => false } )}, + {mongodb, + mk( + hoconsc:map(name, ref(emqx_bridge_mongodb, "config_connector")), + #{ + desc => <<"MongoDB Connector Config">>, + required => false + } + )}, {syskeeper_forwarder, mk( hoconsc:map(name, ref(emqx_bridge_syskeeper_connector, config)), @@ -119,6 +129,7 @@ schema_modules() -> emqx_bridge_confluent_producer, emqx_bridge_gcp_pubsub_producer_schema, emqx_bridge_kafka, + emqx_bridge_mongodb, emqx_bridge_syskeeper_connector, emqx_bridge_syskeeper_proxy ]. @@ -133,12 +144,13 @@ api_schemas(Method) -> api_ref( emqx_bridge_confluent_producer, <<"confluent_producer">>, Method ++ "_connector" ), - api_ref(emqx_bridge_kafka, <<"kafka_producer">>, Method ++ "_connector"), api_ref( emqx_bridge_gcp_pubsub_producer_schema, <<"gcp_pubsub_producer">>, Method ++ "_connector" ), + api_ref(emqx_bridge_kafka, <<"kafka_producer">>, Method ++ "_connector"), + api_ref(emqx_bridge_mongodb, <<"mongodb">>, Method ++ "_connector"), api_ref(emqx_bridge_syskeeper_connector, <<"syskeeper_forwarder">>, Method), api_ref(emqx_bridge_syskeeper_proxy, <<"syskeeper_proxy">>, Method) ]. diff --git a/apps/emqx_connector/src/schema/emqx_connector_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_schema.erl index 2330e5491..765a693e2 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_schema.erl @@ -68,8 +68,9 @@ enterprise_fields_connectors() -> []. connector_type_to_bridge_types(azure_event_hub_producer) -> [azure_event_hub_producer]; connector_type_to_bridge_types(confluent_producer) -> [confluent_producer]; -connector_type_to_bridge_types(gcp_pubsub_producer) -> [gcp_pubsub_producer]; +connector_type_to_bridge_types(gcp_pubsub_producer) -> [gcp_pubsub, gcp_pubsub_producer]; connector_type_to_bridge_types(kafka_producer) -> [kafka, kafka_producer]; +connector_type_to_bridge_types(mongodb) -> [mongodb, mongodb_rs, mongodb_sharded, mongodb_single]; connector_type_to_bridge_types(syskeeper_forwarder) -> [syskeeper_forwarder]; connector_type_to_bridge_types(syskeeper_proxy) -> []. 
@@ -266,8 +267,9 @@ transform_old_style_bridges_to_connector_and_actions_of_type( RawConfigSoFar1 ), %% Add action + ActionType = emqx_action_info:bridge_v1_type_to_action_type(to_bin(BridgeType)), RawConfigSoFar3 = emqx_utils_maps:deep_put( - [actions_config_name(), to_bin(maybe_rename(BridgeType)), BridgeName], + [actions_config_name(), to_bin(ActionType), BridgeName], RawConfigSoFar2, ActionMap ), @@ -286,12 +288,6 @@ transform_bridges_v1_to_connectors_and_bridges_v2(RawConfig) -> ), NewRawConf. -%% v1 uses 'kafka' as bridge type v2 uses 'kafka_producer' -maybe_rename(kafka) -> - kafka_producer; -maybe_rename(Name) -> - Name. - %%====================================================================================== %% HOCON Schema Callbacks %%====================================================================================== diff --git a/apps/emqx_mongodb/src/emqx_mongodb.erl b/apps/emqx_mongodb/src/emqx_mongodb.erl index 3adf52e6d..2c246e506 100644 --- a/apps/emqx_mongodb/src/emqx_mongodb.erl +++ b/apps/emqx_mongodb/src/emqx_mongodb.erl @@ -68,19 +68,10 @@ roots() -> }} ]. -fields(single) -> - [ - {mongo_type, #{ - type => single, - default => single, - desc => ?DESC("single_mongo_type") - }}, - {server, server()}, - {w_mode, fun w_mode/1} - ] ++ mongo_fields(); -fields(rs) -> +fields("connector_rs") -> [ {mongo_type, #{ + required => true, type => rs, default => rs, desc => ?DESC("rs_mongo_type") @@ -89,17 +80,51 @@ fields(rs) -> {w_mode, fun w_mode/1}, {r_mode, fun r_mode/1}, {replica_set_name, fun replica_set_name/1} - ] ++ mongo_fields(); -fields(sharded) -> + ]; +fields("connector_sharded") -> [ {mongo_type, #{ + required => true, type => sharded, default => sharded, desc => ?DESC("sharded_mongo_type") }}, {servers, servers()}, {w_mode, fun w_mode/1} - ] ++ mongo_fields(); + ]; +fields("connector_single") -> + [ + {mongo_type, #{ + required => true, + type => single, + default => single, + desc => ?DESC("single_mongo_type") + }}, + {server, server()}, + {w_mode, fun w_mode/1} + ]; +fields(Type) when Type =:= rs; Type =:= single; Type =:= sharded -> + fields("connector_" ++ atom_to_list(Type)) ++ fields(mongodb); +fields(mongodb) -> + [ + {srv_record, fun srv_record/1}, + {pool_size, fun emqx_connector_schema_lib:pool_size/1}, + {username, fun emqx_connector_schema_lib:username/1}, + {password, emqx_connector_schema_lib:password_field()}, + {use_legacy_protocol, + hoconsc:mk(hoconsc:enum([auto, true, false]), #{ + default => auto, + desc => ?DESC("use_legacy_protocol") + })}, + {auth_source, #{ + type => binary(), + required => false, + desc => ?DESC("auth_source") + }}, + {database, fun emqx_connector_schema_lib:database/1}, + {topology, #{type => hoconsc:ref(?MODULE, topology), required => false}} + ] ++ + emqx_connector_schema_lib:ssl_fields(); fields(topology) -> [ {pool_size, @@ -129,6 +154,12 @@ fields(topology) -> {min_heartbeat_frequency_ms, duration("min_heartbeat_period")} ]. +desc("connector_single") -> + ?DESC("desc_single"); +desc("connector_rs") -> + ?DESC("desc_rs"); +desc("connector_sharded") -> + ?DESC("desc_sharded"); desc(single) -> ?DESC("desc_single"); desc(rs) -> @@ -140,27 +171,6 @@ desc(topology) -> desc(_) -> undefined. 
-mongo_fields() -> - [ - {srv_record, fun srv_record/1}, - {pool_size, fun emqx_connector_schema_lib:pool_size/1}, - {username, fun emqx_connector_schema_lib:username/1}, - {password, emqx_connector_schema_lib:password_field()}, - {use_legacy_protocol, - hoconsc:mk(hoconsc:enum([auto, true, false]), #{ - default => auto, - desc => ?DESC("use_legacy_protocol") - })}, - {auth_source, #{ - type => binary(), - required => false, - desc => ?DESC("auth_source") - }}, - {database, fun emqx_connector_schema_lib:database/1}, - {topology, #{type => hoconsc:ref(?MODULE, topology), required => false}} - ] ++ - emqx_connector_schema_lib:ssl_fields(). - %% =================================================================== callback_mode() -> always_sync. @@ -236,7 +246,7 @@ on_stop(InstId, _State) -> on_query( InstId, - {send_message, Document}, + {_ChannelId, Document}, #{pool_name := PoolName, collection := Collection} = State ) -> Request = {insert, Collection, Document}, diff --git a/rel/i18n/emqx_bridge_mongodb.hocon b/rel/i18n/emqx_bridge_mongodb.hocon index 4edd1182d..d7c14588b 100644 --- a/rel/i18n/emqx_bridge_mongodb.hocon +++ b/rel/i18n/emqx_bridge_mongodb.hocon @@ -48,6 +48,12 @@ mongodb_single_conf.desc: mongodb_single_conf.label: """MongoDB (Standalone) Configuration""" +mongodb_parameters.label: +"""MongoDB Type Specific Parameters""" + +mongodb_parameters.desc: +"""Set of parameters specific for the given type of this MongoDB connector, `mongo_type` can be one of `single` (Standalone), `sharded` (Sharded) or `rs` (Replica Set).""" + payload_template.desc: """The template for formatting the outgoing messages. If undefined, rule engine will use JSON format to serialize all visible inputs, such as clientid, topic, payload etc.""" @@ -59,4 +65,19 @@ batch_size.desc: batch_size.label: """Batch Size""" +action_parameters.label: +"""Action Parameters""" +action_parameters.desc: +"""Additional parameters specific to this action type""" + +mongodb_action.label: +"""MongoDB Action""" +mongodb_action.desc: +"""Action to interact with a MongoDB connector""" + +mqtt_topic.desc: +"""MQTT topic or topic filter as data source (bridge input). 
If rule action is used as data source, this config should be left empty, otherwise messages will be duplicated in MongoDB.""" +mqtt_topic.label: +"""Source MQTT Topic""" + } From c1ef773e770797807053ce95614d548dfdd47fdf Mon Sep 17 00:00:00 2001 From: Stefan Strigler Date: Thu, 23 Nov 2023 13:25:03 +0100 Subject: [PATCH 083/101] fix: check for sane state after regular shutdown --- .../test/emqx_bridge_kafka_impl_producer_SUITE.erl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_producer_SUITE.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_producer_SUITE.erl index 2a8a42a09..09d3f78aa 100644 --- a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_producer_SUITE.erl +++ b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_producer_SUITE.erl @@ -483,11 +483,10 @@ t_failed_creation_then_fix(Config) -> {ok, {_, [KafkaMsg]}} = brod:fetch(kafka_hosts(), KafkaTopic, 0, Offset), ?assertMatch(#kafka_message{key = BinTime}, KafkaMsg), % %% TODO: refactor those into init/end per testcase - ok = ?PRODUCER:on_stop(ResourceId, State), - ?assertEqual([], supervisor:which_children(wolff_client_sup)), - ?assertEqual([], supervisor:which_children(wolff_producers_sup)), ok = emqx_bridge:remove(list_to_atom(Type), list_to_atom(Name)), delete_all_bridges(), + ?assertEqual([], supervisor:which_children(wolff_client_sup)), + ?assertEqual([], supervisor:which_children(wolff_producers_sup)), ok. t_custom_timestamp(_Config) -> From d5b62eead0152e5221bd667571ea7116987ddd14 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Tue, 14 Nov 2023 17:16:09 +0100 Subject: [PATCH 084/101] feat: split pgsql, matrix and timescale into connector action This commit splits the bridges pgsql, matrix and timescale into connector and action. 
Fixes: https://emqx.atlassian.net/browse/EMQX-11155 --- apps/emqx_bridge/src/emqx_action_info.erl | 6 +- apps/emqx_bridge/src/emqx_bridge.erl | 6 +- apps/emqx_bridge/src/emqx_bridge_v2.erl | 55 ++++- .../src/emqx_bridge_matrix.app.src | 2 +- .../src/emqx_bridge_matrix.erl | 53 ++++- .../src/emqx_bridge_matrix_action_info.erl | 22 ++ .../src/emqx_bridge_pgsql_action_info.erl | 22 ++ .../src/schema/emqx_bridge_pgsql_schema.erl | 172 +++++++++++++++ .../test/emqx_bridge_pgsql_SUITE.erl | 50 +++-- .../src/emqx_bridge_timescale.app.src | 2 +- .../src/emqx_bridge_timescale.erl | 53 ++++- .../src/emqx_bridge_timescale_action_info.erl | 22 ++ .../src/schema/emqx_connector_ee_schema.erl | 40 +++- .../src/schema/emqx_connector_schema.erl | 5 +- apps/emqx_postgresql/src/emqx_postgresql.erl | 178 ++++++++++++++-- .../emqx_postgresql_connector_schema.erl | 201 ++++++++++++++++++ 16 files changed, 832 insertions(+), 57 deletions(-) create mode 100644 apps/emqx_bridge_matrix/src/emqx_bridge_matrix_action_info.erl create mode 100644 apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql_action_info.erl create mode 100644 apps/emqx_bridge_pgsql/src/schema/emqx_bridge_pgsql_schema.erl create mode 100644 apps/emqx_bridge_timescale/src/emqx_bridge_timescale_action_info.erl create mode 100644 apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index 129142f24..74fd38811 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -78,7 +78,11 @@ hard_coded_action_info_modules_ee() -> emqx_bridge_gcp_pubsub_producer_action_info, emqx_bridge_kafka_action_info, emqx_bridge_mongodb_action_info, - emqx_bridge_syskeeper_action_info + emqx_bridge_syskeeper_action_info, + emqx_bridge_syskeeper_action_info, + emqx_bridge_pgsql_action_info, + emqx_bridge_timescale_action_info, + emqx_bridge_matrix_action_info ]. -else. hard_coded_action_info_modules_ee() -> diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index f557210ed..9e14c0c9a 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -240,8 +240,8 @@ send_message(BridgeId, Message) -> {BridgeV1Type, BridgeName} = emqx_bridge_resource:parse_bridge_id(BridgeId), case emqx_bridge_v2:is_bridge_v2_type(BridgeV1Type) of true -> - BridgeV2Type = emqx_bridge_v2:bridge_v1_type_to_bridge_v2_type(BridgeV1Type), - emqx_bridge_v2:send_message(BridgeV2Type, BridgeName, Message, #{}); + ActionType = emqx_action_info:bridge_v1_type_to_action_type(BridgeV1Type), + emqx_bridge_v2:send_message(ActionType, BridgeName, Message, #{}); false -> ResId = emqx_bridge_resource:resource_id(BridgeV1Type, BridgeName), send_message(BridgeV1Type, BridgeName, ResId, Message, #{}) @@ -414,7 +414,7 @@ remove(BridgeType0, BridgeName) -> }), case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of true -> - emqx_bridge_v2:remove(BridgeType, BridgeName); + emqx_bridge_v2:bridge_v1_remove(BridgeType0, BridgeName); false -> remove_v1(BridgeType, BridgeName) end. 
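The send_message/2 change above routes a v1 bridge id through emqx_action_info to find the action type before handing off to emqx_bridge_v2, and remove/2 for v2 types now goes through bridge_v1_remove so that the referenced connector is removed as well (see the test-only bridge_v1_remove in the next hunk). For the types touched by this patch the v1-to-action mapping is one-to-one; a sketch of the expected mappings, with the bridge name being illustrative:

    %% Registered by the *_action_info modules in this series:
    %%   pgsql | timescale | matrix                    -> same-named action type
    %%   mongodb_rs | mongodb_sharded | mongodb_single -> mongodb (earlier patch)
    %% so a call such as
    %%   emqx_bridge:send_message(<<"pgsql:my_bridge">>, Msg)
    %% ends up roughly as
    %%   emqx_bridge_v2:send_message(pgsql, <<"my_bridge">>, Msg, #{})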
diff --git a/apps/emqx_bridge/src/emqx_bridge_v2.erl b/apps/emqx_bridge/src/emqx_bridge_v2.erl index 54ccf1b24..63874d67e 100644 --- a/apps/emqx_bridge/src/emqx_bridge_v2.erl +++ b/apps/emqx_bridge/src/emqx_bridge_v2.erl @@ -55,6 +55,7 @@ disable_enable/3, health_check/2, send_message/4, + query/4, start/2, reset_metrics/2, create_dry_run/2, @@ -116,7 +117,9 @@ bridge_v1_enable_disable/3, bridge_v1_restart/2, bridge_v1_stop/2, - bridge_v1_start/2 + bridge_v1_start/2, + %% For test cases only + bridge_v1_remove/2 ]). %%==================================================================== @@ -547,25 +550,25 @@ get_query_mode(BridgeV2Type, Config) -> ResourceType = emqx_connector_resource:connector_to_resource_type(ConnectorType), emqx_resource:query_mode(ResourceType, Config, CreationOpts). --spec send_message(bridge_v2_type(), bridge_v2_name(), Message :: term(), QueryOpts :: map()) -> +-spec query(bridge_v2_type(), bridge_v2_name(), Message :: term(), QueryOpts :: map()) -> term() | {error, term()}. -send_message(BridgeType, BridgeName, Message, QueryOpts0) -> +query(BridgeType, BridgeName, Message, QueryOpts0) -> case lookup_conf(BridgeType, BridgeName) of #{enable := true} = Config0 -> Config = combine_connector_and_bridge_v2_config(BridgeType, BridgeName, Config0), - do_send_msg_with_enabled_config(BridgeType, BridgeName, Message, QueryOpts0, Config); + do_query_with_enabled_config(BridgeType, BridgeName, Message, QueryOpts0, Config); #{enable := false} -> {error, bridge_stopped}; _Error -> {error, bridge_not_found} end. -do_send_msg_with_enabled_config( +do_query_with_enabled_config( _BridgeType, _BridgeName, _Message, _QueryOpts0, {error, Reason} = Error ) -> ?SLOG(error, Reason), Error; -do_send_msg_with_enabled_config( +do_query_with_enabled_config( BridgeType, BridgeName, Message, QueryOpts0, Config ) -> QueryMode = get_query_mode(BridgeType, Config), @@ -579,7 +582,17 @@ do_send_msg_with_enabled_config( } ), BridgeV2Id = id(BridgeType, BridgeName), - emqx_resource:query(BridgeV2Id, {BridgeV2Id, Message}, QueryOpts). + case Message of + {send_message, Msg} -> + emqx_resource:query(BridgeV2Id, {BridgeV2Id, Msg}, QueryOpts); + Msg -> + emqx_resource:query(BridgeV2Id, Msg, QueryOpts) + end. + +-spec send_message(bridge_v2_type(), bridge_v2_name(), Message :: term(), QueryOpts :: map()) -> + term() | {error, term()}. +send_message(BridgeType, BridgeName, Message, QueryOpts0) -> + query(BridgeType, BridgeName, {send_message, Message}, QueryOpts0). -spec health_check(BridgeType :: term(), BridgeName :: term()) -> #{status := emqx_resource:resource_status(), error := term()} | {error, Reason :: term()}. @@ -1325,6 +1338,34 @@ bridge_v1_create_dry_run(BridgeType, RawConfig0) -> } = split_and_validate_bridge_v1_config(BridgeType, TmpName, RawConf, PreviousRawConf), create_dry_run_helper(BridgeV2Type, ConnectorRawConf, BridgeV2RawConf). +%% Only called by test cases (may create broken references) +bridge_v1_remove(BridgeV1Type, BridgeName) -> + ActionType = ?MODULE:bridge_v1_type_to_bridge_v2_type(BridgeV1Type), + bridge_v1_remove( + ActionType, + BridgeName, + lookup_conf(ActionType, BridgeName) + ). + +bridge_v1_remove( + ActionType, + Name, + #{connector := ConnectorName} +) -> + case remove(ActionType, Name) of + ok -> + ConnectorType = connector_type(ActionType), + emqx_connector:remove(ConnectorType, ConnectorName); + Error -> + Error + end; +bridge_v1_remove( + _ActionType, + _Name, + Error +) -> + Error. 
+ bridge_v1_check_deps_and_remove(BridgeV1Type, BridgeName, RemoveDeps) -> BridgeV2Type = ?MODULE:bridge_v1_type_to_bridge_v2_type(BridgeV1Type), bridge_v1_check_deps_and_remove( diff --git a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src index 14aca1f75..e2a63a01e 100644 --- a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src +++ b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_matrix, [ {description, "EMQX Enterprise MatrixDB Bridge"}, - {vsn, "0.1.2"}, + {vsn, "0.1.3"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl index abd98adb6..acfd86ded 100644 --- a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl +++ b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl @@ -14,6 +14,12 @@ desc/1 ]). +%% Examples +-export([ + bridge_v2_examples/1, + connector_examples/1 +]). + %% ------------------------------------------------------------------------------------------------- %% api @@ -22,7 +28,7 @@ conn_bridge_examples(Method) -> #{ <<"matrix">> => #{ summary => <<"Matrix Bridge">>, - value => emqx_bridge_pgsql:values(Method, matrix) + value => emqx_bridge_pgsql_schema:values_conn_bridge_examples(Method, matrix) } } ]. @@ -35,8 +41,53 @@ roots() -> []. fields("post") -> emqx_bridge_pgsql:fields("post", matrix); +fields("config_connector") -> + emqx_bridge_pgsql_schema:fields("config_connector"); +fields(action) -> + {matrix, + hoconsc:mk( + hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql_schema, pgsql_action)), + #{ + desc => <<"Matrix Action Config">>, + required => false + } + )}; +fields("put_bridge_v2") -> + emqx_bridge_pgsql_schema:fields(pgsql_action); +fields("get_bridge_v2") -> + emqx_bridge_pgsql_schema:fields(pgsql_action); +fields("post_bridge_v2") -> + emqx_bridge_pgsql_schema:fields(pgsql_action); +fields("put_connector") -> + emqx_bridge_pgsql_schema:fields("config_connector"); +fields("get_connector") -> + emqx_bridge_pgsql_schema:fields("config_connector"); +fields("post_connector") -> + emqx_bridge_pgsql_schema:fields("config_connector"); fields(Method) -> emqx_bridge_pgsql:fields(Method). desc(_) -> undefined. + +%% Examples + +connector_examples(Method) -> + [ + #{ + <<"matrix">> => #{ + summary => <<"Matrix Connector">>, + value => emqx_postgresql_connector_schema:values({Method, connector}) + } + } + ]. + +bridge_v2_examples(Method) -> + [ + #{ + <<"matrix">> => #{ + summary => <<"Matrix Action">>, + value => emqx_bridge_pgsql_schema:values({Method, matrix}) + } + } + ]. diff --git a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix_action_info.erl b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix_action_info.erl new file mode 100644 index 000000000..4eae13415 --- /dev/null +++ b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix_action_info.erl @@ -0,0 +1,22 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_matrix_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +bridge_v1_type_name() -> matrix. + +action_type_name() -> matrix. + +connector_type_name() -> matrix. + +schema_module() -> emqx_bridge_matrix. 
diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql_action_info.erl b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql_action_info.erl new file mode 100644 index 000000000..e353eb440 --- /dev/null +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql_action_info.erl @@ -0,0 +1,22 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_pgsql_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +bridge_v1_type_name() -> pgsql. + +action_type_name() -> pgsql. + +connector_type_name() -> pgsql. + +schema_module() -> emqx_bridge_pgsql_schema. diff --git a/apps/emqx_bridge_pgsql/src/schema/emqx_bridge_pgsql_schema.erl b/apps/emqx_bridge_pgsql/src/schema/emqx_bridge_pgsql_schema.erl new file mode 100644 index 000000000..a3ad4bb11 --- /dev/null +++ b/apps/emqx_bridge_pgsql/src/schema/emqx_bridge_pgsql_schema.erl @@ -0,0 +1,172 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_pgsql_schema). + +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("emqx_postgresql/include/emqx_postgresql.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("epgsql/include/epgsql.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-export([roots/0, fields/1]). + +%% Examples +-export([ + bridge_v2_examples/1, + conn_bridge_examples/1 +]). + +%% Exported for timescale and matrix bridges +-export([ + values/1, + values_conn_bridge_examples/2 +]). + +-define(PGSQL_HOST_OPTIONS, #{ + default_port => ?PGSQL_DEFAULT_PORT +}). + +roots() -> + [{config, #{type => hoconsc:ref(?MODULE, config)}}]. + +fields("config_connector") -> + emqx_postgresql_connector_schema:fields("config_connector"); +fields(config) -> + fields("config_connector") ++ + fields(action); +fields(action) -> + {pgsql, + hoconsc:mk( + hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql_schema, pgsql_action)), + #{ + desc => <<"PostgreSQL Action Config">>, + required => false + } + )}; +fields(action_parameters) -> + [ + {sql, + hoconsc:mk( + binary(), + #{desc => ?DESC("sql_template"), default => default_sql(), format => <<"sql">>} + )} + ] ++ + emqx_connector_schema_lib:prepare_statement_fields(); +fields(pgsql_action) -> + emqx_bridge_v2_schema:make_producer_action_schema(hoconsc:ref(?MODULE, action_parameters)); +%% TODO: All of these needs to be fixed +fields("put_bridge_v2") -> + fields(pgsql_action); +fields("get_bridge_v2") -> + fields(pgsql_action); +fields("post_bridge_v2") -> + fields(pgsql_action). + +default_sql() -> + << + "insert into t_mqtt_msg(msgid, topic, qos, payload, arrived) " + "values (${id}, ${topic}, ${qos}, ${payload}, TO_TIMESTAMP((${timestamp} :: bigint)/1000))" + >>. + +%% Examples + +bridge_v2_examples(Method) -> + [ + #{ + <<"pgsql">> => #{ + summary => <<"PostgreSQL Producer Action">>, + value => values({Method, bridge_v2_producer}) + } + } + ]. 
+ +conn_bridge_examples(Method) -> + [ + #{ + <<"pgsql">> => #{ + summary => <<"PostgreSQL Producer Bridge">>, + value => values_conn_bridge_examples(Method, pgsql) + } + } + ]. + +values({get, PostgreSQLType}) -> + maps:merge( + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + }, + values({post, PostgreSQLType}) + ); +values({post, PostgreSQLType}) -> + maps:merge( + #{ + name => <<"my_pgsql_action">>, + type => PostgreSQLType + }, + values({put, PostgreSQLType}) + ); +values({put, PostgreSQLType}) -> + maps:merge( + #{ + enable => true, + connector => <<"my_pgsql_connector">>, + resource_opts => #{ + health_check_interval => "32s" + } + }, + values({producer, PostgreSQLType}) + ); +values({producer, _PostgreSQLType}) -> + #{ + <<"enable">> => true, + <<"connector">> => <<"connector_pgsql_test">>, + <<"parameters">> => #{ + <<"sql">> => + <<"INSERT INTO client_events(clientid, event, created_at) VALUES (\n ${clientid},\n ${event},\n TO_TIMESTAMP((${timestamp} :: bigint))\n)">> + }, + <<"resource_opts">> => #{ + <<"batch_size">> => 1, + <<"batch_time">> => <<"0ms">>, + <<"health_check_interval">> => <<"15s">>, + <<"inflight_window">> => 100, + <<"max_buffer_bytes">> => <<"256MB">>, + <<"query_mode">> => <<"async">>, + <<"request_ttl">> => <<"45s">>, + <<"start_after_created">> => true, + <<"start_timeout">> => <<"5s">>, + <<"worker_pool_size">> => 16 + } + }. + +values_conn_bridge_examples(_Method, Type) -> + #{ + enable => true, + type => Type, + name => <<"foo">>, + server => <<"127.0.0.1:5432">>, + database => <<"mqtt">>, + pool_size => 8, + username => <<"root">>, + password => <<"******">>, + sql => default_sql(), + local_topic => <<"local/topic/#">>, + resource_opts => #{ + worker_pool_size => 8, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => async, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. diff --git a/apps/emqx_bridge_pgsql/test/emqx_bridge_pgsql_SUITE.erl b/apps/emqx_bridge_pgsql/test/emqx_bridge_pgsql_SUITE.erl index 722489ba6..58aaa7d71 100644 --- a/apps/emqx_bridge_pgsql/test/emqx_bridge_pgsql_SUITE.erl +++ b/apps/emqx_bridge_pgsql/test/emqx_bridge_pgsql_SUITE.erl @@ -114,7 +114,7 @@ init_per_suite(Config) -> end_per_suite(_Config) -> emqx_mgmt_api_test_util:end_suite(), - ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]), + ok = emqx_common_test_helpers:stop_apps([emqx, emqx_postgresql, emqx_conf, emqx_bridge]), ok. init_per_testcase(_Testcase, Config) -> @@ -147,7 +147,7 @@ common_init(Config0) -> ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), % Ensure enterprise bridge module is loaded - ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), + ok = emqx_common_test_helpers:start_apps([emqx, emqx_postgresql, emqx_conf, emqx_bridge]), _ = emqx_bridge_enterprise:module_info(), emqx_mgmt_api_test_util:init_suite(), % Connect to pgsql directly and create the table @@ -259,17 +259,16 @@ send_message(Config, Payload) -> BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name), emqx_bridge:send_message(BridgeID, Payload). 
-query_resource(Config, Request) -> +query_resource(Config, Msg = _Request) -> Name = ?config(pgsql_name, Config), BridgeType = ?config(pgsql_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - emqx_resource:query(ResourceID, Request, #{timeout => 1_000}). + emqx_bridge_v2:query(BridgeType, Name, Msg, #{timeout => 1_000}). query_resource_sync(Config, Request) -> Name = ?config(pgsql_name, Config), BridgeType = ?config(pgsql_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - emqx_resource_buffer_worker:simple_sync_query(ResourceID, Request). + ActionId = emqx_bridge_v2:id(BridgeType, Name), + emqx_resource_buffer_worker:simple_sync_query(ActionId, Request). query_resource_async(Config, Request) -> query_resource_async(Config, Request, _Opts = #{}). @@ -279,9 +278,8 @@ query_resource_async(Config, Request, Opts) -> BridgeType = ?config(pgsql_bridge_type, Config), Ref = alias([reply]), AsyncReplyFun = fun(Result) -> Ref ! {result, Ref, Result} end, - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), Timeout = maps:get(timeout, Opts, 500), - Return = emqx_resource:query(ResourceID, Request, #{ + Return = emqx_bridge_v2:query(BridgeType, Name, Request, #{ timeout => Timeout, async_reply_fun => {AsyncReplyFun, []} }), @@ -441,13 +439,12 @@ t_get_status(Config) -> Name = ?config(pgsql_name, Config), BridgeType = ?config(pgsql_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)), + ?assertMatch(#{status := connected}, emqx_bridge_v2:health_check(BridgeType, Name)), emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> ?assertMatch( - {ok, Status} when Status =:= disconnected orelse Status =:= connecting, - emqx_resource_manager:health_check(ResourceID) + #{status := Status} when Status =:= disconnected orelse Status =:= connecting, + emqx_bridge_v2:health_check(BridgeType, Name) ) end), ok. 
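The assertions above also document the shape change in the health-check API used by this suite: the v1 resource-manager call returned a tagged tuple, while emqx_bridge_v2:health_check/2 returns a map (see its spec earlier in this patch).

    %% before:  {ok, connected} = emqx_resource_manager:health_check(ResourceID)
    %% after:   #{status := connected} = emqx_bridge_v2:health_check(BridgeType, Name)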
@@ -655,7 +652,7 @@ t_nasty_sql_string(Config) -> t_missing_table(Config) -> Name = ?config(pgsql_name, Config), BridgeType = ?config(pgsql_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + % ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), ?check_trace( begin @@ -665,21 +662,20 @@ t_missing_table(Config) -> _Sleep = 1_000, _Attempts = 20, ?assertMatch( - {ok, Status} when Status == connecting orelse Status == disconnected, - emqx_resource_manager:health_check(ResourceID) + #{status := Status} when Status == connecting orelse Status == disconnected, + emqx_bridge_v2:health_check(BridgeType, Name) ) ), Val = integer_to_binary(erlang:unique_integer()), SentData = #{payload => Val, timestamp => 1668602148000}, - Timeout = 1000, ?assertMatch( {error, {resource_error, #{reason := unhealthy_target}}}, - query_resource(Config, {send_message, SentData, [], Timeout}) + query_resource(Config, {send_message, SentData}) ), ok end, fun(Trace) -> - ?assertMatch([_], ?of_kind(pgsql_undefined_table, Trace)), + ?assertMatch([_ | _], ?of_kind(pgsql_undefined_table, Trace)), ok end ), @@ -689,7 +685,7 @@ t_missing_table(Config) -> t_table_removed(Config) -> Name = ?config(pgsql_name, Config), BridgeType = ?config(pgsql_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + %%ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), ?check_trace( begin connect_and_create_table(Config), @@ -697,13 +693,14 @@ t_table_removed(Config) -> ?retry( _Sleep = 1_000, _Attempts = 20, - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)) + ?assertMatch(#{status := connected}, emqx_bridge_v2:health_check(BridgeType, Name)) ), connect_and_drop_table(Config), Val = integer_to_binary(erlang:unique_integer()), SentData = #{payload => Val, timestamp => 1668602148000}, - case query_resource_sync(Config, {send_message, SentData, []}) of - {error, {unrecoverable_error, {error, error, <<"42P01">>, undefined_table, _, _}}} -> + ActionId = emqx_bridge_v2:id(BridgeType, Name), + case query_resource_sync(Config, {ActionId, SentData}) of + {error, {unrecoverable_error, _}} -> ok; ?RESOURCE_ERROR_M(not_connected, _) -> ok; @@ -720,7 +717,6 @@ t_table_removed(Config) -> t_concurrent_health_checks(Config) -> Name = ?config(pgsql_name, Config), BridgeType = ?config(pgsql_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), ?check_trace( begin connect_and_create_table(Config), @@ -728,11 +724,13 @@ t_concurrent_health_checks(Config) -> ?retry( _Sleep = 1_000, _Attempts = 20, - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)) + ?assertMatch(#{status := connected}, emqx_bridge_v2:health_check(BridgeType, Name)) ), emqx_utils:pmap( fun(_) -> - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)) + ?assertMatch( + #{status := connected}, emqx_bridge_v2:health_check(BridgeType, Name) + ) end, lists:seq(1, 20) ), diff --git a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src index adb024591..4842f5c93 100644 --- a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src +++ b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_timescale, [ {description, "EMQX Enterprise TimescaleDB Bridge"}, - {vsn, "0.1.2"}, + {vsn, "0.1.3"}, {registered, []}, {applications, [kernel, stdlib, emqx_resource]}, {env, []}, diff 
--git a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl index c4dedf07c..edeef26d4 100644 --- a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl +++ b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl @@ -14,6 +14,12 @@ desc/1 ]). +%% Examples +-export([ + bridge_v2_examples/1, + connector_examples/1 +]). + %% ------------------------------------------------------------------------------------------------- %% api @@ -22,7 +28,7 @@ conn_bridge_examples(Method) -> #{ <<"timescale">> => #{ summary => <<"Timescale Bridge">>, - value => emqx_bridge_pgsql:values(Method, timescale) + value => emqx_bridge_pgsql_schema:values_conn_bridge_examples(Method, timescale) } } ]. @@ -35,8 +41,53 @@ roots() -> []. fields("post") -> emqx_bridge_pgsql:fields("post", timescale); +fields("config_connector") -> + emqx_bridge_pgsql_schema:fields("config_connector"); +fields(action) -> + {timescale, + hoconsc:mk( + hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql_schema, pgsql_action)), + #{ + desc => <<"Timescale Action Config">>, + required => false + } + )}; +fields("put_bridge_v2") -> + emqx_bridge_pgsql_schema:fields(pgsql_action); +fields("get_bridge_v2") -> + emqx_bridge_pgsql_schema:fields(pgsql_action); +fields("post_bridge_v2") -> + emqx_bridge_pgsql_schema:fields(pgsql_action); +fields("put_connector") -> + emqx_bridge_pgsql_schema:fields("config_connector"); +fields("get_connector") -> + emqx_bridge_pgsql_schema:fields("config_connector"); +fields("post_connector") -> + emqx_bridge_pgsql_schema:fields("config_connector"); fields(Method) -> emqx_bridge_pgsql:fields(Method). desc(_) -> undefined. + +%% Examples + +connector_examples(Method) -> + [ + #{ + <<"timescale">> => #{ + summary => <<"Timescale Connector">>, + value => emqx_postgresql_connector_schema:values({Method, connector}) + } + } + ]. + +bridge_v2_examples(Method) -> + [ + #{ + <<"timescale">> => #{ + summary => <<"Timescale Action">>, + value => emqx_bridge_pgsql_schema:values({Method, timescale}) + } + } + ]. diff --git a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale_action_info.erl b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale_action_info.erl new file mode 100644 index 000000000..fff74b578 --- /dev/null +++ b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale_action_info.erl @@ -0,0 +1,22 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_timescale_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +bridge_v1_type_name() -> timescale. + +action_type_name() -> timescale. + +connector_type_name() -> timescale. + +schema_module() -> emqx_bridge_timescale. 
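With the pgsql, timescale and matrix action-info modules registered, the upgrade code is expected to rewrite an old-style bridge of these types into a connector plus an action. A very rough before/after sketch; the names and exact field placement are illustrative, since the generated connector naming and the full connector schema (emqx_postgresql_connector_schema) are not shown here:

    %% before (v1):
    %%   bridges.timescale.t1: server, database, username, password,
    %%                         pool_size, sql, resource_opts, ...
    %% after (v2, roughly):
    %%   connectors.timescale.t1: server, database, username, password, pool_size, ...
    %%   actions.timescale.t1:    connector = t1,
    %%                            parameters.sql = "insert into t_mqtt_msg(...) values (...)",
    %%                            resource_opts, ...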
diff --git a/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl index 535917e4e..1ffe306e9 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl @@ -35,6 +35,12 @@ resource_type(syskeeper_forwarder) -> emqx_bridge_syskeeper_connector; resource_type(syskeeper_proxy) -> emqx_bridge_syskeeper_proxy_server; +resource_type(pgsql) -> + emqx_postgresql; +resource_type(timescale) -> + emqx_postgresql; +resource_type(matrix) -> + emqx_postgresql; resource_type(Type) -> error({unknown_connector_type, Type}). @@ -108,6 +114,30 @@ connector_structs() -> desc => <<"Syskeeper Proxy Connector Config">>, required => false } + )}, + {pgsql, + mk( + hoconsc:map(name, ref(emqx_postgresql_connector_schema, "config_connector")), + #{ + desc => <<"PostgreSQL Connector Config">>, + required => false + } + )}, + {timescale, + mk( + hoconsc:map(name, ref(emqx_bridge_timescale, "config_connector")), + #{ + desc => <<"Timescale Connector Config">>, + required => false + } + )}, + {matrix, + mk( + hoconsc:map(name, ref(emqx_bridge_matrix, "config_connector")), + #{ + desc => <<"Matrix Connector Config">>, + required => false + } )} ]. @@ -131,7 +161,10 @@ schema_modules() -> emqx_bridge_kafka, emqx_bridge_mongodb, emqx_bridge_syskeeper_connector, - emqx_bridge_syskeeper_proxy + emqx_bridge_syskeeper_proxy, + emqx_postgresql_connector_schema, + emqx_bridge_timescale, + emqx_bridge_matrix ]. api_schemas(Method) -> @@ -152,7 +185,10 @@ api_schemas(Method) -> api_ref(emqx_bridge_kafka, <<"kafka_producer">>, Method ++ "_connector"), api_ref(emqx_bridge_mongodb, <<"mongodb">>, Method ++ "_connector"), api_ref(emqx_bridge_syskeeper_connector, <<"syskeeper_forwarder">>, Method), - api_ref(emqx_bridge_syskeeper_proxy, <<"syskeeper_proxy">>, Method) + api_ref(emqx_bridge_syskeeper_proxy, <<"syskeeper_proxy">>, Method), + api_ref(emqx_postgresql_connector_schema, <<"pgsql">>, Method ++ "_connector"), + api_ref(emqx_bridge_timescale, <<"timescale">>, Method ++ "_connector"), + api_ref(emqx_bridge_matrix, <<"matrix">>, Method ++ "_connector") ]. api_ref(Module, Type, Method) -> diff --git a/apps/emqx_connector/src/schema/emqx_connector_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_schema.erl index 765a693e2..cd99f0fe6 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_schema.erl @@ -72,7 +72,10 @@ connector_type_to_bridge_types(gcp_pubsub_producer) -> [gcp_pubsub, gcp_pubsub_p connector_type_to_bridge_types(kafka_producer) -> [kafka, kafka_producer]; connector_type_to_bridge_types(mongodb) -> [mongodb, mongodb_rs, mongodb_sharded, mongodb_single]; connector_type_to_bridge_types(syskeeper_forwarder) -> [syskeeper_forwarder]; -connector_type_to_bridge_types(syskeeper_proxy) -> []. +connector_type_to_bridge_types(syskeeper_proxy) -> []; +connector_type_to_bridge_types(pgsql) -> [pgsql]; +connector_type_to_bridge_types(timescale) -> [timescale]; +connector_type_to_bridge_types(matrix) -> [matrix]. actions_config_name() -> <<"actions">>. 
diff --git a/apps/emqx_postgresql/src/emqx_postgresql.erl b/apps/emqx_postgresql/src/emqx_postgresql.erl index ba1ad4be5..68fa1da00 100644 --- a/apps/emqx_postgresql/src/emqx_postgresql.erl +++ b/apps/emqx_postgresql/src/emqx_postgresql.erl @@ -34,7 +34,11 @@ on_stop/2, on_query/3, on_batch_query/3, - on_get_status/2 + on_get_status/2, + on_add_channel/4, + on_remove_channel/3, + on_get_channels/1, + on_get_channel_status/3 ]). -export([connect/1]). @@ -136,10 +140,11 @@ on_start( {auto_reconnect, ?AUTO_RECONNECT_INTERVAL}, {pool_size, PoolSize} ], - State = parse_prepare_sql(Config), + State1 = parse_prepare_sql(Config, <<"send_message">>), + State2 = State1#{installed_channels => #{}}, case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of ok -> - {ok, init_prepare(State#{pool_name => InstId, prepares => #{}})}; + {ok, init_prepare(State2#{pool_name => InstId, prepares => #{}})}; {error, Reason} -> ?tp( pgsql_connector_start_failed, @@ -148,13 +153,140 @@ on_start( {error, Reason} end. -on_stop(InstId, _State) -> +on_stop(InstId, State) -> ?SLOG(info, #{ msg => "stopping_postgresql_connector", connector => InstId }), + close_connections(State), emqx_resource_pool:stop(InstId). +close_connections(#{pool_name := PoolName} = _State) -> + WorkerPids = [Worker || {_WorkerName, Worker} <- ecpool:workers(PoolName)], + close_connections_with_worker_pids(WorkerPids), + ok. + +close_connections_with_worker_pids([WorkerPid | Rest]) -> + %% We ignore errors since any error probably means that the + %% connection is closed already. + try ecpool_worker:client(WorkerPid) of + {ok, Conn} -> + _ = epgsql:close(Conn), + close_connections_with_worker_pids(Rest); + _ -> + close_connections_with_worker_pids(Rest) + catch + _:_ -> + close_connections_with_worker_pids(Rest) + end; +close_connections_with_worker_pids([]) -> + ok. + +on_add_channel( + _InstId, + #{ + installed_channels := InstalledChannels + } = OldState, + ChannelId, + ChannelConfig +) -> + %% The following will throw an exception if the bridge producers fails to start + {ok, ChannelState} = create_channel_state(ChannelId, OldState, ChannelConfig), + case ChannelState of + #{prepares := {error, Reason}} -> + {error, {unhealthy_target, Reason}}; + _ -> + NewInstalledChannels = maps:put(ChannelId, ChannelState, InstalledChannels), + %% Update state + NewState = OldState#{installed_channels => NewInstalledChannels}, + {ok, NewState} + end. + +create_channel_state( + ChannelId, + #{pool_name := PoolName} = _ConnectorState, + #{parameters := Parameters} = _ChannelConfig +) -> + State1 = parse_prepare_sql(Parameters, ChannelId), + {ok, + init_prepare(State1#{ + pool_name => PoolName, + prepare_statement => #{} + })}. + +on_remove_channel( + _InstId, + #{ + installed_channels := InstalledChannels + } = OldState, + ChannelId +) -> + %% Close prepared statements + ok = close_prepared_statement(ChannelId, OldState), + NewInstalledChannels = maps:remove(ChannelId, InstalledChannels), + %% Update state + NewState = OldState#{installed_channels => NewInstalledChannels}, + {ok, NewState}. + +close_prepared_statement(ChannelId, #{pool_name := PoolName} = State) -> + WorkerPids = [Worker || {_WorkerName, Worker} <- ecpool:workers(PoolName)], + close_prepared_statement(WorkerPids, ChannelId, State), + ok. + +close_prepared_statement([WorkerPid | Rest], ChannelId, State) -> + %% We ignore errors since any error probably means that the + %% prepared statement doesn't exist. 
+ try ecpool_worker:client(WorkerPid) of + {ok, Conn} -> + Statement = get_prepared_statement(ChannelId, State), + _ = epgsql:close(Conn, Statement), + close_prepared_statement(Rest, ChannelId, State); + _ -> + close_prepared_statement(Rest, ChannelId, State) + catch + _:_ -> + close_prepared_statement(Rest, ChannelId, State) + end; +close_prepared_statement([], _ChannelId, _State) -> + ok. + +on_get_channel_status( + _ResId, + ChannelId, + #{ + pool_name := PoolName, + installed_channels := Channels + } = _State +) -> + ChannelState = maps:get(ChannelId, Channels), + case + do_check_channel_sql( + PoolName, + ChannelId, + ChannelState + ) + of + ok -> + connected; + {error, undefined_table} -> + {error, {unhealthy_target, <<"Table does not exist">>}}; + {error, _Reason} -> + %% do not log error, it is logged in prepare_sql_to_conn + connecting + end. + +do_check_channel_sql( + PoolName, + ChannelId, + #{query_templates := ChannelQueryTemplates} = _ChannelState +) -> + {SQL, _RowTemplate} = maps:get(ChannelId, ChannelQueryTemplates), + WorkerPids = [Worker || {_WorkerName, Worker} <- ecpool:workers(PoolName)], + validate_table_existence(WorkerPids, SQL). + +on_get_channels(ResId) -> + emqx_bridge_v2:get_channels_for_connector(ResId). + on_query(InstId, {TypeOrKey, NameOrSQL}, State) -> on_query(InstId, {TypeOrKey, NameOrSQL, []}, State); on_query( @@ -187,10 +319,10 @@ pgsql_query_type(_) -> on_batch_query( InstId, [{Key, _} = Request | _] = BatchReq, - #{pool_name := PoolName, query_templates := Templates, prepares := PrepStatements} = State + #{pool_name := PoolName} = State ) -> BinKey = to_bin(Key), - case maps:get(BinKey, Templates, undefined) of + case get_template(BinKey, State) of undefined -> Log = #{ connector => InstId, @@ -201,7 +333,7 @@ on_batch_query( ?SLOG(error, Log), {error, {unrecoverable_error, batch_prepare_not_implemented}}; {_Statement, RowTemplate} -> - PrepStatement = maps:get(BinKey, PrepStatements), + PrepStatement = get_prepared_statement(BinKey, State), Rows = [render_prepare_sql_row(RowTemplate, Data) || {_Key, Data} <- BatchReq], case on_sql_query(InstId, PoolName, execute_batch, PrepStatement, Rows) of {error, _Error} = Result -> @@ -223,15 +355,35 @@ proc_sql_params(query, SQLOrKey, Params, _State) -> {SQLOrKey, Params}; proc_sql_params(prepared_query, SQLOrKey, Params, _State) -> {SQLOrKey, Params}; -proc_sql_params(TypeOrKey, SQLOrData, Params, #{query_templates := Templates}) -> - Key = to_bin(TypeOrKey), - case maps:get(Key, Templates, undefined) of +proc_sql_params(TypeOrKey, SQLOrData, Params, State) -> + BinKey = to_bin(TypeOrKey), + case get_template(BinKey, State) of undefined -> {SQLOrData, Params}; {_Statement, RowTemplate} -> - {Key, render_prepare_sql_row(RowTemplate, SQLOrData)} + {BinKey, render_prepare_sql_row(RowTemplate, SQLOrData)} end. +get_template(Key, #{installed_channels := Channels} = _State) when is_map_key(Key, Channels) -> + BinKey = to_bin(Key), + ChannelState = maps:get(BinKey, Channels), + ChannelQueryTemplates = maps:get(query_templates, ChannelState), + maps:get(BinKey, ChannelQueryTemplates); +get_template(Key, #{query_templates := Templates}) -> + BinKey = to_bin(Key), + maps:get(BinKey, Templates, undefined). 
+ +get_prepared_statement(Key, #{installed_channels := Channels} = _State) when + is_map_key(Key, Channels) +-> + BinKey = to_bin(Key), + ChannelState = maps:get(BinKey, Channels), + ChannelPreparedStatements = maps:get(prepares, ChannelState), + maps:get(BinKey, ChannelPreparedStatements); +get_prepared_statement(Key, #{prepares := PrepStatements}) -> + BinKey = to_bin(Key), + maps:get(BinKey, PrepStatements). + on_sql_query(InstId, PoolName, Type, NameOrSQL, Data) -> try ecpool:pick_and_do(PoolName, {?MODULE, Type, [NameOrSQL, Data]}, no_handover) of {error, Reason} = Result -> @@ -415,13 +567,13 @@ conn_opts([Opt = {ssl_opts, _} | Opts], Acc) -> conn_opts([_Opt | Opts], Acc) -> conn_opts(Opts, Acc). -parse_prepare_sql(Config) -> +parse_prepare_sql(Config, SQLID) -> Queries = case Config of #{prepare_statement := Qs} -> Qs; #{sql := Query} -> - #{<<"send_message">> => Query}; + #{SQLID => Query}; #{} -> #{} end, diff --git a/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl b/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl new file mode 100644 index 000000000..d709f87c3 --- /dev/null +++ b/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl @@ -0,0 +1,201 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_postgresql_connector_schema). + +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_postgresql/include/emqx_postgresql.hrl"). + +-define(PGSQL_HOST_OPTIONS, #{ + default_port => ?PGSQL_DEFAULT_PORT +}). + +-export([ + roots/0, + fields/1 +]). + +%% Examples +-export([ + connector_examples/1, + values/1 +]). + +roots() -> + [{config, #{type => hoconsc:ref(?MODULE, config)}}]. + +fields("config_connector") -> + [{server, server()}] ++ + adjust_fields(emqx_connector_schema_lib:relational_db_fields()) ++ + emqx_connector_schema_lib:ssl_fields(); +fields(config) -> + fields("config_connector") ++ + fields(action); +fields(action) -> + {pgsql, + hoconsc:mk( + hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql_schema, pgsql_action)), + #{ + desc => <<"PostgreSQL Action Config">>, + required => false + } + )}; +fields(pgsql_action) -> + emqx_bridge_v2_schema:make_producer_action_schema(hoconsc:ref(?MODULE, action_parameters)); +%% TODO: All of these needs to be fixed +fields("put_bridge_v2") -> + fields(pgsql_action); +fields("get_bridge_v2") -> + fields(pgsql_action); +fields("post_bridge_v2") -> + fields(pgsql_action); +fields("put_connector") -> + fields("config_connector"); +fields("get_connector") -> + fields("config_connector"); +fields("post_connector") -> + fields("config_connector"). + +server() -> + Meta = #{desc => ?DESC("server")}, + emqx_schema:servers_sc(Meta, ?PGSQL_HOST_OPTIONS). + +adjust_fields(Fields) -> + lists:map( + fun + ({username, Sc}) -> + %% to please dialyzer... 
+ Override = #{type => hocon_schema:field_schema(Sc, type), required => true}, + {username, hocon_schema:override(Sc, Override)}; + (Field) -> + Field + end, + Fields + ). + +%% Examples +connector_examples(Method) -> + [ + #{ + <<"pgsql">> => #{ + summary => <<"PostgreSQL Producer Connector">>, + value => values({Method, connector}) + } + } + ]. + +%% TODO: All of these needs to be adjusted from Kafka to PostgreSQL +values({get, PostgreSQLType}) -> + maps:merge( + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + }, + values({post, PostgreSQLType}) + ); +values({post, connector}) -> + maps:merge( + #{ + name => <<"my_pgsql_connector">>, + type => <<"pgsql">> + }, + values(common_config) + ); +values({post, PostgreSQLType}) -> + maps:merge( + #{ + name => <<"my_pgsql_action">>, + type => <<"pgsql">> + }, + values({put, PostgreSQLType}) + ); +values({put, bridge_v2_producer}) -> + values(bridge_v2_producer); +values({put, connector}) -> + values(common_config); +values({put, PostgreSQLType}) -> + maps:merge(values(common_config), values(PostgreSQLType)); +values(bridge_v2_producer) -> + maps:merge( + #{ + enable => true, + connector => <<"my_pgsql_connector">>, + resource_opts => #{ + health_check_interval => "32s" + } + }, + values(producer) + ); +values(common_config) -> + #{ + authentication => #{ + mechanism => <<"plain">>, + username => <<"username">>, + password => <<"******">> + }, + bootstrap_hosts => <<"localhost:9092">>, + connect_timeout => <<"5s">>, + enable => true, + metadata_request_timeout => <<"4s">>, + min_metadata_refresh_interval => <<"3s">>, + socket_opts => #{ + sndbuf => <<"1024KB">>, + recbuf => <<"1024KB">>, + nodelay => true, + tcp_keepalive => <<"none">> + } + }; +values(producer) -> + #{ + kafka => #{ + topic => <<"kafka-topic">>, + message => #{ + key => <<"${.clientid}">>, + value => <<"${.}">>, + timestamp => <<"${.timestamp}">> + }, + max_batch_bytes => <<"896KB">>, + compression => <<"no_compression">>, + partition_strategy => <<"random">>, + required_acks => <<"all_isr">>, + partition_count_refresh_interval => <<"60s">>, + kafka_headers => <<"${pub_props}">>, + kafka_ext_headers => [ + #{ + kafka_ext_header_key => <<"clientid">>, + kafka_ext_header_value => <<"${clientid}">> + }, + #{ + kafka_ext_header_key => <<"topic">>, + kafka_ext_header_value => <<"${topic}">> + } + ], + kafka_header_value_encode_mode => none, + max_inflight => 10, + buffer => #{ + mode => <<"hybrid">>, + per_partition_limit => <<"2GB">>, + segment_bytes => <<"100MB">>, + memory_overload_protection => true + } + }, + local_topic => <<"mqtt/local/topic">> + }. From dbe73c70b12120ab3c68dbdfcda7b98760206598 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Thu, 23 Nov 2023 17:29:57 +0100 Subject: [PATCH 085/101] fix: dialyzer problem --- apps/emqx_postgresql/src/emqx_postgresql.erl | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/apps/emqx_postgresql/src/emqx_postgresql.erl b/apps/emqx_postgresql/src/emqx_postgresql.erl index 68fa1da00..ce62fa30b 100644 --- a/apps/emqx_postgresql/src/emqx_postgresql.erl +++ b/apps/emqx_postgresql/src/emqx_postgresql.erl @@ -269,10 +269,7 @@ on_get_channel_status( ok -> connected; {error, undefined_table} -> - {error, {unhealthy_target, <<"Table does not exist">>}}; - {error, _Reason} -> - %% do not log error, it is logged in prepare_sql_to_conn - connecting + {error, {unhealthy_target, <<"Table does not exist">>}} end. 
do_check_channel_sql( From 2e3028a8f8800d3cacd67a8055eddd28f98a9fa6 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Thu, 23 Nov 2023 17:41:28 +0100 Subject: [PATCH 086/101] fix(emqx_postgresql): fix lifecycle test --- apps/emqx_postgresql/test/emqx_postgresql_SUITE.erl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/apps/emqx_postgresql/test/emqx_postgresql_SUITE.erl b/apps/emqx_postgresql/test/emqx_postgresql_SUITE.erl index 5a93a0578..1b41e2dd0 100644 --- a/apps/emqx_postgresql/test/emqx_postgresql_SUITE.erl +++ b/apps/emqx_postgresql/test/emqx_postgresql_SUITE.erl @@ -61,13 +61,15 @@ end_per_testcase(_, _Config) -> t_lifecycle(_Config) -> perform_lifecycle_check( - <<"emqx_postgresql_SUITE">>, + <<"connector:pgsql:emqx_postgresql_SUITE">>, pgsql_config() ). perform_lifecycle_check(ResourceId, InitialConfig) -> + x:show(initial_config, InitialConfig), {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?PGSQL_RESOURCE_MOD, InitialConfig), + x:show(check_config_ok, CheckedConfig), {ok, #{ state := #{pool_name := PoolName} = State, status := InitialStatus From b8f510d956469775d2278646348d3488949b8e10 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Thu, 23 Nov 2023 18:02:54 +0100 Subject: [PATCH 087/101] fix: add env variables about action info --- apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src | 2 +- apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src | 2 +- apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src index e2a63a01e..9175e5d4e 100644 --- a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src +++ b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src @@ -7,7 +7,7 @@ stdlib, emqx_resource ]}, - {env, []}, + {env, [{emqx_action_info_module, emqx_bridge_matrix_action_info}]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src index 7a17652e0..58d093f6f 100644 --- a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src @@ -8,7 +8,7 @@ emqx_resource, emqx_postgresql ]}, - {env, []}, + {env, [{emqx_action_info_module, emqx_bridge_pgsql_action_info}]}, {modules, []}, {links, []} ]}. 
diff --git a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src index 4842f5c93..53302a21f 100644 --- a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src +++ b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src @@ -3,6 +3,7 @@ {vsn, "0.1.3"}, {registered, []}, {applications, [kernel, stdlib, emqx_resource]}, + {env, [{emqx_action_info_module, emqx_bridge_timescale_action_info}]}, {env, []}, {modules, []}, {links, []} From 64c015cf6fe9a49e81334846b0d85f2085687eb8 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Thu, 23 Nov 2023 18:50:19 +0100 Subject: [PATCH 088/101] fix: unify the schema modules in emqx_bridge_pgsql --- .../src/emqx_bridge_matrix.erl | 20 +- .../src/emqx_bridge_pgsql.erl | 229 +++++++++++++----- .../src/emqx_bridge_pgsql_action_info.erl | 2 +- .../src/schema/emqx_bridge_pgsql_schema.erl | 172 ------------- .../src/emqx_bridge_timescale.erl | 20 +- .../emqx_postgresql_connector_schema.erl | 2 +- 6 files changed, 193 insertions(+), 252 deletions(-) delete mode 100644 apps/emqx_bridge_pgsql/src/schema/emqx_bridge_pgsql_schema.erl diff --git a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl index acfd86ded..78810bc9e 100644 --- a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl +++ b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl @@ -28,7 +28,7 @@ conn_bridge_examples(Method) -> #{ <<"matrix">> => #{ summary => <<"Matrix Bridge">>, - value => emqx_bridge_pgsql_schema:values_conn_bridge_examples(Method, matrix) + value => emqx_bridge_pgsql:values_conn_bridge_examples(Method, matrix) } } ]. @@ -42,28 +42,28 @@ roots() -> []. fields("post") -> emqx_bridge_pgsql:fields("post", matrix); fields("config_connector") -> - emqx_bridge_pgsql_schema:fields("config_connector"); + emqx_bridge_pgsql:fields("config_connector"); fields(action) -> {matrix, hoconsc:mk( - hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql_schema, pgsql_action)), + hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql, pgsql_action)), #{ desc => <<"Matrix Action Config">>, required => false } )}; fields("put_bridge_v2") -> - emqx_bridge_pgsql_schema:fields(pgsql_action); + emqx_bridge_pgsql:fields(pgsql_action); fields("get_bridge_v2") -> - emqx_bridge_pgsql_schema:fields(pgsql_action); + emqx_bridge_pgsql:fields(pgsql_action); fields("post_bridge_v2") -> - emqx_bridge_pgsql_schema:fields(pgsql_action); + emqx_bridge_pgsql:fields(pgsql_action); fields("put_connector") -> - emqx_bridge_pgsql_schema:fields("config_connector"); + emqx_bridge_pgsql:fields("config_connector"); fields("get_connector") -> - emqx_bridge_pgsql_schema:fields("config_connector"); + emqx_bridge_pgsql:fields("config_connector"); fields("post_connector") -> - emqx_bridge_pgsql_schema:fields("config_connector"); + emqx_bridge_pgsql:fields("config_connector"); fields(Method) -> emqx_bridge_pgsql:fields(Method). @@ -87,7 +87,7 @@ bridge_v2_examples(Method) -> #{ <<"matrix">> => #{ summary => <<"Matrix Action">>, - value => emqx_bridge_pgsql_schema:values({Method, matrix}) + value => emqx_bridge_pgsql:values({Method, matrix}) } } ]. 
diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl index bb15dfad9..fc0680aba 100644 --- a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl @@ -1,83 +1,91 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- + -module(emqx_bridge_pgsql). +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("emqx_postgresql/include/emqx_postgresql.hrl"). -include_lib("typerefl/include/types.hrl"). +-include_lib("emqx/include/logger.hrl"). -include_lib("hocon/include/hoconsc.hrl"). +-include_lib("epgsql/include/epgsql.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include_lib("emqx_resource/include/emqx_resource.hrl"). --import(hoconsc, [mk/2, enum/1, ref/2]). - --export([ - conn_bridge_examples/1, - values/2, - fields/2 -]). - -export([ namespace/0, roots/0, fields/1, - desc/1 + desc/1, + values/2, + fields/2 ]). --define(DEFAULT_SQL, << - "insert into t_mqtt_msg(msgid, topic, qos, payload, arrived) " - "values (${id}, ${topic}, ${qos}, ${payload}, TO_TIMESTAMP((${timestamp} :: bigint)/1000))" ->>). +%% Examples +-export([ + bridge_v2_examples/1, + conn_bridge_examples/1 +]). -%% ------------------------------------------------------------------------------------------------- -%% api +%% Exported for timescale and matrix bridges +-export([ + values/1, + values_conn_bridge_examples/2 +]). -conn_bridge_examples(Method) -> - [ - #{ - <<"pgsql">> => #{ - summary => <<"PostgreSQL Bridge">>, - value => values(Method, pgsql) - } - } - ]. +-define(PGSQL_HOST_OPTIONS, #{ + default_port => ?PGSQL_DEFAULT_PORT +}). -values(_Method, Type) -> - #{ - enable => true, - type => Type, - name => <<"foo">>, - server => <<"127.0.0.1:5432">>, - database => <<"mqtt">>, - pool_size => 8, - username => <<"root">>, - password => <<"******">>, - sql => ?DEFAULT_SQL, - local_topic => <<"local/topic/#">>, - resource_opts => #{ - worker_pool_size => 8, - health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, - batch_size => ?DEFAULT_BATCH_SIZE, - batch_time => ?DEFAULT_BATCH_TIME, - query_mode => async, - max_buffer_bytes => ?DEFAULT_BUFFER_BYTES - } - }. - -%% ------------------------------------------------------------------------------------------------- %% Hocon Schema Definitions namespace() -> "bridge_pgsql". -roots() -> []. +roots() -> + []. 
+fields("config_connector") -> + emqx_postgresql_connector_schema:fields("config_connector"); +fields(config) -> + fields("config_connector") ++ + fields(action); +fields(action) -> + {pgsql, + hoconsc:mk( + hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql, pgsql_action)), + #{ + desc => <<"PostgreSQL Action Config">>, + required => false + } + )}; +fields(action_parameters) -> + [ + {sql, + hoconsc:mk( + binary(), + #{desc => ?DESC("sql_template"), default => default_sql(), format => <<"sql">>} + )} + ] ++ + emqx_connector_schema_lib:prepare_statement_fields(); +fields(pgsql_action) -> + emqx_bridge_v2_schema:make_producer_action_schema(hoconsc:ref(?MODULE, action_parameters)); +%% TODO: All of these needs to be fixed +fields("put_bridge_v2") -> + fields(pgsql_action); +fields("get_bridge_v2") -> + fields(pgsql_action); +fields("post_bridge_v2") -> + fields(pgsql_action); fields("config") -> [ - {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {enable, hoconsc:mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, {sql, - mk( + hoconsc:mk( binary(), - #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>} + #{desc => ?DESC("sql_template"), default => default_sql(), format => <<"sql">>} )}, {local_topic, - mk( + hoconsc:mk( binary(), #{desc => ?DESC("local_topic"), default => undefined} )} @@ -94,6 +102,12 @@ fields("get") -> fields("post", Type) -> [type_field(Type), name_field() | fields("config")]. +type_field(Type) -> + {type, hoconsc:mk(hoconsc:enum([Type]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, hoconsc:mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. + desc("config") -> ?DESC("desc_config"); desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> @@ -101,10 +115,109 @@ desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> desc(_) -> undefined. -%% ------------------------------------------------------------------------------------------------- +default_sql() -> + << + "insert into t_mqtt_msg(msgid, topic, qos, payload, arrived) " + "values (${id}, ${topic}, ${qos}, ${payload}, TO_TIMESTAMP((${timestamp} :: bigint)/1000))" + >>. -type_field(Type) -> - {type, mk(enum([Type]), #{required => true, desc => ?DESC("desc_type")})}. +%% Examples -name_field() -> - {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. +bridge_v2_examples(Method) -> + [ + #{ + <<"pgsql">> => #{ + summary => <<"PostgreSQL Producer Action">>, + value => values({Method, bridge_v2_producer}) + } + } + ]. + +conn_bridge_examples(Method) -> + [ + #{ + <<"pgsql">> => #{ + summary => <<"PostgreSQL Producer Bridge">>, + value => values_conn_bridge_examples(Method, pgsql) + } + } + ]. 
+ +values({get, PostgreSQLType}) -> + maps:merge( + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + }, + values({post, PostgreSQLType}) + ); +values({post, PostgreSQLType}) -> + maps:merge( + #{ + name => <<"my_pgsql_action">>, + type => PostgreSQLType + }, + values({put, PostgreSQLType}) + ); +values({put, PostgreSQLType}) -> + maps:merge( + #{ + enable => true, + connector => <<"my_pgsql_connector">>, + resource_opts => #{ + health_check_interval => "32s" + } + }, + values({producer, PostgreSQLType}) + ); +values({producer, _PostgreSQLType}) -> + #{ + <<"enable">> => true, + <<"connector">> => <<"connector_pgsql_test">>, + <<"parameters">> => #{ + <<"sql">> => + <<"INSERT INTO client_events(clientid, event, created_at) VALUES (\n ${clientid},\n ${event},\n TO_TIMESTAMP((${timestamp} :: bigint))\n)">> + }, + <<"resource_opts">> => #{ + <<"batch_size">> => 1, + <<"batch_time">> => <<"0ms">>, + <<"health_check_interval">> => <<"15s">>, + <<"inflight_window">> => 100, + <<"max_buffer_bytes">> => <<"256MB">>, + <<"query_mode">> => <<"async">>, + <<"request_ttl">> => <<"45s">>, + <<"start_after_created">> => true, + <<"start_timeout">> => <<"5s">>, + <<"worker_pool_size">> => 16 + } + }. + +values_conn_bridge_examples(_Method, Type) -> + #{ + enable => true, + type => Type, + name => <<"foo">>, + server => <<"127.0.0.1:5432">>, + database => <<"mqtt">>, + pool_size => 8, + username => <<"root">>, + password => <<"******">>, + sql => default_sql(), + local_topic => <<"local/topic/#">>, + resource_opts => #{ + worker_pool_size => 8, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => async, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. + +values(Method, Type) -> + values_conn_bridge_examples(Method, Type). diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql_action_info.erl b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql_action_info.erl index e353eb440..c702b396b 100644 --- a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql_action_info.erl +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql_action_info.erl @@ -19,4 +19,4 @@ action_type_name() -> pgsql. connector_type_name() -> pgsql. -schema_module() -> emqx_bridge_pgsql_schema. +schema_module() -> emqx_bridge_pgsql. diff --git a/apps/emqx_bridge_pgsql/src/schema/emqx_bridge_pgsql_schema.erl b/apps/emqx_bridge_pgsql/src/schema/emqx_bridge_pgsql_schema.erl deleted file mode 100644 index a3ad4bb11..000000000 --- a/apps/emqx_bridge_pgsql/src/schema/emqx_bridge_pgsql_schema.erl +++ /dev/null @@ -1,172 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. -%%-------------------------------------------------------------------- - --module(emqx_bridge_pgsql_schema). - --include_lib("emqx_connector/include/emqx_connector.hrl"). --include_lib("emqx_postgresql/include/emqx_postgresql.hrl"). --include_lib("typerefl/include/types.hrl"). --include_lib("emqx/include/logger.hrl"). --include_lib("hocon/include/hoconsc.hrl"). --include_lib("epgsql/include/epgsql.hrl"). --include_lib("snabbkaffe/include/snabbkaffe.hrl"). --include_lib("emqx_resource/include/emqx_resource.hrl"). - --export([roots/0, fields/1]). - -%% Examples --export([ - bridge_v2_examples/1, - conn_bridge_examples/1 -]). - -%% Exported for timescale and matrix bridges --export([ - values/1, - values_conn_bridge_examples/2 -]). 
- --define(PGSQL_HOST_OPTIONS, #{ - default_port => ?PGSQL_DEFAULT_PORT -}). - -roots() -> - [{config, #{type => hoconsc:ref(?MODULE, config)}}]. - -fields("config_connector") -> - emqx_postgresql_connector_schema:fields("config_connector"); -fields(config) -> - fields("config_connector") ++ - fields(action); -fields(action) -> - {pgsql, - hoconsc:mk( - hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql_schema, pgsql_action)), - #{ - desc => <<"PostgreSQL Action Config">>, - required => false - } - )}; -fields(action_parameters) -> - [ - {sql, - hoconsc:mk( - binary(), - #{desc => ?DESC("sql_template"), default => default_sql(), format => <<"sql">>} - )} - ] ++ - emqx_connector_schema_lib:prepare_statement_fields(); -fields(pgsql_action) -> - emqx_bridge_v2_schema:make_producer_action_schema(hoconsc:ref(?MODULE, action_parameters)); -%% TODO: All of these needs to be fixed -fields("put_bridge_v2") -> - fields(pgsql_action); -fields("get_bridge_v2") -> - fields(pgsql_action); -fields("post_bridge_v2") -> - fields(pgsql_action). - -default_sql() -> - << - "insert into t_mqtt_msg(msgid, topic, qos, payload, arrived) " - "values (${id}, ${topic}, ${qos}, ${payload}, TO_TIMESTAMP((${timestamp} :: bigint)/1000))" - >>. - -%% Examples - -bridge_v2_examples(Method) -> - [ - #{ - <<"pgsql">> => #{ - summary => <<"PostgreSQL Producer Action">>, - value => values({Method, bridge_v2_producer}) - } - } - ]. - -conn_bridge_examples(Method) -> - [ - #{ - <<"pgsql">> => #{ - summary => <<"PostgreSQL Producer Bridge">>, - value => values_conn_bridge_examples(Method, pgsql) - } - } - ]. - -values({get, PostgreSQLType}) -> - maps:merge( - #{ - status => <<"connected">>, - node_status => [ - #{ - node => <<"emqx@localhost">>, - status => <<"connected">> - } - ] - }, - values({post, PostgreSQLType}) - ); -values({post, PostgreSQLType}) -> - maps:merge( - #{ - name => <<"my_pgsql_action">>, - type => PostgreSQLType - }, - values({put, PostgreSQLType}) - ); -values({put, PostgreSQLType}) -> - maps:merge( - #{ - enable => true, - connector => <<"my_pgsql_connector">>, - resource_opts => #{ - health_check_interval => "32s" - } - }, - values({producer, PostgreSQLType}) - ); -values({producer, _PostgreSQLType}) -> - #{ - <<"enable">> => true, - <<"connector">> => <<"connector_pgsql_test">>, - <<"parameters">> => #{ - <<"sql">> => - <<"INSERT INTO client_events(clientid, event, created_at) VALUES (\n ${clientid},\n ${event},\n TO_TIMESTAMP((${timestamp} :: bigint))\n)">> - }, - <<"resource_opts">> => #{ - <<"batch_size">> => 1, - <<"batch_time">> => <<"0ms">>, - <<"health_check_interval">> => <<"15s">>, - <<"inflight_window">> => 100, - <<"max_buffer_bytes">> => <<"256MB">>, - <<"query_mode">> => <<"async">>, - <<"request_ttl">> => <<"45s">>, - <<"start_after_created">> => true, - <<"start_timeout">> => <<"5s">>, - <<"worker_pool_size">> => 16 - } - }. - -values_conn_bridge_examples(_Method, Type) -> - #{ - enable => true, - type => Type, - name => <<"foo">>, - server => <<"127.0.0.1:5432">>, - database => <<"mqtt">>, - pool_size => 8, - username => <<"root">>, - password => <<"******">>, - sql => default_sql(), - local_topic => <<"local/topic/#">>, - resource_opts => #{ - worker_pool_size => 8, - health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, - batch_size => ?DEFAULT_BATCH_SIZE, - batch_time => ?DEFAULT_BATCH_TIME, - query_mode => async, - max_buffer_bytes => ?DEFAULT_BUFFER_BYTES - } - }. 
diff --git a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl index edeef26d4..9cefabc15 100644 --- a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl +++ b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl @@ -28,7 +28,7 @@ conn_bridge_examples(Method) -> #{ <<"timescale">> => #{ summary => <<"Timescale Bridge">>, - value => emqx_bridge_pgsql_schema:values_conn_bridge_examples(Method, timescale) + value => emqx_bridge_pgsql:values_conn_bridge_examples(Method, timescale) } } ]. @@ -42,28 +42,28 @@ roots() -> []. fields("post") -> emqx_bridge_pgsql:fields("post", timescale); fields("config_connector") -> - emqx_bridge_pgsql_schema:fields("config_connector"); + emqx_bridge_pgsql:fields("config_connector"); fields(action) -> {timescale, hoconsc:mk( - hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql_schema, pgsql_action)), + hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql, pgsql_action)), #{ desc => <<"Timescale Action Config">>, required => false } )}; fields("put_bridge_v2") -> - emqx_bridge_pgsql_schema:fields(pgsql_action); + emqx_bridge_pgsql:fields(pgsql_action); fields("get_bridge_v2") -> - emqx_bridge_pgsql_schema:fields(pgsql_action); + emqx_bridge_pgsql:fields(pgsql_action); fields("post_bridge_v2") -> - emqx_bridge_pgsql_schema:fields(pgsql_action); + emqx_bridge_pgsql:fields(pgsql_action); fields("put_connector") -> - emqx_bridge_pgsql_schema:fields("config_connector"); + emqx_bridge_pgsql:fields("config_connector"); fields("get_connector") -> - emqx_bridge_pgsql_schema:fields("config_connector"); + emqx_bridge_pgsql:fields("config_connector"); fields("post_connector") -> - emqx_bridge_pgsql_schema:fields("config_connector"); + emqx_bridge_pgsql:fields("config_connector"); fields(Method) -> emqx_bridge_pgsql:fields(Method). @@ -87,7 +87,7 @@ bridge_v2_examples(Method) -> #{ <<"timescale">> => #{ summary => <<"Timescale Action">>, - value => emqx_bridge_pgsql_schema:values({Method, timescale}) + value => emqx_bridge_pgsql:values({Method, timescale}) } } ]. 
diff --git a/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl b/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl index d709f87c3..1d6949856 100644 --- a/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl +++ b/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl @@ -47,7 +47,7 @@ fields(config) -> fields(action) -> {pgsql, hoconsc:mk( - hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql_schema, pgsql_action)), + hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql, pgsql_action)), #{ desc => <<"PostgreSQL Action Config">>, required => false From f7296d549f9faf8ca28a68be8d50b0395d4b4278 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Thu, 23 Nov 2023 18:58:01 +0100 Subject: [PATCH 089/101] fix: elvis problem --- apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl index fc0680aba..c32504124 100644 --- a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl @@ -181,7 +181,14 @@ values({producer, _PostgreSQLType}) -> <<"connector">> => <<"connector_pgsql_test">>, <<"parameters">> => #{ <<"sql">> => - <<"INSERT INTO client_events(clientid, event, created_at) VALUES (\n ${clientid},\n ${event},\n TO_TIMESTAMP((${timestamp} :: bigint))\n)">> + << + "INSERT INTO client_events(clientid, event, created_at)" + "VALUES (\n" + " ${clientid},\n" + " ${event},\n" + " TO_TIMESTAMP((${timestamp} :: bigint))\n" + ")" + >> }, <<"resource_opts">> => #{ <<"batch_size">> => 1, From 6ef9c6fe4ae5ad31db4ce3f6509e8d3e8176d4fa Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Thu, 23 Nov 2023 19:15:02 +0100 Subject: [PATCH 090/101] fix: ops --- apps/emqx_postgresql/test/emqx_postgresql_SUITE.erl | 2 -- 1 file changed, 2 deletions(-) diff --git a/apps/emqx_postgresql/test/emqx_postgresql_SUITE.erl b/apps/emqx_postgresql/test/emqx_postgresql_SUITE.erl index 1b41e2dd0..c7aee3019 100644 --- a/apps/emqx_postgresql/test/emqx_postgresql_SUITE.erl +++ b/apps/emqx_postgresql/test/emqx_postgresql_SUITE.erl @@ -66,10 +66,8 @@ t_lifecycle(_Config) -> ). perform_lifecycle_check(ResourceId, InitialConfig) -> - x:show(initial_config, InitialConfig), {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?PGSQL_RESOURCE_MOD, InitialConfig), - x:show(check_config_ok, CheckedConfig), {ok, #{ state := #{pool_name := PoolName} = State, status := InitialStatus From c5e281b84b3dd2a5fed2120c54e894778d3871c8 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Fri, 24 Nov 2023 11:01:44 +0100 Subject: [PATCH 091/101] fix: emqx_auth_postgresql test suites --- apps/emqx_bridge/src/emqx_bridge_v2.erl | 29 ++++++++++++------- .../test/emqx_postgresql_SUITE.erl | 2 +- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_bridge_v2.erl b/apps/emqx_bridge/src/emqx_bridge_v2.erl index 63874d67e..1863ed84b 100644 --- a/apps/emqx_bridge/src/emqx_bridge_v2.erl +++ b/apps/emqx_bridge/src/emqx_bridge_v2.erl @@ -798,17 +798,24 @@ parse_id(Id) -> end. 
get_channels_for_connector(ConnectorId) -> - {ConnectorType, ConnectorName} = emqx_connector_resource:parse_connector_id(ConnectorId), - RootConf = maps:keys(emqx:get_config([?ROOT_KEY], #{})), - RelevantBridgeV2Types = [ - Type - || Type <- RootConf, - connector_type(Type) =:= ConnectorType - ], - lists:flatten([ - get_channels_for_connector(ConnectorName, BridgeV2Type) - || BridgeV2Type <- RelevantBridgeV2Types - ]). + try emqx_connector_resource:parse_connector_id(ConnectorId) of + {ConnectorType, ConnectorName} -> + RootConf = maps:keys(emqx:get_config([?ROOT_KEY], #{})), + RelevantBridgeV2Types = [ + Type + || Type <- RootConf, + connector_type(Type) =:= ConnectorType + ], + lists:flatten([ + get_channels_for_connector(ConnectorName, BridgeV2Type) + || BridgeV2Type <- RelevantBridgeV2Types + ]) + catch + _:_ -> + %% ConnectorId is not a valid connector id so we assume the connector + %% has no channels (e.g. it is a a connector for authn or authz) + [] + end. get_channels_for_connector(ConnectorName, BridgeV2Type) -> BridgeV2s = emqx:get_config([?ROOT_KEY, BridgeV2Type], #{}), diff --git a/apps/emqx_postgresql/test/emqx_postgresql_SUITE.erl b/apps/emqx_postgresql/test/emqx_postgresql_SUITE.erl index c7aee3019..5a93a0578 100644 --- a/apps/emqx_postgresql/test/emqx_postgresql_SUITE.erl +++ b/apps/emqx_postgresql/test/emqx_postgresql_SUITE.erl @@ -61,7 +61,7 @@ end_per_testcase(_, _Config) -> t_lifecycle(_Config) -> perform_lifecycle_check( - <<"connector:pgsql:emqx_postgresql_SUITE">>, + <<"emqx_postgresql_SUITE">>, pgsql_config() ). From 3dca83c854e7867ba02ffde72f6926c8166ab30c Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Fri, 24 Nov 2023 15:20:07 +0100 Subject: [PATCH 092/101] fix: all missing descriptions --- .../src/emqx_bridge_matrix.erl | 4 ++++ .../src/emqx_bridge_pgsql.erl | 6 ++++++ .../src/emqx_bridge_timescale.erl | 4 ++++ .../src/schema/emqx_connector_ee_schema.erl | 2 +- .../emqx_postgresql_connector_schema.erl | 10 ++++++++-- rel/i18n/emqx_bridge_pgsql.hocon | 13 +++++++++++++ rel/i18n/emqx_postgresql.hocon | 6 ++++++ .../emqx_postgresql_connector_schema.hocon | 18 ++++++++++++++++++ 8 files changed, 60 insertions(+), 3 deletions(-) create mode 100644 rel/i18n/emqx_postgresql_connector_schema.hocon diff --git a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl index 78810bc9e..fb8ee9d4d 100644 --- a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl +++ b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl @@ -3,6 +3,8 @@ %%-------------------------------------------------------------------- -module(emqx_bridge_matrix). +-include_lib("hocon/include/hoconsc.hrl"). + -export([ conn_bridge_examples/1 ]). @@ -67,6 +69,8 @@ fields("post_connector") -> fields(Method) -> emqx_bridge_pgsql:fields(Method). +desc("config_connector") -> + ?DESC(emqx_postgresql_connector_schema, "config_connector"); desc(_) -> undefined. 
diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl index c32504124..2fe3960d5 100644 --- a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl @@ -112,6 +112,12 @@ desc("config") -> ?DESC("desc_config"); desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> ["Configuration for PostgreSQL using `", string:to_upper(Method), "` method."]; +desc(pgsql_action) -> + ?DESC("pgsql_action"); +desc(action_parameters) -> + ?DESC("action_parameters"); +desc("config_connector") -> + ?DESC(emqx_postgresql_connector_schema, "config_connector"); desc(_) -> undefined. diff --git a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl index 9cefabc15..759f69ed7 100644 --- a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl +++ b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl @@ -3,6 +3,8 @@ %%-------------------------------------------------------------------- -module(emqx_bridge_timescale). +-include_lib("hocon/include/hoconsc.hrl"). + -export([ conn_bridge_examples/1 ]). @@ -67,6 +69,8 @@ fields("post_connector") -> fields(Method) -> emqx_bridge_pgsql:fields(Method). +desc("config_connector") -> + ?DESC(emqx_postgresql_connector_schema, "config_connector"); desc(_) -> undefined. diff --git a/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl index 1ffe306e9..c2ce2568c 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl @@ -117,7 +117,7 @@ connector_structs() -> )}, {pgsql, mk( - hoconsc:map(name, ref(emqx_postgresql_connector_schema, "config_connector")), + hoconsc:map(name, ref(emqx_bridge_pgsql, "config_connector")), #{ desc => <<"PostgreSQL Connector Config">>, required => false diff --git a/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl b/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl index 1d6949856..366d9c71d 100644 --- a/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl +++ b/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl @@ -25,7 +25,8 @@ -export([ roots/0, - fields/1 + fields/1, + desc/1 ]). %% Examples @@ -35,7 +36,7 @@ ]). roots() -> - [{config, #{type => hoconsc:ref(?MODULE, config)}}]. + []. fields("config_connector") -> [{server, server()}] ++ @@ -199,3 +200,8 @@ values(producer) -> }, local_topic => <<"mqtt/local/topic">> }. + +desc("config_connector") -> + ?DESC("config_connector"); +desc(_) -> + undefined. 
diff --git a/rel/i18n/emqx_bridge_pgsql.hocon b/rel/i18n/emqx_bridge_pgsql.hocon index 0a5ca2b04..8fcf9139e 100644 --- a/rel/i18n/emqx_bridge_pgsql.hocon +++ b/rel/i18n/emqx_bridge_pgsql.hocon @@ -40,4 +40,17 @@ sql_template.desc: sql_template.label: """SQL Template""" +pgsql_action.desc: +"""Configuration for PostgreSQL Action""" + +pgsql_action.label: +"""PostgreSQL Action Configuration""" + + +action_parameters.desc: +"""Configuration Parameters Specific to the PostgreSQL Action""" + +action_parameters.label: +"""Action Parameters""" + } diff --git a/rel/i18n/emqx_postgresql.hocon b/rel/i18n/emqx_postgresql.hocon index c6d2581c1..9740b0814 100644 --- a/rel/i18n/emqx_postgresql.hocon +++ b/rel/i18n/emqx_postgresql.hocon @@ -8,4 +8,10 @@ The PostgreSQL default port 5432 is used if `[:Port]` is not specified.""" server.label: """Server Host""" +config_connector.desc: +"""The configuration for the PostgreSQL connector.""" + +config_connector.label: +"""PostgreSQL Connector Config""" + } diff --git a/rel/i18n/emqx_postgresql_connector_schema.hocon b/rel/i18n/emqx_postgresql_connector_schema.hocon new file mode 100644 index 000000000..8ecfb958a --- /dev/null +++ b/rel/i18n/emqx_postgresql_connector_schema.hocon @@ -0,0 +1,18 @@ + +emqx_postgresql_connector_schema { + +server.desc: +"""The IPv4 or IPv6 address or the hostname to connect to.
+A host entry has the following form: `Host[:Port]`.
+The PostgreSQL default port 5432 is used if `[:Port]` is not specified.""" + +server.label: +"""Server Host""" + +config_connector.desc: +"""The configuration for the PostgreSQL connector.""" + +config_connector.label: +"""PostgreSQL Connector Config""" + +} From e920160805b0b7f1ebb3d6f605bbf693bf33af06 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Fri, 24 Nov 2023 15:42:51 +0100 Subject: [PATCH 093/101] fix: add enable and description fields to PostgreSQL connector --- .../src/schema/emqx_postgresql_connector_schema.erl | 9 ++++++++- rel/i18n/emqx_postgresql_connector_schema.hocon | 6 ++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl b/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl index 366d9c71d..ee92c62fc 100644 --- a/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl +++ b/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl @@ -38,10 +38,17 @@ roots() -> []. -fields("config_connector") -> +fields("connection_fields") -> [{server, server()}] ++ adjust_fields(emqx_connector_schema_lib:relational_db_fields()) ++ emqx_connector_schema_lib:ssl_fields(); +fields("config_connector") -> + fields("connection_fields") ++ fields(enable_and_desc); +fields(enable_and_desc) -> + [ + {enable, hoconsc:mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {description, emqx_schema:description_schema()} + ]; fields(config) -> fields("config_connector") ++ fields(action); diff --git a/rel/i18n/emqx_postgresql_connector_schema.hocon b/rel/i18n/emqx_postgresql_connector_schema.hocon index 8ecfb958a..056e66d09 100644 --- a/rel/i18n/emqx_postgresql_connector_schema.hocon +++ b/rel/i18n/emqx_postgresql_connector_schema.hocon @@ -15,4 +15,10 @@ config_connector.desc: config_connector.label: """PostgreSQL Connector Config""" +config_enable.desc: +"""Enable (true) or disable (false) this Kafka bridge.""" + +config_enable.label: +"""Enable or Disable""" + } From fc7bedb81a37eeae5b3d93f15280c4070f168324 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Fri, 24 Nov 2023 17:37:58 +0100 Subject: [PATCH 094/101] fix: remove duplicated entry --- apps/emqx_bridge/src/emqx_action_info.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index 74fd38811..f206c664d 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -79,7 +79,6 @@ hard_coded_action_info_modules_ee() -> emqx_bridge_kafka_action_info, emqx_bridge_mongodb_action_info, emqx_bridge_syskeeper_action_info, - emqx_bridge_syskeeper_action_info, emqx_bridge_pgsql_action_info, emqx_bridge_timescale_action_info, emqx_bridge_matrix_action_info From d03674a50549a2d7db63bdc5dfdd15377f85ad90 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Fri, 24 Nov 2023 17:59:56 +0100 Subject: [PATCH 095/101] fix: duplicate key in example --- .../src/emqx_bridge_pgsql.erl | 21 ++++++------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl index 2fe3960d5..4e05c2b1d 100644 --- a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl @@ -176,15 +176,18 @@ values({put, PostgreSQLType}) -> enable => true, connector => <<"my_pgsql_connector">>, resource_opts => #{ - health_check_interval => "32s" + batch_size => 1, + 
batch_time => <<"50ms">>, + inflight_window => 100, + max_buffer_bytes => <<"256MB">>, + request_ttl => <<"45s">>, + worker_pool_size => 16 } }, values({producer, PostgreSQLType}) ); values({producer, _PostgreSQLType}) -> #{ - <<"enable">> => true, - <<"connector">> => <<"connector_pgsql_test">>, <<"parameters">> => #{ <<"sql">> => << @@ -195,18 +198,6 @@ values({producer, _PostgreSQLType}) -> " TO_TIMESTAMP((${timestamp} :: bigint))\n" ")" >> - }, - <<"resource_opts">> => #{ - <<"batch_size">> => 1, - <<"batch_time">> => <<"0ms">>, - <<"health_check_interval">> => <<"15s">>, - <<"inflight_window">> => 100, - <<"max_buffer_bytes">> => <<"256MB">>, - <<"query_mode">> => <<"async">>, - <<"request_ttl">> => <<"45s">>, - <<"start_after_created">> => true, - <<"start_timeout">> => <<"5s">>, - <<"worker_pool_size">> => 16 } }. From f070d80b1a6b5283e7660b47a0f080ee7bb554fa Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Fri, 24 Nov 2023 18:50:52 +0100 Subject: [PATCH 096/101] fix: swagger examples for PostgreSQL, Matrix and Timescale --- .../src/emqx_bridge_matrix.erl | 2 +- .../src/emqx_bridge_pgsql.erl | 41 ++++--- .../src/emqx_bridge_timescale.erl | 2 +- .../emqx_postgresql_connector_schema.erl | 104 ++++-------------- 4 files changed, 48 insertions(+), 101 deletions(-) diff --git a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl index fb8ee9d4d..f74e18d3b 100644 --- a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl +++ b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl @@ -81,7 +81,7 @@ connector_examples(Method) -> #{ <<"matrix">> => #{ summary => <<"Matrix Connector">>, - value => emqx_postgresql_connector_schema:values({Method, connector}) + value => emqx_postgresql_connector_schema:values({Method, <<"matrix">>}) } } ]. diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl index 4e05c2b1d..534570ac9 100644 --- a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl @@ -18,7 +18,6 @@ roots/0, fields/1, desc/1, - values/2, fields/2 ]). @@ -133,8 +132,8 @@ bridge_v2_examples(Method) -> [ #{ <<"pgsql">> => #{ - summary => <<"PostgreSQL Producer Action">>, - value => values({Method, bridge_v2_producer}) + summary => <<"PostgreSQL Action">>, + value => values({Method, pgsql}) } } ]. @@ -143,7 +142,7 @@ conn_bridge_examples(Method) -> [ #{ <<"pgsql">> => #{ - summary => <<"PostgreSQL Producer Bridge">>, + summary => <<"PostgreSQL Bridge">>, value => values_conn_bridge_examples(Method, pgsql) } } @@ -160,21 +159,17 @@ values({get, PostgreSQLType}) -> } ] }, - values({post, PostgreSQLType}) - ); -values({post, PostgreSQLType}) -> - maps:merge( - #{ - name => <<"my_pgsql_action">>, - type => PostgreSQLType - }, values({put, PostgreSQLType}) ); +values({post, PostgreSQLType}) -> + values({put, PostgreSQLType}); values({put, PostgreSQLType}) -> maps:merge( #{ + name => <<"my_action">>, + type => PostgreSQLType, enable => true, - connector => <<"my_pgsql_connector">>, + connector => <<"my_connector">>, resource_opts => #{ batch_size => 1, batch_time => <<"50ms">>, @@ -184,9 +179,9 @@ values({put, PostgreSQLType}) -> worker_pool_size => 16 } }, - values({producer, PostgreSQLType}) + values(parameters) ); -values({producer, _PostgreSQLType}) -> +values(parameters) -> #{ <<"parameters">> => #{ <<"sql">> => @@ -201,6 +196,19 @@ values({producer, _PostgreSQLType}) -> } }. 
+values_conn_bridge_examples(get, Type) -> + maps:merge( + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + }, + values_conn_bridge_examples(post, Type) + ); values_conn_bridge_examples(_Method, Type) -> #{ enable => true, @@ -222,6 +230,3 @@ values_conn_bridge_examples(_Method, Type) -> max_buffer_bytes => ?DEFAULT_BUFFER_BYTES } }. - -values(Method, Type) -> - values_conn_bridge_examples(Method, Type). diff --git a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl index 759f69ed7..796d9d9f6 100644 --- a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl +++ b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl @@ -81,7 +81,7 @@ connector_examples(Method) -> #{ <<"timescale">> => #{ summary => <<"Timescale Connector">>, - value => emqx_postgresql_connector_schema:values({Method, connector}) + value => emqx_postgresql_connector_schema:values({Method, <<"timescale">>}) } } ]. diff --git a/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl b/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl index ee92c62fc..ffdc3771c 100644 --- a/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl +++ b/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl @@ -99,8 +99,8 @@ connector_examples(Method) -> [ #{ <<"pgsql">> => #{ - summary => <<"PostgreSQL Producer Connector">>, - value => values({Method, connector}) + summary => <<"PostgreSQL Connector">>, + value => values({Method, pgsql}) } } ]. @@ -119,93 +119,35 @@ values({get, PostgreSQLType}) -> }, values({post, PostgreSQLType}) ); -values({post, connector}) -> - maps:merge( - #{ - name => <<"my_pgsql_connector">>, - type => <<"pgsql">> - }, - values(common_config) - ); values({post, PostgreSQLType}) -> - maps:merge( - #{ - name => <<"my_pgsql_action">>, - type => <<"pgsql">> - }, - values({put, PostgreSQLType}) - ); -values({put, bridge_v2_producer}) -> - values(bridge_v2_producer); -values({put, connector}) -> - values(common_config); + values({put, PostgreSQLType}); values({put, PostgreSQLType}) -> - maps:merge(values(common_config), values(PostgreSQLType)); -values(bridge_v2_producer) -> maps:merge( #{ - enable => true, - connector => <<"my_pgsql_connector">>, - resource_opts => #{ - health_check_interval => "32s" - } + name => <<"my_action">>, + type => PostgreSQLType }, - values(producer) + values(common) ); -values(common_config) -> +values(common) -> #{ - authentication => #{ - mechanism => <<"plain">>, - username => <<"username">>, - password => <<"******">> + <<"database">> => <<"emqx_data">>, + <<"enable">> => true, + <<"password">> => <<"public">>, + <<"pool_size">> => 8, + <<"server">> => <<"127.0.0.1:5432">>, + <<"ssl">> => #{ + <<"ciphers">> => [], + <<"depth">> => 10, + <<"enable">> => false, + <<"hibernate_after">> => <<"5s">>, + <<"log_level">> => <<"notice">>, + <<"reuse_sessions">> => true, + <<"secure_renegotiate">> => true, + <<"verify">> => <<"verify_peer">>, + <<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>] }, - bootstrap_hosts => <<"localhost:9092">>, - connect_timeout => <<"5s">>, - enable => true, - metadata_request_timeout => <<"4s">>, - min_metadata_refresh_interval => <<"3s">>, - socket_opts => #{ - sndbuf => <<"1024KB">>, - recbuf => <<"1024KB">>, - nodelay => true, - tcp_keepalive => <<"none">> - } - }; -values(producer) -> - #{ - kafka => #{ - topic => <<"kafka-topic">>, - message => #{ - key => 
<<"${.clientid}">>, - value => <<"${.}">>, - timestamp => <<"${.timestamp}">> - }, - max_batch_bytes => <<"896KB">>, - compression => <<"no_compression">>, - partition_strategy => <<"random">>, - required_acks => <<"all_isr">>, - partition_count_refresh_interval => <<"60s">>, - kafka_headers => <<"${pub_props}">>, - kafka_ext_headers => [ - #{ - kafka_ext_header_key => <<"clientid">>, - kafka_ext_header_value => <<"${clientid}">> - }, - #{ - kafka_ext_header_key => <<"topic">>, - kafka_ext_header_value => <<"${topic}">> - } - ], - kafka_header_value_encode_mode => none, - max_inflight => 10, - buffer => #{ - mode => <<"hybrid">>, - per_partition_limit => <<"2GB">>, - segment_bytes => <<"100MB">>, - memory_overload_protection => true - } - }, - local_topic => <<"mqtt/local/topic">> + <<"username">> => <<"postgres">> }. desc("config_connector") -> From f79d38983dd287950cb57e4b3a2a9be72154e0d5 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Fri, 24 Nov 2023 18:56:57 +0100 Subject: [PATCH 097/101] docs: add changelog entry --- changes/ee/feat-12013.en.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 changes/ee/feat-12013.en.md diff --git a/changes/ee/feat-12013.en.md b/changes/ee/feat-12013.en.md new file mode 100644 index 000000000..b72b7b5be --- /dev/null +++ b/changes/ee/feat-12013.en.md @@ -0,0 +1 @@ +The bridges for PostgreSQL, Timescale and Matrix have been split so they are available via the connectors and actions APIs. They are still backwards compatible with the old bridge API. From 30e248061fcb6677534de083bbcb7722079074bc Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Fri, 24 Nov 2023 20:07:37 +0100 Subject: [PATCH 098/101] fix: problems found by @thalesmg in code review Co-authored-by: Thales Macedo Garitezi --- apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src | 2 +- apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src | 2 +- rel/i18n/emqx_postgresql_connector_schema.hocon | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src index 9175e5d4e..479aa13df 100644 --- a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src +++ b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src @@ -7,7 +7,7 @@ stdlib, emqx_resource ]}, - {env, [{emqx_action_info_module, emqx_bridge_matrix_action_info}]}, + {env, [{emqx_action_info_modules, [emqx_bridge_matrix_action_info]}]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src index 58d093f6f..614747254 100644 --- a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src @@ -8,7 +8,7 @@ emqx_resource, emqx_postgresql ]}, - {env, [{emqx_action_info_module, emqx_bridge_pgsql_action_info}]}, + {env, [{emqx_action_info_modules, [emqx_bridge_pgsql_action_info]}]}, {modules, []}, {links, []} ]}. 
diff --git a/rel/i18n/emqx_postgresql_connector_schema.hocon b/rel/i18n/emqx_postgresql_connector_schema.hocon index 056e66d09..4546b86ef 100644 --- a/rel/i18n/emqx_postgresql_connector_schema.hocon +++ b/rel/i18n/emqx_postgresql_connector_schema.hocon @@ -16,7 +16,7 @@ config_connector.label: """PostgreSQL Connector Config""" config_enable.desc: -"""Enable (true) or disable (false) this Kafka bridge.""" +"""Enable (true) or disable (false) this PostgreSQL bridge.""" config_enable.label: """Enable or Disable""" From 66945dcc5c54ca10b335fc655dffa9ff08c84a32 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Fri, 24 Nov 2023 20:31:29 +0100 Subject: [PATCH 099/101] fix: address more comments from @thalesmg --- apps/emqx_bridge/src/emqx_action_info.erl | 6 +++--- apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl | 3 +-- .../src/schema/emqx_connector_ee_schema.erl | 16 ++++++++-------- .../src/schema/emqx_connector_schema.erl | 6 +++--- .../schema/emqx_postgresql_connector_schema.erl | 7 +------ rel/i18n/emqx_postgresql_connector_schema.hocon | 6 ------ 6 files changed, 16 insertions(+), 28 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index f206c664d..4f195b417 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -77,11 +77,11 @@ hard_coded_action_info_modules_ee() -> emqx_bridge_confluent_producer_action_info, emqx_bridge_gcp_pubsub_producer_action_info, emqx_bridge_kafka_action_info, + emqx_bridge_matrix_action_info, emqx_bridge_mongodb_action_info, - emqx_bridge_syskeeper_action_info, emqx_bridge_pgsql_action_info, - emqx_bridge_timescale_action_info, - emqx_bridge_matrix_action_info + emqx_bridge_syskeeper_action_info, + emqx_bridge_timescale_action_info ]. -else. hard_coded_action_info_modules_ee() -> diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl index 534570ac9..949016336 100644 --- a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl @@ -1,5 +1,5 @@ %%-------------------------------------------------------------------- -%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- -module(emqx_bridge_pgsql). 
@@ -68,7 +68,6 @@ fields(action_parameters) -> emqx_connector_schema_lib:prepare_statement_fields(); fields(pgsql_action) -> emqx_bridge_v2_schema:make_producer_action_schema(hoconsc:ref(?MODULE, action_parameters)); -%% TODO: All of these needs to be fixed fields("put_bridge_v2") -> fields(pgsql_action); fields("get_bridge_v2") -> diff --git a/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl index c2ce2568c..389623b0a 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl @@ -29,18 +29,18 @@ resource_type(gcp_pubsub_producer) -> emqx_bridge_gcp_pubsub_impl_producer; resource_type(kafka_producer) -> emqx_bridge_kafka_impl_producer; +resource_type(matrix) -> + emqx_postgresql; resource_type(mongodb) -> emqx_bridge_mongodb_connector; +resource_type(pgsql) -> + emqx_postgresql; resource_type(syskeeper_forwarder) -> emqx_bridge_syskeeper_connector; resource_type(syskeeper_proxy) -> emqx_bridge_syskeeper_proxy_server; -resource_type(pgsql) -> - emqx_postgresql; resource_type(timescale) -> emqx_postgresql; -resource_type(matrix) -> - emqx_postgresql; resource_type(Type) -> error({unknown_connector_type, Type}). @@ -159,12 +159,12 @@ schema_modules() -> emqx_bridge_confluent_producer, emqx_bridge_gcp_pubsub_producer_schema, emqx_bridge_kafka, + emqx_bridge_matrix, emqx_bridge_mongodb, emqx_bridge_syskeeper_connector, emqx_bridge_syskeeper_proxy, - emqx_postgresql_connector_schema, emqx_bridge_timescale, - emqx_bridge_matrix + emqx_postgresql_connector_schema ]. api_schemas(Method) -> @@ -183,12 +183,12 @@ api_schemas(Method) -> Method ++ "_connector" ), api_ref(emqx_bridge_kafka, <<"kafka_producer">>, Method ++ "_connector"), + api_ref(emqx_bridge_matrix, <<"matrix">>, Method ++ "_connector"), api_ref(emqx_bridge_mongodb, <<"mongodb">>, Method ++ "_connector"), api_ref(emqx_bridge_syskeeper_connector, <<"syskeeper_forwarder">>, Method), api_ref(emqx_bridge_syskeeper_proxy, <<"syskeeper_proxy">>, Method), - api_ref(emqx_postgresql_connector_schema, <<"pgsql">>, Method ++ "_connector"), api_ref(emqx_bridge_timescale, <<"timescale">>, Method ++ "_connector"), - api_ref(emqx_bridge_matrix, <<"matrix">>, Method ++ "_connector") + api_ref(emqx_postgresql_connector_schema, <<"pgsql">>, Method ++ "_connector") ]. 
api_ref(Module, Type, Method) -> diff --git a/apps/emqx_connector/src/schema/emqx_connector_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_schema.erl index cd99f0fe6..a7de0cf52 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_schema.erl @@ -70,12 +70,12 @@ connector_type_to_bridge_types(azure_event_hub_producer) -> [azure_event_hub_pro connector_type_to_bridge_types(confluent_producer) -> [confluent_producer]; connector_type_to_bridge_types(gcp_pubsub_producer) -> [gcp_pubsub, gcp_pubsub_producer]; connector_type_to_bridge_types(kafka_producer) -> [kafka, kafka_producer]; +connector_type_to_bridge_types(matrix) -> [matrix]; connector_type_to_bridge_types(mongodb) -> [mongodb, mongodb_rs, mongodb_sharded, mongodb_single]; +connector_type_to_bridge_types(pgsql) -> [pgsql]; connector_type_to_bridge_types(syskeeper_forwarder) -> [syskeeper_forwarder]; connector_type_to_bridge_types(syskeeper_proxy) -> []; -connector_type_to_bridge_types(pgsql) -> [pgsql]; -connector_type_to_bridge_types(timescale) -> [timescale]; -connector_type_to_bridge_types(matrix) -> [matrix]. +connector_type_to_bridge_types(timescale) -> [timescale]. actions_config_name() -> <<"actions">>. diff --git a/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl b/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl index ffdc3771c..d9ccbdc79 100644 --- a/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl +++ b/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl @@ -43,12 +43,7 @@ fields("connection_fields") -> adjust_fields(emqx_connector_schema_lib:relational_db_fields()) ++ emqx_connector_schema_lib:ssl_fields(); fields("config_connector") -> - fields("connection_fields") ++ fields(enable_and_desc); -fields(enable_and_desc) -> - [ - {enable, hoconsc:mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, - {description, emqx_schema:description_schema()} - ]; + fields("connection_fields") ++ emqx_connector_schema:common_fields(); fields(config) -> fields("config_connector") ++ fields(action); diff --git a/rel/i18n/emqx_postgresql_connector_schema.hocon b/rel/i18n/emqx_postgresql_connector_schema.hocon index 4546b86ef..8ecfb958a 100644 --- a/rel/i18n/emqx_postgresql_connector_schema.hocon +++ b/rel/i18n/emqx_postgresql_connector_schema.hocon @@ -15,10 +15,4 @@ config_connector.desc: config_connector.label: """PostgreSQL Connector Config""" -config_enable.desc: -"""Enable (true) or disable (false) this PostgreSQL bridge.""" - -config_enable.label: -"""Enable or Disable""" - } From c6c1d886f0007ca360bb87f9edfc3e708bfeff24 Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Fri, 24 Nov 2023 21:19:26 +0100 Subject: [PATCH 100/101] fix: make pgsql action schema properly --- apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl index 949016336..4c0efe269 100644 --- a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl @@ -67,7 +67,15 @@ fields(action_parameters) -> ] ++ emqx_connector_schema_lib:prepare_statement_fields(); fields(pgsql_action) -> - emqx_bridge_v2_schema:make_producer_action_schema(hoconsc:ref(?MODULE, action_parameters)); + emqx_bridge_v2_schema:make_producer_action_schema( + hoconsc:mk( + hoconsc:ref(?MODULE, action_parameters), + #{ + 
required => true, + desc => ?DESC("action_parameters") + } + ) + ); fields("put_bridge_v2") -> fields(pgsql_action); fields("get_bridge_v2") -> From c85004b7ef6fb05b0d490a1584fa32105d82968e Mon Sep 17 00:00:00 2001 From: Kjell Winblad Date: Fri, 24 Nov 2023 21:22:27 +0100 Subject: [PATCH 101/101] chore: remove obsolete TODO --- .../src/schema/emqx_postgresql_connector_schema.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl b/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl index d9ccbdc79..74591beee 100644 --- a/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl +++ b/apps/emqx_postgresql/src/schema/emqx_postgresql_connector_schema.erl @@ -58,7 +58,6 @@ fields(action) -> )}; fields(pgsql_action) -> emqx_bridge_v2_schema:make_producer_action_schema(hoconsc:ref(?MODULE, action_parameters)); -%% TODO: All of these needs to be fixed fields("put_bridge_v2") -> fields(pgsql_action); fields("get_bridge_v2") ->
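
As a reading aid for the split described in changes/ee/feat-12013.en.md above, the following is a minimal sketch of what a post-split PostgreSQL configuration could look like in HOCON. The connector/action names and the connection fields are taken from the schema examples in this series (values(common) in emqx_postgresql_connector_schema and the removed bridge_v2 example values); the "parameters"/"sql" keys and the exact nesting are assumptions for illustration only, and the authoritative field set is whatever emqx_postgresql_connector_schema and emqx_bridge_pgsql define.

    # Sketch only: key names below the "connectors"/"actions" roots are illustrative,
    # not taken verbatim from the schemas in this series.
    connectors {
      pgsql {
        my_pgsql_connector {
          enable = true
          server = "127.0.0.1:5432"
          database = "emqx_data"
          username = "postgres"
          password = "public"
          pool_size = 8
          ssl {enable = false}
        }
      }
    }

    actions {
      pgsql {
        my_action {
          enable = true
          connector = "my_pgsql_connector"
          # "parameters" and "sql" are assumed here; see fields(action_parameters)
          # in emqx_bridge_pgsql for the actual action fields.
          parameters {
            sql = "INSERT INTO mqtt_msg(msgid, topic, payload) VALUES (${id}, ${topic}, ${payload})"
          }
        }
      }
    }

Per the changelog entry, configurations written against the old bridge API are expected to keep working through the v1 compatibility layer alongside the split layout sketched above.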