feat: add connector schema scaffold and break out Kafka connector
This commit is the beginning of an effort to split bridges into a connector part and a bridge part. Several bridges should be able to share a connector pool defined by a single connector. It should be possible to enable and disable connectors, similar to how bridges can be enabled and disabled. There should also be an API for checking the status of a connector, and for adding/editing/deleting connectors, similar to the current bridge API. Issues: https://emqx.atlassian.net/browse/EMQX-10805
This commit is contained in:
parent
eddd2c2a99
commit
f7984be946
|
@ -325,19 +325,20 @@ init_load(SchemaMod, Conf) when is_list(Conf) orelse is_binary(Conf) ->
|
||||||
ok = save_schema_mod_and_names(SchemaMod),
|
ok = save_schema_mod_and_names(SchemaMod),
|
||||||
HasDeprecatedFile = has_deprecated_file(),
|
HasDeprecatedFile = has_deprecated_file(),
|
||||||
RawConf0 = load_config_files(HasDeprecatedFile, Conf),
|
RawConf0 = load_config_files(HasDeprecatedFile, Conf),
|
||||||
warning_deprecated_root_key(RawConf0),
|
RawConf1 = emqx_connector_schema:transform_old_style_bridges_to_connector_and_actions(RawConf0),
|
||||||
RawConf1 =
|
warning_deprecated_root_key(RawConf1),
|
||||||
|
RawConf2 =
|
||||||
case HasDeprecatedFile of
|
case HasDeprecatedFile of
|
||||||
true ->
|
true ->
|
||||||
overlay_v0(SchemaMod, RawConf0);
|
overlay_v0(SchemaMod, RawConf1);
|
||||||
false ->
|
false ->
|
||||||
overlay_v1(SchemaMod, RawConf0)
|
overlay_v1(SchemaMod, RawConf1)
|
||||||
end,
|
end,
|
||||||
RawConf = fill_defaults_for_all_roots(SchemaMod, RawConf1),
|
RawConf3 = fill_defaults_for_all_roots(SchemaMod, RawConf2),
|
||||||
%% check configs against the schema
|
%% check configs against the schema
|
||||||
{AppEnvs, CheckedConf} = check_config(SchemaMod, RawConf, #{}),
|
{AppEnvs, CheckedConf} = check_config(SchemaMod, RawConf3, #{}),
|
||||||
save_to_app_env(AppEnvs),
|
save_to_app_env(AppEnvs),
|
||||||
ok = save_to_config_map(CheckedConf, RawConf),
|
ok = save_to_config_map(CheckedConf, RawConf3),
|
||||||
maybe_init_default_zone(),
|
maybe_init_default_zone(),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
|
|
@ -65,6 +65,8 @@
|
||||||
import_config/1
|
import_config/1
|
||||||
]).
|
]).
|
||||||
|
|
||||||
|
-export([query_opts/1]).
|
||||||
|
|
||||||
-define(EGRESS_DIR_BRIDGES(T),
|
-define(EGRESS_DIR_BRIDGES(T),
|
||||||
T == webhook;
|
T == webhook;
|
||||||
T == mysql;
|
T == mysql;
|
||||||
|
|
|
@ -28,21 +28,29 @@
|
||||||
|
|
||||||
-define(TOP_LELVE_HDLR_PATH, (emqx_bridge:config_key_path())).
|
-define(TOP_LELVE_HDLR_PATH, (emqx_bridge:config_key_path())).
|
||||||
-define(LEAF_NODE_HDLR_PATH, (emqx_bridge:config_key_path() ++ ['?', '?'])).
|
-define(LEAF_NODE_HDLR_PATH, (emqx_bridge:config_key_path() ++ ['?', '?'])).
|
||||||
|
-define(TOP_LELVE_HDLR_PATH_BRIDGE_V2, (emqx_bridge_v2:config_key_path())).
|
||||||
|
-define(LEAF_NODE_HDLR_PATH_BRIDGE_V2, (emqx_bridge_v2:config_key_path() ++ ['?', '?'])).
|
||||||
|
|
||||||
start(_StartType, _StartArgs) ->
|
start(_StartType, _StartArgs) ->
|
||||||
{ok, Sup} = emqx_bridge_sup:start_link(),
|
{ok, Sup} = emqx_bridge_sup:start_link(),
|
||||||
ok = ensure_enterprise_schema_loaded(),
|
ok = ensure_enterprise_schema_loaded(),
|
||||||
ok = emqx_bridge:load(),
|
ok = emqx_bridge:load(),
|
||||||
|
ok = emqx_bridge_v2:load(),
|
||||||
ok = emqx_bridge:load_hook(),
|
ok = emqx_bridge:load_hook(),
|
||||||
ok = emqx_config_handler:add_handler(?LEAF_NODE_HDLR_PATH, ?MODULE),
|
ok = emqx_config_handler:add_handler(?LEAF_NODE_HDLR_PATH, ?MODULE),
|
||||||
ok = emqx_config_handler:add_handler(?TOP_LELVE_HDLR_PATH, emqx_bridge),
|
ok = emqx_config_handler:add_handler(?TOP_LELVE_HDLR_PATH, emqx_bridge),
|
||||||
|
ok = emqx_config_handler:add_handler(?LEAF_NODE_HDLR_PATH_BRIDGE_V2, emqx_bridge_v2),
|
||||||
|
ok = emqx_config_handler:add_handler(?TOP_LELVE_HDLR_PATH_BRIDGE_V2, emqx_bridge_v2),
|
||||||
?tp(emqx_bridge_app_started, #{}),
|
?tp(emqx_bridge_app_started, #{}),
|
||||||
{ok, Sup}.
|
{ok, Sup}.
|
||||||
|
|
||||||
stop(_State) ->
|
stop(_State) ->
|
||||||
emqx_conf:remove_handler(?LEAF_NODE_HDLR_PATH),
|
emqx_conf:remove_handler(?LEAF_NODE_HDLR_PATH),
|
||||||
emqx_conf:remove_handler(?TOP_LELVE_HDLR_PATH),
|
emqx_conf:remove_handler(?TOP_LELVE_HDLR_PATH),
|
||||||
|
emqx_conf:remove_handler(emqx_bridge_v2:config_key_path()),
|
||||||
|
emqx_conf:remove_handler(?LEAF_NODE_HDLR_PATH_BRIDGE_V2),
|
||||||
ok = emqx_bridge:unload(),
|
ok = emqx_bridge:unload(),
|
||||||
|
ok = emqx_bridge_v2:unload(),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
-if(?EMQX_RELEASE_EDITION == ee).
|
-if(?EMQX_RELEASE_EDITION == ee).
|
||||||
|
|
|
@ -0,0 +1,484 @@
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||||
|
%%
|
||||||
|
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
%% you may not use this file except in compliance with the License.
|
||||||
|
%% You may obtain a copy of the License at
|
||||||
|
%%
|
||||||
|
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
%%
|
||||||
|
%% Unless required by applicable law or agreed to in writing, software
|
||||||
|
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
%% See the License for the specific language governing permissions and
|
||||||
|
%% limitations under the License.
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
-module(emqx_bridge_v2).
|
||||||
|
|
||||||
|
-behaviour(emqx_config_handler).
|
||||||
|
% -behaviour(emqx_config_backup).
|
||||||
|
|
||||||
|
-include_lib("emqx/include/emqx.hrl").
|
||||||
|
-include_lib("emqx/include/logger.hrl").
|
||||||
|
-include_lib("emqx/include/emqx_hooks.hrl").
|
||||||
|
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||||
|
|
||||||
|
-export([
|
||||||
|
load/0,
|
||||||
|
unload/0,
|
||||||
|
is_bridge_v2_type/1,
|
||||||
|
id/2,
|
||||||
|
id/3,
|
||||||
|
parse_id/1,
|
||||||
|
send_message/4,
|
||||||
|
bridge_v2_type_to_connector_type/1,
|
||||||
|
is_bridge_v2_id/1,
|
||||||
|
extract_connector_id_from_bridge_v2_id/1,
|
||||||
|
is_bridge_v2_installed_in_connector_state/2,
|
||||||
|
get_channels_for_connector/1
|
||||||
|
]).
|
||||||
|
|
||||||
|
%% CRUD API
|
||||||
|
|
||||||
|
-export([
|
||||||
|
list/0,
|
||||||
|
lookup/1,
|
||||||
|
lookup/2,
|
||||||
|
get_metrics/2,
|
||||||
|
config_key_path/0,
|
||||||
|
disable_enable/3,
|
||||||
|
create/3,
|
||||||
|
remove/2,
|
||||||
|
health_check/2
|
||||||
|
]).
|
||||||
|
|
||||||
|
%% Config Update Handler API
|
||||||
|
|
||||||
|
-export([
|
||||||
|
post_config_update/5
|
||||||
|
]).
|
||||||
|
|
||||||
|
-define(ROOT_KEY, bridges_v2).
|
||||||
|
|
||||||
|
get_channels_for_connector(ConnectorId) ->
|
||||||
|
{ConnectorType, ConnectorName} = emqx_connector_resource:parse_connector_id(ConnectorId),
|
||||||
|
RootConf = maps:keys(emqx:get_config([?ROOT_KEY], #{})),
|
||||||
|
RelevantBridgeV2Types = [
|
||||||
|
Type
|
||||||
|
|| Type <- RootConf,
|
||||||
|
bridge_v2_type_to_connector_type(Type) =:= ConnectorType
|
||||||
|
],
|
||||||
|
lists:flatten([
|
||||||
|
get_channels_for_connector(ConnectorName, BridgeV2Type)
|
||||||
|
|| BridgeV2Type <- RelevantBridgeV2Types
|
||||||
|
]).
|
||||||
|
|
||||||
|
get_channels_for_connector(ConnectorName, BridgeV2Type) ->
|
||||||
|
BridgeV2s = emqx:get_config([?ROOT_KEY, BridgeV2Type], #{}),
|
||||||
|
[
|
||||||
|
{id(BridgeV2Type, Name, ConnectorName), Conf}
|
||||||
|
|| {Name, Conf} <- maps:to_list(BridgeV2s),
|
||||||
|
bin(ConnectorName) =:= maps:get(connector, Conf, no_name)
|
||||||
|
].
|
||||||
|
|
||||||
|
load() ->
|
||||||
|
% Bridge_V2s = emqx:get_config([?ROOT_KEY], #{}),
|
||||||
|
% lists:foreach(
|
||||||
|
% fun({Type, NamedConf}) ->
|
||||||
|
% lists:foreach(
|
||||||
|
% fun({Name, Conf}) ->
|
||||||
|
% install_bridge_v2(
|
||||||
|
% Type,
|
||||||
|
% Name,
|
||||||
|
% Conf
|
||||||
|
% )
|
||||||
|
% end,
|
||||||
|
% maps:to_list(NamedConf)
|
||||||
|
% )
|
||||||
|
% end,
|
||||||
|
% maps:to_list(Bridge_V2s)
|
||||||
|
% ),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
unload() ->
|
||||||
|
% Bridge_V2s = emqx:get_config([?ROOT_KEY], #{}),
|
||||||
|
% lists:foreach(
|
||||||
|
% fun({Type, NamedConf}) ->
|
||||||
|
% lists:foreach(
|
||||||
|
% fun({Name, Conf}) ->
|
||||||
|
% uninstall_bridge_v2(
|
||||||
|
% Type,
|
||||||
|
% Name,
|
||||||
|
% Conf
|
||||||
|
% )
|
||||||
|
% end,
|
||||||
|
% maps:to_list(NamedConf)
|
||||||
|
% )
|
||||||
|
% end,
|
||||||
|
% maps:to_list(Bridge_V2s)
|
||||||
|
% ),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
install_bridge_v2(
|
||||||
|
_BridgeType,
|
||||||
|
_BridgeName,
|
||||||
|
#{enable := false}
|
||||||
|
) ->
|
||||||
|
ok;
|
||||||
|
install_bridge_v2(
|
||||||
|
BridgeV2Type,
|
||||||
|
BridgeName,
|
||||||
|
#{connector := ConnectorName} = Config
|
||||||
|
) ->
|
||||||
|
CreationOpts = emqx_resource:fetch_creation_opts(Config),
|
||||||
|
BridgeV2Id = id(BridgeV2Type, BridgeName, ConnectorName),
|
||||||
|
%% Create metrics for Bridge V2
|
||||||
|
ok = emqx_resource:create_metrics(BridgeV2Id),
|
||||||
|
%% We might need to create buffer workers for Bridge V2
|
||||||
|
case get_query_mode(BridgeV2Type, Config) of
|
||||||
|
%% the Bridge V2 has built-in buffer, so there is no need for resource workers
|
||||||
|
simple_sync ->
|
||||||
|
ok;
|
||||||
|
simple_async ->
|
||||||
|
ok;
|
||||||
|
%% The Bridge V2 is a consumer Bridge V2, so there is no need for resource workers
|
||||||
|
no_queries ->
|
||||||
|
ok;
|
||||||
|
_ ->
|
||||||
|
%% start resource workers as the query type requires them
|
||||||
|
ok = emqx_resource_buffer_worker_sup:start_workers(BridgeV2Id, CreationOpts)
|
||||||
|
end,
|
||||||
|
%% If there is a running connector, we need to install the Bridge V2 in it
|
||||||
|
ConnectorId = emqx_connector_resource:resource_id(
|
||||||
|
bridge_v2_type_to_connector_type(BridgeV2Type), ConnectorName
|
||||||
|
),
|
||||||
|
emqx_resource_manager:add_channel(ConnectorId, BridgeV2Id, Config),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
uninstall_bridge_v2(
|
||||||
|
_BridgeType,
|
||||||
|
_BridgeName,
|
||||||
|
#{enable := false}
|
||||||
|
) ->
|
||||||
|
%% Already not installed
|
||||||
|
ok;
|
||||||
|
uninstall_bridge_v2(
|
||||||
|
BridgeV2Type,
|
||||||
|
BridgeName,
|
||||||
|
#{connector := ConnectorName} = Config
|
||||||
|
) ->
|
||||||
|
BridgeV2Id = id(BridgeV2Type, BridgeName, ConnectorName),
|
||||||
|
CreationOpts = emqx_resource:fetch_creation_opts(Config),
|
||||||
|
ok = emqx_resource_buffer_worker_sup:stop_workers(BridgeV2Id, CreationOpts),
|
||||||
|
ok = emqx_resource:clear_metrics(BridgeV2Id),
|
||||||
|
%% Deinstall from connector
|
||||||
|
ConnectorId = emqx_connector_resource:resource_id(
|
||||||
|
bridge_v2_type_to_connector_type(BridgeV2Type), ConnectorName
|
||||||
|
),
|
||||||
|
emqx_resource_manager:remove_channel(ConnectorId, BridgeV2Id).
|
||||||
|
|
||||||
|
get_query_mode(BridgeV2Type, Config) ->
|
||||||
|
CreationOpts = emqx_resource:fetch_creation_opts(Config),
|
||||||
|
ResourceType = emqx_bridge_resource:bridge_to_resource_type(BridgeV2Type),
|
||||||
|
emqx_resource:query_mode(ResourceType, Config, CreationOpts).
|
||||||
|
|
||||||
|
send_message(BridgeType, BridgeName, Message, QueryOpts0) ->
|
||||||
|
case lookup(BridgeType, BridgeName) of
|
||||||
|
#{enable := true} = Config ->
|
||||||
|
do_send_msg_with_enabled_config(BridgeType, BridgeName, Message, QueryOpts0, Config);
|
||||||
|
#{enable := false} ->
|
||||||
|
{error, bridge_stopped};
|
||||||
|
Error ->
|
||||||
|
Error
|
||||||
|
end.
|
||||||
|
|
||||||
|
health_check(BridgeType, BridgeName) ->
|
||||||
|
case lookup(BridgeType, BridgeName) of
|
||||||
|
#{
|
||||||
|
enable := true,
|
||||||
|
connector := ConnectorName
|
||||||
|
} ->
|
||||||
|
ConnectorId = emqx_connector_resource:resource_id(
|
||||||
|
bridge_v2_type_to_connector_type(BridgeType), ConnectorName
|
||||||
|
),
|
||||||
|
emqx_resource_manager:channel_health_check(
|
||||||
|
ConnectorId, id(BridgeType, BridgeName, ConnectorName)
|
||||||
|
);
|
||||||
|
#{enable := false} ->
|
||||||
|
{error, bridge_stopped};
|
||||||
|
Error ->
|
||||||
|
Error
|
||||||
|
end.
|
||||||
|
|
||||||
|
% do_send_msg_with_enabled_config(BridgeType, BridgeName, Message, QueryOpts0, Config) ->
|
||||||
|
% BridgeV2Id = emqx_bridge_v2:id(BridgeType, BridgeName),
|
||||||
|
% ConnectorResourceId = emqx_bridge_v2:extract_connector_id_from_bridge_v2_id(BridgeV2Id),
|
||||||
|
% try
|
||||||
|
% case emqx_resource_manager:maybe_install_bridge_v2(ConnectorResourceId, BridgeV2Id) of
|
||||||
|
% ok ->
|
||||||
|
% do_send_msg_after_bridge_v2_installed(
|
||||||
|
% BridgeType,
|
||||||
|
% BridgeName,
|
||||||
|
% BridgeV2Id,
|
||||||
|
% Message,
|
||||||
|
% QueryOpts0,
|
||||||
|
% Config
|
||||||
|
% );
|
||||||
|
% InstallError ->
|
||||||
|
% throw(InstallError)
|
||||||
|
% end
|
||||||
|
% catch
|
||||||
|
% Error:Reason:Stack ->
|
||||||
|
% Msg = iolist_to_binary(
|
||||||
|
% io_lib:format(
|
||||||
|
% "Failed to install bridge_v2 ~p in connector ~p: ~p",
|
||||||
|
% [BridgeV2Id, ConnectorResourceId, Reason]
|
||||||
|
% )
|
||||||
|
% ),
|
||||||
|
% ?SLOG(error, #{
|
||||||
|
% msg => Msg,
|
||||||
|
% error => Error,
|
||||||
|
% reason => Reason,
|
||||||
|
% stacktrace => Stack
|
||||||
|
% })
|
||||||
|
% end.
|
||||||
|
|
||||||
|
do_send_msg_with_enabled_config(
|
||||||
|
BridgeType, BridgeName, Message, QueryOpts0, Config
|
||||||
|
) ->
|
||||||
|
QueryMode = get_query_mode(BridgeType, Config),
|
||||||
|
QueryOpts = maps:merge(
|
||||||
|
emqx_bridge:query_opts(Config),
|
||||||
|
QueryOpts0#{
|
||||||
|
query_mode => QueryMode
|
||||||
|
}
|
||||||
|
),
|
||||||
|
BridgeV2Id = emqx_bridge_v2:id(BridgeType, BridgeName),
|
||||||
|
emqx_resource:query(BridgeV2Id, {BridgeV2Id, Message}, QueryOpts).
|
||||||
|
|
||||||
|
parse_id(Id) ->
|
||||||
|
case binary:split(Id, <<":">>, [global]) of
|
||||||
|
[Type, Name] ->
|
||||||
|
{Type, Name};
|
||||||
|
[<<"bridge_v2">>, Type, Name | _] ->
|
||||||
|
{Type, Name};
|
||||||
|
_X ->
|
||||||
|
error({error, iolist_to_binary(io_lib:format("Invalid id: ~p", [Id]))})
|
||||||
|
end.
|
||||||
|
|
||||||
|
id(BridgeType, BridgeName) ->
|
||||||
|
case lookup(BridgeType, BridgeName) of
|
||||||
|
#{connector := ConnectorName} ->
|
||||||
|
id(BridgeType, BridgeName, ConnectorName);
|
||||||
|
Error ->
|
||||||
|
error(Error)
|
||||||
|
end.
|
||||||
|
|
||||||
|
id(BridgeType, BridgeName, ConnectorName) ->
|
||||||
|
ConnectorType = bin(bridge_v2_type_to_connector_type(BridgeType)),
|
||||||
|
<<"bridge_v2:", (bin(BridgeType))/binary, ":", (bin(BridgeName))/binary, ":connector:",
|
||||||
|
(bin(ConnectorType))/binary, ":", (bin(ConnectorName))/binary>>.
|
||||||
|
|
||||||
|
bridge_v2_type_to_connector_type(kafka) ->
|
||||||
|
kafka.
|
||||||
|
|
||||||
|
is_bridge_v2_type(kafka) -> true;
|
||||||
|
is_bridge_v2_type(_) -> false.
|
||||||
|
|
||||||
|
is_bridge_v2_id(<<"bridge_v2:", _/binary>>) -> true;
|
||||||
|
is_bridge_v2_id(_) -> false.
|
||||||
|
|
||||||
|
extract_connector_id_from_bridge_v2_id(Id) ->
|
||||||
|
case binary:split(Id, <<":">>, [global]) of
|
||||||
|
[<<"bridge_v2">>, _Type, _Name, <<"connector">>, ConnectorType, ConnecorName] ->
|
||||||
|
<<"connector:", ConnectorType/binary, ":", ConnecorName/binary>>;
|
||||||
|
_X ->
|
||||||
|
error({error, iolist_to_binary(io_lib:format("Invalid bridge V2 ID: ~p", [Id]))})
|
||||||
|
end.
|
||||||
|
|
||||||
|
bin(Bin) when is_binary(Bin) -> Bin;
|
||||||
|
bin(Str) when is_list(Str) -> list_to_binary(Str);
|
||||||
|
bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8).
|
||||||
|
|
||||||
|
%% Basic CRUD Operations
|
||||||
|
|
||||||
|
list() ->
|
||||||
|
maps:fold(
|
||||||
|
fun(Type, NameAndConf, Bridges) ->
|
||||||
|
maps:fold(
|
||||||
|
fun(Name, RawConf, Acc) ->
|
||||||
|
[
|
||||||
|
#{
|
||||||
|
type => Type,
|
||||||
|
name => Name,
|
||||||
|
raw_config => RawConf
|
||||||
|
}
|
||||||
|
| Acc
|
||||||
|
]
|
||||||
|
end,
|
||||||
|
Bridges,
|
||||||
|
NameAndConf
|
||||||
|
)
|
||||||
|
end,
|
||||||
|
[],
|
||||||
|
emqx:get_raw_config([?ROOT_KEY], #{})
|
||||||
|
).
|
||||||
|
|
||||||
|
lookup(Id) ->
|
||||||
|
{Type, Name} = parse_id(Id),
|
||||||
|
lookup(Type, Name).
|
||||||
|
|
||||||
|
lookup(Type, Name) ->
|
||||||
|
case emqx:get_config([?ROOT_KEY, Type, Name], not_found) of
|
||||||
|
not_found ->
|
||||||
|
{error, bridge_not_found};
|
||||||
|
Config ->
|
||||||
|
Config
|
||||||
|
end.
|
||||||
|
|
||||||
|
get_metrics(Type, Name) ->
|
||||||
|
emqx_resource:get_metrics(id(Type, Name)).
|
||||||
|
|
||||||
|
config_key_path() ->
|
||||||
|
[?ROOT_KEY].
|
||||||
|
|
||||||
|
disable_enable(Action, BridgeType, BridgeName) when
|
||||||
|
Action =:= disable; Action =:= enable
|
||||||
|
->
|
||||||
|
emqx_conf:update(
|
||||||
|
config_key_path() ++ [BridgeType, BridgeName],
|
||||||
|
{Action, BridgeType, BridgeName},
|
||||||
|
#{override_to => cluster}
|
||||||
|
).
|
||||||
|
|
||||||
|
create(BridgeType, BridgeName, RawConf) ->
|
||||||
|
?SLOG(debug, #{
|
||||||
|
brige_action => create,
|
||||||
|
bridge_version => 2,
|
||||||
|
bridge_type => BridgeType,
|
||||||
|
bridge_name => BridgeName,
|
||||||
|
bridge_raw_config => emqx_utils:redact(RawConf)
|
||||||
|
}),
|
||||||
|
emqx_conf:update(
|
||||||
|
config_key_path() ++ [BridgeType, BridgeName],
|
||||||
|
RawConf,
|
||||||
|
#{override_to => cluster}
|
||||||
|
).
|
||||||
|
|
||||||
|
remove(BridgeType, BridgeName) ->
|
||||||
|
?SLOG(debug, #{
|
||||||
|
brige_action => remove,
|
||||||
|
bridge_version => 2,
|
||||||
|
bridge_type => BridgeType,
|
||||||
|
bridge_name => BridgeName
|
||||||
|
}),
|
||||||
|
emqx_conf:remove(
|
||||||
|
config_key_path() ++ [BridgeType, BridgeName],
|
||||||
|
#{override_to => cluster}
|
||||||
|
).
|
||||||
|
|
||||||
|
%% This top level handler will be triggered when the bridges_v2 path is updated
|
||||||
|
%% with calls to emqx_conf:update([bridges_v2], BridgesConf, #{}).
|
||||||
|
%%
|
||||||
|
%% A public API that can trigger this is:
|
||||||
|
%% bin/emqx ctl conf load data/configs/cluster.hocon
|
||||||
|
post_config_update([?ROOT_KEY], _Req, NewConf, OldConf, _AppEnv) ->
|
||||||
|
#{added := Added, removed := Removed, changed := Updated} =
|
||||||
|
diff_confs(NewConf, OldConf),
|
||||||
|
%% The config update will be failed if any task in `perform_bridge_changes` failed.
|
||||||
|
RemoveFun = fun uninstall_bridge_v2/3,
|
||||||
|
CreateFun = fun install_bridge_v2/3,
|
||||||
|
UpdateFun = fun(Type, Name, {OldBridgeConf, Conf}) ->
|
||||||
|
uninstall_bridge_v2(Type, Name, OldBridgeConf),
|
||||||
|
install_bridge_v2(Type, Name, Conf)
|
||||||
|
end,
|
||||||
|
Result = perform_bridge_changes([
|
||||||
|
#{action => RemoveFun, data => Removed},
|
||||||
|
#{
|
||||||
|
action => CreateFun,
|
||||||
|
data => Added,
|
||||||
|
on_exception_fn => fun emqx_bridge_resource:remove/4
|
||||||
|
},
|
||||||
|
#{action => UpdateFun, data => Updated}
|
||||||
|
]),
|
||||||
|
?tp(bridge_post_config_update_done, #{}),
|
||||||
|
Result;
|
||||||
|
post_config_update([?ROOT_KEY, BridgeType, BridgeName], '$remove', _, _OldConf, _AppEnvs) ->
|
||||||
|
Conf = emqx:get_config([?ROOT_KEY, BridgeType, BridgeName]),
|
||||||
|
ok = uninstall_bridge_v2(BridgeType, BridgeName, Conf),
|
||||||
|
?tp(bridge_post_config_update_done, #{}),
|
||||||
|
ok;
|
||||||
|
post_config_update([?ROOT_KEY, BridgeType, BridgeName], _Req, NewConf, undefined, _AppEnvs) ->
|
||||||
|
ok = install_bridge_v2(BridgeType, BridgeName, NewConf),
|
||||||
|
?tp(bridge_post_config_update_done, #{}),
|
||||||
|
ok;
|
||||||
|
post_config_update([?ROOT_KEY, BridgeType, BridgeName], _Req, NewConf, OldConf, _AppEnvs) ->
|
||||||
|
ok = uninstall_bridge_v2(BridgeType, BridgeName, OldConf),
|
||||||
|
ok = install_bridge_v2(BridgeType, BridgeName, NewConf),
|
||||||
|
?tp(bridge_post_config_update_done, #{}),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
diff_confs(NewConfs, OldConfs) ->
|
||||||
|
emqx_utils_maps:diff_maps(
|
||||||
|
flatten_confs(NewConfs),
|
||||||
|
flatten_confs(OldConfs)
|
||||||
|
).
|
||||||
|
|
||||||
|
flatten_confs(Conf0) ->
|
||||||
|
maps:from_list(
|
||||||
|
lists:flatmap(
|
||||||
|
fun({Type, Conf}) ->
|
||||||
|
do_flatten_confs(Type, Conf)
|
||||||
|
end,
|
||||||
|
maps:to_list(Conf0)
|
||||||
|
)
|
||||||
|
).
|
||||||
|
|
||||||
|
do_flatten_confs(Type, Conf0) ->
|
||||||
|
[{{Type, Name}, Conf} || {Name, Conf} <- maps:to_list(Conf0)].
|
||||||
|
|
||||||
|
perform_bridge_changes(Tasks) ->
|
||||||
|
perform_bridge_changes(Tasks, ok).
|
||||||
|
|
||||||
|
perform_bridge_changes([], Result) ->
|
||||||
|
Result;
|
||||||
|
perform_bridge_changes([#{action := Action, data := MapConfs} = Task | Tasks], Result0) ->
|
||||||
|
OnException = maps:get(on_exception_fn, Task, fun(_Type, _Name, _Conf, _Opts) -> ok end),
|
||||||
|
Result = maps:fold(
|
||||||
|
fun
|
||||||
|
({_Type, _Name}, _Conf, {error, Reason}) ->
|
||||||
|
{error, Reason};
|
||||||
|
%% for update
|
||||||
|
({Type, Name}, {OldConf, Conf}, _) ->
|
||||||
|
case Action(Type, Name, {OldConf, Conf}) of
|
||||||
|
{error, Reason} -> {error, Reason};
|
||||||
|
Return -> Return
|
||||||
|
end;
|
||||||
|
({Type, Name}, Conf, _) ->
|
||||||
|
try Action(Type, Name, Conf) of
|
||||||
|
{error, Reason} -> {error, Reason};
|
||||||
|
Return -> Return
|
||||||
|
catch
|
||||||
|
Kind:Error:Stacktrace ->
|
||||||
|
?SLOG(error, #{
|
||||||
|
msg => "bridge_config_update_exception",
|
||||||
|
kind => Kind,
|
||||||
|
error => Error,
|
||||||
|
type => Type,
|
||||||
|
name => Name,
|
||||||
|
stacktrace => Stacktrace
|
||||||
|
}),
|
||||||
|
OnException(Type, Name, Conf),
|
||||||
|
erlang:raise(Kind, Error, Stacktrace)
|
||||||
|
end
|
||||||
|
end,
|
||||||
|
Result0,
|
||||||
|
MapConfs
|
||||||
|
),
|
||||||
|
perform_bridge_changes(Tasks, Result).
|
||||||
|
|
||||||
|
is_bridge_v2_installed_in_connector_state(Tag, State) when is_map(State) ->
|
||||||
|
BridgeV2s = maps:get(installed_bridge_v2s, State, #{}),
|
||||||
|
maps:is_key(Tag, BridgeV2s);
|
||||||
|
is_bridge_v2_installed_in_connector_state(_Tag, _State) ->
|
||||||
|
false.
|
|
@ -0,0 +1,32 @@
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
-module(emqx_action_enterprise).
|
||||||
|
|
||||||
|
-if(?EMQX_RELEASE_EDITION == ee).
|
||||||
|
|
||||||
|
-include_lib("hocon/include/hoconsc.hrl").
|
||||||
|
-import(hoconsc, [mk/2, enum/1, ref/2]).
|
||||||
|
|
||||||
|
-export([
|
||||||
|
fields/1
|
||||||
|
]).
|
||||||
|
|
||||||
|
fields(bridges_v2) ->
|
||||||
|
kafka_structs().
|
||||||
|
|
||||||
|
kafka_structs() ->
|
||||||
|
[
|
||||||
|
{kafka,
|
||||||
|
mk(
|
||||||
|
hoconsc:map(name, ref(emqx_bridge_kafka, kafka_producer_action)),
|
||||||
|
#{
|
||||||
|
desc => <<"Kafka Producer Bridge V2 Config">>,
|
||||||
|
required => false
|
||||||
|
}
|
||||||
|
)}
|
||||||
|
].
|
||||||
|
|
||||||
|
-else.
|
||||||
|
|
||||||
|
-endif.
|
|
@ -0,0 +1,60 @@
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||||
|
%%
|
||||||
|
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
%% you may not use this file except in compliance with the License.
|
||||||
|
%% You may obtain a copy of the License at
|
||||||
|
%%
|
||||||
|
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
%%
|
||||||
|
%% Unless required by applicable law or agreed to in writing, software
|
||||||
|
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
%% See the License for the specific language governing permissions and
|
||||||
|
%% limitations under the License.
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
-module(emqx_action_schema).
|
||||||
|
|
||||||
|
-include_lib("typerefl/include/types.hrl").
|
||||||
|
-include_lib("hocon/include/hoconsc.hrl").
|
||||||
|
-include_lib("emqx/include/logger.hrl").
|
||||||
|
|
||||||
|
-import(hoconsc, [mk/2, ref/2]).
|
||||||
|
|
||||||
|
-export([roots/0, fields/1, desc/1, namespace/0, tags/0]).
|
||||||
|
|
||||||
|
-if(?EMQX_RELEASE_EDITION == ee).
|
||||||
|
|
||||||
|
enterprise_fields_actions() ->
|
||||||
|
%% We *must* do this to ensure the module is really loaded, especially when we use
|
||||||
|
%% `call_hocon' from `nodetool' to generate initial configurations.
|
||||||
|
_ = emqx_action_enterprise:module_info(),
|
||||||
|
case erlang:function_exported(emqx_action_enterprise, fields, 1) of
|
||||||
|
true ->
|
||||||
|
emqx_action_enterprise:fields(bridges_v2);
|
||||||
|
false ->
|
||||||
|
[]
|
||||||
|
end.
|
||||||
|
|
||||||
|
-else.
|
||||||
|
|
||||||
|
enterprise_fields_actions() -> [].
|
||||||
|
|
||||||
|
-endif.
|
||||||
|
|
||||||
|
%%======================================================================================
|
||||||
|
%% HOCON Schema Callbacks
|
||||||
|
%%======================================================================================
|
||||||
|
|
||||||
|
namespace() -> "bridge_v2".
|
||||||
|
|
||||||
|
tags() ->
|
||||||
|
[<<"Bridge V2">>].
|
||||||
|
|
||||||
|
roots() -> [{bridges_v2, ?HOCON(?R_REF(bridges_v2), #{importance => ?IMPORTANCE_LOW})}].
|
||||||
|
|
||||||
|
fields(bridges_v2) ->
|
||||||
|
[] ++ enterprise_fields_actions().
|
||||||
|
|
||||||
|
desc(_) ->
|
||||||
|
undefined.
|
|
@ -161,6 +161,14 @@ fields("config_consumer") ->
|
||||||
fields(kafka_consumer);
|
fields(kafka_consumer);
|
||||||
fields(kafka_producer) ->
|
fields(kafka_producer) ->
|
||||||
fields("config") ++ fields(producer_opts);
|
fields("config") ++ fields(producer_opts);
|
||||||
|
fields(kafka_producer_action) ->
|
||||||
|
[
|
||||||
|
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
|
||||||
|
{connector,
|
||||||
|
mk(binary(), #{
|
||||||
|
desc => ?DESC(emqx_connector_schema, "connector_field"), required => true
|
||||||
|
})}
|
||||||
|
] ++ fields(producer_opts);
|
||||||
fields(kafka_consumer) ->
|
fields(kafka_consumer) ->
|
||||||
fields("config") ++ fields(consumer_opts);
|
fields("config") ++ fields(consumer_opts);
|
||||||
fields("config") ->
|
fields("config") ->
|
||||||
|
@ -478,6 +486,8 @@ desc("put_" ++ Type) when Type =:= "consumer"; Type =:= "producer" ->
|
||||||
["Configuration for Kafka using `PUT` method."];
|
["Configuration for Kafka using `PUT` method."];
|
||||||
desc("post_" ++ Type) when Type =:= "consumer"; Type =:= "producer" ->
|
desc("post_" ++ Type) when Type =:= "consumer"; Type =:= "producer" ->
|
||||||
["Configuration for Kafka using `POST` method."];
|
["Configuration for Kafka using `POST` method."];
|
||||||
|
desc(kafka_producer_action) ->
|
||||||
|
?DESC("kafka_producer_action");
|
||||||
desc(Name) ->
|
desc(Name) ->
|
||||||
lists:member(Name, struct_names()) orelse throw({missing_desc, Name}),
|
lists:member(Name, struct_names()) orelse throw({missing_desc, Name}),
|
||||||
?DESC(Name).
|
?DESC(Name).
|
||||||
|
|
|
@ -16,7 +16,11 @@
|
||||||
on_stop/2,
|
on_stop/2,
|
||||||
on_query/3,
|
on_query/3,
|
||||||
on_query_async/4,
|
on_query_async/4,
|
||||||
on_get_status/2
|
on_get_status/2,
|
||||||
|
on_add_channel/4,
|
||||||
|
on_remove_channel/3,
|
||||||
|
on_get_channels/1,
|
||||||
|
on_get_channel_status/3
|
||||||
]).
|
]).
|
||||||
|
|
||||||
-export([
|
-export([
|
||||||
|
@ -27,9 +31,10 @@
|
||||||
-include_lib("emqx/include/logger.hrl").
|
-include_lib("emqx/include/logger.hrl").
|
||||||
|
|
||||||
%% Allocatable resources
|
%% Allocatable resources
|
||||||
-define(kafka_resource_id, kafka_resource_id).
|
-define(kafka_telementry_id, kafka_telementry_id).
|
||||||
-define(kafka_client_id, kafka_client_id).
|
-define(kafka_client_id, kafka_client_id).
|
||||||
-define(kafka_producers, kafka_producers).
|
-define(kafka_producers, kafka_producers).
|
||||||
|
-define(CONNECTOR_TYPE, kafka).
|
||||||
|
|
||||||
query_mode(#{kafka := #{query_mode := sync}}) ->
|
query_mode(#{kafka := #{query_mode := sync}}) ->
|
||||||
simple_sync_internal_buffer;
|
simple_sync_internal_buffer;
|
||||||
|
@ -38,32 +43,22 @@ query_mode(_) ->
|
||||||
|
|
||||||
callback_mode() -> async_if_possible.
|
callback_mode() -> async_if_possible.
|
||||||
|
|
||||||
%% @doc Config schema is defined in emqx_bridge_kafka.
|
%% @doc Config schema is defined in emqx_connector_kafka.
|
||||||
on_start(InstId, Config) ->
|
on_start(<<"connector:", _/binary>> = InstId, Config) ->
|
||||||
#{
|
#{
|
||||||
authentication := Auth,
|
authentication := Auth,
|
||||||
bootstrap_hosts := Hosts0,
|
bootstrap_hosts := Hosts0,
|
||||||
bridge_name := BridgeName,
|
connector_name := ConnectorName,
|
||||||
bridge_type := BridgeType,
|
|
||||||
connect_timeout := ConnTimeout,
|
connect_timeout := ConnTimeout,
|
||||||
kafka := KafkaConfig = #{
|
|
||||||
message := MessageTemplate,
|
|
||||||
topic := KafkaTopic,
|
|
||||||
sync_query_timeout := SyncQueryTimeout
|
|
||||||
},
|
|
||||||
metadata_request_timeout := MetaReqTimeout,
|
metadata_request_timeout := MetaReqTimeout,
|
||||||
min_metadata_refresh_interval := MinMetaRefreshInterval,
|
min_metadata_refresh_interval := MinMetaRefreshInterval,
|
||||||
socket_opts := SocketOpts,
|
socket_opts := SocketOpts,
|
||||||
ssl := SSL
|
ssl := SSL
|
||||||
} = Config,
|
} = Config,
|
||||||
KafkaHeadersTokens = preproc_kafka_headers(maps:get(kafka_headers, KafkaConfig, undefined)),
|
ConnectorType = ?CONNECTOR_TYPE,
|
||||||
KafkaExtHeadersTokens = preproc_ext_headers(maps:get(kafka_ext_headers, KafkaConfig, [])),
|
ResourceId = emqx_connector_resource:resource_id(ConnectorType, ConnectorName),
|
||||||
KafkaHeadersValEncodeMode = maps:get(kafka_header_value_encode_mode, KafkaConfig, none),
|
|
||||||
ResourceId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
|
|
||||||
ok = emqx_resource:allocate_resource(InstId, ?kafka_resource_id, ResourceId),
|
|
||||||
_ = maybe_install_wolff_telemetry_handlers(ResourceId),
|
|
||||||
Hosts = emqx_bridge_kafka_impl:hosts(Hosts0),
|
Hosts = emqx_bridge_kafka_impl:hosts(Hosts0),
|
||||||
ClientId = emqx_bridge_kafka_impl:make_client_id(BridgeType, BridgeName),
|
ClientId = emqx_bridge_kafka_impl:make_client_id(?CONNECTOR_TYPE, ConnectorName),
|
||||||
ok = emqx_resource:allocate_resource(InstId, ?kafka_client_id, ClientId),
|
ok = emqx_resource:allocate_resource(InstId, ?kafka_client_id, ClientId),
|
||||||
ClientConfig = #{
|
ClientConfig = #{
|
||||||
min_metadata_refresh_interval => MinMetaRefreshInterval,
|
min_metadata_refresh_interval => MinMetaRefreshInterval,
|
||||||
|
@ -74,12 +69,6 @@ on_start(InstId, Config) ->
|
||||||
sasl => emqx_bridge_kafka_impl:sasl(Auth),
|
sasl => emqx_bridge_kafka_impl:sasl(Auth),
|
||||||
ssl => ssl(SSL)
|
ssl => ssl(SSL)
|
||||||
},
|
},
|
||||||
case do_get_topic_status(Hosts, KafkaConfig, KafkaTopic) of
|
|
||||||
unhealthy_target ->
|
|
||||||
throw(unhealthy_target);
|
|
||||||
_ ->
|
|
||||||
ok
|
|
||||||
end,
|
|
||||||
case wolff:ensure_supervised_client(ClientId, Hosts, ClientConfig) of
|
case wolff:ensure_supervised_client(ClientId, Hosts, ClientConfig) of
|
||||||
{ok, _} ->
|
{ok, _} ->
|
||||||
?SLOG(info, #{
|
?SLOG(info, #{
|
||||||
|
@ -97,7 +86,51 @@ on_start(InstId, Config) ->
|
||||||
throw(failed_to_start_kafka_client)
|
throw(failed_to_start_kafka_client)
|
||||||
end,
|
end,
|
||||||
%% Check if this is a dry run
|
%% Check if this is a dry run
|
||||||
TestIdStart = string:find(InstId, ?TEST_ID_PREFIX),
|
{ok, #{
|
||||||
|
client_id => ClientId,
|
||||||
|
resource_id => ResourceId,
|
||||||
|
hosts => Hosts,
|
||||||
|
installed_bridge_v2s => #{}
|
||||||
|
}}.
|
||||||
|
|
||||||
|
on_add_channel(
|
||||||
|
InstId,
|
||||||
|
#{
|
||||||
|
client_id := ClientId,
|
||||||
|
hosts := Hosts,
|
||||||
|
installed_bridge_v2s := InstalledBridgeV2s
|
||||||
|
} = OldState,
|
||||||
|
BridgeV2Id,
|
||||||
|
BridgeV2Config
|
||||||
|
) ->
|
||||||
|
%% The following will throw an exception if the bridge producers fails to start
|
||||||
|
{ok, BridgeV2State} = create_producers_for_bridge_v2(
|
||||||
|
InstId, BridgeV2Id, ClientId, Hosts, BridgeV2Config
|
||||||
|
),
|
||||||
|
NewInstalledBridgeV2s = maps:put(BridgeV2Id, BridgeV2State, InstalledBridgeV2s),
|
||||||
|
%% Update state
|
||||||
|
NewState = OldState#{installed_bridge_v2s => NewInstalledBridgeV2s},
|
||||||
|
{ok, NewState}.
|
||||||
|
|
||||||
|
create_producers_for_bridge_v2(
|
||||||
|
InstId,
|
||||||
|
BridgeV2Id,
|
||||||
|
ClientId,
|
||||||
|
Hosts,
|
||||||
|
#{
|
||||||
|
bridge_type := BridgeType,
|
||||||
|
kafka := #{
|
||||||
|
message := MessageTemplate,
|
||||||
|
topic := KafkaTopic,
|
||||||
|
sync_query_timeout := SyncQueryTimeout
|
||||||
|
} = KafkaConfig
|
||||||
|
}
|
||||||
|
) ->
|
||||||
|
KafkaHeadersTokens = preproc_kafka_headers(maps:get(kafka_headers, KafkaConfig, undefined)),
|
||||||
|
KafkaExtHeadersTokens = preproc_ext_headers(maps:get(kafka_ext_headers, KafkaConfig, [])),
|
||||||
|
KafkaHeadersValEncodeMode = maps:get(kafka_header_value_encode_mode, KafkaConfig, none),
|
||||||
|
{_BridgeType, BridgeName} = emqx_bridge_v2:parse_id(BridgeV2Id),
|
||||||
|
TestIdStart = string:find(BridgeV2Id, ?TEST_ID_PREFIX),
|
||||||
IsDryRun =
|
IsDryRun =
|
||||||
case TestIdStart of
|
case TestIdStart of
|
||||||
nomatch ->
|
nomatch ->
|
||||||
|
@ -105,18 +138,24 @@ on_start(InstId, Config) ->
|
||||||
_ ->
|
_ ->
|
||||||
string:equal(TestIdStart, InstId)
|
string:equal(TestIdStart, InstId)
|
||||||
end,
|
end,
|
||||||
WolffProducerConfig = producers_config(BridgeType, BridgeName, ClientId, KafkaConfig, IsDryRun),
|
ok = check_topic_status(Hosts, KafkaConfig, KafkaTopic),
|
||||||
|
ok = check_if_healthy_leaders(ClientId, KafkaTopic),
|
||||||
|
WolffProducerConfig = producers_config(BridgeType, BridgeName, ClientId, KafkaConfig, IsDryRun, BridgeV2Id),
|
||||||
case wolff:ensure_supervised_producers(ClientId, KafkaTopic, WolffProducerConfig) of
|
case wolff:ensure_supervised_producers(ClientId, KafkaTopic, WolffProducerConfig) of
|
||||||
{ok, Producers} ->
|
{ok, Producers} ->
|
||||||
ok = emqx_resource:allocate_resource(InstId, ?kafka_producers, Producers),
|
ok = emqx_resource:allocate_resource(InstId, {?kafka_producers, BridgeV2Id}, Producers),
|
||||||
|
ok = emqx_resource:allocate_resource(
|
||||||
|
InstId, {?kafka_telementry_id, BridgeV2Id}, BridgeV2Id
|
||||||
|
),
|
||||||
|
_ = maybe_install_wolff_telemetry_handlers(BridgeV2Id),
|
||||||
{ok, #{
|
{ok, #{
|
||||||
message_template => compile_message_template(MessageTemplate),
|
message_template => compile_message_template(MessageTemplate),
|
||||||
client_id => ClientId,
|
client_id => ClientId,
|
||||||
kafka_topic => KafkaTopic,
|
kafka_topic => KafkaTopic,
|
||||||
producers => Producers,
|
producers => Producers,
|
||||||
resource_id => ResourceId,
|
resource_id => BridgeV2Id,
|
||||||
|
connector_resource_id => InstId,
|
||||||
sync_query_timeout => SyncQueryTimeout,
|
sync_query_timeout => SyncQueryTimeout,
|
||||||
hosts => Hosts,
|
|
||||||
kafka_config => KafkaConfig,
|
kafka_config => KafkaConfig,
|
||||||
headers_tokens => KafkaHeadersTokens,
|
headers_tokens => KafkaHeadersTokens,
|
||||||
ext_headers_tokens => KafkaExtHeadersTokens,
|
ext_headers_tokens => KafkaExtHeadersTokens,
|
||||||
|
@ -130,20 +169,6 @@ on_start(InstId, Config) ->
|
||||||
kafka_topic => KafkaTopic,
|
kafka_topic => KafkaTopic,
|
||||||
reason => Reason2
|
reason => Reason2
|
||||||
}),
|
}),
|
||||||
%% Need to stop the already running client; otherwise, the
|
|
||||||
%% next `on_start' call will try to ensure the client
|
|
||||||
%% exists and it will be already present and using the old
|
|
||||||
%% config. This is specially bad if the original crash
|
|
||||||
%% was due to misconfiguration and we are trying to fix
|
|
||||||
%% it...
|
|
||||||
_ = with_log_at_error(
|
|
||||||
fun() -> wolff:stop_and_delete_supervised_client(ClientId) end,
|
|
||||||
#{
|
|
||||||
msg => "failed_to_delete_kafka_client",
|
|
||||||
client_id => ClientId
|
|
||||||
}
|
|
||||||
),
|
|
||||||
|
|
||||||
throw(
|
throw(
|
||||||
"Failed to start Kafka client. Please check the logs for errors and check"
|
"Failed to start Kafka client. Please check the logs for errors and check"
|
||||||
" the connection parameters."
|
" the connection parameters."
|
||||||
|
@ -151,68 +176,94 @@ on_start(InstId, Config) ->
|
||||||
end.
|
end.
|
||||||
|
|
||||||
on_stop(InstanceId, _State) ->
|
on_stop(InstanceId, _State) ->
|
||||||
case emqx_resource:get_allocated_resources(InstanceId) of
|
AllocatedResources = emqx_resource:get_allocated_resources(InstanceId),
|
||||||
|
ClientId = maps:get(?kafka_client_id, AllocatedResources, undefined),
|
||||||
|
case ClientId of
|
||||||
|
undefined ->
|
||||||
|
ok;
|
||||||
|
ClientId ->
|
||||||
|
deallocate_client(ClientId)
|
||||||
|
end,
|
||||||
|
maps:foreach(
|
||||||
|
fun
|
||||||
|
({?kafka_producers, _BridgeV2Id}, Producers) ->
|
||||||
|
deallocate_producers(ClientId, Producers);
|
||||||
|
({?kafka_telementry_id, _BridgeV2Id}, TelementryId) ->
|
||||||
|
deallocate_telementry_handlers(TelementryId);
|
||||||
|
(_, _) ->
|
||||||
|
ok
|
||||||
|
end,
|
||||||
|
AllocatedResources
|
||||||
|
),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
deallocate_client(ClientId) ->
|
||||||
|
_ = with_log_at_error(
|
||||||
|
fun() -> wolff:stop_and_delete_supervised_client(ClientId) end,
|
||||||
#{
|
#{
|
||||||
?kafka_client_id := ClientId,
|
msg => "failed_to_delete_kafka_client",
|
||||||
?kafka_producers := Producers,
|
client_id => ClientId
|
||||||
?kafka_resource_id := ResourceId
|
}
|
||||||
} ->
|
).
|
||||||
|
|
||||||
|
deallocate_producers(ClientId, Producers) ->
|
||||||
_ = with_log_at_error(
|
_ = with_log_at_error(
|
||||||
fun() -> wolff:stop_and_delete_supervised_producers(Producers) end,
|
fun() -> wolff:stop_and_delete_supervised_producers(Producers) end,
|
||||||
#{
|
#{
|
||||||
msg => "failed_to_delete_kafka_producer",
|
msg => "failed_to_delete_kafka_producer",
|
||||||
client_id => ClientId
|
client_id => ClientId
|
||||||
}
|
}
|
||||||
),
|
).
|
||||||
|
|
||||||
|
deallocate_telementry_handlers(TelementryId) ->
|
||||||
_ = with_log_at_error(
|
_ = with_log_at_error(
|
||||||
fun() -> wolff:stop_and_delete_supervised_client(ClientId) end,
|
fun() -> uninstall_telemetry_handlers(TelementryId) end,
|
||||||
#{
|
|
||||||
msg => "failed_to_delete_kafka_client",
|
|
||||||
client_id => ClientId
|
|
||||||
}
|
|
||||||
),
|
|
||||||
_ = with_log_at_error(
|
|
||||||
fun() -> uninstall_telemetry_handlers(ResourceId) end,
|
|
||||||
#{
|
#{
|
||||||
msg => "failed_to_uninstall_telemetry_handlers",
|
msg => "failed_to_uninstall_telemetry_handlers",
|
||||||
resource_id => ResourceId
|
resource_id => TelementryId
|
||||||
}
|
}
|
||||||
),
|
).
|
||||||
ok;
|
|
||||||
#{?kafka_client_id := ClientId, ?kafka_resource_id := ResourceId} ->
|
remove_producers_for_bridge_v2(
|
||||||
_ = with_log_at_error(
|
InstId, BridgeV2Id
|
||||||
fun() -> wolff:stop_and_delete_supervised_client(ClientId) end,
|
) ->
|
||||||
#{
|
AllocatedResources = emqx_resource:get_allocated_resources(InstId),
|
||||||
msg => "failed_to_delete_kafka_client",
|
ClientId = maps:get(?kafka_client_id, AllocatedResources, no_client_id),
|
||||||
client_id => ClientId
|
maps:foreach(
|
||||||
}
|
fun
|
||||||
),
|
({?kafka_producers, BridgeV2IdCheck}, Producers) when BridgeV2IdCheck =:= BridgeV2Id ->
|
||||||
_ = with_log_at_error(
|
deallocate_producers(ClientId, Producers);
|
||||||
fun() -> uninstall_telemetry_handlers(ResourceId) end,
|
({?kafka_telementry_id, BridgeV2IdCheck}, TelementryId) when
|
||||||
#{
|
BridgeV2IdCheck =:= BridgeV2Id
|
||||||
msg => "failed_to_uninstall_telemetry_handlers",
|
->
|
||||||
resource_id => ResourceId
|
deallocate_telementry_handlers(TelementryId);
|
||||||
}
|
(_, _) ->
|
||||||
),
|
|
||||||
ok;
|
|
||||||
#{?kafka_resource_id := ResourceId} ->
|
|
||||||
_ = with_log_at_error(
|
|
||||||
fun() -> uninstall_telemetry_handlers(ResourceId) end,
|
|
||||||
#{
|
|
||||||
msg => "failed_to_uninstall_telemetry_handlers",
|
|
||||||
resource_id => ResourceId
|
|
||||||
}
|
|
||||||
),
|
|
||||||
ok;
|
|
||||||
_ ->
|
|
||||||
ok
|
ok
|
||||||
end,
|
end,
|
||||||
?tp(kafka_producer_stopped, #{instance_id => InstanceId}),
|
AllocatedResources
|
||||||
|
),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
on_remove_channel(
|
||||||
|
InstId,
|
||||||
|
#{
|
||||||
|
client_id := _ClientId,
|
||||||
|
hosts := _Hosts,
|
||||||
|
installed_bridge_v2s := InstalledBridgeV2s
|
||||||
|
} = OldState,
|
||||||
|
BridgeV2Id
|
||||||
|
) ->
|
||||||
|
ok = remove_producers_for_bridge_v2(InstId, BridgeV2Id),
|
||||||
|
NewInstalledBridgeV2s = maps:remove(BridgeV2Id, InstalledBridgeV2s),
|
||||||
|
%% Update state
|
||||||
|
NewState = OldState#{installed_bridge_v2s => NewInstalledBridgeV2s},
|
||||||
|
{ok, NewState}.
|
||||||
|
|
||||||
on_query(
|
on_query(
|
||||||
InstId,
|
InstId,
|
||||||
{send_message, Message},
|
{MessageTag, Message},
|
||||||
|
#{installed_bridge_v2s := BridgeV2Configs} = _ConnectorState
|
||||||
|
) ->
|
||||||
#{
|
#{
|
||||||
message_template := Template,
|
message_template := Template,
|
||||||
producers := Producers,
|
producers := Producers,
|
||||||
|
@ -220,8 +271,7 @@ on_query(
|
||||||
headers_tokens := KafkaHeadersTokens,
|
headers_tokens := KafkaHeadersTokens,
|
||||||
ext_headers_tokens := KafkaExtHeadersTokens,
|
ext_headers_tokens := KafkaExtHeadersTokens,
|
||||||
headers_val_encode_mode := KafkaHeadersValEncodeMode
|
headers_val_encode_mode := KafkaHeadersValEncodeMode
|
||||||
}
|
} = maps:get(MessageTag, BridgeV2Configs),
|
||||||
) ->
|
|
||||||
KafkaHeaders = #{
|
KafkaHeaders = #{
|
||||||
headers_tokens => KafkaHeadersTokens,
|
headers_tokens => KafkaHeadersTokens,
|
||||||
ext_headers_tokens => KafkaExtHeadersTokens,
|
ext_headers_tokens => KafkaExtHeadersTokens,
|
||||||
|
@ -257,6 +307,9 @@ on_query(
|
||||||
{error, {unrecoverable_error, Error}}
|
{error, {unrecoverable_error, Error}}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
on_get_channels(ResId) ->
|
||||||
|
emqx_bridge_v2:get_channels_for_connector(ResId).
|
||||||
|
|
||||||
%% @doc The callback API for rule-engine (or bridge without rules)
|
%% @doc The callback API for rule-engine (or bridge without rules)
|
||||||
%% The input argument `Message' is an enriched format (as a map())
|
%% The input argument `Message' is an enriched format (as a map())
|
||||||
%% of the original #message{} record.
|
%% of the original #message{} record.
|
||||||
|
@ -265,16 +318,17 @@ on_query(
|
||||||
%% or the direct mapping from an MQTT message.
|
%% or the direct mapping from an MQTT message.
|
||||||
on_query_async(
|
on_query_async(
|
||||||
InstId,
|
InstId,
|
||||||
{send_message, Message},
|
{MessageTag, Message},
|
||||||
AsyncReplyFn,
|
AsyncReplyFn,
|
||||||
|
#{installed_bridge_v2s := BridgeV2Configs} = _ConnectorState
|
||||||
|
) ->
|
||||||
#{
|
#{
|
||||||
message_template := Template,
|
message_template := Template,
|
||||||
producers := Producers,
|
producers := Producers,
|
||||||
headers_tokens := KafkaHeadersTokens,
|
headers_tokens := KafkaHeadersTokens,
|
||||||
ext_headers_tokens := KafkaExtHeadersTokens,
|
ext_headers_tokens := KafkaExtHeadersTokens,
|
||||||
headers_val_encode_mode := KafkaHeadersValEncodeMode
|
headers_val_encode_mode := KafkaHeadersValEncodeMode
|
||||||
}
|
} = maps:get(MessageTag, BridgeV2Configs),
|
||||||
) ->
|
|
||||||
KafkaHeaders = #{
|
KafkaHeaders = #{
|
||||||
headers_tokens => KafkaHeadersTokens,
|
headers_tokens => KafkaHeadersTokens,
|
||||||
ext_headers_tokens => KafkaExtHeadersTokens,
|
ext_headers_tokens => KafkaExtHeadersTokens,
|
||||||
|
@ -399,32 +453,63 @@ on_kafka_ack(_Partition, buffer_overflow_discarded, _Callback) ->
|
||||||
%% Note: since wolff client has its own replayq that is not managed by
|
%% Note: since wolff client has its own replayq that is not managed by
|
||||||
%% `emqx_resource_buffer_worker', we must avoid returning `disconnected' here. Otherwise,
|
%% `emqx_resource_buffer_worker', we must avoid returning `disconnected' here. Otherwise,
|
||||||
%% `emqx_resource_manager' will kill the wolff producers and messages might be lost.
|
%% `emqx_resource_manager' will kill the wolff producers and messages might be lost.
|
||||||
on_get_status(_InstId, #{client_id := ClientId} = State) ->
|
on_get_status(
|
||||||
|
<<"connector:", _/binary>> = _InstId,
|
||||||
|
#{client_id := ClientId} = _State
|
||||||
|
) ->
|
||||||
case wolff_client_sup:find_client(ClientId) of
|
case wolff_client_sup:find_client(ClientId) of
|
||||||
{ok, Pid} ->
|
{ok, Pid} ->
|
||||||
case do_get_status(Pid, State) of
|
case wolff_client:check_connectivity(Pid) of
|
||||||
ok -> connected;
|
ok -> connected;
|
||||||
unhealthy_target -> {disconnected, State, unhealthy_target};
|
{error, Error} -> {connecting, Error}
|
||||||
error -> connecting
|
|
||||||
end;
|
end;
|
||||||
{error, _Reason} ->
|
{error, _Reason} ->
|
||||||
connecting
|
connecting
|
||||||
end.
|
end.
|
||||||
|
|
||||||
do_get_status(Client, #{kafka_topic := KafkaTopic, hosts := Hosts, kafka_config := KafkaConfig}) ->
|
on_get_channel_status(
|
||||||
case do_get_topic_status(Hosts, KafkaConfig, KafkaTopic) of
|
_ResId,
|
||||||
unhealthy_target ->
|
ChannelId,
|
||||||
unhealthy_target;
|
#{
|
||||||
_ ->
|
client_id := ClientId,
|
||||||
case do_get_healthy_leaders(Client, KafkaTopic) of
|
hosts := Hosts,
|
||||||
[] -> error;
|
installed_bridge_v2s := Channels
|
||||||
_ -> ok
|
} = _State
|
||||||
end
|
) ->
|
||||||
|
ChannelState = maps:get(ChannelId, Channels),
|
||||||
|
case wolff_client_sup:find_client(ClientId) of
|
||||||
|
{ok, Pid} ->
|
||||||
|
case wolff_client:check_connectivity(Pid) of
|
||||||
|
ok ->
|
||||||
|
try check_leaders_and_topic(Pid, Hosts, ChannelState) of
|
||||||
|
ok ->
|
||||||
|
connected
|
||||||
|
catch
|
||||||
|
_ErrorType:Reason ->
|
||||||
|
{connecting, Reason}
|
||||||
|
end;
|
||||||
|
{error, Error} ->
|
||||||
|
{connecting, Error}
|
||||||
|
end;
|
||||||
|
{error, _Reason} ->
|
||||||
|
connecting
|
||||||
end.
|
end.
|
||||||
|
|
||||||
do_get_healthy_leaders(Client, KafkaTopic) ->
|
check_leaders_and_topic(
|
||||||
|
Client,
|
||||||
|
Hosts,
|
||||||
|
#{
|
||||||
|
kafka_config := KafkaConfig,
|
||||||
|
kafka_topic := KafkaTopic
|
||||||
|
} = _ChannelState
|
||||||
|
) ->
|
||||||
|
check_if_healthy_leaders(Client, KafkaTopic),
|
||||||
|
check_topic_status(Hosts, KafkaConfig, KafkaTopic).
|
||||||
|
|
||||||
|
check_if_healthy_leaders(Client, KafkaTopic) when is_pid(Client) ->
|
||||||
|
Leaders =
|
||||||
case wolff_client:get_leader_connections(Client, KafkaTopic) of
|
case wolff_client:get_leader_connections(Client, KafkaTopic) of
|
||||||
{ok, Leaders} ->
|
{ok, LeadersToCheck} ->
|
||||||
%% Kafka is considered healthy as long as any of the partition leader is reachable.
|
%% Kafka is considered healthy as long as any of the partition leader is reachable.
|
||||||
lists:filtermap(
|
lists:filtermap(
|
||||||
fun({_Partition, Pid}) ->
|
fun({_Partition, Pid}) ->
|
||||||
|
@ -433,26 +518,51 @@ do_get_healthy_leaders(Client, KafkaTopic) ->
|
||||||
_ -> false
|
_ -> false
|
||||||
end
|
end
|
||||||
end,
|
end,
|
||||||
Leaders
|
LeadersToCheck
|
||||||
);
|
);
|
||||||
{error, _} ->
|
{error, _} ->
|
||||||
[]
|
[]
|
||||||
|
end,
|
||||||
|
case Leaders of
|
||||||
|
[] ->
|
||||||
|
throw(
|
||||||
|
iolist_to_binary(
|
||||||
|
io_lib:format("Could not find any healthy partion leader for topic ~s", [
|
||||||
|
KafkaTopic
|
||||||
|
])
|
||||||
|
)
|
||||||
|
);
|
||||||
|
_ ->
|
||||||
|
ok
|
||||||
|
end;
|
||||||
|
check_if_healthy_leaders(ClientId, KafkaTopic) ->
|
||||||
|
case wolff_client_sup:find_client(ClientId) of
|
||||||
|
{ok, Pid} ->
|
||||||
|
check_if_healthy_leaders(Pid, KafkaTopic);
|
||||||
|
{error, _Reason} ->
|
||||||
|
throw(iolist_to_binary(io_lib:format("Could not find Kafka client: ~p", [ClientId])))
|
||||||
end.
|
end.
|
||||||
|
|
||||||
do_get_topic_status(Hosts, KafkaConfig, KafkaTopic) ->
|
check_topic_status(Hosts, KafkaConfig, KafkaTopic) ->
|
||||||
CheckTopicFun =
|
CheckTopicFun =
|
||||||
fun() ->
|
fun() ->
|
||||||
wolff_client:check_if_topic_exists(Hosts, KafkaConfig, KafkaTopic)
|
wolff_client:check_if_topic_exists(Hosts, KafkaConfig, KafkaTopic)
|
||||||
end,
|
end,
|
||||||
try
|
try
|
||||||
case emqx_utils:nolink_apply(CheckTopicFun, 5_000) of
|
case emqx_utils:nolink_apply(CheckTopicFun, 5_000) of
|
||||||
ok -> ok;
|
ok ->
|
||||||
{error, unknown_topic_or_partition} -> unhealthy_target;
|
ok;
|
||||||
_ -> error
|
{error, unknown_topic_or_partition} ->
|
||||||
|
throw(
|
||||||
|
iolist_to_binary(io_lib:format("Unknown topic or partition ~s", [KafkaTopic]))
|
||||||
|
);
|
||||||
|
_ ->
|
||||||
|
ok
|
||||||
end
|
end
|
||||||
catch
|
catch
|
||||||
_:_ ->
|
error:_:_ ->
|
||||||
error
|
%% Some other error not related to unknown_topic_or_partition
|
||||||
|
ok
|
||||||
end.
|
end.
|
||||||
|
|
||||||
ssl(#{enable := true} = SSL) ->
|
ssl(#{enable := true} = SSL) ->
|
||||||
|
@ -460,7 +570,7 @@ ssl(#{enable := true} = SSL) ->
|
||||||
ssl(_) ->
|
ssl(_) ->
|
||||||
[].
|
[].
|
||||||
|
|
||||||
producers_config(BridgeType, BridgeName, ClientId, Input, IsDryRun) ->
|
producers_config(BridgeType, BridgeName, ClientId, Input, IsDryRun, BridgeV2Id) ->
|
||||||
#{
|
#{
|
||||||
max_batch_bytes := MaxBatchBytes,
|
max_batch_bytes := MaxBatchBytes,
|
||||||
compression := Compression,
|
compression := Compression,
|
||||||
|
@ -486,7 +596,6 @@ producers_config(BridgeType, BridgeName, ClientId, Input, IsDryRun) ->
|
||||||
disk -> {false, replayq_dir(ClientId)};
|
disk -> {false, replayq_dir(ClientId)};
|
||||||
hybrid -> {true, replayq_dir(ClientId)}
|
hybrid -> {true, replayq_dir(ClientId)}
|
||||||
end,
|
end,
|
||||||
ResourceID = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
|
|
||||||
#{
|
#{
|
||||||
name => make_producer_name(BridgeType, BridgeName, IsDryRun),
|
name => make_producer_name(BridgeType, BridgeName, IsDryRun),
|
||||||
partitioner => partitioner(PartitionStrategy),
|
partitioner => partitioner(PartitionStrategy),
|
||||||
|
@ -500,7 +609,7 @@ producers_config(BridgeType, BridgeName, ClientId, Input, IsDryRun) ->
|
||||||
max_batch_bytes => MaxBatchBytes,
|
max_batch_bytes => MaxBatchBytes,
|
||||||
max_send_ahead => MaxInflight - 1,
|
max_send_ahead => MaxInflight - 1,
|
||||||
compression => Compression,
|
compression => Compression,
|
||||||
telemetry_meta_data => #{bridge_id => ResourceID}
|
telemetry_meta_data => #{bridge_id => BridgeV2Id}
|
||||||
}.
|
}.
|
||||||
|
|
||||||
%% Wolff API is a batch API.
|
%% Wolff API is a batch API.
|
||||||
|
|
|
@ -58,13 +58,16 @@ groups() ->
|
||||||
All = emqx_common_test_helpers:all(?MODULE),
|
All = emqx_common_test_helpers:all(?MODULE),
|
||||||
[{on_query, All}, {on_query_async, All}].
|
[{on_query, All}, {on_query_async, All}].
|
||||||
|
|
||||||
|
test_topic_one_partition() ->
|
||||||
|
"test-topic-one-partition".
|
||||||
|
|
||||||
wait_until_kafka_is_up() ->
|
wait_until_kafka_is_up() ->
|
||||||
wait_until_kafka_is_up(0).
|
wait_until_kafka_is_up(0).
|
||||||
|
|
||||||
wait_until_kafka_is_up(300) ->
|
wait_until_kafka_is_up(300) ->
|
||||||
ct:fail("Kafka is not up even though we have waited for a while");
|
ct:fail("Kafka is not up even though we have waited for a while");
|
||||||
wait_until_kafka_is_up(Attempts) ->
|
wait_until_kafka_is_up(Attempts) ->
|
||||||
KafkaTopic = "test-topic-one-partition",
|
KafkaTopic = test_topic_one_partition(),
|
||||||
case resolve_kafka_offset(kafka_hosts(), KafkaTopic, 0) of
|
case resolve_kafka_offset(kafka_hosts(), KafkaTopic, 0) of
|
||||||
{ok, _} ->
|
{ok, _} ->
|
||||||
ok;
|
ok;
|
||||||
|
@ -297,7 +300,7 @@ kafka_bridge_rest_api_helper(Config) ->
|
||||||
end,
|
end,
|
||||||
false = MyKafkaBridgeExists(),
|
false = MyKafkaBridgeExists(),
|
||||||
%% Create new Kafka bridge
|
%% Create new Kafka bridge
|
||||||
KafkaTopic = "test-topic-one-partition",
|
KafkaTopic = test_topic_one_partition(),
|
||||||
CreateBodyTmp = #{
|
CreateBodyTmp = #{
|
||||||
<<"type">> => <<?BRIDGE_TYPE>>,
|
<<"type">> => <<?BRIDGE_TYPE>>,
|
||||||
<<"name">> => <<"my_kafka_bridge">>,
|
<<"name">> => <<"my_kafka_bridge">>,
|
||||||
|
@ -413,7 +416,7 @@ t_failed_creation_then_fix(Config) ->
|
||||||
Name = "kafka_bridge_name_" ++ erlang:integer_to_list(Hash),
|
Name = "kafka_bridge_name_" ++ erlang:integer_to_list(Hash),
|
||||||
ResourceId = emqx_bridge_resource:resource_id(Type, Name),
|
ResourceId = emqx_bridge_resource:resource_id(Type, Name),
|
||||||
BridgeId = emqx_bridge_resource:bridge_id(Type, Name),
|
BridgeId = emqx_bridge_resource:bridge_id(Type, Name),
|
||||||
KafkaTopic = "test-topic-one-partition",
|
KafkaTopic = test_topic_one_partition(),
|
||||||
WrongConf = config(#{
|
WrongConf = config(#{
|
||||||
"authentication" => WrongAuthSettings,
|
"authentication" => WrongAuthSettings,
|
||||||
"kafka_hosts_string" => HostsString,
|
"kafka_hosts_string" => HostsString,
|
||||||
|
@ -478,7 +481,7 @@ t_custom_timestamp(_Config) ->
|
||||||
Type = ?BRIDGE_TYPE,
|
Type = ?BRIDGE_TYPE,
|
||||||
Name = "kafka_bridge_name_" ++ erlang:integer_to_list(Hash),
|
Name = "kafka_bridge_name_" ++ erlang:integer_to_list(Hash),
|
||||||
ResourceId = emqx_bridge_resource:resource_id(Type, Name),
|
ResourceId = emqx_bridge_resource:resource_id(Type, Name),
|
||||||
KafkaTopic = "test-topic-one-partition",
|
KafkaTopic = test_topic_one_partition(),
|
||||||
MQTTTopic = <<"t/local/kafka">>,
|
MQTTTopic = <<"t/local/kafka">>,
|
||||||
emqx:subscribe(MQTTTopic),
|
emqx:subscribe(MQTTTopic),
|
||||||
Conf0 = config(#{
|
Conf0 = config(#{
|
||||||
|
@ -555,7 +558,7 @@ t_send_message_with_headers(Config) ->
|
||||||
Name = "kafka_bridge_name_" ++ erlang:integer_to_list(Hash),
|
Name = "kafka_bridge_name_" ++ erlang:integer_to_list(Hash),
|
||||||
ResourceId = emqx_bridge_resource:resource_id(Type, Name),
|
ResourceId = emqx_bridge_resource:resource_id(Type, Name),
|
||||||
BridgeId = emqx_bridge_resource:bridge_id(Type, Name),
|
BridgeId = emqx_bridge_resource:bridge_id(Type, Name),
|
||||||
KafkaTopic = "test-topic-one-partition",
|
KafkaTopic = test_topic_one_partition(),
|
||||||
Conf = config_with_headers(#{
|
Conf = config_with_headers(#{
|
||||||
"authentication" => AuthSettings,
|
"authentication" => AuthSettings,
|
||||||
"kafka_hosts_string" => HostsString,
|
"kafka_hosts_string" => HostsString,
|
||||||
|
@ -715,7 +718,7 @@ t_wrong_headers(_Config) ->
|
||||||
Type = ?BRIDGE_TYPE,
|
Type = ?BRIDGE_TYPE,
|
||||||
Name = "kafka_bridge_name_" ++ erlang:integer_to_list(Hash),
|
Name = "kafka_bridge_name_" ++ erlang:integer_to_list(Hash),
|
||||||
ResourceId = emqx_bridge_resource:resource_id(Type, Name),
|
ResourceId = emqx_bridge_resource:resource_id(Type, Name),
|
||||||
KafkaTopic = "test-topic-one-partition",
|
KafkaTopic = test_topic_one_partition(),
|
||||||
?assertThrow(
|
?assertThrow(
|
||||||
{
|
{
|
||||||
emqx_bridge_schema,
|
emqx_bridge_schema,
|
||||||
|
@ -789,7 +792,7 @@ t_wrong_headers_from_message(Config) ->
|
||||||
Name = "kafka_bridge_name_" ++ erlang:integer_to_list(Hash),
|
Name = "kafka_bridge_name_" ++ erlang:integer_to_list(Hash),
|
||||||
ResourceId = emqx_bridge_resource:resource_id(Type, Name),
|
ResourceId = emqx_bridge_resource:resource_id(Type, Name),
|
||||||
BridgeId = emqx_bridge_resource:bridge_id(Type, Name),
|
BridgeId = emqx_bridge_resource:bridge_id(Type, Name),
|
||||||
KafkaTopic = "test-topic-one-partition",
|
KafkaTopic = test_topic_one_partition(),
|
||||||
Conf = config_with_headers(#{
|
Conf = config_with_headers(#{
|
||||||
"authentication" => AuthSettings,
|
"authentication" => AuthSettings,
|
||||||
"kafka_hosts_string" => HostsString,
|
"kafka_hosts_string" => HostsString,
|
||||||
|
@ -939,7 +942,7 @@ publish_helper(
|
||||||
Name = "kafka_bridge_name_" ++ erlang:integer_to_list(Hash),
|
Name = "kafka_bridge_name_" ++ erlang:integer_to_list(Hash),
|
||||||
Type = ?BRIDGE_TYPE,
|
Type = ?BRIDGE_TYPE,
|
||||||
InstId = emqx_bridge_resource:resource_id(Type, Name),
|
InstId = emqx_bridge_resource:resource_id(Type, Name),
|
||||||
KafkaTopic = "test-topic-one-partition",
|
KafkaTopic = test_topic_one_partition(),
|
||||||
Conf = config(
|
Conf = config(
|
||||||
#{
|
#{
|
||||||
"authentication" => AuthSettings,
|
"authentication" => AuthSettings,
|
||||||
|
|
|
@ -0,0 +1,212 @@
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||||
|
%%
|
||||||
|
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
%% you may not use this file except in compliance with the License.
|
||||||
|
%% You may obtain a copy of the License at
|
||||||
|
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
%%
|
||||||
|
%% Unless required by applicable law or agreed to in writing, software
|
||||||
|
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
%% See the License for the specific language governing permissions and
|
||||||
|
%% limitations under the License.
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
|
-module(emqx_bridge_v2_kafka_producer_SUITE).
|
||||||
|
|
||||||
|
-compile(nowarn_export_all).
|
||||||
|
-compile(export_all).
|
||||||
|
|
||||||
|
-include_lib("eunit/include/eunit.hrl").
|
||||||
|
-include_lib("common_test/include/ct.hrl").
|
||||||
|
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||||
|
-include_lib("brod/include/brod.hrl").
|
||||||
|
all() ->
|
||||||
|
emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
|
init_per_suite(Config) ->
|
||||||
|
_ = application:load(emqx_conf),
|
||||||
|
ok = emqx_common_test_helpers:start_apps(apps_to_start_and_stop()),
|
||||||
|
application:ensure_all_started(telemetry),
|
||||||
|
application:ensure_all_started(wolff),
|
||||||
|
application:ensure_all_started(brod),
|
||||||
|
emqx_bridge_kafka_impl_producer_SUITE:wait_until_kafka_is_up(),
|
||||||
|
Config.
|
||||||
|
|
||||||
|
end_per_suite(_Config) ->
|
||||||
|
emqx_common_test_helpers:stop_apps(apps_to_start_and_stop()).
|
||||||
|
|
||||||
|
apps_to_start_and_stop() ->
|
||||||
|
[
|
||||||
|
emqx,
|
||||||
|
emqx_conf,
|
||||||
|
emqx_connector,
|
||||||
|
emqx_bridge,
|
||||||
|
emqx_rule_engine
|
||||||
|
].
|
||||||
|
|
||||||
|
t_create_remove_list(_) ->
|
||||||
|
[] = emqx_bridge_v2:list(),
|
||||||
|
ConnectorConfig = connector_config(),
|
||||||
|
{ok, _} = emqx_connector:create(kafka, test_connector, ConnectorConfig),
|
||||||
|
Config = bridge_v2_config(<<"test_connector">>),
|
||||||
|
{ok, _Config} = emqx_bridge_v2:create(kafka, test_bridge_v2, Config),
|
||||||
|
[BridgeV2Info] = emqx_bridge_v2:list(),
|
||||||
|
#{
|
||||||
|
name := <<"test_bridge_v2">>,
|
||||||
|
type := <<"kafka">>,
|
||||||
|
raw_config := _RawConfig
|
||||||
|
} = BridgeV2Info,
|
||||||
|
{ok, _Config2} = emqx_bridge_v2:create(kafka, test_bridge_v2_2, Config),
|
||||||
|
2 = length(emqx_bridge_v2:list()),
|
||||||
|
{ok, _} = emqx_bridge_v2:remove(kafka, test_bridge_v2),
|
||||||
|
1 = length(emqx_bridge_v2:list()),
|
||||||
|
{ok, _} = emqx_bridge_v2:remove(kafka, test_bridge_v2_2),
|
||||||
|
[] = emqx_bridge_v2:list(),
|
||||||
|
emqx_connector:remove(kafka, test_connector),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
%% Test sending a message to a bridge V2
|
||||||
|
t_send_message(_) ->
|
||||||
|
BridgeV2Config = bridge_v2_config(<<"test_connector2">>),
|
||||||
|
ConnectorConfig = connector_config(),
|
||||||
|
{ok, _} = emqx_connector:create(kafka, test_connector2, ConnectorConfig),
|
||||||
|
{ok, _} = emqx_bridge_v2:create(kafka, test_bridge_v2_1, BridgeV2Config),
|
||||||
|
%% Use the bridge to send a message
|
||||||
|
check_send_message_with_bridge(test_bridge_v2_1),
|
||||||
|
%% Create a few more bridges with the same connector and test them
|
||||||
|
BridgeNames1 = [
|
||||||
|
list_to_atom("test_bridge_v2_" ++ integer_to_list(I))
|
||||||
|
|| I <- lists:seq(2, 10)
|
||||||
|
],
|
||||||
|
lists:foreach(
|
||||||
|
fun(BridgeName) ->
|
||||||
|
{ok, _} = emqx_bridge_v2:create(kafka, BridgeName, BridgeV2Config),
|
||||||
|
check_send_message_with_bridge(BridgeName)
|
||||||
|
end,
|
||||||
|
BridgeNames1
|
||||||
|
),
|
||||||
|
BridgeNames = [test_bridge_v2_1 | BridgeNames1],
|
||||||
|
%% Send more messages to the bridges
|
||||||
|
lists:foreach(
|
||||||
|
fun(BridgeName) ->
|
||||||
|
lists:foreach(
|
||||||
|
fun(_) ->
|
||||||
|
check_send_message_with_bridge(BridgeName)
|
||||||
|
end,
|
||||||
|
lists:seq(1, 10)
|
||||||
|
)
|
||||||
|
end,
|
||||||
|
BridgeNames
|
||||||
|
),
|
||||||
|
%% Remove all the bridges
|
||||||
|
lists:foreach(
|
||||||
|
fun(BridgeName) ->
|
||||||
|
{ok, _} = emqx_bridge_v2:remove(kafka, BridgeName)
|
||||||
|
end,
|
||||||
|
BridgeNames
|
||||||
|
),
|
||||||
|
emqx_connector:remove(kafka, test_connector2),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
%% Test that we can get the status of the bridge V2
|
||||||
|
t_health_check(_) ->
|
||||||
|
BridgeV2Config = bridge_v2_config(<<"test_connector3">>),
|
||||||
|
ConnectorConfig = connector_config(),
|
||||||
|
{ok, _} = emqx_connector:create(kafka, test_connector3, ConnectorConfig),
|
||||||
|
{ok, _} = emqx_bridge_v2:create(kafka, test_bridge_v2, BridgeV2Config),
|
||||||
|
connected = emqx_bridge_v2:health_check(kafka, test_bridge_v2),
|
||||||
|
{ok, _} = emqx_bridge_v2:remove(kafka, test_bridge_v2),
|
||||||
|
%% Check behaviour when bridge does not exist
|
||||||
|
{error, bridge_not_found} = emqx_bridge_v2:health_check(kafka, test_bridge_v2),
|
||||||
|
{ok, _} = emqx_connector:remove(kafka, test_connector3),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
check_send_message_with_bridge(BridgeName) ->
|
||||||
|
%% ######################################
|
||||||
|
%% Create Kafka message
|
||||||
|
%% ######################################
|
||||||
|
KafkaTopic = emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition(),
|
||||||
|
Partition = 0,
|
||||||
|
Time = erlang:unique_integer(),
|
||||||
|
BinTime = integer_to_binary(Time),
|
||||||
|
Msg = #{
|
||||||
|
clientid => BinTime,
|
||||||
|
payload => <<"payload">>,
|
||||||
|
timestamp => Time
|
||||||
|
},
|
||||||
|
Hosts = emqx_bridge_kafka_impl_producer_SUITE:kafka_hosts(),
|
||||||
|
{ok, Offset0} = emqx_bridge_kafka_impl_producer_SUITE:resolve_kafka_offset(
|
||||||
|
Hosts, KafkaTopic, Partition
|
||||||
|
),
|
||||||
|
%% ######################################
|
||||||
|
%% Send message
|
||||||
|
%% ######################################
|
||||||
|
emqx_bridge_v2:send_message(kafka, BridgeName, Msg, #{}),
|
||||||
|
%% ######################################
|
||||||
|
%% Check if message is sent to Kafka
|
||||||
|
%% ######################################
|
||||||
|
{ok, {_, [KafkaMsg0]}} = brod:fetch(Hosts, KafkaTopic, Partition, Offset0),
|
||||||
|
?assertMatch(#kafka_message{key = BinTime}, KafkaMsg0).
|
||||||
|
|
||||||
|
bridge_v2_config(ConnectorName) ->
|
||||||
|
#{
|
||||||
|
<<"connector">> => ConnectorName,
|
||||||
|
<<"enable">> => true,
|
||||||
|
<<"kafka">> => #{
|
||||||
|
<<"buffer">> => #{
|
||||||
|
<<"memory_overload_protection">> => false,
|
||||||
|
<<"mode">> => <<"memory">>,
|
||||||
|
<<"per_partition_limit">> => <<"2GB">>,
|
||||||
|
<<"segment_bytes">> => <<"100MB">>
|
||||||
|
},
|
||||||
|
<<"compression">> => <<"no_compression">>,
|
||||||
|
<<"kafka_header_value_encode_mode">> => <<"none">>,
|
||||||
|
<<"max_batch_bytes">> => <<"896KB">>,
|
||||||
|
<<"max_inflight">> => 10,
|
||||||
|
<<"message">> => #{
|
||||||
|
<<"key">> => <<"${.clientid}">>,
|
||||||
|
<<"timestamp">> => <<"${.timestamp}">>,
|
||||||
|
<<"value">> => <<"${.}">>
|
||||||
|
},
|
||||||
|
<<"partition_count_refresh_interval">> => <<"60s">>,
|
||||||
|
<<"partition_strategy">> => <<"random">>,
|
||||||
|
<<"query_mode">> => <<"sync">>,
|
||||||
|
<<"required_acks">> => <<"all_isr">>,
|
||||||
|
<<"sync_query_timeout">> => <<"5s">>,
|
||||||
|
<<"topic">> => emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition()
|
||||||
|
},
|
||||||
|
<<"local_topic">> => <<"kafka_t/#">>,
|
||||||
|
<<"resource_opts">> => #{
|
||||||
|
<<"health_check_interval">> => <<"15s">>
|
||||||
|
}
|
||||||
|
}.
|
||||||
|
|
||||||
|
connector_config() ->
|
||||||
|
#{
|
||||||
|
<<"authentication">> => <<"none">>,
|
||||||
|
<<"bootstrap_hosts">> => <<"127.0.0.1:9092">>,
|
||||||
|
<<"connect_timeout">> => <<"5s">>,
|
||||||
|
<<"enable">> => true,
|
||||||
|
<<"metadata_request_timeout">> => <<"5s">>,
|
||||||
|
<<"min_metadata_refresh_interval">> => <<"3s">>,
|
||||||
|
<<"socket_opts">> =>
|
||||||
|
#{
|
||||||
|
<<"recbuf">> => <<"1024KB">>,
|
||||||
|
<<"sndbuf">> => <<"1024KB">>,
|
||||||
|
<<"tcp_keepalive">> => <<"none">>
|
||||||
|
},
|
||||||
|
<<"ssl">> =>
|
||||||
|
#{
|
||||||
|
<<"ciphers">> => [],
|
||||||
|
<<"depth">> => 10,
|
||||||
|
<<"enable">> => false,
|
||||||
|
<<"hibernate_after">> => <<"5s">>,
|
||||||
|
<<"log_level">> => <<"notice">>,
|
||||||
|
<<"reuse_sessions">> => true,
|
||||||
|
<<"secure_renegotiate">> => true,
|
||||||
|
<<"verify">> => <<"verify_peer">>,
|
||||||
|
<<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>]
|
||||||
|
}
|
||||||
|
}.
|
|
@ -53,6 +53,8 @@
|
||||||
%% by nodetool to generate app.<time>.config before EMQX is started
|
%% by nodetool to generate app.<time>.config before EMQX is started
|
||||||
-define(MERGED_CONFIGS, [
|
-define(MERGED_CONFIGS, [
|
||||||
emqx_bridge_schema,
|
emqx_bridge_schema,
|
||||||
|
emqx_connector_schema,
|
||||||
|
emqx_action_schema,
|
||||||
emqx_retainer_schema,
|
emqx_retainer_schema,
|
||||||
emqx_authn_schema,
|
emqx_authn_schema,
|
||||||
emqx_authz_schema,
|
emqx_authz_schema,
|
||||||
|
|
|
@ -0,0 +1,580 @@
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||||
|
%%
|
||||||
|
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
%% you may not use this file except in compliance with the License.
|
||||||
|
%% You may obtain a copy of the License at
|
||||||
|
%%
|
||||||
|
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
%%
|
||||||
|
%% Unless required by applicable law or agreed to in writing, software
|
||||||
|
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
%% See the License for the specific language governing permissions and
|
||||||
|
%% limitations under the License.
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
-module(emqx_connector).
|
||||||
|
|
||||||
|
-behaviour(emqx_config_handler).
|
||||||
|
-behaviour(emqx_config_backup).
|
||||||
|
|
||||||
|
-include_lib("emqx/include/emqx.hrl").
|
||||||
|
-include_lib("emqx/include/logger.hrl").
|
||||||
|
-include_lib("emqx/include/emqx_hooks.hrl").
|
||||||
|
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||||
|
|
||||||
|
-export([
|
||||||
|
pre_config_update/3,
|
||||||
|
post_config_update/5
|
||||||
|
]).
|
||||||
|
|
||||||
|
-export([
|
||||||
|
% load_hook/0,
|
||||||
|
% unload_hook/0
|
||||||
|
]).
|
||||||
|
|
||||||
|
% -export([on_message_publish/1]).
|
||||||
|
|
||||||
|
-export([
|
||||||
|
load/0,
|
||||||
|
unload/0,
|
||||||
|
lookup/1,
|
||||||
|
lookup/2,
|
||||||
|
get_metrics/2,
|
||||||
|
create/3,
|
||||||
|
disable_enable/3,
|
||||||
|
remove/2,
|
||||||
|
check_deps_and_remove/3,
|
||||||
|
list/0
|
||||||
|
% ,
|
||||||
|
% reload_hook/1
|
||||||
|
]).
|
||||||
|
|
||||||
|
-export([config_key_path/0]).
|
||||||
|
|
||||||
|
%% exported for `emqx_telemetry'
|
||||||
|
-export([get_basic_usage_info/0]).
|
||||||
|
|
||||||
|
%% Data backup
|
||||||
|
-export([
|
||||||
|
import_config/1
|
||||||
|
]).
|
||||||
|
|
||||||
|
-define(ROOT_KEY, connectors).
|
||||||
|
|
||||||
|
load() ->
|
||||||
|
Connectors = emqx:get_config([?ROOT_KEY], #{}),
|
||||||
|
lists:foreach(
|
||||||
|
fun({Type, NamedConf}) ->
|
||||||
|
lists:foreach(
|
||||||
|
fun({Name, Conf}) ->
|
||||||
|
safe_load_connector(Type, Name, Conf)
|
||||||
|
end,
|
||||||
|
maps:to_list(NamedConf)
|
||||||
|
)
|
||||||
|
end,
|
||||||
|
maps:to_list(Connectors)
|
||||||
|
).
|
||||||
|
|
||||||
|
unload() ->
|
||||||
|
%% unload_hook(),
|
||||||
|
Connectors = emqx:get_config([?ROOT_KEY], #{}),
|
||||||
|
lists:foreach(
|
||||||
|
fun({Type, NamedConf}) ->
|
||||||
|
lists:foreach(
|
||||||
|
fun({Name, _Conf}) ->
|
||||||
|
_ = emqx_connector_resource:stop(Type, Name)
|
||||||
|
end,
|
||||||
|
maps:to_list(NamedConf)
|
||||||
|
)
|
||||||
|
end,
|
||||||
|
maps:to_list(Connectors)
|
||||||
|
).
|
||||||
|
|
||||||
|
safe_load_connector(Type, Name, Conf) ->
|
||||||
|
try
|
||||||
|
_Res = emqx_connector_resource:create(Type, Name, Conf),
|
||||||
|
?tp(
|
||||||
|
emqx_connector_loaded,
|
||||||
|
#{
|
||||||
|
type => Type,
|
||||||
|
name => Name,
|
||||||
|
res => _Res
|
||||||
|
}
|
||||||
|
)
|
||||||
|
catch
|
||||||
|
Err:Reason:ST ->
|
||||||
|
?SLOG(error, #{
|
||||||
|
msg => "load_connector_failed",
|
||||||
|
type => Type,
|
||||||
|
name => Name,
|
||||||
|
error => Err,
|
||||||
|
reason => Reason,
|
||||||
|
stacktrace => ST
|
||||||
|
})
|
||||||
|
end.
|
||||||
|
|
||||||
|
% reload_hook(Connectors) ->
|
||||||
|
% ok = unload_hook(),
|
||||||
|
% ok = load_hook(Connectors).
|
||||||
|
|
||||||
|
% load_hook() ->
|
||||||
|
% Connectors = emqx:get_config([?ROOT_KEY], #{}),
|
||||||
|
% load_hook(Connectors).
|
||||||
|
|
||||||
|
% load_hook(Connectors) ->
|
||||||
|
% lists:foreach(
|
||||||
|
% fun({Type, Connector}) ->
|
||||||
|
% lists:foreach(
|
||||||
|
% fun({_Name, ConnectorConf}) ->
|
||||||
|
% do_load_hook(Type, ConnectorConf)
|
||||||
|
% end,
|
||||||
|
% maps:to_list(Connector)
|
||||||
|
% )
|
||||||
|
% end,
|
||||||
|
% maps:to_list(Connectors)
|
||||||
|
% ).
|
||||||
|
|
||||||
|
% do_load_hook(Type, #{local_topic := LocalTopic}) when
|
||||||
|
% ?EGRESS_DIR_BRIDGES(Type) andalso is_binary(LocalTopic)
|
||||||
|
% ->
|
||||||
|
% emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}, ?HP_BRIDGE);
|
||||||
|
% do_load_hook(mqtt, #{egress := #{local := #{topic := _}}}) ->
|
||||||
|
% emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}, ?HP_BRIDGE);
|
||||||
|
% do_load_hook(_Type, _Conf) ->
|
||||||
|
% ok.
|
||||||
|
|
||||||
|
% unload_hook() ->
|
||||||
|
% ok = emqx_hooks:del('message.publish', {?MODULE, on_message_publish}).
|
||||||
|
|
||||||
|
% on_message_publish(Message = #message{topic = Topic, flags = Flags}) ->
|
||||||
|
% case maps:get(sys, Flags, false) of
|
||||||
|
% false ->
|
||||||
|
% {Msg, _} = emqx_rule_events:eventmsg_publish(Message),
|
||||||
|
% send_to_matched_egress_connectors(Topic, Msg);
|
||||||
|
% true ->
|
||||||
|
% ok
|
||||||
|
% end,
|
||||||
|
% {ok, Message}.
|
||||||
|
|
||||||
|
% send_to_matched_egress_connectors(Topic, Msg) ->
|
||||||
|
% MatchedConnectorIds = get_matched_egress_connectors(Topic),
|
||||||
|
% lists:foreach(
|
||||||
|
% fun(Id) ->
|
||||||
|
% try send_message(Id, Msg) of
|
||||||
|
% {error, Reason} ->
|
||||||
|
% ?SLOG(error, #{
|
||||||
|
% msg => "send_message_to_connector_failed",
|
||||||
|
% connector => Id,
|
||||||
|
% error => Reason
|
||||||
|
% });
|
||||||
|
% _ ->
|
||||||
|
% ok
|
||||||
|
% catch
|
||||||
|
% Err:Reason:ST ->
|
||||||
|
% ?SLOG(error, #{
|
||||||
|
% msg => "send_message_to_connector_exception",
|
||||||
|
% connector => Id,
|
||||||
|
% error => Err,
|
||||||
|
% reason => Reason,
|
||||||
|
% stacktrace => ST
|
||||||
|
% })
|
||||||
|
% end
|
||||||
|
% end,
|
||||||
|
% MatchedConnectorIds
|
||||||
|
% ).
|
||||||
|
|
||||||
|
% send_message(ConnectorId, Message) ->
|
||||||
|
% {ConnectorType, ConnectorName} = emqx_connector_resource:parse_connector_id(ConnectorId),
|
||||||
|
% ResId = emqx_connector_resource:resource_id(ConnectorType, ConnectorName),
|
||||||
|
% send_message(ConnectorType, ConnectorName, ResId, Message, #{}).
|
||||||
|
|
||||||
|
% send_message(ConnectorType, ConnectorName, ResId, Message, QueryOpts0) ->
|
||||||
|
% case emqx:get_config([?ROOT_KEY, ConnectorType, ConnectorName], not_found) of
|
||||||
|
% not_found ->
|
||||||
|
% {error, connector_not_found};
|
||||||
|
% #{enable := true} = Config ->
|
||||||
|
% QueryOpts = maps:merge(query_opts(Config), QueryOpts0),
|
||||||
|
% emqx_resource:query(ResId, {send_message, Message}, QueryOpts);
|
||||||
|
% #{enable := false} ->
|
||||||
|
% {error, connector_stopped}
|
||||||
|
% end.
|
||||||
|
|
||||||
|
% query_opts(Config) ->
|
||||||
|
% case emqx_utils_maps:deep_get([resource_opts, request_ttl], Config, false) of
|
||||||
|
% Timeout when is_integer(Timeout) orelse Timeout =:= infinity ->
|
||||||
|
% %% request_ttl is configured
|
||||||
|
% #{timeout => Timeout};
|
||||||
|
% _ ->
|
||||||
|
% %% emqx_resource has a default value (15s)
|
||||||
|
% #{}
|
||||||
|
% end.
|
||||||
|
|
||||||
|
config_key_path() ->
|
||||||
|
[?ROOT_KEY].
|
||||||
|
|
||||||
|
pre_config_update([?ROOT_KEY], RawConf, RawConf) ->
|
||||||
|
{ok, RawConf};
|
||||||
|
pre_config_update([?ROOT_KEY], NewConf, _RawConf) ->
|
||||||
|
{ok, convert_certs(NewConf)};
|
||||||
|
pre_config_update(_, {_Oper, _, _}, undefined) ->
|
||||||
|
{error, connector_not_found};
|
||||||
|
pre_config_update(_, {Oper, _Type, _Name}, OldConfig) ->
|
||||||
|
%% to save the 'enable' to the config files
|
||||||
|
{ok, OldConfig#{<<"enable">> => operation_to_enable(Oper)}};
|
||||||
|
pre_config_update(Path, Conf, _OldConfig) when is_map(Conf) ->
|
||||||
|
case emqx_connector_ssl:convert_certs(filename:join(Path), Conf) of
|
||||||
|
{error, Reason} ->
|
||||||
|
{error, Reason};
|
||||||
|
{ok, ConfNew} ->
|
||||||
|
{ok, ConfNew}
|
||||||
|
end.
|
||||||
|
|
||||||
|
operation_to_enable(disable) -> false;
|
||||||
|
operation_to_enable(enable) -> true.
|
||||||
|
|
||||||
|
post_config_update([?ROOT_KEY], _Req, NewConf, OldConf, _AppEnv) ->
|
||||||
|
#{added := Added, removed := Removed, changed := Updated} =
|
||||||
|
diff_confs(NewConf, OldConf),
|
||||||
|
%% The config update will be failed if any task in `perform_connector_changes` failed.
|
||||||
|
Result = perform_connector_changes([
|
||||||
|
#{action => fun emqx_connector_resource:remove/4, data => Removed},
|
||||||
|
#{
|
||||||
|
action => fun emqx_connector_resource:create/4,
|
||||||
|
data => Added,
|
||||||
|
on_exception_fn => fun emqx_connector_resource:remove/4
|
||||||
|
},
|
||||||
|
#{action => fun emqx_connector_resource:update/4, data => Updated}
|
||||||
|
]),
|
||||||
|
% ok = unload_hook(),
|
||||||
|
% ok = load_hook(NewConf),
|
||||||
|
?tp(connector_post_config_update_done, #{}),
|
||||||
|
Result;
|
||||||
|
post_config_update([?ROOT_KEY, BridgeType, BridgeName], '$remove', _, _OldConf, _AppEnvs) ->
|
||||||
|
ok = emqx_connector_resource:remove(BridgeType, BridgeName),
|
||||||
|
Bridges = emqx_utils_maps:deep_remove([BridgeType, BridgeName], emqx:get_config([connectors])),
|
||||||
|
emqx_connector:reload_hook(Bridges),
|
||||||
|
?tp(connector_post_config_update_done, #{}),
|
||||||
|
ok;
|
||||||
|
post_config_update([?ROOT_KEY, BridgeType, BridgeName], _Req, NewConf, undefined, _AppEnvs) ->
|
||||||
|
ok = emqx_connector_resource:create(BridgeType, BridgeName, NewConf),
|
||||||
|
?tp(connector_post_config_update_done, #{}),
|
||||||
|
ok;
|
||||||
|
post_config_update([connectors, BridgeType, BridgeName], _Req, NewConf, OldConf, _AppEnvs) ->
|
||||||
|
ResOpts = emqx_resource:fetch_creation_opts(NewConf),
|
||||||
|
ok = emqx_connector_resource:update(BridgeType, BridgeName, {OldConf, NewConf}, ResOpts),
|
||||||
|
Bridges = emqx_utils_maps:deep_put(
|
||||||
|
[BridgeType, BridgeName], emqx:get_config([connectors]), NewConf
|
||||||
|
),
|
||||||
|
emqx_connector:reload_hook(Bridges),
|
||||||
|
?tp(connector_post_config_update_done, #{}),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
list() ->
|
||||||
|
maps:fold(
|
||||||
|
fun(Type, NameAndConf, Connectors) ->
|
||||||
|
maps:fold(
|
||||||
|
fun(Name, RawConf, Acc) ->
|
||||||
|
case lookup(Type, Name, RawConf) of
|
||||||
|
{error, not_found} -> Acc;
|
||||||
|
{ok, Res} -> [Res | Acc]
|
||||||
|
end
|
||||||
|
end,
|
||||||
|
Connectors,
|
||||||
|
NameAndConf
|
||||||
|
)
|
||||||
|
end,
|
||||||
|
[],
|
||||||
|
emqx:get_raw_config([connectors], #{})
|
||||||
|
).
|
||||||
|
|
||||||
|
lookup(Id) ->
|
||||||
|
{Type, Name} = emqx_connector_resource:parse_connector_id(Id),
|
||||||
|
lookup(Type, Name).
|
||||||
|
|
||||||
|
lookup(Type, Name) ->
|
||||||
|
RawConf = emqx:get_raw_config([connectors, Type, Name], #{}),
|
||||||
|
lookup(Type, Name, RawConf).
|
||||||
|
|
||||||
|
lookup(Type, Name, RawConf) ->
|
||||||
|
case emqx_resource:get_instance(emqx_connector_resource:resource_id(Type, Name)) of
|
||||||
|
{error, not_found} ->
|
||||||
|
{error, not_found};
|
||||||
|
{ok, _, Data} ->
|
||||||
|
{ok, #{
|
||||||
|
type => Type,
|
||||||
|
name => Name,
|
||||||
|
resource_data => Data,
|
||||||
|
raw_config => RawConf
|
||||||
|
}}
|
||||||
|
end.
|
||||||
|
|
||||||
|
get_metrics(Type, Name) ->
|
||||||
|
emqx_resource:get_metrics(emqx_connector_resource:resource_id(Type, Name)).
|
||||||
|
|
||||||
|
% maybe_upgrade(mqtt, Config) ->
|
||||||
|
% emqx_connector_compatible_config:maybe_upgrade(Config);
|
||||||
|
% maybe_upgrade(webhook, Config) ->
|
||||||
|
% emqx_connector_compatible_config:webhook_maybe_upgrade(Config);
|
||||||
|
% maybe_upgrade(_Other, Config) ->
|
||||||
|
% Config.
|
||||||
|
|
||||||
|
disable_enable(Action, ConnectorType, ConnectorName) when
|
||||||
|
Action =:= disable; Action =:= enable
|
||||||
|
->
|
||||||
|
emqx_conf:update(
|
||||||
|
config_key_path() ++ [ConnectorType, ConnectorName],
|
||||||
|
{Action, ConnectorType, ConnectorName},
|
||||||
|
#{override_to => cluster}
|
||||||
|
).
|
||||||
|
|
||||||
|
create(ConnectorType, ConnectorName, RawConf) ->
|
||||||
|
?SLOG(debug, #{
|
||||||
|
connector_action => create,
|
||||||
|
connector_type => ConnectorType,
|
||||||
|
connector_name => ConnectorName,
|
||||||
|
connector_raw_config => emqx_utils:redact(RawConf)
|
||||||
|
}),
|
||||||
|
emqx_conf:update(
|
||||||
|
emqx_connector:config_key_path() ++ [ConnectorType, ConnectorName],
|
||||||
|
RawConf,
|
||||||
|
#{override_to => cluster}
|
||||||
|
).
|
||||||
|
|
||||||
|
remove(ConnectorType, ConnectorName) ->
|
||||||
|
?SLOG(debug, #{
|
||||||
|
brige_action => remove,
|
||||||
|
connector_type => ConnectorType,
|
||||||
|
connector_name => ConnectorName
|
||||||
|
}),
|
||||||
|
emqx_conf:remove(
|
||||||
|
emqx_connector:config_key_path() ++ [ConnectorType, ConnectorName],
|
||||||
|
#{override_to => cluster}
|
||||||
|
).
|
||||||
|
|
||||||
|
check_deps_and_remove(ConnectorType, ConnectorName, RemoveDeps) ->
|
||||||
|
ConnectorId = emqx_connector_resource:connector_id(ConnectorType, ConnectorName),
|
||||||
|
%% NOTE: This violates the design: Rule depends on data-connector but not vice versa.
|
||||||
|
case emqx_rule_engine:get_rule_ids_by_action(ConnectorId) of
|
||||||
|
[] ->
|
||||||
|
remove(ConnectorType, ConnectorName);
|
||||||
|
RuleIds when RemoveDeps =:= false ->
|
||||||
|
{error, {rules_deps_on_this_connector, RuleIds}};
|
||||||
|
RuleIds when RemoveDeps =:= true ->
|
||||||
|
lists:foreach(
|
||||||
|
fun(R) ->
|
||||||
|
emqx_rule_engine:ensure_action_removed(R, ConnectorId)
|
||||||
|
end,
|
||||||
|
RuleIds
|
||||||
|
),
|
||||||
|
remove(ConnectorType, ConnectorName)
|
||||||
|
end.
|
||||||
|
|
||||||
|
%%----------------------------------------------------------------------------------------
|
||||||
|
%% Data backup
|
||||||
|
%%----------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
import_config(RawConf) ->
|
||||||
|
RootKeyPath = config_key_path(),
|
||||||
|
ConnectorsConf = maps:get(<<"connectors">>, RawConf, #{}),
|
||||||
|
OldConnectorsConf = emqx:get_raw_config(RootKeyPath, #{}),
|
||||||
|
MergedConf = merge_confs(OldConnectorsConf, ConnectorsConf),
|
||||||
|
case emqx_conf:update(RootKeyPath, MergedConf, #{override_to => cluster}) of
|
||||||
|
{ok, #{raw_config := NewRawConf}} ->
|
||||||
|
{ok, #{root_key => ?ROOT_KEY, changed => changed_paths(OldConnectorsConf, NewRawConf)}};
|
||||||
|
Error ->
|
||||||
|
{error, #{root_key => ?ROOT_KEY, reason => Error}}
|
||||||
|
end.
|
||||||
|
|
||||||
|
merge_confs(OldConf, NewConf) ->
|
||||||
|
AllTypes = maps:keys(maps:merge(OldConf, NewConf)),
|
||||||
|
lists:foldr(
|
||||||
|
fun(Type, Acc) ->
|
||||||
|
NewConnectors = maps:get(Type, NewConf, #{}),
|
||||||
|
OldConnectors = maps:get(Type, OldConf, #{}),
|
||||||
|
Acc#{Type => maps:merge(OldConnectors, NewConnectors)}
|
||||||
|
end,
|
||||||
|
#{},
|
||||||
|
AllTypes
|
||||||
|
).
|
||||||
|
|
||||||
|
changed_paths(OldRawConf, NewRawConf) ->
|
||||||
|
maps:fold(
|
||||||
|
fun(Type, Connectors, ChangedAcc) ->
|
||||||
|
OldConnectors = maps:get(Type, OldRawConf, #{}),
|
||||||
|
Changed = maps:get(changed, emqx_utils_maps:diff_maps(Connectors, OldConnectors)),
|
||||||
|
[[?ROOT_KEY, Type, K] || K <- maps:keys(Changed)] ++ ChangedAcc
|
||||||
|
end,
|
||||||
|
[],
|
||||||
|
NewRawConf
|
||||||
|
).
|
||||||
|
|
||||||
|
%%========================================================================================
|
||||||
|
%% Helper functions
|
||||||
|
%%========================================================================================
|
||||||
|
|
||||||
|
convert_certs(ConnectorsConf) ->
|
||||||
|
maps:map(
|
||||||
|
fun(Type, Connectors) ->
|
||||||
|
maps:map(
|
||||||
|
fun(Name, ConnectorConf) ->
|
||||||
|
Path = filename:join([?ROOT_KEY, Type, Name]),
|
||||||
|
case emqx_connector_ssl:convert_certs(Path, ConnectorConf) of
|
||||||
|
{error, Reason} ->
|
||||||
|
?SLOG(error, #{
|
||||||
|
msg => "bad_ssl_config",
|
||||||
|
type => Type,
|
||||||
|
name => Name,
|
||||||
|
reason => Reason
|
||||||
|
}),
|
||||||
|
throw({bad_ssl_config, Reason});
|
||||||
|
{ok, ConnectorConf1} ->
|
||||||
|
ConnectorConf1
|
||||||
|
end
|
||||||
|
end,
|
||||||
|
Connectors
|
||||||
|
)
|
||||||
|
end,
|
||||||
|
ConnectorsConf
|
||||||
|
).
|
||||||
|
|
||||||
|
perform_connector_changes(Tasks) ->
|
||||||
|
perform_connector_changes(Tasks, ok).
|
||||||
|
|
||||||
|
perform_connector_changes([], Result) ->
|
||||||
|
Result;
|
||||||
|
perform_connector_changes([#{action := Action, data := MapConfs} = Task | Tasks], Result0) ->
|
||||||
|
OnException = maps:get(on_exception_fn, Task, fun(_Type, _Name, _Conf, _Opts) -> ok end),
|
||||||
|
Result = maps:fold(
|
||||||
|
fun
|
||||||
|
({_Type, _Name}, _Conf, {error, Reason}) ->
|
||||||
|
{error, Reason};
|
||||||
|
%% for emqx_connector_resource:update/4
|
||||||
|
({Type, Name}, {OldConf, Conf}, _) ->
|
||||||
|
ResOpts = emqx_resource:fetch_creation_opts(Conf),
|
||||||
|
case Action(Type, Name, {OldConf, Conf}, ResOpts) of
|
||||||
|
{error, Reason} -> {error, Reason};
|
||||||
|
Return -> Return
|
||||||
|
end;
|
||||||
|
({Type, Name}, Conf, _) ->
|
||||||
|
ResOpts = emqx_resource:fetch_creation_opts(Conf),
|
||||||
|
try Action(Type, Name, Conf, ResOpts) of
|
||||||
|
{error, Reason} -> {error, Reason};
|
||||||
|
Return -> Return
|
||||||
|
catch
|
||||||
|
Kind:Error:Stacktrace ->
|
||||||
|
?SLOG(error, #{
|
||||||
|
msg => "connector_config_update_exception",
|
||||||
|
kind => Kind,
|
||||||
|
error => Error,
|
||||||
|
type => Type,
|
||||||
|
name => Name,
|
||||||
|
stacktrace => Stacktrace
|
||||||
|
}),
|
||||||
|
OnException(Type, Name, Conf, ResOpts),
|
||||||
|
erlang:raise(Kind, Error, Stacktrace)
|
||||||
|
end
|
||||||
|
end,
|
||||||
|
Result0,
|
||||||
|
MapConfs
|
||||||
|
),
|
||||||
|
perform_connector_changes(Tasks, Result).
|
||||||
|
|
||||||
|
diff_confs(NewConfs, OldConfs) ->
|
||||||
|
emqx_utils_maps:diff_maps(
|
||||||
|
flatten_confs(NewConfs),
|
||||||
|
flatten_confs(OldConfs)
|
||||||
|
).
|
||||||
|
|
||||||
|
flatten_confs(Conf0) ->
|
||||||
|
maps:from_list(
|
||||||
|
lists:flatmap(
|
||||||
|
fun({Type, Conf}) ->
|
||||||
|
do_flatten_confs(Type, Conf)
|
||||||
|
end,
|
||||||
|
maps:to_list(Conf0)
|
||||||
|
)
|
||||||
|
).
|
||||||
|
|
||||||
|
do_flatten_confs(Type, Conf0) ->
|
||||||
|
[{{Type, Name}, Conf} || {Name, Conf} <- maps:to_list(Conf0)].
|
||||||
|
|
||||||
|
% get_matched_egress_connectors(Topic) ->
|
||||||
|
% Connectors = emqx:get_config([connectors], #{}),
|
||||||
|
% maps:fold(
|
||||||
|
% fun(BType, Conf, Acc0) ->
|
||||||
|
% maps:fold(
|
||||||
|
% fun
|
||||||
|
% (BName, #{egress := _} = BConf, Acc1) when BType =:= mqtt ->
|
||||||
|
% get_matched_connector_id(BType, BConf, Topic, BName, Acc1);
|
||||||
|
% (_BName, #{ingress := _}, Acc1) when BType =:= mqtt ->
|
||||||
|
% %% ignore ingress only connector
|
||||||
|
% Acc1;
|
||||||
|
% (BName, BConf, Acc1) ->
|
||||||
|
% get_matched_connector_id(BType, BConf, Topic, BName, Acc1)
|
||||||
|
% end,
|
||||||
|
% Acc0,
|
||||||
|
% Conf
|
||||||
|
% )
|
||||||
|
% end,
|
||||||
|
% [],
|
||||||
|
% Connectors
|
||||||
|
% ).
|
||||||
|
|
||||||
|
% get_matched_connector_id(_BType, #{enable := false}, _Topic, _BName, Acc) ->
|
||||||
|
% Acc;
|
||||||
|
% get_matched_connector_id(BType, Conf, Topic, BName, Acc) when ?EGRESS_DIR_BRIDGES(BType) ->
|
||||||
|
% case maps:get(local_topic, Conf, undefined) of
|
||||||
|
% undefined ->
|
||||||
|
% Acc;
|
||||||
|
% Filter ->
|
||||||
|
% do_get_matched_connector_id(Topic, Filter, BType, BName, Acc)
|
||||||
|
% end;
|
||||||
|
% get_matched_connector_id(mqtt, #{egress := #{local := #{topic := Filter}}}, Topic, BName, Acc) ->
|
||||||
|
% do_get_matched_connector_id(Topic, Filter, mqtt, BName, Acc);
|
||||||
|
% get_matched_connector_id(_BType, _Conf, _Topic, _BName, Acc) ->
|
||||||
|
% Acc.
|
||||||
|
|
||||||
|
% do_get_matched_connector_id(Topic, Filter, BType, BName, Acc) ->
|
||||||
|
% case emqx_topic:match(Topic, Filter) of
|
||||||
|
% true -> [emqx_connector_resource:connector_id(BType, BName) | Acc];
|
||||||
|
% false -> Acc
|
||||||
|
% end.
|
||||||
|
|
||||||
|
-spec get_basic_usage_info() ->
|
||||||
|
#{
|
||||||
|
num_connectors => non_neg_integer(),
|
||||||
|
count_by_type =>
|
||||||
|
#{ConnectorType => non_neg_integer()}
|
||||||
|
}
|
||||||
|
when
|
||||||
|
ConnectorType :: atom().
|
||||||
|
get_basic_usage_info() ->
|
||||||
|
InitialAcc = #{num_connectors => 0, count_by_type => #{}},
|
||||||
|
try
|
||||||
|
lists:foldl(
|
||||||
|
fun
|
||||||
|
(#{resource_data := #{config := #{enable := false}}}, Acc) ->
|
||||||
|
Acc;
|
||||||
|
(#{type := ConnectorType}, Acc) ->
|
||||||
|
NumConnectors = maps:get(num_connectors, Acc),
|
||||||
|
CountByType0 = maps:get(count_by_type, Acc),
|
||||||
|
CountByType = maps:update_with(
|
||||||
|
binary_to_atom(ConnectorType, utf8),
|
||||||
|
fun(X) -> X + 1 end,
|
||||||
|
1,
|
||||||
|
CountByType0
|
||||||
|
),
|
||||||
|
Acc#{
|
||||||
|
num_connectors => NumConnectors + 1,
|
||||||
|
count_by_type => CountByType
|
||||||
|
}
|
||||||
|
end,
|
||||||
|
InitialAcc,
|
||||||
|
list()
|
||||||
|
)
|
||||||
|
catch
|
||||||
|
%% for instance, when the connector app is not ready yet.
|
||||||
|
_:_ ->
|
||||||
|
InitialAcc
|
||||||
|
end.
|
|
@ -20,7 +20,13 @@
|
||||||
|
|
||||||
-export([start/2, stop/1]).
|
-export([start/2, stop/1]).
|
||||||
|
|
||||||
|
-define(TOP_LELVE_HDLR_PATH, (emqx_connector:config_key_path())).
|
||||||
|
-define(LEAF_NODE_HDLR_PATH, (emqx_connector:config_key_path() ++ ['?', '?'])).
|
||||||
|
|
||||||
start(_StartType, _StartArgs) ->
|
start(_StartType, _StartArgs) ->
|
||||||
|
ok = emqx_connector:load(),
|
||||||
|
ok = emqx_config_handler:add_handler(?TOP_LELVE_HDLR_PATH, emqx_connector),
|
||||||
|
ok = emqx_config_handler:add_handler(?LEAF_NODE_HDLR_PATH, emqx_connector),
|
||||||
emqx_connector_sup:start_link().
|
emqx_connector_sup:start_link().
|
||||||
|
|
||||||
stop(_State) ->
|
stop(_State) ->
|
||||||
|
|
|
@ -0,0 +1,429 @@
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||||
|
%%
|
||||||
|
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
%% you may not use this file except in compliance with the License.
|
||||||
|
%% You may obtain a copy of the License at
|
||||||
|
%%
|
||||||
|
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
%%
|
||||||
|
%% Unless required by applicable law or agreed to in writing, software
|
||||||
|
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
%% See the License for the specific language governing permissions and
|
||||||
|
%% limitations under the License.
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
-module(emqx_connector_resource).
|
||||||
|
|
||||||
|
-include_lib("emqx_bridge/include/emqx_bridge_resource.hrl").
|
||||||
|
-include_lib("emqx/include/logger.hrl").
|
||||||
|
-include_lib("emqx_resource/include/emqx_resource.hrl").
|
||||||
|
|
||||||
|
-export([
|
||||||
|
connector_to_resource_type/1,
|
||||||
|
resource_id/1,
|
||||||
|
resource_id/2,
|
||||||
|
connector_id/2,
|
||||||
|
parse_connector_id/1,
|
||||||
|
parse_connector_id/2,
|
||||||
|
connector_hookpoint/1,
|
||||||
|
connector_hookpoint_to_connector_id/1
|
||||||
|
]).
|
||||||
|
|
||||||
|
-export([
|
||||||
|
create/3,
|
||||||
|
create_dry_run/2,
|
||||||
|
recreate/2,
|
||||||
|
recreate/3,
|
||||||
|
remove/1,
|
||||||
|
remove/2,
|
||||||
|
remove/4,
|
||||||
|
reset_metrics/1,
|
||||||
|
restart/2,
|
||||||
|
start/2,
|
||||||
|
stop/2
|
||||||
|
% update/2,
|
||||||
|
% update/3%,
|
||||||
|
% update/4
|
||||||
|
]).
|
||||||
|
|
||||||
|
-callback connector_config(ParsedConfig, ConnectorName :: atom() | binary()) ->
|
||||||
|
ParsedConfig
|
||||||
|
when
|
||||||
|
ParsedConfig :: #{atom() => any()}.
|
||||||
|
-optional_callbacks([connector_config/2]).
|
||||||
|
|
||||||
|
%% bi-directional connector with producer/consumer or ingress/egress configs
|
||||||
|
-define(IS_BI_DIR_BRIDGE(TYPE),
|
||||||
|
(TYPE) =:= <<"mqtt">>
|
||||||
|
).
|
||||||
|
-define(IS_INGRESS_BRIDGE(TYPE),
|
||||||
|
(TYPE) =:= <<"kafka_consumer">> orelse
|
||||||
|
(TYPE) =:= <<"gcp_pubsub_consumer">> orelse
|
||||||
|
?IS_BI_DIR_BRIDGE(TYPE)
|
||||||
|
).
|
||||||
|
|
||||||
|
-if(?EMQX_RELEASE_EDITION == ee).
|
||||||
|
connector_to_resource_type(ConnectorType) -> emqx_connector_enterprise:resource_type(ConnectorType).
|
||||||
|
|
||||||
|
connector_impl_module(ConnectorType) ->
|
||||||
|
emqx_connector_enterprise:connector_impl_module(ConnectorType).
|
||||||
|
-else.
|
||||||
|
connector_to_resource_type(_) -> undefined.
|
||||||
|
|
||||||
|
connector_impl_module(_ConnectorType) -> undefined.
|
||||||
|
-endif.
|
||||||
|
|
||||||
|
resource_id(ConnectorId) when is_binary(ConnectorId) ->
|
||||||
|
<<"connector:", ConnectorId/binary>>.
|
||||||
|
|
||||||
|
resource_id(ConnectorType, ConnectorName) ->
|
||||||
|
ConnectorId = connector_id(ConnectorType, ConnectorName),
|
||||||
|
resource_id(ConnectorId).
|
||||||
|
|
||||||
|
connector_id(ConnectorType, ConnectorName) ->
|
||||||
|
Name = bin(ConnectorName),
|
||||||
|
Type = bin(ConnectorType),
|
||||||
|
<<Type/binary, ":", Name/binary>>.
|
||||||
|
|
||||||
|
parse_connector_id(ConnectorId) ->
|
||||||
|
parse_connector_id(ConnectorId, #{atom_name => true}).
|
||||||
|
|
||||||
|
-spec parse_connector_id(list() | binary() | atom(), #{atom_name => boolean()}) ->
|
||||||
|
{atom(), atom() | binary()}.
|
||||||
|
parse_connector_id(ConnectorId, Opts) ->
|
||||||
|
case string:split(bin(ConnectorId), ":", all) of
|
||||||
|
[Type, Name] ->
|
||||||
|
{to_type_atom(Type), validate_name(Name, Opts)};
|
||||||
|
[<<"connector">>, Type, Name] ->
|
||||||
|
{to_type_atom(Type), validate_name(Name, Opts)};
|
||||||
|
_ ->
|
||||||
|
invalid_data(
|
||||||
|
<<"should be of pattern {type}:{name} or connector:{type}:{name}, but got ",
|
||||||
|
ConnectorId/binary>>
|
||||||
|
)
|
||||||
|
end.
|
||||||
|
|
||||||
|
connector_hookpoint(ConnectorId) ->
|
||||||
|
<<"$connectors/", (bin(ConnectorId))/binary>>.
|
||||||
|
|
||||||
|
connector_hookpoint_to_connector_id(?BRIDGE_HOOKPOINT(ConnectorId)) ->
|
||||||
|
{ok, ConnectorId};
|
||||||
|
connector_hookpoint_to_connector_id(_) ->
|
||||||
|
{error, bad_connector_hookpoint}.
|
||||||
|
|
||||||
|
validate_name(Name0, Opts) ->
|
||||||
|
Name = unicode:characters_to_list(Name0, utf8),
|
||||||
|
case is_list(Name) andalso Name =/= [] of
|
||||||
|
true ->
|
||||||
|
case lists:all(fun is_id_char/1, Name) of
|
||||||
|
true ->
|
||||||
|
case maps:get(atom_name, Opts, true) of
|
||||||
|
% NOTE
|
||||||
|
% Rule may be created before connector, thus not `list_to_existing_atom/1`,
|
||||||
|
% also it is infrequent user input anyway.
|
||||||
|
true -> list_to_atom(Name);
|
||||||
|
false -> Name0
|
||||||
|
end;
|
||||||
|
false ->
|
||||||
|
invalid_data(<<"bad name: ", Name0/binary>>)
|
||||||
|
end;
|
||||||
|
false ->
|
||||||
|
invalid_data(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>)
|
||||||
|
end.
|
||||||
|
|
||||||
|
-spec invalid_data(binary()) -> no_return().
|
||||||
|
invalid_data(Reason) -> throw(#{kind => validation_error, reason => Reason}).
|
||||||
|
|
||||||
|
is_id_char(C) when C >= $0 andalso C =< $9 -> true;
|
||||||
|
is_id_char(C) when C >= $a andalso C =< $z -> true;
|
||||||
|
is_id_char(C) when C >= $A andalso C =< $Z -> true;
|
||||||
|
is_id_char($_) -> true;
|
||||||
|
is_id_char($-) -> true;
|
||||||
|
is_id_char($.) -> true;
|
||||||
|
is_id_char(_) -> false.
|
||||||
|
|
||||||
|
to_type_atom(Type) ->
|
||||||
|
try
|
||||||
|
erlang:binary_to_existing_atom(Type, utf8)
|
||||||
|
catch
|
||||||
|
_:_ ->
|
||||||
|
invalid_data(<<"unknown connector type: ", Type/binary>>)
|
||||||
|
end.
|
||||||
|
|
||||||
|
reset_metrics(ResourceId) ->
|
||||||
|
emqx_resource:reset_metrics(ResourceId).
|
||||||
|
|
||||||
|
restart(Type, Name) ->
|
||||||
|
emqx_resource:restart(resource_id(Type, Name)).
|
||||||
|
|
||||||
|
stop(Type, Name) ->
|
||||||
|
emqx_resource:stop(resource_id(Type, Name)).
|
||||||
|
|
||||||
|
start(Type, Name) ->
|
||||||
|
emqx_resource:start(resource_id(Type, Name)).
|
||||||
|
|
||||||
|
% create(ConnectorId, Conf) ->
|
||||||
|
% {ConnectorType, ConnectorName} = parse_connector_id(ConnectorId),
|
||||||
|
% create(ConnectorType, ConnectorName, Conf).
|
||||||
|
|
||||||
|
%% Create a local resource for connector {Type, Name} from config `Conf'.
%% The config is redacted before logging so secrets never reach the logs.
%% Crashes (badmatch) if resource creation fails.
create(Type, Name, Conf) ->
    ?SLOG(info, #{
        msg => "create connector",
        type => Type,
        name => Name,
        config => emqx_utils:redact(Conf)
    }),
    ResId = resource_id(Type, Name),
    ParsedConf = parse_confs(bin(Type), Name, Conf),
    {ok, _Data} = emqx_resource:create_local(
        ResId,
        <<"emqx_connector">>,
        connector_to_resource_type(Type),
        ParsedConf,
        parse_opts(Conf, #{})
    ),
    ok.
|
||||||
|
|
||||||
|
% update(ConnectorId, {OldConf, Conf}) ->
|
||||||
|
% {ConnectorType, ConnectorName} = parse_connector_id(ConnectorId),
|
||||||
|
% update(ConnectorType, ConnectorName, {OldConf, Conf}).
|
||||||
|
|
||||||
|
% update(Type, Name, {OldConf, Conf}) ->
|
||||||
|
% update(Type, Name, {OldConf, Conf}, #{}).
|
||||||
|
|
||||||
|
%update(Type, Name, {OldConf, Conf}, Opts) ->
|
||||||
|
% %% TODO: sometimes its not necessary to restart the connector connection.
|
||||||
|
% %%
|
||||||
|
% %% - if the connection related configs like `servers` is updated, we should restart/start
|
||||||
|
% %% or stop connectors according to the change.
|
||||||
|
% %% - if the connection related configs are not update, only non-connection configs like
|
||||||
|
% %% the `method` or `headers` of a WebHook is changed, then the connector can be updated
|
||||||
|
% %% without restarting the connector.
|
||||||
|
% %%
|
||||||
|
% case emqx_utils_maps:if_only_to_toggle_enable(OldConf, Conf) of
|
||||||
|
% false ->
|
||||||
|
% ?SLOG(info, #{
|
||||||
|
% msg => "update connector",
|
||||||
|
% type => Type,
|
||||||
|
% name => Name,
|
||||||
|
% config => emqx_utils:redact(Conf)
|
||||||
|
% }),
|
||||||
|
% case recreate(Type, Name, Conf, Opts) of
|
||||||
|
% {ok, _} ->
|
||||||
|
% ok;
|
||||||
|
% {error, not_found} ->
|
||||||
|
% ?SLOG(warning, #{
|
||||||
|
% msg => "updating_a_non_existing_connector",
|
||||||
|
% type => Type,
|
||||||
|
% name => Name,
|
||||||
|
% config => emqx_utils:redact(Conf)
|
||||||
|
% }),
|
||||||
|
% create(Type, Name, Conf, Opts);
|
||||||
|
% {error, Reason} ->
|
||||||
|
% {error, {update_connector_failed, Reason}}
|
||||||
|
% end;
|
||||||
|
% true ->
|
||||||
|
% %% we don't need to recreate the connector if this config change is only to
|
||||||
|
% %% toggole the config 'connector.{type}.{name}.enable'
|
||||||
|
% _ =
|
||||||
|
% case maps:get(enable, Conf, true) of
|
||||||
|
% true ->
|
||||||
|
% restart(Type, Name);
|
||||||
|
% false ->
|
||||||
|
% stop(Type, Name)
|
||||||
|
% end,
|
||||||
|
% ok
|
||||||
|
% end.
|
||||||
|
|
||||||
|
%% Recreate the connector resource, re-reading its config from the
%% application environment when none is given.
recreate(Type, Name) ->
    recreate(Type, Name, emqx:get_config([connectors, Type, Name])).

%% Recreate with an explicit config and default (empty) options.
recreate(Type, Name, Conf) ->
    recreate(Type, Name, Conf, #{}).

%% Recreate with explicit config and resource options.
recreate(Type, Name, Conf, Opts) ->
    ResId = resource_id(Type, Name),
    emqx_resource:recreate_local(
        ResId,
        connector_to_resource_type(Type),
        parse_confs(bin(Type), Name, Conf),
        parse_opts(Conf, Opts)
    ).
|
||||||
|
|
||||||
|
%% Probe a connector config without registering a persistent resource.
%% A random temp name/path is used so any TLS certs materialized during the
%% check can be removed unconditionally in the `after' clause.
create_dry_run(Type, Conf0) ->
    TmpName = iolist_to_binary([?TEST_ID_PREFIX, emqx_utils:gen_id(8)]),
    TmpPath = emqx_utils:safe_filename(TmpName),
    %% Already typechecked, no need to catch errors
    TypeBin = bin(Type),
    TypeAtom = safe_atom(Type),
    %% `name' is not part of the per-connector schema; drop it before checking.
    Conf1 = maps:without([<<"name">>], Conf0),
    %% Wrap the config in a full root so the whole-schema checker can be reused.
    RawConf = #{<<"connectors">> => #{TypeBin => #{<<"temp_name">> => Conf1}}},
    try
        #{connectors := #{TypeAtom := #{temp_name := Conf}}} =
            hocon_tconf:check_plain(
                emqx_connector_schema,
                RawConf,
                #{atom_key => true, required => false}
            ),
        case emqx_connector_ssl:convert_certs(TmpPath, Conf) of
            {error, Reason} ->
                {error, Reason};
            {ok, ConfNew} ->
                ParseConf = parse_confs(bin(Type), TmpName, ConfNew),
                emqx_resource:create_dry_run_local(connector_to_resource_type(Type), ParseConf)
        end
    catch
        %% validation errors
        throw:Reason1 ->
            {error, Reason1}
    after
        %% Best-effort cleanup of temp certs written under TmpPath.
        _ = file:del_dir_r(emqx_tls_lib:pem_dir(TmpPath))
    end.
|
||||||
|
|
||||||
|
%% Remove a connector given its combined id binary.
remove(ConnectorId) ->
    {Type, Name} = parse_connector_id(ConnectorId),
    remove(Type, Name, #{}, #{}).

%% Remove a connector given its type and name.
remove(Type, Name) ->
    remove(Type, Name, #{}, #{}).
|
||||||
|
|
||||||
|
%% just for perform_connector_changes/1
%% Config and opts are ignored; removal of an already-absent resource is
%% treated as success.
remove(Type, Name, _Conf, _Opts) ->
    ?SLOG(info, #{msg => "remove_connector", type => Type, name => Name}),
    Result = emqx_resource:remove_local(resource_id(Type, Name)),
    case Result of
        ok -> ok;
        {error, not_found} -> ok;
        {error, _} = Error -> Error
    end.
|
||||||
|
|
||||||
|
%% convert connector configs to what the connector modules want
%% Dispatches on the *binary* type name; callers pass `bin(Type)'.
parse_confs(
    <<"webhook">>,
    _Name,
    #{
        url := Url,
        method := Method,
        headers := Headers,
        max_retries := Retry
    } = Conf
) ->
    Url1 = bin(Url),
    {BaseUrl, Path} = parse_url(Url1),
    BaseUrl1 =
        case emqx_http_lib:uri_parse(BaseUrl) of
            {ok, BUrl} ->
                BUrl;
            {error, Reason} ->
                Reason1 = emqx_utils:readable_error_msg(Reason),
                invalid_data(<<"Invalid URL: ", Url1/binary, ", details: ", Reason1/binary>>)
        end,
    %% deep_get/2 crashes if the path is absent — request_ttl is mandatory here.
    RequestTTL = emqx_utils_maps:deep_get(
        [resource_opts, request_ttl],
        Conf
    ),
    Conf#{
        base_url => BaseUrl1,
        request =>
            #{
                path => Path,
                method => Method,
                body => maps:get(body, Conf, undefined),
                headers => Headers,
                request_ttl => RequestTTL,
                max_retries => Retry
            }
    };
parse_confs(<<"iotdb">>, Name, Conf) ->
    %% [FIXME] this has no place here, it's used in parse_confs/3, which should
    %% rather delegate to a behavior callback than implementing domain knowledge
    %% here (reversed dependency)
    InsertTabletPathV1 = <<"rest/v1/insertTablet">>,
    InsertTabletPathV2 = <<"rest/v2/insertTablet">>,
    #{
        base_url := BaseURL,
        authentication :=
            #{
                username := Username,
                password := Password
            }
    } = Conf,
    BasicToken = base64:encode(<<Username/binary, ":", Password/binary>>),
    %% This version atom correspond to the macro ?VSN_1_1_X in
    %% emqx_connector_iotdb.hrl. It would be better to use the macro directly, but
    %% this cannot be done without introducing a dependency on the
    %% emqx_iotdb_connector app (which is an EE app).
    DefaultIOTDBConnector = 'v1.1.x',
    Version = maps:get(iotdb_version, Conf, DefaultIOTDBConnector),
    InsertTabletPath =
        case Version of
            DefaultIOTDBConnector -> InsertTabletPathV2;
            _ -> InsertTabletPathV1
        end,
    %% IoTDB is implemented on top of the webhook connector: synthesize a
    %% webhook config and re-dispatch through the webhook clause above.
    WebhookConfig =
        Conf#{
            method => <<"post">>,
            url => <<BaseURL/binary, InsertTabletPath/binary>>,
            headers => [
                {<<"Content-type">>, <<"application/json">>},
                {<<"Authorization">>, BasicToken}
            ]
        },
    parse_confs(
        <<"webhook">>,
        Name,
        WebhookConfig
    );
parse_confs(Type, Name, Conf) when ?IS_INGRESS_BRIDGE(Type) ->
    %% For some drivers that can be used as data-sources, we need to provide a
    %% hookpoint. The underlying driver will run `emqx_hooks:run/3` when it
    %% receives a message from the external database.
    BId = connector_id(Type, Name),
    ConnectorHookpoint = connector_hookpoint(BId),
    Conf#{hookpoint => ConnectorHookpoint, connector_name => Name};
%% TODO: rename this to `kafka_producer' after alias support is added
%% to hocon; keeping this as just `kafka' for backwards compatibility.
parse_confs(<<"kafka">> = _Type, Name, Conf) ->
    Conf#{connector_name => Name};
parse_confs(<<"pulsar_producer">> = _Type, Name, Conf) ->
    Conf#{connector_name => Name};
parse_confs(<<"kinesis_producer">> = _Type, Name, Conf) ->
    Conf#{connector_name => Name};
%% Fallback: let the connector implementation module (if any) massage the config.
parse_confs(ConnectorType, ConnectorName, Config) ->
    connector_config(ConnectorType, ConnectorName, Config).
|
||||||
|
|
||||||
|
%% Give the connector's implementation module a chance to rewrite its config.
%% Falls back to the unmodified config when the module exports no
%% connector_config/2 callback.
connector_config(ConnectorType, ConnectorName, Config) ->
    Mod = connector_impl_module(ConnectorType),
    HasCallback = erlang:function_exported(Mod, connector_config, 2),
    case HasCallback of
        true -> Mod:connector_config(Config, ConnectorName);
        false -> Config
    end.
|
||||||
|
|
||||||
|
%% Split a URL into {<<"scheme://host:port">>, Path}. The path may be empty;
%% a URL without "//" is rejected as having no scheme.
parse_url(Url) ->
    case string:split(Url, "//", leading) of
        [Scheme, UrlRem] ->
            MkBase = fun(HostPort) -> iolist_to_binary([Scheme, "//", HostPort]) end,
            case string:split(UrlRem, "/", leading) of
                [HostPort, Path] -> {MkBase(HostPort), Path};
                [HostPort] -> {MkBase(HostPort), <<>>}
            end;
        [Url] ->
            invalid_data(<<"Missing scheme in URL: ", Url/binary>>)
    end.
|
||||||
|
|
||||||
|
%% Coerce a binary, string or atom into a UTF-8 binary.
bin(B) when is_binary(B) -> B;
bin(S) when is_list(S) -> list_to_binary(S);
bin(A) when is_atom(A) -> atom_to_binary(A, utf8).
|
||||||
|
|
||||||
|
%% Coerce to atom without ever creating a new atom (only pre-existing
%% atoms are accepted, protecting the atom table).
safe_atom(A) when is_atom(A) -> A;
safe_atom(B) when is_binary(B) -> binary_to_existing_atom(B, utf8).
|
||||||
|
|
||||||
|
%% Derive resource creation options from the connector config.
parse_opts(Conf, BaseOpts) ->
    override_start_after_created(Conf, BaseOpts).
|
||||||
|
|
||||||
|
%% Force start_after_created to false for disabled connectors; for enabled
%% ones, honor an explicit option (defaulting to true).
override_start_after_created(Config, Opts) ->
    StartAfterCreated =
        case maps:get(enable, Config, true) of
            false -> false;
            true -> maps:get(start_after_created, Opts, true)
        end,
    Opts#{start_after_created => StartAfterCreated}.
|
|
@ -0,0 +1,46 @@
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
-module(emqx_connector_enterprise).
|
||||||
|
|
||||||
|
-if(?EMQX_RELEASE_EDITION == ee).
|
||||||
|
|
||||||
|
-export([
|
||||||
|
resource_type/1,
|
||||||
|
connector_impl_module/1
|
||||||
|
]).
|
||||||
|
|
||||||
|
-include_lib("hocon/include/hoconsc.hrl").
|
||||||
|
-import(hoconsc, [mk/2, enum/1, ref/2]).
|
||||||
|
|
||||||
|
-export([
|
||||||
|
fields/1
|
||||||
|
]).
|
||||||
|
|
||||||
|
%% HOCON schema fields for the enterprise `connectors' root.
fields(connectors) ->
    kafka_structs().

%% Schema entry: a map of connector names to Kafka producer configs.
kafka_structs() ->
    KafkaConfMap = hoconsc:map(name, ref(emqx_bridge_kafka, "config")),
    [
        {kafka,
            mk(KafkaConfMap, #{
                desc => <<"Kafka Connector Config">>,
                required => false
            })}
    ].
|
||||||
|
|
||||||
|
%% Map a connector type (atom or binary) to its implementing resource module.
%% Uses binary_to_existing_atom/2 instead of binary_to_atom/2: the valid type
%% atoms already exist (they are literals in this module), and converting
%% arbitrary input with binary_to_atom/2 would let callers grow the atom table
%% unboundedly (atoms are never GC'd).
resource_type(Type) when is_binary(Type) -> resource_type(binary_to_existing_atom(Type, utf8));
resource_type(kafka) -> emqx_bridge_kafka_impl_producer.
|
||||||
|
|
||||||
|
%% For connectors that need to override connector configurations.
%% Returns the override module for the given connector type, or `undefined'.
%% Uses binary_to_existing_atom/2 (in a try) instead of binary_to_atom/2 so
%% unknown binary input still yields `undefined' but no longer leaks a new
%% atom per call (atoms are never GC'd — unbounded input would exhaust the
%% atom table).
connector_impl_module(ConnectorType) when is_binary(ConnectorType) ->
    try binary_to_existing_atom(ConnectorType, utf8) of
        TypeAtom -> connector_impl_module(TypeAtom)
    catch
        %% Atom does not exist => certainly no override module for it.
        error:badarg -> undefined
    end;
connector_impl_module(_ConnectorType) ->
    undefined.
|
||||||
|
|
||||||
|
-else.
|
||||||
|
|
||||||
|
-endif.
|
|
@ -0,0 +1,223 @@
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||||
|
%%
|
||||||
|
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
%% you may not use this file except in compliance with the License.
|
||||||
|
%% You may obtain a copy of the License at
|
||||||
|
%%
|
||||||
|
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
%%
|
||||||
|
%% Unless required by applicable law or agreed to in writing, software
|
||||||
|
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
%% See the License for the specific language governing permissions and
|
||||||
|
%% limitations under the License.
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
-module(emqx_connector_schema).
|
||||||
|
|
||||||
|
-include_lib("typerefl/include/types.hrl").
|
||||||
|
-include_lib("hocon/include/hoconsc.hrl").
|
||||||
|
-include_lib("emqx/include/logger.hrl").
|
||||||
|
|
||||||
|
-import(hoconsc, [mk/2, ref/2]).
|
||||||
|
|
||||||
|
-export([transform_old_style_bridges_to_connector_and_actions/1]).
|
||||||
|
|
||||||
|
-export([roots/0, fields/1, desc/1, namespace/0, tags/0]).
|
||||||
|
|
||||||
|
-if(?EMQX_RELEASE_EDITION == ee).
|
||||||
|
|
||||||
|
%% Schema fields contributed by the enterprise edition, or [] when the
%% enterprise module provides none.
enterprise_fields_connectors() ->
    %% We *must* do this to ensure the module is really loaded, especially when we use
    %% `call_hocon' from `nodetool' to generate initial configurations.
    _ = emqx_connector_enterprise:module_info(),
    HasFields = erlang:function_exported(emqx_connector_enterprise, fields, 1),
    case HasFields of
        true -> emqx_connector_enterprise:fields(connectors);
        false -> []
    end.
|
||||||
|
|
||||||
|
-else.
|
||||||
|
|
||||||
|
%% Community edition: no enterprise connector schemas.
enterprise_fields_connectors() -> [].
|
||||||
|
|
||||||
|
-endif.
|
||||||
|
|
||||||
|
%% Bridge types that can share a connector of the given type; Kafka only for now.
connector_type_to_bridge_types(kafka) -> [kafka].
|
||||||
|
|
||||||
|
%% Raw-config root key under which the action halves of split bridges are stored.
actions_config_name() -> <<"bridges_v2">>.
|
||||||
|
|
||||||
|
%% True iff the bridge config contains at least one of the connector-level
%% fields (i.e. it is an old-style bridge that needs splitting).
has_connector_field(BridgeConf, ConnectorFields) ->
    IsPresent =
        fun({FieldName, _Schema}) ->
            maps:is_key(to_bin(FieldName), BridgeConf)
        end,
    lists:any(IsPresent, ConnectorFields).
|
||||||
|
|
||||||
|
%% Select the bridges of `BridgeType' that carry connector-level fields and
%% therefore must be split into a connector and an action. Returns
%% [{BridgeType, BridgeName, BridgeConf, ConnectorFields}] in input order.
%% Rewritten from explicit recursion to the equivalent list comprehension
%% (same elements, same order) — more idiomatic and harder to get wrong.
bridge_configs_to_transform(BridgeType, BridgeNameBridgeConfList, ConnectorFields) ->
    [
        {BridgeType, BridgeName, BridgeConf, ConnectorFields}
     || {BridgeName, BridgeConf} <- BridgeNameBridgeConfList,
        has_connector_field(BridgeConf, ConnectorFields)
    ].
|
||||||
|
|
||||||
|
%% Split one old-style bridge config into its connector and action halves.
%% The connector half is the subset of BridgeConf keys listed in
%% ConnectorFields; the action half is BridgeConf minus those keys (except
%% `enable', kept in both), plus a `connector' reference to the generated
%% connector name. ConnectorsMap holds already-existing connectors and is
%% used only for name-collision avoidance.
split_bridge_to_connector_and_action(
    {ConnectorsMap, {BridgeType, BridgeName, BridgeConf, ConnectorFields}}
) ->
    %% Get connector fields from bridge config
    ConnectorMap = lists:foldl(
        fun({ConnectorFieldName, _Spec}, ToTransformSoFar) ->
            case maps:is_key(to_bin(ConnectorFieldName), BridgeConf) of
                true ->
                    NewToTransform = maps:put(
                        to_bin(ConnectorFieldName),
                        maps:get(to_bin(ConnectorFieldName), BridgeConf),
                        ToTransformSoFar
                    ),
                    NewToTransform;
                false ->
                    ToTransformSoFar
            end
        end,
        #{},
        ConnectorFields
    ),
    %% Remove connector fields from bridge config to create Action
    ActionMap0 = lists:foldl(
        fun
            ({enable, _Spec}, ToTransformSoFar) ->
                %% The `enable' field is used in both the connector and the action.
                ToTransformSoFar;
            ({ConnectorFieldName, _Spec}, ToTransformSoFar) ->
                case maps:is_key(to_bin(ConnectorFieldName), BridgeConf) of
                    true ->
                        maps:remove(to_bin(ConnectorFieldName), ToTransformSoFar);
                    false ->
                        ToTransformSoFar
                end
        end,
        BridgeConf,
        ConnectorFields
    ),
    %% Generate a connector name
    ConnectorName = generate_connector_name(ConnectorsMap, BridgeName, 0),
    %% Add connector field to action map
    ActionMap = maps:put(<<"connector">>, ConnectorName, ActionMap0),
    {BridgeType, BridgeName, ActionMap, ConnectorName, ConnectorMap}.
|
||||||
|
|
||||||
|
%% Produce a connector name derived from the bridge name that does not
%% collide with any key of ConnectorsMap: "connector_<bridge>" first, then
%% "connector_<bridge>_2", "_3", ... until a free name is found.
generate_connector_name(ConnectorsMap, BridgeName, Attempt) ->
    Candidate =
        case Attempt of
            0 -> iolist_to_binary(io_lib:format("connector_~s", [BridgeName]));
            N -> iolist_to_binary(io_lib:format("connector_~s_~p", [BridgeName, N + 1]))
        end,
    case maps:is_key(Candidate, ConnectorsMap) of
        false -> Candidate;
        true -> generate_connector_name(ConnectorsMap, BridgeName, Attempt + 1)
    end.
|
||||||
|
|
||||||
|
%% Fold step for one connector type declared in the `connectors' schema:
%% find every old-style bridge of the corresponding bridge types, split each
%% into a connector plus an action, and rewrite the raw config accordingly.
%% The head matches only map-of-ref schema entries; other shapes would crash
%% (function_clause) — acceptable, since the schema is produced in-house.
transform_old_style_bridges_to_connector_and_actions_of_type(
    {ConnectorType, #{type := {map, name, {ref, ConnectorConfSchemaMod, ConnectorConfSchemaName}}}},
    RawConfig
) ->
    ConnectorFields = ConnectorConfSchemaMod:fields(ConnectorConfSchemaName),
    BridgeTypes = connector_type_to_bridge_types(ConnectorType),
    BridgesConfMap = maps:get(<<"bridges">>, RawConfig, #{}),
    ConnectorsConfMap = maps:get(<<"connectors">>, RawConfig, #{}),
    BridgeConfigsToTransform1 =
        lists:foldl(
            fun(BridgeType, ToTranformSoFar) ->
                BridgeNameToBridgeMap = maps:get(to_bin(BridgeType), BridgesConfMap, #{}),
                BridgeNameBridgeConfList = maps:to_list(BridgeNameToBridgeMap),
                NewToTransform = bridge_configs_to_transform(
                    BridgeType, BridgeNameBridgeConfList, ConnectorFields
                ),
                %% Builds a nested list-of-lists; flattened below.
                [NewToTransform, ToTranformSoFar]
            end,
            [],
            BridgeTypes
        ),
    BridgeConfigsToTransform = lists:flatten(BridgeConfigsToTransform1),
    %% Pair each bridge-to-transform with the current connectors map so the
    %% splitter can pick collision-free connector names.
    %% NOTE(review): the map is snapshotted *before* any connectors are added
    %% below, so names generated in this pass are only checked against
    %% pre-existing connectors — confirm two same-named bridges cannot clash.
    BridgeConfigsToTransformWithConnectorConf = lists:zip(
        lists:duplicate(length(BridgeConfigsToTransform), ConnectorsConfMap),
        BridgeConfigsToTransform
    ),
    ActionConnectorTuples = lists:map(
        fun split_bridge_to_connector_and_action/1,
        BridgeConfigsToTransformWithConnectorConf
    ),
    %% Add connectors and actions and remove bridges
    lists:foldl(
        fun({BridgeType, BridgeName, ActionMap, ConnectorName, ConnectorMap}, RawConfigSoFar) ->
            %% Add connector
            RawConfigSoFar1 = emqx_utils_maps:deep_put(
                [<<"connectors">>, to_bin(ConnectorType), ConnectorName],
                RawConfigSoFar,
                ConnectorMap
            ),
            %% Remove bridge
            RawConfigSoFar2 = emqx_utils_maps:deep_remove(
                [<<"bridges">>, to_bin(BridgeType), BridgeName],
                RawConfigSoFar1
            ),
            %% Add action
            RawConfigSoFar3 = emqx_utils_maps:deep_put(
                [actions_config_name(), to_bin(BridgeType), BridgeName],
                RawConfigSoFar2,
                ActionMap
            ),
            RawConfigSoFar3
        end,
        RawConfig,
        ActionConnectorTuples
    ).
|
||||||
|
|
||||||
|
%% Entry point: rewrite the raw config so that every old-style bridge is
%% replaced by a connector entry plus an action entry, one pass per
%% connector type declared in the `connectors' schema.
transform_old_style_bridges_to_connector_and_actions(RawConfig) ->
    lists:foldl(
        fun transform_old_style_bridges_to_connector_and_actions_of_type/2,
        RawConfig,
        fields(connectors)
    ).
|
||||||
|
|
||||||
|
%%======================================================================================
|
||||||
|
%% HOCON Schema Callbacks
|
||||||
|
%%======================================================================================
|
||||||
|
|
||||||
|
%% HOCON schema namespace for this module.
namespace() -> "connector".
|
||||||
|
|
||||||
|
%% Documentation tags for grouping this schema's entries.
tags() ->
    [<<"Connector">>].
|
||||||
|
|
||||||
|
%% Schema root: the top-level `connectors' config entry.
%% NOTE(review): ?IMPORTANCE_LOW presumably down-ranks/hides this root in
%% generated docs — confirm against hocon's importance semantics.
roots() -> [{connectors, ?HOCON(?R_REF(connectors), #{importance => ?IMPORTANCE_LOW})}].
|
||||||
|
|
||||||
|
%% Fields of the `connectors' root. Currently only the enterprise edition
%% contributes connector schemas (community edition gets []).
%% The redundant `[] ++' prefix was removed — it was a no-op.
fields(connectors) ->
    enterprise_fields_connectors().
|
||||||
|
|
||||||
|
%% No struct descriptions are provided yet for any schema ref.
desc(_) ->
    undefined.
|
||||||
|
|
||||||
|
%%======================================================================================
|
||||||
|
%% Helper Functions
|
||||||
|
%%======================================================================================
|
||||||
|
|
||||||
|
%% Best-effort coercion to binary: atoms and binaries are converted/passed
%% through; anything else is returned unchanged (callers rely on this
%% pass-through). Uses atom_to_binary/2 with utf8 instead of
%% list_to_binary(atom_to_list(...)): the latter raises badarg for atoms
%% containing codepoints above 255 (Unicode atoms, allowed since OTP 20),
%% while atom_to_binary/2 encodes them correctly.
to_bin(Atom) when is_atom(Atom) ->
    atom_to_binary(Atom, utf8);
to_bin(Bin) when is_binary(Bin) ->
    Bin;
to_bin(Something) ->
    Something.
|
|
@ -15,6 +15,7 @@
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
-type resource_type() :: module().
|
-type resource_type() :: module().
|
||||||
-type resource_id() :: binary().
|
-type resource_id() :: binary().
|
||||||
|
-type channel_id() :: binary().
|
||||||
-type raw_resource_config() :: binary() | raw_term_resource_config().
|
-type raw_resource_config() :: binary() | raw_term_resource_config().
|
||||||
-type raw_term_resource_config() :: #{binary() => term()} | [raw_term_resource_config()].
|
-type raw_term_resource_config() :: #{binary() => term()} | [raw_term_resource_config()].
|
||||||
-type resource_config() :: term().
|
-type resource_config() :: term().
|
||||||
|
|
|
@ -59,11 +59,15 @@
|
||||||
remove/1,
|
remove/1,
|
||||||
remove_local/1,
|
remove_local/1,
|
||||||
reset_metrics/1,
|
reset_metrics/1,
|
||||||
reset_metrics_local/1
|
reset_metrics_local/1,
|
||||||
|
%% Create metrics for a resource ID
|
||||||
|
create_metrics/1,
|
||||||
|
%% Delete metrics for a resource ID
|
||||||
|
clear_metrics/1
|
||||||
]).
|
]).
|
||||||
|
|
||||||
%% Calls to the callback module with current resource state
|
%% Calls to the callback module with current resource state
|
||||||
%% They also save the state after the call finished (except query/2,3).
|
%% They also save the state after the call finished (except call_get_channel_config/3).
|
||||||
|
|
||||||
-export([
|
-export([
|
||||||
start/1,
|
start/1,
|
||||||
|
@ -72,6 +76,7 @@
|
||||||
restart/2,
|
restart/2,
|
||||||
%% verify if the resource is working normally
|
%% verify if the resource is working normally
|
||||||
health_check/1,
|
health_check/1,
|
||||||
|
channel_health_check/2,
|
||||||
%% set resource status to disconnected
|
%% set resource status to disconnected
|
||||||
set_resource_status_connecting/1,
|
set_resource_status_connecting/1,
|
||||||
%% stop the instance
|
%% stop the instance
|
||||||
|
@ -87,7 +92,9 @@
|
||||||
has_allocated_resources/1,
|
has_allocated_resources/1,
|
||||||
get_allocated_resources/1,
|
get_allocated_resources/1,
|
||||||
get_allocated_resources_list/1,
|
get_allocated_resources_list/1,
|
||||||
forget_allocated_resources/1
|
forget_allocated_resources/1,
|
||||||
|
%% Get channel config from resource
|
||||||
|
call_get_channel_config/3
|
||||||
]).
|
]).
|
||||||
|
|
||||||
%% Direct calls to the callback module
|
%% Direct calls to the callback module
|
||||||
|
@ -99,10 +106,18 @@
|
||||||
call_start/3,
|
call_start/3,
|
||||||
%% verify if the resource is working normally
|
%% verify if the resource is working normally
|
||||||
call_health_check/3,
|
call_health_check/3,
|
||||||
|
%% verify if the resource channel is working normally
|
||||||
|
call_channel_health_check/4,
|
||||||
%% stop the instance
|
%% stop the instance
|
||||||
call_stop/3,
|
call_stop/3,
|
||||||
%% get the query mode of the resource
|
%% get the query mode of the resource
|
||||||
query_mode/3
|
query_mode/3,
|
||||||
|
%% Add channel to resource
|
||||||
|
call_add_channel/5,
|
||||||
|
%% Remove channel from resource
|
||||||
|
call_remove_channel/4,
|
||||||
|
%% Get channels from resource
|
||||||
|
call_get_channels/2
|
||||||
]).
|
]).
|
||||||
|
|
||||||
%% list all the instances, id only.
|
%% list all the instances, id only.
|
||||||
|
@ -125,6 +140,7 @@
|
||||||
-export_type([
|
-export_type([
|
||||||
query_mode/0,
|
query_mode/0,
|
||||||
resource_id/0,
|
resource_id/0,
|
||||||
|
channel_id/0,
|
||||||
resource_data/0,
|
resource_data/0,
|
||||||
resource_status/0
|
resource_status/0
|
||||||
]).
|
]).
|
||||||
|
@ -135,6 +151,10 @@
|
||||||
on_query_async/4,
|
on_query_async/4,
|
||||||
on_batch_query_async/4,
|
on_batch_query_async/4,
|
||||||
on_get_status/2,
|
on_get_status/2,
|
||||||
|
on_get_channel_status/3,
|
||||||
|
on_add_channel/4,
|
||||||
|
on_remove_channel/3,
|
||||||
|
on_get_channels/1,
|
||||||
query_mode/1
|
query_mode/1
|
||||||
]).
|
]).
|
||||||
|
|
||||||
|
@ -176,8 +196,45 @@
|
||||||
| {resource_status(), resource_state()}
|
| {resource_status(), resource_state()}
|
||||||
| {resource_status(), resource_state(), term()}.
|
| {resource_status(), resource_state(), term()}.
|
||||||
|
|
||||||
|
-callback on_get_channel_status(resource_id(), channel_id(), resource_state()) ->
|
||||||
|
resource_status()
|
||||||
|
| {resource_status(), term()}.
|
||||||
|
|
||||||
-callback query_mode(Config :: term()) -> query_mode().
|
-callback query_mode(Config :: term()) -> query_mode().
|
||||||
|
|
||||||
|
%% This callback handles the installation of a specified Bridge V2 resource.
|
||||||
|
%%
|
||||||
|
%% It's guaranteed that the provided Bridge V2 is not already installed when this
|
||||||
|
%% function is invoked. Upon successful installation, the function should return a
|
||||||
|
%% new state with the installed Bridge V2 encapsulated within the `installed_bridge_v2s` map.
|
||||||
|
%%
|
||||||
|
%% The Bridge V2 state must be stored in the `installed_bridge_v2s` map using the
|
||||||
|
%% Bridge V2 resource ID as the key, as the caching mechanism depends on this structure.
|
||||||
|
%%
|
||||||
|
%% If the Bridge V2 cannot be successfully installed, the callback shall
|
||||||
|
%% throw an exception.
|
||||||
|
-callback on_add_channel(
|
||||||
|
ResId :: term(), ResourceState :: term(), BridgeV2Id :: binary(), ChannelConfig :: map()
|
||||||
|
) -> {ok, NewState :: #{installed_bridge_v2s := map()}}.
|
||||||
|
|
||||||
|
%% This callback handles the deinstallation of a specified Bridge V2 resource.
|
||||||
|
%%
|
||||||
|
%% It's guaranteed that the provided Bridge V2 is installed when this
|
||||||
|
%% function is invoked. Upon successful deinstallation, the function should return
|
||||||
|
%% a new state where the Bridge V2 id key has been removed from the `installed_bridge_v2s` map.
|
||||||
|
%%
|
||||||
|
%% If the Bridge V2 cannot be successfully deinstalled, the callback shall
|
||||||
|
%% log an error.
|
||||||
|
%%
|
||||||
|
%% Also see the documentation for `on_add_channel/4`.
|
||||||
|
-callback on_remove_channel(
|
||||||
|
ResId :: term(), ResourceState :: term(), BridgeV2Id :: binary()
|
||||||
|
) -> {ok, NewState :: term()}.
|
||||||
|
|
||||||
|
-callback on_get_channels(
|
||||||
|
ResId :: term()
|
||||||
|
) -> {ok, NewState :: term()}.
|
||||||
|
|
||||||
-spec list_types() -> [module()].
|
-spec list_types() -> [module()].
|
||||||
list_types() ->
|
list_types() ->
|
||||||
discover_resource_mods().
|
discover_resource_mods().
|
||||||
|
@ -292,8 +349,11 @@ query(ResId, Request) ->
|
||||||
-spec query(resource_id(), Request :: term(), query_opts()) ->
|
-spec query(resource_id(), Request :: term(), query_opts()) ->
|
||||||
Result :: term().
|
Result :: term().
|
||||||
query(ResId, Request, Opts) ->
|
query(ResId, Request, Opts) ->
|
||||||
case emqx_resource_manager:lookup_cached(ResId) of
|
%% We keep this A
|
||||||
{ok, _Group, #{query_mode := QM, error := Error}} ->
|
case get_query_mode_error(ResId, Opts) of
|
||||||
|
{error, _} = ErrorTuple ->
|
||||||
|
ErrorTuple;
|
||||||
|
{QM, Error} ->
|
||||||
case {QM, Error} of
|
case {QM, Error} of
|
||||||
{_, unhealthy_target} ->
|
{_, unhealthy_target} ->
|
||||||
emqx_resource_metrics:matched_inc(ResId),
|
emqx_resource_metrics:matched_inc(ResId),
|
||||||
|
@ -329,9 +389,25 @@ query(ResId, Request, Opts) ->
|
||||||
emqx_resource_buffer_worker:sync_query(ResId, Request, Opts);
|
emqx_resource_buffer_worker:sync_query(ResId, Request, Opts);
|
||||||
{async, _} ->
|
{async, _} ->
|
||||||
emqx_resource_buffer_worker:async_query(ResId, Request, Opts)
|
emqx_resource_buffer_worker:async_query(ResId, Request, Opts)
|
||||||
|
end
|
||||||
|
end.
|
||||||
|
|
||||||
|
get_query_mode_error(ResId, Opts) ->
|
||||||
|
case emqx_bridge_v2:is_bridge_v2_id(ResId) of
|
||||||
|
true ->
|
||||||
|
case Opts of
|
||||||
|
#{query_mode := QueryMode} ->
|
||||||
|
{QueryMode, ok};
|
||||||
|
_ ->
|
||||||
|
{async, unhealthy_target}
|
||||||
end;
|
end;
|
||||||
|
false ->
|
||||||
|
case emqx_resource_manager:lookup_cached(ResId) of
|
||||||
|
{ok, _Group, #{query_mode := QM, error := Error}} ->
|
||||||
|
{QM, Error};
|
||||||
{error, not_found} ->
|
{error, not_found} ->
|
||||||
?RESOURCE_ERROR(not_found, "resource not found")
|
?RESOURCE_ERROR(not_found, "resource not found")
|
||||||
|
end
|
||||||
end.
|
end.
|
||||||
|
|
||||||
-spec simple_sync_query(resource_id(), Request :: term()) -> Result :: term().
|
-spec simple_sync_query(resource_id(), Request :: term()) -> Result :: term().
|
||||||
|
@ -362,6 +438,11 @@ stop(ResId) ->
|
||||||
health_check(ResId) ->
|
health_check(ResId) ->
|
||||||
emqx_resource_manager:health_check(ResId).
|
emqx_resource_manager:health_check(ResId).
|
||||||
|
|
||||||
|
-spec channel_health_check(resource_id(), channel_id()) ->
|
||||||
|
{ok, resource_status()} | {error, term()}.
|
||||||
|
channel_health_check(ResId, ChannelId) ->
|
||||||
|
emqx_resource_manager:channel_health_check(ResId, ChannelId).
|
||||||
|
|
||||||
set_resource_status_connecting(ResId) ->
|
set_resource_status_connecting(ResId) ->
|
||||||
emqx_resource_manager:set_resource_status_connecting(ResId).
|
emqx_resource_manager:set_resource_status_connecting(ResId).
|
||||||
|
|
||||||
|
@ -436,6 +517,85 @@ call_start(ResId, Mod, Config) ->
|
||||||
call_health_check(ResId, Mod, ResourceState) ->
|
call_health_check(ResId, Mod, ResourceState) ->
|
||||||
?SAFE_CALL(Mod:on_get_status(ResId, ResourceState)).
|
?SAFE_CALL(Mod:on_get_status(ResId, ResourceState)).
|
||||||
|
|
||||||
|
-spec call_channel_health_check(resource_id(), channel_id(), module(), resource_state()) ->
|
||||||
|
resource_status()
|
||||||
|
| {resource_status()}
|
||||||
|
| {resource_status(), term()}
|
||||||
|
| {error, term()}.
|
||||||
|
call_channel_health_check(ResId, ChannelId, Mod, ResourceState) ->
|
||||||
|
?SAFE_CALL(Mod:on_get_channel_status(ResId, ChannelId, ResourceState)).
|
||||||
|
|
||||||
|
call_add_channel(ResId, Mod, ResourceState, ChannelId, ChannelConfig) ->
|
||||||
|
%% Check if maybe_install_insert_template is exported
|
||||||
|
case erlang:function_exported(Mod, on_add_channel, 4) of
|
||||||
|
true ->
|
||||||
|
try
|
||||||
|
Mod:on_add_channel(
|
||||||
|
ResId, ResourceState, ChannelId, ChannelConfig
|
||||||
|
)
|
||||||
|
catch
|
||||||
|
throw:Error ->
|
||||||
|
{error, Error};
|
||||||
|
Kind:Reason:Stacktrace ->
|
||||||
|
{error, #{
|
||||||
|
exception => Kind,
|
||||||
|
reason => Reason,
|
||||||
|
stacktrace => emqx_utils:redact(Stacktrace)
|
||||||
|
}}
|
||||||
|
end;
|
||||||
|
false ->
|
||||||
|
{error,
|
||||||
|
<<<<"on_add_channel callback function not available for connector with resource id ">>/binary,
|
||||||
|
ResId/binary>>}
|
||||||
|
end.
|
||||||
|
|
||||||
|
call_remove_channel(ResId, Mod, ResourceState, ChannelId) ->
|
||||||
|
%% Check if maybe_install_insert_template is exported
|
||||||
|
case erlang:function_exported(Mod, on_remove_channel, 3) of
|
||||||
|
true ->
|
||||||
|
try
|
||||||
|
Mod:on_remove_channel(
|
||||||
|
ResId, ResourceState, ChannelId
|
||||||
|
)
|
||||||
|
catch
|
||||||
|
Kind:Reason:Stacktrace ->
|
||||||
|
{error, #{
|
||||||
|
exception => Kind,
|
||||||
|
reason => Reason,
|
||||||
|
stacktrace => emqx_utils:redact(Stacktrace)
|
||||||
|
}}
|
||||||
|
end;
|
||||||
|
false ->
|
||||||
|
{error,
|
||||||
|
<<<<"on_remove_channel callback function not available for connector with resource id ">>/binary,
|
||||||
|
ResId/binary>>}
|
||||||
|
end.
|
||||||
|
|
||||||
|
call_get_channels(ResId, Mod) ->
|
||||||
|
case erlang:function_exported(Mod, on_get_channels, 1) of
|
||||||
|
true ->
|
||||||
|
Mod:on_get_channels(ResId);
|
||||||
|
false ->
|
||||||
|
[]
|
||||||
|
end.
|
||||||
|
|
||||||
|
call_get_channel_config(ResId, ChannelId, Mod) ->
|
||||||
|
case erlang:function_exported(Mod, on_get_channels, 1) of
|
||||||
|
true ->
|
||||||
|
ChConfigs = Mod:on_get_channels(ResId),
|
||||||
|
case [Conf || {ChId, Conf} <- ChConfigs, ChId =:= ChannelId] of
|
||||||
|
[ChannelConf] ->
|
||||||
|
ChannelConf;
|
||||||
|
_ ->
|
||||||
|
{error,
|
||||||
|
<<"Channel ", ChannelId/binary,
|
||||||
|
"not found. There seems to be a broken reference">>}
|
||||||
|
end;
|
||||||
|
false ->
|
||||||
|
{error,
|
||||||
|
<<"on_get_channels callback function not available for resource id", ResId/binary>>}
|
||||||
|
end.
|
||||||
|
|
||||||
-spec call_stop(resource_id(), module(), resource_state()) -> term().
|
-spec call_stop(resource_id(), module(), resource_state()) -> term().
|
||||||
call_stop(ResId, Mod, ResourceState) ->
|
call_stop(ResId, Mod, ResourceState) ->
|
||||||
?SAFE_CALL(begin
|
?SAFE_CALL(begin
|
||||||
|
@ -575,6 +735,33 @@ forget_allocated_resources(InstanceId) ->
|
||||||
true = ets:delete(?RESOURCE_ALLOCATION_TAB, InstanceId),
|
true = ets:delete(?RESOURCE_ALLOCATION_TAB, InstanceId),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
-spec create_metrics(resource_id()) -> ok.
|
||||||
|
create_metrics(ResId) ->
|
||||||
|
emqx_metrics_worker:create_metrics(
|
||||||
|
?RES_METRICS,
|
||||||
|
ResId,
|
||||||
|
[
|
||||||
|
'matched',
|
||||||
|
'retried',
|
||||||
|
'retried.success',
|
||||||
|
'retried.failed',
|
||||||
|
'success',
|
||||||
|
'late_reply',
|
||||||
|
'failed',
|
||||||
|
'dropped',
|
||||||
|
'dropped.expired',
|
||||||
|
'dropped.queue_full',
|
||||||
|
'dropped.resource_not_found',
|
||||||
|
'dropped.resource_stopped',
|
||||||
|
'dropped.other',
|
||||||
|
'received'
|
||||||
|
],
|
||||||
|
[matched]
|
||||||
|
).
|
||||||
|
|
||||||
|
-spec clear_metrics(resource_id()) -> ok.
|
||||||
|
clear_metrics(ResId) ->
|
||||||
|
emqx_metrics_worker:clear_metrics(?RES_METRICS, ResId).
|
||||||
%% =================================================================================
|
%% =================================================================================
|
||||||
|
|
||||||
filter_instances(Filter) ->
|
filter_instances(Filter) ->
|
||||||
|
|
|
@ -1076,7 +1076,7 @@ handle_async_worker_down(Data0, Pid) ->
|
||||||
-spec call_query(force_sync | async_if_possible, _, _, _, _, _) -> _.
|
-spec call_query(force_sync | async_if_possible, _, _, _, _, _) -> _.
|
||||||
call_query(QM, Id, Index, Ref, Query, QueryOpts) ->
|
call_query(QM, Id, Index, Ref, Query, QueryOpts) ->
|
||||||
?tp(call_query_enter, #{id => Id, query => Query, query_mode => QM}),
|
?tp(call_query_enter, #{id => Id, query => Query, query_mode => QM}),
|
||||||
case emqx_resource_manager:lookup_cached(Id) of
|
case emqx_resource_manager:lookup_cached(extract_connector_id(Id)) of
|
||||||
{ok, _Group, #{status := stopped}} ->
|
{ok, _Group, #{status := stopped}} ->
|
||||||
?RESOURCE_ERROR(stopped, "resource stopped or disabled");
|
?RESOURCE_ERROR(stopped, "resource stopped or disabled");
|
||||||
{ok, _Group, #{status := connecting, error := unhealthy_target}} ->
|
{ok, _Group, #{status := connecting, error := unhealthy_target}} ->
|
||||||
|
@ -1087,20 +1087,89 @@ call_query(QM, Id, Index, Ref, Query, QueryOpts) ->
|
||||||
?RESOURCE_ERROR(not_found, "resource not found")
|
?RESOURCE_ERROR(not_found, "resource not found")
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
extract_connector_id(Id) when is_binary(Id) ->
|
||||||
|
case binary:split(Id, <<":">>, [global]) of
|
||||||
|
[
|
||||||
|
_ChannelGlobalType,
|
||||||
|
_ChannelSubType,
|
||||||
|
_ChannelName,
|
||||||
|
<<"connector">>,
|
||||||
|
ConnectorType,
|
||||||
|
ConnectorName
|
||||||
|
] ->
|
||||||
|
<<"connector:", ConnectorType/binary, ":", ConnectorName/binary>>;
|
||||||
|
_ ->
|
||||||
|
Id
|
||||||
|
end;
|
||||||
|
extract_connector_id(Id) ->
|
||||||
|
Id.
|
||||||
|
|
||||||
|
is_channel_id(Id) ->
|
||||||
|
extract_connector_id(Id) =/= Id.
|
||||||
|
|
||||||
|
%% Check if channel is installed in the connector state so that we
|
||||||
|
%% can add it if it's not installed. We will fail with a recoverable error if
|
||||||
|
%% the installation fails so that the query can be retried.
|
||||||
|
pre_query_channel_check({Id, _} = _Request, State, Channels, _Mod) when is_map_key(Id, Channels) ->
|
||||||
|
State;
|
||||||
|
pre_query_channel_check({Id, _} = _Request, State, _Channels, Mod) ->
|
||||||
|
case is_channel_id(Id) of
|
||||||
|
true ->
|
||||||
|
ResId = extract_connector_id(Id),
|
||||||
|
case emqx_resource:call_get_channel_config(ResId, Id, Mod) of
|
||||||
|
ChannelConfig when is_map(ChannelConfig) ->
|
||||||
|
add_channel(ResId, Id, ChannelConfig);
|
||||||
|
Error ->
|
||||||
|
%% Broken reference: this should not happen
|
||||||
|
erlang:error({unrecoverable_error, Error})
|
||||||
|
end;
|
||||||
|
false ->
|
||||||
|
State
|
||||||
|
end;
|
||||||
|
pre_query_channel_check(_Request, State, _Channels, _Mod) ->
|
||||||
|
State.
|
||||||
|
|
||||||
|
add_channel(ResId, ChannelId, ChannelConfig) ->
|
||||||
|
case emqx_resource_manager:add_channel(ResId, ChannelId, ChannelConfig) of
|
||||||
|
ok ->
|
||||||
|
read_new_state_from_resource_cache(ResId);
|
||||||
|
{error, Reason} ->
|
||||||
|
erlang:error(
|
||||||
|
{recoverable_error,
|
||||||
|
iolist_to_binary(
|
||||||
|
io_lib:format("channel:~p could not be installed in its connector: (~p)", [
|
||||||
|
ChannelId, Reason
|
||||||
|
])
|
||||||
|
)}
|
||||||
|
)
|
||||||
|
end.
|
||||||
|
|
||||||
|
read_new_state_from_resource_cache(ResId) ->
|
||||||
|
case emqx_resource_manager:lookup_cached(ResId) of
|
||||||
|
{ok, _Group, #{status := stopped}} ->
|
||||||
|
error({recoverable_error, <<"resource stopped or disabled">>});
|
||||||
|
{ok, _Group, #{status := connecting, error := unhealthy_target}} ->
|
||||||
|
error({unrecoverable_error, unhealthy_target});
|
||||||
|
{ok, _Group, #{state := State}} ->
|
||||||
|
State;
|
||||||
|
{error, not_found} ->
|
||||||
|
error({recoverable_error, <<"resource not found">>})
|
||||||
|
end.
|
||||||
|
|
||||||
do_call_query(QM, Id, Index, Ref, Query, QueryOpts, #{query_mode := ResQM} = Resource) when
|
do_call_query(QM, Id, Index, Ref, Query, QueryOpts, #{query_mode := ResQM} = Resource) when
|
||||||
ResQM =:= simple_sync_internal_buffer; ResQM =:= simple_async_internal_buffer
|
ResQM =:= simple_sync_internal_buffer; ResQM =:= simple_async_internal_buffer
|
||||||
->
|
->
|
||||||
%% The connector supports buffer, send even in disconnected state
|
%% The connector supports buffer, send even in disconnected state
|
||||||
#{mod := Mod, state := ResSt, callback_mode := CBM} = Resource,
|
#{mod := Mod, state := ResSt, callback_mode := CBM, added_channels := Channels} = Resource,
|
||||||
CallMode = call_mode(QM, CBM),
|
CallMode = call_mode(QM, CBM),
|
||||||
apply_query_fun(CallMode, Mod, Id, Index, Ref, Query, ResSt, QueryOpts);
|
apply_query_fun(CallMode, Mod, Id, Index, Ref, Query, ResSt, Channels, QueryOpts);
|
||||||
do_call_query(QM, Id, Index, Ref, Query, QueryOpts, #{status := connected} = Resource) ->
|
do_call_query(QM, Id, Index, Ref, Query, QueryOpts, #{status := connected} = Resource) ->
|
||||||
%% when calling from the buffer worker or other simple queries,
|
%% when calling from the buffer worker or other simple queries,
|
||||||
%% only apply the query fun when it's at connected status
|
%% only apply the query fun when it's at connected status
|
||||||
#{mod := Mod, state := ResSt, callback_mode := CBM} = Resource,
|
#{mod := Mod, state := ResSt, callback_mode := CBM, added_channels := Channels} = Resource,
|
||||||
CallMode = call_mode(QM, CBM),
|
CallMode = call_mode(QM, CBM),
|
||||||
apply_query_fun(CallMode, Mod, Id, Index, Ref, Query, ResSt, QueryOpts);
|
apply_query_fun(CallMode, Mod, Id, Index, Ref, Query, ResSt, Channels, QueryOpts);
|
||||||
do_call_query(_QM, _Id, _Index, _Ref, _Query, _QueryOpts, _Resource) ->
|
do_call_query(_QM, _Id, _Index, _Ref, _Query, _QueryOpts, _Data) ->
|
||||||
?RESOURCE_ERROR(not_connected, "resource not connected").
|
?RESOURCE_ERROR(not_connected, "resource not connected").
|
||||||
|
|
||||||
-define(APPLY_RESOURCE(NAME, EXPR, REQ),
|
-define(APPLY_RESOURCE(NAME, EXPR, REQ),
|
||||||
|
@ -1131,14 +1200,23 @@ do_call_query(_QM, _Id, _Index, _Ref, _Query, _QueryOpts, _Resource) ->
|
||||||
).
|
).
|
||||||
|
|
||||||
apply_query_fun(
|
apply_query_fun(
|
||||||
sync, Mod, Id, _Index, _Ref, ?QUERY(_, Request, _, _) = _Query, ResSt, QueryOpts
|
sync, Mod, Id, _Index, _Ref, ?QUERY(_, Request, _, _) = _Query, ResSt, Channels, QueryOpts
|
||||||
) ->
|
) ->
|
||||||
?tp(call_query, #{id => Id, mod => Mod, query => _Query, res_st => ResSt, call_mode => sync}),
|
?tp(call_query, #{id => Id, mod => Mod, query => _Query, res_st => ResSt, call_mode => sync}),
|
||||||
maybe_reply_to(
|
maybe_reply_to(
|
||||||
?APPLY_RESOURCE(call_query, Mod:on_query(Id, Request, ResSt), Request),
|
?APPLY_RESOURCE(
|
||||||
|
call_query,
|
||||||
|
begin
|
||||||
|
NewResSt = pre_query_channel_check(Request, ResSt, Channels, Mod),
|
||||||
|
Mod:on_query(extract_connector_id(Id), Request, NewResSt)
|
||||||
|
end,
|
||||||
|
Request
|
||||||
|
),
|
||||||
QueryOpts
|
QueryOpts
|
||||||
);
|
);
|
||||||
apply_query_fun(async, Mod, Id, Index, Ref, ?QUERY(_, Request, _, _) = Query, ResSt, QueryOpts) ->
|
apply_query_fun(
|
||||||
|
async, Mod, Id, Index, Ref, ?QUERY(_, Request, _, _) = Query, ResSt, Channels, QueryOpts
|
||||||
|
) ->
|
||||||
?tp(call_query_async, #{
|
?tp(call_query_async, #{
|
||||||
id => Id, mod => Mod, query => Query, res_st => ResSt, call_mode => async
|
id => Id, mod => Mod, query => Query, res_st => ResSt, call_mode => async
|
||||||
}),
|
}),
|
||||||
|
@ -1160,23 +1238,51 @@ apply_query_fun(async, Mod, Id, Index, Ref, ?QUERY(_, Request, _, _) = Query, Re
|
||||||
AsyncWorkerMRef = undefined,
|
AsyncWorkerMRef = undefined,
|
||||||
InflightItem = ?INFLIGHT_ITEM(Ref, Query, IsRetriable, AsyncWorkerMRef),
|
InflightItem = ?INFLIGHT_ITEM(Ref, Query, IsRetriable, AsyncWorkerMRef),
|
||||||
ok = inflight_append(InflightTID, InflightItem),
|
ok = inflight_append(InflightTID, InflightItem),
|
||||||
Result = Mod:on_query_async(Id, Request, {ReplyFun, [ReplyContext]}, ResSt),
|
NewResSt = pre_query_channel_check(Request, ResSt, Channels, Mod),
|
||||||
|
Result = Mod:on_query_async(
|
||||||
|
extract_connector_id(Id), Request, {ReplyFun, [ReplyContext]}, NewResSt
|
||||||
|
),
|
||||||
{async_return, Result}
|
{async_return, Result}
|
||||||
end,
|
end,
|
||||||
Request
|
Request
|
||||||
);
|
);
|
||||||
apply_query_fun(
|
apply_query_fun(
|
||||||
sync, Mod, Id, _Index, _Ref, [?QUERY(_, _, _, _) | _] = Batch, ResSt, QueryOpts
|
sync,
|
||||||
|
Mod,
|
||||||
|
Id,
|
||||||
|
_Index,
|
||||||
|
_Ref,
|
||||||
|
[?QUERY(_, FirstRequest, _, _) | _] = Batch,
|
||||||
|
ResSt,
|
||||||
|
Channels,
|
||||||
|
QueryOpts
|
||||||
) ->
|
) ->
|
||||||
?tp(call_batch_query, #{
|
?tp(call_batch_query, #{
|
||||||
id => Id, mod => Mod, batch => Batch, res_st => ResSt, call_mode => sync
|
id => Id, mod => Mod, batch => Batch, res_st => ResSt, call_mode => sync
|
||||||
}),
|
}),
|
||||||
Requests = lists:map(fun(?QUERY(_ReplyTo, Request, _, _ExpireAt)) -> Request end, Batch),
|
Requests = lists:map(fun(?QUERY(_ReplyTo, Request, _, _ExpireAt)) -> Request end, Batch),
|
||||||
maybe_reply_to(
|
maybe_reply_to(
|
||||||
?APPLY_RESOURCE(call_batch_query, Mod:on_batch_query(Id, Requests, ResSt), Batch),
|
?APPLY_RESOURCE(
|
||||||
|
call_batch_query,
|
||||||
|
begin
|
||||||
|
NewResSt = pre_query_channel_check(FirstRequest, ResSt, Channels, Mod),
|
||||||
|
Mod:on_batch_query(extract_connector_id(Id), Requests, NewResSt)
|
||||||
|
end,
|
||||||
|
Batch
|
||||||
|
),
|
||||||
QueryOpts
|
QueryOpts
|
||||||
);
|
);
|
||||||
apply_query_fun(async, Mod, Id, Index, Ref, [?QUERY(_, _, _, _) | _] = Batch, ResSt, QueryOpts) ->
|
apply_query_fun(
|
||||||
|
async,
|
||||||
|
Mod,
|
||||||
|
Id,
|
||||||
|
Index,
|
||||||
|
Ref,
|
||||||
|
[?QUERY(_, FirstRequest, _, _) | _] = Batch,
|
||||||
|
ResSt,
|
||||||
|
Channels,
|
||||||
|
QueryOpts
|
||||||
|
) ->
|
||||||
?tp(call_batch_query_async, #{
|
?tp(call_batch_query_async, #{
|
||||||
id => Id, mod => Mod, batch => Batch, res_st => ResSt, call_mode => async
|
id => Id, mod => Mod, batch => Batch, res_st => ResSt, call_mode => async
|
||||||
}),
|
}),
|
||||||
|
@ -1201,7 +1307,10 @@ apply_query_fun(async, Mod, Id, Index, Ref, [?QUERY(_, _, _, _) | _] = Batch, Re
|
||||||
AsyncWorkerMRef = undefined,
|
AsyncWorkerMRef = undefined,
|
||||||
InflightItem = ?INFLIGHT_ITEM(Ref, Batch, IsRetriable, AsyncWorkerMRef),
|
InflightItem = ?INFLIGHT_ITEM(Ref, Batch, IsRetriable, AsyncWorkerMRef),
|
||||||
ok = inflight_append(InflightTID, InflightItem),
|
ok = inflight_append(InflightTID, InflightItem),
|
||||||
Result = Mod:on_batch_query_async(Id, Requests, {ReplyFun, [ReplyContext]}, ResSt),
|
NewResSt = pre_query_channel_check(FirstRequest, ResSt, Channels, Mod),
|
||||||
|
Result = Mod:on_batch_query_async(
|
||||||
|
extract_connector_id(Id), Requests, {ReplyFun, [ReplyContext]}, NewResSt
|
||||||
|
),
|
||||||
{async_return, Result}
|
{async_return, Result}
|
||||||
end,
|
end,
|
||||||
Batch
|
Batch
|
||||||
|
|
|
@ -29,7 +29,10 @@
|
||||||
restart/2,
|
restart/2,
|
||||||
start/2,
|
start/2,
|
||||||
stop/1,
|
stop/1,
|
||||||
health_check/1
|
health_check/1,
|
||||||
|
channel_health_check/2,
|
||||||
|
add_channel/3,
|
||||||
|
remove_channel/2
|
||||||
]).
|
]).
|
||||||
|
|
||||||
-export([
|
-export([
|
||||||
|
@ -64,7 +67,8 @@
|
||||||
state,
|
state,
|
||||||
error,
|
error,
|
||||||
pid,
|
pid,
|
||||||
extra
|
extra,
|
||||||
|
added_channels
|
||||||
}).
|
}).
|
||||||
-type data() :: #data{}.
|
-type data() :: #data{}.
|
||||||
|
|
||||||
|
@ -123,27 +127,8 @@ create_and_return_data(ResId, Group, ResourceType, Config, Opts) ->
|
||||||
create(ResId, Group, ResourceType, Config, Opts) ->
|
create(ResId, Group, ResourceType, Config, Opts) ->
|
||||||
% The state machine will make the actual call to the callback/resource module after init
|
% The state machine will make the actual call to the callback/resource module after init
|
||||||
ok = emqx_resource_manager_sup:ensure_child(ResId, Group, ResourceType, Config, Opts),
|
ok = emqx_resource_manager_sup:ensure_child(ResId, Group, ResourceType, Config, Opts),
|
||||||
ok = emqx_metrics_worker:create_metrics(
|
% Create metrics for the resource
|
||||||
?RES_METRICS,
|
ok = emqx_resource:create_metrics(ResId),
|
||||||
ResId,
|
|
||||||
[
|
|
||||||
'matched',
|
|
||||||
'retried',
|
|
||||||
'retried.success',
|
|
||||||
'retried.failed',
|
|
||||||
'success',
|
|
||||||
'late_reply',
|
|
||||||
'failed',
|
|
||||||
'dropped',
|
|
||||||
'dropped.expired',
|
|
||||||
'dropped.queue_full',
|
|
||||||
'dropped.resource_not_found',
|
|
||||||
'dropped.resource_stopped',
|
|
||||||
'dropped.other',
|
|
||||||
'received'
|
|
||||||
],
|
|
||||||
[matched]
|
|
||||||
),
|
|
||||||
QueryMode = emqx_resource:query_mode(ResourceType, Config, Opts),
|
QueryMode = emqx_resource:query_mode(ResourceType, Config, Opts),
|
||||||
case QueryMode of
|
case QueryMode of
|
||||||
%% the resource has built-in buffer, so there is no need for resource workers
|
%% the resource has built-in buffer, so there is no need for resource workers
|
||||||
|
@ -292,6 +277,25 @@ list_group(Group) ->
|
||||||
health_check(ResId) ->
|
health_check(ResId) ->
|
||||||
safe_call(ResId, health_check, ?T_OPERATION).
|
safe_call(ResId, health_check, ?T_OPERATION).
|
||||||
|
|
||||||
|
-spec channel_health_check(resource_id(), channel_id()) ->
|
||||||
|
{ok, resource_status()} | {error, term()}.
|
||||||
|
channel_health_check(ResId, ChannelId) ->
|
||||||
|
safe_call(ResId, {channel_health_check, ChannelId}, ?T_OPERATION).
|
||||||
|
|
||||||
|
add_channel(ResId, ChannelId, Config) ->
|
||||||
|
%% Use cache to avoid doing inter process communication on every call
|
||||||
|
Data = read_cache(ResId),
|
||||||
|
AddedChannels = Data#data.added_channels,
|
||||||
|
case maps:get(ChannelId, AddedChannels, false) of
|
||||||
|
true ->
|
||||||
|
ok;
|
||||||
|
false ->
|
||||||
|
safe_call(ResId, {add_channel, ChannelId, Config}, ?T_OPERATION)
|
||||||
|
end.
|
||||||
|
|
||||||
|
remove_channel(ResId, ChannelId) ->
|
||||||
|
safe_call(ResId, {remove_channel, ChannelId}, ?T_OPERATION).
|
||||||
|
|
||||||
%% Server start/stop callbacks
|
%% Server start/stop callbacks
|
||||||
|
|
||||||
%% @doc Function called from the supervisor to actually start the server
|
%% @doc Function called from the supervisor to actually start the server
|
||||||
|
@ -310,7 +314,8 @@ start_link(ResId, Group, ResourceType, Config, Opts) ->
|
||||||
config = Config,
|
config = Config,
|
||||||
opts = Opts,
|
opts = Opts,
|
||||||
state = undefined,
|
state = undefined,
|
||||||
error = undefined
|
error = undefined,
|
||||||
|
added_channels = #{}
|
||||||
},
|
},
|
||||||
gen_statem:start_link(?REF(ResId), ?MODULE, {Data, Opts}, []).
|
gen_statem:start_link(?REF(ResId), ?MODULE, {Data, Opts}, []).
|
||||||
|
|
||||||
|
@ -374,8 +379,13 @@ handle_event({call, From}, lookup, _State, #data{group = Group} = Data) ->
|
||||||
handle_event({call, From}, health_check, stopped, _Data) ->
|
handle_event({call, From}, health_check, stopped, _Data) ->
|
||||||
Actions = [{reply, From, {error, resource_is_stopped}}],
|
Actions = [{reply, From, {error, resource_is_stopped}}],
|
||||||
{keep_state_and_data, Actions};
|
{keep_state_and_data, Actions};
|
||||||
|
handle_event({call, From}, {channel_health_check, _}, stopped, _Data) ->
|
||||||
|
Actions = [{reply, From, {error, resource_is_stopped}}],
|
||||||
|
{keep_state_and_data, Actions};
|
||||||
handle_event({call, From}, health_check, _State, Data) ->
|
handle_event({call, From}, health_check, _State, Data) ->
|
||||||
handle_manually_health_check(From, Data);
|
handle_manually_health_check(From, Data);
|
||||||
|
handle_event({call, From}, {channel_health_check, ChannelId}, _State, Data) ->
|
||||||
|
handle_manually_channel_health_check(From, Data, ChannelId);
|
||||||
% State: CONNECTING
|
% State: CONNECTING
|
||||||
handle_event(enter, _OldState, connecting = State, Data) ->
|
handle_event(enter, _OldState, connecting = State, Data) ->
|
||||||
ok = log_state_consistency(State, Data),
|
ok = log_state_consistency(State, Data),
|
||||||
|
@ -408,6 +418,14 @@ handle_event(enter, _OldState, stopped = State, Data) ->
|
||||||
ok = log_state_consistency(State, Data),
|
ok = log_state_consistency(State, Data),
|
||||||
{keep_state_and_data, []};
|
{keep_state_and_data, []};
|
||||||
% Ignore all other events
|
% Ignore all other events
|
||||||
|
handle_event(
|
||||||
|
{call, From}, {add_channel, ChannelId, Config}, _State, Data
|
||||||
|
) ->
|
||||||
|
handle_add_channel(From, Data, ChannelId, Config);
|
||||||
|
handle_event(
|
||||||
|
{call, From}, {remove_channel, ChannelId}, _State, Data
|
||||||
|
) ->
|
||||||
|
handle_remove_channel(From, ChannelId, Data);
|
||||||
handle_event(EventType, EventData, State, Data) ->
|
handle_event(EventType, EventData, State, Data) ->
|
||||||
?SLOG(
|
?SLOG(
|
||||||
error,
|
error,
|
||||||
|
@ -483,10 +501,11 @@ start_resource(Data, From) ->
|
||||||
%% in case the emqx_resource:call_start/2 hangs, the lookup/1 can read status from the cache
|
%% in case the emqx_resource:call_start/2 hangs, the lookup/1 can read status from the cache
|
||||||
case emqx_resource:call_start(Data#data.id, Data#data.mod, Data#data.config) of
|
case emqx_resource:call_start(Data#data.id, Data#data.mod, Data#data.config) of
|
||||||
{ok, ResourceState} ->
|
{ok, ResourceState} ->
|
||||||
UpdatedData = Data#data{status = connecting, state = ResourceState},
|
UpdatedData1 = Data#data{status = connecting, state = ResourceState},
|
||||||
%% Perform an initial health_check immediately before transitioning into a connected state
|
%% Perform an initial health_check immediately before transitioning into a connected state
|
||||||
|
UpdatedData2 = add_channels(UpdatedData1),
|
||||||
Actions = maybe_reply([{state_timeout, 0, health_check}], From, ok),
|
Actions = maybe_reply([{state_timeout, 0, health_check}], From, ok),
|
||||||
{next_state, connecting, update_state(UpdatedData, Data), Actions};
|
{next_state, connecting, update_state(UpdatedData2, Data), Actions};
|
||||||
{error, Reason} = Err ->
|
{error, Reason} = Err ->
|
||||||
?SLOG(warning, #{
|
?SLOG(warning, #{
|
||||||
msg => "start_resource_failed",
|
msg => "start_resource_failed",
|
||||||
|
@ -501,6 +520,36 @@ start_resource(Data, From) ->
|
||||||
{next_state, disconnected, update_state(UpdatedData, Data), Actions}
|
{next_state, disconnected, update_state(UpdatedData, Data), Actions}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
add_channels(Data) ->
|
||||||
|
ChannelIDConfigTuples = emqx_resource:call_get_channels(Data#data.id, Data#data.mod),
|
||||||
|
add_channels_in_list(ChannelIDConfigTuples, Data).
|
||||||
|
|
||||||
|
add_channels_in_list([], Data) ->
|
||||||
|
Data;
|
||||||
|
add_channels_in_list([{ChannelID, ChannelConfig} | Rest], Data) ->
|
||||||
|
case
|
||||||
|
emqx_resource:call_add_channel(
|
||||||
|
Data#data.id, Data#data.mod, Data#data.state, ChannelID, ChannelConfig
|
||||||
|
)
|
||||||
|
of
|
||||||
|
{ok, NewState} ->
|
||||||
|
AddedChannelsMap = Data#data.added_channels,
|
||||||
|
NewAddedChannelsMap = maps:put(ChannelID, true, AddedChannelsMap),
|
||||||
|
NewData = Data#data{
|
||||||
|
state = NewState,
|
||||||
|
added_channels = NewAddedChannelsMap
|
||||||
|
},
|
||||||
|
add_channels_in_list(Rest, NewData);
|
||||||
|
{error, Reason} ->
|
||||||
|
?SLOG(warning, #{
|
||||||
|
msg => add_channel_failed,
|
||||||
|
id => Data#data.id,
|
||||||
|
channel_id => ChannelID,
|
||||||
|
reason => Reason
|
||||||
|
}),
|
||||||
|
add_channels_in_list(Rest, Data)
|
||||||
|
end.
|
||||||
|
|
||||||
maybe_stop_resource(#data{status = Status} = Data) when Status /= stopped ->
|
maybe_stop_resource(#data{status = Status} = Data) when Status /= stopped ->
|
||||||
stop_resource(Data);
|
stop_resource(Data);
|
||||||
maybe_stop_resource(#data{status = stopped} = Data) ->
|
maybe_stop_resource(#data{status = stopped} = Data) ->
|
||||||
|
@ -513,8 +562,10 @@ stop_resource(#data{state = ResState, id = ResId} = Data) ->
|
||||||
HasAllocatedResources = emqx_resource:has_allocated_resources(ResId),
|
HasAllocatedResources = emqx_resource:has_allocated_resources(ResId),
|
||||||
case ResState =/= undefined orelse HasAllocatedResources of
|
case ResState =/= undefined orelse HasAllocatedResources of
|
||||||
true ->
|
true ->
|
||||||
|
%% Before stop is called we remove all the channels from the resource
|
||||||
|
NewData = remove_channels(Data),
|
||||||
%% we clear the allocated resources after stop is successful
|
%% we clear the allocated resources after stop is successful
|
||||||
emqx_resource:call_stop(Data#data.id, Data#data.mod, ResState);
|
emqx_resource:call_stop(NewData#data.id, NewData#data.mod, ResState);
|
||||||
false ->
|
false ->
|
||||||
ok
|
ok
|
||||||
end,
|
end,
|
||||||
|
@ -522,10 +573,122 @@ stop_resource(#data{state = ResState, id = ResId} = Data) ->
|
||||||
ok = emqx_metrics_worker:reset_metrics(?RES_METRICS, ResId),
|
ok = emqx_metrics_worker:reset_metrics(?RES_METRICS, ResId),
|
||||||
Data#data{status = stopped}.
|
Data#data{status = stopped}.
|
||||||
|
|
||||||
|
remove_channels(Data) ->
|
||||||
|
Channels = maps:keys(Data#data.added_channels),
|
||||||
|
remove_channels_in_list(Channels, Data).
|
||||||
|
|
||||||
|
remove_channels_in_list([], Data) ->
|
||||||
|
Data;
|
||||||
|
remove_channels_in_list([ChannelID | Rest], Data) ->
|
||||||
|
case
|
||||||
|
emqx_resource:call_remove_channel(Data#data.id, Data#data.mod, Data#data.state, ChannelID)
|
||||||
|
of
|
||||||
|
{ok, NewState} ->
|
||||||
|
AddedChannelsMap = Data#data.added_channels,
|
||||||
|
NewAddedChannelsMap = maps:remove(ChannelID, AddedChannelsMap),
|
||||||
|
NewData = Data#data{
|
||||||
|
state = NewState,
|
||||||
|
added_channels = NewAddedChannelsMap
|
||||||
|
},
|
||||||
|
remove_channels_in_list(Rest, NewData);
|
||||||
|
{error, Reason} ->
|
||||||
|
?SLOG(warning, #{
|
||||||
|
msg => add_channel_failed,
|
||||||
|
id => Data#data.id,
|
||||||
|
channel_id => ChannelID,
|
||||||
|
reason => Reason
|
||||||
|
}),
|
||||||
|
remove_channels_in_list(Rest, Data)
|
||||||
|
end.
|
||||||
|
|
||||||
make_test_id() ->
|
make_test_id() ->
|
||||||
RandId = iolist_to_binary(emqx_utils:gen_id(16)),
|
RandId = iolist_to_binary(emqx_utils:gen_id(16)),
|
||||||
<<?TEST_ID_PREFIX, RandId/binary>>.
|
<<?TEST_ID_PREFIX, RandId/binary>>.
|
||||||
|
|
||||||
|
handle_add_channel(From, Data, ChannelId, ChannelConfig) ->
|
||||||
|
Channels = Data#data.added_channels,
|
||||||
|
case maps:get(ChannelId, Channels, false) of
|
||||||
|
true ->
|
||||||
|
%% The channel is already installed in the connector state
|
||||||
|
%% We don't need to install it again
|
||||||
|
{keep_state_and_data, [{reply, From, ok}]};
|
||||||
|
false ->
|
||||||
|
%% The channel is not installed in the connector state
|
||||||
|
%% We need to install it
|
||||||
|
handle_add_channel_need_insert(From, Data, ChannelId, Data, ChannelConfig)
|
||||||
|
end.
|
||||||
|
|
||||||
|
handle_add_channel_need_insert(From, Data, ChannelId, Data, ChannelConfig) ->
|
||||||
|
case add_channel_need_insert_update_data(Data, ChannelId, ChannelConfig) of
|
||||||
|
{ok, NewData} ->
|
||||||
|
{keep_state, NewData, [{reply, From, ok}]};
|
||||||
|
{error, _Reason} = Error ->
|
||||||
|
{keep_state_and_data, [{reply, From, Error}]}
|
||||||
|
end.
|
||||||
|
|
||||||
|
add_channel_need_insert_update_data(Data, ChannelId, ChannelConfig) ->
|
||||||
|
case
|
||||||
|
emqx_resource:call_add_channel(
|
||||||
|
Data#data.id, Data#data.mod, Data#data.state, ChannelId, ChannelConfig
|
||||||
|
)
|
||||||
|
of
|
||||||
|
{ok, NewState} ->
|
||||||
|
AddedChannelsMap = Data#data.added_channels,
|
||||||
|
NewAddedChannelsMap = maps:put(ChannelId, true, AddedChannelsMap),
|
||||||
|
UpdatedData = Data#data{
|
||||||
|
state = NewState,
|
||||||
|
added_channels = NewAddedChannelsMap
|
||||||
|
},
|
||||||
|
update_state(UpdatedData, Data),
|
||||||
|
{ok, UpdatedData};
|
||||||
|
{error, Reason} = Error ->
|
||||||
|
%% Log the error as a warning
|
||||||
|
?SLOG(warning, #{
|
||||||
|
msg => add_channel_failed,
|
||||||
|
id => Data#data.id,
|
||||||
|
channel_id => ChannelId,
|
||||||
|
reason => Reason
|
||||||
|
}),
|
||||||
|
Error
|
||||||
|
end.
|
||||||
|
|
||||||
|
handle_remove_channel(From, ChannelId, Data) ->
|
||||||
|
Channels = Data#data.added_channels,
|
||||||
|
case maps:get(ChannelId, Channels, false) of
|
||||||
|
false ->
|
||||||
|
%% The channel is already not installed in the connector state
|
||||||
|
{keep_state_and_data, [{reply, From, ok}]};
|
||||||
|
true ->
|
||||||
|
%% The channel is installed in the connector state
|
||||||
|
handle_remove_channel_exists(From, ChannelId, Data)
|
||||||
|
end.
|
||||||
|
|
||||||
|
handle_remove_channel_exists(From, ChannelId, Data) ->
|
||||||
|
case
|
||||||
|
emqx_resource:call_remove_channel(
|
||||||
|
Data#data.id, Data#data.mod, Data#data.state, ChannelId
|
||||||
|
)
|
||||||
|
of
|
||||||
|
{ok, NewState} ->
|
||||||
|
AddedChannelsMap = Data#data.added_channels,
|
||||||
|
NewAddedChannelsMap = maps:remove(ChannelId, AddedChannelsMap),
|
||||||
|
UpdatedData = Data#data{
|
||||||
|
state = NewState,
|
||||||
|
added_channels = NewAddedChannelsMap
|
||||||
|
},
|
||||||
|
update_state(UpdatedData, Data),
|
||||||
|
{keep_state, UpdatedData, [{reply, From, ok}]};
|
||||||
|
{error, Reason} = Error ->
|
||||||
|
%% Log the error as a warning
|
||||||
|
?SLOG(warning, #{
|
||||||
|
msg => remove_channel_failed,
|
||||||
|
id => Data#data.id,
|
||||||
|
channel_id => ChannelId,
|
||||||
|
reason => Reason
|
||||||
|
}),
|
||||||
|
{keep_state_and_data, [{reply, From, Error}]}
|
||||||
|
end.
|
||||||
|
|
||||||
handle_manually_health_check(From, Data) ->
|
handle_manually_health_check(From, Data) ->
|
||||||
with_health_check(
|
with_health_check(
|
||||||
Data,
|
Data,
|
||||||
|
@ -535,6 +698,55 @@ handle_manually_health_check(From, Data) ->
|
||||||
end
|
end
|
||||||
).
|
).
|
||||||
|
|
||||||
|
handle_manually_channel_health_check(From, #data{state = undefined}, _ChannelId) ->
|
||||||
|
{keep_state_and_data, [{reply, From, {ok, disconnected}}]};
|
||||||
|
handle_manually_channel_health_check(
|
||||||
|
From,
|
||||||
|
#data{added_channels = Channels} = Data,
|
||||||
|
ChannelId
|
||||||
|
) when
|
||||||
|
is_map_key(ChannelId, Channels)
|
||||||
|
->
|
||||||
|
{keep_state_and_data, [{reply, From, get_channel_status_channel_added(Data, ChannelId)}]};
|
||||||
|
handle_manually_channel_health_check(
|
||||||
|
From,
|
||||||
|
Data,
|
||||||
|
ChannelId
|
||||||
|
) ->
|
||||||
|
%% add channel
|
||||||
|
ResId = Data#data.id,
|
||||||
|
Mod = Data#data.mod,
|
||||||
|
case emqx_resource:call_get_channel_config(ResId, ChannelId, Mod) of
|
||||||
|
ChannelConfig when is_map(ChannelConfig) ->
|
||||||
|
case add_channel_need_insert_update_data(Data, ChannelId, ChannelConfig) of
|
||||||
|
{ok, UpdatedData} ->
|
||||||
|
{keep_state, UpdatedData, [
|
||||||
|
{reply, From, get_channel_status_channel_added(UpdatedData, ChannelId)}
|
||||||
|
]};
|
||||||
|
{error, Reason} = Error ->
|
||||||
|
%% Log the error as a warning
|
||||||
|
?SLOG(warning, #{
|
||||||
|
msg => add_channel_failed_when_doing_status_check,
|
||||||
|
id => ResId,
|
||||||
|
channel_id => ChannelId,
|
||||||
|
reason => Reason
|
||||||
|
}),
|
||||||
|
{keep_state_and_data, [{reply, From, Error}]}
|
||||||
|
end;
|
||||||
|
{error, Reason} = Error ->
|
||||||
|
%% Log the error as a warning
|
||||||
|
?SLOG(warning, #{
|
||||||
|
msg => get_channel_config_failed_when_doing_status_check,
|
||||||
|
id => ResId,
|
||||||
|
channel_id => ChannelId,
|
||||||
|
reason => Reason
|
||||||
|
}),
|
||||||
|
{keep_state_and_data, [{reply, From, Error}]}
|
||||||
|
end.
|
||||||
|
|
||||||
|
get_channel_status_channel_added(#data{id = ResId, mod = Mod, state = State}, ChannelId) ->
|
||||||
|
emqx_resource:call_channel_health_check(ResId, ChannelId, Mod, State).
|
||||||
|
|
||||||
handle_connecting_health_check(Data) ->
|
handle_connecting_health_check(Data) ->
|
||||||
with_health_check(
|
with_health_check(
|
||||||
Data,
|
Data,
|
||||||
|
@ -663,7 +875,8 @@ data_record_to_external_map(Data) ->
|
||||||
query_mode => Data#data.query_mode,
|
query_mode => Data#data.query_mode,
|
||||||
config => Data#data.config,
|
config => Data#data.config,
|
||||||
status => Data#data.status,
|
status => Data#data.status,
|
||||||
state => Data#data.state
|
state => Data#data.state,
|
||||||
|
added_channels => Data#data.added_channels
|
||||||
}.
|
}.
|
||||||
|
|
||||||
-spec wait_for_ready(resource_id(), integer()) -> ok | timeout | {error, term()}.
|
-spec wait_for_ready(resource_id(), integer()) -> ok | timeout | {error, term()}.
|
||||||
|
|
|
@ -43,6 +43,14 @@
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
%% APIs
|
%% APIs
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
parse_action(BridgeId) when is_binary(BridgeId) ->
|
||||||
|
{Type, Name} = emqx_bridge_resource:parse_bridge_id(BridgeId),
|
||||||
|
case emqx_bridge_v2:is_bridge_v2_type(Type) of
|
||||||
|
true ->
|
||||||
|
{bridge_v2, Type, Name};
|
||||||
|
false ->
|
||||||
|
{bridge, Type, Name, emqx_bridge_resource:resource_id(Type, Name)}
|
||||||
|
end;
|
||||||
parse_action(#{function := ActionFunc} = Action) ->
|
parse_action(#{function := ActionFunc} = Action) ->
|
||||||
{Mod, Func} = parse_action_func(ActionFunc),
|
{Mod, Func} = parse_action_func(ActionFunc),
|
||||||
Res = #{mod => Mod, func => Func},
|
Res = #{mod => Mod, func => Func},
|
||||||
|
|
|
@ -515,11 +515,8 @@ do_delete_rule_index(#{id := Id, from := From}) ->
|
||||||
parse_actions(Actions) ->
|
parse_actions(Actions) ->
|
||||||
[do_parse_action(Act) || Act <- Actions].
|
[do_parse_action(Act) || Act <- Actions].
|
||||||
|
|
||||||
do_parse_action(Action) when is_map(Action) ->
|
do_parse_action(Action) ->
|
||||||
emqx_rule_actions:parse_action(Action);
|
emqx_rule_actions:parse_action(Action).
|
||||||
do_parse_action(BridgeId) when is_binary(BridgeId) ->
|
|
||||||
{Type, Name} = emqx_bridge_resource:parse_bridge_id(BridgeId),
|
|
||||||
{bridge, Type, Name, emqx_bridge_resource:resource_id(Type, Name)}.
|
|
||||||
|
|
||||||
get_all_records(Tab) ->
|
get_all_records(Tab) ->
|
||||||
[Rule#{id => Id} || {Id, Rule} <- ets:tab2list(Tab)].
|
[Rule#{id => Id} || {Id, Rule} <- ets:tab2list(Tab)].
|
||||||
|
|
|
@ -521,6 +521,8 @@ format_action(Actions) ->
|
||||||
|
|
||||||
do_format_action({bridge, BridgeType, BridgeName, _ResId}) ->
|
do_format_action({bridge, BridgeType, BridgeName, _ResId}) ->
|
||||||
emqx_bridge_resource:bridge_id(BridgeType, BridgeName);
|
emqx_bridge_resource:bridge_id(BridgeType, BridgeName);
|
||||||
|
do_format_action({bridge_v2, BridgeType, BridgeName}) ->
|
||||||
|
emqx_bridge_v2:id(BridgeType, BridgeName);
|
||||||
do_format_action(#{mod := Mod, func := Func, args := Args}) ->
|
do_format_action(#{mod := Mod, func := Func, args := Args}) ->
|
||||||
#{
|
#{
|
||||||
function => printable_function_name(Mod, Func),
|
function => printable_function_name(Mod, Func),
|
||||||
|
|
|
@ -361,6 +361,33 @@ do_handle_action(RuleId, {bridge, BridgeType, BridgeName, ResId}, Selected, _Env
|
||||||
Result ->
|
Result ->
|
||||||
Result
|
Result
|
||||||
end;
|
end;
|
||||||
|
do_handle_action(
|
||||||
|
RuleId,
|
||||||
|
{bridge_v2, BridgeType, BridgeName},
|
||||||
|
Selected,
|
||||||
|
_Envs
|
||||||
|
) ->
|
||||||
|
?TRACE(
|
||||||
|
"BRIDGE",
|
||||||
|
"bridge_action",
|
||||||
|
#{bridge_id => {bridge_v2, BridgeType, BridgeName}}
|
||||||
|
),
|
||||||
|
ReplyTo = {fun ?MODULE:inc_action_metrics/2, [RuleId], #{reply_dropped => true}},
|
||||||
|
case
|
||||||
|
emqx_bridge_v2:send_message(
|
||||||
|
BridgeType,
|
||||||
|
BridgeName,
|
||||||
|
Selected,
|
||||||
|
#{reply_to => ReplyTo}
|
||||||
|
)
|
||||||
|
of
|
||||||
|
{error, Reason} when Reason == bridge_not_found; Reason == bridge_stopped ->
|
||||||
|
throw(out_of_service);
|
||||||
|
?RESOURCE_ERROR_M(R, _) when ?IS_RES_DOWN(R) ->
|
||||||
|
throw(out_of_service);
|
||||||
|
Result ->
|
||||||
|
Result
|
||||||
|
end;
|
||||||
do_handle_action(RuleId, #{mod := Mod, func := Func} = Action, Selected, Envs) ->
|
do_handle_action(RuleId, #{mod := Mod, func := Func} = Action, Selected, Envs) ->
|
||||||
%% the function can also throw 'out_of_service'
|
%% the function can also throw 'out_of_service'
|
||||||
Args = maps:get(args, Action, []),
|
Args = maps:get(args, Action, []),
|
||||||
|
|
|
@ -283,6 +283,13 @@ config_enable.desc:
|
||||||
config_enable.label:
|
config_enable.label:
|
||||||
"""Enable or Disable"""
|
"""Enable or Disable"""
|
||||||
|
|
||||||
|
|
||||||
|
config_connector.desc:
|
||||||
|
"""Reference to connector"""
|
||||||
|
|
||||||
|
config_connector.label:
|
||||||
|
"""Connector"""
|
||||||
|
|
||||||
consumer_mqtt_payload.desc:
|
consumer_mqtt_payload.desc:
|
||||||
"""The template for transforming the incoming Kafka message. By default, it will use JSON format to serialize inputs from the Kafka message. Such fields are:
|
"""The template for transforming the incoming Kafka message. By default, it will use JSON format to serialize inputs from the Kafka message. Such fields are:
|
||||||
<code>headers</code>: an object containing string key-value pairs.
|
<code>headers</code>: an object containing string key-value pairs.
|
||||||
|
@ -422,4 +429,11 @@ sync_query_timeout.desc:
|
||||||
sync_query_timeout.label:
|
sync_query_timeout.label:
|
||||||
"""Synchronous Query Timeout"""
|
"""Synchronous Query Timeout"""
|
||||||
|
|
||||||
|
|
||||||
|
kafka_producer_action.desc:
|
||||||
|
"""Kafka Producer Action"""
|
||||||
|
|
||||||
|
kafka_producer_action.label:
|
||||||
|
"""Kafka Producer Action"""
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,9 @@
|
||||||
|
emqx_connector_schema {
|
||||||
|
|
||||||
|
connector_field.desc:
|
||||||
|
"""Name of connector used to connect to the resource where the action is to be performed."""
|
||||||
|
|
||||||
|
connector_field.label:
|
||||||
|
"""Connector"""
|
||||||
|
|
||||||
|
}
|
Loading…
Reference in New Issue